about summary refs log tree commit diff
path: root/src/librustc_codegen_llvm
diff options
context:
space:
mode:
authorDenis Merigoux <denis.merigoux@gmail.com>2018-10-03 13:49:57 +0200
committerEduard-Mihai Burtescu <edy.burt@gmail.com>2018-11-16 15:07:24 +0200
commitc0a428ee702329b0ad818a67a6ecc9617df267c7 (patch)
treec47602d70ccd0f3d54f47e29730e880751afada4 /src/librustc_codegen_llvm
parent915382f7306be7841c4254cee13fa55a865bdd8b (diff)
downloadrust-c0a428ee702329b0ad818a67a6ecc9617df267c7.tar.gz
rust-c0a428ee702329b0ad818a67a6ecc9617df267c7.zip
Great separation of librustc_codegen_llvm: librustc_codegen_ssa compiles
Diffstat (limited to 'src/librustc_codegen_llvm')
-rw-r--r--src/librustc_codegen_llvm/base.rs924
-rw-r--r--src/librustc_codegen_llvm/builder.rs8
-rw-r--r--src/librustc_codegen_llvm/callee.rs32
-rw-r--r--src/librustc_codegen_llvm/common.rs85
-rw-r--r--src/librustc_codegen_llvm/consts.rs52
-rw-r--r--src/librustc_codegen_llvm/context.rs4
-rw-r--r--src/librustc_codegen_llvm/debuginfo/create_scope_map.rs15
-rw-r--r--src/librustc_codegen_llvm/debuginfo/mod.rs71
-rw-r--r--src/librustc_codegen_llvm/debuginfo/source_loc.rs12
-rw-r--r--src/librustc_codegen_llvm/diagnostics.rs33
-rw-r--r--src/librustc_codegen_llvm/glue.rs122
-rw-r--r--src/librustc_codegen_llvm/interfaces/abi.rs23
-rw-r--r--src/librustc_codegen_llvm/interfaces/asm.rs28
-rw-r--r--src/librustc_codegen_llvm/interfaces/builder.rs285
-rw-r--r--src/librustc_codegen_llvm/interfaces/consts.rs64
-rw-r--r--src/librustc_codegen_llvm/interfaces/debuginfo.rs71
-rw-r--r--src/librustc_codegen_llvm/interfaces/intrinsic.rs37
-rw-r--r--src/librustc_codegen_llvm/interfaces/mod.rs72
-rw-r--r--src/librustc_codegen_llvm/interfaces/type_.rs127
-rw-r--r--src/librustc_codegen_llvm/lib.rs34
-rw-r--r--src/librustc_codegen_llvm/meth.rs126
-rw-r--r--src/librustc_codegen_llvm/mir/analyze.rs383
-rw-r--r--src/librustc_codegen_llvm/mir/block.rs1111
-rw-r--r--src/librustc_codegen_llvm/mir/constant.rs161
-rw-r--r--src/librustc_codegen_llvm/mir/mod.rs682
-rw-r--r--src/librustc_codegen_llvm/mir/operand.rs474
-rw-r--r--src/librustc_codegen_llvm/mir/place.rs497
-rw-r--r--src/librustc_codegen_llvm/mir/rvalue.rs998
-rw-r--r--src/librustc_codegen_llvm/mir/statement.rs115
-rw-r--r--src/librustc_codegen_llvm/mono_item.rs98
-rw-r--r--src/librustc_codegen_llvm/type_.rs6
31 files changed, 94 insertions, 6656 deletions
diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs
index 4e69bf8e8b3..bd396e89e07 100644
--- a/src/librustc_codegen_llvm/base.rs
+++ b/src/librustc_codegen_llvm/base.rs
@@ -38,7 +38,7 @@ use rustc::middle::weak_lang_items;
 use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder};
 use rustc::middle::cstore::{EncodedMetadata};
 use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
+use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt};
 use rustc::ty::query::Providers;
 use rustc::middle::cstore::{self, LinkagePreference};
 use rustc::middle::exported_symbols;
@@ -66,7 +66,6 @@ use rustc::util::nodemap::FxHashMap;
 use CrateInfo;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_data_structures::sync::Lrc;
-use rustc_data_structures::indexed_vec::Idx;
 
 use interfaces::*;
 
@@ -88,500 +87,6 @@ use mir::operand::OperandValue;
 
 use rustc_codegen_utils::check_for_rustc_errors_attr;
 
-pub struct StatRecorder<'a, 'tcx, Cx: 'a + CodegenMethods<'tcx>> {
-    cx: &'a Cx,
-    name: Option<String>,
-    istart: usize,
-    _marker: marker::PhantomData<&'tcx ()>,
-}
-
-impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> StatRecorder<'a, 'tcx, Cx> {
-    pub fn new(cx: &'a Cx, name: String) -> Self {
-        let istart = cx.stats().borrow().n_llvm_insns;
-        StatRecorder {
-            cx,
-            name: Some(name),
-            istart,
-            _marker: marker::PhantomData,
-        }
-    }
-}
-
-impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> Drop for StatRecorder<'a, 'tcx, Cx> {
-    fn drop(&mut self) {
-        if self.cx.sess().codegen_stats() {
-            let mut stats = self.cx.stats().borrow_mut();
-            let iend = stats.n_llvm_insns;
-            stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
-            stats.n_fns += 1;
-            // Reset LLVM insn count to avoid compound costs.
-            stats.n_llvm_insns = self.istart;
-        }
-    }
-}
-
-pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind,
-                                signed: bool)
-                                -> IntPredicate {
-    match op {
-        hir::BinOpKind::Eq => IntPredicate::IntEQ,
-        hir::BinOpKind::Ne => IntPredicate::IntNE,
-        hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
-        hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
-        hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
-        hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
-        op => {
-            bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
-                  found {:?}",
-                 op)
-        }
-    }
-}
-
-pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
-    match op {
-        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
-        hir::BinOpKind::Ne => RealPredicate::RealUNE,
-        hir::BinOpKind::Lt => RealPredicate::RealOLT,
-        hir::BinOpKind::Le => RealPredicate::RealOLE,
-        hir::BinOpKind::Gt => RealPredicate::RealOGT,
-        hir::BinOpKind::Ge => RealPredicate::RealOGE,
-        op => {
-            bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
-                  found {:?}",
-                 op);
-        }
-    }
-}
-
-pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    lhs: Bx::Value,
-    rhs: Bx::Value,
-    t: Ty<'tcx>,
-    ret_ty: Bx::Type,
-    op: hir::BinOpKind
-) -> Bx::Value {
-    let signed = match t.sty {
-        ty::Float(_) => {
-            let cmp = bin_op_to_fcmp_predicate(op);
-            return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
-        },
-        ty::Uint(_) => false,
-        ty::Int(_) => true,
-        _ => bug!("compare_simd_types: invalid SIMD type"),
-    };
-
-    let cmp = bin_op_to_icmp_predicate(op, signed);
-    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
-    // to get the correctly sized type. This will compile to a single instruction
-    // once the IR is converted to assembly if the SIMD instruction is supported
-    // by the target architecture.
-    bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
-}
-
-/// Retrieve the information we are losing (making dynamic) in an unsizing
-/// adjustment.
-///
-/// The `old_info` argument is a bit funny. It is intended for use
-/// in an upcast, where the new vtable for an object will be derived
-/// from the old one.
-pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
-    cx: &Cx,
-    source: Ty<'tcx>,
-    target: Ty<'tcx>,
-    old_info: Option<Cx::Value>,
-) -> Cx::Value {
-    let (source, target) = cx.tcx().struct_lockstep_tails(source, target);
-    match (&source.sty, &target.sty) {
-        (&ty::Array(_, len), &ty::Slice(_)) => {
-            cx.const_usize(len.unwrap_usize(cx.tcx()))
-        }
-        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
-            // For now, upcasts are limited to changes in marker
-            // traits, and hence never actually require an actual
-            // change to the vtable.
-            old_info.expect("unsized_info: missing old info for trait upcast")
-        }
-        (_, &ty::Dynamic(ref data, ..)) => {
-            let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
-                .field(cx, abi::FAT_PTR_EXTRA);
-            cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()),
-                            cx.backend_type(vtable_ptr))
-        }
-        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
-                  source,
-                  target),
-    }
-}
-
-/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
-pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    src: Bx::Value,
-    src_ty: Ty<'tcx>,
-    dst_ty: Ty<'tcx>
-) -> (Bx::Value, Bx::Value) {
-    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
-    match (&src_ty.sty, &dst_ty.sty) {
-        (&ty::Ref(_, a, _),
-         &ty::Ref(_, b, _)) |
-        (&ty::Ref(_, a, _),
-         &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
-        (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
-         &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            assert!(bx.cx().type_is_sized(a));
-            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
-            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
-        }
-        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
-            let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
-            assert!(bx.cx().type_is_sized(a));
-            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
-            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
-        }
-        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-            assert_eq!(def_a, def_b);
-
-            let src_layout = bx.cx().layout_of(src_ty);
-            let dst_layout = bx.cx().layout_of(dst_ty);
-            let mut result = None;
-            for i in 0..src_layout.fields.count() {
-                let src_f = src_layout.field(bx.cx(), i);
-                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
-                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
-                if src_f.is_zst() {
-                    continue;
-                }
-                assert_eq!(src_layout.size, src_f.size);
-
-                let dst_f = dst_layout.field(bx.cx(), i);
-                assert_ne!(src_f.ty, dst_f.ty);
-                assert_eq!(result, None);
-                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
-            }
-            let (lldata, llextra) = result.unwrap();
-            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-            (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
-             bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)))
-        }
-        _ => bug!("unsize_thin_ptr: called on bad types"),
-    }
-}
-
-/// Coerce `src`, which is a reference to a value of type `src_ty`,
-/// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    src: PlaceRef<'tcx, Bx::Value>,
-    dst: PlaceRef<'tcx, Bx::Value>
-)  {
-    let src_ty = src.layout.ty;
-    let dst_ty = dst.layout.ty;
-    let coerce_ptr = || {
-        let (base, info) = match bx.load_operand(src).val {
-            OperandValue::Pair(base, info) => {
-                // fat-ptr to fat-ptr unsize preserves the vtable
-                // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
-                // So we need to pointercast the base to ensure
-                // the types match up.
-                let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR);
-                (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
-            }
-            OperandValue::Immediate(base) => {
-                unsize_thin_ptr(bx, base, src_ty, dst_ty)
-            }
-            OperandValue::Ref(..) => bug!()
-        };
-        OperandValue::Pair(base, info).store(bx, dst);
-    };
-    match (&src_ty.sty, &dst_ty.sty) {
-        (&ty::Ref(..), &ty::Ref(..)) |
-        (&ty::Ref(..), &ty::RawPtr(..)) |
-        (&ty::RawPtr(..), &ty::RawPtr(..)) => {
-            coerce_ptr()
-        }
-        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
-            coerce_ptr()
-        }
-
-        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-            assert_eq!(def_a, def_b);
-
-            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
-                let src_f = src.project_field(bx, i);
-                let dst_f = dst.project_field(bx, i);
-
-                if dst_f.layout.is_zst() {
-                    continue;
-                }
-
-                if src_f.layout.ty == dst_f.layout.ty {
-                    memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align,
-                              src_f.layout, MemFlags::empty());
-                } else {
-                    coerce_unsized_into(bx, src_f, dst_f);
-                }
-            }
-        }
-        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
-                  src_ty,
-                  dst_ty),
-    }
-}
-
-pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    op: hir::BinOpKind,
-    lhs: Bx::Value,
-    rhs: Bx::Value
-) -> Bx::Value {
-    cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b))
-}
-
-fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    op: hir::BinOpKind,
-    lhs: Bx::Value,
-    rhs: Bx::Value,
-    trunc: F,
-    zext: G
-) -> Bx::Value
-    where F: FnOnce(
-        Bx::Value,
-        Bx::Type
-    ) -> Bx::Value,
-    G: FnOnce(
-        Bx::Value,
-        Bx::Type
-    ) -> Bx::Value
-{
-    // Shifts may have any size int on the rhs
-    if op.is_shift() {
-        let mut rhs_llty = bx.cx().val_ty(rhs);
-        let mut lhs_llty = bx.cx().val_ty(lhs);
-        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
-            rhs_llty = bx.cx().element_type(rhs_llty)
-        }
-        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
-            lhs_llty = bx.cx().element_type(lhs_llty)
-        }
-        let rhs_sz = bx.cx().int_width(rhs_llty);
-        let lhs_sz = bx.cx().int_width(lhs_llty);
-        if lhs_sz < rhs_sz {
-            trunc(rhs, lhs_llty)
-        } else if lhs_sz > rhs_sz {
-            // FIXME (#1877: If in the future shifting by negative
-            // values is no longer undefined then this is wrong.
-            zext(rhs, lhs_llty)
-        } else {
-            rhs
-        }
-    } else {
-        rhs
-    }
-}
-
-/// Returns whether this session's target will use SEH-based unwinding.
-///
-/// This is only true for MSVC targets, and even then the 64-bit MSVC target
-/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
-/// 64-bit MinGW) instead of "full SEH".
-pub fn wants_msvc_seh(sess: &Session) -> bool {
-    sess.target.target.options.is_like_msvc
-}
-
-pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    val: Bx::Value
-) {
-    let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
-    bx.call(assume_intrinsic, &[val], None);
-}
-
-pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    val: Bx::Value
-) -> Bx::Value {
-    if bx.cx().val_ty(val) == bx.cx().type_i1() {
-        bx.zext(val, bx.cx().type_i8())
-    } else {
-        val
-    }
-}
-
-pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    val: Bx::Value,
-    layout: layout::TyLayout,
-) -> Bx::Value {
-    if let layout::Abi::Scalar(ref scalar) = layout.abi {
-        return to_immediate_scalar(bx, val, scalar);
-    }
-    val
-}
-
-pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    val: Bx::Value,
-    scalar: &layout::Scalar,
-) -> Bx::Value {
-    if scalar.is_bool() {
-        return bx.trunc(val, bx.cx().type_i1());
-    }
-    val
-}
-
-pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    dst: Bx::Value,
-    dst_align: Align,
-    src: Bx::Value,
-    src_align: Align,
-    layout: TyLayout<'tcx>,
-    flags: MemFlags,
-) {
-    let size = layout.size.bytes();
-    if size == 0 {
-        return;
-    }
-
-    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
-}
-
-pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    cx: &'a Bx::CodegenCx,
-    instance: Instance<'tcx>,
-) {
-    let _s = if cx.sess().codegen_stats() {
-        let mut instance_name = String::new();
-        DefPathBasedNames::new(cx.tcx(), true, true)
-            .push_def_path(instance.def_id(), &mut instance_name);
-        Some(StatRecorder::new(cx, instance_name))
-    } else {
-        None
-    };
-
-    // this is an info! to allow collecting monomorphization statistics
-    // and to allow finding the last function before LLVM aborts from
-    // release builds.
-    info!("codegen_instance({})", instance);
-
-    let sig = instance.fn_sig(cx.tcx());
-    let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
-
-    let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(||
-        bug!("Instance `{:?}` not already declared", instance));
-
-    cx.stats().borrow_mut().n_closures += 1;
-
-    let mir = cx.tcx().instance_mir(instance.def);
-    mir::codegen_mir::<Bx>(cx, lldecl, &mir, instance, sig);
-}
-
-pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
-    let sect = match attrs.link_section {
-        Some(name) => name,
-        None => return,
-    };
-    unsafe {
-        let buf = SmallCStr::new(&sect.as_str());
-        llvm::LLVMSetSection(llval, buf.as_ptr());
-    }
-}
-
-/// Create the `main` function which will initialize the rust runtime and call
-/// users main function.
-fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    cx: &'a Bx::CodegenCx
-) {
-    let (main_def_id, span) = match *cx.sess().entry_fn.borrow() {
-        Some((id, span, _)) => {
-            (cx.tcx().hir.local_def_id(id), span)
-        }
-        None => return,
-    };
-
-    let instance = Instance::mono(cx.tcx(), main_def_id);
-
-    if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
-        // We want to create the wrapper in the same codegen unit as Rust's main
-        // function.
-        return;
-    }
-
-    let main_llfn = cx.get_fn(instance);
-
-    let et = cx.sess().entry_fn.get().map(|e| e.2);
-    match et {
-        Some(EntryFnType::Main) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, true),
-        Some(EntryFnType::Start) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, false),
-        None => {}    // Do nothing.
-    }
-
-    fn create_entry_fn<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-        cx: &'a Bx::CodegenCx,
-        sp: Span,
-        rust_main: Bx::Value,
-        rust_main_def_id: DefId,
-        use_start_lang_item: bool,
-    ) {
-        let llfty =
-            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
-
-        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
-        // Given that `main()` has no arguments,
-        // then its return type cannot have
-        // late-bound regions, since late-bound
-        // regions must appear in the argument
-        // listing.
-        let main_ret_ty = cx.tcx().erase_regions(
-            &main_ret_ty.no_bound_vars().unwrap(),
-        );
-
-        if cx.get_defined_value("main").is_some() {
-            // FIXME: We should be smart and show a better diagnostic here.
-            cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
-                     .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
-                     .emit();
-            cx.sess().abort_if_errors();
-            bug!();
-        }
-        let llfn = cx.declare_cfn("main", llfty);
-
-        // `main` should respect same config for frame pointer elimination as rest of code
-        cx.set_frame_pointer_elimination(llfn);
-        cx.apply_target_cpu_attr(llfn);
-
-        let bx = Bx::new_block(&cx, llfn, "top");
-
-        bx.insert_reference_to_gdb_debug_scripts_section_global();
-
-        // Params from native main() used as args for rust start function
-        let param_argc = cx.get_param(llfn, 0);
-        let param_argv = cx.get_param(llfn, 1);
-        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
-        let arg_argv = param_argv;
-
-        let (start_fn, args) = if use_start_lang_item {
-            let start_def_id = cx.tcx().require_lang_item(StartFnLangItem);
-            let start_fn = callee::resolve_and_get_fn(
-                cx,
-                start_def_id,
-                cx.tcx().intern_substs(&[main_ret_ty.into()]),
-            );
-            (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
-                            arg_argc, arg_argv])
-        } else {
-            debug!("using user-defined start fn");
-            (rust_main, vec![arg_argc, arg_argv])
-        };
-
-        let result = bx.call(start_fn, &args, None);
-        bx.ret(bx.intcast(result, cx.type_int(), true));
-    }
-}
-
 pub(crate) fn write_metadata<'a, 'gcx>(
     tcx: TyCtxt<'a, 'gcx, 'gcx>,
     llvm_module: &ModuleLlvm
@@ -675,397 +180,6 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
     }
 }
 
-fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                                 cgu: &CodegenUnit<'tcx>)
-                                 -> CguReuse {
-    if !tcx.dep_graph.is_fully_enabled() {
-        return CguReuse::No
-    }
-
-    let work_product_id = &cgu.work_product_id();
-    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
-        // We don't have anything cached for this CGU. This can happen
-        // if the CGU did not exist in the previous session.
-        return CguReuse::No
-    }
-
-    // Try to mark the CGU as green. If it we can do so, it means that nothing
-    // affecting the LLVM module has changed and we can re-use a cached version.
-    // If we compile with any kind of LTO, this means we can re-use the bitcode
-    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
-    // know that later). If we are not doing LTO, there is only one optimized
-    // version of each module, so we re-use that.
-    let dep_node = cgu.codegen_dep_node(tcx);
-    assert!(!tcx.dep_graph.dep_node_exists(&dep_node),
-        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
-        cgu.name());
-
-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
-        // We can re-use either the pre- or the post-thinlto state
-        if tcx.sess.lto() != Lto::No {
-            CguReuse::PreLto
-        } else {
-            CguReuse::PostLto
-        }
-    } else {
-        CguReuse::No
-    }
-}
-
-pub fn codegen_crate<B: BackendMethods>(
-    backend: B,
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    rx: mpsc::Receiver<Box<dyn Any + Send>>
-) -> B::OngoingCodegen {
-
-    check_for_rustc_errors_attr(tcx);
-
-    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
-
-    // Codegen the metadata.
-    tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen));
-
-    let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
-                                                            &["crate"],
-                                                            Some("metadata")).as_str()
-                                                                             .to_string();
-    let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name);
-    let metadata = time(tcx.sess, "write metadata", || {
-        backend.write_metadata(tcx, &metadata_llvm_module)
-    });
-    tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen));
-
-    let metadata_module = ModuleCodegen {
-        name: metadata_cgu_name,
-        module_llvm: metadata_llvm_module,
-        kind: ModuleKind::Metadata,
-    };
-
-    let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph {
-        Some(time_graph::TimeGraph::new())
-    } else {
-        None
-    };
-
-    // Skip crate items and just output metadata in -Z no-codegen mode.
-    if tcx.sess.opts.debugging_opts.no_codegen ||
-       !tcx.sess.opts.output_types.should_codegen() {
-        let ongoing_codegen = backend.start_async_codegen(
-            tcx,
-            time_graph,
-            metadata,
-            rx,
-            1);
-
-        backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module);
-        backend.codegen_finished(&ongoing_codegen, tcx);
-
-        assert_and_save_dep_graph(tcx);
-
-        backend.check_for_errors(&ongoing_codegen, tcx.sess);
-
-        return ongoing_codegen;
-    }
-
-    // Run the monomorphization collector and partition the collected items into
-    // codegen units.
-    let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
-    let codegen_units = (*codegen_units).clone();
-
-    // Force all codegen_unit queries so they are already either red or green
-    // when compile_codegen_unit accesses them. We are not able to re-execute
-    // the codegen_unit query from just the DepNode, so an unknown color would
-    // lead to having to re-execute compile_codegen_unit, possibly
-    // unnecessarily.
-    if tcx.dep_graph.is_fully_enabled() {
-        for cgu in &codegen_units {
-            tcx.codegen_unit(cgu.name().clone());
-        }
-    }
-
-    let ongoing_codegen = backend.start_async_codegen(
-        tcx,
-        time_graph.clone(),
-        metadata,
-        rx,
-        codegen_units.len());
-    let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
-
-    // Codegen an allocator shim, if necessary.
-    //
-    // If the crate doesn't have an `allocator_kind` set then there's definitely
-    // no shim to generate. Otherwise we also check our dependency graph for all
-    // our output crate types. If anything there looks like its a `Dynamic`
-    // linkage, then it's already got an allocator shim and we'll be using that
-    // one instead. If nothing exists then it's our job to generate the
-    // allocator!
-    let any_dynamic_crate = tcx.sess.dependency_formats.borrow()
-        .iter()
-        .any(|(_, list)| {
-            use rustc::middle::dependency_format::Linkage;
-            list.iter().any(|&linkage| linkage == Linkage::Dynamic)
-        });
-    let allocator_module = if any_dynamic_crate {
-        None
-    } else if let Some(kind) = *tcx.sess.allocator_kind.get() {
-        let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
-                                                       &["crate"],
-                                                       Some("allocator")).as_str()
-                                                                         .to_string();
-        let modules = backend.new_metadata(tcx.sess, &llmod_id);
-        time(tcx.sess, "write allocator module", || {
-            backend.codegen_allocator(tcx, &modules, kind)
-        });
-
-        Some(ModuleCodegen {
-            name: llmod_id,
-            module_llvm: modules,
-            kind: ModuleKind::Allocator,
-        })
-    } else {
-        None
-    };
-
-    if let Some(allocator_module) = allocator_module {
-        backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, allocator_module);
-    }
-
-    backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module);
-
-    // We sort the codegen units by size. This way we can schedule work for LLVM
-    // a bit more efficiently.
-    let codegen_units = {
-        let mut codegen_units = codegen_units;
-        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
-        codegen_units
-    };
-
-    let mut total_codegen_time = Duration::new(0, 0);
-    let mut all_stats = Stats::default();
-
-    for cgu in codegen_units.into_iter() {
-        backend.wait_for_signal_to_codegen_item(&ongoing_codegen);
-        backend.check_for_errors(&ongoing_codegen, tcx.sess);
-
-        let cgu_reuse = determine_cgu_reuse(tcx, &cgu);
-        tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
-
-        match cgu_reuse {
-            CguReuse::No => {
-                let _timing_guard = time_graph.as_ref().map(|time_graph| {
-                    time_graph.start(write::CODEGEN_WORKER_TIMELINE,
-                                     write::CODEGEN_WORK_PACKAGE_KIND,
-                                     &format!("codegen {}", cgu.name()))
-                });
-                let start_time = Instant::now();
-                let stats = backend.compile_codegen_unit(tcx, *cgu.name());
-                all_stats.extend(stats);
-                total_codegen_time += start_time.elapsed();
-                false
-            }
-            CguReuse::PreLto => {
-                write::submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen {
-                    name: cgu.name().to_string(),
-                    source: cgu.work_product(tcx),
-                });
-                true
-            }
-            CguReuse::PostLto => {
-                write::submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen {
-                    name: cgu.name().to_string(),
-                    source: cgu.work_product(tcx),
-                });
-                true
-            }
-        };
-    }
-
-    backend.codegen_finished(&ongoing_codegen, tcx);
-
-    // Since the main thread is sometimes blocked during codegen, we keep track
-    // -Ztime-passes output manually.
-    print_time_passes_entry(tcx.sess.time_passes(),
-                            "codegen to LLVM IR",
-                            total_codegen_time);
-
-    rustc_incremental::assert_module_sources::assert_module_sources(tcx);
-
-    symbol_names_test::report_symbol_names(tcx);
-
-    if tcx.sess.codegen_stats() {
-        println!("--- codegen stats ---");
-        println!("n_glues_created: {}", all_stats.n_glues_created);
-        println!("n_null_glues: {}", all_stats.n_null_glues);
-        println!("n_real_glues: {}", all_stats.n_real_glues);
-
-        println!("n_fns: {}", all_stats.n_fns);
-        println!("n_inlines: {}", all_stats.n_inlines);
-        println!("n_closures: {}", all_stats.n_closures);
-        println!("fn stats:");
-        all_stats.fn_stats.sort_by_key(|&(_, insns)| insns);
-        for &(ref name, insns) in all_stats.fn_stats.iter() {
-            println!("{} insns, {}", insns, *name);
-        }
-    }
-
-    if tcx.sess.count_llvm_insns() {
-        for (k, v) in all_stats.llvm_insns.iter() {
-            println!("{:7} {}", *v, *k);
-        }
-    }
-
-    backend.check_for_errors(&ongoing_codegen, tcx.sess);
-
-    assert_and_save_dep_graph(tcx);
-    ongoing_codegen.into_inner()
-}
-
-/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
-/// when it's dropped abnormally.
-///
-/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
-/// stumbled upon. The segfault was never reproduced locally, but it was
-/// suspected to be related to the fact that codegen worker threads were
-/// sticking around by the time the main thread was exiting, causing issues.
-///
-/// This structure is an attempt to fix that issue where the `codegen_aborted`
-/// message will block until all workers have finished. This should ensure that
-/// even if the main codegen thread panics we'll wait for pending work to
-/// complete before returning from the main thread, hopefully avoiding
-/// segfaults.
-///
-/// If you see this comment in the code, then it means that this workaround
-/// worked! We may yet one day track down the mysterious cause of that
-/// segfault...
-struct AbortCodegenOnDrop<B: BackendMethods>(Option<B::OngoingCodegen>);
-
-impl<B: BackendMethods> AbortCodegenOnDrop<B> {
-    fn into_inner(mut self) -> B::OngoingCodegen {
-        self.0.take().unwrap()
-    }
-}
-
-impl<B: BackendMethods> Deref for AbortCodegenOnDrop<B> {
-    type Target = B::OngoingCodegen;
-
-    fn deref(&self) -> &B::OngoingCodegen {
-        self.0.as_ref().unwrap()
-    }
-}
-
-impl<B: BackendMethods> DerefMut for AbortCodegenOnDrop<B> {
-    fn deref_mut(&mut self) -> &mut B::OngoingCodegen {
-        self.0.as_mut().unwrap()
-    }
-}
-
-impl<B: BackendMethods> Drop for AbortCodegenOnDrop<B> {
-    fn drop(&mut self) {
-        if let Some(codegen) = self.0.take() {
-            B::codegen_aborted(codegen);
-        }
-    }
-}
-
-fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) {
-    time(tcx.sess,
-         "assert dep graph",
-         || rustc_incremental::assert_dep_graph(tcx));
-
-    time(tcx.sess,
-         "serialize dep graph",
-         || rustc_incremental::save_dep_graph(tcx));
-}
-
-impl CrateInfo {
-    pub fn new(tcx: TyCtxt) -> CrateInfo {
-        let mut info = CrateInfo {
-            panic_runtime: None,
-            compiler_builtins: None,
-            profiler_runtime: None,
-            sanitizer_runtime: None,
-            is_no_builtins: Default::default(),
-            native_libraries: Default::default(),
-            used_libraries: tcx.native_libraries(LOCAL_CRATE),
-            link_args: tcx.link_args(LOCAL_CRATE),
-            crate_name: Default::default(),
-            used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
-            used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
-            used_crate_source: Default::default(),
-            wasm_imports: Default::default(),
-            lang_item_to_crate: Default::default(),
-            missing_lang_items: Default::default(),
-        };
-        let lang_items = tcx.lang_items();
-
-        let load_wasm_items = tcx.sess.crate_types.borrow()
-            .iter()
-            .any(|c| *c != config::CrateType::Rlib) &&
-            tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown";
-
-        if load_wasm_items {
-            info.load_wasm_imports(tcx, LOCAL_CRATE);
-        }
-
-        let crates = tcx.crates();
-
-        let n_crates = crates.len();
-        info.native_libraries.reserve(n_crates);
-        info.crate_name.reserve(n_crates);
-        info.used_crate_source.reserve(n_crates);
-        info.missing_lang_items.reserve(n_crates);
-
-        for &cnum in crates.iter() {
-            info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
-            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
-            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
-            if tcx.is_panic_runtime(cnum) {
-                info.panic_runtime = Some(cnum);
-            }
-            if tcx.is_compiler_builtins(cnum) {
-                info.compiler_builtins = Some(cnum);
-            }
-            if tcx.is_profiler_runtime(cnum) {
-                info.profiler_runtime = Some(cnum);
-            }
-            if tcx.is_sanitizer_runtime(cnum) {
-                info.sanitizer_runtime = Some(cnum);
-            }
-            if tcx.is_no_builtins(cnum) {
-                info.is_no_builtins.insert(cnum);
-            }
-            if load_wasm_items {
-                info.load_wasm_imports(tcx, cnum);
-            }
-            let missing = tcx.missing_lang_items(cnum);
-            for &item in missing.iter() {
-                if let Ok(id) = lang_items.require(item) {
-                    info.lang_item_to_crate.insert(item, id.krate);
-                }
-            }
-
-            // No need to look for lang items that are whitelisted and don't
-            // actually need to exist.
-            let missing = missing.iter()
-                .cloned()
-                .filter(|&l| !weak_lang_items::whitelisted(tcx, l))
-                .collect();
-            info.missing_lang_items.insert(cnum, missing);
-        }
-
-        return info
-    }
-
-    fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) {
-        self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| {
-            let instance = Instance::mono(tcx, id);
-            let import_name = tcx.symbol_name(instance);
-
-            (import_name.to_string(), module.clone())
-        }));
-    }
-}
-
 pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
                                   cgu_name: InternedString)
                                   -> Stats {
@@ -1141,35 +255,15 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
     }
 }
 
-pub fn provide_both(providers: &mut Providers) {
-    providers.dllimport_foreign_items = |tcx, krate| {
-        let module_map = tcx.foreign_modules(krate);
-        let module_map = module_map.iter()
-            .map(|lib| (lib.def_id, lib))
-            .collect::<FxHashMap<_, _>>();
-
-        let dllimports = tcx.native_libraries(krate)
-            .iter()
-            .filter(|lib| {
-                if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
-                    return false
-                }
-                let cfg = match lib.cfg {
-                    Some(ref cfg) => cfg,
-                    None => return true,
-                };
-                attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
-            })
-            .filter_map(|lib| lib.foreign_module)
-            .map(|id| &module_map[&id])
-            .flat_map(|module| module.foreign_items.iter().cloned())
-            .collect();
-        Lrc::new(dllimports)
-    };
-
-    providers.is_dllimport_foreign_item = |tcx, def_id| {
-        tcx.dllimport_foreign_items(def_id.krate).contains(&def_id)
+pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
+    let sect = match attrs.link_section {
+        Some(name) => name,
+        None => return,
     };
+    unsafe {
+        let buf = SmallCStr::new(&sect.as_str());
+        llvm::LLVMSetSection(llval, buf.as_ptr());
+    }
 }
 
 pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index efd2e146f35..40fddef8be3 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -53,14 +53,6 @@ fn noname() -> *const c_char {
     &CNULL
 }
 
-bitflags! {
-    pub struct MemFlags: u8 {
-        const VOLATILE = 1 << 0;
-        const NONTEMPORAL = 1 << 1;
-        const UNALIGNED = 1 << 2;
-    }
-}
-
 impl BackendTypes for Builder<'_, 'll, 'tcx> {
     type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
     type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs
index e049a43fd6a..d5b398d8a33 100644
--- a/src/librustc_codegen_llvm/callee.rs
+++ b/src/librustc_codegen_llvm/callee.rs
@@ -202,35 +202,3 @@ pub fn get_fn(
 
     llfn
 }
-
-pub fn resolve_and_get_fn<'tcx, Cx: CodegenMethods<'tcx>>(
-    cx: &Cx,
-    def_id: DefId,
-    substs: &'tcx Substs<'tcx>,
-) -> Cx::Value {
-    cx.get_fn(
-        ty::Instance::resolve(
-            cx.tcx(),
-            ty::ParamEnv::reveal_all(),
-            def_id,
-            substs
-        ).unwrap()
-    )
-}
-
-pub fn resolve_and_get_fn_for_vtable<'tcx,
-    Cx: Backend<'tcx> + MiscMethods<'tcx> + TypeMethods<'tcx>
->(
-    cx: &Cx,
-    def_id: DefId,
-    substs: &'tcx Substs<'tcx>,
-) -> Cx::Value {
-    cx.get_fn(
-        ty::Instance::resolve_for_vtable(
-            cx.tcx(),
-            ty::ParamEnv::reveal_all(),
-            def_id,
-            substs
-        ).unwrap()
-    )
-}
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs
index 7dc4b00f794..2211ec5247f 100644
--- a/src/librustc_codegen_llvm/common.rs
+++ b/src/librustc_codegen_llvm/common.rs
@@ -405,88 +405,3 @@ pub fn struct_in_context(
 fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
     ((hi as u128) << 64) | (lo as u128)
 }
-
-pub fn langcall(tcx: TyCtxt,
-                span: Option<Span>,
-                msg: &str,
-                li: LangItem)
-                -> DefId {
-    tcx.lang_items().require(li).unwrap_or_else(|s| {
-        let msg = format!("{} {}", msg, s);
-        match span {
-            Some(span) => tcx.sess.span_fatal(span, &msg[..]),
-            None => tcx.sess.fatal(&msg[..]),
-        }
-    })
-}
-
-// To avoid UB from LLVM, these two functions mask RHS with an
-// appropriate mask unconditionally (i.e. the fallback behavior for
-// all shifts). For 32- and 64-bit types, this matches the semantics
-// of Java. (See related discussion on #1877 and #10183.)
-
-pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    lhs: Bx::Value,
-    rhs: Bx::Value
-) -> Bx::Value {
-    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
-    // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bx, rhs);
-    bx.shl(lhs, rhs)
-}
-
-pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    lhs_t: Ty<'tcx>,
-    lhs: Bx::Value,
-    rhs: Bx::Value
-) -> Bx::Value {
-    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
-    // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bx, rhs);
-    let is_signed = lhs_t.is_signed();
-    if is_signed {
-        bx.ashr(lhs, rhs)
-    } else {
-        bx.lshr(lhs, rhs)
-    }
-}
-
-fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    rhs: Bx::Value
-) -> Bx::Value {
-    let rhs_llty = bx.cx().val_ty(rhs);
-    bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
-}
-
-pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    llty: Bx::Type,
-    mask_llty: Bx::Type,
-    invert: bool
-) -> Bx::Value {
-    let kind = bx.cx().type_kind(llty);
-    match kind {
-        TypeKind::Integer => {
-            // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
-            let val = bx.cx().int_width(llty) - 1;
-            if invert {
-                bx.cx().const_int(mask_llty, !val as i64)
-            } else {
-                bx.cx().const_uint(mask_llty, val)
-            }
-        },
-        TypeKind::Vector => {
-            let mask = shift_mask_val(
-                bx,
-                bx.cx().element_type(llty),
-                bx.cx().element_type(mask_llty),
-                invert
-            );
-            bx.vector_splat(bx.cx().vector_length(mask_llty), mask)
-        },
-        _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
-    }
-}
diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs
index cbbda28994b..8a5a817f574 100644
--- a/src/librustc_codegen_llvm/consts.rs
+++ b/src/librustc_codegen_llvm/consts.rs
@@ -31,6 +31,58 @@ use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
 
 use std::ffi::{CStr, CString};
 
+pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
+    let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
+    let dl = cx.data_layout();
+    let pointer_size = dl.pointer_size.bytes() as usize;
+
+    let mut next_offset = 0;
+    for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
+        let offset = offset.bytes();
+        assert_eq!(offset as usize as u64, offset);
+        let offset = offset as usize;
+        if offset > next_offset {
+            llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
+        }
+        let ptr_offset = read_target_uint(
+            dl.endian,
+            &alloc.bytes[offset..(offset + pointer_size)],
+        ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
+        llvals.push(cx.scalar_to_backend(
+            Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
+            &layout::Scalar {
+                value: layout::Primitive::Pointer,
+                valid_range: 0..=!0
+            },
+            cx.type_i8p()
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.bytes.len() >= next_offset {
+        llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
+    }
+
+    cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer(
+    cx: &CodegenCx<'ll, 'tcx>,
+    def_id: DefId,
+) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
+    let instance = ty::Instance::mono(cx.tcx, def_id);
+    let cid = GlobalId {
+        instance,
+        promoted: None,
+    };
+    let param_env = ty::ParamEnv::reveal_all();
+    let static_ = cx.tcx.const_eval(param_env.and(cid))?;
+
+    let alloc = match static_.val {
+        ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc,
+        _ => bug!("static const eval returned {:#?}", static_),
+    };
+    Ok((const_alloc_to_llvm(cx, alloc), alloc))
+}
 
 fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
                         gv: &'ll Value,
diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs
index b0d153f8efc..3eb9728e90c 100644
--- a/src/librustc_codegen_llvm/context.rs
+++ b/src/librustc_codegen_llvm/context.rs
@@ -10,6 +10,7 @@
 
 use attributes;
 use llvm;
+use llvm_util;
 use rustc::dep_graph::DepGraphSafe;
 use rustc::hir;
 use debuginfo;
@@ -445,6 +446,9 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         attributes::apply_target_cpu_attr(self, llfn)
     }
 
+    // Old-style (LLVM < 6) debuginfo requires closure environment
+    // variables to be spilled and accessed indirectly.
+    fn closure_env_needs_indirect_debuginfo(&self) -> bool {
+        llvm_util::get_major_version() < 6
+    }
 
     fn create_used_variable(&self) {
         let name = const_cstr!("llvm.used");
diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs
index 0fd5f7fb8cd..8eb266f8069 100644
--- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs
+++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs
@@ -26,21 +26,6 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec};
 
 use syntax_pos::BytePos;
 
-#[derive(Clone, Copy, Debug)]
-pub struct MirDebugScope<D> {
-    pub scope_metadata: Option<D>,
-    // Start and end offsets of the file to which this DIScope belongs.
-    // These are used to quickly determine whether some span refers to the same file.
-    pub file_start_pos: BytePos,
-    pub file_end_pos: BytePos,
-}
-
-impl<D> MirDebugScope<D> {
-    pub fn is_valid(&self) -> bool {
-        self.scope_metadata.is_some()
-    }
-}
-
 /// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
 /// If debuginfo is disabled, the returned vector is empty.
 pub fn create_mir_scopes(
diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs
index 8ef7350747d..ddd346e99d5 100644
--- a/src/librustc_codegen_llvm/debuginfo/mod.rs
+++ b/src/librustc_codegen_llvm/debuginfo/mod.rs
@@ -111,54 +111,6 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
     }
 }
 
-pub enum FunctionDebugContext<D> {
-    RegularContext(FunctionDebugContextData<D>),
-    DebugInfoDisabled,
-    FunctionWithoutDebugInfo,
-}
-
-impl<D> FunctionDebugContext<D> {
-    pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<D> {
-        match *self {
-            FunctionDebugContext::RegularContext(ref data) => data,
-            FunctionDebugContext::DebugInfoDisabled => {
-                span_bug!(span, "{}", Self::debuginfo_disabled_message());
-            }
-            FunctionDebugContext::FunctionWithoutDebugInfo => {
-                span_bug!(span, "{}", Self::should_be_ignored_message());
-            }
-        }
-    }
-
-    fn debuginfo_disabled_message() -> &'static str {
-        "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
-    }
-
-    fn should_be_ignored_message() -> &'static str {
-        "debuginfo: Error trying to access FunctionDebugContext for function that should be \
-         ignored by debug info!"
-    }
-}
-
-pub struct FunctionDebugContextData<D> {
-    fn_metadata: D,
-    source_locations_enabled: Cell<bool>,
-    pub defining_crate: CrateNum,
-}
-
-pub enum VariableAccess<'a, V> {
-    // The llptr given is an alloca containing the variable's value
-    DirectVariable { alloca: V },
-    // The llptr given is an alloca containing the start of some pointer chain
-    // leading to the variable's content.
-    IndirectVariable { alloca: V, address_operations: &'a [i64] }
-}
-
-pub enum VariableKind {
-    ArgumentVariable(usize /*index*/),
-    LocalVariable,
-}
-
 /// Create any deferred debug metadata nodes
 pub fn finalize(cx: &CodegenCx) {
     if cx.dbg_cx.is_none() {
@@ -578,15 +530,24 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn extend_scope_to_file(
-        &self,
-        scope_metadata: &'ll DIScope,
-        file: &syntax_pos::SourceFile,
-        defining_crate: CrateNum,
-    ) -> &'ll DILexicalBlock {
-        metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
-    }
+        &self,
+        scope_metadata: &'ll DIScope,
+        file: &syntax_pos::SourceFile,
+        defining_crate: CrateNum,
+    ) -> &'ll DILexicalBlock {
+        metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
+    }
 
     fn debuginfo_finalize(&self) {
         finalize(self)
     }
+
+    // DWARF expression ops to dereference the closure-env pointer and
+    // offset to the captured variable. Returned by value: the ops come
+    // from runtime FFI calls, so a borrowed slice would point at a
+    // temporary. NOTE(review): trait decl (in rustc_codegen_ssa) must
+    // declare `-> [i64; 4]` to match — confirm against interfaces.
+    fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4] {
+        unsafe {
+            [llvm::LLVMRustDIBuilderCreateOpDeref(),
+             llvm::LLVMRustDIBuilderCreateOpPlusUconst(),
+             byte_offset_of_var_in_env as i64,
+             llvm::LLVMRustDIBuilderCreateOpDeref()]
+        }
+    }
 }
diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs
index 514649290e2..a4fe912d1d7 100644
--- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs
+++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs
@@ -50,18 +50,6 @@ pub fn set_source_location<D>(
     set_debug_location(bx, dbg_loc);
 }
 
-/// Enables emitting source locations for the given functions.
-///
-/// Since we don't want source locations to be emitted for the function prelude,
-/// they are disabled when beginning to codegen a new function. This functions
-/// switches source location emitting on and must therefore be called before the
-/// first real statement/expression of the function is codegened.
-pub fn start_emitting_source_locations<D>(dbg_context: &FunctionDebugContext<D>) {
-    if let FunctionDebugContext::RegularContext(ref data) = *dbg_context {
-        data.source_locations_enabled.set(true);
-    }
-}
-
 
 #[derive(Copy, Clone, PartialEq)]
 pub enum InternalDebugLocation<'ll> {
diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs
index 5721938c9c0..94776f17c79 100644
--- a/src/librustc_codegen_llvm/diagnostics.rs
+++ b/src/librustc_codegen_llvm/diagnostics.rs
@@ -47,37 +47,4 @@ unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok!
 ```
 "##,
 
-E0668: r##"
-Malformed inline assembly rejected by LLVM.
-
-LLVM checks the validity of the constraints and the assembly string passed to
-it. This error implies that LLVM seems something wrong with the inline
-assembly call.
-
-In particular, it can happen if you forgot the closing bracket of a register
-constraint (see issue #51430):
-```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail)
-#![feature(asm)]
-
-fn main() {
-    let rax: u64;
-    unsafe {
-        asm!("" :"={rax"(rax));
-        println!("Accumulator is: {}", rax);
-    }
-}
-```
-"##,
-
-E0669: r##"
-Cannot convert inline assembly operand to a single LLVM value.
-
-This error usually happens when trying to pass in a value to an input inline
-assembly operand that is actually a pair of values. In particular, this can
-happen when trying to pass in a slice, for instance a `&str`. In Rust, these
-values are represented internally as a pair of values, the pointer and its
-length. When passed as an input operand, this pair of values can not be
-coerced into a register and thus we must fail with an error.
-"##,
-
 }
diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs
deleted file mode 100644
index 5e1a03031dd..00000000000
--- a/src/librustc_codegen_llvm/glue.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//!
-//
-// Code relating to drop glue.
-
-use std;
-
-use rustc_codegen_ssa::common::IntPredicate;
-use meth;
-use rustc::ty::layout::LayoutOf;
-use rustc::ty::{self, Ty};
-use interfaces::*;
-
-pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    t: Ty<'tcx>,
-    info: Option<Bx::Value>
-) -> (Bx::Value, Bx::Value) {
-    debug!("calculate size of DST: {}; with lost info: {:?}",
-           t, info);
-    if bx.cx().type_is_sized(t) {
-        let (size, align) = bx.cx().layout_of(t).size_and_align();
-        debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
-               t, info, size, align);
-        let size = bx.cx().const_usize(size.bytes());
-        let align = bx.cx().const_usize(align.abi());
-        return (size, align);
-    }
-    match t.sty {
-        ty::Dynamic(..) => {
-            // load size/align from vtable
-            let vtable = info.unwrap();
-            (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
-        }
-        ty::Slice(_) | ty::Str => {
-            let unit = t.sequence_element_type(bx.tcx());
-            // The info in this case is the length of the str, so the size is that
-            // times the unit size.
-            let (size, align) = bx.cx().layout_of(unit).size_and_align();
-            (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())),
-             bx.cx().const_usize(align.abi()))
-        }
-        _ => {
-            let cx = bx.cx();
-            // First get the size of all statically known fields.
-            // Don't use size_of because it also rounds up to alignment, which we
-            // want to avoid, as the unsized field's alignment could be smaller.
-            assert!(!t.is_simd());
-            let layout = cx.layout_of(t);
-            debug!("DST {} layout: {:?}", t, layout);
-
-            let i = layout.fields.count() - 1;
-            let sized_size = layout.fields.offset(i).bytes();
-            let sized_align = layout.align.abi();
-            debug!("DST {} statically sized prefix size: {} align: {}",
-                   t, sized_size, sized_align);
-            let sized_size = cx.const_usize(sized_size);
-            let sized_align = cx.const_usize(sized_align);
-
-            // Recurse to get the size of the dynamically sized field (must be
-            // the last field).
-            let field_ty = layout.field(cx, i).ty;
-            let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
-
-            // FIXME (#26403, #27023): We should be adding padding
-            // to `sized_size` (to accommodate the `unsized_align`
-            // required of the unsized field that follows) before
-            // summing it with `sized_size`. (Note that since #26403
-            // is unfixed, we do not yet add the necessary padding
-            // here. But this is where the add would go.)
-
-            // Return the sum of sizes and max of aligns.
-            let size = bx.add(sized_size, unsized_size);
-
-            // Packed types ignore the alignment of their fields.
-            if let ty::Adt(def, _) = t.sty {
-                if def.repr.packed() {
-                    unsized_align = sized_align;
-                }
-            }
-
-            // Choose max of two known alignments (combined value must
-            // be aligned according to more restrictive of the two).
-            let align = match (bx.cx().const_to_opt_u128(sized_align, false),
-                               bx.cx().const_to_opt_u128(unsized_align, false)) {
-                (Some(sized_align), Some(unsized_align)) => {
-                    // If both alignments are constant, (the sized_align should always be), then
-                    // pick the correct alignment statically.
-                    cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
-                }
-                _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
-                               sized_align,
-                               unsized_align)
-            };
-
-            // Issue #27023: must add any necessary padding to `size`
-            // (to make it a multiple of `align`) before returning it.
-            //
-            // Namely, the returned size should be, in C notation:
-            //
-            //   `size + ((size & (align-1)) ? align : 0)`
-            //
-            // emulated via the semi-standard fast bit trick:
-            //
-            //   `(size + (align-1)) & -align`
-
-            let addend = bx.sub(align, bx.cx().const_usize(1));
-            let size = bx.and(bx.add(size, addend), bx.neg(align));
-
-            (size, align)
-        }
-    }
-}
diff --git a/src/librustc_codegen_llvm/interfaces/abi.rs b/src/librustc_codegen_llvm/interfaces/abi.rs
deleted file mode 100644
index 528599e9690..00000000000
--- a/src/librustc_codegen_llvm/interfaces/abi.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::HasCodegen;
-use abi::FnType;
-use rustc::ty::{FnSig, Instance, Ty};
-
-pub trait AbiMethods<'tcx> {
-    fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
-    fn new_vtable(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
-    fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>;
-}
-
-pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> {
-    fn apply_attrs_callsite(&self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
-}
diff --git a/src/librustc_codegen_llvm/interfaces/asm.rs b/src/librustc_codegen_llvm/interfaces/asm.rs
deleted file mode 100644
index ffe9679fcd6..00000000000
--- a/src/librustc_codegen_llvm/interfaces/asm.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::Backend;
-use super::HasCodegen;
-use mir::place::PlaceRef;
-use rustc::hir::{GlobalAsm, InlineAsm};
-
-pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
-    // Take an inline assembly expression and splat it out via LLVM
-    fn codegen_inline_asm(
-        &self,
-        ia: &InlineAsm,
-        outputs: Vec<PlaceRef<'tcx, Self::Value>>,
-        inputs: Vec<Self::Value>,
-    ) -> bool;
-}
-
-pub trait AsmMethods<'tcx>: Backend<'tcx> {
-    fn codegen_global_asm(&self, ga: &GlobalAsm);
-}
diff --git a/src/librustc_codegen_llvm/interfaces/builder.rs b/src/librustc_codegen_llvm/interfaces/builder.rs
deleted file mode 100644
index 2e1abb12e16..00000000000
--- a/src/librustc_codegen_llvm/interfaces/builder.rs
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::abi::AbiBuilderMethods;
-use super::asm::AsmBuilderMethods;
-use super::debuginfo::DebugInfoBuilderMethods;
-use super::intrinsic::IntrinsicCallMethods;
-use super::type_::ArgTypeMethods;
-use super::HasCodegen;
-use builder::MemFlags;
-use libc::c_char;
-use mir::operand::OperandRef;
-use mir::place::PlaceRef;
-use rustc::ty::layout::{Align, Size};
-use rustc_codegen_ssa::common::{
-    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope,
-};
-
-use std::borrow::Cow;
-use std::ops::Range;
-use syntax::ast::AsmDialect;
-
-pub trait BuilderMethods<'a, 'tcx: 'a>:
-    HasCodegen<'tcx>
-    + DebugInfoBuilderMethods<'tcx>
-    + ArgTypeMethods<'tcx>
-    + AbiBuilderMethods<'tcx>
-    + IntrinsicCallMethods<'tcx>
-    + AsmBuilderMethods<'tcx>
-{
-    fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
-    fn with_cx(cx: &'a Self::CodegenCx) -> Self;
-    fn build_sibling_block<'b>(&self, name: &'b str) -> Self;
-    fn cx(&self) -> &Self::CodegenCx;
-    fn llfn(&self) -> Self::Value;
-    fn llbb(&self) -> Self::BasicBlock;
-    fn count_insn(&self, category: &str);
-
-    fn set_value_name(&self, value: Self::Value, name: &str);
-    fn position_at_end(&self, llbb: Self::BasicBlock);
-    fn position_at_start(&self, llbb: Self::BasicBlock);
-    fn ret_void(&self);
-    fn ret(&self, v: Self::Value);
-    fn br(&self, dest: Self::BasicBlock);
-    fn cond_br(&self, cond: Self::Value, then_llbb: Self::BasicBlock, else_llbb: Self::BasicBlock);
-    fn switch(&self, v: Self::Value, else_llbb: Self::BasicBlock, num_cases: usize) -> Self::Value;
-    fn invoke(
-        &self,
-        llfn: Self::Value,
-        args: &[Self::Value],
-        then: Self::BasicBlock,
-        catch: Self::BasicBlock,
-        funclet: Option<&Self::Funclet>,
-    ) -> Self::Value;
-    fn unreachable(&self);
-    fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn exactsdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn neg(&self, v: Self::Value) -> Self::Value;
-    fn fneg(&self, v: Self::Value) -> Self::Value;
-    fn not(&self, v: Self::Value) -> Self::Value;
-
-    fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
-    fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
-    fn array_alloca(
-        &self,
-        ty: Self::Type,
-        len: Self::Value,
-        name: &str,
-        align: Align,
-    ) -> Self::Value;
-
-    fn load(&self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
-    fn load_operand(&self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>;
-
-    fn range_metadata(&self, load: Self::Value, range: Range<u128>);
-    fn nonnull_metadata(&self, load: Self::Value);
-
-    fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
-    fn store_with_flags(
-        &self,
-        val: Self::Value,
-        ptr: Self::Value,
-        align: Align,
-        flags: MemFlags,
-    ) -> Self::Value;
-    fn atomic_store(&self, val: Self::Value, ptr: Self::Value, order: AtomicOrdering, size: Size);
-
-    fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value;
-
-    fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn sitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
-    fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-
-    fn icmp(&self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fcmp(&self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-
-    fn empty_phi(&self, ty: Self::Type) -> Self::Value;
-    fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value;
-    fn inline_asm_call(
-        &self,
-        asm: *const c_char,
-        cons: *const c_char,
-        inputs: &[Self::Value],
-        output: Self::Type,
-        volatile: bool,
-        alignstack: bool,
-        dia: AsmDialect,
-    ) -> Option<Self::Value>;
-
-    fn memcpy(
-        &self,
-        dst: Self::Value,
-        dst_align: Align,
-        src: Self::Value,
-        src_align: Align,
-        size: Self::Value,
-        flags: MemFlags,
-    );
-    fn memmove(
-        &self,
-        dst: Self::Value,
-        dst_align: Align,
-        src: Self::Value,
-        src_align: Align,
-        size: Self::Value,
-        flags: MemFlags,
-    );
-    fn memset(
-        &self,
-        ptr: Self::Value,
-        fill_byte: Self::Value,
-        size: Self::Value,
-        align: Align,
-        flags: MemFlags,
-    );
-
-    fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn select(
-        &self,
-        cond: Self::Value,
-        then_val: Self::Value,
-        else_val: Self::Value,
-    ) -> Self::Value;
-
-    fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value;
-    fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value;
-    fn insert_element(&self, vec: Self::Value, elt: Self::Value, idx: Self::Value) -> Self::Value;
-    fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: Self::Value) -> Self::Value;
-    fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value;
-    fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value;
-    fn vector_reduce_add(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_and(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_or(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value;
-    fn vector_reduce_max(&self, src: Self::Value, is_signed: bool) -> Self::Value;
-    fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value;
-    fn insert_value(&self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
-
-    fn landing_pad(&self, ty: Self::Type, pers_fn: Self::Value, num_clauses: usize) -> Self::Value;
-    fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value);
-    fn set_cleanup(&self, landing_pad: Self::Value);
-    fn resume(&self, exn: Self::Value) -> Self::Value;
-    fn cleanup_pad(&self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
-    fn cleanup_ret(&self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>)
-        -> Self::Value;
-    fn catch_pad(&self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
-    fn catch_ret(&self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value;
-    fn catch_switch(
-        &self,
-        parent: Option<Self::Value>,
-        unwind: Option<Self::BasicBlock>,
-        num_handlers: usize,
-    ) -> Self::Value;
-    fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock);
-    fn set_personality_fn(&self, personality: Self::Value);
-
-    fn atomic_cmpxchg(
-        &self,
-        dst: Self::Value,
-        cmp: Self::Value,
-        src: Self::Value,
-        order: AtomicOrdering,
-        failure_order: AtomicOrdering,
-        weak: bool,
-    ) -> Self::Value;
-    fn atomic_rmw(
-        &self,
-        op: AtomicRmwBinOp,
-        dst: Self::Value,
-        src: Self::Value,
-        order: AtomicOrdering,
-    ) -> Self::Value;
-    fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope);
-    fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
-    fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock);
-    fn set_invariant_load(&self, load: Self::Value);
-
-    /// Returns the ptr value that should be used for storing `val`.
-    fn check_store(&self, val: Self::Value, ptr: Self::Value) -> Self::Value;
-
-    /// Returns the args that should be used for a call to `llfn`.
-    fn check_call<'b>(
-        &self,
-        typ: &str,
-        llfn: Self::Value,
-        args: &'b [Self::Value],
-    ) -> Cow<'b, [Self::Value]>
-    where
-        [Self::Value]: ToOwned;
-    fn lifetime_start(&self, ptr: Self::Value, size: Size);
-    fn lifetime_end(&self, ptr: Self::Value, size: Size);
-
-    /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
-    /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
-    /// and the intrinsic for `lt` and passes them to `emit`, which is in
-    /// charge of generating code to call the passed intrinsic on whatever
-    /// block of generated code is targeted for the intrinsic.
-    ///
-    /// If LLVM lifetime intrinsic support is disabled (i.e.  optimizations
-    /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
-    fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size);
-
-    fn call(
-        &self,
-        llfn: Self::Value,
-        args: &[Self::Value],
-        funclet: Option<&Self::Funclet>,
-    ) -> Self::Value;
-    fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-
-    fn delete_basic_block(&self, bb: Self::BasicBlock);
-    fn do_not_inline(&self, llret: Self::Value);
-}
diff --git a/src/librustc_codegen_llvm/interfaces/consts.rs b/src/librustc_codegen_llvm/interfaces/consts.rs
deleted file mode 100644
index c0a54452195..00000000000
--- a/src/librustc_codegen_llvm/interfaces/consts.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::Backend;
-use mir::place::PlaceRef;
-use rustc::mir::interpret::Allocation;
-use rustc::mir::interpret::Scalar;
-use rustc::ty::layout;
-use syntax::symbol::LocalInternedString;
-
-pub trait ConstMethods<'tcx>: Backend<'tcx> {
-    // Constant constructors
-
-    fn const_null(&self, t: Self::Type) -> Self::Value;
-    fn const_undef(&self, t: Self::Type) -> Self::Value;
-    fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
-    fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
-    fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
-    fn const_bool(&self, val: bool) -> Self::Value;
-    fn const_i32(&self, i: i32) -> Self::Value;
-    fn const_u32(&self, i: u32) -> Self::Value;
-    fn const_u64(&self, i: u64) -> Self::Value;
-    fn const_usize(&self, i: u64) -> Self::Value;
-    fn const_u8(&self, i: u8) -> Self::Value;
-
-    // This is a 'c-like' raw string, which differs from
-    // our boxed-and-length-annotated strings.
-    fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value;
-
-    fn const_str_slice(&self, s: LocalInternedString) -> Self::Value;
-    fn const_fat_ptr(&self, ptr: Self::Value, meta: Self::Value) -> Self::Value;
-    fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
-    fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
-    fn const_vector(&self, elts: &[Self::Value]) -> Self::Value;
-    fn const_bytes(&self, bytes: &[u8]) -> Self::Value;
-
-    fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value;
-    fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>;
-    fn const_to_uint(&self, v: Self::Value) -> u64;
-    fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
-
-    fn is_const_integral(&self, v: Self::Value) -> bool;
-    fn is_const_real(&self, v: Self::Value) -> bool;
-
-    fn scalar_to_backend(
-        &self,
-        cv: Scalar,
-        layout: &layout::Scalar,
-        llty: Self::Type,
-    ) -> Self::Value;
-    fn from_const_alloc(
-        &self,
-        layout: layout::TyLayout<'tcx>,
-        alloc: &Allocation,
-        offset: layout::Size,
-    ) -> PlaceRef<'tcx, Self::Value>;
-}
diff --git a/src/librustc_codegen_llvm/interfaces/debuginfo.rs b/src/librustc_codegen_llvm/interfaces/debuginfo.rs
deleted file mode 100644
index 24f6cb85c7b..00000000000
--- a/src/librustc_codegen_llvm/interfaces/debuginfo.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::Backend;
-use super::HasCodegen;
-use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind};
-use monomorphize::Instance;
-use rustc::hir::def_id::CrateNum;
-use rustc::mir;
-use rustc::ty::{self, Ty};
-use rustc_data_structures::indexed_vec::IndexVec;
-use syntax::ast::Name;
-use syntax_pos::{SourceFile, Span};
-
-pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
-    fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value);
-
-    /// Creates the function-specific debug context.
-    ///
-    /// Returns the FunctionDebugContext for the function which holds state needed
-    /// for debug info creation. The function may also return another variant of the
-    /// FunctionDebugContext enum which indicates why no debuginfo should be created
-    /// for the function.
-    fn create_function_debug_context(
-        &self,
-        instance: Instance<'tcx>,
-        sig: ty::FnSig<'tcx>,
-        llfn: Self::Value,
-        mir: &mir::Mir,
-    ) -> FunctionDebugContext<Self::DIScope>;
-
-    fn create_mir_scopes(
-        &self,
-        mir: &mir::Mir,
-        debug_context: &FunctionDebugContext<Self::DIScope>,
-    ) -> IndexVec<mir::SourceScope, MirDebugScope<Self::DIScope>>;
-    fn extend_scope_to_file(
-        &self,
-        scope_metadata: Self::DIScope,
-        file: &SourceFile,
-        defining_crate: CrateNum,
-    ) -> Self::DIScope;
-    fn debuginfo_finalize(&self);
-}
-
-pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
-    fn declare_local(
-        &self,
-        dbg_context: &FunctionDebugContext<Self::DIScope>,
-        variable_name: Name,
-        variable_type: Ty<'tcx>,
-        scope_metadata: Self::DIScope,
-        variable_access: VariableAccess<'_, Self::Value>,
-        variable_kind: VariableKind,
-        span: Span,
-    );
-    fn set_source_location(
-        &self,
-        debug_context: &FunctionDebugContext<Self::DIScope>,
-        scope: Option<Self::DIScope>,
-        span: Span,
-    );
-    fn insert_reference_to_gdb_debug_scripts_section_global(&self);
-}
diff --git a/src/librustc_codegen_llvm/interfaces/intrinsic.rs b/src/librustc_codegen_llvm/interfaces/intrinsic.rs
deleted file mode 100644
index 1ea377b5a1d..00000000000
--- a/src/librustc_codegen_llvm/interfaces/intrinsic.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::Backend;
-use super::HasCodegen;
-use abi::FnType;
-use mir::operand::OperandRef;
-use rustc::ty::Ty;
-use syntax_pos::Span;
-
-pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
-    /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
-    /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
-    /// add them to librustc_codegen_llvm/context.rs
-    fn codegen_intrinsic_call(
-        &self,
-        callee_ty: Ty<'tcx>,
-        fn_ty: &FnType<'tcx, Ty<'tcx>>,
-        args: &[OperandRef<'tcx, Self::Value>],
-        llresult: Self::Value,
-        span: Span,
-    );
-}
-
-pub trait IntrinsicDeclarationMethods<'tcx>: Backend<'tcx> {
-    fn get_intrinsic(&self, key: &str) -> Self::Value;
-
-    /// Declare any llvm intrinsics that you might need
-    fn declare_intrinsic(&self, key: &str) -> Option<Self::Value>;
-}
diff --git a/src/librustc_codegen_llvm/interfaces/mod.rs b/src/librustc_codegen_llvm/interfaces/mod.rs
deleted file mode 100644
index 5fff0567585..00000000000
--- a/src/librustc_codegen_llvm/interfaces/mod.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-mod abi;
-mod asm;
-mod builder;
-mod consts;
-mod debuginfo;
-mod intrinsic;
-mod type_;
-
-pub use self::abi::{AbiBuilderMethods, AbiMethods};
-pub use self::asm::{AsmBuilderMethods, AsmMethods};
-pub use self::builder::BuilderMethods;
-pub use self::consts::ConstMethods;
-pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
-pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods};
-pub use self::type_::{
-    ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
-};
-pub use rustc_codegen_ssa::interfaces::{
-    Backend, BackendMethods, BackendTypes, CodegenObject, DeclareMethods, MiscMethods,
-    PreDefineMethods, StaticMethods,
-};
-
-pub trait CodegenMethods<'tcx>:
-    Backend<'tcx>
-    + TypeMethods<'tcx>
-    + MiscMethods<'tcx>
-    + ConstMethods<'tcx>
-    + StaticMethods<'tcx>
-    + DebugInfoMethods<'tcx>
-    + AbiMethods<'tcx>
-    + IntrinsicDeclarationMethods<'tcx>
-    + DeclareMethods<'tcx>
-    + AsmMethods<'tcx>
-    + PreDefineMethods<'tcx>
-{
-}
-
-impl<'tcx, T> CodegenMethods<'tcx> for T where
-    Self: Backend<'tcx>
-        + TypeMethods<'tcx>
-        + MiscMethods<'tcx>
-        + ConstMethods<'tcx>
-        + StaticMethods<'tcx>
-        + DebugInfoMethods<'tcx>
-        + AbiMethods<'tcx>
-        + IntrinsicDeclarationMethods<'tcx>
-        + DeclareMethods<'tcx>
-        + AsmMethods<'tcx>
-        + PreDefineMethods<'tcx>
-{}
-
-pub trait HasCodegen<'tcx>: Backend<'tcx> {
-    type CodegenCx: CodegenMethods<'tcx>
-        + BackendTypes<
-            Value = Self::Value,
-            BasicBlock = Self::BasicBlock,
-            Type = Self::Type,
-            Context = Self::Context,
-            Funclet = Self::Funclet,
-            DIScope = Self::DIScope,
-        >;
-}
diff --git a/src/librustc_codegen_llvm/interfaces/type_.rs b/src/librustc_codegen_llvm/interfaces/type_.rs
deleted file mode 100644
index fe4b7a0b852..00000000000
--- a/src/librustc_codegen_llvm/interfaces/type_.rs
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::Backend;
-use super::HasCodegen;
-use mir::place::PlaceRef;
-use rustc::ty::layout::TyLayout;
-use rustc::ty::layout::{self, Align, Size};
-use rustc::ty::Ty;
-use rustc::util::nodemap::FxHashMap;
-use rustc_codegen_ssa::common::TypeKind;
-use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
-use std::cell::RefCell;
-use syntax::ast;
-
-pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
-    fn type_void(&self) -> Self::Type;
-    fn type_metadata(&self) -> Self::Type;
-    fn type_i1(&self) -> Self::Type;
-    fn type_i8(&self) -> Self::Type;
-    fn type_i16(&self) -> Self::Type;
-    fn type_i32(&self) -> Self::Type;
-    fn type_i64(&self) -> Self::Type;
-    fn type_i128(&self) -> Self::Type;
-
-    // Creates an integer type with the given number of bits, e.g. i24
-    fn type_ix(&self, num_bits: u64) -> Self::Type;
-
-    fn type_f32(&self) -> Self::Type;
-    fn type_f64(&self) -> Self::Type;
-    fn type_x86_mmx(&self) -> Self::Type;
-
-    fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
-    fn type_named_struct(&self, name: &str) -> Self::Type;
-    fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn type_kind(&self, ty: Self::Type) -> TypeKind;
-    fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool);
-    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
-    fn element_type(&self, ty: Self::Type) -> Self::Type;
-
-    /// Return the number of elements in `self` if it is a LLVM vector type.
-    fn vector_length(&self, ty: Self::Type) -> usize;
-
-    fn func_params_types(&self, ty: Self::Type) -> Vec<Self::Type>;
-    fn float_width(&self, ty: Self::Type) -> usize;
-
-    /// Retrieve the bit width of the integer type `self`.
-    fn int_width(&self, ty: Self::Type) -> u64;
-
-    fn val_ty(&self, v: Self::Value) -> Self::Type;
-    fn scalar_lltypes(&self) -> &RefCell<FxHashMap<Ty<'tcx>, Self::Type>>;
-}
-
-pub trait DerivedTypeMethods<'tcx>: Backend<'tcx> {
-    fn type_bool(&self) -> Self::Type;
-    fn type_i8p(&self) -> Self::Type;
-    fn type_isize(&self) -> Self::Type;
-    fn type_int(&self) -> Self::Type;
-    fn type_int_from_ty(&self, t: ast::IntTy) -> Self::Type;
-    fn type_uint_from_ty(&self, t: ast::UintTy) -> Self::Type;
-    fn type_float_from_ty(&self, t: ast::FloatTy) -> Self::Type;
-    fn type_from_integer(&self, i: layout::Integer) -> Self::Type;
-
-    /// Return a LLVM type that has at most the required alignment,
-    /// as a conservative approximation for unknown pointee types.
-    fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type;
-
-    /// Return a LLVM type that has at most the required alignment,
-    /// and exactly the required size, as a best-effort padding array.
-    fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type;
-
-    fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool;
-    fn type_is_sized(&self, ty: Ty<'tcx>) -> bool;
-    fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool;
-    fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool;
-}
-
-pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
-    fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
-    fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
-    fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
-    fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
-    fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
-    fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
-    fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
-    fn scalar_pair_element_backend_type<'a>(
-        &self,
-        layout: TyLayout<'tcx>,
-        index: usize,
-        immediate: bool,
-    ) -> Self::Type;
-}
-
-pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
-    fn store_fn_arg(
-        &self,
-        ty: &ArgType<'tcx, Ty<'tcx>>,
-        idx: &mut usize,
-        dst: PlaceRef<'tcx, Self::Value>,
-    );
-    fn store_arg_ty(
-        &self,
-        ty: &ArgType<'tcx, Ty<'tcx>>,
-        val: Self::Value,
-        dst: PlaceRef<'tcx, Self::Value>,
-    );
-    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> Self::Type;
-}
-
-pub trait TypeMethods<'tcx>:
-    BaseTypeMethods<'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx>
-{
-}
-
-impl<T> TypeMethods<'tcx> for T where
-    Self: BaseTypeMethods<'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx>
-{}
diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs
index 8d5214ce376..41796d24d9e 100644
--- a/src/librustc_codegen_llvm/lib.rs
+++ b/src/librustc_codegen_llvm/lib.rs
@@ -39,7 +39,6 @@
 use back::write::create_target_machine;
 use syntax_pos::symbol::Symbol;
 
-#[macro_use] extern crate bitflags;
 extern crate flate2;
 extern crate libc;
 #[macro_use] extern crate rustc;
@@ -92,7 +91,7 @@ use rustc::util::time_graph;
 use rustc::util::nodemap::{FxHashSet, FxHashMap};
 use rustc::util::profiling::ProfileCategory;
 use rustc_mir::monomorphize;
-use rustc_codegen_ssa::{ModuleCodegen, CompiledModule};
+use rustc_codegen_ssa::{interfaces, ModuleCodegen, CompiledModule};
 use rustc_codegen_utils::codegen_backend::CodegenBackend;
 use rustc_data_structures::svh::Svh;
 
@@ -108,8 +107,6 @@ mod back {
     pub mod wasm;
 }
 
-mod interfaces;
-
 mod abi;
 mod allocator;
 mod asm;
@@ -122,7 +119,6 @@ mod consts;
 mod context;
 mod debuginfo;
 mod declare;
-mod glue;
 mod intrinsic;
 
 // The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
@@ -130,8 +126,6 @@ mod intrinsic;
 
 mod llvm_util;
 mod metadata;
-mod meth;
-mod mir;
 mod mono_item;
 mod type_;
 mod type_of;
@@ -171,6 +165,12 @@ impl BackendMethods for LlvmCodegenBackend {
     ) {
         codegen.submit_pre_codegened_module_to_llvm(tcx, module)
     }
+    fn submit_pre_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) {
+        write::submit_pre_lto_module_to_llvm(tcx, module)
+    }
+    fn submit_post_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) {
+        write::submit_post_lto_module_to_llvm(tcx, module)
+    }
     fn codegen_aborted(codegen: OngoingCodegen) {
         codegen.codegen_aborted();
     }
@@ -378,24 +378,4 @@ struct CodegenResults {
     linker_info: rustc_codegen_utils::linker::LinkerInfo,
     crate_info: CrateInfo,
 }
-
-/// Misc info we load from metadata to persist beyond the tcx
-struct CrateInfo {
-    panic_runtime: Option<CrateNum>,
-    compiler_builtins: Option<CrateNum>,
-    profiler_runtime: Option<CrateNum>,
-    sanitizer_runtime: Option<CrateNum>,
-    is_no_builtins: FxHashSet<CrateNum>,
-    native_libraries: FxHashMap<CrateNum, Lrc<Vec<NativeLibrary>>>,
-    crate_name: FxHashMap<CrateNum, String>,
-    used_libraries: Lrc<Vec<NativeLibrary>>,
-    link_args: Lrc<Vec<String>>,
-    used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
-    used_crates_static: Vec<(CrateNum, LibSource)>,
-    used_crates_dynamic: Vec<(CrateNum, LibSource)>,
-    wasm_imports: FxHashMap<String, String>,
-    lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
-    missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
-}
-
 __build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }
diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs
deleted file mode 100644
index 2be1c288440..00000000000
--- a/src/librustc_codegen_llvm/meth.rs
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use abi::FnType;
-use callee;
-use monomorphize;
-
-use interfaces::*;
-
-use rustc::ty::{self, Ty};
-
-#[derive(Copy, Clone, Debug)]
-pub struct VirtualIndex(u64);
-
-pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
-pub const SIZE: VirtualIndex = VirtualIndex(1);
-pub const ALIGN: VirtualIndex = VirtualIndex(2);
-
-impl<'a, 'tcx: 'a> VirtualIndex {
-    pub fn from_index(index: usize) -> Self {
-        VirtualIndex(index as u64 + 3)
-    }
-
-    pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
-        self,
-        bx: &Bx,
-        llvtable: Bx::Value,
-        fn_ty: &FnType<'tcx, Ty<'tcx>>
-    ) -> Bx::Value {
-        // Load the data pointer from the object.
-        debug!("get_fn({:?}, {:?})", llvtable, self);
-
-        let llvtable = bx.pointercast(
-            llvtable,
-            bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
-        );
-        let ptr_align = bx.tcx().data_layout.pointer_align;
-        let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
-            ptr_align
-        );
-        bx.nonnull_metadata(ptr);
-        // Vtable loads are invariant
-        bx.set_invariant_load(ptr);
-        ptr
-    }
-
-    pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
-        self,
-        bx: &Bx,
-        llvtable: Bx::Value
-    ) -> Bx::Value {
-        // Load the data pointer from the object.
-        debug!("get_int({:?}, {:?})", llvtable, self);
-
-        let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
-        let usize_align = bx.tcx().data_layout.pointer_align;
-        let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
-            usize_align
-        );
-        // Vtable loads are invariant
-        bx.set_invariant_load(ptr);
-        ptr
-    }
-}
-
-/// Creates a dynamic vtable for the given type and vtable origin.
-/// This is used only for objects.
-///
-/// The vtables are cached instead of created on every call.
-///
-/// The `trait_ref` encodes the erased self type. Hence if we are
-/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
-/// `trait_ref` would map `T:Trait`.
-pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
-    cx: &Cx,
-    ty: Ty<'tcx>,
-    trait_ref: ty::PolyExistentialTraitRef<'tcx>,
-) -> Cx::Value {
-    let tcx = cx.tcx();
-
-    debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
-
-    // Check the cache.
-    if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) {
-        return val;
-    }
-
-    // Not in the cache. Build it.
-    let nullptr = cx.const_null(cx.type_i8p());
-
-    let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
-    let methods = methods.iter().cloned().map(|opt_mth| {
-        opt_mth.map_or(nullptr, |(def_id, substs)| {
-            callee::resolve_and_get_fn_for_vtable(cx, def_id, substs)
-        })
-    });
-
-    let (size, align) = cx.layout_of(ty).size_and_align();
-    // /////////////////////////////////////////////////////////////////////////////////////////////
-    // If you touch this code, be sure to also make the corresponding changes to
-    // `get_vtable` in rust_mir/interpret/traits.rs
-    // /////////////////////////////////////////////////////////////////////////////////////////////
-    let components: Vec<_> = [
-        cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)),
-        cx.const_usize(size.bytes()),
-        cx.const_usize(align.abi())
-    ].iter().cloned().chain(methods).collect();
-
-    let vtable_const = cx.const_struct(&components, false);
-    let align = cx.data_layout().pointer_align;
-    let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
-
-    cx.create_vtable_metadata(ty, vtable);
-
-    cx.vtables().borrow_mut().insert((ty, trait_ref), vtable);
-    vtable
-}
diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs
deleted file mode 100644
index 9e54330b4ef..00000000000
--- a/src/librustc_codegen_llvm/mir/analyze.rs
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! An analysis to determine which locals require allocas and
-//! which do not.
-
-use rustc_data_structures::bit_set::BitSet;
-use rustc_data_structures::graph::dominators::Dominators;
-use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc::mir::{self, Location, TerminatorKind};
-use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext};
-use rustc::mir::traversal;
-use rustc::ty;
-use rustc::ty::layout::{LayoutOf, HasTyCtxt};
-use type_of::LayoutLlvmExt;
-use super::FunctionCx;
-use interfaces::*;
-
-pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    fx: &FunctionCx<'a, 'tcx, Bx>
-) -> BitSet<mir::Local> {
-    let mir = fx.mir;
-    let mut analyzer = LocalAnalyzer::new(fx);
-
-    analyzer.visit_mir(mir);
-
-    for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
-        let ty = fx.monomorphize(&ty);
-        debug!("local {} has type {:?}", index, ty);
-        let layout = fx.cx.layout_of(ty);
-        if layout.is_llvm_immediate() {
-            // These sorts of types are immediates that we can store
-            // in an Value without an alloca.
-        } else if layout.is_llvm_scalar_pair() {
-            // We allow pairs and uses of any of their 2 fields.
-        } else {
-            // These sorts of types require an alloca. Note that
-            // is_llvm_immediate() may *still* be true, particularly
-            // for newtypes, but we currently force some types
-            // (e.g. structs) into an alloca unconditionally, just so
-            // that we don't have to deal with having two pathways
-            // (gep vs extractvalue etc).
-            analyzer.not_ssa(mir::Local::new(index));
-        }
-    }
-
-    analyzer.non_ssa_locals
-}
-
-struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
-    fx: &'mir FunctionCx<'a, 'tcx, Bx>,
-    dominators: Dominators<mir::BasicBlock>,
-    non_ssa_locals: BitSet<mir::Local>,
-    // The location of the first visited direct assignment to each
-    // local, or an invalid location (out of bounds `block` index).
-    first_assignment: IndexVec<mir::Local, Location>
-}
-
-impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
-    fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self {
-        let invalid_location =
-            mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location();
-        let mut analyzer = LocalAnalyzer {
-            fx,
-            dominators: fx.mir.dominators(),
-            non_ssa_locals: BitSet::new_empty(fx.mir.local_decls.len()),
-            first_assignment: IndexVec::from_elem(invalid_location, &fx.mir.local_decls)
-        };
-
-        // Arguments get assigned to by means of the function being called
-        for arg in fx.mir.args_iter() {
-            analyzer.first_assignment[arg] = mir::START_BLOCK.start_location();
-        }
-
-        analyzer
-    }
-
-    fn first_assignment(&self, local: mir::Local) -> Option<Location> {
-        let location = self.first_assignment[local];
-        if location.block.index() < self.fx.mir.basic_blocks().len() {
-            Some(location)
-        } else {
-            None
-        }
-    }
-
-    fn not_ssa(&mut self, local: mir::Local) {
-        debug!("marking {:?} as non-SSA", local);
-        self.non_ssa_locals.insert(local);
-    }
-
-    fn assign(&mut self, local: mir::Local, location: Location) {
-        if self.first_assignment(local).is_some() {
-            self.not_ssa(local);
-        } else {
-            self.first_assignment[local] = location;
-        }
-    }
-}
-
-impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
-    for LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
-    fn visit_assign(&mut self,
-                    block: mir::BasicBlock,
-                    place: &mir::Place<'tcx>,
-                    rvalue: &mir::Rvalue<'tcx>,
-                    location: Location) {
-        debug!("visit_assign(block={:?}, place={:?}, rvalue={:?})", block, place, rvalue);
-
-        if let mir::Place::Local(index) = *place {
-            self.assign(index, location);
-            if !self.fx.rvalue_creates_operand(rvalue) {
-                self.not_ssa(index);
-            }
-        } else {
-            self.visit_place(
-                place,
-                PlaceContext::MutatingUse(MutatingUseContext::Store),
-                location
-            );
-        }
-
-        self.visit_rvalue(rvalue, location);
-    }
-
-    fn visit_terminator_kind(&mut self,
-                             block: mir::BasicBlock,
-                             kind: &mir::TerminatorKind<'tcx>,
-                             location: Location) {
-        let check = match *kind {
-            mir::TerminatorKind::Call {
-                func: mir::Operand::Constant(ref c),
-                ref args, ..
-            } => match c.ty.sty {
-                ty::FnDef(did, _) => Some((did, args)),
-                _ => None,
-            },
-            _ => None,
-        };
-        if let Some((def_id, args)) = check {
-            if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() {
-                // box_free(x) shares with `drop x` the property that it
-                // is not guaranteed to be statically dominated by the
-                // definition of x, so x must always be in an alloca.
-                if let mir::Operand::Move(ref place) = args[0] {
-                    self.visit_place(
-                        place,
-                        PlaceContext::MutatingUse(MutatingUseContext::Drop),
-                        location
-                    );
-                }
-            }
-        }
-
-        self.super_terminator_kind(block, kind, location);
-    }
-
-    fn visit_place(&mut self,
-                   place: &mir::Place<'tcx>,
-                   context: PlaceContext<'tcx>,
-                   location: Location) {
-        debug!("visit_place(place={:?}, context={:?})", place, context);
-        let cx = self.fx.cx;
-
-        if let mir::Place::Projection(ref proj) = *place {
-            // Allow uses of projections that are ZSTs or from scalar fields.
-            let is_consume = match context {
-                PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) |
-                PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => true,
-                _ => false
-            };
-            if is_consume {
-                let base_ty = proj.base.ty(self.fx.mir, cx.tcx());
-                let base_ty = self.fx.monomorphize(&base_ty);
-
-                // ZSTs don't require any actual memory access.
-                let elem_ty = base_ty
-                    .projection_ty(cx.tcx(), &proj.elem)
-                    .to_ty(cx.tcx());
-                let elem_ty = self.fx.monomorphize(&elem_ty);
-                if cx.layout_of(elem_ty).is_zst() {
-                    return;
-                }
-
-                if let mir::ProjectionElem::Field(..) = proj.elem {
-                    let layout = cx.layout_of(base_ty.to_ty(cx.tcx()));
-                    if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
-                        // Recurse with the same context, instead of `Projection`,
-                        // potentially stopping at non-operand projections,
-                        // which would trigger `not_ssa` on locals.
-                        self.visit_place(&proj.base, context, location);
-                        return;
-                    }
-                }
-            }
-
-            // A deref projection only reads the pointer, never needs the place.
-            if let mir::ProjectionElem::Deref = proj.elem {
-                return self.visit_place(
-                    &proj.base,
-                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
-                    location
-                );
-            }
-        }
-
-        self.super_place(place, context, location);
-    }
-
-    fn visit_local(&mut self,
-                   &local: &mir::Local,
-                   context: PlaceContext<'tcx>,
-                   location: Location) {
-        match context {
-            PlaceContext::MutatingUse(MutatingUseContext::Call) => {
-                self.assign(local, location);
-            }
-
-            PlaceContext::NonUse(_) |
-            PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
-
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) |
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => {
-                // Reads from uninitialized variables (e.g. in dead code, after
-                // optimizations) require locals to be in (uninitialized) memory.
-                // NB: there can be uninitialized reads of a local visited after
-                // an assignment to that local, if they happen on disjoint paths.
-                let ssa_read = match self.first_assignment(local) {
-                    Some(assignment_location) => {
-                        assignment_location.dominates(location, &self.dominators)
-                    }
-                    None => false
-                };
-                if !ssa_read {
-                    self.not_ssa(local);
-                }
-            }
-
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect) |
-            PlaceContext::MutatingUse(MutatingUseContext::Store) |
-            PlaceContext::MutatingUse(MutatingUseContext::AsmOutput) |
-            PlaceContext::MutatingUse(MutatingUseContext::Borrow(..)) |
-            PlaceContext::MutatingUse(MutatingUseContext::Projection) |
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow(..)) |
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow(..)) |
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow(..)) |
-            PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => {
-                self.not_ssa(local);
-            }
-
-            PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
-                let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx());
-                let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx()));
-
-                // Only need the place if we're actually dropping it.
-                if self.fx.cx.type_needs_drop(ty) {
-                    self.not_ssa(local);
-                }
-            }
-        }
-    }
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum CleanupKind {
-    NotCleanup,
-    Funclet,
-    Internal { funclet: mir::BasicBlock }
-}
-
-impl CleanupKind {
-    pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option<mir::BasicBlock> {
-        match self {
-            CleanupKind::NotCleanup => None,
-            CleanupKind::Funclet => Some(for_bb),
-            CleanupKind::Internal { funclet } => Some(funclet),
-        }
-    }
-}
-
-pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec<mir::BasicBlock, CleanupKind> {
-    fn discover_masters<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
-                              mir: &mir::Mir<'tcx>) {
-        for (bb, data) in mir.basic_blocks().iter_enumerated() {
-            match data.terminator().kind {
-                TerminatorKind::Goto { .. } |
-                TerminatorKind::Resume |
-                TerminatorKind::Abort |
-                TerminatorKind::Return |
-                TerminatorKind::GeneratorDrop |
-                TerminatorKind::Unreachable |
-                TerminatorKind::SwitchInt { .. } |
-                TerminatorKind::Yield { .. } |
-                TerminatorKind::FalseEdges { .. } |
-                TerminatorKind::FalseUnwind { .. } => {
-                    /* nothing to do */
-                }
-                TerminatorKind::Call { cleanup: unwind, .. } |
-                TerminatorKind::Assert { cleanup: unwind, .. } |
-                TerminatorKind::DropAndReplace { unwind, .. } |
-                TerminatorKind::Drop { unwind, .. } => {
-                    if let Some(unwind) = unwind {
-                        debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
-                               bb, data, unwind);
-                        result[unwind] = CleanupKind::Funclet;
-                    }
-                }
-            }
-        }
-    }
-
-    fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
-                       mir: &mir::Mir<'tcx>) {
-        let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
-
-        let mut set_successor = |funclet: mir::BasicBlock, succ| {
-            match funclet_succs[funclet] {
-                ref mut s @ None => {
-                    debug!("set_successor: updating successor of {:?} to {:?}",
-                           funclet, succ);
-                    *s = Some(succ);
-                },
-                Some(s) => if s != succ {
-                    span_bug!(mir.span, "funclet {:?} has 2 parents - {:?} and {:?}",
-                              funclet, s, succ);
-                }
-            }
-        };
-
-        for (bb, data) in traversal::reverse_postorder(mir) {
-            let funclet = match result[bb] {
-                CleanupKind::NotCleanup => continue,
-                CleanupKind::Funclet => bb,
-                CleanupKind::Internal { funclet } => funclet,
-            };
-
-            debug!("cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
-                   bb, data, result[bb], funclet);
-
-            for &succ in data.terminator().successors() {
-                let kind = result[succ];
-                debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}",
-                       funclet, succ, kind);
-                match kind {
-                    CleanupKind::NotCleanup => {
-                        result[succ] = CleanupKind::Internal { funclet };
-                    }
-                    CleanupKind::Funclet => {
-                        if funclet != succ {
-                            set_successor(funclet, succ);
-                        }
-                    }
-                    CleanupKind::Internal { funclet: succ_funclet } => {
-                        if funclet != succ_funclet {
-                            // `succ` has 2 different funclet going into it, so it must
-                            // be a funclet by itself.
-
-                            debug!("promoting {:?} to a funclet and updating {:?}", succ,
-                                   succ_funclet);
-                            result[succ] = CleanupKind::Funclet;
-                            set_successor(succ_funclet, succ);
-                            set_successor(funclet, succ);
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
-
-    discover_masters(&mut result, mir);
-    propagate(&mut result, mir);
-    debug!("cleanup_kinds: result={:?}", result);
-    result
-}
diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs
deleted file mode 100644
index d72fdb2fb23..00000000000
--- a/src/librustc_codegen_llvm/mir/block.rs
+++ /dev/null
@@ -1,1111 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::middle::lang_items;
-use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
-use rustc::mir;
-use rustc::mir::interpret::EvalErrorKind;
-use abi::{Abi, FnType, PassMode};
-use rustc_target::abi::call::ArgType;
-use base;
-use builder::MemFlags;
-use common;
-use rustc_codegen_ssa::common::IntPredicate;
-use meth;
-use monomorphize;
-
-use interfaces::*;
-
-use syntax::symbol::Symbol;
-use syntax_pos::Pos;
-
-use super::{FunctionCx, LocalRef};
-use super::place::PlaceRef;
-use super::operand::OperandRef;
-use super::operand::OperandValue::{Pair, Ref, Immediate};
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_block(
-        &mut self,
-        bb: mir::BasicBlock,
-    ) {
-        let mut bx = self.build_block(bb);
-        let data = &self.mir[bb];
-
-        debug!("codegen_block({:?}={:?})", bb, data);
-
-        for statement in &data.statements {
-            bx = self.codegen_statement(bx, statement);
-        }
-
-        self.codegen_terminator(bx, bb, data.terminator());
-    }
-
-    fn codegen_terminator(
-        &mut self,
-        mut bx: Bx,
-        bb: mir::BasicBlock,
-        terminator: &mir::Terminator<'tcx>
-    ) {
-        debug!("codegen_terminator: {:?}", terminator);
-
-        // Create the cleanup bundle, if needed.
-        let tcx = self.cx.tcx();
-        let span = terminator.source_info.span;
-        let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
-
-        // HACK(eddyb) force the right lifetimes, NLL can't figure them out.
-        fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-            funclet_bb: Option<mir::BasicBlock>
-        ) -> impl for<'b> Fn(
-            &'b FunctionCx<'a, 'tcx, Bx>,
-        ) -> Option<&'b Bx::Funclet> {
-            move |this| {
-                match funclet_bb {
-                    Some(funclet_bb) => this.funclets[funclet_bb].as_ref(),
-                    None => None,
-                }
-            }
-        }
-        let funclet = funclet_closure_factory(funclet_bb);
-
-        let lltarget = |this: &mut Self, target: mir::BasicBlock| {
-            let lltarget = this.blocks[target];
-            let target_funclet = this.cleanup_kinds[target].funclet_bb(target);
-            match (funclet_bb, target_funclet) {
-                (None, None) => (lltarget, false),
-                (Some(f), Some(t_f))
-                    if f == t_f || !base::wants_msvc_seh(tcx.sess)
-                    => (lltarget, false),
-                (None, Some(_)) => {
-                    // jump *into* cleanup - need a landing pad if GNU
-                    (this.landing_pad_to(target), false)
-                }
-                (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator),
-                (Some(_), Some(_)) => {
-                    (this.landing_pad_to(target), true)
-                }
-            }
-        };
-
-        let llblock = |this: &mut Self, target: mir::BasicBlock| {
-            let (lltarget, is_cleanupret) = lltarget(this, target);
-            if is_cleanupret {
-                // MSVC cross-funclet jump - need a trampoline
-
-                debug!("llblock: creating cleanup trampoline for {:?}", target);
-                let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
-                let trampoline = this.new_block(name);
-                trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget));
-                trampoline.llbb()
-            } else {
-                lltarget
-            }
-        };
-
-        let funclet_br =
-            |this: &mut Self, bx: &Bx, target: mir::BasicBlock| {
-                let (lltarget, is_cleanupret) = lltarget(this, target);
-                if is_cleanupret {
-                    // micro-optimization: generate a `ret` rather than a jump
-                    // to a trampoline.
-                    bx.cleanup_ret(funclet(this).unwrap(), Some(lltarget));
-                } else {
-                    bx.br(lltarget);
-                }
-            };
-
-        let do_call = |
-            this: &mut Self,
-            bx: &Bx,
-            fn_ty: FnType<'tcx, Ty<'tcx>>,
-            fn_ptr: Bx::Value,
-            llargs: &[Bx::Value],
-            destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
-            cleanup: Option<mir::BasicBlock>
-        | {
-            if let Some(cleanup) = cleanup {
-                let ret_bx = if let Some((_, target)) = destination {
-                    this.blocks[target]
-                } else {
-                    this.unreachable_block()
-                };
-                let invokeret = bx.invoke(fn_ptr,
-                                          &llargs,
-                                          ret_bx,
-                                          llblock(this, cleanup),
-                                          funclet(this));
-                bx.apply_attrs_callsite(&fn_ty, invokeret);
-
-                if let Some((ret_dest, target)) = destination {
-                    let ret_bx = this.build_block(target);
-                    this.set_debug_loc(&ret_bx, terminator.source_info);
-                    this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret);
-                }
-            } else {
-                let llret = bx.call(fn_ptr, &llargs, funclet(this));
-                bx.apply_attrs_callsite(&fn_ty, llret);
-                if this.mir[bb].is_cleanup {
-                    // Cleanup is always the cold path. Don't inline
-                    // drop glue. Also, when there is a deeply-nested
-                    // struct, there are "symmetry" issues that cause
-                    // exponential inlining - see issue #41696.
-                    bx.do_not_inline(llret);
-                }
-
-                if let Some((ret_dest, target)) = destination {
-                    this.store_return(bx, ret_dest, &fn_ty.ret, llret);
-                    funclet_br(this, bx, target);
-                } else {
-                    bx.unreachable();
-                }
-            }
-        };
-
-        self.set_debug_loc(&bx, terminator.source_info);
-        match terminator.kind {
-            mir::TerminatorKind::Resume => {
-                if let Some(funclet) = funclet(self) {
-                    bx.cleanup_ret(funclet, None);
-                } else {
-                    let slot = self.get_personality_slot(&bx);
-                    let lp0 = bx.load_operand(slot.project_field(&bx, 0)).immediate();
-                    let lp1 = bx.load_operand(slot.project_field(&bx, 1)).immediate();
-                    slot.storage_dead(&bx);
-
-                    if !bx.cx().sess().target.target.options.custom_unwind_resume {
-                        let mut lp = bx.cx().const_undef(self.landing_pad_type());
-                        lp = bx.insert_value(lp, lp0, 0);
-                        lp = bx.insert_value(lp, lp1, 1);
-                        bx.resume(lp);
-                    } else {
-                        bx.call(bx.cx().eh_unwind_resume(), &[lp0], funclet(self));
-                        bx.unreachable();
-                    }
-                }
-            }
-
-            mir::TerminatorKind::Abort => {
-                // Call core::intrinsics::abort()
-                let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
-                bx.call(fnname, &[], None);
-                bx.unreachable();
-            }
-
-            mir::TerminatorKind::Goto { target } => {
-                funclet_br(self, &bx, target);
-            }
-
-            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-                let discr = self.codegen_operand(&bx, discr);
-                if targets.len() == 2 {
-                    // If there are two targets, emit br instead of switch
-                    let lltrue = llblock(self, targets[0]);
-                    let llfalse = llblock(self, targets[1]);
-                    if switch_ty == bx.tcx().types.bool {
-                        // Don't generate trivial icmps when switching on bool
-                        if let [0] = values[..] {
-                            bx.cond_br(discr.immediate(), llfalse, lltrue);
-                        } else {
-                            assert_eq!(&values[..], &[1]);
-                            bx.cond_br(discr.immediate(), lltrue, llfalse);
-                        }
-                    } else {
-                        let switch_llty = bx.cx().immediate_backend_type(
-                            bx.cx().layout_of(switch_ty)
-                        );
-                        let llval = bx.cx().const_uint_big(switch_llty, values[0]);
-                        let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
-                        bx.cond_br(cmp, lltrue, llfalse);
-                    }
-                } else {
-                    let (otherwise, targets) = targets.split_last().unwrap();
-                    let switch = bx.switch(discr.immediate(),
-                                           llblock(self, *otherwise),
-                                           values.len());
-                    let switch_llty = bx.cx().immediate_backend_type(
-                        bx.cx().layout_of(switch_ty)
-                    );
-                    for (&value, target) in values.iter().zip(targets) {
-                        let llval = bx.cx().const_uint_big(switch_llty, value);
-                        let llbb = llblock(self, *target);
-                        bx.add_case(switch, llval, llbb)
-                    }
-                }
-            }
-
-            mir::TerminatorKind::Return => {
-                let llval = match self.fn_ty.ret.mode {
-                    PassMode::Ignore | PassMode::Indirect(..) => {
-                        bx.ret_void();
-                        return;
-                    }
-
-                    PassMode::Direct(_) | PassMode::Pair(..) => {
-                        let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
-                        if let Ref(llval, _, align) = op.val {
-                            bx.load(llval, align)
-                        } else {
-                            op.immediate_or_packed_pair(&bx)
-                        }
-                    }
-
-                    PassMode::Cast(cast_ty) => {
-                        let op = match self.locals[mir::RETURN_PLACE] {
-                            LocalRef::Operand(Some(op)) => op,
-                            LocalRef::Operand(None) => bug!("use of return before def"),
-                            LocalRef::Place(cg_place) => {
-                                OperandRef {
-                                    val: Ref(cg_place.llval, None, cg_place.align),
-                                    layout: cg_place.layout
-                                }
-                            }
-                            LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
-                        };
-                        let llslot = match op.val {
-                            Immediate(_) | Pair(..) => {
-                                let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret");
-                                op.val.store(&bx, scratch);
-                                scratch.llval
-                            }
-                            Ref(llval, _, align) => {
-                                assert_eq!(align.abi(), op.layout.align.abi(),
-                                           "return place is unaligned!");
-                                llval
-                            }
-                        };
-                        bx.load(
-                            bx.pointercast(llslot, bx.cx().type_ptr_to(
-                                bx.cx().cast_backend_type(&cast_ty)
-                            )),
-                            self.fn_ty.ret.layout.align)
-                    }
-                };
-                bx.ret(llval);
-            }
-
-            mir::TerminatorKind::Unreachable => {
-                bx.unreachable();
-            }
-
-            mir::TerminatorKind::Drop { ref location, target, unwind } => {
-                let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
-                let ty = self.monomorphize(&ty);
-                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx(), ty);
-
-                if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
-                    // we don't actually need to drop anything.
-                    funclet_br(self, &bx, target);
-                    return
-                }
-
-                let place = self.codegen_place(&bx, location);
-                let (args1, args2);
-                let mut args = if let Some(llextra) = place.llextra {
-                    args2 = [place.llval, llextra];
-                    &args2[..]
-                } else {
-                    args1 = [place.llval];
-                    &args1[..]
-                };
-                let (drop_fn, fn_ty) = match ty.sty {
-                    ty::Dynamic(..) => {
-                        let sig = drop_fn.fn_sig(tcx);
-                        let sig = tcx.normalize_erasing_late_bound_regions(
-                            ty::ParamEnv::reveal_all(),
-                            &sig,
-                        );
-                        let fn_ty = bx.cx().new_vtable(sig, &[]);
-                        let vtable = args[1];
-                        args = &args[..1];
-                        (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
-                    }
-                    _ => {
-                        (bx.cx().get_fn(drop_fn),
-                         bx.cx().fn_type_of_instance(&drop_fn))
-                    }
-                };
-                do_call(self, &bx, fn_ty, drop_fn, args,
-                        Some((ReturnDest::Nothing, target)),
-                        unwind);
-            }
-
-            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                let cond = self.codegen_operand(&bx, cond).immediate();
-                let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1);
-
-                // This case can currently arise only from functions marked
-                // with #[rustc_inherit_overflow_checks] and inlined from
-                // another crate (mostly core::num generic/#[inline] fns),
-                // while the current crate doesn't use overflow checks.
-                // NOTE: Unlike binops, negation doesn't have its own
-                // checked operation, just a comparison with the minimum
-                // value, so we have to check for the assert message.
-                if !bx.cx().check_overflow() {
-                    if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
-                        const_cond = Some(expected);
-                    }
-                }
-
-                // Don't codegen the panic block if success if known.
-                if const_cond == Some(expected) {
-                    funclet_br(self, &bx, target);
-                    return;
-                }
-
-                // Pass the condition through llvm.expect for branch hinting.
-                let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
-                let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None);
-
-                // Create the failure block and the conditional branch to it.
-                let lltarget = llblock(self, target);
-                let panic_block = self.new_block("panic");
-                if expected {
-                    bx.cond_br(cond, lltarget, panic_block.llbb());
-                } else {
-                    bx.cond_br(cond, panic_block.llbb(), lltarget);
-                }
-
-                // After this point, bx is the block for the call to panic.
-                bx = panic_block;
-                self.set_debug_loc(&bx, terminator.source_info);
-
-                // Get the location information.
-                let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
-                let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                let filename = bx.cx().const_str_slice(filename);
-                let line = bx.cx().const_u32(loc.line as u32);
-                let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
-                let align = tcx.data_layout.aggregate_align
-                    .max(tcx.data_layout.i32_align)
-                    .max(tcx.data_layout.pointer_align);
-
-                // Put together the arguments to the panic entry point.
-                let (lang_item, args) = match *msg {
-                    EvalErrorKind::BoundsCheck { ref len, ref index } => {
-                        let len = self.codegen_operand(&bx, len).immediate();
-                        let index = self.codegen_operand(&bx, index).immediate();
-
-                        let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
-                        let file_line_col = bx.cx().static_addr_of(
-                            file_line_col,
-                            align,
-                            Some("panic_bounds_check_loc")
-                        );
-                        (lang_items::PanicBoundsCheckFnLangItem,
-                         vec![file_line_col, index, len])
-                    }
-                    _ => {
-                        let str = msg.description();
-                        let msg_str = Symbol::intern(str).as_str();
-                        let msg_str = bx.cx().const_str_slice(msg_str);
-                        let msg_file_line_col = bx.cx().const_struct(
-                            &[msg_str, filename, line, col],
-                            false
-                        );
-                        let msg_file_line_col = bx.cx().static_addr_of(
-                            msg_file_line_col,
-                            align,
-                            Some("panic_loc")
-                        );
-                        (lang_items::PanicFnLangItem,
-                         vec![msg_file_line_col])
-                    }
-                };
-
-                // Obtain the panic entry point.
-                let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
-                let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let fn_ty = bx.cx().fn_type_of_instance(&instance);
-                let llfn = bx.cx().get_fn(instance);
-
-                // Codegen the actual panic invoke/call.
-                do_call(self, &bx, fn_ty, llfn, &args, None, cleanup);
-            }
-
-            mir::TerminatorKind::DropAndReplace { .. } => {
-                bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
-            }
-
-            mir::TerminatorKind::Call {
-                ref func,
-                ref args,
-                ref destination,
-                cleanup,
-                from_hir_call: _
-            } => {
-                // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-                let callee = self.codegen_operand(&bx, func);
-
-                let (instance, mut llfn) = match callee.layout.ty.sty {
-                    ty::FnDef(def_id, substs) => {
-                        (Some(ty::Instance::resolve(bx.cx().tcx(),
-                                                    ty::ParamEnv::reveal_all(),
-                                                    def_id,
-                                                    substs).unwrap()),
-                         None)
-                    }
-                    ty::FnPtr(_) => {
-                        (None, Some(callee.immediate()))
-                    }
-                    _ => bug!("{} is not callable", callee.layout.ty)
-                };
-                let def = instance.map(|i| i.def);
-                let sig = callee.layout.ty.fn_sig(bx.tcx());
-                let sig = bx.tcx().normalize_erasing_late_bound_regions(
-                    ty::ParamEnv::reveal_all(),
-                    &sig,
-                );
-                let abi = sig.abi;
-
-                // Handle intrinsics old codegen wants Expr's for, ourselves.
-                let intrinsic = match def {
-                    Some(ty::InstanceDef::Intrinsic(def_id))
-                        => Some(bx.tcx().item_name(def_id).as_str()),
-                    _ => None
-                };
-                let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
-
-                if intrinsic == Some("transmute") {
-                    if let Some(destination_ref) = destination.as_ref() {
-                        let &(ref dest, target) = destination_ref;
-                        self.codegen_transmute(&bx, &args[0], dest);
-                        funclet_br(self, &bx, target);
-                    } else {
-                        // If we are trying to transmute to an uninhabited type,
-                        // it is likely there is no allotted destination. In fact,
-                        // transmuting to an uninhabited type is UB, which means
-                        // we can do what we like. Here, we declare that transmuting
-                        // into an uninhabited type is impossible, so anything following
-                        // it must be unreachable.
-                        assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited);
-                        bx.unreachable();
-                    }
-                    return;
-                }
-
-                let extra_args = &args[sig.inputs().len()..];
-                let extra_args = extra_args.iter().map(|op_arg| {
-                    let op_ty = op_arg.ty(self.mir, bx.tcx());
-                    self.monomorphize(&op_ty)
-                }).collect::<Vec<_>>();
-
-                let fn_ty = match def {
-                    Some(ty::InstanceDef::Virtual(..)) => {
-                        bx.cx().new_vtable(sig, &extra_args)
-                    }
-                    Some(ty::InstanceDef::DropGlue(_, None)) => {
-                        // empty drop glue - a nop.
-                        let &(_, target) = destination.as_ref().unwrap();
-                        funclet_br(self, &bx, target);
-                        return;
-                    }
-                    _ => bx.cx().new_fn_type(sig, &extra_args)
-                };
-
-                // emit a panic instead of instantiating an uninhabited type
-                if (intrinsic == Some("init") || intrinsic == Some("uninit")) &&
-                    fn_ty.ret.layout.abi.is_uninhabited()
-                {
-                    let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
-                    let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                    let filename = bx.cx().const_str_slice(filename);
-                    let line = bx.cx().const_u32(loc.line as u32);
-                    let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
-                    let align = tcx.data_layout.aggregate_align
-                        .max(tcx.data_layout.i32_align)
-                        .max(tcx.data_layout.pointer_align);
-
-                    let str = format!(
-                        "Attempted to instantiate uninhabited type {} using mem::{}",
-                        sig.output(),
-                        if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
-                    );
-                    let msg_str = Symbol::intern(&str).as_str();
-                    let msg_str = bx.cx().const_str_slice(msg_str);
-                    let msg_file_line_col = bx.cx().const_struct(
-                        &[msg_str, filename, line, col],
-                        false,
-                    );
-                    let msg_file_line_col = bx.cx().static_addr_of(
-                        msg_file_line_col,
-                        align,
-                        Some("panic_loc"),
-                    );
-
-                    // Obtain the panic entry point.
-                    let def_id =
-                        common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
-                    let instance = ty::Instance::mono(bx.tcx(), def_id);
-                    let fn_ty = bx.cx().fn_type_of_instance(&instance);
-                    let llfn = bx.cx().get_fn(instance);
-
-                    // Codegen the actual panic invoke/call.
-                    do_call(
-                        self,
-                        &bx,
-                        fn_ty,
-                        llfn,
-                        &[msg_file_line_col],
-                        destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
-                        cleanup,
-                    );
-                    return;
-                }
-
-                // The arguments we'll be passing. Plus one to account for outptr, if used.
-                let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
-                let mut llargs = Vec::with_capacity(arg_count);
-
-                // Prepare the return value destination
-                let ret_dest = if let Some((ref dest, _)) = *destination {
-                    let is_intrinsic = intrinsic.is_some();
-                    self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs,
-                                          is_intrinsic)
-                } else {
-                    ReturnDest::Nothing
-                };
-
-                if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
-                    let dest = match ret_dest {
-                        _ if fn_ty.ret.is_indirect() => llargs[0],
-                        ReturnDest::Nothing => {
-                            bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret)))
-                        }
-                        ReturnDest::IndirectOperand(dst, _) |
-                        ReturnDest::Store(dst) => dst.llval,
-                        ReturnDest::DirectOperand(_) =>
-                            bug!("Cannot use direct operand with an intrinsic call")
-                    };
-
-                    let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| {
-                        // The indices passed to simd_shuffle* in the
-                        // third argument must be constant. This is
-                        // checked by const-qualification, which also
-                        // promotes any complex rvalues to constants.
-                        if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") {
-                            match *arg {
-                                // The shuffle array argument is usually not an explicit constant,
-                                // but specified directly in the code. This means it gets promoted
-                                // and we can then extract the value by evaluating the promoted.
-                                mir::Operand::Copy(mir::Place::Promoted(box(index, ty))) |
-                                mir::Operand::Move(mir::Place::Promoted(box(index, ty))) => {
-                                    let param_env = ty::ParamEnv::reveal_all();
-                                    let cid = mir::interpret::GlobalId {
-                                        instance: self.instance,
-                                        promoted: Some(index),
-                                    };
-                                    let c = bx.tcx().const_eval(param_env.and(cid));
-                                    let (llval, ty) = self.simd_shuffle_indices(
-                                        &bx,
-                                        terminator.source_info.span,
-                                        ty,
-                                        c,
-                                    );
-                                    return OperandRef {
-                                        val: Immediate(llval),
-                                        layout: bx.cx().layout_of(ty),
-                                    };
-
-                                },
-                                mir::Operand::Copy(_) |
-                                mir::Operand::Move(_) => {
-                                    span_bug!(span, "shuffle indices must be constant");
-                                }
-                                mir::Operand::Constant(ref constant) => {
-                                    let c = self.eval_mir_constant(&bx, constant);
-                                    let (llval, ty) = self.simd_shuffle_indices(
-                                        &bx,
-                                        constant.span,
-                                        constant.ty,
-                                        c,
-                                    );
-                                    return OperandRef {
-                                        val: Immediate(llval),
-                                        layout: bx.cx().layout_of(ty)
-                                    };
-                                }
-                            }
-                        }
-
-                        self.codegen_operand(&bx, arg)
-                    }).collect();
-
-
-                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx());
-                    bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
-                                               terminator.source_info.span);
-
-                    if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                        self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval);
-                    }
-
-                    if let Some((_, target)) = *destination {
-                        funclet_br(self, &bx, target);
-                    } else {
-                        bx.unreachable();
-                    }
-
-                    return;
-                }
-
-                // Split the rust-call tupled arguments off.
-                let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
-                    let (tup, args) = args.split_last().unwrap();
-                    (args, Some(tup))
-                } else {
-                    (&args[..], None)
-                };
-
-                'make_args: for (i, arg) in first_args.iter().enumerate() {
-                    let mut op = self.codegen_operand(&bx, arg);
-
-                    if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
-                        if let Pair(..) = op.val {
-                            // In the case of Rc<Self>, we need to explicitly pass a
-                            // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
-                            // that is understood elsewhere in the compiler as a method on
-                            // `dyn Trait`.
-                            // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
-                            // we get a value of a built-in pointer type
-                            'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
-                                            && !op.layout.ty.is_region_ptr()
-                            {
-                                'iter_fields: for i in 0..op.layout.fields.count() {
-                                    let field = op.extract_field(&bx, i);
-                                    if !field.layout.is_zst() {
-                                        // we found the one non-zero-sized field that is allowed
-                                        // now find *its* non-zero-sized field, or stop if it's a
-                                        // pointer
-                                        op = field;
-                                        continue 'descend_newtypes
-                                    }
-                                }
-
-                                span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
-                            }
-
-                            // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
-                            // data pointer and vtable. Look up the method in the vtable, and pass
-                            // the data pointer as the first argument
-                            match op.val {
-                                Pair(data_ptr, meta) => {
-                                    llfn = Some(meth::VirtualIndex::from_index(idx)
-                                        .get_fn(&bx, meta, &fn_ty));
-                                    llargs.push(data_ptr);
-                                    continue 'make_args
-                                }
-                                other => bug!("expected a Pair, got {:?}", other)
-                            }
-                        } else if let Ref(data_ptr, Some(meta), _) = op.val {
-                            // by-value dynamic dispatch
-                            llfn = Some(meth::VirtualIndex::from_index(idx)
-                                .get_fn(&bx, meta, &fn_ty));
-                            llargs.push(data_ptr);
-                            continue;
-                        } else {
-                            span_bug!(span, "can't codegen a virtual call on {:?}", op);
-                        }
-                    }
-
-                    // The callee needs to own the argument memory if we pass it
-                    // by-ref, so make a local copy of non-immediate constants.
-                    match (arg, op.val) {
-                        (&mir::Operand::Copy(_), Ref(_, None, _)) |
-                        (&mir::Operand::Constant(_), Ref(_, None, _)) => {
-                            let tmp = PlaceRef::alloca(&bx, op.layout, "const");
-                            op.val.store(&bx, tmp);
-                            op.val = Ref(tmp.llval, None, tmp.align);
-                        }
-                        _ => {}
-                    }
-
-                    self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
-                }
-                if let Some(tup) = untuple {
-                    self.codegen_arguments_untupled(&bx, tup, &mut llargs,
-                        &fn_ty.args[first_args.len()..])
-                }
-
-                let fn_ptr = match (llfn, instance) {
-                    (Some(llfn), _) => llfn,
-                    (None, Some(instance)) => bx.cx().get_fn(instance),
-                    _ => span_bug!(span, "no llfn for call"),
-                };
-
-                do_call(self, &bx, fn_ty, fn_ptr, &llargs,
-                        destination.as_ref().map(|&(_, target)| (ret_dest, target)),
-                        cleanup);
-            }
-            mir::TerminatorKind::GeneratorDrop |
-            mir::TerminatorKind::Yield { .. } => bug!("generator ops in codegen"),
-            mir::TerminatorKind::FalseEdges { .. } |
-            mir::TerminatorKind::FalseUnwind { .. } => bug!("borrowck false edges in codegen"),
-        }
-    }
-
-    fn codegen_argument(
-        &mut self,
-        bx: &Bx,
-        op: OperandRef<'tcx, Bx::Value>,
-        llargs: &mut Vec<Bx::Value>,
-        arg: &ArgType<'tcx, Ty<'tcx>>
-    ) {
-        // Fill padding with undef value, where applicable.
-        if let Some(ty) = arg.pad {
-            llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty)))
-        }
-
-        if arg.is_ignore() {
-            return;
-        }
-
-        if let PassMode::Pair(..) = arg.mode {
-            match op.val {
-                Pair(a, b) => {
-                    llargs.push(a);
-                    llargs.push(b);
-                    return;
-                }
-                _ => bug!("codegen_argument: {:?} invalid for pair argument", op)
-            }
-        } else if arg.is_unsized_indirect() {
-            match op.val {
-                Ref(a, Some(b), _) => {
-                    llargs.push(a);
-                    llargs.push(b);
-                    return;
-                }
-                _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op)
-            }
-        }
-
-        // Force by-ref if we have to load through a cast pointer.
-        let (mut llval, align, by_ref) = match op.val {
-            Immediate(_) | Pair(..) => {
-                match arg.mode {
-                    PassMode::Indirect(..) | PassMode::Cast(_) => {
-                        let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
-                        op.val.store(bx, scratch);
-                        (scratch.llval, scratch.align, true)
-                    }
-                    _ => {
-                        (op.immediate_or_packed_pair(bx), arg.layout.align, false)
-                    }
-                }
-            }
-            Ref(llval, _, align) => {
-                if arg.is_indirect() && align.abi() < arg.layout.align.abi() {
-                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
-                    // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
-                    // have scary latent bugs around.
-
-                    let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
-                    base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align,
-                                    op.layout, MemFlags::empty());
-                    (scratch.llval, scratch.align, true)
-                } else {
-                    (llval, align, true)
-                }
-            }
-        };
-
-        if by_ref && !arg.is_indirect() {
-            // Have to load the argument, maybe while casting it.
-            if let PassMode::Cast(ty) = arg.mode {
-                llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(
-                    bx.cx().cast_backend_type(&ty))
-                ), align.min(arg.layout.align));
-            } else {
-                // We can't use `PlaceRef::load` here because the argument
-                // may have a type we don't treat as immediate, but the ABI
-                // used for this call is passing it by-value. In that case,
-                // the load would just produce `OperandValue::Ref` instead
-                // of the `OperandValue::Immediate` we need for the call.
-                llval = bx.load(llval, align);
-                if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
-                    if scalar.is_bool() {
-                        bx.range_metadata(llval, 0..2);
-                    }
-                }
-                // We store bools as i8 so we need to truncate to i1.
-                llval = base::to_immediate(bx, llval, arg.layout);
-            }
-        }
-
-        llargs.push(llval);
-    }
-
-    fn codegen_arguments_untupled(
-        &mut self,
-        bx: &Bx,
-        operand: &mir::Operand<'tcx>,
-        llargs: &mut Vec<Bx::Value>,
-        args: &[ArgType<'tcx, Ty<'tcx>>]
-    ) {
-        let tuple = self.codegen_operand(bx, operand);
-
-        // Handle both by-ref and immediate tuples.
-        if let Ref(llval, None, align) = tuple.val {
-            let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
-            for i in 0..tuple.layout.fields.count() {
-                let field_ptr = tuple_ptr.project_field(bx, i);
-                self.codegen_argument(bx, bx.load_operand(field_ptr), llargs, &args[i]);
-            }
-        } else if let Ref(_, Some(_), _) = tuple.val {
-            bug!("closure arguments must be sized")
-        } else {
-            // If the tuple is immediate, the elements are as well.
-            for i in 0..tuple.layout.fields.count() {
-                let op = tuple.extract_field(bx, i);
-                self.codegen_argument(bx, op, llargs, &args[i]);
-            }
-        }
-    }
-
-    fn get_personality_slot(
-        &mut self,
-        bx: &Bx
-    ) -> PlaceRef<'tcx, Bx::Value> {
-        let cx = bx.cx();
-        if let Some(slot) = self.personality_slot {
-            slot
-        } else {
-            let layout = cx.layout_of(cx.tcx().intern_tup(&[
-                cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
-                cx.tcx().types.i32
-            ]));
-            let slot = PlaceRef::alloca(bx, layout, "personalityslot");
-            self.personality_slot = Some(slot);
-            slot
-        }
-    }
-
-    /// Return the landingpad wrapper around the given basic block
-    ///
-    /// No-op in MSVC SEH scheme.
-    fn landing_pad_to(
-        &mut self,
-        target_bb: mir::BasicBlock
-    ) -> Bx::BasicBlock {
-        if let Some(block) = self.landing_pads[target_bb] {
-            return block;
-        }
-
-        let block = self.blocks[target_bb];
-        let landing_pad = self.landing_pad_uncached(block);
-        self.landing_pads[target_bb] = Some(landing_pad);
-        landing_pad
-    }
-
-    fn landing_pad_uncached(
-        &mut self,
-        target_bb: Bx::BasicBlock
-    ) -> Bx::BasicBlock {
-        if base::wants_msvc_seh(self.cx.sess()) {
-            span_bug!(self.mir.span, "landing pad was not inserted?")
-        }
-
-        let bx = self.new_block("cleanup");
-
-        let llpersonality = self.cx.eh_personality();
-        let llretty = self.landing_pad_type();
-        let lp = bx.landing_pad(llretty, llpersonality, 1);
-        bx.set_cleanup(lp);
-
-        let slot = self.get_personality_slot(&bx);
-        slot.storage_live(&bx);
-        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot);
-
-        bx.br(target_bb);
-        bx.llbb()
-    }
-
-    fn landing_pad_type(&self) -> Bx::Type {
-        let cx = self.cx;
-        cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
-    }
-
-    fn unreachable_block(
-        &mut self
-    ) -> Bx::BasicBlock {
-        self.unreachable_block.unwrap_or_else(|| {
-            let bx = self.new_block("unreachable");
-            bx.unreachable();
-            self.unreachable_block = Some(bx.llbb());
-            bx.llbb()
-        })
-    }
-
-    pub fn new_block(&self, name: &str) -> Bx {
-        Bx::new_block(self.cx, self.llfn, name)
-    }
-
-    pub fn build_block(
-        &self,
-        bb: mir::BasicBlock
-    ) -> Bx {
-        let bx = Bx::with_cx(self.cx);
-        bx.position_at_end(self.blocks[bb]);
-        bx
-    }
-
-    fn make_return_dest(
-        &mut self,
-        bx: &Bx,
-        dest: &mir::Place<'tcx>,
-        fn_ret: &ArgType<'tcx, Ty<'tcx>>,
-        llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
-    ) -> ReturnDest<'tcx, Bx::Value> {
-        // If the return is ignored, we can just return a do-nothing ReturnDest
-        if fn_ret.is_ignore() {
-            return ReturnDest::Nothing;
-        }
-        let dest = if let mir::Place::Local(index) = *dest {
-            match self.locals[index] {
-                LocalRef::Place(dest) => dest,
-                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
-                LocalRef::Operand(None) => {
-                    // Handle temporary places, specifically Operand ones, as
-                    // they don't have allocas
-                    return if fn_ret.is_indirect() {
-                        // Odd, but possible, case, we have an operand temporary,
-                        // but the calling convention has an indirect return.
-                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bx);
-                        llargs.push(tmp.llval);
-                        ReturnDest::IndirectOperand(tmp, index)
-                    } else if is_intrinsic {
-                        // Currently, intrinsics always need a location to store
-                        // the result. so we create a temporary alloca for the
-                        // result
-                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bx);
-                        ReturnDest::IndirectOperand(tmp, index)
-                    } else {
-                        ReturnDest::DirectOperand(index)
-                    };
-                }
-                LocalRef::Operand(Some(_)) => {
-                    bug!("place local already assigned to");
-                }
-            }
-        } else {
-            self.codegen_place(bx, dest)
-        };
-        if fn_ret.is_indirect() {
-            if dest.align.abi() < dest.layout.align.abi() {
-                // Currently, MIR code generation does not create calls
-                // that store directly to fields of packed structs (in
-                // fact, the calls it creates write only to temps),
-                //
-                // If someone changes that, please update this code path
-                // to create a temporary.
-                span_bug!(self.mir.span, "can't directly store to unaligned value");
-            }
-            llargs.push(dest.llval);
-            ReturnDest::Nothing
-        } else {
-            ReturnDest::Store(dest)
-        }
-    }
-
-    fn codegen_transmute(
-        &mut self,
-        bx: &Bx,
-        src: &mir::Operand<'tcx>,
-        dst: &mir::Place<'tcx>
-    ) {
-        if let mir::Place::Local(index) = *dst {
-            match self.locals[index] {
-                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
-                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
-                LocalRef::Operand(None) => {
-                    let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst));
-                    assert!(!dst_layout.ty.has_erasable_regions());
-                    let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
-                    place.storage_live(bx);
-                    self.codegen_transmute_into(bx, src, place);
-                    let op = bx.load_operand(place);
-                    place.storage_dead(bx);
-                    self.locals[index] = LocalRef::Operand(Some(op));
-                }
-                LocalRef::Operand(Some(op)) => {
-                    assert!(op.layout.is_zst(),
-                            "assigning to initialized SSAtemp");
-                }
-            }
-        } else {
-            let dst = self.codegen_place(bx, dst);
-            self.codegen_transmute_into(bx, src, dst);
-        }
-    }
-
-    fn codegen_transmute_into(
-        &mut self,
-        bx: &Bx,
-        src: &mir::Operand<'tcx>,
-        dst: PlaceRef<'tcx, Bx::Value>
-    ) {
-        let src = self.codegen_operand(bx, src);
-        let llty = bx.cx().backend_type(src.layout);
-        let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
-        let align = src.layout.align.min(dst.layout.align);
-        src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
-    }
-
-
-    // Stores the return value of a function call into it's final location.
-    fn store_return(
-        &mut self,
-        bx: &Bx,
-        dest: ReturnDest<'tcx, Bx::Value>,
-        ret_ty: &ArgType<'tcx, Ty<'tcx>>,
-        llval: Bx::Value
-    ) {
-        use self::ReturnDest::*;
-
-        match dest {
-            Nothing => (),
-            Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst),
-            IndirectOperand(tmp, index) => {
-                let op = bx.load_operand(tmp);
-                tmp.storage_dead(bx);
-                self.locals[index] = LocalRef::Operand(Some(op));
-            }
-            DirectOperand(index) => {
-                // If there is a cast, we have to store and reload.
-                let op = if let PassMode::Cast(_) = ret_ty.mode {
-                    let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
-                    tmp.storage_live(bx);
-                    bx.store_arg_ty(&ret_ty, llval, tmp);
-                    let op = bx.load_operand(tmp);
-                    tmp.storage_dead(bx);
-                    op
-                } else {
-                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_ty.layout)
-                };
-                self.locals[index] = LocalRef::Operand(Some(op));
-            }
-        }
-    }
-}
-
-enum ReturnDest<'tcx, V> {
-    // Do nothing, the return value is indirect or ignored
-    Nothing,
-    // Store the return value to the pointer
-    Store(PlaceRef<'tcx, V>),
-    // Stores an indirect return value to an operand local place
-    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
-    // Stores a direct return value to an operand local place
-    DirectOperand(mir::Local)
-}
diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs
deleted file mode 100644
index a052473beec..00000000000
--- a/src/librustc_codegen_llvm/mir/constant.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::mir::interpret::{ErrorHandled, read_target_uint};
-use rustc_mir::const_eval::const_field;
-use rustc::hir::def_id::DefId;
-use rustc::mir;
-use rustc_data_structures::indexed_vec::Idx;
-use rustc::mir::interpret::{GlobalId, Pointer, Allocation, ConstValue};
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size};
-use common::CodegenCx;
-use syntax::source_map::Span;
-use value::Value;
-use interfaces::*;
-
-use super::FunctionCx;
-
-pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
-    let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
-    let dl = cx.data_layout();
-    let pointer_size = dl.pointer_size.bytes() as usize;
-
-    let mut next_offset = 0;
-    for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
-        let offset = offset.bytes();
-        assert_eq!(offset as usize as u64, offset);
-        let offset = offset as usize;
-        if offset > next_offset {
-            llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
-        }
-        let ptr_offset = read_target_uint(
-            dl.endian,
-            &alloc.bytes[offset..(offset + pointer_size)],
-        ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
-        llvals.push(cx.scalar_to_backend(
-            Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
-            &layout::Scalar {
-                value: layout::Primitive::Pointer,
-                valid_range: 0..=!0
-            },
-            cx.type_i8p()
-        ));
-        next_offset = offset + pointer_size;
-    }
-    if alloc.bytes.len() >= next_offset {
-        llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
-    }
-
-    cx.const_struct(&llvals, true)
-}
-
-pub fn codegen_static_initializer(
-    cx: &CodegenCx<'ll, 'tcx>,
-    def_id: DefId,
-) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
-    let instance = ty::Instance::mono(cx.tcx, def_id);
-    let cid = GlobalId {
-        instance,
-        promoted: None,
-    };
-    let param_env = ty::ParamEnv::reveal_all();
-    let static_ = cx.tcx.const_eval(param_env.and(cid))?;
-
-    let alloc = match static_.val {
-        ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc,
-        _ => bug!("static const eval returned {:#?}", static_),
-    };
-    Ok((const_alloc_to_llvm(cx, alloc), alloc))
-}
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    fn fully_evaluate(
-        &mut self,
-        bx: &Bx,
-        constant: &'tcx ty::Const<'tcx>,
-    ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> {
-        match constant.val {
-            ConstValue::Unevaluated(def_id, ref substs) => {
-                let tcx = bx.tcx();
-                let param_env = ty::ParamEnv::reveal_all();
-                let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap();
-                let cid = GlobalId {
-                    instance,
-                    promoted: None,
-                };
-                tcx.const_eval(param_env.and(cid))
-            },
-            _ => Ok(constant),
-        }
-    }
-
-    pub fn eval_mir_constant(
-        &mut self,
-        bx: &Bx,
-        constant: &mir::Constant<'tcx>,
-    ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> {
-        let c = self.monomorphize(&constant.literal);
-        self.fully_evaluate(bx, c)
-    }
-
-    /// process constant containing SIMD shuffle indices
-    pub fn simd_shuffle_indices(
-        &mut self,
-        bx: &Bx,
-        span: Span,
-        ty: Ty<'tcx>,
-        constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>,
-    ) -> (Bx::Value, Ty<'tcx>) {
-        constant
-            .and_then(|c| {
-                let field_ty = c.ty.builtin_index().unwrap();
-                let fields = match c.ty.sty {
-                    ty::Array(_, n) => n.unwrap_usize(bx.tcx()),
-                    ref other => bug!("invalid simd shuffle type: {}", other),
-                };
-                let values: Result<Vec<_>, ErrorHandled> = (0..fields).map(|field| {
-                    let field = const_field(
-                        bx.tcx(),
-                        ty::ParamEnv::reveal_all(),
-                        self.instance,
-                        None,
-                        mir::Field::new(field as usize),
-                        c,
-                    )?;
-                    if let Some(prim) = field.val.try_to_scalar() {
-                        let layout = bx.cx().layout_of(field_ty);
-                        let scalar = match layout.abi {
-                            layout::Abi::Scalar(ref x) => x,
-                            _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
-                        };
-                        Ok(bx.cx().scalar_to_backend(
-                            prim, scalar,
-                            bx.cx().immediate_backend_type(layout),
-                        ))
-                    } else {
-                        bug!("simd shuffle field {:?}", field)
-                    }
-                }).collect();
-                let llval = bx.cx().const_struct(&values?, false);
-                Ok((llval, c.ty))
-            })
-            .unwrap_or_else(|_| {
-                bx.tcx().sess.span_err(
-                    span,
-                    "could not evaluate shuffle_indices at compile time",
-                );
-                // We've errored, so we don't have to produce working code.
-                let ty = self.monomorphize(&ty);
-                let llty = bx.cx().backend_type(bx.cx().layout_of(ty));
-                (bx.cx().const_undef(llty), ty)
-            })
-    }
-}
diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs
deleted file mode 100644
index c7e2a5d2af3..00000000000
--- a/src/librustc_codegen_llvm/mir/mod.rs
+++ /dev/null
@@ -1,682 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use libc::c_uint;
-use llvm;
-use llvm_util;
-use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
-use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt};
-use rustc::mir::{self, Mir};
-use rustc::ty::subst::Substs;
-use rustc::session::config::DebugInfo;
-use base;
-use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext};
-use monomorphize::Instance;
-use abi::{FnType, PassMode};
-use interfaces::*;
-
-use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
-use syntax::symbol::keywords;
-
-use std::iter;
-
-use rustc_data_structures::bit_set::BitSet;
-use rustc_data_structures::indexed_vec::IndexVec;
-
-pub use self::constant::codegen_static_initializer;
-
-use self::analyze::CleanupKind;
-use self::place::PlaceRef;
-use rustc::mir::traversal;
-
-use self::operand::{OperandRef, OperandValue};
-
-/// Master context for codegenning from MIR.
-pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
-    instance: Instance<'tcx>,
-
-    mir: &'a mir::Mir<'tcx>,
-
-    debug_context: FunctionDebugContext<Bx::DIScope>,
-
-    llfn: Bx::Value,
-
-    cx: &'a Bx::CodegenCx,
-
-    fn_ty: FnType<'tcx, Ty<'tcx>>,
-
-    /// When unwinding is initiated, we have to store this personality
-    /// value somewhere so that we can load it and re-use it in the
-    /// resume instruction. The personality is (afaik) some kind of
-    /// value used for C++ unwinding, which must filter by type: we
-    /// don't really care about it very much. Anyway, this value
-    /// contains an alloca into which the personality is stored and
-    /// then later loaded when generating the DIVERGE_BLOCK.
-    personality_slot: Option<PlaceRef<'tcx, Bx::Value,>>,
-
-    /// A `Block` for each MIR `BasicBlock`
-    blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,
-
-    /// The funclet status of each basic block
-    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
-
-    /// When targeting MSVC, this stores the cleanup info for each funclet
-    /// BB. This is initialized as we compute the funclets' head block in RPO.
-    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
-
-    /// This stores the landing-pad block for a given BB, computed lazily on GNU
-    /// and eagerly on MSVC.
-    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
-
-    /// Cached unreachable block
-    unreachable_block: Option<Bx::BasicBlock>,
-
-    /// The location where each MIR arg/var/tmp/ret is stored. This is
-    /// usually an `PlaceRef` representing an alloca, but not always:
-    /// sometimes we can skip the alloca and just store the value
-    /// directly using an `OperandRef`, which makes for tighter LLVM
-    /// IR. The conditions for using an `OperandRef` are as follows:
-    ///
-    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
-    /// - the operand must never be referenced indirectly
-    ///     - we should not take its address using the `&` operator
-    ///     - nor should it appear in a place path like `tmp.a`
-    /// - the operand must be defined by an rvalue that can generate immediate
-    ///   values
-    ///
-    /// Avoiding allocs can also be important for certain intrinsics,
-    /// notably `expect`.
-    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
-
-    /// Debug information for MIR scopes.
-    scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<Bx::DIScope>>,
-
-    /// If this function is being monomorphized, this contains the type substitutions used.
-    param_substs: &'tcx Substs<'tcx>,
-}
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn monomorphize<T>(&self, value: &T) -> T
-        where T: TypeFoldable<'tcx>
-    {
-        self.cx.tcx().subst_and_normalize_erasing_regions(
-            self.param_substs,
-            ty::ParamEnv::reveal_all(),
-            value,
-        )
-    }
-
-    pub fn set_debug_loc(
-        &mut self,
-        bx: &Bx,
-        source_info: mir::SourceInfo
-    ) {
-        let (scope, span) = self.debug_loc(source_info);
-        bx.set_source_location(&self.debug_context, scope, span);
-    }
-
-    pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
-        // Bail out if debug info emission is not enabled.
-        match self.debug_context {
-            FunctionDebugContext::DebugInfoDisabled |
-            FunctionDebugContext::FunctionWithoutDebugInfo => {
-                return (self.scopes[source_info.scope].scope_metadata, source_info.span);
-            }
-            FunctionDebugContext::RegularContext(_) =>{}
-        }
-
-        // In order to have a good line stepping behavior in debugger, we overwrite debug
-        // locations of macro expansions with that of the outermost expansion site
-        // (unless the crate is being compiled with `-Z debug-macros`).
-        if source_info.span.ctxt() == NO_EXPANSION ||
-           self.cx.sess().opts.debugging_opts.debug_macros {
-            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
-            (scope, source_info.span)
-        } else {
-            // Walk up the macro expansion chain until we reach a non-expanded span.
-            // We also stop at the function body level because no line stepping can occur
-            // at the level above that.
-            let mut span = source_info.span;
-            while span.ctxt() != NO_EXPANSION && span.ctxt() != self.mir.span.ctxt() {
-                if let Some(info) = span.ctxt().outer().expn_info() {
-                    span = info.call_site;
-                } else {
-                    break;
-                }
-            }
-            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo());
-            // Use span of the outermost expansion site, while keeping the original lexical scope.
-            (scope, span)
-        }
-    }
-
-    // DILocations inherit source file name from the parent DIScope.  Due to macro expansions
-    // it may so happen that the current span belongs to a different file than the DIScope
-    // corresponding to span's containing source scope.  If so, we need to create a DIScope
-    // "extension" into that file.
-    fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos)
-                              -> Option<Bx::DIScope> {
-        let scope_metadata = self.scopes[scope_id].scope_metadata;
-        if pos < self.scopes[scope_id].file_start_pos ||
-           pos >= self.scopes[scope_id].file_end_pos {
-            let sm = self.cx.sess().source_map();
-            let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate;
-            Some(self.cx.extend_scope_to_file(
-                scope_metadata.unwrap(),
-                &sm.lookup_char_pos(pos).file,
-                defining_crate,
-            ))
-        } else {
-            scope_metadata
-        }
-    }
-}
-
-enum LocalRef<'tcx, V> {
-    Place(PlaceRef<'tcx, V>),
-    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
-    /// `*p` is the fat pointer that references the actual unsized place.
-    /// Every time it is initialized, we have to reallocate the place
-    /// and update the fat pointer. That's the reason why it is indirect.
-    UnsizedPlace(PlaceRef<'tcx, V>),
-    Operand(Option<OperandRef<'tcx, V>>),
-}
-
-impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
-    fn new_operand<Cx: CodegenMethods<'tcx, Value = V>>(
-        cx: &Cx,
-        layout: TyLayout<'tcx>,
-    ) -> LocalRef<'tcx, V> {
-        if layout.is_zst() {
-            // Zero-size temporaries aren't always initialized, which
-            // doesn't matter because they don't contain data, but
-            // we need something in the operand.
-            LocalRef::Operand(Some(OperandRef::new_zst(cx, layout)))
-        } else {
-            LocalRef::Operand(None)
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-
-pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    cx: &'a Bx::CodegenCx,
-    llfn: Bx::Value,
-    mir: &'a Mir<'tcx>,
-    instance: Instance<'tcx>,
-    sig: ty::FnSig<'tcx>,
-) {
-    let fn_ty = cx.new_fn_type(sig, &[]);
-    debug!("fn_ty: {:?}", fn_ty);
-    let debug_context =
-        cx.create_function_debug_context(instance, sig, llfn, mir);
-    let bx = Bx::new_block(cx, llfn, "start");
-
-    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
-        bx.set_personality_fn(cx.eh_personality());
-    }
-
-    let cleanup_kinds = analyze::cleanup_kinds(&mir);
-    // Allocate a `Block` for every basic block, except
-    // the start block, if nothing loops back to it.
-    let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
-    let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> =
-        mir.basic_blocks().indices().map(|bb| {
-            if bb == mir::START_BLOCK && !reentrant_start_block {
-                bx.llbb()
-            } else {
-                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
-            }
-        }).collect();
-
-    // Compute debuginfo scopes from MIR scopes.
-    let scopes = cx.create_mir_scopes(mir, &debug_context);
-    let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs);
-
-    let mut fx = FunctionCx {
-        instance,
-        mir,
-        llfn,
-        fn_ty,
-        cx,
-        personality_slot: None,
-        blocks: block_bxs,
-        unreachable_block: None,
-        cleanup_kinds,
-        landing_pads,
-        funclets,
-        scopes,
-        locals: IndexVec::new(),
-        debug_context,
-        param_substs: {
-            assert!(!instance.substs.needs_infer());
-            instance.substs
-        },
-    };
-
-    let memory_locals = analyze::non_ssa_locals(&fx);
-
-    // Allocate variable and temp allocas
-    fx.locals = {
-        let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals);
-
-        let allocate_local = |local| {
-            let decl = &mir.local_decls[local];
-            let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
-            assert!(!layout.ty.has_erasable_regions());
-
-            if let Some(name) = decl.name {
-                // User variable
-                let debug_scope = fx.scopes[decl.visibility_scope];
-                let dbg = debug_scope.is_valid() &&
-                    bx.cx().sess().opts.debuginfo == DebugInfo::Full;
-
-                if !memory_locals.contains(local) && !dbg {
-                    debug!("alloc: {:?} ({}) -> operand", local, name);
-                    return LocalRef::new_operand(bx.cx(), layout);
-                }
-
-                debug!("alloc: {:?} ({}) -> place", local, name);
-                if layout.is_unsized() {
-                    let indirect_place =
-                        PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str());
-                    // FIXME: add an appropriate debuginfo
-                    LocalRef::UnsizedPlace(indirect_place)
-                } else {
-                    let place = PlaceRef::alloca(&bx, layout, &name.as_str());
-                    if dbg {
-                        let (scope, span) = fx.debug_loc(mir::SourceInfo {
-                            span: decl.source_info.span,
-                            scope: decl.visibility_scope,
-                        });
-                        bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(),
-                            VariableAccess::DirectVariable { alloca: place.llval },
-                            VariableKind::LocalVariable, span);
-                    }
-                    LocalRef::Place(place)
-                }
-            } else {
-                // Temporary or return place
-                if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
-                    debug!("alloc: {:?} (return place) -> place", local);
-                    let llretptr = fx.cx.get_param(llfn, 0);
-                    LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
-                } else if memory_locals.contains(local) {
-                    debug!("alloc: {:?} -> place", local);
-                    if layout.is_unsized() {
-                        let indirect_place =
-                            PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local));
-                        LocalRef::UnsizedPlace(indirect_place)
-                    } else {
-                        LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
-                    }
-                } else {
-                    // If this is an immediate local, we do not create an
-                    // alloca in advance. Instead we wait until we see the
-                    // definition and update the operand there.
-                    debug!("alloc: {:?} -> operand", local);
-                    LocalRef::new_operand(bx.cx(), layout)
-                }
-            }
-        };
-
-        let retptr = allocate_local(mir::RETURN_PLACE);
-        iter::once(retptr)
-            .chain(args.into_iter())
-            .chain(mir.vars_and_temps_iter().map(allocate_local))
-            .collect()
-    };
-
-    // Branch to the START block, if it's not the entry block.
-    if reentrant_start_block {
-        bx.br(fx.blocks[mir::START_BLOCK]);
-    }
-
-    // Up until here, IR instructions for this function have explicitly not been annotated with
-    // source code location, so we don't step into call setup code. From here on, source location
-    // emitting should be enabled.
-    debuginfo::start_emitting_source_locations(&fx.debug_context);
-
-    let rpo = traversal::reverse_postorder(&mir);
-    let mut visited = BitSet::new_empty(mir.basic_blocks().len());
-
-    // Codegen the body of each block using reverse postorder
-    for (bb, _) in rpo {
-        visited.insert(bb.index());
-        fx.codegen_block(bb);
-    }
-
-    // Remove blocks that haven't been visited, or have no
-    // predecessors.
-    for bb in mir.basic_blocks().indices() {
-        // Unreachable block
-        if !visited.contains(bb.index()) {
-            debug!("codegen_mir: block {:?} was not visited", bb);
-            bx.delete_basic_block(fx.blocks[bb]);
-        }
-    }
-}
-
-fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    mir: &'a Mir<'tcx>,
-    bx: &Bx,
-    cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
-    block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>)
-    -> (IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
-        IndexVec<mir::BasicBlock, Option<Bx::Funclet>>)
-{
-    block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
-        match *cleanup_kind {
-            CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {}
-            _ => return (None, None)
-        }
-
-        let funclet;
-        let ret_llbb;
-        match mir[bb].terminator.as_ref().map(|t| &t.kind) {
-            // This is a basic block that we're aborting the program for,
-            // notably in an `extern` function. These basic blocks are inserted
-            // so that we assert that `extern` functions do indeed not panic,
-            // and if they do we abort the process.
-            //
-            // On MSVC these are tricky though (where we're doing funclets). If
-            // we were to do a cleanuppad (like below) the normal functions like
-            // `longjmp` would trigger the abort logic, terminating the
-            // program. Instead we insert the equivalent of `catch(...)` for C++
-            // which magically doesn't trigger when `longjmp` files over this
-            // frame.
-            //
-            // Lots more discussion can be found on #48251 but this codegen is
-            // modeled after clang's for:
-            //
-            //      try {
-            //          foo();
-            //      } catch (...) {
-            //          bar();
-            //      }
-            Some(&mir::TerminatorKind::Abort) => {
-                let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
-                let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
-                ret_llbb = cs_bx.llbb();
-
-                let cs = cs_bx.catch_switch(None, None, 1);
-                cs_bx.add_handler(cs, cp_bx.llbb());
-
-                // The "null" here is actually a RTTI type descriptor for the
-                // C++ personality function, but `catch (...)` has no type so
-                // it's null. The 64 here is actually a bitfield which
-                // represents that this is a catch-all block.
-                let null = bx.cx().const_null(bx.cx().type_i8p());
-                let sixty_four = bx.cx().const_i32(64);
-                funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
-                cp_bx.br(llbb);
-            }
-            _ => {
-                let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
-                ret_llbb = cleanup_bx.llbb();
-                funclet = cleanup_bx.cleanup_pad(None, &[]);
-                cleanup_bx.br(llbb);
-            }
-        };
-
-        (Some(ret_llbb), Some(funclet))
-    }).unzip()
-}
-
-/// Produce, for each argument, a `Value` pointing at the
-/// argument's value. As arguments are places, these are always
-/// indirect.
-fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    fx: &FunctionCx<'a, 'tcx, Bx>,
-    scopes: &IndexVec<
-        mir::SourceScope,
-        debuginfo::MirDebugScope<Bx::DIScope>
-    >,
-    memory_locals: &BitSet<mir::Local>,
-) -> Vec<LocalRef<'tcx, Bx::Value>> {
-    let mir = fx.mir;
-    let tcx = bx.tcx();
-    let mut idx = 0;
-    let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize;
-
-    // Get the argument scope, if it exists and if we need it.
-    let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
-    let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full {
-        arg_scope.scope_metadata
-    } else {
-        None
-    };
-
-    mir.args_iter().enumerate().map(|(arg_index, local)| {
-        let arg_decl = &mir.local_decls[local];
-
-        let name = if let Some(name) = arg_decl.name {
-            name.as_str().to_string()
-        } else {
-            format!("arg{}", arg_index)
-        };
-
-        if Some(local) == mir.spread_arg {
-            // This argument (e.g. the last argument in the "rust-call" ABI)
-            // is a tuple that was spread at the ABI level and now we have
-            // to reconstruct it into a tuple local variable, from multiple
-            // individual LLVM function arguments.
-
-            let arg_ty = fx.monomorphize(&arg_decl.ty);
-            let tupled_arg_tys = match arg_ty.sty {
-                ty::Tuple(ref tys) => tys,
-                _ => bug!("spread argument isn't a tuple?!")
-            };
-
-            let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name);
-            for i in 0..tupled_arg_tys.len() {
-                let arg = &fx.fn_ty.args[idx];
-                idx += 1;
-                if arg.pad.is_some() {
-                    llarg_idx += 1;
-                }
-                bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i));
-            }
-
-            // Now that we have one alloca that contains the aggregate value,
-            // we can create one debuginfo entry for the argument.
-            arg_scope.map(|scope| {
-                let variable_access = VariableAccess::DirectVariable {
-                    alloca: place.llval
-                };
-                bx.declare_local(
-                    &fx.debug_context,
-                    arg_decl.name.unwrap_or(keywords::Invalid.name()),
-                    arg_ty, scope,
-                    variable_access,
-                    VariableKind::ArgumentVariable(arg_index + 1),
-                    DUMMY_SP
-                );
-            });
-
-            return LocalRef::Place(place);
-        }
-
-        let arg = &fx.fn_ty.args[idx];
-        idx += 1;
-        if arg.pad.is_some() {
-            llarg_idx += 1;
-        }
-
-        if arg_scope.is_none() && !memory_locals.contains(local) {
-            // We don't have to cast or keep the argument in the alloca.
-            // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
-            // of putting everything in allocas just so we can use llvm.dbg.declare.
-            let local = |op| LocalRef::Operand(Some(op));
-            match arg.mode {
-                PassMode::Ignore => {
-                    return local(OperandRef::new_zst(bx.cx(), arg.layout));
-                }
-                PassMode::Direct(_) => {
-                    let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
-                    bx.set_value_name(llarg, &name);
-                    llarg_idx += 1;
-                    return local(
-                        OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
-                }
-                PassMode::Pair(..) => {
-                    let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
-                    bx.set_value_name(a, &(name.clone() + ".0"));
-                    llarg_idx += 1;
-
-                    let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
-                    bx.set_value_name(b, &(name + ".1"));
-                    llarg_idx += 1;
-
-                    return local(OperandRef {
-                        val: OperandValue::Pair(a, b),
-                        layout: arg.layout
-                    });
-                }
-                _ => {}
-            }
-        }
-
-        let place = if arg.is_sized_indirect() {
-            // Don't copy an indirect argument to an alloca, the caller
-            // already put it in a temporary alloca and gave it up.
-            // FIXME: lifetimes
-            let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
-            bx.set_value_name(llarg, &name);
-            llarg_idx += 1;
-            PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
-        } else if arg.is_unsized_indirect() {
-            // As the storage for the indirect argument lives during
-            // the whole function call, we just copy the fat pointer.
-            let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
-            llarg_idx += 1;
-            let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
-            llarg_idx += 1;
-            let indirect_operand = OperandValue::Pair(llarg, llextra);
-
-            let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name);
-            indirect_operand.store(bx, tmp);
-            tmp
-        } else {
-            let tmp = PlaceRef::alloca(bx, arg.layout, &name);
-            bx.store_fn_arg(arg, &mut llarg_idx, tmp);
-            tmp
-        };
-        arg_scope.map(|scope| {
-            // Is this a regular argument?
-            if arg_index > 0 || mir.upvar_decls.is_empty() {
-                // The Rust ABI passes indirect variables using a pointer and a manual copy, so we
-                // need to insert a deref here, but the C ABI uses a pointer and a copy using the
-                // byval attribute, for which LLVM always does the deref itself,
-                // so we must not add it.
-                let variable_access = VariableAccess::DirectVariable {
-                    alloca: place.llval
-                };
-
-                bx.declare_local(
-                    &fx.debug_context,
-                    arg_decl.name.unwrap_or(keywords::Invalid.name()),
-                    arg.layout.ty,
-                    scope,
-                    variable_access,
-                    VariableKind::ArgumentVariable(arg_index + 1),
-                    DUMMY_SP
-                );
-                return;
-            }
-
-            // Or is it the closure environment?
-            let (closure_layout, env_ref) = match arg.layout.ty.sty {
-                ty::RawPtr(ty::TypeAndMut { ty, .. }) |
-                ty::Ref(_, ty, _)  => (bx.cx().layout_of(ty), true),
-                _ => (arg.layout, false)
-            };
-
-            let (def_id, upvar_substs) = match closure_layout.ty.sty {
-                ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
-                ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
-                _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
-            };
-            let upvar_tys = upvar_substs.upvar_tys(def_id, tcx);
-
-            // Store the pointer to closure data in an alloca for debuginfo
-            // because that's what the llvm.dbg.declare intrinsic expects.
-
-            // FIXME(eddyb) this shouldn't be necessary but SROA seems to
-            // mishandle DW_OP_plus not preceded by DW_OP_deref, i.e. it
-            // doesn't actually strip the offset when splitting the closure
-            // environment into its components so it ends up out of bounds.
-            // (cuviper) It seems to be fine without the alloca on LLVM 6 and later.
-            let env_alloca = !env_ref && llvm_util::get_major_version() < 6;
-            let env_ptr = if env_alloca {
-                let scratch = PlaceRef::alloca(bx,
-                    bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
-                    "__debuginfo_env_ptr");
-                bx.store(place.llval, scratch.llval, scratch.align);
-                scratch.llval
-            } else {
-                place.llval
-            };
-
-            for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
-                let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();
-
-                let ops = unsafe {
-                    [llvm::LLVMRustDIBuilderCreateOpDeref(),
-                     llvm::LLVMRustDIBuilderCreateOpPlusUconst(),
-                     byte_offset_of_var_in_env as i64,
-                     llvm::LLVMRustDIBuilderCreateOpDeref()]
-                };
-
-                // The environment and the capture can each be indirect.
-
-                // FIXME(eddyb) see above why we sometimes have to keep
-                // a pointer in an alloca for debuginfo atm.
-                let mut ops = if env_ref || env_alloca { &ops[..] } else { &ops[1..] };
-
-                let ty = if let (true, &ty::Ref(_, ty, _)) = (decl.by_ref, &ty.sty) {
-                    ty
-                } else {
-                    ops = &ops[..ops.len() - 1];
-                    ty
-                };
-
-                let variable_access = VariableAccess::IndirectVariable {
-                    alloca: env_ptr,
-                    address_operations: &ops
-                };
-                bx.declare_local(
-                    &fx.debug_context,
-                    decl.debug_name,
-                    ty,
-                    scope,
-                    variable_access,
-                    VariableKind::LocalVariable,
-                    DUMMY_SP
-                );
-            }
-        });
-        if arg.is_unsized_indirect() {
-            LocalRef::UnsizedPlace(place)
-        } else {
-            LocalRef::Place(place)
-        }
-    }).collect()
-}
-
-mod analyze;
-mod block;
-pub mod constant;
-pub mod place;
-pub mod operand;
-mod rvalue;
-mod statement;
diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs
deleted file mode 100644
index f2f60315728..00000000000
--- a/src/librustc_codegen_llvm/mir/operand.rs
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::mir::interpret::{ConstValue, ErrorHandled};
-use rustc::mir;
-use rustc::ty;
-use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
-
-use base;
-use builder::MemFlags;
-use glue;
-
-use interfaces::*;
-
-use std::fmt;
-
-use super::{FunctionCx, LocalRef};
-use super::place::PlaceRef;
-
-/// The representation of a Rust value. The enum variant is in fact
-/// uniquely determined by the value's type, but is kept as a
-/// safety check.
-#[derive(Copy, Clone, Debug)]
-pub enum OperandValue<V> {
-    /// A reference to the actual operand. The data is guaranteed
-    /// to be valid for the operand's lifetime.
-    /// The second value, if any, is the extra data (vtable or length)
-    /// which indicates that it refers to an unsized rvalue.
-    Ref(V, Option<V>, Align),
-    /// A single LLVM value.
-    Immediate(V),
-    /// A pair of immediate LLVM values. Used by fat pointers too.
-    Pair(V, V)
-}
-
-/// An `OperandRef` is an "SSA" reference to a Rust value, along with
-/// its type.
-///
-/// NOTE: unless you know a value's type exactly, you should not
-/// generate LLVM opcodes acting on it and instead act via methods,
-/// to avoid nasty edge cases. In particular, using `Builder::store`
-/// directly is sure to cause problems -- use `OperandRef::store`
-/// instead.
-#[derive(Copy, Clone)]
-pub struct OperandRef<'tcx, V> {
-    // The value.
-    pub val: OperandValue<V>,
-
-    // The layout of value, based on its Rust type.
-    pub layout: TyLayout<'tcx>,
-}
-
-impl<V: CodegenObject> fmt::Debug for OperandRef<'tcx, V> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
-    }
-}
-
-impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
-    pub fn new_zst<Cx: CodegenMethods<'tcx, Value = V>>(
-        cx: &Cx,
-        layout: TyLayout<'tcx>
-    ) -> OperandRef<'tcx, V> {
-        assert!(layout.is_zst());
-        OperandRef {
-            val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))),
-            layout
-        }
-    }
-
-    pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
-        val: &'tcx ty::Const<'tcx>
-    ) -> Result<Self, ErrorHandled> {
-        let layout = bx.cx().layout_of(val.ty);
-
-        if layout.is_zst() {
-            return Ok(OperandRef::new_zst(bx.cx(), layout));
-        }
-
-        let val = match val.val {
-            ConstValue::Unevaluated(..) => bug!(),
-            ConstValue::Scalar(x) => {
-                let scalar = match layout.abi {
-                    layout::Abi::Scalar(ref x) => x,
-                    _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
-                };
-                let llval = bx.cx().scalar_to_backend(
-                    x,
-                    scalar,
-                    bx.cx().immediate_backend_type(layout),
-                );
-                OperandValue::Immediate(llval)
-            },
-            ConstValue::ScalarPair(a, b) => {
-                let (a_scalar, b_scalar) = match layout.abi {
-                    layout::Abi::ScalarPair(ref a, ref b) => (a, b),
-                    _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout)
-                };
-                let a_llval = bx.cx().scalar_to_backend(
-                    a,
-                    a_scalar,
-                    bx.cx().scalar_pair_element_backend_type(layout, 0, true),
-                );
-                let b_llval = bx.cx().scalar_to_backend(
-                    b,
-                    b_scalar,
-                    bx.cx().scalar_pair_element_backend_type(layout, 1, true),
-                );
-                OperandValue::Pair(a_llval, b_llval)
-            },
-            ConstValue::ByRef(_, alloc, offset) => {
-                return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset)));
-            },
-        };
-
-        Ok(OperandRef {
-            val,
-            layout
-        })
-    }
-
-    /// Asserts that this operand refers to a scalar and returns
-    /// a reference to its value.
-    pub fn immediate(self) -> V {
-        match self.val {
-            OperandValue::Immediate(s) => s,
-            _ => bug!("not immediate: {:?}", self)
-        }
-    }
-
-    pub fn deref<Cx: CodegenMethods<'tcx, Value = V>>(
-        self,
-        cx: &Cx
-    ) -> PlaceRef<'tcx, V> {
-        let projected_ty = self.layout.ty.builtin_deref(true)
-            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
-        let (llptr, llextra) = match self.val {
-            OperandValue::Immediate(llptr) => (llptr, None),
-            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
-            OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self)
-        };
-        let layout = cx.layout_of(projected_ty);
-        PlaceRef {
-            llval: llptr,
-            llextra,
-            layout,
-            align: layout.align,
-        }
-    }
-
-    /// If this operand is a `Pair`, we return an aggregate with the two values.
-    /// For other cases, see `immediate`.
-    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx
-    ) -> V {
-        if let OperandValue::Pair(a, b) = self.val {
-            let llty = bx.cx().backend_type(self.layout);
-            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
-                   self, llty);
-            // Reconstruct the immediate aggregate.
-            let mut llpair = bx.cx().const_undef(llty);
-            llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0);
-            llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1);
-            llpair
-        } else {
-            self.immediate()
-        }
-    }
-
-    /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
-    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
-        llval: V,
-        layout: TyLayout<'tcx>
-    ) -> Self {
-        let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi {
-            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
-                    llval, layout);
-
-            // Deconstruct the immediate aggregate.
-            let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a);
-            let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b);
-            OperandValue::Pair(a_llval, b_llval)
-        } else {
-            OperandValue::Immediate(llval)
-        };
-        OperandRef { val, layout }
-    }
-
-    pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        &self,
-        bx: &Bx,
-        i: usize
-    ) -> Self {
-        let field = self.layout.field(bx.cx(), i);
-        let offset = self.layout.fields.offset(i);
-
-        let mut val = match (self.val, &self.layout.abi) {
-            // If the field is ZST, it has no data.
-            _ if field.is_zst() => {
-                return OperandRef::new_zst(bx.cx(), field);
-            }
-
-            // Newtype of a scalar, scalar pair or vector.
-            (OperandValue::Immediate(_), _) |
-            (OperandValue::Pair(..), _) if field.size == self.layout.size => {
-                assert_eq!(offset.bytes(), 0);
-                self.val
-            }
-
-            // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
-                if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bx.cx()));
-                    OperandValue::Immediate(a_llval)
-                } else {
-                    assert_eq!(offset, a.value.size(bx.cx())
-                        .abi_align(b.value.align(bx.cx())));
-                    assert_eq!(field.size, b.value.size(bx.cx()));
-                    OperandValue::Immediate(b_llval)
-                }
-            }
-
-            // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
-                OperandValue::Immediate(
-                    bx.extract_element(llval, bx.cx().const_usize(i as u64)))
-            }
-
-            _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
-        };
-
-        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-        match val {
-            OperandValue::Immediate(ref mut llval) => {
-                *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
-            }
-            OperandValue::Pair(ref mut a, ref mut b) => {
-                *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
-                *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
-            }
-            OperandValue::Ref(..) => bug!()
-        }
-
-        OperandRef {
-            val,
-            layout: field
-        }
-    }
-}
-
-impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
-    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        dest: PlaceRef<'tcx, V>
-    ) {
-        self.store_with_flags(bx, dest, MemFlags::empty());
-    }
-
-    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        dest: PlaceRef<'tcx, V>
-    ) {
-        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
-    }
-
-    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        dest: PlaceRef<'tcx, V>,
-    ) {
-        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
-    }
-
-    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        dest: PlaceRef<'tcx, V>
-    ) {
-        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
-    }
-
-    fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        dest: PlaceRef<'tcx, V>,
-        flags: MemFlags,
-    ) {
-        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
-        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
-        // value is through `undef`, and store itself is useless.
-        if dest.layout.is_zst() {
-            return;
-        }
-        match self {
-            OperandValue::Ref(r, None, source_align) => {
-                base::memcpy_ty(bx, dest.llval, dest.align, r, source_align,
-                                dest.layout, flags)
-            }
-            OperandValue::Ref(_, Some(_), _) => {
-                bug!("cannot directly store unsized values");
-            }
-            OperandValue::Immediate(s) => {
-                let val = base::from_immediate(bx, s);
-                bx.store_with_flags(val, dest.llval, dest.align, flags);
-            }
-            OperandValue::Pair(a, b) => {
-                for (i, &x) in [a, b].iter().enumerate() {
-                    let llptr = bx.struct_gep(dest.llval, i as u64);
-                    let val = base::from_immediate(bx, x);
-                    bx.store_with_flags(val, llptr, dest.align, flags);
-                }
-            }
-        }
-    }
-    pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        indirect_dest: PlaceRef<'tcx, V>
-    ) {
-        debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
-        let flags = MemFlags::empty();
-
-        // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
-        let unsized_ty = indirect_dest.layout.ty.builtin_deref(true)
-            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest)).ty;
-
-        let (llptr, llextra) =
-            if let OperandValue::Ref(llptr, Some(llextra), _) = self {
-                (llptr, llextra)
-            } else {
-                bug!("store_unsized called with a sized value")
-            };
-
-        // FIXME: choose an appropriate alignment, or use dynamic align somehow
-        let max_align = Align::from_bits(128, 128).unwrap();
-        let min_align = Align::from_bits(8, 8).unwrap();
-
-        // Allocate an appropriate region on the stack, and copy the value into it
-        let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
-        let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align);
-        bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
-
-        // Store the allocated region and the extra to the indirect place.
-        let indirect_operand = OperandValue::Pair(lldst, llextra);
-        indirect_operand.store(bx, indirect_dest);
-    }
-}
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    fn maybe_codegen_consume_direct(
-        &mut self,
-        bx: &Bx,
-        place: &mir::Place<'tcx>
-    ) -> Option<OperandRef<'tcx, Bx::Value>> {
-        debug!("maybe_codegen_consume_direct(place={:?})", place);
-
-        // watch out for locals that do not have an
-        // alloca; they are handled somewhat differently
-        if let mir::Place::Local(index) = *place {
-            match self.locals[index] {
-                LocalRef::Operand(Some(o)) => {
-                    return Some(o);
-                }
-                LocalRef::Operand(None) => {
-                    bug!("use of {:?} before def", place);
-                }
-                LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
-                    // use path below
-                }
-            }
-        }
-
-        // Moves out of scalar and scalar pair fields are trivial.
-        if let &mir::Place::Projection(ref proj) = place {
-            if let Some(o) = self.maybe_codegen_consume_direct(bx, &proj.base) {
-                match proj.elem {
-                    mir::ProjectionElem::Field(ref f, _) => {
-                        return Some(o.extract_field(bx, f.index()));
-                    }
-                    mir::ProjectionElem::Index(_) |
-                    mir::ProjectionElem::ConstantIndex { .. } => {
-                        // ZSTs don't require any actual memory access.
-                        // FIXME(eddyb) deduplicate this with the identical
-                        // checks in `codegen_consume` and `extract_field`.
-                        let elem = o.layout.field(bx.cx(), 0);
-                        if elem.is_zst() {
-                            return Some(OperandRef::new_zst(bx.cx(), elem));
-                        }
-                    }
-                    _ => {}
-                }
-            }
-        }
-
-        None
-    }
-
-    pub fn codegen_consume(
-        &mut self,
-        bx: &Bx,
-        place: &mir::Place<'tcx>
-    ) -> OperandRef<'tcx, Bx::Value> {
-        debug!("codegen_consume(place={:?})", place);
-
-        let ty = self.monomorphized_place_ty(place);
-        let layout = bx.cx().layout_of(ty);
-
-        // ZSTs don't require any actual memory access.
-        if layout.is_zst() {
-            return OperandRef::new_zst(bx.cx(), layout);
-        }
-
-        if let Some(o) = self.maybe_codegen_consume_direct(bx, place) {
-            return o;
-        }
-
-        // for most places, to consume them we just load them
-        // out from their home
-        bx.load_operand(self.codegen_place(bx, place))
-    }
-
-    pub fn codegen_operand(
-        &mut self,
-        bx: &Bx,
-        operand: &mir::Operand<'tcx>
-    ) -> OperandRef<'tcx, Bx::Value> {
-        debug!("codegen_operand(operand={:?})", operand);
-
-        match *operand {
-            mir::Operand::Copy(ref place) |
-            mir::Operand::Move(ref place) => {
-                self.codegen_consume(bx, place)
-            }
-
-            mir::Operand::Constant(ref constant) => {
-                let ty = self.monomorphize(&constant.ty);
-                self.eval_mir_constant(bx, constant)
-                    .and_then(|c| OperandRef::from_const(bx, c))
-                    .unwrap_or_else(|err| {
-                        match err {
-                            // errored or at least linted
-                            ErrorHandled::Reported => {},
-                            ErrorHandled::TooGeneric => {
-                                bug!("codgen encountered polymorphic constant")
-                            },
-                        }
-                        // Allow RalfJ to sleep soundly knowing that even refactorings that remove
-                        // the above error (or silence it under some conditions) will not cause UB
-                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
-                        bx.call(fnname, &[], None);
-                        // We've errored, so we don't have to produce working code.
-                        let layout = bx.cx().layout_of(ty);
-                        bx.load_operand(PlaceRef::new_sized(
-                            bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
-                            layout,
-                            layout.align,
-                        ))
-                    })
-            }
-        }
-    }
-}
diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs
deleted file mode 100644
index 8fa35d3aaf2..00000000000
--- a/src/librustc_codegen_llvm/mir/place.rs
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
-use rustc::mir;
-use rustc::mir::tcx::PlaceTy;
-use builder::MemFlags;
-use rustc_codegen_ssa::common::IntPredicate;
-use type_of::LayoutLlvmExt;
-use glue;
-
-use interfaces::*;
-
-use super::{FunctionCx, LocalRef};
-use super::operand::OperandValue;
-
-#[derive(Copy, Clone, Debug)]
-pub struct PlaceRef<'tcx, V> {
-    /// Pointer to the contents of the place
-    pub llval: V,
-
-    /// This place's extra data if it is unsized, or null
-    pub llextra: Option<V>,
-
-    /// Monomorphized type of this place, including variant information
-    pub layout: TyLayout<'tcx>,
-
-    /// What alignment we know for this place
-    pub align: Align,
-}
-
-impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
-    pub fn new_sized(
-        llval: V,
-        layout: TyLayout<'tcx>,
-        align: Align,
-    ) -> PlaceRef<'tcx, V> {
-        assert!(!layout.is_unsized());
-        PlaceRef {
-            llval,
-            llextra: None,
-            layout,
-            align
-        }
-    }
-
-    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
-        layout: TyLayout<'tcx>,
-        name: &str
-    ) -> Self {
-        debug!("alloca({:?}: {:?})", name, layout);
-        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
-        Self::new_sized(tmp, layout, layout.align)
-    }
-
-    /// Returns a place for an indirect reference to an unsized place.
-    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
-        layout: TyLayout<'tcx>,
-        name: &str,
-    ) -> Self {
-        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
-        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
-        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
-        let ptr_layout = bx.cx().layout_of(ptr_ty);
-        Self::alloca(bx, ptr_layout, name)
-    }
-
-    pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
-        &self,
-        cx: &Cx
-    ) -> V {
-        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
-            if self.layout.is_unsized() {
-                assert_eq!(count, 0);
-                self.llextra.unwrap()
-            } else {
-                cx.const_usize(count)
-            }
-        } else {
-            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
-        }
-    }
-
-}
-
-impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
-    /// Access a field, at a point when the value's case is known.
-    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self, bx: &Bx,
-        ix: usize,
-    ) -> Self {
-        let cx = bx.cx();
-        let field = self.layout.field(cx, ix);
-        let offset = self.layout.fields.offset(ix);
-        let effective_field_align = self.align.restrict_for_offset(offset);
-
-        let simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
-            };
-            PlaceRef {
-                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
-                llextra: if cx.type_has_metadata(field.ty) {
-                    self.llextra
-                } else {
-                    None
-                },
-                layout: field,
-                align: effective_field_align,
-            }
-        };
-
-        // Simple cases, which don't need DST adjustment:
-        //   * no metadata available - just log the case
-        //   * known alignment - sized types, [T], str or a foreign type
-        //   * packed struct - there is no alignment padding
-        match field.ty.sty {
-            _ if self.llextra.is_none() => {
-                debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
-                    ix, self.llval);
-                return simple();
-            }
-            _ if !field.is_unsized() => return simple(),
-            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
-            ty::Adt(def, _) => {
-                if def.repr.packed() {
-                    // FIXME(eddyb) generalize the adjustment when we
-                    // start supporting packing to larger alignments.
-                    assert_eq!(self.layout.align.abi(), 1);
-                    return simple();
-                }
-            }
-            _ => {}
-        }
-
-        // We need to get the pointer manually now.
-        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
-        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
-        // because the field may have an arbitrary alignment in the LLVM representation
-        // anyway.
-        //
-        // To demonstrate:
-        //   struct Foo<T: ?Sized> {
-        //      x: u16,
-        //      y: T
-        //   }
-        //
-        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
-        // the `y` field has 16-bit alignment.
-
-        let meta = self.llextra;
-
-        let unaligned_offset = cx.const_usize(offset.bytes());
-
-        // Get the alignment of the field
-        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
-
-        // Bump the unaligned offset up to the appropriate alignment using the
-        // following expression:
-        //
-        //   (unaligned offset + (align - 1)) & -align
-
-        // Calculate offset
-        let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
-        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
-        bx.neg(unsized_align));
-
-        debug!("struct_field_ptr: DST field offset: {:?}", offset);
-
-        // Cast and adjust pointer
-        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
-        let byte_ptr = bx.gep(byte_ptr, &[offset]);
-
-        // Finally, cast back to the type expected
-        let ll_fty = cx.backend_type(field);
-        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
-        PlaceRef {
-            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
-            llextra: self.llextra,
-            layout: field,
-            align: effective_field_align,
-        }
-    }
-
-    /// Obtain the actual discriminant of a value.
-    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self,
-        bx: &Bx,
-        cast_to: Ty<'tcx>
-    ) -> V {
-        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
-        if self.layout.abi.is_uninhabited() {
-            return bx.cx().const_undef(cast_to);
-        }
-        match self.layout.variants {
-            layout::Variants::Single { index } => {
-                let discr_val = self.layout.ty.ty_adt_def().map_or(
-                    index.as_u32() as u128,
-                    |def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
-                return bx.cx().const_uint_big(cast_to, discr_val);
-            }
-            layout::Variants::Tagged { .. } |
-            layout::Variants::NicheFilling { .. } => {},
-        }
-
-        let discr = self.project_field(bx, 0);
-        let lldiscr = bx.load_operand(discr).immediate();
-        match self.layout.variants {
-            layout::Variants::Single { .. } => bug!(),
-            layout::Variants::Tagged { ref tag, .. } => {
-                let signed = match tag.value {
-                    // We use `i1` for bytes that are always `0` or `1`,
-                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
-                    // let LLVM interpret the `i1` as signed, because
-                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
-                    layout::Int(_, signed) => !tag.is_bool() && signed,
-                    _ => false
-                };
-                bx.intcast(lldiscr, cast_to, signed)
-            }
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                let niche_llty = bx.cx().immediate_backend_type(discr.layout);
-                if niche_variants.start() == niche_variants.end() {
-                    // FIXME(eddyb) Check the actual primitive type here.
-                    let niche_llval = if niche_start == 0 {
-                        // HACK(eddyb) Using `c_null` as it works on all types.
-                        bx.cx().const_null(niche_llty)
-                    } else {
-                        bx.cx().const_uint_big(niche_llty, niche_start)
-                    };
-                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
-                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
-                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
-                } else {
-                    // Rebase from niche values to discriminant values.
-                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
-                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
-                    let lldiscr_max =
-                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
-                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
-                        bx.intcast(lldiscr, cast_to, false),
-                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
-                }
-            }
-        }
-    }
-
-    /// Set the discriminant for a new value of the given case of the given
-    /// representation.
-    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        &self,
-        bx: &Bx,
-        variant_index: VariantIdx
-    ) {
-        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
-            return;
-        }
-        match self.layout.variants {
-            layout::Variants::Single { index } => {
-                assert_eq!(index, variant_index);
-            }
-            layout::Variants::Tagged { .. } => {
-                let ptr = self.project_field(bx, 0);
-                let to = self.layout.ty.ty_adt_def().unwrap()
-                    .discriminant_for_variant(bx.tcx(), variant_index)
-                    .val;
-                bx.store(
-                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
-                    ptr.llval,
-                    ptr.align);
-            }
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                if variant_index != dataful_variant {
-                    if bx.cx().sess().target.target.arch == "arm" ||
-                       bx.cx().sess().target.target.arch == "aarch64" {
-                        // Issue #34427: As workaround for LLVM bug on ARM,
-                        // use memset of 0 before assigning niche value.
-                        let fill_byte = bx.cx().const_u8(0);
-                        let (size, align) = self.layout.size_and_align();
-                        let size = bx.cx().const_usize(size.bytes());
-                        bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
-                    }
-
-                    let niche = self.project_field(bx, 0);
-                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
-                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
-                    let niche_value = (niche_value as u128)
-                        .wrapping_add(niche_start);
-                    // FIXME(eddyb) Check the actual primitive type here.
-                    let niche_llval = if niche_value == 0 {
-                        // HACK(eddyb) Using `c_null` as it works on all types.
-                        bx.cx().const_null(niche_llty)
-                    } else {
-                        bx.cx().const_uint_big(niche_llty, niche_value)
-                    };
-                    OperandValue::Immediate(niche_llval).store(bx, niche);
-                }
-            }
-        }
-    }
-
-    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        &self,
-        bx: &Bx,
-        llindex: V
-    ) -> Self {
-        PlaceRef {
-            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
-            llextra: None,
-            layout: self.layout.field(bx.cx(), 0),
-            align: self.align
-        }
-    }
-
-    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        &self,
-        bx: &Bx,
-        variant_index: VariantIdx
-    ) -> Self {
-        let mut downcast = *self;
-        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
-
-        // Cast to the appropriate variant struct type.
-        let variant_ty = bx.cx().backend_type(downcast.layout);
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
-        downcast
-    }
-
-    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
-        bx.lifetime_start(self.llval, self.layout.size);
-    }
-
-    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
-        bx.lifetime_end(self.llval, self.layout.size);
-    }
-}
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_place(
-        &mut self,
-        bx: &Bx,
-        place: &mir::Place<'tcx>
-    ) -> PlaceRef<'tcx, Bx::Value> {
-        debug!("codegen_place(place={:?})", place);
-
-        let cx = bx.cx();
-        let tcx = cx.tcx();
-
-        if let mir::Place::Local(index) = *place {
-            match self.locals[index] {
-                LocalRef::Place(place) => {
-                    return place;
-                }
-                LocalRef::UnsizedPlace(place) => {
-                    return bx.load_operand(place).deref(cx);
-                }
-                LocalRef::Operand(..) => {
-                    bug!("using operand local {:?} as place", place);
-                }
-            }
-        }
-
-        let result = match *place {
-            mir::Place::Local(_) => bug!(), // handled above
-            mir::Place::Promoted(box (index, ty)) => {
-                let param_env = ty::ParamEnv::reveal_all();
-                let cid = mir::interpret::GlobalId {
-                    instance: self.instance,
-                    promoted: Some(index),
-                };
-                let layout = cx.layout_of(self.monomorphize(&ty));
-                match bx.tcx().const_eval(param_env.and(cid)) {
-                    Ok(val) => match val.val {
-                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
-                            bx.cx().from_const_alloc(layout, alloc, offset)
-                        }
-                        _ => bug!("promoteds should have an allocation: {:?}", val),
-                    },
-                    Err(_) => {
-                        // this is unreachable as long as runtime
-                        // and compile-time agree on values
-                        // With floats that won't always be true
-                        // so we generate an abort
-                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
-                        bx.call(fnname, &[], None);
-                        let llval = bx.cx().const_undef(
-                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
-                        );
-                        PlaceRef::new_sized(llval, layout, layout.align)
-                    }
-                }
-            }
-            mir::Place::Static(box mir::Static { def_id, ty }) => {
-                let layout = cx.layout_of(self.monomorphize(&ty));
-                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
-            },
-            mir::Place::Projection(box mir::Projection {
-                ref base,
-                elem: mir::ProjectionElem::Deref
-            }) => {
-                // Load the pointer from its location.
-                self.codegen_consume(bx, base).deref(bx.cx())
-            }
-            mir::Place::Projection(ref projection) => {
-                let cg_base = self.codegen_place(bx, &projection.base);
-
-                match projection.elem {
-                    mir::ProjectionElem::Deref => bug!(),
-                    mir::ProjectionElem::Field(ref field, _) => {
-                        cg_base.project_field(bx, field.index())
-                    }
-                    mir::ProjectionElem::Index(index) => {
-                        let index = &mir::Operand::Copy(mir::Place::Local(index));
-                        let index = self.codegen_operand(bx, index);
-                        let llindex = index.immediate();
-                        cg_base.project_index(bx, llindex)
-                    }
-                    mir::ProjectionElem::ConstantIndex { offset,
-                                                         from_end: false,
-                                                         min_length: _ } => {
-                        let lloffset = bx.cx().const_usize(offset as u64);
-                        cg_base.project_index(bx, lloffset)
-                    }
-                    mir::ProjectionElem::ConstantIndex { offset,
-                                                         from_end: true,
-                                                         min_length: _ } => {
-                        let lloffset = bx.cx().const_usize(offset as u64);
-                        let lllen = cg_base.len(bx.cx());
-                        let llindex = bx.sub(lllen, lloffset);
-                        cg_base.project_index(bx, llindex)
-                    }
-                    mir::ProjectionElem::Subslice { from, to } => {
-                        let mut subslice = cg_base.project_index(bx,
-                            bx.cx().const_usize(from as u64));
-                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
-                            .projection_ty(tcx, &projection.elem).to_ty(tcx);
-                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
-
-                        if subslice.layout.is_unsized() {
-                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
-                                bx.cx().const_usize((from as u64) + (to as u64))));
-                        }
-
-                        // Cast the place pointer type to the new
-                        // array or slice type (*[%_; new_len]).
-                        subslice.llval = bx.pointercast(subslice.llval,
-                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
-
-                        subslice
-                    }
-                    mir::ProjectionElem::Downcast(_, v) => {
-                        cg_base.project_downcast(bx, v)
-                    }
-                }
-            }
-        };
-        debug!("codegen_place(place={:?}) => {:?}", place, result);
-        result
-    }
-
-    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
-        let tcx = self.cx.tcx();
-        let place_ty = place.ty(self.mir, tcx);
-        self.monomorphize(&place_ty.to_ty(tcx))
-    }
-}
diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs
deleted file mode 100644
index 9870c93a508..00000000000
--- a/src/librustc_codegen_llvm/mir/rvalue.rs
+++ /dev/null
@@ -1,998 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::ty::{self, Ty};
-use rustc::ty::cast::{CastTy, IntTy};
-use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
-use rustc::mir;
-use rustc::middle::lang_items::ExchangeMallocFnLangItem;
-use rustc_apfloat::{ieee, Float, Status, Round};
-use std::{u128, i128};
-
-use base;
-use builder::MemFlags;
-use callee;
-use common;
-use rustc_codegen_ssa::common::{RealPredicate, IntPredicate};
-use monomorphize;
-use type_of::LayoutLlvmExt;
-
-use interfaces::*;
-
-use super::{FunctionCx, LocalRef};
-use super::operand::{OperandRef, OperandValue};
-use super::place::PlaceRef;
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_rvalue(
-        &mut self,
-        bx: Bx,
-        dest: PlaceRef<'tcx, Bx::Value>,
-        rvalue: &mir::Rvalue<'tcx>
-    ) -> Bx {
-        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
-               dest.llval, rvalue);
-
-        match *rvalue {
-           mir::Rvalue::Use(ref operand) => {
-               let cg_operand = self.codegen_operand(&bx, operand);
-               // FIXME: consider not copying constants through stack. (fixable by codegenning
-               // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
-               cg_operand.val.store(&bx, dest);
-               bx
-           }
-
-            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
-                // The destination necessarily contains a fat pointer, so if
-                // it's a scalar pair, it's a fat pointer or newtype thereof.
-                if dest.layout.is_llvm_scalar_pair() {
-                    // into-coerce of a thin pointer to a fat pointer - just
-                    // use the operand path.
-                    let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                    temp.val.store(&bx, dest);
-                    return bx;
-                }
-
-                // Unsize of a nontrivial struct. I would prefer for
-                // this to be eliminated by MIR building, but
-                // `CoerceUnsized` can be passed by a where-clause,
-                // so the (generic) MIR may not be able to expand it.
-                let operand = self.codegen_operand(&bx, source);
-                match operand.val {
-                    OperandValue::Pair(..) |
-                    OperandValue::Immediate(_) => {
-                        // unsize from an immediate structure. We don't
-                        // really need a temporary alloca here, but
-                        // avoiding it would require us to have
-                        // `coerce_unsized_into` use extractvalue to
-                        // index into the struct, and this case isn't
-                        // important enough for it.
-                        debug!("codegen_rvalue: creating ugly alloca");
-                        let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
-                        scratch.storage_live(&bx);
-                        operand.val.store(&bx, scratch);
-                        base::coerce_unsized_into(&bx, scratch, dest);
-                        scratch.storage_dead(&bx);
-                    }
-                    OperandValue::Ref(llref, None, align) => {
-                        let source = PlaceRef::new_sized(llref, operand.layout, align);
-                        base::coerce_unsized_into(&bx, source, dest);
-                    }
-                    OperandValue::Ref(_, Some(_), _) => {
-                        bug!("unsized coercion on an unsized rvalue")
-                    }
-                }
-                bx
-            }
-
-            mir::Rvalue::Repeat(ref elem, count) => {
-                let cg_elem = self.codegen_operand(&bx, elem);
-
-                // Do not generate the loop for zero-sized elements or empty arrays.
-                if dest.layout.is_zst() {
-                    return bx;
-                }
-
-                let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
-
-                if let OperandValue::Immediate(v) = cg_elem.val {
-                    let size = bx.cx().const_usize(dest.layout.size.bytes());
-
-                    // Use llvm.memset.p0i8.* to initialize all zero arrays
-                    if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
-                        let fill = bx.cx().const_u8(0);
-                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
-                        return bx;
-                    }
-
-                    // Use llvm.memset.p0i8.* to initialize byte arrays
-                    let v = base::from_immediate(&bx, v);
-                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
-                        bx.memset(start, v, size, dest.align, MemFlags::empty());
-                        return bx;
-                    }
-                }
-
-                let count = bx.cx().const_usize(count);
-                let end = dest.project_index(&bx, count).llval;
-
-                let header_bx = bx.build_sibling_block("repeat_loop_header");
-                let body_bx = bx.build_sibling_block("repeat_loop_body");
-                let next_bx = bx.build_sibling_block("repeat_loop_next");
-
-                bx.br(header_bx.llbb());
-                let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]);
-
-                let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
-                header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
-
-                cg_elem.val.store(&body_bx,
-                    PlaceRef::new_sized(current, cg_elem.layout, dest.align));
-
-                let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
-                body_bx.br(header_bx.llbb());
-                header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
-
-                next_bx
-            }
-
-            mir::Rvalue::Aggregate(ref kind, ref operands) => {
-                let (dest, active_field_index) = match **kind {
-                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
-                        dest.codegen_set_discr(&bx, variant_index);
-                        if adt_def.is_enum() {
-                            (dest.project_downcast(&bx, variant_index), active_field_index)
-                        } else {
-                            (dest, active_field_index)
-                        }
-                    }
-                    _ => (dest, None)
-                };
-                for (i, operand) in operands.iter().enumerate() {
-                    let op = self.codegen_operand(&bx, operand);
-                    // Do not generate stores and GEPis for zero-sized fields.
-                    if !op.layout.is_zst() {
-                        let field_index = active_field_index.unwrap_or(i);
-                        op.val.store(&bx, dest.project_field(&bx, field_index));
-                    }
-                }
-                bx
-            }
-
-            _ => {
-                assert!(self.rvalue_creates_operand(rvalue));
-                let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                temp.val.store(&bx, dest);
-                bx
-            }
-        }
-    }
-
-    pub fn codegen_rvalue_unsized(
-        &mut self,
-        bx: Bx,
-        indirect_dest: PlaceRef<'tcx, Bx::Value>,
-        rvalue: &mir::Rvalue<'tcx>,
-    ) -> Bx {
-        debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
-               indirect_dest.llval, rvalue);
-
-        match *rvalue {
-            mir::Rvalue::Use(ref operand) => {
-                let cg_operand = self.codegen_operand(&bx, operand);
-                cg_operand.val.store_unsized(&bx, indirect_dest);
-                bx
-            }
-
-            _ => bug!("unsized assignment other than Rvalue::Use"),
-        }
-    }
-
-    pub fn codegen_rvalue_operand(
-        &mut self,
-        bx: Bx,
-        rvalue: &mir::Rvalue<'tcx>
-    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
-        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
-
-        match *rvalue {
-            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.codegen_operand(&bx, source);
-                debug!("cast operand is {:?}", operand);
-                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
-
-                let val = match *kind {
-                    mir::CastKind::ReifyFnPointer => {
-                        match operand.layout.ty.sty {
-                            ty::FnDef(def_id, substs) => {
-                                if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") {
-                                    bug!("reifying a fn ptr that requires \
-                                          const arguments");
-                                }
-                                OperandValue::Immediate(
-                                    callee::resolve_and_get_fn(bx.cx(), def_id, substs))
-                            }
-                            _ => {
-                                bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
-                            }
-                        }
-                    }
-                    mir::CastKind::ClosureFnPointer => {
-                        match operand.layout.ty.sty {
-                            ty::Closure(def_id, substs) => {
-                                let instance = monomorphize::resolve_closure(
-                                    bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce);
-                                OperandValue::Immediate(bx.cx().get_fn(instance))
-                            }
-                            _ => {
-                                bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
-                            }
-                        }
-                    }
-                    mir::CastKind::UnsafeFnPointer => {
-                        // this is a no-op at the LLVM level
-                        operand.val
-                    }
-                    mir::CastKind::Unsize => {
-                        assert!(cast.is_llvm_scalar_pair());
-                        match operand.val {
-                            OperandValue::Pair(lldata, llextra) => {
-                                // unsize from a fat pointer - this is a
-                                // "trait-object-to-supertrait" coercion, for
-                                // example,
-                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
-
-                                // HACK(eddyb) have to bitcast pointers
-                                // until LLVM removes pointee types.
-                                let lldata = bx.pointercast(lldata,
-                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true));
-                                OperandValue::Pair(lldata, llextra)
-                            }
-                            OperandValue::Immediate(lldata) => {
-                                // "standard" unsize
-                                let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
-                                    operand.layout.ty, cast.ty);
-                                OperandValue::Pair(lldata, llextra)
-                            }
-                            OperandValue::Ref(..) => {
-                                bug!("by-ref operand {:?} in codegen_rvalue_operand",
-                                     operand);
-                            }
-                        }
-                    }
-                    mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
-                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
-                            if cast.is_llvm_scalar_pair() {
-                                let data_cast = bx.pointercast(data_ptr,
-                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true));
-                                OperandValue::Pair(data_cast, meta)
-                            } else { // cast to thin-ptr
-                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
-                                // pointer-cast of that pointer to desired pointer type.
-                                let llcast_ty = bx.cx().immediate_backend_type(cast);
-                                let llval = bx.pointercast(data_ptr, llcast_ty);
-                                OperandValue::Immediate(llval)
-                            }
-                        } else {
-                            bug!("Unexpected non-Pair operand")
-                        }
-                    }
-                    mir::CastKind::Misc => {
-                        assert!(cast.is_llvm_immediate());
-                        let ll_t_out = bx.cx().immediate_backend_type(cast);
-                        if operand.layout.abi.is_uninhabited() {
-                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
-                            return (bx, OperandRef {
-                                val,
-                                layout: cast,
-                            });
-                        }
-                        let r_t_in = CastTy::from_ty(operand.layout.ty)
-                            .expect("bad input type for cast");
-                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
-                        match operand.layout.variants {
-                            layout::Variants::Single { index } => {
-                                if let Some(def) = operand.layout.ty.ty_adt_def() {
-                                    let discr_val = def
-                                        .discriminant_for_variant(bx.cx().tcx(), index)
-                                        .val;
-                                    let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
-                                    return (bx, OperandRef {
-                                        val: OperandValue::Immediate(discr),
-                                        layout: cast,
-                                    });
-                                }
-                            }
-                            layout::Variants::Tagged { .. } |
-                            layout::Variants::NicheFilling { .. } => {},
-                        }
-                        let llval = operand.immediate();
-
-                        let mut signed = false;
-                        if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
-                            if let layout::Int(_, s) = scalar.value {
-                                // We use `i1` for bytes that are always `0` or `1`,
-                                // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
-                                // let LLVM interpret the `i1` as signed, because
-                                // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
-                                signed = !scalar.is_bool() && s;
-
-                                let er = scalar.valid_range_exclusive(bx.cx());
-                                if er.end != er.start &&
-                                   scalar.valid_range.end() > scalar.valid_range.start() {
-                                    // We want `table[e as usize]` to not
-                                    // have bound checks, and this is the most
-                                    // convenient place to put the `assume`.
-
-                                    base::call_assume(&bx, bx.icmp(
-                                        IntPredicate::IntULE,
-                                        llval,
-                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
-                                    ));
-                                }
-                            }
-                        }
-
-                        let newval = match (r_t_in, r_t_out) {
-                            (CastTy::Int(_), CastTy::Int(_)) => {
-                                bx.intcast(llval, ll_t_out, signed)
-                            }
-                            (CastTy::Float, CastTy::Float) => {
-                                let srcsz = bx.cx().float_width(ll_t_in);
-                                let dstsz = bx.cx().float_width(ll_t_out);
-                                if dstsz > srcsz {
-                                    bx.fpext(llval, ll_t_out)
-                                } else if srcsz > dstsz {
-                                    bx.fptrunc(llval, ll_t_out)
-                                } else {
-                                    llval
-                                }
-                            }
-                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
-                            (CastTy::FnPtr, CastTy::Ptr(_)) |
-                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
-                                bx.pointercast(llval, ll_t_out),
-                            (CastTy::Ptr(_), CastTy::Int(_)) |
-                            (CastTy::FnPtr, CastTy::Int(_)) =>
-                                bx.ptrtoint(llval, ll_t_out),
-                            (CastTy::Int(_), CastTy::Ptr(_)) => {
-                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
-                                bx.inttoptr(usize_llval, ll_t_out)
-                            }
-                            (CastTy::Int(_), CastTy::Float) =>
-                                cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
-                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
-                                cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
-                            (CastTy::Float, CastTy::Int(_)) =>
-                                cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
-                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
-                        };
-                        OperandValue::Immediate(newval)
-                    }
-                };
-                (bx, OperandRef {
-                    val,
-                    layout: cast
-                })
-            }
-
-            mir::Rvalue::Ref(_, bk, ref place) => {
-                let cg_place = self.codegen_place(&bx, place);
-
-                let ty = cg_place.layout.ty;
-
-                // Note: places are indirect, so storing the `llval` into the
-                // destination effectively creates a reference.
-                let val = if !bx.cx().type_has_metadata(ty) {
-                    OperandValue::Immediate(cg_place.llval)
-                } else {
-                    OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
-                };
-                (bx, OperandRef {
-                    val,
-                    layout: self.cx.layout_of(self.cx.tcx().mk_ref(
-                        self.cx.tcx().types.re_erased,
-                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
-                    )),
-                })
-            }
-
-            mir::Rvalue::Len(ref place) => {
-                let size = self.evaluate_array_len(&bx, place);
-                let operand = OperandRef {
-                    val: OperandValue::Immediate(size),
-                    layout: bx.cx().layout_of(bx.tcx().types.usize),
-                };
-                (bx, operand)
-            }
-
-            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.codegen_operand(&bx, lhs);
-                let rhs = self.codegen_operand(&bx, rhs);
-                let llresult = match (lhs.val, rhs.val) {
-                    (OperandValue::Pair(lhs_addr, lhs_extra),
-                     OperandValue::Pair(rhs_addr, rhs_extra)) => {
-                        self.codegen_fat_ptr_binop(&bx, op,
-                                                 lhs_addr, lhs_extra,
-                                                 rhs_addr, rhs_extra,
-                                                 lhs.layout.ty)
-                    }
-
-                    (OperandValue::Immediate(lhs_val),
-                     OperandValue::Immediate(rhs_val)) => {
-                        self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
-                    }
-
-                    _ => bug!()
-                };
-                let operand = OperandRef {
-                    val: OperandValue::Immediate(llresult),
-                    layout: bx.cx().layout_of(
-                        op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
-                };
-                (bx, operand)
-            }
-            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.codegen_operand(&bx, lhs);
-                let rhs = self.codegen_operand(&bx, rhs);
-                let result = self.codegen_scalar_checked_binop(&bx, op,
-                                                             lhs.immediate(), rhs.immediate(),
-                                                             lhs.layout.ty);
-                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
-                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
-                let operand = OperandRef {
-                    val: result,
-                    layout: bx.cx().layout_of(operand_ty)
-                };
-
-                (bx, operand)
-            }
-
-            mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.codegen_operand(&bx, operand);
-                let lloperand = operand.immediate();
-                let is_float = operand.layout.ty.is_fp();
-                let llval = match op {
-                    mir::UnOp::Not => bx.not(lloperand),
-                    mir::UnOp::Neg => if is_float {
-                        bx.fneg(lloperand)
-                    } else {
-                        bx.neg(lloperand)
-                    }
-                };
-                (bx, OperandRef {
-                    val: OperandValue::Immediate(llval),
-                    layout: operand.layout,
-                })
-            }
-
-            mir::Rvalue::Discriminant(ref place) => {
-                let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
-                let discr =  self.codegen_place(&bx, place)
-                    .codegen_get_discr(&bx, discr_ty);
-                (bx, OperandRef {
-                    val: OperandValue::Immediate(discr),
-                    layout: self.cx.layout_of(discr_ty)
-                })
-            }
-
-            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-                assert!(bx.cx().type_is_sized(ty));
-                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
-                let tcx = self.cx.tcx();
-                (bx, OperandRef {
-                    val: OperandValue::Immediate(val),
-                    layout: self.cx.layout_of(tcx.types.usize),
-                })
-            }
-
-            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
-                let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-                let (size, align) = bx.cx().layout_of(content_ty).size_and_align();
-                let llsize = bx.cx().const_usize(size.bytes());
-                let llalign = bx.cx().const_usize(align.abi());
-                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
-                let llty_ptr = bx.cx().backend_type(box_layout);
-
-                // Allocate space:
-                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
-                    Ok(id) => id,
-                    Err(s) => {
-                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
-                    }
-                };
-                let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let r = bx.cx().get_fn(instance);
-                let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
-
-                let operand = OperandRef {
-                    val: OperandValue::Immediate(val),
-                    layout: box_layout,
-                };
-                (bx, operand)
-            }
-            mir::Rvalue::Use(ref operand) => {
-                let operand = self.codegen_operand(&bx, operand);
-                (bx, operand)
-            }
-            mir::Rvalue::Repeat(..) |
-            mir::Rvalue::Aggregate(..) => {
-                // According to `rvalue_creates_operand`, only ZST
-                // aggregate rvalues are allowed to be operands.
-                let ty = rvalue.ty(self.mir, self.cx.tcx());
-                (bx, OperandRef::new_zst(self.cx,
-                    self.cx.layout_of(self.monomorphize(&ty))))
-            }
-        }
-    }
-
-    fn evaluate_array_len(
-        &mut self,
-        bx: &Bx,
-        place: &mir::Place<'tcx>,
-    ) -> Bx::Value {
-        // ZST are passed as operands and require special handling
-        // because codegen_place() panics if Local is operand.
-        if let mir::Place::Local(index) = *place {
-            if let LocalRef::Operand(Some(op)) = self.locals[index] {
-                if let ty::Array(_, n) = op.layout.ty.sty {
-                    let n = n.unwrap_usize(bx.cx().tcx());
-                    return bx.cx().const_usize(n);
-                }
-            }
-        }
-        // use common size calculation for non zero-sized types
-        let cg_value = self.codegen_place(bx, place);
-        return cg_value.len(bx.cx());
-    }
-
-    pub fn codegen_scalar_binop(
-        &mut self,
-        bx: &Bx,
-        op: mir::BinOp,
-        lhs: Bx::Value,
-        rhs: Bx::Value,
-        input_ty: Ty<'tcx>,
-    ) -> Bx::Value {
-        let is_float = input_ty.is_fp();
-        let is_signed = input_ty.is_signed();
-        let is_unit = input_ty.is_unit();
-        match op {
-            mir::BinOp::Add => if is_float {
-                bx.fadd(lhs, rhs)
-            } else {
-                bx.add(lhs, rhs)
-            },
-            mir::BinOp::Sub => if is_float {
-                bx.fsub(lhs, rhs)
-            } else {
-                bx.sub(lhs, rhs)
-            },
-            mir::BinOp::Mul => if is_float {
-                bx.fmul(lhs, rhs)
-            } else {
-                bx.mul(lhs, rhs)
-            },
-            mir::BinOp::Div => if is_float {
-                bx.fdiv(lhs, rhs)
-            } else if is_signed {
-                bx.sdiv(lhs, rhs)
-            } else {
-                bx.udiv(lhs, rhs)
-            },
-            mir::BinOp::Rem => if is_float {
-                bx.frem(lhs, rhs)
-            } else if is_signed {
-                bx.srem(lhs, rhs)
-            } else {
-                bx.urem(lhs, rhs)
-            },
-            mir::BinOp::BitOr => bx.or(lhs, rhs),
-            mir::BinOp::BitAnd => bx.and(lhs, rhs),
-            mir::BinOp::BitXor => bx.xor(lhs, rhs),
-            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
-            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
-            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
-            mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
-            mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
-                bx.cx().const_bool(match op {
-                    mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
-                    mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
-                    _ => unreachable!()
-                })
-            } else if is_float {
-                bx.fcmp(
-                    base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
-                    lhs, rhs
-                )
-            } else {
-                bx.icmp(
-                    base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
-                    lhs, rhs
-                )
-            }
-        }
-    }
-
-    pub fn codegen_fat_ptr_binop(
-        &mut self,
-        bx: &Bx,
-        op: mir::BinOp,
-        lhs_addr: Bx::Value,
-        lhs_extra: Bx::Value,
-        rhs_addr: Bx::Value,
-        rhs_extra: Bx::Value,
-        _input_ty: Ty<'tcx>,
-    ) -> Bx::Value {
-        match op {
-            mir::BinOp::Eq => {
-                bx.and(
-                    bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
-                    bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
-                )
-            }
-            mir::BinOp::Ne => {
-                bx.or(
-                    bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
-                    bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
-                )
-            }
-            mir::BinOp::Le | mir::BinOp::Lt |
-            mir::BinOp::Ge | mir::BinOp::Gt => {
-                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1)
-                let (op, strict_op) = match op {
-                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
-                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
-                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
-                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
-                    _ => bug!(),
-                };
-
-                bx.or(
-                    bx.icmp(strict_op, lhs_addr, rhs_addr),
-                    bx.and(
-                        bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
-                        bx.icmp(op, lhs_extra, rhs_extra)
-                    )
-                )
-            }
-            _ => {
-                bug!("unexpected fat ptr binop");
-            }
-        }
-    }
-
-    pub fn codegen_scalar_checked_binop(
-        &mut self,
-        bx: &Bx,
-        op: mir::BinOp,
-        lhs: Bx::Value,
-        rhs: Bx::Value,
-        input_ty: Ty<'tcx>
-    ) -> OperandValue<Bx::Value> {
-        // This case can currently arise only from functions marked
-        // with #[rustc_inherit_overflow_checks] and inlined from
-        // another crate (mostly core::num generic/#[inline] fns),
-        // while the current crate doesn't use overflow checks.
-        if !bx.cx().check_overflow() {
-            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-            return OperandValue::Pair(val, bx.cx().const_bool(false));
-        }
-
-        let (val, of) = match op {
-            // These are checked using intrinsics
-            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
-                let oop = match op {
-                    mir::BinOp::Add => OverflowOp::Add,
-                    mir::BinOp::Sub => OverflowOp::Sub,
-                    mir::BinOp::Mul => OverflowOp::Mul,
-                    _ => unreachable!()
-                };
-                let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
-                let res = bx.call(intrinsic, &[lhs, rhs], None);
-
-                (bx.extract_value(res, 0),
-                 bx.extract_value(res, 1))
-            }
-            mir::BinOp::Shl | mir::BinOp::Shr => {
-                let lhs_llty = bx.cx().val_ty(lhs);
-                let rhs_llty = bx.cx().val_ty(rhs);
-                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
-                let outer_bits = bx.and(rhs, invert_mask);
-
-                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
-                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-
-                (val, of)
-            }
-            _ => {
-                bug!("Operator `{:?}` is not a checkable operator", op)
-            }
-        };
-
-        OperandValue::Pair(val, of)
-    }
-}
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
-        match *rvalue {
-            mir::Rvalue::Ref(..) |
-            mir::Rvalue::Len(..) |
-            mir::Rvalue::Cast(..) | // (*)
-            mir::Rvalue::BinaryOp(..) |
-            mir::Rvalue::CheckedBinaryOp(..) |
-            mir::Rvalue::UnaryOp(..) |
-            mir::Rvalue::Discriminant(..) |
-            mir::Rvalue::NullaryOp(..) |
-            mir::Rvalue::Use(..) => // (*)
-                true,
-            mir::Rvalue::Repeat(..) |
-            mir::Rvalue::Aggregate(..) => {
-                let ty = rvalue.ty(self.mir, self.cx.tcx());
-                let ty = self.monomorphize(&ty);
-                self.cx.layout_of(ty).is_zst()
-            }
-        }
-
-        // (*) this is only true if the type is suitable
-    }
-}
-
-#[derive(Copy, Clone)]
-enum OverflowOp {
-    Add, Sub, Mul
-}
-
-fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    oop: OverflowOp,
-    bx: &Bx,
-    ty: Ty
-) -> Bx::Value {
-    use syntax::ast::IntTy::*;
-    use syntax::ast::UintTy::*;
-    use rustc::ty::{Int, Uint};
-
-    let tcx = bx.tcx();
-
-    let new_sty = match ty.sty {
-        Int(Isize) => Int(tcx.sess.target.isize_ty),
-        Uint(Usize) => Uint(tcx.sess.target.usize_ty),
-        ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
-        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
-    };
-
-    let name = match oop {
-        OverflowOp::Add => match new_sty {
-            Int(I8) => "llvm.sadd.with.overflow.i8",
-            Int(I16) => "llvm.sadd.with.overflow.i16",
-            Int(I32) => "llvm.sadd.with.overflow.i32",
-            Int(I64) => "llvm.sadd.with.overflow.i64",
-            Int(I128) => "llvm.sadd.with.overflow.i128",
-
-            Uint(U8) => "llvm.uadd.with.overflow.i8",
-            Uint(U16) => "llvm.uadd.with.overflow.i16",
-            Uint(U32) => "llvm.uadd.with.overflow.i32",
-            Uint(U64) => "llvm.uadd.with.overflow.i64",
-            Uint(U128) => "llvm.uadd.with.overflow.i128",
-
-            _ => unreachable!(),
-        },
-        OverflowOp::Sub => match new_sty {
-            Int(I8) => "llvm.ssub.with.overflow.i8",
-            Int(I16) => "llvm.ssub.with.overflow.i16",
-            Int(I32) => "llvm.ssub.with.overflow.i32",
-            Int(I64) => "llvm.ssub.with.overflow.i64",
-            Int(I128) => "llvm.ssub.with.overflow.i128",
-
-            Uint(U8) => "llvm.usub.with.overflow.i8",
-            Uint(U16) => "llvm.usub.with.overflow.i16",
-            Uint(U32) => "llvm.usub.with.overflow.i32",
-            Uint(U64) => "llvm.usub.with.overflow.i64",
-            Uint(U128) => "llvm.usub.with.overflow.i128",
-
-            _ => unreachable!(),
-        },
-        OverflowOp::Mul => match new_sty {
-            Int(I8) => "llvm.smul.with.overflow.i8",
-            Int(I16) => "llvm.smul.with.overflow.i16",
-            Int(I32) => "llvm.smul.with.overflow.i32",
-            Int(I64) => "llvm.smul.with.overflow.i64",
-            Int(I128) => "llvm.smul.with.overflow.i128",
-
-            Uint(U8) => "llvm.umul.with.overflow.i8",
-            Uint(U16) => "llvm.umul.with.overflow.i16",
-            Uint(U32) => "llvm.umul.with.overflow.i32",
-            Uint(U64) => "llvm.umul.with.overflow.i64",
-            Uint(U128) => "llvm.umul.with.overflow.i128",
-
-            _ => unreachable!(),
-        },
-    };
-
-    bx.cx().get_intrinsic(&name)
-}
-
-fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    signed: bool,
-    x: Bx::Value,
-    int_ty: Bx::Type,
-    float_ty: Bx::Type
-) -> Bx::Value {
-    // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
-    // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
-    // LLVM's uitofp produces undef in those cases, so we manually check for that case.
-    let is_u128_to_f32 = !signed &&
-        bx.cx().int_width(int_ty) == 128 &&
-        bx.cx().float_width(float_ty) == 32;
-    if is_u128_to_f32 {
-        // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
-        // and for everything else LLVM's uitofp works just fine.
-        use rustc_apfloat::ieee::Single;
-        use rustc_apfloat::Float;
-        const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
-                                            << (Single::MAX_EXP - Single::PRECISION as i16);
-        let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
-        let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
-        let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
-        let infinity = bx.bitcast(infinity_bits, float_ty);
-        bx.select(overflow, infinity, bx.uitofp(x, float_ty))
-    } else {
-        if signed {
-            bx.sitofp(x, float_ty)
-        } else {
-            bx.uitofp(x, float_ty)
-        }
-    }
-}
-
-fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
-    signed: bool,
-    x: Bx::Value,
-    float_ty: Bx::Type,
-    int_ty: Bx::Type
-) -> Bx::Value {
-    let fptosui_result = if signed {
-        bx.fptosi(x, int_ty)
-    } else {
-        bx.fptoui(x, int_ty)
-    };
-
-    if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
-        return fptosui_result;
-    }
-    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
-    // destination integer type after rounding towards zero. This `undef` value can cause UB in
-    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
-    // Semantically, the mathematical value of the input is rounded towards zero to the next
-    // mathematical integer, and then the result is clamped into the range of the destination
-    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
-    // the destination integer type. NaN is mapped to 0.
-    //
-    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
-    // a value representable in int_ty.
-    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
-    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
-    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
-    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
-    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
-    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
-    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
-    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
-    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
-    let int_max = |signed: bool, int_ty: Bx::Type| -> u128 {
-        let shift_amount = 128 - bx.cx().int_width(int_ty);
-        if signed {
-            i128::MAX as u128 >> shift_amount
-        } else {
-            u128::MAX >> shift_amount
-        }
-    };
-    let int_min = |signed: bool, int_ty: Bx::Type| -> i128 {
-        if signed {
-            i128::MIN >> (128 - bx.cx().int_width(int_ty))
-        } else {
-            0
-        }
-    };
-
-    let compute_clamp_bounds_single =
-    |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
-        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
-        assert_eq!(rounded_min.status, Status::OK);
-        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
-        assert!(rounded_max.value.is_finite());
-        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
-    };
-    let compute_clamp_bounds_double =
-    |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
-        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
-        assert_eq!(rounded_min.status, Status::OK);
-        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
-        assert!(rounded_max.value.is_finite());
-        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
-    };
-
-    let float_bits_to_llval = |bits| {
-        let bits_llval = match bx.cx().float_width(float_ty) {
-            32 => bx.cx().const_u32(bits as u32),
-            64 => bx.cx().const_u64(bits as u64),
-            n => bug!("unsupported float width {}", n),
-        };
-        bx.bitcast(bits_llval, float_ty)
-    };
-    let (f_min, f_max) = match bx.cx().float_width(float_ty) {
-        32 => compute_clamp_bounds_single(signed, int_ty),
-        64 => compute_clamp_bounds_double(signed, int_ty),
-        n => bug!("unsupported float width {}", n),
-    };
-    let f_min = float_bits_to_llval(f_min);
-    let f_max = float_bits_to_llval(f_max);
-    // To implement saturation, we perform the following steps:
-    //
-    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
-    // 2. Compare x to f_min and f_max, and use the comparison results to select:
-    //  a) int_ty::MIN if x < f_min or x is NaN
-    //  b) int_ty::MAX if x > f_max
-    //  c) the result of fpto[su]i otherwise
-    // 3. If x is NaN, return 0.0, otherwise return the result of step 2.
-    //
-    // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the
-    // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
-    // undef does not introduce any non-determinism either.
-    // More importantly, the above procedure correctly implements saturating conversion.
-    // Proof (sketch):
-    // If x is NaN, 0 is returned by definition.
-    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
-    // This yields three cases to consider:
-    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
-    //     saturating conversion for inputs in that range.
-    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
-    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
-    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
-    //     is correct.
-    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
-    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
-    // QED.
-
-    // Step 1 was already performed above.
-
-    // Step 2: We use two comparisons and two selects, with %s1 being the result:
-    //     %less_or_nan = fcmp ult %x, %f_min
-    //     %greater = fcmp olt %x, %f_max
-    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
-    //     %s1 = select %greater, int_ty::MAX, %s0
-    // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
-    // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
-    // becomes int_ty::MIN if x is NaN.
-    // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
-    // negation, and the negation can be merged into the select. Therefore, it not necessarily any
-    // more expensive than a ordered ("normal") comparison. Whether these optimizations will be
-    // performed is ultimately up to the backend, but at least x86 does perform them.
-    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
-    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
-    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
-    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
-    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
-    let s1 = bx.select(greater, int_max, s0);
-
-    // Step 3: NaN replacement.
-    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
-    // Therefore we only need to execute this step for signed integer types.
-    if signed {
-        // LLVM has no isNaN predicate, so we use (x == x) instead
-        bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
-    } else {
-        s1
-    }
-}
diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs
deleted file mode 100644
index 40af52c05a3..00000000000
--- a/src/librustc_codegen_llvm/mir/statement.rs
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::mir;
-
-use interfaces::BuilderMethods;
-use super::FunctionCx;
-use super::LocalRef;
-use super::OperandValue;
-use interfaces::*;
-
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_statement(
-        &mut self,
-        bx: Bx,
-        statement: &mir::Statement<'tcx>
-    ) -> Bx {
-        debug!("codegen_statement(statement={:?})", statement);
-
-        self.set_debug_loc(&bx, statement.source_info);
-        match statement.kind {
-            mir::StatementKind::Assign(ref place, ref rvalue) => {
-                if let mir::Place::Local(index) = *place {
-                    match self.locals[index] {
-                        LocalRef::Place(cg_dest) => {
-                            self.codegen_rvalue(bx, cg_dest, rvalue)
-                        }
-                        LocalRef::UnsizedPlace(cg_indirect_dest) => {
-                            self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
-                        }
-                        LocalRef::Operand(None) => {
-                            let (bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
-                            self.locals[index] = LocalRef::Operand(Some(operand));
-                            bx
-                        }
-                        LocalRef::Operand(Some(op)) => {
-                            if !op.layout.is_zst() {
-                                span_bug!(statement.source_info.span,
-                                          "operand {:?} already assigned",
-                                          rvalue);
-                            }
-
-                            // If the type is zero-sized, it's already been set here,
-                            // but we still need to make sure we codegen the operand
-                            self.codegen_rvalue_operand(bx, rvalue).0
-                        }
-                    }
-                } else {
-                    let cg_dest = self.codegen_place(&bx, place);
-                    self.codegen_rvalue(bx, cg_dest, rvalue)
-                }
-            }
-            mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
-                self.codegen_place(&bx, place)
-                    .codegen_set_discr(&bx, variant_index);
-                bx
-            }
-            mir::StatementKind::StorageLive(local) => {
-                if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_live(&bx);
-                } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_live(&bx);
-                }
-                bx
-            }
-            mir::StatementKind::StorageDead(local) => {
-                if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_dead(&bx);
-                } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_dead(&bx);
-                }
-                bx
-            }
-            mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
-                let outputs = outputs.iter().map(|output| {
-                    self.codegen_place(&bx, output)
-                }).collect();
-
-                let input_vals = inputs.iter()
-                    .fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| {
-                        let op = self.codegen_operand(&bx, input);
-                        if let OperandValue::Immediate(_) = op.val {
-                            acc.push(op.immediate());
-                        } else {
-                            span_err!(bx.cx().sess(), span.to_owned(), E0669,
-                                     "invalid value for constraint in inline assembly");
-                        }
-                        acc
-                });
-
-                if input_vals.len() == inputs.len() {
-                    let res = bx.codegen_inline_asm(asm, outputs, input_vals);
-                    if !res {
-                        span_err!(bx.cx().sess(), statement.source_info.span, E0668,
-                                  "malformed inline assembly");
-                    }
-                }
-                bx
-            }
-            mir::StatementKind::FakeRead(..) |
-            mir::StatementKind::EndRegion(..) |
-            mir::StatementKind::Retag { .. } |
-            mir::StatementKind::EscapeToRaw { .. } |
-            mir::StatementKind::AscribeUserType(..) |
-            mir::StatementKind::Nop => bx,
-        }
-    }
-}
diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs
index 1defb2c16f8..750e2ab0741 100644
--- a/src/librustc_codegen_llvm/mono_item.rs
+++ b/src/librustc_codegen_llvm/mono_item.rs
@@ -8,12 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Walks the crate looking for items/impl-items/trait-items that have
-//! either a `rustc_symbol_name` or `rustc_item_path` attribute and
-//! generates an error giving, respectively, the symbol name or
-//! item-path. This is used for unit testing the code that generates
-//! paths etc in all kinds of annoying scenarios.
-
 use attributes;
 use base;
 use context::CodegenCx;
@@ -31,98 +25,6 @@ use interfaces::*;
 
 pub use rustc::mir::mono::MonoItem;
 
-pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt;
-
-pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> {
-    fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
-        debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}",
-               self.to_string(cx.tcx()),
-               self.to_raw_string(),
-               cx.codegen_unit().name());
-
-        match *self.as_mono_item() {
-            MonoItem::Static(def_id) => {
-                let tcx = cx.tcx();
-                let is_mutable = match tcx.describe_def(def_id) {
-                    Some(Def::Static(_, is_mutable)) => is_mutable,
-                    Some(other) => {
-                        bug!("Expected Def::Static, found {:?}", other)
-                    }
-                    None => {
-                        bug!("Expected Def::Static for {:?}, found nothing", def_id)
-                    }
-                };
-                cx.codegen_static(def_id, is_mutable);
-            }
-            MonoItem::GlobalAsm(node_id) => {
-                let item = cx.tcx().hir.expect_item(node_id);
-                if let hir::ItemKind::GlobalAsm(ref ga) = item.node {
-                    cx.codegen_global_asm(ga);
-                } else {
-                    span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
-                }
-            }
-            MonoItem::Fn(instance) => {
-                base::codegen_instance::<Bx>(&cx, instance);
-            }
-        }
-
-        debug!("END IMPLEMENTING '{} ({})' in cgu {}",
-               self.to_string(cx.tcx()),
-               self.to_raw_string(),
-               cx.codegen_unit().name());
-    }
-
-    fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
-        &self,
-        cx: &'a Bx::CodegenCx,
-        linkage: Linkage,
-        visibility: Visibility
-    ) {
-        debug!("BEGIN PREDEFINING '{} ({})' in cgu {}",
-               self.to_string(cx.tcx()),
-               self.to_raw_string(),
-               cx.codegen_unit().name());
-
-        let symbol_name = self.symbol_name(cx.tcx()).as_str();
-
-        debug!("symbol {}", &symbol_name);
-
-        match *self.as_mono_item() {
-            MonoItem::Static(def_id) => {
-                cx.predefine_static(def_id, linkage, visibility, &symbol_name);
-            }
-            MonoItem::Fn(instance) => {
-                cx.predefine_fn(instance, linkage, visibility, &symbol_name);
-            }
-            MonoItem::GlobalAsm(..) => {}
-        }
-
-        debug!("END PREDEFINING '{} ({})' in cgu {}",
-               self.to_string(cx.tcx()),
-               self.to_raw_string(),
-               cx.codegen_unit().name());
-    }
-
-    fn to_raw_string(&self) -> String {
-        match *self.as_mono_item() {
-            MonoItem::Fn(instance) => {
-                format!("Fn({:?}, {})",
-                        instance.def,
-                        instance.substs.as_ptr() as usize)
-            }
-            MonoItem::Static(id) => {
-                format!("Static({:?})", id)
-            }
-            MonoItem::GlobalAsm(id) => {
-                format!("GlobalAsm({:?})", id)
-            }
-        }
-    }
-}
-
-impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {}
-
 impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn predefine_static(&self,
                                   def_id: DefId,
diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs
index 00dd3be8c9f..d21542a50fb 100644
--- a/src/librustc_codegen_llvm/type_.rs
+++ b/src/librustc_codegen_llvm/type_.rs
@@ -400,6 +400,12 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool {
         layout.is_llvm_immediate()
     }
+    fn is_backend_scalar_pair(&self, ty: &TyLayout<'tcx>) -> bool {
+        ty.is_llvm_scalar_pair()
+    }
+    fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64 {
+        ty.llvm_field_index()
+    }
     fn scalar_pair_element_backend_type<'a>(
         &self,
         layout: TyLayout<'tcx>,