about summary refs log tree commit diff
path: root/compiler/rustc_codegen_ssa/src/mir
diff options
context:
space:
mode:
authormark <markm@cs.wisc.edu>2020-08-27 22:58:48 -0500
committerVadim Petrochenkov <vadim.petrochenkov@gmail.com>2020-08-30 18:45:07 +0300
commit9e5f7d5631b8f4009ac1c693e585d4b7108d4275 (patch)
tree158a05eb3f204a8e72939b58427d0c2787a4eade /compiler/rustc_codegen_ssa/src/mir
parentdb534b3ac286cf45688c3bbae6aa6e77439e52d2 (diff)
downloadrust-9e5f7d5631b8f4009ac1c693e585d4b7108d4275.tar.gz
rust-9e5f7d5631b8f4009ac1c693e585d4b7108d4275.zip
mv compiler to compiler/
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/analyze.rs448
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/block.rs1416
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/constant.rs91
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs35
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/debuginfo.rs361
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/mod.rs492
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/operand.rs471
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/place.rs502
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/rvalue.rs1006
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/statement.rs124
10 files changed, 4946 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
new file mode 100644
index 00000000000..2e386c1e594
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -0,0 +1,448 @@
+//! An analysis to determine which locals require allocas and
+//! which do not.
+
+use super::FunctionCx;
+use crate::traits::*;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{
+    MutatingUseContext, NonMutatingUseContext, NonUseContext, PlaceContext, Visitor,
+};
+use rustc_middle::mir::{self, Location, TerminatorKind};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_target::abi::LayoutOf;
+
+/// Computes the set of MIR locals that cannot be kept as SSA values and
+/// therefore need a backend stack slot (alloca).
+///
+/// A local lands in the returned set if either:
+/// - the `LocalAnalyzer` visitor saw a use that forces it to memory
+///   (borrows, address-of, repeated assignment, non-operand projections), or
+/// - its monomorphized layout is neither a backend immediate nor a backend
+///   scalar pair, in which case it is forced to memory unconditionally below.
+pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    fx: &FunctionCx<'a, 'tcx, Bx>,
+) -> BitSet<mir::Local> {
+    let mir = fx.mir;
+    let mut analyzer = LocalAnalyzer::new(fx);
+
+    analyzer.visit_body(&mir);
+
+    for (local, decl) in mir.local_decls.iter_enumerated() {
+        let ty = fx.monomorphize(&decl.ty);
+        debug!("local {:?} has type `{}`", local, ty);
+        let layout = fx.cx.spanned_layout_of(ty, decl.source_info.span);
+        if fx.cx.is_backend_immediate(layout) {
+            // These sorts of types are immediates that we can store
+            // in an Value without an alloca.
+        } else if fx.cx.is_backend_scalar_pair(layout) {
+            // We allow pairs and uses of any of their 2 fields.
+        } else {
+            // These sorts of types require an alloca. Note that
+            // is_llvm_immediate() may *still* be true, particularly
+            // for newtypes, but we currently force some types
+            // (e.g., structs) into an alloca unconditionally, just so
+            // that we don't have to deal with having two pathways
+            // (gep vs extractvalue etc).
+            analyzer.not_ssa(local);
+        }
+    }
+
+    analyzer.non_ssa_locals
+}
+
+/// MIR visitor that flags locals as non-SSA based on how they are used.
+struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+    /// Codegen context for the function whose MIR is being analyzed.
+    fx: &'mir FunctionCx<'a, 'tcx, Bx>,
+    /// Dominator tree of the MIR body; used in `visit_local` to check that a
+    /// read of a local is dominated by its (single) first assignment.
+    dominators: Dominators<mir::BasicBlock>,
+    /// Locals determined so far to require an alloca.
+    non_ssa_locals: BitSet<mir::Local>,
+    /// The location of the first visited direct assignment to each
+    /// local, or an invalid location (out of bounds `block` index)
+    /// acting as a "not yet assigned" sentinel — see `first_assignment`.
+    first_assignment: IndexVec<mir::Local, Location>,
+}
+
+// NOTE(review): `'mir`, `'a` and `'tcx` are used here without being declared
+// on the impl; this relies on rustc's internal `in_band_lifetimes` feature
+// and would not compile as ordinary Rust.
+impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
+    /// Builds the analyzer, pre-marking every function argument as assigned
+    /// at the start of the entry block (arguments are written by the caller,
+    /// not by any visible MIR statement).
+    fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self {
+        // One-past-the-last block index: an out-of-bounds `Location` used as
+        // the "no assignment seen yet" sentinel.
+        let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location();
+        let dominators = fx.mir.dominators();
+        let mut analyzer = LocalAnalyzer {
+            fx,
+            dominators,
+            non_ssa_locals: BitSet::new_empty(fx.mir.local_decls.len()),
+            first_assignment: IndexVec::from_elem(invalid_location, &fx.mir.local_decls),
+        };
+
+        // Arguments get assigned to by means of the function being called
+        for arg in fx.mir.args_iter() {
+            analyzer.first_assignment[arg] = mir::START_BLOCK.start_location();
+        }
+
+        analyzer
+    }
+
+    /// Returns the recorded first assignment of `local`, or `None` when the
+    /// stored location is still the out-of-bounds sentinel.
+    fn first_assignment(&self, local: mir::Local) -> Option<Location> {
+        let location = self.first_assignment[local];
+        if location.block.index() < self.fx.mir.basic_blocks().len() {
+            Some(location)
+        } else {
+            None
+        }
+    }
+
+    /// Flags `local` as requiring an alloca.
+    fn not_ssa(&mut self, local: mir::Local) {
+        debug!("marking {:?} as non-SSA", local);
+        self.non_ssa_locals.insert(local);
+    }
+
+    /// Records `location` as the first assignment to `local`; any second
+    /// assignment makes the local non-SSA.
+    fn assign(&mut self, local: mir::Local, location: Location) {
+        if self.first_assignment(local).is_some() {
+            self.not_ssa(local);
+        } else {
+            self.first_assignment[local] = location;
+        }
+    }
+
+    /// Walks a place, peeling one projection element per recursive call and
+    /// deciding which `PlaceContext` to propagate towards the base local
+    /// (e.g. consuming a scalar field keeps the original context, while other
+    /// projections downgrade it to `Projection`, which forces the local to
+    /// memory in `visit_local`).
+    fn process_place(
+        &mut self,
+        place_ref: &mir::PlaceRef<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        let cx = self.fx.cx;
+
+        if let &[ref proj_base @ .., elem] = place_ref.projection {
+            let mut base_context = if context.is_mutating_use() {
+                PlaceContext::MutatingUse(MutatingUseContext::Projection)
+            } else {
+                PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+            };
+
+            // Allow uses of projections that are ZSTs or from scalar fields.
+            let is_consume = match context {
+                PlaceContext::NonMutatingUse(
+                    NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+                ) => true,
+                _ => false,
+            };
+            if is_consume {
+                let base_ty =
+                    mir::Place::ty_from(place_ref.local, proj_base, self.fx.mir, cx.tcx());
+                let base_ty = self.fx.monomorphize(&base_ty);
+
+                // ZSTs don't require any actual memory access.
+                let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(&elem)).ty;
+                let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
+                if cx.spanned_layout_of(elem_ty, span).is_zst() {
+                    return;
+                }
+
+                if let mir::ProjectionElem::Field(..) = elem {
+                    let layout = cx.spanned_layout_of(base_ty.ty, span);
+                    if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
+                        // Recurse with the same context, instead of `Projection`,
+                        // potentially stopping at non-operand projections,
+                        // which would trigger `not_ssa` on locals.
+                        base_context = context;
+                    }
+                }
+            }
+
+            if let mir::ProjectionElem::Deref = elem {
+                // Deref projections typically only read the pointer.
+                // (the exception being `VarDebugInfo` contexts, handled below)
+                base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+
+                // Indirect debuginfo requires going through memory, that only
+                // the debugger accesses, following our emitted DWARF pointer ops.
+                //
+                // FIXME(eddyb) Investigate the possibility of relaxing this, but
+                // note that `llvm.dbg.declare` *must* be used for indirect places,
+                // even if we start using `llvm.dbg.value` for all other cases,
+                // as we don't necessarily know when the value changes, but only
+                // where it lives in memory.
+                //
+                // It's possible `llvm.dbg.declare` could support starting from
+                // a pointer that doesn't point to an `alloca`, but this would
+                // only be useful if we know the pointer being `Deref`'d comes
+                // from an immutable place, and if `llvm.dbg.declare` calls
+                // must be at the very start of the function, then only function
+                // arguments could contain such pointers.
+                if context == PlaceContext::NonUse(NonUseContext::VarDebugInfo) {
+                    // We use `NonUseContext::VarDebugInfo` for the base,
+                    // which might not force the base local to memory,
+                    // so we have to do it manually.
+                    self.visit_local(&place_ref.local, context, location);
+                }
+            }
+
+            // `NonUseContext::VarDebugInfo` needs to flow all the
+            // way down to the base local (see `visit_local`).
+            if context == PlaceContext::NonUse(NonUseContext::VarDebugInfo) {
+                base_context = context;
+            }
+
+            self.process_place(
+                &mir::PlaceRef { local: place_ref.local, projection: proj_base },
+                base_context,
+                location,
+            );
+            // HACK(eddyb) this emulates the old `visit_projection_elem`, this
+            // entire `visit_place`-like `process_place` method should be rewritten,
+            // now that we have moved to the "slice of projections" representation.
+            if let mir::ProjectionElem::Index(local) = elem {
+                // The index operand is read as a `Copy` use.
+                self.visit_local(
+                    &local,
+                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+                    location,
+                );
+            }
+        } else {
+            // FIXME this is super_place code, is repeated here to avoid cloning place or changing
+            // visit_place API
+            let mut context = context;
+
+            if !place_ref.projection.is_empty() {
+                context = if context.is_mutating_use() {
+                    PlaceContext::MutatingUse(MutatingUseContext::Projection)
+                } else {
+                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+                };
+            }
+
+            self.visit_local(&place_ref.local, context, location);
+            self.visit_projection(place_ref.local, place_ref.projection, context, location);
+        }
+    }
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
+    for LocalAnalyzer<'mir, 'a, 'tcx, Bx>
+{
+    /// Direct assignments (`place` is a bare local) are recorded via
+    /// `assign`; if the rvalue cannot be produced as an operand, the local is
+    /// forced to memory. Any other assignment target is visited as a `Store`
+    /// use of the place.
+    fn visit_assign(
+        &mut self,
+        place: &mir::Place<'tcx>,
+        rvalue: &mir::Rvalue<'tcx>,
+        location: Location,
+    ) {
+        debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
+
+        if let Some(index) = place.as_local() {
+            self.assign(index, location);
+            let decl_span = self.fx.mir.local_decls[index].source_info.span;
+            if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+                self.not_ssa(index);
+            }
+        } else {
+            self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+        }
+
+        self.visit_rvalue(rvalue, location);
+    }
+
+    /// Special-cases calls to the `box_free` lang item before deferring to
+    /// the default terminator traversal (see the comment inside).
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        let check = match terminator.kind {
+            mir::TerminatorKind::Call { func: mir::Operand::Constant(ref c), ref args, .. } => {
+                match c.literal.ty.kind {
+                    ty::FnDef(did, _) => Some((did, args)),
+                    _ => None,
+                }
+            }
+            _ => None,
+        };
+        if let Some((def_id, args)) = check {
+            if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() {
+                // box_free(x) shares with `drop x` the property that it
+                // is not guaranteed to be statically dominated by the
+                // definition of x, so x must always be in an alloca.
+                if let mir::Operand::Move(ref place) = args[0] {
+                    self.visit_place(
+                        place,
+                        PlaceContext::MutatingUse(MutatingUseContext::Drop),
+                        location,
+                    );
+                }
+            }
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+        debug!("visit_place(place={:?}, context={:?})", place, context);
+        self.process_place(&place.as_ref(), context, location);
+    }
+
+    /// The core classification: decides per use-context whether `local` can
+    /// stay an SSA value or must be spilled to memory.
+    fn visit_local(&mut self, &local: &mir::Local, context: PlaceContext, location: Location) {
+        match context {
+            // A call/yield writes its result into `local`: treat it like an
+            // assignment (a second one makes the local non-SSA).
+            PlaceContext::MutatingUse(MutatingUseContext::Call)
+            | PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
+                self.assign(local, location);
+            }
+
+            // Non-uses and retags have no effect on the analysis.
+            PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
+
+            PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+            ) => {
+                // Reads from uninitialized variables (e.g., in dead code, after
+                // optimizations) require locals to be in (uninitialized) memory.
+                // N.B., there can be uninitialized reads of a local visited after
+                // an assignment to that local, if they happen on disjoint paths.
+                let ssa_read = match self.first_assignment(local) {
+                    Some(assignment_location) => {
+                        assignment_location.dominates(location, &self.dominators)
+                    }
+                    None => false,
+                };
+                if !ssa_read {
+                    self.not_ssa(local);
+                }
+            }
+
+            // Any use that needs the local's address (or a non-operand
+            // projection of it) forces it into memory.
+            PlaceContext::MutatingUse(
+                MutatingUseContext::Store
+                | MutatingUseContext::AsmOutput
+                | MutatingUseContext::Borrow
+                | MutatingUseContext::AddressOf
+                | MutatingUseContext::Projection,
+            )
+            | PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::Inspect
+                | NonMutatingUseContext::SharedBorrow
+                | NonMutatingUseContext::UniqueBorrow
+                | NonMutatingUseContext::ShallowBorrow
+                | NonMutatingUseContext::AddressOf
+                | NonMutatingUseContext::Projection,
+            ) => {
+                self.not_ssa(local);
+            }
+
+            PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
+                let ty = self.fx.mir.local_decls[local].ty;
+                let ty = self.fx.monomorphize(&ty);
+
+                // Only need the place if we're actually dropping it.
+                if self.fx.cx.type_needs_drop(ty) {
+                    self.not_ssa(local);
+                }
+            }
+        }
+    }
+}
+
+/// Classifies each basic block of a MIR body by its role in cleanup
+/// (unwind) funclets — see `cleanup_kinds` below for how this is computed.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CleanupKind {
+    /// The block is not on a cleanup path.
+    NotCleanup,
+    /// The block is the entry ("master") block of a funclet.
+    Funclet,
+    /// The block belongs to the funclet whose entry block is `funclet`.
+    Internal { funclet: mir::BasicBlock },
+}
+
+impl CleanupKind {
+    /// Given that `self` is the `CleanupKind` of `for_bb`, returns the entry
+    /// block of the funclet `for_bb` belongs to, or `None` if `for_bb` is not
+    /// part of any cleanup.
+    pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option<mir::BasicBlock> {
+        match self {
+            CleanupKind::NotCleanup => None,
+            CleanupKind::Funclet => Some(for_bb),
+            CleanupKind::Internal { funclet } => Some(funclet),
+        }
+    }
+}
+
+/// Computes a `CleanupKind` for every basic block of `mir`, in two passes:
+///
+/// 1. `discover_masters`: every block that is the unwind target of a `Call`,
+///    `Assert`, `DropAndReplace` or `Drop` terminator becomes a `Funclet`.
+/// 2. `propagate`: funclet membership is pushed along successor edges in
+///    reverse postorder; a block reachable from two *different* funclets is
+///    promoted to a funclet of its own.
+pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+    /// Marks the direct unwind targets of terminators as funclet entries.
+    fn discover_masters<'tcx>(
+        result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
+        mir: &mir::Body<'tcx>,
+    ) {
+        for (bb, data) in mir.basic_blocks().iter_enumerated() {
+            match data.terminator().kind {
+                TerminatorKind::Goto { .. }
+                | TerminatorKind::Resume
+                | TerminatorKind::Abort
+                | TerminatorKind::Return
+                | TerminatorKind::GeneratorDrop
+                | TerminatorKind::Unreachable
+                | TerminatorKind::SwitchInt { .. }
+                | TerminatorKind::Yield { .. }
+                | TerminatorKind::FalseEdge { .. }
+                | TerminatorKind::FalseUnwind { .. }
+                | TerminatorKind::InlineAsm { .. } => { /* nothing to do */ }
+                TerminatorKind::Call { cleanup: unwind, .. }
+                | TerminatorKind::Assert { cleanup: unwind, .. }
+                | TerminatorKind::DropAndReplace { unwind, .. }
+                | TerminatorKind::Drop { unwind, .. } => {
+                    if let Some(unwind) = unwind {
+                        debug!(
+                            "cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
+                            bb, data, unwind
+                        );
+                        result[unwind] = CleanupKind::Funclet;
+                    }
+                }
+            }
+        }
+    }
+
+    /// Spreads funclet membership to successors, promoting blocks shared by
+    /// two different funclets into funclets themselves.
+    fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
+        // For each funclet entry, the single funclet it transfers to; a
+        // second, different successor is a bug (a funclet can't have two
+        // parents), hence the `span_bug!` below.
+        let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+
+        let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
+            ref mut s @ None => {
+                debug!("set_successor: updating successor of {:?} to {:?}", funclet, succ);
+                *s = Some(succ);
+            }
+            Some(s) => {
+                if s != succ {
+                    span_bug!(
+                        mir.span,
+                        "funclet {:?} has 2 parents - {:?} and {:?}",
+                        funclet,
+                        s,
+                        succ
+                    );
+                }
+            }
+        };
+
+        for (bb, data) in traversal::reverse_postorder(mir) {
+            let funclet = match result[bb] {
+                CleanupKind::NotCleanup => continue,
+                CleanupKind::Funclet => bb,
+                CleanupKind::Internal { funclet } => funclet,
+            };
+
+            debug!(
+                "cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
+                bb, data, result[bb], funclet
+            );
+
+            for &succ in data.terminator().successors() {
+                let kind = result[succ];
+                debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", funclet, succ, kind);
+                match kind {
+                    CleanupKind::NotCleanup => {
+                        result[succ] = CleanupKind::Internal { funclet };
+                    }
+                    CleanupKind::Funclet => {
+                        if funclet != succ {
+                            set_successor(funclet, succ);
+                        }
+                    }
+                    CleanupKind::Internal { funclet: succ_funclet } => {
+                        if funclet != succ_funclet {
+                            // `succ` has 2 different funclet going into it, so it must
+                            // be a funclet by itself.
+
+                            debug!(
+                                "promoting {:?} to a funclet and updating {:?}",
+                                succ, succ_funclet
+                            );
+                            result[succ] = CleanupKind::Funclet;
+                            set_successor(succ_funclet, succ);
+                            set_successor(funclet, succ);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+
+    discover_masters(&mut result, mir);
+    propagate(&mut result, mir);
+    debug!("cleanup_kinds: result={:?}", result);
+    result
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
new file mode 100644
index 00000000000..8048a569f79
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -0,0 +1,1416 @@
+use super::operand::OperandRef;
+use super::operand::OperandValue::{Immediate, Pair, Ref};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_ast as ast;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{AllocId, ConstValue, Pointer, Scalar};
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
+use rustc_span::source_map::Span;
+use rustc_span::{sym, Symbol};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::{self, LayoutOf};
+use rustc_target::spec::abi::Abi;
+
+use std::borrow::Cow;
+
+/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
+/// e.g., creating a basic block, calling a function, etc.
+struct TerminatorCodegenHelper<'tcx> {
+    /// The MIR basic block whose terminator is being codegen'd.
+    bb: mir::BasicBlock,
+    /// The terminator itself (its source span is used for debug locations
+    /// and diagnostics).
+    terminator: &'tcx mir::Terminator<'tcx>,
+    /// The entry block of the funclet containing `bb`, if `bb` is on a
+    /// cleanup path (see `CleanupKind::funclet_bb`).
+    funclet_bb: Option<mir::BasicBlock>,
+}
+
+impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
+    /// Returns the associated funclet from `FunctionCx::funclets` for the
+    /// `funclet_bb` member if it is not `None`.
+    fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
+    ) -> Option<&'b Bx::Funclet> {
+        match self.funclet_bb {
+            Some(funcl) => fx.funclets[funcl].as_ref(),
+            None => None,
+        }
+    }
+
+    /// Resolves `target` to a backend basic block, also returning whether the
+    /// jump crosses from the current funclet into a different one — in which
+    /// case the branch must be emitted as a `cleanup_ret` (see `llblock` and
+    /// `funclet_br`). Jumping *out* of a cleanup funclet is a bug.
+    fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        target: mir::BasicBlock,
+    ) -> (Bx::BasicBlock, bool) {
+        let span = self.terminator.source_info.span;
+        let lltarget = fx.blocks[target];
+        let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+        match (self.funclet_bb, target_funclet) {
+            (None, None) => (lltarget, false),
+            (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
+                (lltarget, false)
+            }
+            // jump *into* cleanup - need a landing pad if GNU
+            (None, Some(_)) => (fx.landing_pad_to(target), false),
+            (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
+            (Some(_), Some(_)) => (fx.landing_pad_to(target), true),
+        }
+    }
+
+    /// Create a basic block.
+    fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        target: mir::BasicBlock,
+    ) -> Bx::BasicBlock {
+        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        if is_cleanupret {
+            // MSVC cross-funclet jump - need a trampoline
+
+            debug!("llblock: creating cleanup trampoline for {:?}", target);
+            let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
+            let mut trampoline = fx.new_block(name);
+            trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+            trampoline.llbb()
+        } else {
+            lltarget
+        }
+    }
+
+    /// Branches to `target`, emitting a `cleanup_ret` instead of a plain
+    /// branch when the jump crosses funclets.
+    fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        bx: &mut Bx,
+        target: mir::BasicBlock,
+    ) {
+        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        if is_cleanupret {
+            // micro-optimization: generate a `ret` rather than a jump
+            // to a trampoline.
+            bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+        } else {
+            bx.br(lltarget);
+        }
+    }
+
+    /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
+    /// return destination `destination` and the cleanup function `cleanup`.
+    fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        bx: &mut Bx,
+        fn_abi: FnAbi<'tcx, Ty<'tcx>>,
+        fn_ptr: Bx::Value,
+        llargs: &[Bx::Value],
+        destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
+        cleanup: Option<mir::BasicBlock>,
+    ) {
+        // If there is a cleanup block and the function we're calling can unwind, then
+        // do an invoke, otherwise do a call.
+        if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
+            let ret_bx = if let Some((_, target)) = destination {
+                fx.blocks[target]
+            } else {
+                fx.unreachable_block()
+            };
+            let invokeret =
+                bx.invoke(fn_ptr, &llargs, ret_bx, self.llblock(fx, cleanup), self.funclet(fx));
+            bx.apply_attrs_callsite(&fn_abi, invokeret);
+
+            if let Some((ret_dest, target)) = destination {
+                // The return value must be stored in the *normal-return*
+                // successor block, not at the invoke site.
+                let mut ret_bx = fx.build_block(target);
+                fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
+                fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
+            }
+        } else {
+            let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
+            bx.apply_attrs_callsite(&fn_abi, llret);
+            if fx.mir[self.bb].is_cleanup {
+                // Cleanup is always the cold path. Don't inline
+                // drop glue. Also, when there is a deeply-nested
+                // struct, there are "symmetry" issues that cause
+                // exponential inlining - see issue #41696.
+                bx.do_not_inline(llret);
+            }
+
+            if let Some((ret_dest, target)) = destination {
+                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
+                self.funclet_br(fx, bx, target);
+            } else {
+                bx.unreachable();
+            }
+        }
+    }
+
+    // Generate sideeffect intrinsic if jumping to any of the targets can form
+    // a loop.
+    fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        mir: &'tcx mir::Body<'tcx>,
+        bx: &mut Bx,
+        targets: &[mir::BasicBlock],
+    ) {
+        // A backward edge (target at or before `self.bb` that can reach it)
+        // indicates a potential loop; only checked when the
+        // `-Zinsert-sideeffect` debugging option is enabled.
+        if bx.tcx().sess.opts.debugging_opts.insert_sideeffect {
+            if targets.iter().any(|&target| {
+                target <= self.bb
+                    && target.start_location().is_predecessor_of(self.bb.start_location(), mir)
+            }) {
+                bx.sideeffect();
+            }
+        }
+    }
+}
+
+/// Codegen implementations for some terminator variants.
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Generates code for a `Resume` terminator.
+    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+        if let Some(funclet) = helper.funclet(self) {
+            // Funclet-based EH (MSVC): resuming is a `cleanup_ret` with no
+            // unwind destination.
+            bx.cleanup_ret(funclet, None);
+        } else {
+            // Landing-pad-based EH (GNU): reload the two values saved in the
+            // personality slot, rebuild the landing-pad aggregate from them,
+            // and resume unwinding with it.
+            let slot = self.get_personality_slot(&mut bx);
+            let lp0 = slot.project_field(&mut bx, 0);
+            let lp0 = bx.load_operand(lp0).immediate();
+            let lp1 = slot.project_field(&mut bx, 1);
+            let lp1 = bx.load_operand(lp1).immediate();
+            // The slot's lifetime ends here; it is only live across the pad.
+            slot.storage_dead(&mut bx);
+
+            let mut lp = bx.const_undef(self.landing_pad_type());
+            lp = bx.insert_value(lp, lp0, 0);
+            lp = bx.insert_value(lp, lp1, 1);
+            bx.resume(lp);
+        }
+    }
+
+    /// Generates code for a `SwitchInt` terminator: a conditional branch when
+    /// there are exactly two targets (with a special case avoiding an `icmp`
+    /// when switching on `bool`), and a `switch` instruction otherwise.
+    fn codegen_switchint_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        discr: &mir::Operand<'tcx>,
+        switch_ty: Ty<'tcx>,
+        values: &Cow<'tcx, [u128]>,
+        targets: &Vec<mir::BasicBlock>,
+    ) {
+        let discr = self.codegen_operand(&mut bx, &discr);
+        // `switch_ty` is redundant, sanity-check that.
+        assert_eq!(discr.layout.ty, switch_ty);
+        if targets.len() == 2 {
+            // If there are two targets, emit br instead of switch
+            let lltrue = helper.llblock(self, targets[0]);
+            let llfalse = helper.llblock(self, targets[1]);
+            if switch_ty == bx.tcx().types.bool {
+                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+                // Don't generate trivial icmps when switching on bool
+                if let [0] = values[..] {
+                    // `targets[0]` is taken when the discriminant equals 0,
+                    // i.e. when the bool is false — so swap the branch arms.
+                    bx.cond_br(discr.immediate(), llfalse, lltrue);
+                } else {
+                    assert_eq!(&values[..], &[1]);
+                    bx.cond_br(discr.immediate(), lltrue, llfalse);
+                }
+            } else {
+                let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
+                let llval = bx.const_uint_big(switch_llty, values[0]);
+                let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+                bx.cond_br(cmp, lltrue, llfalse);
+            }
+        } else {
+            helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+            // The last target is the `otherwise` (default) arm.
+            let (otherwise, targets) = targets.split_last().unwrap();
+            bx.switch(
+                discr.immediate(),
+                helper.llblock(self, *otherwise),
+                values
+                    .iter()
+                    .zip(targets)
+                    .map(|(&value, target)| (value, helper.llblock(self, *target))),
+            );
+        }
+    }
+
+    /// Generates code for a `Return` terminator: runs `va_end` for
+    /// C-variadic functions, aborts instead of returning when the return
+    /// type is uninhabited, and otherwise materializes the return value
+    /// according to the return `PassMode`.
+    fn codegen_return_terminator(&mut self, mut bx: Bx) {
+        // Call `va_end` if this is the definition of a C-variadic function.
+        if self.fn_abi.c_variadic {
+            // The `VaList` "spoofed" argument is just after all the real arguments.
+            let va_list_arg_idx = self.fn_abi.args.len();
+            match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
+                LocalRef::Place(va_list) => {
+                    bx.va_end(va_list.llval);
+                }
+                _ => bug!("C-variadic function must have a `VaList` place"),
+            }
+        }
+        if self.fn_abi.ret.layout.abi.is_uninhabited() {
+            // Functions with uninhabited return values are marked `noreturn`,
+            // so we should make sure that we never actually do.
+            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+            // if that turns out to be helpful.
+            bx.abort();
+            // `abort` does not terminate the block, so we still need to generate
+            // an `unreachable` terminator after it.
+            bx.unreachable();
+            return;
+        }
+        let llval = match self.fn_abi.ret.mode {
+            // Ignored or indirect (sret-style) returns produce `ret void`.
+            PassMode::Ignore | PassMode::Indirect(..) => {
+                bx.ret_void();
+                return;
+            }
+
+            PassMode::Direct(_) | PassMode::Pair(..) => {
+                let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+                if let Ref(llval, _, align) = op.val {
+                    bx.load(llval, align)
+                } else {
+                    op.immediate_or_packed_pair(&mut bx)
+                }
+            }
+
+            // Cast returns: spill immediates to a scratch slot if needed,
+            // then reload the value through a pointer of the cast type.
+            PassMode::Cast(cast_ty) => {
+                let op = match self.locals[mir::RETURN_PLACE] {
+                    LocalRef::Operand(Some(op)) => op,
+                    LocalRef::Operand(None) => bug!("use of return before def"),
+                    LocalRef::Place(cg_place) => OperandRef {
+                        val: Ref(cg_place.llval, None, cg_place.align),
+                        layout: cg_place.layout,
+                    },
+                    LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+                };
+                let llslot = match op.val {
+                    Immediate(_) | Pair(..) => {
+                        let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
+                        op.val.store(&mut bx, scratch);
+                        scratch.llval
+                    }
+                    Ref(llval, _, align) => {
+                        assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
+                        llval
+                    }
+                };
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
+                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+            }
+        };
+        bx.ret(llval);
+    }
+
+    /// Codegen a `Drop { place, target, unwind }` terminator: call the drop
+    /// glue (`drop_in_place`) for the place's type, then branch to `target`.
+    /// Trait objects are special-cased to call the destructor through the
+    /// vtable instead.
+    fn codegen_drop_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        location: mir::Place<'tcx>,
+        target: mir::BasicBlock,
+        unwind: Option<mir::BasicBlock>,
+    ) {
+        let ty = location.ty(self.mir, bx.tcx()).ty;
+        let ty = self.monomorphize(&ty);
+        let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
+
+        if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+            // we don't actually need to drop anything.
+            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+            helper.funclet_br(self, &mut bx, target);
+            return;
+        }
+
+        let place = self.codegen_place(&mut bx, location.as_ref());
+        // Unsized places carry an extra value (`llextra`, e.g. the vtable for
+        // a trait object) that is passed as a second argument to the glue.
+        let (args1, args2);
+        let mut args = if let Some(llextra) = place.llextra {
+            args2 = [place.llval, llextra];
+            &args2[..]
+        } else {
+            args1 = [place.llval];
+            &args1[..]
+        };
+        let (drop_fn, fn_abi) = match ty.kind {
+            // FIXME(eddyb) perhaps move some of this logic into
+            // `Instance::resolve_drop_in_place`?
+            ty::Dynamic(..) => {
+                // `dyn Trait`: look the destructor up in the vtable (which is
+                // `args[1]` here) and pass only the data pointer to it.
+                let virtual_drop = Instance {
+                    def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+                    substs: drop_fn.substs,
+                };
+                let fn_abi = FnAbi::of_instance(&bx, virtual_drop, &[]);
+                let vtable = args[1];
+                args = &args[..1];
+                (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_abi), fn_abi)
+            }
+            _ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])),
+        };
+        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+        helper.do_call(
+            self,
+            &mut bx,
+            fn_abi,
+            drop_fn,
+            args,
+            Some((ReturnDest::Nothing, target)),
+            unwind,
+        );
+    }
+
+    /// Codegen an `Assert` terminator: branch to `target` when `cond ==
+    /// expected`, otherwise branch to a fresh "panic" block that calls the
+    /// appropriate panic lang item with the assert message and caller
+    /// location. If the condition is statically known to succeed, the panic
+    /// path is omitted entirely.
+    fn codegen_assert_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        terminator: &mir::Terminator<'tcx>,
+        cond: &mir::Operand<'tcx>,
+        expected: bool,
+        msg: &mir::AssertMessage<'tcx>,
+        target: mir::BasicBlock,
+        cleanup: Option<mir::BasicBlock>,
+    ) {
+        let span = terminator.source_info.span;
+        let cond = self.codegen_operand(&mut bx, cond).immediate();
+        let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
+
+        // This case can currently arise only from functions marked
+        // with #[rustc_inherit_overflow_checks] and inlined from
+        // another crate (mostly core::num generic/#[inline] fns),
+        // while the current crate doesn't use overflow checks.
+        // NOTE: Unlike binops, negation doesn't have its own
+        // checked operation, just a comparison with the minimum
+        // value, so we have to check for the assert message.
+        if !bx.check_overflow() {
+            if let AssertKind::OverflowNeg(_) = *msg {
+                const_cond = Some(expected);
+            }
+        }
+
+        // Don't codegen the panic block if success is known.
+        if const_cond == Some(expected) {
+            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+            helper.funclet_br(self, &mut bx, target);
+            return;
+        }
+
+        // Pass the condition through llvm.expect for branch hinting.
+        let cond = bx.expect(cond, expected);
+
+        // Create the failure block and the conditional branch to it.
+        let lltarget = helper.llblock(self, target);
+        let panic_block = self.new_block("panic");
+        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+        if expected {
+            bx.cond_br(cond, lltarget, panic_block.llbb());
+        } else {
+            bx.cond_br(cond, panic_block.llbb(), lltarget);
+        }
+
+        // After this point, bx is the block for the call to panic.
+        bx = panic_block;
+        self.set_debug_loc(&mut bx, terminator.source_info);
+
+        // Get the location information.
+        let location = self.get_caller_location(&mut bx, span).immediate();
+
+        // Put together the arguments to the panic entry point.
+        let (lang_item, args) = match msg {
+            AssertKind::BoundsCheck { ref len, ref index } => {
+                let len = self.codegen_operand(&mut bx, len).immediate();
+                let index = self.codegen_operand(&mut bx, index).immediate();
+                // It's `fn panic_bounds_check(index: usize, len: usize)`,
+                // and `#[track_caller]` adds an implicit third argument.
+                (LangItem::PanicBoundsCheck, vec![index, len, location])
+            }
+            _ => {
+                let msg_str = Symbol::intern(msg.description());
+                let msg = bx.const_str(msg_str);
+                // It's `pub fn panic(expr: &str)`, with the wide reference being passed
+                // as two arguments, and `#[track_caller]` adds an implicit third argument.
+                (LangItem::Panic, vec![msg.0, msg.1, location])
+            }
+        };
+
+        // Obtain the panic entry point.
+        let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
+        let instance = ty::Instance::mono(bx.tcx(), def_id);
+        let fn_abi = FnAbi::of_instance(&bx, instance, &[]);
+        let llfn = bx.get_fn_addr(instance);
+
+        // Codegen the actual panic invoke/call.
+        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
+    }
+
+    /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+    ///
+    /// Handles the `assert_inhabited`, `assert_zero_valid` and
+    /// `assert_uninit_valid` intrinsics: depending on the layout of the
+    /// instance's type argument, these compile either to a call to the panic
+    /// lang item with a type-specific message, or to a plain branch to the
+    /// destination block (a no-op).
+    fn codegen_panic_intrinsic(
+        &mut self,
+        helper: &TerminatorCodegenHelper<'tcx>,
+        bx: &mut Bx,
+        intrinsic: Option<Symbol>,
+        instance: Option<Instance<'tcx>>,
+        span: Span,
+        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
+        cleanup: Option<mir::BasicBlock>,
+    ) -> bool {
+        // Emit a panic or a no-op for `assert_*` intrinsics.
+        // These are intrinsics that compile to panics so that we can get a message
+        // which mentions the offending type, even from a const context.
+        #[derive(Debug, PartialEq)]
+        enum AssertIntrinsic {
+            Inhabited,
+            ZeroValid,
+            UninitValid,
+        }
+        let panic_intrinsic = intrinsic.and_then(|i| match i {
+            sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
+            sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
+            sym::assert_uninit_valid => Some(AssertIntrinsic::UninitValid),
+            _ => None,
+        });
+        if let Some(intrinsic) = panic_intrinsic {
+            use AssertIntrinsic::*;
+            let ty = instance.unwrap().substs.type_at(0);
+            let layout = bx.layout_of(ty);
+            let do_panic = match intrinsic {
+                Inhabited => layout.abi.is_uninhabited(),
+                // We unwrap as the error type is `!`.
+                ZeroValid => !layout.might_permit_raw_init(bx, /*zero:*/ true).unwrap(),
+                // We unwrap as the error type is `!`.
+                UninitValid => !layout.might_permit_raw_init(bx, /*zero:*/ false).unwrap(),
+            };
+            if do_panic {
+                let msg_str = if layout.abi.is_uninhabited() {
+                    // Use this error even for the other intrinsics as it is more precise.
+                    format!("attempted to instantiate uninhabited type `{}`", ty)
+                } else if intrinsic == ZeroValid {
+                    format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+                } else {
+                    format!("attempted to leave type `{}` uninitialized, which is invalid", ty)
+                };
+                let msg = bx.const_str(Symbol::intern(&msg_str));
+                let location = self.get_caller_location(bx, span).immediate();
+
+                // Obtain the panic entry point.
+                // FIXME: dedup this with `codegen_assert_terminator` above.
+                let def_id = common::langcall(bx.tcx(), Some(span), "", LangItem::Panic);
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let fn_abi = FnAbi::of_instance(bx, instance, &[]);
+                let llfn = bx.get_fn_addr(instance);
+
+                if let Some((_, target)) = destination.as_ref() {
+                    helper.maybe_sideeffect(self.mir, bx, &[*target]);
+                }
+                // Codegen the actual panic invoke/call.
+                helper.do_call(
+                    self,
+                    bx,
+                    fn_abi,
+                    llfn,
+                    &[msg.0, msg.1, location],
+                    destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
+                    cleanup,
+                );
+            } else {
+                // a NOP
+                let target = destination.as_ref().unwrap().1;
+                helper.maybe_sideeffect(self.mir, bx, &[target]);
+                helper.funclet_br(self, bx, target)
+            }
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Codegen a `Call` terminator. This handles resolving the callee
+    /// (fn item vs. fn pointer), special-casing `transmute`, the panic
+    /// `assert_*` intrinsics and `caller_location`, dispatching intrinsics to
+    /// the backend, virtual calls through a `dyn Trait` receiver, lowering
+    /// each argument according to the callee's `FnAbi`, and finally emitting
+    /// the call/invoke itself.
+    fn codegen_call_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        terminator: &mir::Terminator<'tcx>,
+        func: &mir::Operand<'tcx>,
+        args: &Vec<mir::Operand<'tcx>>,
+        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
+        cleanup: Option<mir::BasicBlock>,
+        fn_span: Span,
+    ) {
+        let span = terminator.source_info.span;
+        // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+        let callee = self.codegen_operand(&mut bx, func);
+
+        // `FnDef` callees resolve to an `Instance` (no value to compute);
+        // `FnPtr` callees give us the function value directly.
+        let (instance, mut llfn) = match callee.layout.ty.kind {
+            ty::FnDef(def_id, substs) => (
+                Some(
+                    ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs)
+                        .unwrap()
+                        .unwrap()
+                        .polymorphize(bx.tcx()),
+                ),
+                None,
+            ),
+            ty::FnPtr(_) => (None, Some(callee.immediate())),
+            _ => bug!("{} is not callable", callee.layout.ty),
+        };
+        let def = instance.map(|i| i.def);
+
+        if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
+            // Empty drop glue; a no-op.
+            let &(_, target) = destination.as_ref().unwrap();
+            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+            helper.funclet_br(self, &mut bx, target);
+            return;
+        }
+
+        // FIXME(eddyb) avoid computing this if possible, when `instance` is
+        // available - right now `sig` is only needed for getting the `abi`
+        // and figuring out how many extra args were passed to a C-variadic `fn`.
+        let sig = callee.layout.ty.fn_sig(bx.tcx());
+        let abi = sig.abi();
+
+        // Handle intrinsics old codegen wants Expr's for, ourselves.
+        let intrinsic = match def {
+            Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
+            _ => None,
+        };
+
+        // Any MIR arguments beyond the signature's arity are the extra
+        // (variadic) arguments; monomorphize their types for the ABI.
+        let extra_args = &args[sig.inputs().skip_binder().len()..];
+        let extra_args = extra_args
+            .iter()
+            .map(|op_arg| {
+                let op_ty = op_arg.ty(self.mir, bx.tcx());
+                self.monomorphize(&op_ty)
+            })
+            .collect::<Vec<_>>();
+
+        let fn_abi = match instance {
+            Some(instance) => FnAbi::of_instance(&bx, instance, &extra_args),
+            None => FnAbi::of_fn_ptr(&bx, sig, &extra_args),
+        };
+
+        // `transmute` is lowered as a store into the destination place,
+        // not as a call.
+        if intrinsic == Some(sym::transmute) {
+            if let Some(destination_ref) = destination.as_ref() {
+                let &(dest, target) = destination_ref;
+                self.codegen_transmute(&mut bx, &args[0], dest);
+                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, target);
+            } else {
+                // If we are trying to transmute to an uninhabited type,
+                // it is likely there is no allotted destination. In fact,
+                // transmuting to an uninhabited type is UB, which means
+                // we can do what we like. Here, we declare that transmuting
+                // into an uninhabited type is impossible, so anything following
+                // it must be unreachable.
+                assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
+                bx.unreachable();
+            }
+            return;
+        }
+
+        if self.codegen_panic_intrinsic(
+            &helper,
+            &mut bx,
+            intrinsic,
+            instance,
+            span,
+            destination,
+            cleanup,
+        ) {
+            return;
+        }
+
+        // The arguments we'll be passing. Plus one to account for outptr, if used.
+        let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
+        let mut llargs = Vec::with_capacity(arg_count);
+
+        // Prepare the return value destination
+        let ret_dest = if let Some((dest, _)) = *destination {
+            let is_intrinsic = intrinsic.is_some();
+            self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs, is_intrinsic)
+        } else {
+            ReturnDest::Nothing
+        };
+
+        // `caller_location` is materialized directly from the function's
+        // caller-location state rather than emitted as a call.
+        if intrinsic == Some(sym::caller_location) {
+            if let Some((_, target)) = destination.as_ref() {
+                let location = self.get_caller_location(&mut bx, fn_span);
+
+                if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
+                    location.val.store(&mut bx, tmp);
+                }
+                self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
+
+                helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
+                helper.funclet_br(self, &mut bx, *target);
+            }
+            return;
+        }
+
+        // All remaining intrinsics (except `drop_in_place`, which is a real
+        // call) are handed to the backend's intrinsic lowering.
+        if intrinsic.is_some() && intrinsic != Some(sym::drop_in_place) {
+            let intrinsic = intrinsic.unwrap();
+            let dest = match ret_dest {
+                _ if fn_abi.ret.is_indirect() => llargs[0],
+                ReturnDest::Nothing => {
+                    bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
+                }
+                ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+                ReturnDest::DirectOperand(_) => {
+                    bug!("Cannot use direct operand with an intrinsic call")
+                }
+            };
+
+            let args: Vec<_> = args
+                .iter()
+                .enumerate()
+                .map(|(i, arg)| {
+                    // The indices passed to simd_shuffle* in the
+                    // third argument must be constant. This is
+                    // checked by const-qualification, which also
+                    // promotes any complex rvalues to constants.
+                    if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+                        if let mir::Operand::Constant(constant) = arg {
+                            let c = self.eval_mir_constant(constant);
+                            let (llval, ty) = self.simd_shuffle_indices(
+                                &bx,
+                                constant.span,
+                                constant.literal.ty,
+                                c,
+                            );
+                            return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty) };
+                        } else {
+                            span_bug!(span, "shuffle indices must be constant");
+                        }
+                    }
+
+                    self.codegen_operand(&mut bx, arg)
+                })
+                .collect();
+
+            bx.codegen_intrinsic_call(
+                *instance.as_ref().unwrap(),
+                &fn_abi,
+                &args,
+                dest,
+                terminator.source_info.span,
+            );
+
+            if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+            }
+
+            if let Some((_, target)) = *destination {
+                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, target);
+            } else {
+                bx.unreachable();
+            }
+
+            return;
+        }
+
+        // Split the rust-call tupled arguments off.
+        let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+            let (tup, args) = args.split_last().unwrap();
+            (args, Some(tup))
+        } else {
+            (&args[..], None)
+        };
+
+        'make_args: for (i, arg) in first_args.iter().enumerate() {
+            let mut op = self.codegen_operand(&mut bx, arg);
+
+            // Virtual calls: the first argument is the receiver, which must
+            // be split into a data pointer (passed as the argument) and a
+            // vtable (used to look up the method).
+            if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+                if let Pair(..) = op.val {
+                    // In the case of Rc<Self>, we need to explicitly pass a
+                    // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+                    // that is understood elsewhere in the compiler as a method on
+                    // `dyn Trait`.
+                    // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+                    // we get a value of a built-in pointer type
+                    'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+                        && !op.layout.ty.is_region_ptr()
+                    {
+                        for i in 0..op.layout.fields.count() {
+                            let field = op.extract_field(&mut bx, i);
+                            if !field.layout.is_zst() {
+                                // we found the one non-zero-sized field that is allowed
+                                // now find *its* non-zero-sized field, or stop if it's a
+                                // pointer
+                                op = field;
+                                continue 'descend_newtypes;
+                            }
+                        }
+
+                        span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+                    }
+
+                    // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+                    // data pointer and vtable. Look up the method in the vtable, and pass
+                    // the data pointer as the first argument
+                    match op.val {
+                        Pair(data_ptr, meta) => {
+                            llfn = Some(
+                                meth::VirtualIndex::from_index(idx).get_fn(&mut bx, meta, &fn_abi),
+                            );
+                            llargs.push(data_ptr);
+                            continue 'make_args;
+                        }
+                        other => bug!("expected a Pair, got {:?}", other),
+                    }
+                } else if let Ref(data_ptr, Some(meta), _) = op.val {
+                    // by-value dynamic dispatch
+                    llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(&mut bx, meta, &fn_abi));
+                    llargs.push(data_ptr);
+                    continue;
+                } else {
+                    span_bug!(span, "can't codegen a virtual call on {:?}", op);
+                }
+            }
+
+            // The callee needs to own the argument memory if we pass it
+            // by-ref, so make a local copy of non-immediate constants.
+            match (arg, op.val) {
+                (&mir::Operand::Copy(_), Ref(_, None, _))
+                | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+                    let tmp = PlaceRef::alloca(&mut bx, op.layout);
+                    op.val.store(&mut bx, tmp);
+                    op.val = Ref(tmp.llval, None, tmp.align);
+                }
+                _ => {}
+            }
+
+            self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+        }
+        if let Some(tup) = untuple {
+            self.codegen_arguments_untupled(
+                &mut bx,
+                tup,
+                &mut llargs,
+                &fn_abi.args[first_args.len()..],
+            )
+        }
+
+        // `#[track_caller]` callees take the caller location as a trailing
+        // implicit argument.
+        let needs_location =
+            instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
+        if needs_location {
+            assert_eq!(
+                fn_abi.args.len(),
+                args.len() + 1,
+                "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
+            );
+            let location = self.get_caller_location(&mut bx, fn_span);
+            debug!(
+                "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
+                terminator, location, fn_span
+            );
+
+            let last_arg = fn_abi.args.last().unwrap();
+            self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+        }
+
+        let fn_ptr = match (llfn, instance) {
+            (Some(llfn), _) => llfn,
+            (None, Some(instance)) => bx.get_fn_addr(instance),
+            _ => span_bug!(span, "no llfn for call"),
+        };
+
+        if let Some((_, target)) = destination.as_ref() {
+            helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
+        }
+        helper.do_call(
+            self,
+            &mut bx,
+            fn_abi,
+            fn_ptr,
+            &llargs,
+            destination.as_ref().map(|&(_, target)| (ret_dest, target)),
+            cleanup,
+        );
+    }
+
+    /// Codegen an `InlineAsm` terminator: lower each MIR asm operand into a
+    /// backend `InlineAsmOperandRef` (evaluating `Const` operands into
+    /// literal strings and resolving `SymFn` operands to instances), emit
+    /// the asm, then branch to `destination` or mark the block unreachable
+    /// (for `noreturn` asm).
+    fn codegen_asm_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        terminator: &mir::Terminator<'tcx>,
+        template: &[ast::InlineAsmTemplatePiece],
+        operands: &[mir::InlineAsmOperand<'tcx>],
+        options: ast::InlineAsmOptions,
+        line_spans: &[Span],
+        destination: Option<mir::BasicBlock>,
+    ) {
+        let span = terminator.source_info.span;
+
+        let operands: Vec<_> = operands
+            .iter()
+            .map(|op| match *op {
+                mir::InlineAsmOperand::In { reg, ref value } => {
+                    let value = self.codegen_operand(&mut bx, value);
+                    InlineAsmOperandRef::In { reg, value }
+                }
+                mir::InlineAsmOperand::Out { reg, late, ref place } => {
+                    let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+                    InlineAsmOperandRef::Out { reg, late, place }
+                }
+                mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
+                    let in_value = self.codegen_operand(&mut bx, in_value);
+                    let out_place =
+                        out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+                    InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
+                }
+                mir::InlineAsmOperand::Const { ref value } => {
+                    if let mir::Operand::Constant(constant) = value {
+                        let const_value = self
+                            .eval_mir_constant(constant)
+                            .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+                        let ty = constant.literal.ty;
+                        let size = bx.layout_of(ty).size;
+                        let scalar = match const_value {
+                            // Promoted constants are evaluated into a ByRef instead of a Scalar,
+                            // but we want the scalar value here.
+                            ConstValue::ByRef { alloc, offset } => {
+                                let ptr = Pointer::new(AllocId(0), offset);
+                                alloc
+                                    .read_scalar(&bx, ptr, size)
+                                    .and_then(|s| s.check_init())
+                                    .unwrap_or_else(|e| {
+                                        bx.tcx().sess.span_err(
+                                            span,
+                                            &format!("Could not evaluate asm const: {}", e),
+                                        );
+
+                                        // We are erroring out, just emit a dummy constant.
+                                        Scalar::from_u64(0)
+                                    })
+                            }
+                            _ => span_bug!(span, "expected ByRef for promoted asm const"),
+                        };
+                        // Render the scalar as a decimal literal of the
+                        // constant's (integer or float) type, for direct
+                        // interpolation into the asm template.
+                        let value = scalar.assert_bits(size);
+                        let string = match ty.kind {
+                            ty::Uint(_) => value.to_string(),
+                            ty::Int(int_ty) => {
+                                match int_ty.normalize(bx.tcx().sess.target.ptr_width) {
+                                    ast::IntTy::I8 => (value as i8).to_string(),
+                                    ast::IntTy::I16 => (value as i16).to_string(),
+                                    ast::IntTy::I32 => (value as i32).to_string(),
+                                    ast::IntTy::I64 => (value as i64).to_string(),
+                                    ast::IntTy::I128 => (value as i128).to_string(),
+                                    ast::IntTy::Isize => unreachable!(),
+                                }
+                            }
+                            ty::Float(ast::FloatTy::F32) => {
+                                f32::from_bits(value as u32).to_string()
+                            }
+                            ty::Float(ast::FloatTy::F64) => {
+                                f64::from_bits(value as u64).to_string()
+                            }
+                            _ => span_bug!(span, "asm const has bad type {}", ty),
+                        };
+                        InlineAsmOperandRef::Const { string }
+                    } else {
+                        span_bug!(span, "asm const is not a constant");
+                    }
+                }
+                mir::InlineAsmOperand::SymFn { ref value } => {
+                    let literal = self.monomorphize(&value.literal);
+                    if let ty::FnDef(def_id, substs) = literal.ty.kind {
+                        let instance = ty::Instance::resolve_for_fn_ptr(
+                            bx.tcx(),
+                            ty::ParamEnv::reveal_all(),
+                            def_id,
+                            substs,
+                        )
+                        .unwrap();
+                        InlineAsmOperandRef::SymFn { instance }
+                    } else {
+                        span_bug!(span, "invalid type for asm sym (fn)");
+                    }
+                }
+                mir::InlineAsmOperand::SymStatic { def_id } => {
+                    InlineAsmOperandRef::SymStatic { def_id }
+                }
+            })
+            .collect();
+
+        bx.codegen_inline_asm(template, &operands, options, line_spans);
+
+        if let Some(target) = destination {
+            helper.funclet_br(self, &mut bx, target);
+        } else {
+            bx.unreachable();
+        }
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Emit code for every statement in `bb`, then for its terminator.
+    pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
+        let bx = self.build_block(bb);
+        let body = self.mir;
+        let block_data = &body[bb];
+
+        debug!("codegen_block({:?}={:?})", bb, block_data);
+
+        // `codegen_statement` consumes the builder and hands it back, so we
+        // thread it through the statement list with a fold.
+        let bx = block_data
+            .statements
+            .iter()
+            .fold(bx, |bx, statement| self.codegen_statement(bx, statement));
+
+        self.codegen_terminator(bx, bb, block_data.terminator());
+    }
+
+    /// Dispatch on the terminator kind and emit code for it. The trivial
+    /// kinds are handled inline; the rest delegate to the dedicated
+    /// `codegen_*_terminator` helpers.
+    fn codegen_terminator(
+        &mut self,
+        mut bx: Bx,
+        bb: mir::BasicBlock,
+        terminator: &'tcx mir::Terminator<'tcx>,
+    ) {
+        debug!("codegen_terminator: {:?}", terminator);
+
+        // Create the cleanup bundle, if needed.
+        let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+        let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
+
+        self.set_debug_loc(&mut bx, terminator.source_info);
+        match terminator.kind {
+            mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+
+            mir::TerminatorKind::Abort => {
+                bx.abort();
+                // `abort` does not terminate the block, so we still need to generate
+                // an `unreachable` terminator after it.
+                bx.unreachable();
+            }
+
+            mir::TerminatorKind::Goto { target } => {
+                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, target);
+            }
+
+            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
+                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, values, targets);
+            }
+
+            mir::TerminatorKind::Return => {
+                self.codegen_return_terminator(bx);
+            }
+
+            mir::TerminatorKind::Unreachable => {
+                bx.unreachable();
+            }
+
+            mir::TerminatorKind::Drop { place, target, unwind } => {
+                self.codegen_drop_terminator(helper, bx, place, target, unwind);
+            }
+
+            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
+                self.codegen_assert_terminator(
+                    helper, bx, terminator, cond, expected, msg, target, cleanup,
+                );
+            }
+
+            // This variant is removed by an earlier MIR pass, so reaching it
+            // here is a compiler bug.
+            mir::TerminatorKind::DropAndReplace { .. } => {
+                bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+            }
+
+            mir::TerminatorKind::Call {
+                ref func,
+                ref args,
+                ref destination,
+                cleanup,
+                from_hir_call: _,
+                fn_span,
+            } => {
+                self.codegen_call_terminator(
+                    helper,
+                    bx,
+                    terminator,
+                    func,
+                    args,
+                    destination,
+                    cleanup,
+                    fn_span,
+                );
+            }
+            mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
+                bug!("generator ops in codegen")
+            }
+            mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
+                bug!("borrowck false edges in codegen")
+            }
+
+            mir::TerminatorKind::InlineAsm {
+                template,
+                ref operands,
+                options,
+                line_spans,
+                destination,
+            } => {
+                self.codegen_asm_terminator(
+                    helper,
+                    bx,
+                    terminator,
+                    template,
+                    operands,
+                    options,
+                    line_spans,
+                    destination,
+                );
+            }
+        }
+    }
+
+    /// Lowers one already-codegen'd call operand `op` into the flat backend
+    /// argument list `llargs`, following the ABI classification in `arg`:
+    /// padding is filled with undef, scalar pairs push two values, unsized
+    /// indirect arguments push (pointer, metadata), and cast/indirect passing
+    /// may force a spill through a stack temporary first.
+    fn codegen_argument(
+        &mut self,
+        bx: &mut Bx,
+        op: OperandRef<'tcx, Bx::Value>,
+        llargs: &mut Vec<Bx::Value>,
+        arg: &ArgAbi<'tcx, Ty<'tcx>>,
+    ) {
+        // Fill padding with undef value, where applicable.
+        if let Some(ty) = arg.pad {
+            llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
+        }
+
+        // Arguments the ABI ignores contribute nothing to the call.
+        if arg.is_ignore() {
+            return;
+        }
+
+        // A scalar pair is passed as two separate backend values.
+        if let PassMode::Pair(..) = arg.mode {
+            match op.val {
+                Pair(a, b) => {
+                    llargs.push(a);
+                    llargs.push(b);
+                    return;
+                }
+                _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
+            }
+        } else if arg.is_unsized_indirect() {
+            // Unsized indirect arguments carry the data pointer plus its
+            // extra metadata value (the `Some(b)` component of the `Ref`).
+            match op.val {
+                Ref(a, Some(b), _) => {
+                    llargs.push(a);
+                    llargs.push(b);
+                    return;
+                }
+                _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
+            }
+        }
+
+        // Force by-ref if we have to load through a cast pointer.
+        let (mut llval, align, by_ref) = match op.val {
+            Immediate(_) | Pair(..) => match arg.mode {
+                PassMode::Indirect(..) | PassMode::Cast(_) => {
+                    // Spill the immediate to a stack slot so it can be passed
+                    // (or re-loaded through a cast pointer) below.
+                    let scratch = PlaceRef::alloca(bx, arg.layout);
+                    op.val.store(bx, scratch);
+                    (scratch.llval, scratch.align, true)
+                }
+                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
+            },
+            Ref(llval, _, align) => {
+                if arg.is_indirect() && align < arg.layout.align.abi {
+                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
+                    // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
+                    // have scary latent bugs around.
+
+                    // Copy into a properly aligned scratch slot and pass that.
+                    let scratch = PlaceRef::alloca(bx, arg.layout);
+                    base::memcpy_ty(
+                        bx,
+                        scratch.llval,
+                        scratch.align,
+                        llval,
+                        align,
+                        op.layout,
+                        MemFlags::empty(),
+                    );
+                    (scratch.llval, scratch.align, true)
+                } else {
+                    (llval, align, true)
+                }
+            }
+        };
+
+        if by_ref && !arg.is_indirect() {
+            // Have to load the argument, maybe while casting it.
+            if let PassMode::Cast(ty) = arg.mode {
+                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
+                llval = bx.load(addr, align.min(arg.layout.align.abi));
+            } else {
+                // We can't use `PlaceRef::load` here because the argument
+                // may have a type we don't treat as immediate, but the ABI
+                // used for this call is passing it by-value. In that case,
+                // the load would just produce `OperandValue::Ref` instead
+                // of the `OperandValue::Immediate` we need for the call.
+                llval = bx.load(llval, align);
+                if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+                    if scalar.is_bool() {
+                        bx.range_metadata(llval, 0..2);
+                    }
+                }
+                // We store bools as `i8` so we need to truncate to `i1`.
+                llval = base::to_immediate(bx, llval, arg.layout);
+            }
+        }
+
+        llargs.push(llval);
+    }
+
+    /// Expands a single tuple operand into one call argument per tuple field,
+    /// delegating each field to `codegen_argument`. Presumably used when a
+    /// call site passes its arguments packed in a tuple (e.g. the "rust-call"
+    /// ABI spread) — TODO confirm against callers.
+    fn codegen_arguments_untupled(
+        &mut self,
+        bx: &mut Bx,
+        operand: &mir::Operand<'tcx>,
+        llargs: &mut Vec<Bx::Value>,
+        args: &[ArgAbi<'tcx, Ty<'tcx>>],
+    ) {
+        let tuple = self.codegen_operand(bx, operand);
+
+        // Handle both by-ref and immediate tuples.
+        if let Ref(llval, None, align) = tuple.val {
+            // By-ref (sized) tuple: project each field and load it.
+            let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+            for i in 0..tuple.layout.fields.count() {
+                let field_ptr = tuple_ptr.project_field(bx, i);
+                let field = bx.load_operand(field_ptr);
+                self.codegen_argument(bx, field, llargs, &args[i]);
+            }
+        } else if let Ref(_, Some(_), _) = tuple.val {
+            // An unsized tuple (has metadata) cannot occur here.
+            bug!("closure arguments must be sized")
+        } else {
+            // If the tuple is immediate, the elements are as well.
+            for i in 0..tuple.layout.fields.count() {
+                let op = tuple.extract_field(bx, i);
+                self.codegen_argument(bx, op, llargs, &args[i]);
+            }
+        }
+    }
+
+    /// Returns the caller-location operand for this frame: the cached
+    /// `self.caller_location` if one was passed in, otherwise a constant
+    /// `Location` built from the outermost expansion site of `span`
+    /// (file name, 1-based line, 1-based column).
+    fn get_caller_location(&mut self, bx: &mut Bx, span: Span) -> OperandRef<'tcx, Bx::Value> {
+        self.caller_location.unwrap_or_else(|| {
+            // Walk out of macro expansions to the span the user actually wrote.
+            let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+            let caller = bx.tcx().sess.source_map().lookup_char_pos(topmost.lo());
+            let const_loc = bx.tcx().const_caller_location((
+                Symbol::intern(&caller.file.name.to_string()),
+                caller.line as u32,
+                caller.col_display as u32 + 1,
+            ));
+            OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
+        })
+    }
+
+    /// Lazily allocates (and caches in `self.personality_slot`) the stack
+    /// slot used by landing pads to stash the exception pointer/selector
+    /// pair, typed as the tuple `(*mut u8, i32)`.
+    fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
+        let cx = bx.cx();
+        if let Some(slot) = self.personality_slot {
+            slot
+        } else {
+            let layout = cx.layout_of(
+                cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
+            );
+            let slot = PlaceRef::alloca(bx, layout);
+            self.personality_slot = Some(slot);
+            slot
+        }
+    }
+
+    /// Returns the landing-pad wrapper around the given basic block.
+    ///
+    /// The result is memoized in `self.landing_pads`, so each target block
+    /// gets at most one landing pad.
+    ///
+    /// No-op in MSVC SEH scheme.
+    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Bx::BasicBlock {
+        if let Some(block) = self.landing_pads[target_bb] {
+            return block;
+        }
+
+        let block = self.blocks[target_bb];
+        let landing_pad = self.landing_pad_uncached(block);
+        self.landing_pads[target_bb] = Some(landing_pad);
+        landing_pad
+    }
+
+    /// Builds a fresh `cleanup` block containing a landing pad that stores
+    /// the exception pointer/selector pair into the personality slot and then
+    /// branches to `target_bb`. Reaching this under MSVC SEH is a bug — that
+    /// scheme does not use landing pads.
+    fn landing_pad_uncached(&mut self, target_bb: Bx::BasicBlock) -> Bx::BasicBlock {
+        if base::wants_msvc_seh(self.cx.sess()) {
+            span_bug!(self.mir.span, "landing pad was not inserted?")
+        }
+
+        let mut bx = self.new_block("cleanup");
+
+        let llpersonality = self.cx.eh_personality();
+        let llretty = self.landing_pad_type();
+        let lp = bx.landing_pad(llretty, llpersonality, 1);
+        bx.set_cleanup(lp);
+
+        // Persist the (exception pointer, selector) pair for later resume.
+        let slot = self.get_personality_slot(&mut bx);
+        slot.storage_live(&mut bx);
+        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+
+        bx.br(target_bb);
+        bx.llbb()
+    }
+
+    /// The `{ i8*, i32 }` struct type produced by a landing-pad instruction
+    /// (exception pointer + selector).
+    fn landing_pad_type(&self) -> Bx::Type {
+        let cx = self.cx;
+        cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
+    }
+
+    /// Lazily creates (and caches in `self.unreachable_block`) a block that
+    /// contains only an `unreachable` terminator.
+    fn unreachable_block(&mut self) -> Bx::BasicBlock {
+        self.unreachable_block.unwrap_or_else(|| {
+            let mut bx = self.new_block("unreachable");
+            bx.unreachable();
+            self.unreachable_block = Some(bx.llbb());
+            bx.llbb()
+        })
+    }
+
+    /// Creates a brand-new basic block (with builder) named `name` in the
+    /// current function.
+    pub fn new_block(&self, name: &str) -> Bx {
+        Bx::new_block(self.cx, self.llfn, name)
+    }
+
+    /// Returns a builder positioned at the end of the pre-created backend
+    /// block corresponding to MIR block `bb`.
+    pub fn build_block(&self, bb: mir::BasicBlock) -> Bx {
+        let mut bx = Bx::with_cx(self.cx);
+        bx.position_at_end(self.blocks[bb]);
+        bx
+    }
+
+    /// Decides where the return value of an upcoming call should be stored,
+    /// and (for indirect returns) pushes the destination pointer onto
+    /// `llargs`. Returns a `ReturnDest` describing what `store_return` must
+    /// do after the call.
+    fn make_return_dest(
+        &mut self,
+        bx: &mut Bx,
+        dest: mir::Place<'tcx>,
+        fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
+        llargs: &mut Vec<Bx::Value>,
+        is_intrinsic: bool,
+    ) -> ReturnDest<'tcx, Bx::Value> {
+        // If the return is ignored, we can just return a do-nothing `ReturnDest`.
+        if fn_ret.is_ignore() {
+            return ReturnDest::Nothing;
+        }
+        let dest = if let Some(index) = dest.as_local() {
+            match self.locals[index] {
+                LocalRef::Place(dest) => dest,
+                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+                LocalRef::Operand(None) => {
+                    // Handle temporary places, specifically `Operand` ones, as
+                    // they don't have `alloca`s.
+                    return if fn_ret.is_indirect() {
+                        // Odd, but possible, case, we have an operand temporary,
+                        // but the calling convention has an indirect return.
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+                        tmp.storage_live(bx);
+                        llargs.push(tmp.llval);
+                        ReturnDest::IndirectOperand(tmp, index)
+                    } else if is_intrinsic {
+                        // Currently, intrinsics always need a location to store
+                        // the result, so we create a temporary `alloca` for the
+                        // result.
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+                        tmp.storage_live(bx);
+                        ReturnDest::IndirectOperand(tmp, index)
+                    } else {
+                        ReturnDest::DirectOperand(index)
+                    };
+                }
+                LocalRef::Operand(Some(_)) => {
+                    bug!("place local already assigned to");
+                }
+            }
+        } else {
+            // Not a bare local: codegen the full place expression.
+            self.codegen_place(
+                bx,
+                mir::PlaceRef { local: dest.local, projection: &dest.projection },
+            )
+        };
+        if fn_ret.is_indirect() {
+            if dest.align < dest.layout.align.abi {
+                // Currently, MIR code generation does not create calls
+                // that store directly to fields of packed structs (in
+                // fact, the calls it creates write only to temps).
+                //
+                // If someone changes that, please update this code path
+                // to create a temporary.
+                span_bug!(self.mir.span, "can't directly store to unaligned value");
+            }
+            llargs.push(dest.llval);
+            ReturnDest::Nothing
+        } else {
+            ReturnDest::Store(dest)
+        }
+    }
+
+    /// Codegens a transmute of `src` into the place `dst`. When `dst` is an
+    /// operand-only local (no alloca), a temporary alloca is used so the
+    /// result can be loaded back into an operand.
+    fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
+        if let Some(index) = dst.as_local() {
+            match self.locals[index] {
+                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
+                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
+                LocalRef::Operand(None) => {
+                    // Operand local without a backing alloca: round-trip the
+                    // value through a short-lived stack slot.
+                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
+                    assert!(!dst_layout.ty.has_erasable_regions());
+                    let place = PlaceRef::alloca(bx, dst_layout);
+                    place.storage_live(bx);
+                    self.codegen_transmute_into(bx, src, place);
+                    let op = bx.load_operand(place);
+                    place.storage_dead(bx);
+                    self.locals[index] = LocalRef::Operand(Some(op));
+                    self.debug_introduce_local(bx, index);
+                }
+                LocalRef::Operand(Some(op)) => {
+                    assert!(op.layout.is_zst(), "assigning to initialized SSAtemp");
+                }
+            }
+        } else {
+            let dst = self.codegen_place(bx, dst.as_ref());
+            self.codegen_transmute_into(bx, src, dst);
+        }
+    }
+
+    /// Stores `src` into `dst` reinterpreted at the source's type: the
+    /// destination pointer is cast to the source's backend type and the store
+    /// uses the smaller of the two alignments.
+    fn codegen_transmute_into(
+        &mut self,
+        bx: &mut Bx,
+        src: &mir::Operand<'tcx>,
+        dst: PlaceRef<'tcx, Bx::Value>,
+    ) {
+        let src = self.codegen_operand(bx, src);
+        let llty = bx.backend_type(src.layout);
+        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+        let align = src.layout.align.abi.min(dst.align);
+        src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
+    }
+
+    /// Stores the return value of a function call into its final location,
+    /// as previously decided by `make_return_dest`.
+    fn store_return(
+        &mut self,
+        bx: &mut Bx,
+        dest: ReturnDest<'tcx, Bx::Value>,
+        ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        llval: Bx::Value,
+    ) {
+        use self::ReturnDest::*;
+
+        match dest {
+            Nothing => (),
+            Store(dst) => bx.store_arg(&ret_abi, llval, dst),
+            IndirectOperand(tmp, index) => {
+                // The call wrote into `tmp`; load it back into the operand local.
+                let op = bx.load_operand(tmp);
+                tmp.storage_dead(bx);
+                self.locals[index] = LocalRef::Operand(Some(op));
+                self.debug_introduce_local(bx, index);
+            }
+            DirectOperand(index) => {
+                // If there is a cast, we have to store and reload.
+                let op = if let PassMode::Cast(_) = ret_abi.mode {
+                    let tmp = PlaceRef::alloca(bx, ret_abi.layout);
+                    tmp.storage_live(bx);
+                    bx.store_arg(&ret_abi, llval, tmp);
+                    let op = bx.load_operand(tmp);
+                    tmp.storage_dead(bx);
+                    op
+                } else {
+                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
+                };
+                self.locals[index] = LocalRef::Operand(Some(op));
+                self.debug_introduce_local(bx, index);
+            }
+        }
+    }
+}
+
+/// Where `store_return` should put a call's return value.
+enum ReturnDest<'tcx, V> {
+    /// Do nothing; the return value is indirect or ignored.
+    Nothing,
+    /// Store the return value to the pointer.
+    Store(PlaceRef<'tcx, V>),
+    /// Store an indirect return value to an operand local place.
+    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
+    /// Store a direct return value to an operand local place.
+    DirectOperand(mir::Local),
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
new file mode 100644
index 00000000000..4943e279c7e
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -0,0 +1,91 @@
+use crate::mir::operand::OperandRef;
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::abi::Abi;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Evaluates a MIR constant and wraps the result as an `OperandRef` at
+    /// the constant's monomorphized type.
+    pub fn eval_mir_constant_to_operand(
+        &mut self,
+        bx: &mut Bx,
+        constant: &mir::Constant<'tcx>,
+    ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
+        let val = self.eval_mir_constant(constant)?;
+        let ty = self.monomorphize(&constant.literal.ty);
+        Ok(OperandRef::from_const(bx, val, ty))
+    }
+
+    /// Monomorphizes and evaluates a MIR constant to a `ConstValue`.
+    /// Unevaluated constants are resolved via `const_eval_resolve`; an error
+    /// is reported at the constant's span unless it came from a promoted
+    /// (whose error is reported elsewhere).
+    pub fn eval_mir_constant(
+        &mut self,
+        constant: &mir::Constant<'tcx>,
+    ) -> Result<ConstValue<'tcx>, ErrorHandled> {
+        match self.monomorphize(&constant.literal).val {
+            ty::ConstKind::Unevaluated(def, substs, promoted) => self
+                .cx
+                .tcx()
+                .const_eval_resolve(ty::ParamEnv::reveal_all(), def, substs, promoted, None)
+                .map_err(|err| {
+                    if promoted.is_none() {
+                        self.cx
+                            .tcx()
+                            .sess
+                            .span_err(constant.span, "erroneous constant encountered");
+                    }
+                    err
+                }),
+            ty::ConstKind::Value(value) => Ok(value),
+            // After monomorphization only `Unevaluated` or `Value` may remain.
+            err => span_bug!(
+                constant.span,
+                "encountered bad ConstKind after monomorphizing: {:?}",
+                err
+            ),
+        }
+    }
+
+    /// process constant containing SIMD shuffle indices
+    ///
+    /// Destructures the (array-typed) constant into per-element scalars and
+    /// builds a backend constant struct from them; on evaluation failure an
+    /// error is emitted and an undef value of the right type is returned.
+    pub fn simd_shuffle_indices(
+        &mut self,
+        bx: &Bx,
+        span: Span,
+        ty: Ty<'tcx>,
+        constant: Result<ConstValue<'tcx>, ErrorHandled>,
+    ) -> (Bx::Value, Ty<'tcx>) {
+        constant
+            .map(|val| {
+                let field_ty = ty.builtin_index().unwrap();
+                let c = ty::Const::from_value(bx.tcx(), val, ty);
+                let values: Vec<_> = bx
+                    .tcx()
+                    .destructure_const(ty::ParamEnv::reveal_all().and(&c))
+                    .fields
+                    .iter()
+                    .map(|field| {
+                        if let Some(prim) = field.val.try_to_scalar() {
+                            let layout = bx.layout_of(field_ty);
+                            let scalar = match layout.abi {
+                                Abi::Scalar(ref x) => x,
+                                _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
+                            };
+                            bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
+                        } else {
+                            bug!("simd shuffle field {:?}", field)
+                        }
+                    })
+                    .collect();
+                let llval = bx.const_struct(&values, false);
+                (llval, c.ty)
+            })
+            .unwrap_or_else(|_| {
+                bx.tcx().sess.span_err(span, "could not evaluate shuffle_indices at compile time");
+                // We've errored, so we don't have to produce working code.
+                let ty = self.monomorphize(&ty);
+                let llty = bx.backend_type(bx.layout_of(ty));
+                (bx.const_undef(llty), ty)
+            })
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
new file mode 100644
index 00000000000..a2ad27b925c
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -0,0 +1,35 @@
+use crate::traits::*;
+
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::Coverage;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Codegens a MIR `Coverage` statement: registers the code region with
+    /// the backend's coverage map and, for `Counter`, also emits an
+    /// `instrprof.increment` intrinsic call.
+    pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage) {
+        let Coverage { kind, code_region } = coverage;
+        match kind {
+            CoverageKind::Counter { function_source_hash, id } => {
+                bx.add_counter_region(self.instance, function_source_hash, id, code_region);
+
+                let coverageinfo = bx.tcx().coverageinfo(self.instance.def_id());
+
+                // Arguments for the increment intrinsic: function name var,
+                // source hash, total counter count, and this counter's index.
+                let fn_name = bx.create_pgo_func_name_var(self.instance);
+                let hash = bx.const_u64(function_source_hash);
+                let num_counters = bx.const_u32(coverageinfo.num_counters);
+                let id = bx.const_u32(u32::from(id));
+                debug!(
+                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                    fn_name, hash, num_counters, id,
+                );
+                bx.instrprof_increment(fn_name, hash, num_counters, id);
+            }
+            CoverageKind::Expression { id, lhs, op, rhs } => {
+                // Expressions are map-only; no runtime instrumentation.
+                bx.add_counter_expression_region(self.instance, id, lhs, op, rhs, code_region);
+            }
+            CoverageKind::Unreachable => {
+                bx.add_unreachable_region(self.instance, code_region);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
new file mode 100644
index 00000000000..d8a530d98fa
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -0,0 +1,361 @@
+use crate::traits::*;
+use rustc_hir::def_id::CrateNum;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::abi::{LayoutOf, Size};
+
+use super::operand::OperandValue;
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+/// Per-function debuginfo state: one `DebugScope` per MIR source scope,
+/// plus the crate the function being codegen'd belongs to.
+pub struct FunctionDebugContext<D> {
+    /// Debug scope metadata, indexed by MIR `SourceScope`.
+    pub scopes: IndexVec<mir::SourceScope, DebugScope<D>>,
+    pub defining_crate: CrateNum,
+}
+
+/// Whether a debug variable is a function argument (with its 1-based index)
+/// or an ordinary local variable.
+#[derive(Copy, Clone)]
+pub enum VariableKind {
+    ArgumentVariable(usize /*index*/),
+    LocalVariable,
+}
+
+/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
+#[derive(Copy, Clone)]
+pub struct PerLocalVarDebugInfo<'tcx, D> {
+    /// User-visible variable name.
+    pub name: Symbol,
+    /// Scope and span the variable's debuginfo is attached to.
+    pub source_info: mir::SourceInfo,
+
+    /// `DIVariable` returned by `create_dbg_var`.
+    pub dbg_var: Option<D>,
+
+    /// `.place.projection` from `mir::VarDebugInfo`.
+    pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
+}
+
+/// Backend debug-scope metadata for one MIR source scope, plus the byte
+/// range of the file it belongs to.
+#[derive(Clone, Copy, Debug)]
+pub struct DebugScope<D> {
+    pub scope_metadata: Option<D>,
+    // Start and end offsets of the file to which this DIScope belongs.
+    // These are used to quickly determine whether some span refers to the same file.
+    pub file_start_pos: BytePos,
+    pub file_end_pos: BytePos,
+}
+
+impl<D> DebugScope<D> {
+    /// A scope is usable only if backend metadata was actually created for it.
+    pub fn is_valid(&self) -> bool {
+        self.scope_metadata.is_some()
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Points the builder's source location at `source_info` (no-op when
+    /// debuginfo is disabled and `debug_loc` yields no scope).
+    pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
+        let (scope, span) = self.debug_loc(source_info);
+        if let Some(scope) = scope {
+            bx.set_source_location(scope, span);
+        }
+    }
+
+    /// Resolves a MIR `SourceInfo` to the debug scope and span to attach to
+    /// generated code. Returns `(None, span)` when debuginfo emission is
+    /// disabled; spans inside macro expansions are rewritten to the
+    /// outermost expansion site for better line stepping (unless
+    /// `-Z debug-macros` is set).
+    pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
+        // Bail out if debug info emission is not enabled.
+        match self.debug_context {
+            None => return (None, source_info.span),
+            Some(_) => {}
+        }
+
+        // In order to have a good line stepping behavior in debugger, we overwrite debug
+        // locations of macro expansions with that of the outermost expansion site
+        // (unless the crate is being compiled with `-Z debug-macros`).
+        if !source_info.span.from_expansion() || self.cx.sess().opts.debugging_opts.debug_macros {
+            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
+            (scope, source_info.span)
+        } else {
+            // Walk up the macro expansion chain until we reach a non-expanded span.
+            // We also stop at the function body level because no line stepping can occur
+            // at the level above that.
+            let span = rustc_span::hygiene::walk_chain(source_info.span, self.mir.span.ctxt());
+            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo());
+            // Use span of the outermost expansion site, while keeping the original lexical scope.
+            (scope, span)
+        }
+    }
+
+    // DILocations inherit source file name from the parent DIScope.  Due to macro expansions
+    // it may so happen that the current span belongs to a different file than the DIScope
+    // corresponding to span's containing source scope.  If so, we need to create a DIScope
+    // "extension" into that file.
+    //
+    // Returns `None` only when there is no debug context at all.
+    fn scope_metadata_for_loc(
+        &self,
+        scope_id: mir::SourceScope,
+        pos: BytePos,
+    ) -> Option<Bx::DIScope> {
+        let debug_context = self.debug_context.as_ref()?;
+        let scope_metadata = debug_context.scopes[scope_id].scope_metadata;
+        // `pos` outside the scope's file range means the span comes from a
+        // different file; extend the scope into that file.
+        if pos < debug_context.scopes[scope_id].file_start_pos
+            || pos >= debug_context.scopes[scope_id].file_end_pos
+        {
+            let sm = self.cx.sess().source_map();
+            let defining_crate = debug_context.defining_crate;
+            Some(self.cx.extend_scope_to_file(
+                scope_metadata.unwrap(),
+                &sm.lookup_char_pos(pos).file,
+                defining_crate,
+            ))
+        } else {
+            scope_metadata
+        }
+    }
+
+    /// Apply debuginfo and/or name, after creating the `alloca` for a local,
+    /// or initializing the local with an operand (whichever applies).
+    ///
+    /// This names the local's backend value(s), optionally synthesizes a
+    /// fallback debug variable for unnamed arguments, spills operand locals
+    /// to the stack when full debuginfo is requested, and finally emits
+    /// `dbg_var_addr` for each associated `VarDebugInfo` projection.
+    pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
+        let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
+
+        // FIXME(eddyb) maybe name the return place as `_0` or `return`?
+        if local == mir::RETURN_PLACE && !self.mir.local_decls[mir::RETURN_PLACE].is_user_variable()
+        {
+            return;
+        }
+
+        let vars = match &self.per_local_var_debug_info {
+            Some(per_local) => &per_local[local],
+            None => return,
+        };
+        let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied();
+        let has_proj = || vars.iter().any(|var| !var.projection.is_empty());
+
+        // For arguments with no user-visible `VarDebugInfo`, make up a
+        // nameless `ArgumentVariable` so they still show in the debugger.
+        let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
+            let arg_index = local.index() - 1;
+
+            // Add debuginfo even to unnamed arguments.
+            // FIXME(eddyb) is this really needed?
+            if arg_index == 0 && has_proj() {
+                // Hide closure environments from debuginfo.
+                // FIXME(eddyb) shouldn't `ArgumentVariable` indices
+                // be offset to account for the hidden environment?
+                None
+            } else if whole_local_var.is_some() {
+                // No need to make up anything, there is a `mir::VarDebugInfo`
+                // covering the whole local.
+                // FIXME(eddyb) take `whole_local_var.source_info.scope` into
+                // account, just in case it doesn't use `ArgumentVariable`
+                // (after #67586 gets fixed).
+                None
+            } else {
+                let name = kw::Invalid;
+                let decl = &self.mir.local_decls[local];
+                let (scope, span) = if full_debug_info {
+                    self.debug_loc(decl.source_info)
+                } else {
+                    (None, decl.source_info.span)
+                };
+                let dbg_var = scope.map(|scope| {
+                    // FIXME(eddyb) is this `+ 1` needed at all?
+                    let kind = VariableKind::ArgumentVariable(arg_index + 1);
+
+                    self.cx.create_dbg_var(
+                        self.debug_context.as_ref().unwrap(),
+                        name,
+                        self.monomorphize(&decl.ty),
+                        scope,
+                        kind,
+                        span,
+                    )
+                });
+
+                Some(PerLocalVarDebugInfo {
+                    name,
+                    source_info: decl.source_info,
+                    dbg_var,
+                    projection: ty::List::empty(),
+                })
+            }
+        } else {
+            None
+        };
+
+        let local_ref = &self.locals[local];
+
+        // Pick a human-readable name for the backend value(s), unless the
+        // session asked for fewer names.
+        let name = if bx.sess().fewer_names() {
+            None
+        } else {
+            Some(match whole_local_var.or(fallback_var) {
+                Some(var) if var.name != kw::Invalid => var.name.to_string(),
+                _ => format!("{:?}", local),
+            })
+        };
+
+        if let Some(name) = &name {
+            match local_ref {
+                LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
+                    bx.set_var_name(place.llval, name);
+                }
+                LocalRef::Operand(Some(operand)) => match operand.val {
+                    OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
+                        bx.set_var_name(x, name);
+                    }
+                    OperandValue::Pair(a, b) => {
+                        // FIXME(eddyb) these are scalar components,
+                        // maybe extract the high-level fields?
+                        bx.set_var_name(a, &(name.clone() + ".0"));
+                        bx.set_var_name(b, &(name.clone() + ".1"));
+                    }
+                },
+                LocalRef::Operand(None) => {}
+            }
+        }
+
+        // Everything below emits actual debug variables; skip without full
+        // debuginfo or when there is nothing to describe.
+        if !full_debug_info || vars.is_empty() && fallback_var.is_none() {
+            return;
+        }
+
+        let base = match local_ref {
+            LocalRef::Operand(None) => return,
+
+            LocalRef::Operand(Some(operand)) => {
+                // Don't spill operands onto the stack in naked functions.
+                // See: https://github.com/rust-lang/rust/issues/42779
+                let attrs = bx.tcx().codegen_fn_attrs(self.instance.def_id());
+                if attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+                    return;
+                }
+
+                // "Spill" the value onto the stack, for debuginfo,
+                // without forcing non-debuginfo uses of the local
+                // to also load from the stack every single time.
+                // FIXME(#68817) use `llvm.dbg.value` instead,
+                // at least for the cases which LLVM handles correctly.
+                let spill_slot = PlaceRef::alloca(bx, operand.layout);
+                if let Some(name) = name {
+                    bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+                }
+                operand.val.store(bx, spill_slot);
+                spill_slot
+            }
+
+            LocalRef::Place(place) => *place,
+
+            // FIXME(eddyb) add debuginfo for unsized places too.
+            LocalRef::UnsizedPlace(_) => return,
+        };
+
+        let vars = vars.iter().copied().chain(fallback_var);
+
+        // Walk each var's projection to compute the direct/indirect offsets
+        // that `dbg_var_addr` needs.
+        for var in vars {
+            let mut layout = base.layout;
+            let mut direct_offset = Size::ZERO;
+            // FIXME(eddyb) use smallvec here.
+            let mut indirect_offsets = vec![];
+
+            for elem in &var.projection[..] {
+                match *elem {
+                    mir::ProjectionElem::Deref => {
+                        indirect_offsets.push(Size::ZERO);
+                        layout = bx.cx().layout_of(
+                            layout
+                                .ty
+                                .builtin_deref(true)
+                                .unwrap_or_else(|| {
+                                    span_bug!(var.source_info.span, "cannot deref `{}`", layout.ty)
+                                })
+                                .ty,
+                        );
+                    }
+                    mir::ProjectionElem::Field(field, _) => {
+                        let i = field.index();
+                        let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
+                        *offset += layout.fields.offset(i);
+                        layout = layout.field(bx.cx(), i);
+                    }
+                    mir::ProjectionElem::Downcast(_, variant) => {
+                        layout = layout.for_variant(bx.cx(), variant);
+                    }
+                    _ => span_bug!(
+                        var.source_info.span,
+                        "unsupported var debuginfo place `{:?}`",
+                        mir::Place { local, projection: var.projection },
+                    ),
+                }
+            }
+
+            let (scope, span) = self.debug_loc(var.source_info);
+            if let Some(scope) = scope {
+                if let Some(dbg_var) = var.dbg_var {
+                    bx.dbg_var_addr(
+                        dbg_var,
+                        scope,
+                        base.llval,
+                        direct_offset,
+                        &indirect_offsets,
+                        span,
+                    );
+                }
+            }
+        }
+    }
+
+    pub fn debug_introduce_locals(&self, bx: &mut Bx) {
+        if bx.sess().opts.debuginfo == DebugInfo::Full || !bx.sess().fewer_names() {
+            for local in self.locals.indices() {
+                self.debug_introduce_local(bx, local);
+            }
+        }
+    }
+
+    /// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
+    pub fn compute_per_local_var_debug_info(
+        &self,
+    ) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
+        let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
+
+        if !full_debug_info && self.cx.sess().fewer_names() {
+            return None;
+        }
+
+        let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
+        for var in &self.mir.var_debug_info {
+            let (scope, span) = if full_debug_info {
+                self.debug_loc(var.source_info)
+            } else {
+                (None, var.source_info.span)
+            };
+            let dbg_var = scope.map(|scope| {
+                let place = var.place;
+                let var_ty = self.monomorphized_place_ty(place.as_ref());
+                let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+                    && place.projection.is_empty()
+                    && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
+                {
+                    let arg_index = place.local.index() - 1;
+
+                    // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+                    // offset in closures to account for the hidden environment?
+                    // Also, is this `+ 1` needed at all?
+                    VariableKind::ArgumentVariable(arg_index + 1)
+                } else {
+                    VariableKind::LocalVariable
+                };
+                self.cx.create_dbg_var(
+                    self.debug_context.as_ref().unwrap(),
+                    var.name,
+                    var_ty,
+                    scope,
+                    var_kind,
+                    span,
+                )
+            });
+
+            per_local[var.place.local].push(PerLocalVarDebugInfo {
+                name: var.name,
+                source_info: var.source_info,
+                dbg_var,
+                projection: var.place.projection,
+            });
+        }
+        Some(per_local)
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
new file mode 100644
index 00000000000..26e6c354702
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -0,0 +1,492 @@
+use crate::base;
+use crate::traits::*;
+use rustc_errors::ErrorReported;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
+use rustc_target::abi::call::{FnAbi, PassMode};
+use rustc_target::abi::HasDataLayout;
+
+use std::iter;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+
+use self::analyze::CleanupKind;
+use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
+use self::place::PlaceRef;
+use rustc_middle::mir::traversal;
+
+use self::operand::{OperandRef, OperandValue};
+
+/// Master context for codegenning from MIR.
+pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+    instance: Instance<'tcx>,
+
+    mir: &'tcx mir::Body<'tcx>,
+
+    debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
+
+    llfn: Bx::Function,
+
+    cx: &'a Bx::CodegenCx,
+
+    fn_abi: FnAbi<'tcx, Ty<'tcx>>,
+
+    /// When unwinding is initiated, we have to store this personality
+    /// value somewhere so that we can load it and re-use it in the
+    /// resume instruction. The personality is (afaik) some kind of
+    /// value used for C++ unwinding, which must filter by type: we
+    /// don't really care about it very much. Anyway, this value
+    /// contains an alloca into which the personality is stored and
+    /// then later loaded when generating the DIVERGE_BLOCK.
+    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
+
+    /// A `Block` for each MIR `BasicBlock`
+    blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,
+
+    /// The funclet status of each basic block
+    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
+
+    /// When targeting MSVC, this stores the cleanup info for each funclet
+    /// BB. This is initialized as we compute the funclets' head block in RPO.
+    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+
+    /// This stores the landing-pad block for a given BB, computed lazily on GNU
+    /// and eagerly on MSVC.
+    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+    /// Cached unreachable block
+    unreachable_block: Option<Bx::BasicBlock>,
+
+    /// The location where each MIR arg/var/tmp/ret is stored. This is
+    /// usually an `PlaceRef` representing an alloca, but not always:
+    /// sometimes we can skip the alloca and just store the value
+    /// directly using an `OperandRef`, which makes for tighter LLVM
+    /// IR. The conditions for using an `OperandRef` are as follows:
+    ///
+    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
+    /// - the operand must never be referenced indirectly
+    ///     - we should not take its address using the `&` operator
+    ///     - nor should it appear in a place path like `tmp.a`
+    /// - the operand must be defined by an rvalue that can generate immediate
+    ///   values
+    ///
+    /// Avoiding allocs can also be important for certain intrinsics,
+    /// notably `expect`.
+    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
+
+    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
+    /// This is `None` if no variable debuginfo/names are needed.
+    per_local_var_debug_info:
+        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
+
+    /// Caller location propagated if this function has `#[track_caller]`.
+    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn monomorphize<T>(&self, value: &T) -> T
+    where
+        T: Copy + TypeFoldable<'tcx>,
+    {
+        debug!("monomorphize: self.instance={:?}", self.instance);
+        if let Some(substs) = self.instance.substs_for_mir_body() {
+            self.cx.tcx().subst_and_normalize_erasing_regions(
+                substs,
+                ty::ParamEnv::reveal_all(),
+                &value,
+            )
+        } else {
+            self.cx.tcx().normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
+        }
+    }
+}
+
+enum LocalRef<'tcx, V> {
+    Place(PlaceRef<'tcx, V>),
+    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
+    /// `*p` is the fat pointer that references the actual unsized place.
+    /// Every time it is initialized, we have to reallocate the place
+    /// and update the fat pointer. That's the reason why it is indirect.
+    UnsizedPlace(PlaceRef<'tcx, V>),
+    Operand(Option<OperandRef<'tcx, V>>),
+}
+
+impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
+    fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> LocalRef<'tcx, V> {
+        if layout.is_zst() {
+            // Zero-size temporaries aren't always initialized, which
+            // doesn't matter because they don't contain data, but
+            // we need something in the operand.
+            LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
+        } else {
+            LocalRef::Operand(None)
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    cx: &'a Bx::CodegenCx,
+    instance: Instance<'tcx>,
+) {
+    assert!(!instance.substs.needs_infer());
+
+    let llfn = cx.get_fn(instance);
+
+    let mir = cx.tcx().instance_mir(instance.def);
+
+    let fn_abi = FnAbi::of_instance(cx, instance, &[]);
+    debug!("fn_abi: {:?}", fn_abi);
+
+    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
+
+    let mut bx = Bx::new_block(cx, llfn, "start");
+
+    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
+        bx.set_personality_fn(cx.eh_personality());
+    }
+
+    bx.sideeffect();
+
+    let cleanup_kinds = analyze::cleanup_kinds(&mir);
+    // Allocate a `Block` for every basic block, except
+    // the start block, if nothing loops back to it.
+    let reentrant_start_block = !mir.predecessors()[mir::START_BLOCK].is_empty();
+    let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> = mir
+        .basic_blocks()
+        .indices()
+        .map(|bb| {
+            if bb == mir::START_BLOCK && !reentrant_start_block {
+                bx.llbb()
+            } else {
+                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
+            }
+        })
+        .collect();
+
+    let (landing_pads, funclets) = create_funclets(&mir, &mut bx, &cleanup_kinds, &block_bxs);
+    let mut fx = FunctionCx {
+        instance,
+        mir,
+        llfn,
+        fn_abi,
+        cx,
+        personality_slot: None,
+        blocks: block_bxs,
+        unreachable_block: None,
+        cleanup_kinds,
+        landing_pads,
+        funclets,
+        locals: IndexVec::new(),
+        debug_context,
+        per_local_var_debug_info: None,
+        caller_location: None,
+    };
+
+    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info();
+
+    for const_ in &mir.required_consts {
+        if let Err(err) = fx.eval_mir_constant(const_) {
+            match err {
+                // errored or at least linted
+                ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {}
+                ErrorHandled::TooGeneric => {
+                    span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
+                }
+            }
+        }
+    }
+
+    let memory_locals = analyze::non_ssa_locals(&fx);
+
+    // Allocate variable and temp allocas
+    fx.locals = {
+        let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+
+        let mut allocate_local = |local| {
+            let decl = &mir.local_decls[local];
+            let layout = bx.layout_of(fx.monomorphize(&decl.ty));
+            assert!(!layout.ty.has_erasable_regions());
+
+            if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
+                debug!("alloc: {:?} (return place) -> place", local);
+                let llretptr = bx.get_param(0);
+                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
+            }
+
+            if memory_locals.contains(local) {
+                debug!("alloc: {:?} -> place", local);
+                if layout.is_unsized() {
+                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+                } else {
+                    LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+                }
+            } else {
+                debug!("alloc: {:?} -> operand", local);
+                LocalRef::new_operand(&mut bx, layout)
+            }
+        };
+
+        let retptr = allocate_local(mir::RETURN_PLACE);
+        iter::once(retptr)
+            .chain(args.into_iter())
+            .chain(mir.vars_and_temps_iter().map(allocate_local))
+            .collect()
+    };
+
+    // Apply debuginfo to the newly allocated locals.
+    fx.debug_introduce_locals(&mut bx);
+
+    // Branch to the START block, if it's not the entry block.
+    if reentrant_start_block {
+        bx.br(fx.blocks[mir::START_BLOCK]);
+    }
+
+    let rpo = traversal::reverse_postorder(&mir);
+    let mut visited = BitSet::new_empty(mir.basic_blocks().len());
+
+    // Codegen the body of each block using reverse postorder
+    for (bb, _) in rpo {
+        visited.insert(bb.index());
+        fx.codegen_block(bb);
+    }
+
+    // Remove blocks that haven't been visited, or have no
+    // predecessors.
+    for bb in mir.basic_blocks().indices() {
+        // Unreachable block
+        if !visited.contains(bb.index()) {
+            debug!("codegen_mir: block {:?} was not visited", bb);
+            unsafe {
+                bx.delete_basic_block(fx.blocks[bb]);
+            }
+        }
+    }
+}
+
+fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    mir: &'tcx mir::Body<'tcx>,
+    bx: &mut Bx,
+    cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
+    block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>,
+) -> (
+    IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+    IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+) {
+    block_bxs
+        .iter_enumerated()
+        .zip(cleanup_kinds)
+        .map(|((bb, &llbb), cleanup_kind)| {
+            match *cleanup_kind {
+                CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
+                _ => return (None, None),
+            }
+
+            let funclet;
+            let ret_llbb;
+            match mir[bb].terminator.as_ref().map(|t| &t.kind) {
+                // This is a basic block that we're aborting the program for,
+                // notably in an `extern` function. These basic blocks are inserted
+                // so that we assert that `extern` functions do indeed not panic,
+                // and if they do we abort the process.
+                //
+                // On MSVC these are tricky though (where we're doing funclets). If
+                // we were to do a cleanuppad (like below) the normal functions like
+                // `longjmp` would trigger the abort logic, terminating the
+                // program. Instead we insert the equivalent of `catch(...)` for C++
+                // which magically doesn't trigger when `longjmp` flies over this
+                // frame.
+                //
+                // Lots more discussion can be found on #48251 but this codegen is
+                // modeled after clang's for:
+                //
+                //      try {
+                //          foo();
+                //      } catch (...) {
+                //          bar();
+                //      }
+                Some(&mir::TerminatorKind::Abort) => {
+                    let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
+                    let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
+                    ret_llbb = cs_bx.llbb();
+
+                    let cs = cs_bx.catch_switch(None, None, 1);
+                    cs_bx.add_handler(cs, cp_bx.llbb());
+
+                    // The "null" here is actually a RTTI type descriptor for the
+                    // C++ personality function, but `catch (...)` has no type so
+                    // it's null. The 64 here is actually a bitfield which
+                    // represents that this is a catch-all block.
+                    let null = bx.const_null(
+                        bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space),
+                    );
+                    let sixty_four = bx.const_i32(64);
+                    funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
+                    cp_bx.br(llbb);
+                }
+                _ => {
+                    let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
+                    ret_llbb = cleanup_bx.llbb();
+                    funclet = cleanup_bx.cleanup_pad(None, &[]);
+                    cleanup_bx.br(llbb);
+                }
+            };
+
+            (Some(ret_llbb), Some(funclet))
+        })
+        .unzip()
+}
+
+/// Produces, for each argument, a `Value` pointing at the
+/// argument's value. As arguments are places, these are always
+/// indirect.
+fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    fx: &mut FunctionCx<'a, 'tcx, Bx>,
+    memory_locals: &BitSet<mir::Local>,
+) -> Vec<LocalRef<'tcx, Bx::Value>> {
+    let mir = fx.mir;
+    let mut idx = 0;
+    let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
+
+    let args = mir
+        .args_iter()
+        .enumerate()
+        .map(|(arg_index, local)| {
+            let arg_decl = &mir.local_decls[local];
+
+            if Some(local) == mir.spread_arg {
+                // This argument (e.g., the last argument in the "rust-call" ABI)
+                // is a tuple that was spread at the ABI level and now we have
+                // to reconstruct it into a tuple local variable, from multiple
+                // individual LLVM function arguments.
+
+                let arg_ty = fx.monomorphize(&arg_decl.ty);
+                let tupled_arg_tys = match arg_ty.kind {
+                    ty::Tuple(ref tys) => tys,
+                    _ => bug!("spread argument isn't a tuple?!"),
+                };
+
+                let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+                for i in 0..tupled_arg_tys.len() {
+                    let arg = &fx.fn_abi.args[idx];
+                    idx += 1;
+                    if arg.pad.is_some() {
+                        llarg_idx += 1;
+                    }
+                    let pr_field = place.project_field(bx, i);
+                    bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
+                }
+
+                return LocalRef::Place(place);
+            }
+
+            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
+                let arg_ty = fx.monomorphize(&arg_decl.ty);
+
+                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+                bx.va_start(va_list.llval);
+
+                return LocalRef::Place(va_list);
+            }
+
+            let arg = &fx.fn_abi.args[idx];
+            idx += 1;
+            if arg.pad.is_some() {
+                llarg_idx += 1;
+            }
+
+            if !memory_locals.contains(local) {
+                // We don't have to cast or keep the argument in the alloca.
+                // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
+                // of putting everything in allocas just so we can use llvm.dbg.declare.
+                let local = |op| LocalRef::Operand(Some(op));
+                match arg.mode {
+                    PassMode::Ignore => {
+                        return local(OperandRef::new_zst(bx, arg.layout));
+                    }
+                    PassMode::Direct(_) => {
+                        let llarg = bx.get_param(llarg_idx);
+                        llarg_idx += 1;
+                        return local(OperandRef::from_immediate_or_packed_pair(
+                            bx, llarg, arg.layout,
+                        ));
+                    }
+                    PassMode::Pair(..) => {
+                        let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
+                        llarg_idx += 2;
+
+                        return local(OperandRef {
+                            val: OperandValue::Pair(a, b),
+                            layout: arg.layout,
+                        });
+                    }
+                    _ => {}
+                }
+            }
+
+            if arg.is_sized_indirect() {
+                // Don't copy an indirect argument to an alloca, the caller
+                // already put it in a temporary alloca and gave it up.
+                // FIXME: lifetimes
+                let llarg = bx.get_param(llarg_idx);
+                llarg_idx += 1;
+                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
+            } else if arg.is_unsized_indirect() {
+                // As the storage for the indirect argument lives during
+                // the whole function call, we just copy the fat pointer.
+                let llarg = bx.get_param(llarg_idx);
+                llarg_idx += 1;
+                let llextra = bx.get_param(llarg_idx);
+                llarg_idx += 1;
+                let indirect_operand = OperandValue::Pair(llarg, llextra);
+
+                let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
+                indirect_operand.store(bx, tmp);
+                LocalRef::UnsizedPlace(tmp)
+            } else {
+                let tmp = PlaceRef::alloca(bx, arg.layout);
+                bx.store_fn_arg(arg, &mut llarg_idx, tmp);
+                LocalRef::Place(tmp)
+            }
+        })
+        .collect::<Vec<_>>();
+
+    if fx.instance.def.requires_caller_location(bx.tcx()) {
+        assert_eq!(
+            fx.fn_abi.args.len(),
+            args.len() + 1,
+            "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
+        );
+
+        let arg = fx.fn_abi.args.last().unwrap();
+        match arg.mode {
+            PassMode::Direct(_) => (),
+            _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
+        }
+
+        fx.caller_location = Some(OperandRef {
+            val: OperandValue::Immediate(bx.get_param(llarg_idx)),
+            layout: arg.layout,
+        });
+    }
+
+    args
+}
+
+mod analyze;
+mod block;
+pub mod constant;
+pub mod coverageinfo;
+pub mod debuginfo;
+pub mod operand;
+pub mod place;
+mod rvalue;
+mod statement;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
new file mode 100644
index 00000000000..937c7457c63
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -0,0 +1,471 @@
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_errors::ErrorReported;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled, Pointer, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Abi, Align, LayoutOf, Size};
+
+use std::fmt;
+
+/// The representation of a Rust value. The enum variant is in fact
+/// uniquely determined by the value's type, but is kept as a
+/// safety check.
+#[derive(Copy, Clone, Debug)]
+pub enum OperandValue<V> {
+    /// A reference to the actual operand. The data is guaranteed
+    /// to be valid for the operand's lifetime.
+    /// The second value, if any, is the extra data (vtable or length)
+    /// which indicates that it refers to an unsized rvalue.
+    Ref(V, Option<V>, Align),
+    /// A single LLVM value.
+    Immediate(V),
+    /// A pair of immediate LLVM values. Used by fat pointers too.
+    Pair(V, V),
+}
+
+/// An `OperandRef` is an "SSA" reference to a Rust value, along with
+/// its type.
+///
+/// NOTE: unless you know a value's type exactly, you should not
+/// generate LLVM opcodes acting on it and instead act via methods,
+/// to avoid nasty edge cases. In particular, using `Builder::store`
+/// directly is sure to cause problems -- use `OperandRef::store`
+/// instead.
+#[derive(Copy, Clone)]
+pub struct OperandRef<'tcx, V> {
+    // The value.
+    pub val: OperandValue<V>,
+
+    // The layout of value, based on its Rust type.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<V: CodegenObject> fmt::Debug for OperandRef<'tcx, V> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
+    pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> OperandRef<'tcx, V> {
+        assert!(layout.is_zst());
+        OperandRef {
+            val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))),
+            layout,
+        }
+    }
+
+    pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        val: ConstValue<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> Self {
+        let layout = bx.layout_of(ty);
+
+        if layout.is_zst() {
+            return OperandRef::new_zst(bx, layout);
+        }
+
+        let val = match val {
+            ConstValue::Scalar(x) => {
+                let scalar = match layout.abi {
+                    Abi::Scalar(ref x) => x,
+                    _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
+                };
+                let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
+                OperandValue::Immediate(llval)
+            }
+            ConstValue::Slice { data, start, end } => {
+                let a_scalar = match layout.abi {
+                    Abi::ScalarPair(ref a, _) => a,
+                    _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout),
+                };
+                let a = Scalar::from(Pointer::new(
+                    bx.tcx().create_memory_alloc(data),
+                    Size::from_bytes(start),
+                ));
+                let a_llval = bx.scalar_to_backend(
+                    a,
+                    a_scalar,
+                    bx.scalar_pair_element_backend_type(layout, 0, true),
+                );
+                let b_llval = bx.const_usize((end - start) as u64);
+                OperandValue::Pair(a_llval, b_llval)
+            }
+            ConstValue::ByRef { alloc, offset } => {
+                return bx.load_operand(bx.from_const_alloc(layout, alloc, offset));
+            }
+        };
+
+        OperandRef { val, layout }
+    }
+
+    /// Asserts that this operand refers to a scalar and returns
+    /// a reference to its value.
+    pub fn immediate(self) -> V {
+        match self.val {
+            OperandValue::Immediate(s) => s,
+            _ => bug!("not immediate: {:?}", self),
+        }
+    }
+
+    pub fn deref<Cx: LayoutTypeMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
+        let projected_ty = self
+            .layout
+            .ty
+            .builtin_deref(true)
+            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self))
+            .ty;
+        let (llptr, llextra) = match self.val {
+            OperandValue::Immediate(llptr) => (llptr, None),
+            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
+            OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
+        };
+        let layout = cx.layout_of(projected_ty);
+        PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+    }
+
+    /// If this operand is a `Pair`, we return an aggregate with the two values.
+    /// For other cases, see `immediate`.
+    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+    ) -> V {
+        if let OperandValue::Pair(a, b) = self.val {
+            let llty = bx.cx().backend_type(self.layout);
+            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
+            // Reconstruct the immediate aggregate.
+            let mut llpair = bx.cx().const_undef(llty);
+            let imm_a = base::from_immediate(bx, a);
+            let imm_b = base::from_immediate(bx, b);
+            llpair = bx.insert_value(llpair, imm_a, 0);
+            llpair = bx.insert_value(llpair, imm_b, 1);
+            llpair
+        } else {
+            self.immediate()
+        }
+    }
+
+    /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
+    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        llval: V,
+        layout: TyAndLayout<'tcx>,
+    ) -> Self {
+        let val = if let Abi::ScalarPair(ref a, ref b) = layout.abi {
+            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
+
+            // Deconstruct the immediate aggregate.
+            let a_llval = bx.extract_value(llval, 0);
+            let a_llval = base::to_immediate_scalar(bx, a_llval, a);
+            let b_llval = bx.extract_value(llval, 1);
+            let b_llval = base::to_immediate_scalar(bx, b_llval, b);
+            OperandValue::Pair(a_llval, b_llval)
+        } else {
+            OperandValue::Immediate(llval)
+        };
+        OperandRef { val, layout }
+    }
+
+    pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        i: usize,
+    ) -> Self {
+        let field = self.layout.field(bx.cx(), i);
+        let offset = self.layout.fields.offset(i);
+
+        let mut val = match (self.val, &self.layout.abi) {
+            // If the field is ZST, it has no data.
+            _ if field.is_zst() => {
+                return OperandRef::new_zst(bx, field);
+            }
+
+            // Newtype of a scalar, scalar pair or vector.
+            (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
+                if field.size == self.layout.size =>
+            {
+                assert_eq!(offset.bytes(), 0);
+                self.val
+            }
+
+            // Extract a scalar component from a pair.
+            (OperandValue::Pair(a_llval, b_llval), &Abi::ScalarPair(ref a, ref b)) => {
+                if offset.bytes() == 0 {
+                    assert_eq!(field.size, a.value.size(bx.cx()));
+                    OperandValue::Immediate(a_llval)
+                } else {
+                    assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
+                    assert_eq!(field.size, b.value.size(bx.cx()));
+                    OperandValue::Immediate(b_llval)
+                }
+            }
+
+            // `#[repr(simd)]` types are also immediate.
+            (OperandValue::Immediate(llval), &Abi::Vector { .. }) => {
+                OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
+            }
+
+            _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
+        };
+
+        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+        // Bools in union fields needs to be truncated.
+        let to_immediate_or_cast = |bx: &mut Bx, val, ty| {
+            if ty == bx.cx().type_i1() { bx.trunc(val, ty) } else { bx.bitcast(val, ty) }
+        };
+
+        match val {
+            OperandValue::Immediate(ref mut llval) => {
+                *llval = to_immediate_or_cast(bx, *llval, bx.cx().immediate_backend_type(field));
+            }
+            OperandValue::Pair(ref mut a, ref mut b) => {
+                *a = to_immediate_or_cast(
+                    bx,
+                    *a,
+                    bx.cx().scalar_pair_element_backend_type(field, 0, true),
+                );
+                *b = to_immediate_or_cast(
+                    bx,
+                    *b,
+                    bx.cx().scalar_pair_element_backend_type(field, 1, true),
+                );
+            }
+            OperandValue::Ref(..) => bug!(),
+        }
+
+        OperandRef { val, layout: field }
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::empty());
+    }
+
+    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
+    }
+
+    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+    }
+
+    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
+    }
+
+    fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+        flags: MemFlags,
+    ) {
+        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
+        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+        // value is through `undef`, and store itself is useless.
+        if dest.layout.is_zst() {
+            return;
+        }
+        match self {
+            OperandValue::Ref(r, None, source_align) => {
+                base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
+            }
+            OperandValue::Ref(_, Some(_), _) => {
+                bug!("cannot directly store unsized values");
+            }
+            OperandValue::Immediate(s) => {
+                let val = base::from_immediate(bx, s);
+                bx.store_with_flags(val, dest.llval, dest.align, flags);
+            }
+            OperandValue::Pair(a, b) => {
+                let (a_scalar, b_scalar) = match dest.layout.abi {
+                    Abi::ScalarPair(ref a, ref b) => (a, b),
+                    _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
+                };
+                let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi);
+
+                let llptr = bx.struct_gep(dest.llval, 0);
+                let val = base::from_immediate(bx, a);
+                let align = dest.align;
+                bx.store_with_flags(val, llptr, align, flags);
+
+                let llptr = bx.struct_gep(dest.llval, 1);
+                let val = base::from_immediate(bx, b);
+                let align = dest.align.restrict_for_offset(b_offset);
+                bx.store_with_flags(val, llptr, align, flags);
+            }
+        }
+    }
+
+    pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        indirect_dest: PlaceRef<'tcx, V>,
+    ) {
+        debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
+        let flags = MemFlags::empty();
+
+        // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
+        let unsized_ty = indirect_dest
+            .layout
+            .ty
+            .builtin_deref(true)
+            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest))
+            .ty;
+
+        let (llptr, llextra) = if let OperandValue::Ref(llptr, Some(llextra), _) = self {
+            (llptr, llextra)
+        } else {
+            bug!("store_unsized called with a sized value")
+        };
+
+        // FIXME: choose an appropriate alignment, or use dynamic align somehow
+        let max_align = Align::from_bits(128).unwrap();
+        let min_align = Align::from_bits(8).unwrap();
+
+        // Allocate an appropriate region on the stack, and copy the value into it
+        let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+        let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+        bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
+
+        // Store the allocated region and the extra to the indirect place.
+        let indirect_operand = OperandValue::Pair(lldst, llextra);
+        indirect_operand.store(bx, indirect_dest);
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    fn maybe_codegen_consume_direct(
+        &mut self,
+        bx: &mut Bx,
+        place_ref: mir::PlaceRef<'tcx>,
+    ) -> Option<OperandRef<'tcx, Bx::Value>> {
+        debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
+
+        match self.locals[place_ref.local] {
+            LocalRef::Operand(Some(mut o)) => {
+                // Moves out of scalar and scalar pair fields are trivial.
+                for elem in place_ref.projection.iter() {
+                    match elem {
+                        mir::ProjectionElem::Field(ref f, _) => {
+                            o = o.extract_field(bx, f.index());
+                        }
+                        mir::ProjectionElem::Index(_)
+                        | mir::ProjectionElem::ConstantIndex { .. } => {
+                            // ZSTs don't require any actual memory access.
+                            // FIXME(eddyb) deduplicate this with the identical
+                            // checks in `codegen_consume` and `extract_field`.
+                            let elem = o.layout.field(bx.cx(), 0);
+                            if elem.is_zst() {
+                                o = OperandRef::new_zst(bx, elem);
+                            } else {
+                                return None;
+                            }
+                        }
+                        _ => return None,
+                    }
+                }
+
+                Some(o)
+            }
+            LocalRef::Operand(None) => {
+                bug!("use of {:?} before def", place_ref);
+            }
+            LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
+                // watch out for locals that do not have an
+                // alloca; they are handled somewhat differently
+                None
+            }
+        }
+    }
+
+    pub fn codegen_consume(
+        &mut self,
+        bx: &mut Bx,
+        place_ref: mir::PlaceRef<'tcx>,
+    ) -> OperandRef<'tcx, Bx::Value> {
+        debug!("codegen_consume(place_ref={:?})", place_ref);
+
+        let ty = self.monomorphized_place_ty(place_ref);
+        let layout = bx.cx().layout_of(ty);
+
+        // ZSTs don't require any actual memory access.
+        if layout.is_zst() {
+            return OperandRef::new_zst(bx, layout);
+        }
+
+        if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
+            return o;
+        }
+
+        // for most places, to consume them we just load them
+        // out from their home
+        let place = self.codegen_place(bx, place_ref);
+        bx.load_operand(place)
+    }
+
+    pub fn codegen_operand(
+        &mut self,
+        bx: &mut Bx,
+        operand: &mir::Operand<'tcx>,
+    ) -> OperandRef<'tcx, Bx::Value> {
+        debug!("codegen_operand(operand={:?})", operand);
+
+        match *operand {
+            mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
+                self.codegen_consume(bx, place.as_ref())
+            }
+
+            mir::Operand::Constant(ref constant) => {
+                self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|err| {
+                    match err {
+                        // errored or at least linted
+                        ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {}
+                        ErrorHandled::TooGeneric => {
+                            bug!("codegen encountered polymorphic constant")
+                        }
+                    }
+                    // Allow RalfJ to sleep soundly knowing that even refactorings that remove
+                    // the above error (or silence it under some conditions) will not cause UB.
+                    bx.abort();
+                    // We still have to return an operand but it doesn't matter,
+                    // this code is unreachable.
+                    let ty = self.monomorphize(&constant.literal.ty);
+                    let layout = bx.cx().layout_of(ty);
+                    bx.load_operand(PlaceRef::new_sized(
+                        bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
+                        layout,
+                    ))
+                })
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
new file mode 100644
index 00000000000..05656774f0e
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -0,0 +1,502 @@
+use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
+
+use crate::common::IntPredicate;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{LayoutOf, VariantIdx, Variants};
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+    /// A pointer to the contents of the place.
+    pub llval: V,
+
+    /// This place's extra data if it is unsized, or `None` if null.
+    pub llextra: Option<V>,
+
+    /// The monomorphized type of this place, including variant information.
+    pub layout: TyAndLayout<'tcx>,
+
+    /// The alignment we know for this place.
+    pub align: Align,
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
+        assert!(!layout.is_unsized());
+        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+    }
+
+    pub fn new_sized_aligned(
+        llval: V,
+        layout: TyAndLayout<'tcx>,
+        align: Align,
+    ) -> PlaceRef<'tcx, V> {
+        assert!(!layout.is_unsized());
+        PlaceRef { llval, llextra: None, layout, align }
+    }
+
+    // FIXME(eddyb) pass something else for the name so no work is done
+    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> Self {
+        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+        Self::new_sized(tmp, layout)
+    }
+
+    /// Returns a place for an indirect reference to an unsized place.
+    // FIXME(eddyb) pass something else for the name so no work is done
+    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> Self {
+        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
+        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
+        let ptr_layout = bx.cx().layout_of(ptr_ty);
+        Self::alloca(bx, ptr_layout)
+    }
+
+    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
+        if let FieldsShape::Array { count, .. } = self.layout.fields {
+            if self.layout.is_unsized() {
+                assert_eq!(count, 0);
+                self.llextra.unwrap()
+            } else {
+                cx.const_usize(count)
+            }
+        } else {
+            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
+        }
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+    /// Access a field, at a point when the value's case is known.
+    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        ix: usize,
+    ) -> Self {
+        let field = self.layout.field(bx.cx(), ix);
+        let offset = self.layout.fields.offset(ix);
+        let effective_field_align = self.align.restrict_for_offset(offset);
+
+        let mut simple = || {
+            // Unions and newtypes only use an offset of 0.
+            let llval = if offset.bytes() == 0 {
+                self.llval
+            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
+                // Offsets have to match either first or second field.
+                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
+                bx.struct_gep(self.llval, 1)
+            } else {
+                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            };
+            PlaceRef {
+                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
+                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+                layout: field,
+                align: effective_field_align,
+            }
+        };
+
+        // Simple cases, which don't need DST adjustment:
+        //   * no metadata available - just log the case
+        //   * known alignment - sized types, `[T]`, `str` or a foreign type
+        //   * packed struct - there is no alignment padding
+        match field.ty.kind {
+            _ if self.llextra.is_none() => {
+                debug!(
+                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
+                    ix, self.llval
+                );
+                return simple();
+            }
+            _ if !field.is_unsized() => return simple(),
+            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+            ty::Adt(def, _) => {
+                if def.repr.packed() {
+                    // FIXME(eddyb) generalize the adjustment when we
+                    // start supporting packing to larger alignments.
+                    assert_eq!(self.layout.align.abi.bytes(), 1);
+                    return simple();
+                }
+            }
+            _ => {}
+        }
+
+        // We need to get the pointer manually now.
+        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
+        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+        // because the field may have an arbitrary alignment in the LLVM representation
+        // anyway.
+        //
+        // To demonstrate:
+        //
+        //     struct Foo<T: ?Sized> {
+        //         x: u16,
+        //         y: T
+        //     }
+        //
+        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
+        // the `y` field has 16-bit alignment.
+
+        let meta = self.llextra;
+
+        let unaligned_offset = bx.cx().const_usize(offset.bytes());
+
+        // Get the alignment of the field
+        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
+
+        // Bump the unaligned offset up to the appropriate alignment using the
+        // following expression:
+        //
+        //     (unaligned offset + (align - 1)) & -align
+
+        // Calculate offset.
+        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
+        let and_lhs = bx.add(unaligned_offset, align_sub_1);
+        let and_rhs = bx.neg(unsized_align);
+        let offset = bx.and(and_lhs, and_rhs);
+
+        debug!("struct_field_ptr: DST field offset: {:?}", offset);
+
+        // Cast and adjust pointer.
+        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+        let byte_ptr = bx.gep(byte_ptr, &[offset]);
+
+        // Finally, cast back to the type expected.
+        let ll_fty = bx.cx().backend_type(field);
+        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+
+        PlaceRef {
+            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
+            llextra: self.llextra,
+            layout: field,
+            align: effective_field_align,
+        }
+    }
+
+    /// Obtain the actual discriminant of a value.
+    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        cast_to: Ty<'tcx>,
+    ) -> V {
+        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
+        if self.layout.abi.is_uninhabited() {
+            return bx.cx().const_undef(cast_to);
+        }
+        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
+            Variants::Single { index } => {
+                let discr_val = self
+                    .layout
+                    .ty
+                    .discriminant_for_variant(bx.cx().tcx(), index)
+                    .map_or(index.as_u32() as u128, |discr| discr.val);
+                return bx.cx().const_uint_big(cast_to, discr_val);
+            }
+            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // Read the tag/niche-encoded discriminant from memory.
+        let tag = self.project_field(bx, tag_field);
+        let tag = bx.load_operand(tag);
+
+        // Decode the discriminant (specifically if it's niche-encoded).
+        match *tag_encoding {
+            TagEncoding::Direct => {
+                let signed = match tag_scalar.value {
+                    // We use `i1` for bytes that are always `0` or `1`,
+                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+                    // let LLVM interpret the `i1` as signed, because
+                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
+                    Int(_, signed) => !tag_scalar.is_bool() && signed,
+                    _ => false,
+                };
+                bx.intcast(tag.immediate(), cast_to, signed)
+            }
+            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+                // Rebase from niche values to discriminants, and check
+                // whether the result is in range for the niche variants.
+                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
+                let tag = tag.immediate();
+
+                // We first compute the "relative discriminant" (wrt `niche_variants`),
+                // that is, if `n = niche_variants.end() - niche_variants.start()`,
+                // we remap `niche_start..=niche_start + n` (which may wrap around)
+                // to (non-wrap-around) `0..=n`, to be able to check whether the
+                // discriminant corresponds to a niche variant with one comparison.
+                // We also can't go directly to the (variant index) discriminant
+                // and check that it is in the range `niche_variants`, because
+                // that might not fit in the same type, on top of needing an extra
+                // comparison (see also the comment on `let niche_discr`).
+                let relative_discr = if niche_start == 0 {
+                    // Avoid subtracting `0`, which wouldn't work for pointers.
+                    // FIXME(eddyb) check the actual primitive type here.
+                    tag
+                } else {
+                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
+                };
+                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+                let is_niche = if relative_max == 0 {
+                    // Avoid calling `const_uint`, which wouldn't work for pointers.
+                    // Also use canonical == 0 instead of non-canonical u<= 0.
+                    // FIXME(eddyb) check the actual primitive type here.
+                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
+                } else {
+                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
+                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+                };
+
+                // NOTE(eddyb) this addition needs to be performed on the final
+                // type, in case the niche itself can't represent all variant
+                // indices (e.g. `u8` niche with more than `256` variants,
+                // but enough uninhabited variants so that the remaining variants
+                // fit in the niche).
+                // In other words, `niche_variants.end - niche_variants.start`
+                // is representable in the niche, but `niche_variants.end`
+                // might not be, in extreme cases.
+                let niche_discr = {
+                    let relative_discr = if relative_max == 0 {
+                        // HACK(eddyb) since we have only one niche, we know which
+                        // one it is, and we can avoid having a dynamic value here.
+                        bx.cx().const_uint(cast_to, 0)
+                    } else {
+                        bx.intcast(relative_discr, cast_to, false)
+                    };
+                    bx.add(
+                        relative_discr,
+                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
+                    )
+                };
+
+                bx.select(
+                    is_niche,
+                    niche_discr,
+                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+                )
+            }
+        }
+    }
+
+    /// Sets the discriminant for a new value of the given case of the given
+    /// representation.
+    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        variant_index: VariantIdx,
+    ) {
+        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
+            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+            // if that turns out to be helpful.
+            bx.abort();
+            return;
+        }
+        match self.layout.variants {
+            Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+                let ptr = self.project_field(bx, tag_field);
+                let to =
+                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
+                bx.store(
+                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
+                    ptr.llval,
+                    ptr.align,
+                );
+            }
+            Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+                tag_field,
+                ..
+            } => {
+                if variant_index != dataful_variant {
+                    if bx.cx().sess().target.target.arch == "arm"
+                        || bx.cx().sess().target.target.arch == "aarch64"
+                    {
+                        // FIXME(#34427): as workaround for LLVM bug on ARM,
+                        // use memset of 0 before assigning niche value.
+                        let fill_byte = bx.cx().const_u8(0);
+                        let size = bx.cx().const_usize(self.layout.size.bytes());
+                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
+                    }
+
+                    let niche = self.project_field(bx, tag_field);
+                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
+                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
+                    // FIXME(eddyb): check the actual primitive type here.
+                    let niche_llval = if niche_value == 0 {
+                        // HACK(eddyb): using `c_null` as it works on all types.
+                        bx.cx().const_null(niche_llty)
+                    } else {
+                        bx.cx().const_uint_big(niche_llty, niche_value)
+                    };
+                    OperandValue::Immediate(niche_llval).store(bx, niche);
+                }
+            }
+        }
+    }
+
+    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        llindex: V,
+    ) -> Self {
+        // Statically compute the offset if we can, otherwise just use the element size,
+        // as this will yield the lowest alignment.
+        let layout = self.layout.field(bx, 0);
+        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
+            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
+        } else {
+            layout.size
+        };
+
+        PlaceRef {
+            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
+            llextra: None,
+            layout,
+            align: self.align.restrict_for_offset(offset),
+        }
+    }
+
+    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        variant_index: VariantIdx,
+    ) -> Self {
+        let mut downcast = *self;
+        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
+
+        // Cast to the appropriate variant struct type.
+        let variant_ty = bx.cx().backend_type(downcast.layout);
+        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+        downcast
+    }
+
+    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+        bx.lifetime_start(self.llval, self.layout.size);
+    }
+
+    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+        bx.lifetime_end(self.llval, self.layout.size);
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_place(
+        &mut self,
+        bx: &mut Bx,
+        place_ref: mir::PlaceRef<'tcx>,
+    ) -> PlaceRef<'tcx, Bx::Value> {
+        debug!("codegen_place(place_ref={:?})", place_ref);
+        let cx = self.cx;
+        let tcx = self.cx.tcx();
+
+        let result = match place_ref {
+            mir::PlaceRef { local, projection: [] } => match self.locals[local] {
+                LocalRef::Place(place) => {
+                    return place;
+                }
+                LocalRef::UnsizedPlace(place) => {
+                    return bx.load_operand(place).deref(cx);
+                }
+                LocalRef::Operand(..) => {
+                    bug!("using operand local {:?} as place", place_ref);
+                }
+            },
+            mir::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
+                // Load the pointer from its location.
+                self.codegen_consume(bx, mir::PlaceRef { local, projection: proj_base })
+                    .deref(bx.cx())
+            }
+            mir::PlaceRef { local, projection: &[ref proj_base @ .., elem] } => {
+                // FIXME turn this recursion into iteration
+                let cg_base =
+                    self.codegen_place(bx, mir::PlaceRef { local, projection: proj_base });
+
+                match elem {
+                    mir::ProjectionElem::Deref => bug!(),
+                    mir::ProjectionElem::Field(ref field, _) => {
+                        cg_base.project_field(bx, field.index())
+                    }
+                    mir::ProjectionElem::Index(index) => {
+                        let index = &mir::Operand::Copy(mir::Place::from(index));
+                        let index = self.codegen_operand(bx, index);
+                        let llindex = index.immediate();
+                        cg_base.project_index(bx, llindex)
+                    }
+                    mir::ProjectionElem::ConstantIndex {
+                        offset,
+                        from_end: false,
+                        min_length: _,
+                    } => {
+                        let lloffset = bx.cx().const_usize(offset as u64);
+                        cg_base.project_index(bx, lloffset)
+                    }
+                    mir::ProjectionElem::ConstantIndex {
+                        offset,
+                        from_end: true,
+                        min_length: _,
+                    } => {
+                        let lloffset = bx.cx().const_usize(offset as u64);
+                        let lllen = cg_base.len(bx.cx());
+                        let llindex = bx.sub(lllen, lloffset);
+                        cg_base.project_index(bx, llindex)
+                    }
+                    mir::ProjectionElem::Subslice { from, to, from_end } => {
+                        let mut subslice =
+                            cg_base.project_index(bx, bx.cx().const_usize(from as u64));
+                        let projected_ty =
+                            PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
+                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
+
+                        if subslice.layout.is_unsized() {
+                            assert!(from_end, "slice subslices should be `from_end`");
+                            subslice.llextra = Some(bx.sub(
+                                cg_base.llextra.unwrap(),
+                                bx.cx().const_usize((from as u64) + (to as u64)),
+                            ));
+                        }
+
+                        // Cast the place pointer type to the new
+                        // array or slice type (`*[%_; new_len]`).
+                        subslice.llval = bx.pointercast(
+                            subslice.llval,
+                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+                        );
+
+                        subslice
+                    }
+                    mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
+                }
+            }
+        };
+        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
+        result
+    }
+
+    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
+        let tcx = self.cx.tcx();
+        let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, self.mir, tcx);
+        self.monomorphize(&place_ty.ty)
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
new file mode 100644
index 00000000000..71f924df119
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -0,0 +1,1006 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate, RealPredicate};
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_apfloat::{ieee, Float, Round, Status};
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir;
+use rustc_middle::ty::cast::{CastTy, IntTy};
+use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_span::symbol::sym;
+use rustc_target::abi::{Abi, Int, LayoutOf, Variants};
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Codegen `rvalue` and store the result into the memory slot `dest`.
+    ///
+    /// This handles the rvalues whose results are written directly to memory
+    /// (unsizing coercions into non-scalar-pair layouts, `Repeat`, `Aggregate`);
+    /// everything else is computed as an SSA operand via
+    /// `codegen_rvalue_operand` and then stored. Returns the (possibly moved)
+    /// builder.
+    pub fn codegen_rvalue(
+        &mut self,
+        mut bx: Bx,
+        dest: PlaceRef<'tcx, Bx::Value>,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> Bx {
+        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);
+
+        match *rvalue {
+            mir::Rvalue::Use(ref operand) => {
+                let cg_operand = self.codegen_operand(&mut bx, operand);
+                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
+                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
+                cg_operand.val.store(&mut bx, dest);
+                bx
+            }
+
+            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
+                // The destination necessarily contains a fat pointer, so if
+                // it's a scalar pair, it's a fat pointer or newtype thereof.
+                if bx.cx().is_backend_scalar_pair(dest.layout) {
+                    // Into-coerce of a thin pointer to a fat pointer -- just
+                    // use the operand path.
+                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                    temp.val.store(&mut bx, dest);
+                    return bx;
+                }
+
+                // Unsize of a nontrivial struct. I would prefer for
+                // this to be eliminated by MIR building, but
+                // `CoerceUnsized` can be passed by a where-clause,
+                // so the (generic) MIR may not be able to expand it.
+                let operand = self.codegen_operand(&mut bx, source);
+                match operand.val {
+                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
+                        // Unsize from an immediate structure. We don't
+                        // really need a temporary alloca here, but
+                        // avoiding it would require us to have
+                        // `coerce_unsized_into` use `extractvalue` to
+                        // index into the struct, and this case isn't
+                        // important enough for it.
+                        debug!("codegen_rvalue: creating ugly alloca");
+                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
+                        scratch.storage_live(&mut bx);
+                        operand.val.store(&mut bx, scratch);
+                        base::coerce_unsized_into(&mut bx, scratch, dest);
+                        scratch.storage_dead(&mut bx);
+                    }
+                    OperandValue::Ref(llref, None, align) => {
+                        // Source already lives in memory: coerce straight from its place.
+                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+                        base::coerce_unsized_into(&mut bx, source, dest);
+                    }
+                    OperandValue::Ref(_, Some(_), _) => {
+                        bug!("unsized coercion on an unsized rvalue");
+                    }
+                }
+                bx
+            }
+
+            mir::Rvalue::Repeat(ref elem, count) => {
+                let cg_elem = self.codegen_operand(&mut bx, elem);
+
+                // Do not generate the loop for zero-sized elements or empty arrays.
+                if dest.layout.is_zst() {
+                    return bx;
+                }
+
+                if let OperandValue::Immediate(v) = cg_elem.val {
+                    let zero = bx.const_usize(0);
+                    let start = dest.project_index(&mut bx, zero).llval;
+                    let size = bx.const_usize(dest.layout.size.bytes());
+
+                    // Use llvm.memset.p0i8.* to initialize all zero arrays
+                    if bx.cx().const_to_opt_uint(v) == Some(0) {
+                        let fill = bx.cx().const_u8(0);
+                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
+                        return bx;
+                    }
+
+                    // Use llvm.memset.p0i8.* to initialize byte arrays
+                    let v = base::from_immediate(&mut bx, v);
+                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
+                        bx.memset(start, v, size, dest.align, MemFlags::empty());
+                        return bx;
+                    }
+                }
+
+                // General case: evaluate the repeat count and emit a store loop.
+                let count =
+                    self.monomorphize(&count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+
+                bx.write_operand_repeatedly(cg_elem, count, dest)
+            }
+
+            mir::Rvalue::Aggregate(ref kind, ref operands) => {
+                let (dest, active_field_index) = match **kind {
+                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+                        dest.codegen_set_discr(&mut bx, variant_index);
+                        if adt_def.is_enum() {
+                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
+                        } else {
+                            (dest, active_field_index)
+                        }
+                    }
+                    _ => (dest, None),
+                };
+                for (i, operand) in operands.iter().enumerate() {
+                    let op = self.codegen_operand(&mut bx, operand);
+                    // Do not generate stores and GEPs for zero-sized fields.
+                    if !op.layout.is_zst() {
+                        // `active_field_index`, when set, overrides the positional
+                        // field index (presumably union initialization — TODO confirm).
+                        let field_index = active_field_index.unwrap_or(i);
+                        let field = dest.project_field(&mut bx, field_index);
+                        op.val.store(&mut bx, field);
+                    }
+                }
+                bx
+            }
+
+            _ => {
+                // Everything else yields an SSA operand; compute it, then store it.
+                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                temp.val.store(&mut bx, dest);
+                bx
+            }
+        }
+    }
+
+    /// Codegen `rvalue` into the unsized destination `indirect_dest`.
+    ///
+    /// Only `Rvalue::Use` can be assigned to an unsized place; any other
+    /// rvalue reaching this path is a compiler bug.
+    pub fn codegen_rvalue_unsized(
+        &mut self,
+        mut bx: Bx,
+        indirect_dest: PlaceRef<'tcx, Bx::Value>,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> Bx {
+        debug!(
+            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
+            indirect_dest.llval, rvalue
+        );
+
+        if let mir::Rvalue::Use(ref operand) = *rvalue {
+            let operand = self.codegen_operand(&mut bx, operand);
+            operand.val.store_unsized(&mut bx, indirect_dest);
+            bx
+        } else {
+            bug!("unsized assignment other than `Rvalue::Use`")
+        }
+    }
+
+    /// Codegen `rvalue` as an SSA `OperandRef` (immediate, pair, or ZST).
+    ///
+    /// The caller must have checked `rvalue_creates_operand`; rvalues that
+    /// need a memory destination go through `codegen_rvalue` instead.
+    /// Returns the (possibly moved) builder together with the operand.
+    pub fn codegen_rvalue_operand(
+        &mut self,
+        mut bx: Bx,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+        assert!(
+            self.rvalue_creates_operand(rvalue, DUMMY_SP),
+            "cannot codegen {:?} to operand",
+            rvalue,
+        );
+
+        match *rvalue {
+            // Casts: dispatch on the cast kind; `cast` is the target layout.
+            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
+                let operand = self.codegen_operand(&mut bx, source);
+                debug!("cast operand is {:?}", operand);
+                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
+
+                let val = match *kind {
+                    // `FnDef` (a zero-sized item type) -> actual function address.
+                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+                        match operand.layout.ty.kind {
+                            ty::FnDef(def_id, substs) => {
+                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
+                                    bug!("reifying a fn ptr that requires const arguments");
+                                }
+                                let instance = ty::Instance::resolve_for_fn_ptr(
+                                    bx.tcx(),
+                                    ty::ParamEnv::reveal_all(),
+                                    def_id,
+                                    substs,
+                                )
+                                .unwrap()
+                                .polymorphize(bx.cx().tcx());
+                                OperandValue::Immediate(bx.get_fn_addr(instance))
+                            }
+                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
+                        }
+                    }
+                    // Non-capturing closure -> plain fn pointer (via its `FnOnce` shim).
+                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
+                        match operand.layout.ty.kind {
+                            ty::Closure(def_id, substs) => {
+                                let instance = Instance::resolve_closure(
+                                    bx.cx().tcx(),
+                                    def_id,
+                                    substs,
+                                    ty::ClosureKind::FnOnce,
+                                )
+                                .polymorphize(bx.cx().tcx());
+                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
+                            }
+                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
+                        }
+                    }
+                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+                        // This is a no-op at the LLVM level.
+                        operand.val
+                    }
+                    mir::CastKind::Pointer(PointerCast::Unsize) => {
+                        assert!(bx.cx().is_backend_scalar_pair(cast));
+                        match operand.val {
+                            OperandValue::Pair(lldata, llextra) => {
+                                // unsize from a fat pointer -- this is a
+                                // "trait-object-to-supertrait" coercion, for
+                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.
+
+                                // HACK(eddyb) have to bitcast pointers
+                                // until LLVM removes pointee types.
+                                let lldata = bx.pointercast(
+                                    lldata,
+                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+                                );
+                                OperandValue::Pair(lldata, llextra)
+                            }
+                            OperandValue::Immediate(lldata) => {
+                                // "standard" unsize
+                                let (lldata, llextra) = base::unsize_thin_ptr(
+                                    &mut bx,
+                                    lldata,
+                                    operand.layout.ty,
+                                    cast.ty,
+                                );
+                                OperandValue::Pair(lldata, llextra)
+                            }
+                            OperandValue::Ref(..) => {
+                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
+                            }
+                        }
+                    }
+                    // Fat-pointer source: keep or drop the metadata half.
+                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
+                    | mir::CastKind::Misc
+                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
+                    {
+                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
+                            if bx.cx().is_backend_scalar_pair(cast) {
+                                let data_cast = bx.pointercast(
+                                    data_ptr,
+                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+                                );
+                                OperandValue::Pair(data_cast, meta)
+                            } else {
+                                // cast to thin-ptr
+                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
+                                // pointer-cast of that pointer to desired pointer type.
+                                let llcast_ty = bx.cx().immediate_backend_type(cast);
+                                let llval = bx.pointercast(data_ptr, llcast_ty);
+                                OperandValue::Immediate(llval)
+                            }
+                        } else {
+                            bug!("unexpected non-pair operand");
+                        }
+                    }
+                    // All remaining casts are between immediates (numeric, ptr<->int, etc.).
+                    mir::CastKind::Pointer(
+                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
+                    )
+                    | mir::CastKind::Misc => {
+                        assert!(bx.cx().is_backend_immediate(cast));
+                        let ll_t_out = bx.cx().immediate_backend_type(cast);
+                        if operand.layout.abi.is_uninhabited() {
+                            // Uninhabited source: the value can never exist, so undef is fine.
+                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
+                            return (bx, OperandRef { val, layout: cast });
+                        }
+                        let r_t_in =
+                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
+                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
+                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
+                        match operand.layout.variants {
+                            Variants::Single { index } => {
+                                // Single-variant source with a known discriminant
+                                // (e.g. `SomeEnum::Variant as i32`): the result is a constant.
+                                if let Some(discr) =
+                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
+                                {
+                                    let discr_layout = bx.cx().layout_of(discr.ty);
+                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
+                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
+                                    let discr_val =
+                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());
+
+                                    return (
+                                        bx,
+                                        OperandRef {
+                                            val: OperandValue::Immediate(discr_val),
+                                            layout: cast,
+                                        },
+                                    );
+                                }
+                            }
+                            Variants::Multiple { .. } => {}
+                        }
+                        let llval = operand.immediate();
+
+                        let mut signed = false;
+                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
+                            if let Int(_, s) = scalar.value {
+                                // We use `i1` for bytes that are always `0` or `1`,
+                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+                                // let LLVM interpret the `i1` as signed, because
+                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
+                                signed = !scalar.is_bool() && s;
+
+                                let er = scalar.valid_range_exclusive(bx.cx());
+                                if er.end != er.start
+                                    && scalar.valid_range.end() > scalar.valid_range.start()
+                                {
+                                    // We want `table[e as usize]` to not
+                                    // have bound checks, and this is the most
+                                    // convenient place to put the `assume`.
+                                    let ll_t_in_const =
+                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
+                                    let cmp = bx.icmp(IntPredicate::IntULE, llval, ll_t_in_const);
+                                    bx.assume(cmp);
+                                }
+                            }
+                        }
+
+                        // Pick the backend cast instruction from the (src, dst) category pair.
+                        let newval = match (r_t_in, r_t_out) {
+                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
+                            (CastTy::Float, CastTy::Float) => {
+                                let srcsz = bx.cx().float_width(ll_t_in);
+                                let dstsz = bx.cx().float_width(ll_t_out);
+                                if dstsz > srcsz {
+                                    bx.fpext(llval, ll_t_out)
+                                } else if srcsz > dstsz {
+                                    bx.fptrunc(llval, ll_t_out)
+                                } else {
+                                    llval
+                                }
+                            }
+                            (CastTy::Int(_), CastTy::Float) => {
+                                if signed {
+                                    bx.sitofp(llval, ll_t_out)
+                                } else {
+                                    bx.uitofp(llval, ll_t_out)
+                                }
+                            }
+                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
+                                bx.pointercast(llval, ll_t_out)
+                            }
+                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
+                                bx.ptrtoint(llval, ll_t_out)
+                            }
+                            (CastTy::Int(_), CastTy::Ptr(_)) => {
+                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
+                                bx.inttoptr(usize_llval, ll_t_out)
+                            }
+                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
+                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out, cast)
+                            }
+                            (CastTy::Float, CastTy::Int(_)) => {
+                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out, cast)
+                            }
+                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
+                        };
+                        OperandValue::Immediate(newval)
+                    }
+                };
+                (bx, OperandRef { val, layout: cast })
+            }
+
+            mir::Rvalue::Ref(_, bk, place) => {
+                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+                    tcx.mk_ref(
+                        tcx.lifetimes.re_erased,
+                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
+                    )
+                };
+                self.codegen_place_to_pointer(bx, place, mk_ref)
+            }
+
+            mir::Rvalue::AddressOf(mutability, place) => {
+                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
+                };
+                self.codegen_place_to_pointer(bx, place, mk_ptr)
+            }
+
+            mir::Rvalue::Len(place) => {
+                let size = self.evaluate_array_len(&mut bx, place);
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(size),
+                    layout: bx.cx().layout_of(bx.tcx().types.usize),
+                };
+                (bx, operand)
+            }
+
+            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+                let lhs = self.codegen_operand(&mut bx, lhs);
+                let rhs = self.codegen_operand(&mut bx, rhs);
+                // Fat-pointer operands get the pairwise comparison path;
+                // scalar immediates get the plain binop path.
+                let llresult = match (lhs.val, rhs.val) {
+                    (
+                        OperandValue::Pair(lhs_addr, lhs_extra),
+                        OperandValue::Pair(rhs_addr, rhs_extra),
+                    ) => self.codegen_fat_ptr_binop(
+                        &mut bx,
+                        op,
+                        lhs_addr,
+                        lhs_extra,
+                        rhs_addr,
+                        rhs_extra,
+                        lhs.layout.ty,
+                    ),
+
+                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
+                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+                    }
+
+                    _ => bug!(),
+                };
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(llresult),
+                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+                };
+                (bx, operand)
+            }
+            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+                let lhs = self.codegen_operand(&mut bx, lhs);
+                let rhs = self.codegen_operand(&mut bx, rhs);
+                let result = self.codegen_scalar_checked_binop(
+                    &mut bx,
+                    op,
+                    lhs.immediate(),
+                    rhs.immediate(),
+                    lhs.layout.ty,
+                );
+                // Checked ops produce a `(value, overflowed: bool)` tuple.
+                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
+                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
+
+                (bx, operand)
+            }
+
+            mir::Rvalue::UnaryOp(op, ref operand) => {
+                let operand = self.codegen_operand(&mut bx, operand);
+                let lloperand = operand.immediate();
+                let is_float = operand.layout.ty.is_floating_point();
+                let llval = match op {
+                    mir::UnOp::Not => bx.not(lloperand),
+                    mir::UnOp::Neg => {
+                        if is_float {
+                            bx.fneg(lloperand)
+                        } else {
+                            bx.neg(lloperand)
+                        }
+                    }
+                };
+                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+            }
+
+            mir::Rvalue::Discriminant(ref place) => {
+                // Load the discriminant of `place` as an immediate of `discr_ty`.
+                let discr_ty = rvalue.ty(self.mir, bx.tcx());
+                let discr = self
+                    .codegen_place(&mut bx, place.as_ref())
+                    .codegen_get_discr(&mut bx, discr_ty);
+                (
+                    bx,
+                    OperandRef {
+                        val: OperandValue::Immediate(discr),
+                        layout: self.cx.layout_of(discr_ty),
+                    },
+                )
+            }
+
+            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
+                assert!(bx.cx().type_is_sized(ty));
+                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
+                let tcx = self.cx.tcx();
+                (
+                    bx,
+                    OperandRef {
+                        val: OperandValue::Immediate(val),
+                        layout: self.cx.layout_of(tcx.types.usize),
+                    },
+                )
+            }
+
+            // `box EXPR`: call the `exchange_malloc` lang item and cast the
+            // raw allocation pointer to the box's backend type.
+            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
+                let content_ty = self.monomorphize(&content_ty);
+                let content_layout = bx.cx().layout_of(content_ty);
+                let llsize = bx.cx().const_usize(content_layout.size.bytes());
+                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
+                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+                let llty_ptr = bx.cx().backend_type(box_layout);
+
+                // Allocate space:
+                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
+                    Ok(id) => id,
+                    Err(s) => {
+                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                    }
+                };
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let r = bx.cx().get_fn_addr(instance);
+                let call = bx.call(r, &[llsize, llalign], None);
+                let val = bx.pointercast(call, llty_ptr);
+
+                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
+                (bx, operand)
+            }
+            mir::Rvalue::ThreadLocalRef(def_id) => {
+                // Reference to a static: the operand is a pointer to it.
+                assert!(bx.cx().tcx().is_static(def_id));
+                let static_ = bx.get_static(def_id);
+                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
+                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
+                (bx, operand)
+            }
+            mir::Rvalue::Use(ref operand) => {
+                let operand = self.codegen_operand(&mut bx, operand);
+                (bx, operand)
+            }
+            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
+                // According to `rvalue_creates_operand`, only ZST
+                // aggregate rvalues are allowed to be operands.
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
+                let operand =
+                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
+                (bx, operand)
+            }
+        }
+    }
+
+    /// Compute the length of the array or slice behind `place`.
+    ///
+    /// Zero-sized locals are kept as operands rather than allocas, and
+    /// `codegen_place` panics on operand locals — so for those we read the
+    /// constant array length straight from the type instead.
+    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
+        if let Some(local) = place.as_local() {
+            if let LocalRef::Operand(Some(op)) = self.locals[local] {
+                if let ty::Array(_, len) = op.layout.ty.kind {
+                    let len = len.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+                    return bx.cx().const_usize(len);
+                }
+            }
+        }
+        // Otherwise materialize the place and ask it for its length.
+        self.codegen_place(bx, place.as_ref()).len(bx.cx())
+    }
+
+    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`: materialize the place
+    /// and repackage its address as a pointer-typed operand.
+    fn codegen_place_to_pointer(
+        &mut self,
+        mut bx: Bx,
+        place: mir::Place<'tcx>,
+        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
+    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+        let cg_place = self.codegen_place(&mut bx, place.as_ref());
+        let pointee_ty = cg_place.layout.ty;
+        let layout = self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), pointee_ty));
+
+        // A place is already represented as a (possibly fat) pointer, so
+        // taking its address just repackages `llval` plus any metadata.
+        let val = if bx.cx().type_has_metadata(pointee_ty) {
+            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+        } else {
+            OperandValue::Immediate(cg_place.llval)
+        };
+        (bx, OperandRef { val, layout })
+    }
+
+    /// Emit a single unchecked binary operation on scalar (immediate) values,
+    /// selecting the float / signed / unsigned flavor from `input_ty`.
+    pub fn codegen_scalar_binop(
+        &mut self,
+        bx: &mut Bx,
+        op: mir::BinOp,
+        lhs: Bx::Value,
+        rhs: Bx::Value,
+        input_ty: Ty<'tcx>,
+    ) -> Bx::Value {
+        let float = input_ty.is_floating_point();
+        let signed = input_ty.is_signed();
+        match op {
+            mir::BinOp::Add if float => bx.fadd(lhs, rhs),
+            mir::BinOp::Add => bx.add(lhs, rhs),
+            mir::BinOp::Sub if float => bx.fsub(lhs, rhs),
+            mir::BinOp::Sub => bx.sub(lhs, rhs),
+            mir::BinOp::Mul if float => bx.fmul(lhs, rhs),
+            mir::BinOp::Mul => bx.mul(lhs, rhs),
+            mir::BinOp::Div if float => bx.fdiv(lhs, rhs),
+            mir::BinOp::Div if signed => bx.sdiv(lhs, rhs),
+            mir::BinOp::Div => bx.udiv(lhs, rhs),
+            mir::BinOp::Rem if float => bx.frem(lhs, rhs),
+            mir::BinOp::Rem if signed => bx.srem(lhs, rhs),
+            mir::BinOp::Rem => bx.urem(lhs, rhs),
+            mir::BinOp::BitOr => bx.or(lhs, rhs),
+            mir::BinOp::BitAnd => bx.and(lhs, rhs),
+            mir::BinOp::BitXor => bx.xor(lhs, rhs),
+            // Pointer arithmetic for MIR `Offset`.
+            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
+            // Shifts go through shared helpers in `common`.
+            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
+            mir::BinOp::Eq
+            | mir::BinOp::Ne
+            | mir::BinOp::Lt
+            | mir::BinOp::Le
+            | mir::BinOp::Gt
+            | mir::BinOp::Ge => {
+                // Comparisons pick the fcmp/icmp predicate from the HIR binop.
+                if float {
+                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
+                } else {
+                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), signed), lhs, rhs)
+                }
+            }
+        }
+    }
+
+    /// Emit a comparison between two fat-pointer `(addr, extra)` pairs.
+    ///
+    /// Only equality and ordering operators are meaningful here; any other
+    /// binop on fat pointers is a compiler bug.
+    pub fn codegen_fat_ptr_binop(
+        &mut self,
+        bx: &mut Bx,
+        op: mir::BinOp,
+        lhs_addr: Bx::Value,
+        lhs_extra: Bx::Value,
+        rhs_addr: Bx::Value,
+        rhs_extra: Bx::Value,
+        _input_ty: Ty<'tcx>,
+    ) -> Bx::Value {
+        match op {
+            mir::BinOp::Eq => {
+                // Equal iff both components are equal.
+                let addr_eq = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+                let extra_eq = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
+                bx.and(addr_eq, extra_eq)
+            }
+            mir::BinOp::Ne => {
+                // Unequal iff either component differs.
+                let addr_ne = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
+                let extra_ne = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
+                bx.or(addr_ne, extra_ne)
+            }
+            mir::BinOp::Lt | mir::BinOp::Le | mir::BinOp::Gt | mir::BinOp::Ge => {
+                // Lexicographic: a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
+                let (pred, strict_pred) = match op {
+                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
+                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
+                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
+                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
+                    _ => bug!(),
+                };
+                let addr_strict = bx.icmp(strict_pred, lhs_addr, rhs_addr);
+                let addr_eq = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+                let extra_cmp = bx.icmp(pred, lhs_extra, rhs_extra);
+                let eq_and_extra = bx.and(addr_eq, extra_cmp);
+                bx.or(addr_strict, eq_and_extra)
+            }
+            _ => {
+                bug!("unexpected fat ptr binop");
+            }
+        }
+    }
+
+    pub fn codegen_scalar_checked_binop(
+        &mut self,
+        bx: &mut Bx,
+        op: mir::BinOp,
+        lhs: Bx::Value,
+        rhs: Bx::Value,
+        input_ty: Ty<'tcx>,
+    ) -> OperandValue<Bx::Value> {
+        // This case can currently arise only from functions marked
+        // with #[rustc_inherit_overflow_checks] and inlined from
+        // another crate (mostly core::num generic/#[inline] fns),
+        // while the current crate doesn't use overflow checks.
+        if !bx.cx().check_overflow() {
+            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+            return OperandValue::Pair(val, bx.cx().const_bool(false));
+        }
+
+        let (val, of) = match op {
+            // These are checked using intrinsics
+            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
+                let oop = match op {
+                    mir::BinOp::Add => OverflowOp::Add,
+                    mir::BinOp::Sub => OverflowOp::Sub,
+                    mir::BinOp::Mul => OverflowOp::Mul,
+                    _ => unreachable!(),
+                };
+                bx.checked_binop(oop, input_ty, lhs, rhs)
+            }
+            mir::BinOp::Shl | mir::BinOp::Shr => {
+                let lhs_llty = bx.cx().val_ty(lhs);
+                let rhs_llty = bx.cx().val_ty(rhs);
+                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
+                let outer_bits = bx.and(rhs, invert_mask);
+
+                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
+                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+
+                (val, of)
+            }
+            _ => bug!("Operator `{:?}` is not a checkable operator", op),
+        };
+
+        OperandValue::Pair(val, of)
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+        match *rvalue {
+            mir::Rvalue::Ref(..) |
+            mir::Rvalue::AddressOf(..) |
+            mir::Rvalue::Len(..) |
+            mir::Rvalue::Cast(..) | // (*)
+            mir::Rvalue::BinaryOp(..) |
+            mir::Rvalue::CheckedBinaryOp(..) |
+            mir::Rvalue::UnaryOp(..) |
+            mir::Rvalue::Discriminant(..) |
+            mir::Rvalue::NullaryOp(..) |
+            mir::Rvalue::ThreadLocalRef(_) |
+            mir::Rvalue::Use(..) => // (*)
+                true,
+            mir::Rvalue::Repeat(..) |
+            mir::Rvalue::Aggregate(..) => {
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
+                let ty = self.monomorphize(&ty);
+                self.cx.spanned_layout_of(ty, span).is_zst()
+            }
+        }
+
+        // (*) this is only true if the type is suitable
+    }
+}
+
+fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    signed: bool,
+    x: Bx::Value,
+    float_ty: Bx::Type,
+    int_ty: Bx::Type,
+    int_layout: TyAndLayout<'tcx>,
+) -> Bx::Value {
+    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
+        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
+    }
+
+    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
+    if let Some(try_sat_result) = try_sat_result {
+        return try_sat_result;
+    }
+
+    let int_width = bx.cx().int_width(int_ty);
+    let float_width = bx.cx().float_width(float_ty);
+    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
+    // destination integer type after rounding towards zero. This `undef` value can cause UB in
+    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+    // Semantically, the mathematical value of the input is rounded towards zero to the next
+    // mathematical integer, and then the result is clamped into the range of the destination
+    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+    // the destination integer type. NaN is mapped to 0.
+    //
+    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
+    // a value representable in int_ty.
+    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
+    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
+    let int_max = |signed: bool, int_width: u64| -> u128 {
+        let shift_amount = 128 - int_width;
+        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+    };
+    let int_min = |signed: bool, int_width: u64| -> i128 {
+        if signed { i128::MIN >> (128 - int_width) } else { 0 }
+    };
+
+    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+        assert_eq!(rounded_min.status, Status::OK);
+        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+        assert!(rounded_max.value.is_finite());
+        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+    };
+    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+        assert_eq!(rounded_min.status, Status::OK);
+        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+        assert!(rounded_max.value.is_finite());
+        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+    };
+
+    let mut float_bits_to_llval = |bits| {
+        let bits_llval = match float_width {
+            32 => bx.cx().const_u32(bits as u32),
+            64 => bx.cx().const_u64(bits as u64),
+            n => bug!("unsupported float width {}", n),
+        };
+        bx.bitcast(bits_llval, float_ty)
+    };
+    let (f_min, f_max) = match float_width {
+        32 => compute_clamp_bounds_single(signed, int_width),
+        64 => compute_clamp_bounds_double(signed, int_width),
+        n => bug!("unsupported float width {}", n),
+    };
+    let f_min = float_bits_to_llval(f_min);
+    let f_max = float_bits_to_llval(f_max);
+    // To implement saturation, we perform the following steps:
+    //
+    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
+    // 2. Compare x to f_min and f_max, and use the comparison results to select:
+    //  a) int_ty::MIN if x < f_min or x is NaN
+    //  b) int_ty::MAX if x > f_max
+    //  c) the result of fpto[su]i otherwise
+    // 3. If x is NaN, return 0 (the destination integer type's zero), otherwise return the result of step 2.
+    //
+    // This avoids resulting in undef because values in range [f_min, f_max] by definition fit into the
+    // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
+    // undef does not introduce any non-determinism either.
+    // More importantly, the above procedure correctly implements saturating conversion.
+    // Proof (sketch):
+    // If x is NaN, 0 is returned by definition.
+    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
+    // This yields three cases to consider:
+    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+    //     saturating conversion for inputs in that range.
+    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
+    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
+    //     is correct.
+    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+    // QED.
+
+    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
+    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
+    let zero = bx.cx().const_uint(int_ty, 0);
+
+    // The codegen here differs quite a bit depending on whether our builder's
+    // `fptosi` and `fptoui` instructions may trap for out-of-bounds values. If
+    // they don't trap then we can start doing everything inline with a
+    // `select` instruction because it's ok to execute `fptosi` and `fptoui`
+    // even if we don't use the results.
+    if !bx.fptosui_may_trap(x, int_ty) {
+        // Step 1 ...
+        let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
+        let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
+        let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
+
+        // Step 2: We use two comparisons and two selects, with %s1 being the
+        // result:
+        //     %less_or_nan = fcmp ult %x, %f_min
+        //     %greater = fcmp ogt %x, %f_max
+        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+        //     %s1 = select %greater, int_ty::MAX, %s0
+        // Note that %less_or_nan uses an *unordered* comparison. This
+        // comparison is true if the operands are not comparable (i.e., if x is
+        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+        // x is NaN.
+        //
+        // Performance note: Unordered comparison can be lowered to a "flipped"
+        // comparison and a negation, and the negation can be merged into the
+        // select. Therefore, it is not necessarily any more expensive than an
+        // ordered ("normal") comparison. Whether these optimizations will be
+        // performed is ultimately up to the backend, but at least x86 does
+        // perform them.
+        let s0 = bx.select(less_or_nan, int_min, fptosui_result);
+        let s1 = bx.select(greater, int_max, s0);
+
+        // Step 3: NaN replacement.
+        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
+        // Therefore we only need to execute this step for signed integer types.
+        if signed {
+            // LLVM has no isNaN predicate, so we use (x == x) instead
+            let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
+            bx.select(cmp, s1, zero)
+        } else {
+            s1
+        }
+    } else {
+        // In this case we cannot execute `fptosi` or `fptoui` and then later
+        // discard the result. The builder is telling us that these instructions
+        // will trap on out-of-bounds values, so we need to use basic blocks and
+        // control flow to avoid executing the `fptosi` and `fptoui`
+        // instructions.
+        //
+        // The general idea of what we're constructing here is, for f64 -> i32:
+        //
+        //      ;; block so far... %0 is the argument
+        //      %result = alloca i32, align 4
+        //      %inbound_lower = fcmp oge double %0, 0xC1E0000000000000
+        //      %inbound_upper = fcmp ole double %0, 0x41DFFFFFFFC00000
+        //      ;; match (inbound_lower, inbound_upper) {
+        //      ;;     (true, true) => %0 can be converted without trapping
+        //      ;;     (false, false) => %0 is a NaN
+        //      ;;     (true, false) => %0 is too large
+        //      ;;     (false, true) => %0 is too small
+        //      ;; }
+        //      ;;
+        //      ;; The (true, true) check, go to %convert if so.
+        //      %inbounds = and i1 %inbound_lower, %inbound_upper
+        //      br i1 %inbounds, label %convert, label %specialcase
+        //
+        //  convert:
+        //      %cvt = call i32 @llvm.wasm.trunc.signed.i32.f64(double %0)
+        //      store i32 %cvt, i32* %result, align 4
+        //      br label %done
+        //
+        //  specialcase:
+        //      ;; Handle the cases where the number is NaN, too large or too small
+        //
+        //      ;; Either (true, false) or (false, true)
+        //      %is_not_nan = or i1 %inbound_lower, %inbound_upper
+        //      ;; Figure out which saturated value we are interested in if not `NaN`
+        //      %saturated = select i1 %inbound_lower, i32 2147483647, i32 -2147483648
+        //      ;; Figure out between saturated and NaN representations
+        //      %result_nan = select i1 %is_not_nan, i32 %saturated, i32 0
+        //      store i32 %result_nan, i32* %result, align 4
+        //      br label %done
+        //
+        //  done:
+        //      %r = load i32, i32* %result, align 4
+        //      ;; ...
+        let done = bx.build_sibling_block("float_cast_done");
+        let mut convert = bx.build_sibling_block("float_cast_convert");
+        let mut specialcase = bx.build_sibling_block("float_cast_specialcase");
+
+        let result = PlaceRef::alloca(bx, int_layout);
+        result.storage_live(bx);
+
+        // Use control flow to figure out whether we can execute `fptosi` in a
+        // basic block, or whether we go to a different basic block to implement
+        // the saturating logic.
+        let inbound_lower = bx.fcmp(RealPredicate::RealOGE, x, f_min);
+        let inbound_upper = bx.fcmp(RealPredicate::RealOLE, x, f_max);
+        let inbounds = bx.and(inbound_lower, inbound_upper);
+        bx.cond_br(inbounds, convert.llbb(), specialcase.llbb());
+
+        // Translation of the `convert` basic block
+        let cvt = if signed { convert.fptosi(x, int_ty) } else { convert.fptoui(x, int_ty) };
+        convert.store(cvt, result.llval, result.align);
+        convert.br(done.llbb());
+
+        // Translation of the `specialcase` basic block. Note that like above
+        // we try to be a bit clever here for unsigned conversions. In those
+        // cases the `int_min` is zero so we don't need two select instructions,
+        // just one to choose whether we need `int_max` or not. If
+        // `inbound_lower` is true then we're guaranteed to not be `NaN` and
+        // since we're greater than zero we must be saturating to `int_max`. If
+        // `inbound_lower` is false then we're either NaN or less than zero, so
+        // we saturate to zero.
+        let result_nan = if signed {
+            let is_not_nan = specialcase.or(inbound_lower, inbound_upper);
+            let saturated = specialcase.select(inbound_lower, int_max, int_min);
+            specialcase.select(is_not_nan, saturated, zero)
+        } else {
+            specialcase.select(inbound_lower, int_max, int_min)
+        };
+        specialcase.store(result_nan, result.llval, result.align);
+        specialcase.br(done.llbb());
+
+        // Translation of the `done` basic block, positioning ourselves to
+        // continue from that point as well.
+        *bx = done;
+        let ret = bx.load(result.llval, result.align);
+        result.storage_dead(bx);
+        ret
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
new file mode 100644
index 00000000000..6f74ba77d4c
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -0,0 +1,124 @@
+use rustc_errors::struct_span_err;
+use rustc_middle::mir;
+
+use super::FunctionCx;
+use super::LocalRef;
+use super::OperandValue;
+use crate::traits::BuilderMethods;
+use crate::traits::*;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
+        debug!("codegen_statement(statement={:?})", statement);
+
+        self.set_debug_loc(&mut bx, statement.source_info);
+        match statement.kind {
+            mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
+                if let Some(index) = place.as_local() {
+                    match self.locals[index] {
+                        LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue),
+                        LocalRef::UnsizedPlace(cg_indirect_dest) => {
+                            self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
+                        }
+                        LocalRef::Operand(None) => {
+                            let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+                            self.locals[index] = LocalRef::Operand(Some(operand));
+                            self.debug_introduce_local(&mut bx, index);
+                            bx
+                        }
+                        LocalRef::Operand(Some(op)) => {
+                            if !op.layout.is_zst() {
+                                span_bug!(
+                                    statement.source_info.span,
+                                    "operand {:?} already assigned",
+                                    rvalue
+                                );
+                            }
+
+                            // If the type is zero-sized, it's already been set here,
+                            // but we still need to make sure we codegen the operand
+                            self.codegen_rvalue_operand(bx, rvalue).0
+                        }
+                    }
+                } else {
+                    let cg_dest = self.codegen_place(&mut bx, place.as_ref());
+                    self.codegen_rvalue(bx, cg_dest, rvalue)
+                }
+            }
+            mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
+                self.codegen_place(&mut bx, place.as_ref())
+                    .codegen_set_discr(&mut bx, variant_index);
+                bx
+            }
+            mir::StatementKind::StorageLive(local) => {
+                if let LocalRef::Place(cg_place) = self.locals[local] {
+                    cg_place.storage_live(&mut bx);
+                } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+                    cg_indirect_place.storage_live(&mut bx);
+                }
+                bx
+            }
+            mir::StatementKind::StorageDead(local) => {
+                if let LocalRef::Place(cg_place) = self.locals[local] {
+                    cg_place.storage_dead(&mut bx);
+                } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+                    cg_indirect_place.storage_dead(&mut bx);
+                }
+                bx
+            }
+            mir::StatementKind::LlvmInlineAsm(ref asm) => {
+                let outputs = asm
+                    .outputs
+                    .iter()
+                    .map(|output| self.codegen_place(&mut bx, output.as_ref()))
+                    .collect();
+
+                let input_vals = asm.inputs.iter().fold(
+                    Vec::with_capacity(asm.inputs.len()),
+                    |mut acc, (span, input)| {
+                        let op = self.codegen_operand(&mut bx, input);
+                        if let OperandValue::Immediate(_) = op.val {
+                            acc.push(op.immediate());
+                        } else {
+                            struct_span_err!(
+                                bx.sess(),
+                                span.to_owned(),
+                                E0669,
+                                "invalid value for constraint in inline assembly"
+                            )
+                            .emit();
+                        }
+                        acc
+                    },
+                );
+
+                if input_vals.len() == asm.inputs.len() {
+                    let res = bx.codegen_llvm_inline_asm(
+                        &asm.asm,
+                        outputs,
+                        input_vals,
+                        statement.source_info.span,
+                    );
+                    if !res {
+                        struct_span_err!(
+                            bx.sess(),
+                            statement.source_info.span,
+                            E0668,
+                            "malformed inline assembly"
+                        )
+                        .emit();
+                    }
+                }
+                bx
+            }
+            mir::StatementKind::Coverage(box ref coverage) => {
+                self.codegen_coverage(&mut bx, coverage.clone());
+                bx
+            }
+            mir::StatementKind::FakeRead(..)
+            | mir::StatementKind::Retag { .. }
+            | mir::StatementKind::AscribeUserType(..)
+            | mir::StatementKind::Nop => bx,
+        }
+    }
+}