Diffstat (limited to 'compiler/rustc_codegen_ssa'):

 compiler/rustc_codegen_ssa/Cargo.toml                  |   36
 compiler/rustc_codegen_ssa/README.md                   |    3
 compiler/rustc_codegen_ssa/src/back/archive.rs         |   56
 compiler/rustc_codegen_ssa/src/back/command.rs         |  183
 compiler/rustc_codegen_ssa/src/back/link.rs            | 2152
 compiler/rustc_codegen_ssa/src/back/linker.rs          | 1351
 compiler/rustc_codegen_ssa/src/back/lto.rs             |  107
 compiler/rustc_codegen_ssa/src/back/mod.rs             |    8
 compiler/rustc_codegen_ssa/src/back/rpath.rs           |  135
 compiler/rustc_codegen_ssa/src/back/rpath/tests.rs     |   74
 compiler/rustc_codegen_ssa/src/back/symbol_export.rs   |  446
 compiler/rustc_codegen_ssa/src/back/write.rs           | 1859
 compiler/rustc_codegen_ssa/src/base.rs                 |  959
 compiler/rustc_codegen_ssa/src/common.rs               |  197
 compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs     |   67
 compiler/rustc_codegen_ssa/src/coverageinfo/map.rs     |  205
 compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs     |    2
 compiler/rustc_codegen_ssa/src/debuginfo/mod.rs        |    2
 compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs |  266
 compiler/rustc_codegen_ssa/src/glue.rs                 |  109
 compiler/rustc_codegen_ssa/src/lib.rs                  |  171
 compiler/rustc_codegen_ssa/src/meth.rs                 |  126
 compiler/rustc_codegen_ssa/src/mir/analyze.rs          |  448
 compiler/rustc_codegen_ssa/src/mir/block.rs            | 1416
 compiler/rustc_codegen_ssa/src/mir/constant.rs         |   91
 compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs     |   35
 compiler/rustc_codegen_ssa/src/mir/debuginfo.rs        |  361
 compiler/rustc_codegen_ssa/src/mir/mod.rs              |  492
 compiler/rustc_codegen_ssa/src/mir/operand.rs          |  471
 compiler/rustc_codegen_ssa/src/mir/place.rs            |  502
 compiler/rustc_codegen_ssa/src/mir/rvalue.rs           | 1006
 compiler/rustc_codegen_ssa/src/mir/statement.rs        |  124
 compiler/rustc_codegen_ssa/src/mono_item.rs            |   98
 compiler/rustc_codegen_ssa/src/traits/abi.rs           |    8
 compiler/rustc_codegen_ssa/src/traits/asm.rs           |   61
 compiler/rustc_codegen_ssa/src/traits/backend.rs       |  119
 compiler/rustc_codegen_ssa/src/traits/builder.rs       |  286
 compiler/rustc_codegen_ssa/src/traits/consts.rs        |   38
 compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs  |   31
 compiler/rustc_codegen_ssa/src/traits/debuginfo.rs     |   62
 compiler/rustc_codegen_ssa/src/traits/declare.rs       |   65
 compiler/rustc_codegen_ssa/src/traits/intrinsic.rs     |   30
 compiler/rustc_codegen_ssa/src/traits/misc.rs          |   22
 compiler/rustc_codegen_ssa/src/traits/mod.rs           |  102
 compiler/rustc_codegen_ssa/src/traits/statics.rs       |   24
 compiler/rustc_codegen_ssa/src/traits/type_.rs         |  137
 compiler/rustc_codegen_ssa/src/traits/write.rs         |   64
 47 files changed, 14607 insertions(+), 0 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
new file mode 100644
index 00000000000..e5df0f60941
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_codegen_ssa"
+version = "0.0.0"
+edition = "2018"
+
+[lib]
+test = false
+
+[dependencies]
+bitflags = "1.2.1"
+cc = "1.0.1"
+num_cpus = "1.0"
+memmap = "0.7"
+tracing = "0.1"
+libc = "0.2.50"
+jobserver = "0.1.11"
+tempfile = "3.1"
+pathdiff = "0.2.0"
+
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
+rustc_data_structures = { path = "../rustc_data_structures"}
+rustc_errors = { path = "../rustc_errors" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_incremental = { path = "../rustc_incremental" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_target = { path = "../rustc_target" }
+rustc_session = { path = "../rustc_session" }
diff --git a/compiler/rustc_codegen_ssa/README.md b/compiler/rustc_codegen_ssa/README.md
new file mode 100644
index 00000000000..7b770187b75
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/README.md
@@ -0,0 +1,3 @@
+Please read the rustc-dev-guide chapter on [Backend Agnostic Codegen][bac].
+
+[bac]: https://rustc-dev-guide.rust-lang.org/backend/backend-agnostic.html
diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs
new file mode 100644
index 00000000000..f83b4b2b0c0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/archive.rs
@@ -0,0 +1,56 @@
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+
+use std::io;
+use std::path::{Path, PathBuf};
+
+pub fn find_library(name: Symbol, search_paths: &[PathBuf], sess: &Session) -> PathBuf {
+    // On Windows, static libraries sometimes show up as libfoo.a and other
+    // times show up as foo.lib
+    let oslibname = format!(
+        "{}{}{}",
+        sess.target.target.options.staticlib_prefix,
+        name,
+        sess.target.target.options.staticlib_suffix
+    );
+    let unixlibname = format!("lib{}.a", name);
+
+    for path in search_paths {
+        debug!("looking for {} inside {:?}", name, path);
+        let test = path.join(&oslibname);
+        if test.exists() {
+            return test;
+        }
+        if oslibname != unixlibname {
+            let test = path.join(&unixlibname);
+            if test.exists() {
+                return test;
+            }
+        }
+    }
+    sess.fatal(&format!(
+        "could not find native static library `{}`, \
+                         perhaps an -L flag is missing?",
+        name
+    ));
+}
+
+pub trait ArchiveBuilder<'a> {
+    fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self;
+
+    fn add_file(&mut self, path: &Path);
+    fn remove_file(&mut self, name: &str);
+    fn src_files(&mut self) -> Vec<String>;
+
+    fn add_rlib(
+        &mut self,
+        path: &Path,
+        name: &str,
+        lto: bool,
+        skip_objects: bool,
+    ) -> io::Result<()>;
+    fn add_native_library(&mut self, name: Symbol);
+    fn update_symbols(&mut self);
+
+    fn build(self);
+}
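
The probing order in `find_library` above (OS-style name first, then the Unix-style fallback) can be exercised in isolation. A minimal standalone sketch, where `prefix`/`suffix` stand in for the target's `staticlib_prefix`/`staticlib_suffix` options and the paths are made up:

```rust
use std::path::PathBuf;

// Standalone sketch of the probing order in `find_library`: try the OS-style
// name first, then fall back to the Unix-style `lib<name>.a` spelling.
fn probe_library(
    name: &str,
    prefix: &str,
    suffix: &str,
    search_paths: &[PathBuf],
) -> Option<PathBuf> {
    let oslibname = format!("{}{}{}", prefix, name, suffix); // e.g. "foo.lib" on MSVC
    let unixlibname = format!("lib{}.a", name); // e.g. "libfoo.a"
    for path in search_paths {
        let test = path.join(&oslibname);
        if test.exists() {
            return Some(test);
        }
        if oslibname != unixlibname {
            let test = path.join(&unixlibname);
            if test.exists() {
                return Some(test);
            }
        }
    }
    None // the real `find_library` calls `sess.fatal` here instead
}

fn main() {
    // On an MSVC-style target both "foo.lib" and "libfoo.a" get probed.
    let paths = vec![PathBuf::from("C:/libs")];
    println!("{:?}", probe_library("foo", "", ".lib", &paths));
}
```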
diff --git a/compiler/rustc_codegen_ssa/src/back/command.rs b/compiler/rustc_codegen_ssa/src/back/command.rs
new file mode 100644
index 00000000000..0208bb73abd
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/command.rs
@@ -0,0 +1,183 @@
+//! A thin wrapper around `Command` in the standard library which allows us to
+//! read the arguments that are built up.
+
+use std::ffi::{OsStr, OsString};
+use std::fmt;
+use std::io;
+use std::mem;
+use std::process::{self, Output};
+
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::LldFlavor;
+
+#[derive(Clone)]
+pub struct Command {
+    program: Program,
+    args: Vec<OsString>,
+    env: Vec<(OsString, OsString)>,
+    env_remove: Vec<OsString>,
+}
+
+#[derive(Clone)]
+enum Program {
+    Normal(OsString),
+    CmdBatScript(OsString),
+    Lld(OsString, LldFlavor),
+}
+
+impl Command {
+    pub fn new<P: AsRef<OsStr>>(program: P) -> Command {
+        Command::_new(Program::Normal(program.as_ref().to_owned()))
+    }
+
+    pub fn bat_script<P: AsRef<OsStr>>(program: P) -> Command {
+        Command::_new(Program::CmdBatScript(program.as_ref().to_owned()))
+    }
+
+    pub fn lld<P: AsRef<OsStr>>(program: P, flavor: LldFlavor) -> Command {
+        Command::_new(Program::Lld(program.as_ref().to_owned(), flavor))
+    }
+
+    fn _new(program: Program) -> Command {
+        Command { program, args: Vec::new(), env: Vec::new(), env_remove: Vec::new() }
+    }
+
+    pub fn arg<P: AsRef<OsStr>>(&mut self, arg: P) -> &mut Command {
+        self._arg(arg.as_ref());
+        self
+    }
+
+    pub fn sym_arg(&mut self, arg: Symbol) -> &mut Command {
+        self.arg(&*arg.as_str());
+        self
+    }
+
+    pub fn args<I>(&mut self, args: I) -> &mut Command
+    where
+        I: IntoIterator<Item: AsRef<OsStr>>,
+    {
+        for arg in args {
+            self._arg(arg.as_ref());
+        }
+        self
+    }
+
+    fn _arg(&mut self, arg: &OsStr) {
+        self.args.push(arg.to_owned());
+    }
+
+    pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Command
+    where
+        K: AsRef<OsStr>,
+        V: AsRef<OsStr>,
+    {
+        self._env(key.as_ref(), value.as_ref());
+        self
+    }
+
+    fn _env(&mut self, key: &OsStr, value: &OsStr) {
+        self.env.push((key.to_owned(), value.to_owned()));
+    }
+
+    pub fn env_remove<K>(&mut self, key: K) -> &mut Command
+    where
+        K: AsRef<OsStr>,
+    {
+        self._env_remove(key.as_ref());
+        self
+    }
+
+    fn _env_remove(&mut self, key: &OsStr) {
+        self.env_remove.push(key.to_owned());
+    }
+
+    pub fn output(&mut self) -> io::Result<Output> {
+        self.command().output()
+    }
+
+    pub fn command(&self) -> process::Command {
+        let mut ret = match self.program {
+            Program::Normal(ref p) => process::Command::new(p),
+            Program::CmdBatScript(ref p) => {
+                let mut c = process::Command::new("cmd");
+                c.arg("/c").arg(p);
+                c
+            }
+            Program::Lld(ref p, flavor) => {
+                let mut c = process::Command::new(p);
+                c.arg("-flavor").arg(match flavor {
+                    LldFlavor::Wasm => "wasm",
+                    LldFlavor::Ld => "gnu",
+                    LldFlavor::Link => "link",
+                    LldFlavor::Ld64 => "darwin",
+                });
+                c
+            }
+        };
+        ret.args(&self.args);
+        ret.envs(self.env.clone());
+        for k in &self.env_remove {
+            ret.env_remove(k);
+        }
+        ret
+    }
+
+    // extensions
+
+    pub fn get_args(&self) -> &[OsString] {
+        &self.args
+    }
+
+    pub fn take_args(&mut self) -> Vec<OsString> {
+        mem::take(&mut self.args)
+    }
+
+    /// Returns `true` if we're pretty sure that this'll blow OS spawn limits,
+    /// or `false` if we should attempt to spawn and see what the OS says.
+    pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool {
+        // We mostly only care about Windows in this method, on Unix the limits
+        // can be gargantuan anyway so we're pretty unlikely to hit them
+        if cfg!(unix) {
+            return false;
+        }
+
+        // Right now LLD doesn't support the `@` syntax of passing an argument
+        // through files, so regardless of the platform we try to go to the OS
+        // on this one.
+        if let Program::Lld(..) = self.program {
+            return false;
+        }
+
+        // Ok so on Windows the limit for spawning a process is 32,768
+        // characters in its command line [1]. Unfortunately we don't actually
+        // have access to that as it's calculated just before spawning. Instead
+        // we perform a poor-man's guess as to how long our command line will
+        // be. We're assuming here that we don't have to escape every character...
+        //
+        // Turns out though that `cmd.exe` has even smaller limits, 8192
+        // characters [2]. Linkers can often be batch scripts (for example
+        // Emscripten, Gecko's current build system) which means that we're
+        // running through batch scripts. These linkers often just forward
+        // arguments elsewhere (and maybe tack on more), so if we blow 8192
+        // bytes we'll typically cause them to blow as well.
+        //
+        // Basically as a result just perform an inflated estimate of what our
+        // command line will look like and test if it's > 8192 (we actually
+        // test against 6k to artificially inflate our estimate). If all else
+        // fails we'll fall back to the normal unix logic of testing the OS
+        // error code if we fail to spawn and automatically re-spawning the
+        // linker with smaller arguments.
+        //
+        // [1]: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessa
+        // [2]: https://devblogs.microsoft.com/oldnewthing/?p=41553
+
+        let estimated_command_line_len = self.args.iter().map(|a| a.len()).sum::<usize>();
+        estimated_command_line_len > 1024 * 6
+    }
+}
+
+impl fmt::Debug for Command {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.command().fmt(f)
+    }
+}
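
As a rough worked example of the heuristic in `very_likely_to_exceed_some_spawn_limit`, here is a standalone sketch of the same estimate (the argument strings are made up; the real estimate also ignores the program name and escaping overhead, which is one reason the threshold is conservative):

```rust
fn main() {
    // 300 object-file arguments of ~27 bytes each come to roughly 8,100
    // bytes, which is over the 6 KiB threshold used above, so rustc would
    // fall back to passing arguments via an `@`-file rather than spawning
    // the linker with the raw argument list.
    let args: Vec<String> =
        (0..300).map(|i| format!("target/debug/deps/obj{:04}.o", i)).collect();
    let estimated: usize = args.iter().map(|a| a.len()).sum();
    println!("estimated command line: {} bytes", estimated);
    println!("exceeds 6 KiB threshold: {}", estimated > 1024 * 6);
}
```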
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
new file mode 100644
index 00000000000..bfcf979d125
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -0,0 +1,2152 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_fs_util::fix_windows_verbatim_for_gcc;
+use rustc_hir::def_id::CrateNum;
+use rustc_middle::middle::cstore::{EncodedMetadata, LibSource, NativeLib};
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo};
+use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SanitizerSet};
+use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
+use rustc_session::search_paths::PathKind;
+use rustc_session::utils::NativeLibKind;
+// For all the linkers we support, and information they might
+// need out of the shared crate context before we get rid of it.
+use rustc_session::{filesearch, Session};
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
+use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor};
+use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel};
+
+use super::archive::ArchiveBuilder;
+use super::command::Command;
+use super::linker::{self, Linker};
+use super::rpath::{self, RPathConfig};
+use crate::{looks_like_rust_object_file, CodegenResults, CrateInfo, METADATA_FILENAME};
+
+use cc::windows_registry;
+use tempfile::Builder as TempFileBuilder;
+
+use std::ffi::OsString;
+use std::path::{Path, PathBuf};
+use std::process::{ExitStatus, Output, Stdio};
+use std::{ascii, char, env, fmt, fs, io, mem, str};
+
+pub fn remove(sess: &Session, path: &Path) {
+    if let Err(e) = fs::remove_file(path) {
+        sess.err(&format!("failed to remove {}: {}", path.display(), e));
+    }
+}
+
+/// Performs the linkage portion of the compilation phase. This will generate all
+/// of the requested outputs for this compilation session.
+pub fn link_binary<'a, B: ArchiveBuilder<'a>>(
+    sess: &'a Session,
+    codegen_results: &CodegenResults,
+    outputs: &OutputFilenames,
+    crate_name: &str,
+    target_cpu: &str,
+) {
+    let _timer = sess.timer("link_binary");
+    let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
+    for &crate_type in sess.crate_types().iter() {
+        // Ignore executable crates if we have -Z no-codegen, as they will error.
+        if (sess.opts.debugging_opts.no_codegen || !sess.opts.output_types.should_codegen())
+            && !output_metadata
+            && crate_type == CrateType::Executable
+        {
+            continue;
+        }
+
+        if invalid_output_for_target(sess, crate_type) {
+            bug!(
+                "invalid output type `{:?}` for target os `{}`",
+                crate_type,
+                sess.opts.target_triple
+            );
+        }
+
+        sess.time("link_binary_check_files_are_writeable", || {
+            for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+                check_file_is_writeable(obj, sess);
+            }
+        });
+
+        if outputs.outputs.should_codegen() {
+            let tmpdir = TempFileBuilder::new()
+                .prefix("rustc")
+                .tempdir()
+                .unwrap_or_else(|err| sess.fatal(&format!("couldn't create a temp dir: {}", err)));
+            let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
+            let out_filename = out_filename(sess, crate_type, outputs, crate_name);
+            match crate_type {
+                CrateType::Rlib => {
+                    let _timer = sess.timer("link_rlib");
+                    link_rlib::<B>(sess, codegen_results, RlibFlavor::Normal, &out_filename, &path)
+                        .build();
+                }
+                CrateType::Staticlib => {
+                    link_staticlib::<B>(sess, codegen_results, &out_filename, &path);
+                }
+                _ => {
+                    link_natively::<B>(
+                        sess,
+                        crate_type,
+                        &out_filename,
+                        codegen_results,
+                        path.as_ref(),
+                        target_cpu,
+                    );
+                }
+            }
+            if sess.opts.json_artifact_notifications {
+                sess.parse_sess.span_diagnostic.emit_artifact_notification(&out_filename, "link");
+            }
+        }
+    }
+
+    // Remove the temporary object file and metadata if we aren't saving temps
+    sess.time("link_binary_remove_temps", || {
+        if !sess.opts.cg.save_temps {
+            if sess.opts.output_types.should_codegen()
+                && !preserve_objects_for_their_debuginfo(sess)
+            {
+                for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+                    remove(sess, obj);
+                }
+            }
+            if let Some(ref metadata_module) = codegen_results.metadata_module {
+                if let Some(ref obj) = metadata_module.object {
+                    remove(sess, obj);
+                }
+            }
+            if let Some(ref allocator_module) = codegen_results.allocator_module {
+                if let Some(ref obj) = allocator_module.object {
+                    remove(sess, obj);
+                }
+            }
+        }
+    });
+}
+
+// This function also sets up env vars for the spawned process: on Windows
+// these are used to set up the path for MSVC to find its DLLs, and for gcc
+// to find its bundled toolchain.
+fn get_linker(
+    sess: &Session,
+    linker: &Path,
+    flavor: LinkerFlavor,
+    self_contained: bool,
+) -> Command {
+    let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe");
+
+    // If our linker looks like a batch script on Windows then to execute this
+    // we'll need to spawn `cmd` explicitly. This is primarily done to handle
+    // emscripten where the linker is `emcc.bat` and needs to be spawned as
+    // `cmd /c emcc.bat ...`.
+    //
+    // This worked historically but is needed manually since #42436 (regression
+    // was tagged as #42791) and some more info can be found on #44443 for
+    // emscripten itself.
+    let mut cmd = match linker.to_str() {
+        Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker),
+        _ => match flavor {
+            LinkerFlavor::Lld(f) => Command::lld(linker, f),
+            LinkerFlavor::Msvc
+                if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() =>
+            {
+                Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker))
+            }
+            _ => Command::new(linker),
+        },
+    };
+
+    // UWP apps have API restrictions enforced during Store submissions.
+    // To comply with the Windows App Certification Kit,
+    // MSVC needs to link with the Store versions of the runtime libraries (vcruntime, msvcrt, etc).
+    let t = &sess.target.target;
+    if (flavor == LinkerFlavor::Msvc || flavor == LinkerFlavor::Lld(LldFlavor::Link))
+        && t.target_vendor == "uwp"
+    {
+        if let Some(ref tool) = msvc_tool {
+            let original_path = tool.path();
+            if let Some(ref root_lib_path) = original_path.ancestors().nth(4) {
+                let arch = match t.arch.as_str() {
+                    "x86_64" => Some("x64".to_string()),
+                    "x86" => Some("x86".to_string()),
+                    "aarch64" => Some("arm64".to_string()),
+                    "arm" => Some("arm".to_string()),
+                    _ => None,
+                };
+                if let Some(ref a) = arch {
+                    // FIXME: Move this to `fn linker_with_args`.
+                    let mut arg = OsString::from("/LIBPATH:");
+                    arg.push(format!("{}\\lib\\{}\\store", root_lib_path.display(), a.to_string()));
+                    cmd.arg(&arg);
+                } else {
+                    warn!("arch is not supported");
+                }
+            } else {
+                warn!("MSVC root path lib location not found");
+            }
+        } else {
+            warn!("link.exe not found");
+        }
+    }
+
+    // The compiler's sysroot often has some bundled tools, so add it to the
+    // PATH for the child.
+    let mut new_path = sess.host_filesearch(PathKind::All).get_tools_search_paths(self_contained);
+    let mut msvc_changed_path = false;
+    if sess.target.target.options.is_like_msvc {
+        if let Some(ref tool) = msvc_tool {
+            cmd.args(tool.args());
+            for &(ref k, ref v) in tool.env() {
+                if k == "PATH" {
+                    new_path.extend(env::split_paths(v));
+                    msvc_changed_path = true;
+                } else {
+                    cmd.env(k, v);
+                }
+            }
+        }
+    }
+
+    if !msvc_changed_path {
+        if let Some(path) = env::var_os("PATH") {
+            new_path.extend(env::split_paths(&path));
+        }
+    }
+    cmd.env("PATH", env::join_paths(new_path).unwrap());
+
+    cmd
+}
+
+pub fn each_linked_rlib(
+    info: &CrateInfo,
+    f: &mut dyn FnMut(CrateNum, &Path),
+) -> Result<(), String> {
+    let crates = info.used_crates_static.iter();
+    let mut fmts = None;
+    for (ty, list) in info.dependency_formats.iter() {
+        match ty {
+            CrateType::Executable
+            | CrateType::Staticlib
+            | CrateType::Cdylib
+            | CrateType::ProcMacro => {
+                fmts = Some(list);
+                break;
+            }
+            _ => {}
+        }
+    }
+    let fmts = match fmts {
+        Some(f) => f,
+        None => return Err("could not find formats for rlibs".to_string()),
+    };
+    for &(cnum, ref path) in crates {
+        match fmts.get(cnum.as_usize() - 1) {
+            Some(&Linkage::NotLinked | &Linkage::IncludedFromDylib) => continue,
+            Some(_) => {}
+            None => return Err("could not find formats for rlibs".to_string()),
+        }
+        let name = &info.crate_name[&cnum];
+        let path = match *path {
+            LibSource::Some(ref p) => p,
+            LibSource::MetadataOnly => {
+                return Err(format!(
+                    "could not find rlib for: `{}`, found rmeta (metadata) file",
+                    name
+                ));
+            }
+            LibSource::None => return Err(format!("could not find rlib for: `{}`", name)),
+        };
+        f(cnum, &path);
+    }
+    Ok(())
+}
+
+/// We use a temp directory here to avoid races between concurrent rustc processes,
+/// such as builds in the same directory using the same filename for metadata while
+/// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a
+/// directory being searched for `extern crate` (observing an incomplete file).
+/// The returned path is the temporary file containing the complete metadata.
+pub fn emit_metadata(sess: &Session, metadata: &EncodedMetadata, tmpdir: &MaybeTempDir) -> PathBuf {
+    let out_filename = tmpdir.as_ref().join(METADATA_FILENAME);
+    let result = fs::write(&out_filename, &metadata.raw_data);
+
+    if let Err(e) = result {
+        sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+    }
+
+    out_filename
+}
+
+// Create an 'rlib'
+//
+// An rlib in its current incarnation is essentially a renamed .a file. The
+// rlib primarily contains the object file of the crate, but it also contains
+// all of the object files from native libraries. This is done by unzipping
+// native libraries and inserting all of the contents into this archive.
+fn link_rlib<'a, B: ArchiveBuilder<'a>>(
+    sess: &'a Session,
+    codegen_results: &CodegenResults,
+    flavor: RlibFlavor,
+    out_filename: &Path,
+    tmpdir: &MaybeTempDir,
+) -> B {
+    info!("preparing rlib to {:?}", out_filename);
+    let mut ab = <B as ArchiveBuilder>::new(sess, out_filename, None);
+
+    for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+        ab.add_file(obj);
+    }
+
+    // Note that in this loop we are ignoring the value of `lib.cfg`. That is,
+    // we may not be configured to actually include a static library if we're
+    // adding it here. That's because later when we consume this rlib we'll
+    // decide whether we actually needed the static library or not.
+    //
+    // To do this "correctly" we'd need to keep track of which libraries added
+    // which object files to the archive. We don't do that here, however. The
+    // #[link(cfg(..))] feature is unstable, though, and only intended to get
+    // liblibc working. In that sense the check below just indicates that if
+    // there are any libraries we want to omit object files for at link time we
+    // just exclude all custom object files.
+    //
+    // Eventually if we want to stabilize or flesh out the #[link(cfg(..))]
+    // feature then we'll need to figure out how to record what objects were
+    // loaded from the libraries found here and then encode that into the
+    // metadata of the rlib we're generating somehow.
+    for lib in codegen_results.crate_info.used_libraries.iter() {
+        match lib.kind {
+            NativeLibKind::StaticBundle => {}
+            NativeLibKind::StaticNoBundle
+            | NativeLibKind::Dylib
+            | NativeLibKind::Framework
+            | NativeLibKind::RawDylib
+            | NativeLibKind::Unspecified => continue,
+        }
+        if let Some(name) = lib.name {
+            ab.add_native_library(name);
+        }
+    }
+
+    // After adding all files to the archive, we need to update the
+    // symbol table of the archive.
+    ab.update_symbols();
+
+    // Note that it is important that we add all of our non-object "magical
+    // files" *after* all of the object files in the archive. The reason for
+    // this is as follows:
+    //
+    // * When performing LTO, this archive will be modified to remove
+    //   objects from above. The reason for this is described below.
+    //
+    // * When the system linker looks at an archive, it will attempt to
+    //   determine the architecture of the archive in order to see whether it's
+    //   linkable.
+    //
+    //   The algorithm for this detection is: iterate over the files in the
+    //   archive. Skip magical SYMDEF names. Interpret the first file as an
+    //   object file. Read architecture from the object file.
+    //
+    // * As one can probably see, if "metadata" and "foo.bc" were placed
+    //   before all of the objects, then the architecture of this archive would
+    //   not be correctly inferred once 'foo.o' is removed.
+    //
+    // Basically, all this means is that this code should not move above the
+    // code above.
+    match flavor {
+        RlibFlavor::Normal => {
+            // Instead of putting the metadata in an object file section, rlibs
+            // contain the metadata in a separate file.
+            ab.add_file(&emit_metadata(sess, &codegen_results.metadata, tmpdir));
+
+            // After adding all files to the archive, we need to update the
+            // symbol table of the archive. This currently dies on macOS (see
+            // #11162), and isn't necessary there anyway
+            if !sess.target.target.options.is_like_osx {
+                ab.update_symbols();
+            }
+        }
+
+        RlibFlavor::StaticlibBase => {
+            let obj = codegen_results.allocator_module.as_ref().and_then(|m| m.object.as_ref());
+            if let Some(obj) = obj {
+                ab.add_file(obj);
+            }
+        }
+    }
+
+    ab
+}
+
+// Create a static archive
+//
+// This is essentially the same thing as an rlib, but it also involves adding
+// all of the upstream crates' objects into the archive. This will slurp in
+// all of the native libraries of upstream dependencies as well.
+//
+// Additionally, there's no way for us to link dynamic libraries, so we warn
+// that any dynamic library dependencies will not be linked in.
+//
+// There's no need to include metadata in a static archive, so take care not to
+// link in the metadata object file (and also don't prepare the archive with a
+// metadata file).
+fn link_staticlib<'a, B: ArchiveBuilder<'a>>(
+    sess: &'a Session,
+    codegen_results: &CodegenResults,
+    out_filename: &Path,
+    tempdir: &MaybeTempDir,
+) {
+    let mut ab =
+        link_rlib::<B>(sess, codegen_results, RlibFlavor::StaticlibBase, out_filename, tempdir);
+    let mut all_native_libs = vec![];
+
+    let res = each_linked_rlib(&codegen_results.crate_info, &mut |cnum, path| {
+        let name = &codegen_results.crate_info.crate_name[&cnum];
+        let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+
+        // Here when we include the rlib into our staticlib we need to make a
+        // decision whether to include the extra object files along the way.
+        // These extra object files come from statically included native
+        // libraries, but they may be cfg'd away with #[link(cfg(..))].
+        //
+        // This unstable feature, though, only needs liblibc to work. The only
+        // use case there is where musl is statically included in liblibc.rlib,
+        // so if we don't want the included version we just need to skip it. As
+        // a result the logic here is that if *any* linked library is cfg'd away
+        // we just skip all object files.
+        //
+        // Clearly this is not sufficient for a general purpose feature, and
+        // we'd want to read from the library's metadata to determine which
+        // object files come from where and selectively skip them.
+        let skip_object_files = native_libs
+            .iter()
+            .any(|lib| lib.kind == NativeLibKind::StaticBundle && !relevant_lib(sess, lib));
+        ab.add_rlib(
+            path,
+            &name.as_str(),
+            are_upstream_rust_objects_already_included(sess)
+                && !ignored_for_lto(sess, &codegen_results.crate_info, cnum),
+            skip_object_files,
+        )
+        .unwrap();
+
+        all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
+    });
+    if let Err(e) = res {
+        sess.fatal(&e);
+    }
+
+    ab.update_symbols();
+    ab.build();
+
+    if !all_native_libs.is_empty() {
+        if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
+            print_native_static_libs(sess, &all_native_libs);
+        }
+    }
+}
+
+// Create a dynamic library or executable
+//
+// This will invoke the system linker/cc to create the resulting file. This
+// links to all upstream files as well.
+fn link_natively<'a, B: ArchiveBuilder<'a>>(
+    sess: &'a Session,
+    crate_type: CrateType,
+    out_filename: &Path,
+    codegen_results: &CodegenResults,
+    tmpdir: &Path,
+    target_cpu: &str,
+) {
+    info!("preparing {:?} to {:?}", crate_type, out_filename);
+    let (linker_path, flavor) = linker_and_flavor(sess);
+    let mut cmd = linker_with_args::<B>(
+        &linker_path,
+        flavor,
+        sess,
+        crate_type,
+        tmpdir,
+        out_filename,
+        codegen_results,
+        target_cpu,
+    );
+
+    linker::disable_localization(&mut cmd);
+
+    for &(ref k, ref v) in &sess.target.target.options.link_env {
+        cmd.env(k, v);
+    }
+    for k in &sess.target.target.options.link_env_remove {
+        cmd.env_remove(k);
+    }
+
+    if sess.opts.debugging_opts.print_link_args {
+        println!("{:?}", &cmd);
+    }
+
+    // May have not found libraries in the right formats.
+    sess.abort_if_errors();
+
+    // Invoke the system linker
+    info!("{:?}", &cmd);
+    let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok();
+    let mut prog;
+    let mut i = 0;
+    loop {
+        i += 1;
+        prog = sess.time("run_linker", || exec_linker(sess, &cmd, out_filename, tmpdir));
+        let output = match prog {
+            Ok(ref output) => output,
+            Err(_) => break,
+        };
+        if output.status.success() {
+            break;
+        }
+        let mut out = output.stderr.clone();
+        out.extend(&output.stdout);
+        let out = String::from_utf8_lossy(&out);
+
+        // Check to see if the link failed with "unrecognized command line option:
+        // '-no-pie'" for gcc or "unknown argument: '-no-pie'" for clang. If so,
+        // re-run the link step without the -no-pie option. This is safe because
+        // if the linker doesn't support -no-pie then it should not default to
+        // linking executables as pie. Different versions of gcc seem to use
+        // different quotes in the error message so don't check for them.
+        if sess.target.target.options.linker_is_gnu
+            && flavor != LinkerFlavor::Ld
+            && (out.contains("unrecognized command line option")
+                || out.contains("unknown argument"))
+            && out.contains("-no-pie")
+            && cmd.get_args().iter().any(|e| e.to_string_lossy() == "-no-pie")
+        {
+            info!("linker output: {:?}", out);
+            warn!("Linker does not support -no-pie command line option. Retrying without.");
+            for arg in cmd.take_args() {
+                if arg.to_string_lossy() != "-no-pie" {
+                    cmd.arg(arg);
+                }
+            }
+            info!("{:?}", &cmd);
+            continue;
+        }
+
+        // Detect '-static-pie' used with an older version of gcc or clang not supporting it.
+        // Fallback from '-static-pie' to '-static' in that case.
+        if sess.target.target.options.linker_is_gnu
+            && flavor != LinkerFlavor::Ld
+            && (out.contains("unrecognized command line option")
+                || out.contains("unknown argument"))
+            && (out.contains("-static-pie") || out.contains("--no-dynamic-linker"))
+            && cmd.get_args().iter().any(|e| e.to_string_lossy() == "-static-pie")
+        {
+            info!("linker output: {:?}", out);
+            warn!(
+                "Linker does not support -static-pie command line option. Retrying with -static instead."
+            );
+            // Mirror `add_(pre,post)_link_objects` to replace CRT objects.
+            let self_contained = crt_objects_fallback(sess, crate_type);
+            let opts = &sess.target.target.options;
+            let pre_objects = if self_contained {
+                &opts.pre_link_objects_fallback
+            } else {
+                &opts.pre_link_objects
+            };
+            let post_objects = if self_contained {
+                &opts.post_link_objects_fallback
+            } else {
+                &opts.post_link_objects
+            };
+            let get_objects = |objects: &CrtObjects, kind| {
+                objects
+                    .get(&kind)
+                    .iter()
+                    .copied()
+                    .flatten()
+                    .map(|obj| get_object_file_path(sess, obj, self_contained).into_os_string())
+                    .collect::<Vec<_>>()
+            };
+            let pre_objects_static_pie = get_objects(pre_objects, LinkOutputKind::StaticPicExe);
+            let post_objects_static_pie = get_objects(post_objects, LinkOutputKind::StaticPicExe);
+            let mut pre_objects_static = get_objects(pre_objects, LinkOutputKind::StaticNoPicExe);
+            let mut post_objects_static = get_objects(post_objects, LinkOutputKind::StaticNoPicExe);
+            // Assume that we know insertion positions for the replacement arguments from replaced
+            // arguments, which is true for all supported targets.
+            assert!(pre_objects_static.is_empty() || !pre_objects_static_pie.is_empty());
+            assert!(post_objects_static.is_empty() || !post_objects_static_pie.is_empty());
+            for arg in cmd.take_args() {
+                if arg.to_string_lossy() == "-static-pie" {
+                    // Replace the output kind.
+                    cmd.arg("-static");
+                } else if pre_objects_static_pie.contains(&arg) {
+                    // Replace the pre-link objects (replace the first and remove the rest).
+                    cmd.args(mem::take(&mut pre_objects_static));
+                } else if post_objects_static_pie.contains(&arg) {
+                    // Replace the post-link objects (replace the first and remove the rest).
+                    cmd.args(mem::take(&mut post_objects_static));
+                } else {
+                    cmd.arg(arg);
+                }
+            }
+            info!("{:?}", &cmd);
+            continue;
+        }
+
+        // Here's a terribly awful hack that really shouldn't be present in any
+        // compiler. Here an environment variable is supported to automatically
+        // retry the linker invocation if the linker looks like it segfaulted.
+        //
+        // Gee that seems odd, normally segfaults are things we want to know
+        // about!  Unfortunately though in rust-lang/rust#38878 we're
+        // experiencing the linker segfaulting on Travis quite a bit which is
+        // causing quite a bit of pain to land PRs when they spuriously fail
+        // due to a segfault.
+        //
+        // The issue #38878 has some more debugging information on it as well,
+        // but this unfortunately looks like it's just a race condition in
+        // macOS's linker with some thread pool working in the background. It
+        // seems that no one currently knows a fix for this so in the meantime
+        // we're left with this...
+        if !retry_on_segfault || i > 3 {
+            break;
+        }
+        let msg_segv = "clang: error: unable to execute command: Segmentation fault: 11";
+        let msg_bus = "clang: error: unable to execute command: Bus error: 10";
+        if out.contains(msg_segv) || out.contains(msg_bus) {
+            warn!(
+                "looks like the linker segfaulted when we tried to call it, \
+                 automatically retrying again. cmd = {:?}, out = {}.",
+                cmd, out,
+            );
+            continue;
+        }
+
+        if is_illegal_instruction(&output.status) {
+            warn!(
+                "looks like the linker hit an illegal instruction when we \
+                 tried to call it, automatically retrying again. cmd = {:?}, \
+                 out = {}, status = {}.",
+                cmd, out, output.status,
+            );
+            continue;
+        }
+
+        #[cfg(unix)]
+        fn is_illegal_instruction(status: &ExitStatus) -> bool {
+            use std::os::unix::prelude::*;
+            status.signal() == Some(libc::SIGILL)
+        }
+
+        #[cfg(windows)]
+        fn is_illegal_instruction(_status: &ExitStatus) -> bool {
+            false
+        }
+    }
+
+    match prog {
+        Ok(prog) => {
+            fn escape_string(s: &[u8]) -> String {
+                str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
+                    let mut x = "Non-UTF-8 output: ".to_string();
+                    x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
+                    x
+                })
+            }
+            if !prog.status.success() {
+                let mut output = prog.stderr.clone();
+                output.extend_from_slice(&prog.stdout);
+                sess.struct_err(&format!(
+                    "linking with `{}` failed: {}",
+                    linker_path.display(),
+                    prog.status
+                ))
+                .note(&format!("{:?}", &cmd))
+                .note(&escape_string(&output))
+                .emit();
+
+                // If MSVC's `link.exe` was expected but the return code
+                // is not a Microsoft LNK error then suggest a way to fix or
+                // install the Visual Studio build tools.
+                if let Some(code) = prog.status.code() {
+                    if sess.target.target.options.is_like_msvc
+                        && flavor == LinkerFlavor::Msvc
+                        // Respect the command line override
+                        && sess.opts.cg.linker.is_none()
+                        // Match exactly "link.exe"
+                        && linker_path.to_str() == Some("link.exe")
+                        // All Microsoft `link.exe` linking error codes are
+                        // four digit numbers in the range 1000 to 9999 inclusive
+                        && (code < 1000 || code > 9999)
+                    {
+                        let is_vs_installed = windows_registry::find_vs_version().is_ok();
+                        let has_linker = windows_registry::find_tool(
+                            &sess.opts.target_triple.triple(),
+                            "link.exe",
+                        )
+                        .is_some();
+
+                        sess.note_without_error("`link.exe` returned an unexpected error");
+                        if is_vs_installed && has_linker {
+                            // the linker is broken
+                            sess.note_without_error(
+                                "the Visual Studio build tools may need to be repaired \
+                                using the Visual Studio installer",
+                            );
+                            sess.note_without_error(
+                                "or a necessary component may be missing from the \
+                                \"C++ build tools\" workload",
+                            );
+                        } else if is_vs_installed {
+                            // the linker is not installed
+                            sess.note_without_error(
+                                "in the Visual Studio installer, ensure the \
+                                \"C++ build tools\" workload is selected",
+                            );
+                        } else {
+                            // visual studio is not installed
+                            sess.note_without_error(
+                                "you may need to install Visual Studio build tools with the \
+                                \"C++ build tools\" workload",
+                            );
+                        }
+                    }
+                }
+
+                sess.abort_if_errors();
+            }
+            info!("linker stderr:\n{}", escape_string(&prog.stderr));
+            info!("linker stdout:\n{}", escape_string(&prog.stdout));
+        }
+        Err(e) => {
+            let linker_not_found = e.kind() == io::ErrorKind::NotFound;
+
+            let mut linker_error = {
+                if linker_not_found {
+                    sess.struct_err(&format!("linker `{}` not found", linker_path.display()))
+                } else {
+                    sess.struct_err(&format!(
+                        "could not exec the linker `{}`",
+                        linker_path.display()
+                    ))
+                }
+            };
+
+            linker_error.note(&e.to_string());
+
+            if !linker_not_found {
+                linker_error.note(&format!("{:?}", &cmd));
+            }
+
+            linker_error.emit();
+
+            if sess.target.target.options.is_like_msvc && linker_not_found {
+                sess.note_without_error(
+                    "the msvc targets depend on the msvc linker \
+                     but `link.exe` was not found",
+                );
+                sess.note_without_error(
+                    "please ensure that VS 2013, VS 2015, VS 2017 or VS 2019 \
+                     was installed with the Visual C++ option",
+                );
+            }
+            sess.abort_if_errors();
+        }
+    }
+
+    // On macOS, debuggers need this utility to get run to do some munging of
+    // the symbols. Note, though, that if the object files are being preserved
+    // for their debug information there's no need for us to run dsymutil.
+    if sess.target.target.options.is_like_osx
+        && sess.opts.debuginfo != DebugInfo::None
+        && !preserve_objects_for_their_debuginfo(sess)
+    {
+        if let Err(e) = Command::new("dsymutil").arg(out_filename).output() {
+            sess.fatal(&format!("failed to run dsymutil: {}", e))
+        }
+    }
+}
+
+fn link_sanitizers(sess: &Session, crate_type: CrateType, linker: &mut dyn Linker) {
+    // On macOS the runtimes are distributed as dylibs which should be linked to
+    // both executables and dynamic shared objects. Everywhere else the runtimes
+    // are currently distributed as static libraries which should be linked to
+    // executables only.
+    let needs_runtime = match crate_type {
+        CrateType::Executable => true,
+        CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => {
+            sess.target.target.options.is_like_osx
+        }
+        CrateType::Rlib | CrateType::Staticlib => false,
+    };
+
+    if !needs_runtime {
+        return;
+    }
+
+    let sanitizer = sess.opts.debugging_opts.sanitizer;
+    if sanitizer.contains(SanitizerSet::ADDRESS) {
+        link_sanitizer_runtime(sess, linker, "asan");
+    }
+    if sanitizer.contains(SanitizerSet::LEAK) {
+        link_sanitizer_runtime(sess, linker, "lsan");
+    }
+    if sanitizer.contains(SanitizerSet::MEMORY) {
+        link_sanitizer_runtime(sess, linker, "msan");
+    }
+    if sanitizer.contains(SanitizerSet::THREAD) {
+        link_sanitizer_runtime(sess, linker, "tsan");
+    }
+}
+
+fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
+    let default_sysroot = filesearch::get_or_default_sysroot();
+    let default_tlib =
+        filesearch::make_target_lib_path(&default_sysroot, sess.opts.target_triple.triple());
+    let channel = option_env!("CFG_RELEASE_CHANNEL")
+        .map(|channel| format!("-{}", channel))
+        .unwrap_or_default();
+
+    match sess.opts.target_triple.triple() {
+        "x86_64-apple-darwin" => {
+            // On Apple platforms, the sanitizer is always built as a dylib, and
+            // LLVM will link to `@rpath/*.dylib`, so we need to specify an
+            // rpath to the library as well (the rpath should be absolute, see
+            // PR #41352 for details).
+            let libname = format!("rustc{}_rt.{}", channel, name);
+            let rpath = default_tlib.to_str().expect("non-utf8 component in path");
+            linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
+            linker.link_dylib(Symbol::intern(&libname));
+        }
+        "aarch64-fuchsia"
+        | "aarch64-unknown-linux-gnu"
+        | "x86_64-fuchsia"
+        | "x86_64-unknown-freebsd"
+        | "x86_64-unknown-linux-gnu" => {
+            let filename = format!("librustc{}_rt.{}.a", channel, name);
+            let path = default_tlib.join(&filename);
+            linker.link_whole_rlib(&path);
+        }
+        _ => {}
+    }
+}
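
The runtime library names assembled in `link_sanitizer_runtime` follow a fixed pattern. A small standalone sketch (the channel string below is illustrative, not taken from an actual build):

```rust
// Sketch of the naming scheme used above: Apple targets link a dylib by its
// stem, the other supported targets link a static archive by file name.
fn runtime_names(channel: Option<&str>, name: &str) -> (String, String) {
    let channel = channel.map(|c| format!("-{}", c)).unwrap_or_default();
    let dylib_stem = format!("rustc{}_rt.{}", channel, name);
    let static_file = format!("librustc{}_rt.{}.a", channel, name);
    (dylib_stem, static_file)
}

fn main() {
    // e.g. on a hypothetical "nightly" channel, the ASan runtime would be
    // "rustc-nightly_rt.asan" (dylib stem) or "librustc-nightly_rt.asan.a".
    let (dylib, archive) = runtime_names(Some("nightly"), "asan");
    println!("{} / {}", dylib, archive);
}
```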
+
+/// Returns a boolean indicating whether the specified crate should be ignored
+/// during LTO.
+///
+/// Crates ignored during LTO are not lumped together in the "massive object
+/// file" that we create and are linked in their normal rlib states. See
+/// comments below for what crates do not participate in LTO.
+///
+/// It's unusual for a crate to not participate in LTO. Typically only
+/// compiler-specific and unstable crates have a reason to not participate in
+/// LTO.
+pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool {
+    // If our target enables builtin function lowering in LLVM then the
+    // crates providing these functions don't participate in LTO (e.g.
+    // no_builtins or compiler builtins crates).
+    !sess.target.target.options.no_builtins
+        && (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum))
+}
+
+fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
+    fn infer_from(
+        sess: &Session,
+        linker: Option<PathBuf>,
+        flavor: Option<LinkerFlavor>,
+    ) -> Option<(PathBuf, LinkerFlavor)> {
+        match (linker, flavor) {
+            (Some(linker), Some(flavor)) => Some((linker, flavor)),
+            // only the linker flavor is known; use the default linker for the selected flavor
+            (None, Some(flavor)) => Some((
+                PathBuf::from(match flavor {
+                    LinkerFlavor::Em => {
+                        if cfg!(windows) {
+                            "emcc.bat"
+                        } else {
+                            "emcc"
+                        }
+                    }
+                    LinkerFlavor::Gcc => {
+                        if cfg!(any(target_os = "solaris", target_os = "illumos")) {
+                            // On historical Solaris systems, "cc" may have
+                            // been Sun Studio, which is not flag-compatible
+                            // with "gcc".  This history casts a long shadow,
+                            // and many modern illumos distributions today
+                            // ship GCC as "gcc" without also making it
+                            // available as "cc".
+                            "gcc"
+                        } else {
+                            "cc"
+                        }
+                    }
+                    LinkerFlavor::Ld => "ld",
+                    LinkerFlavor::Msvc => "link.exe",
+                    LinkerFlavor::Lld(_) => "lld",
+                    LinkerFlavor::PtxLinker => "rust-ptx-linker",
+                }),
+                flavor,
+            )),
+            (Some(linker), None) => {
+                let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| {
+                    sess.fatal("couldn't extract file stem from specified linker")
+                });
+
+                let flavor = if stem == "emcc" {
+                    LinkerFlavor::Em
+                } else if stem == "gcc"
+                    || stem.ends_with("-gcc")
+                    || stem == "clang"
+                    || stem.ends_with("-clang")
+                {
+                    LinkerFlavor::Gcc
+                } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
+                    LinkerFlavor::Ld
+                } else if stem == "link" || stem == "lld-link" {
+                    LinkerFlavor::Msvc
+                } else if stem == "lld" || stem == "rust-lld" {
+                    LinkerFlavor::Lld(sess.target.target.options.lld_flavor)
+                } else {
+                    // fall back to the value in the target spec
+                    sess.target.target.linker_flavor
+                };
+
+                Some((linker, flavor))
+            }
+            (None, None) => None,
+        }
+    }
+
+    // linker and linker flavor specified via command line have precedence over what the target
+    // specification specifies
+    if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
+        return ret;
+    }
+
+    if let Some(ret) = infer_from(
+        sess,
+        sess.target.target.options.linker.clone().map(PathBuf::from),
+        Some(sess.target.target.linker_flavor),
+    ) {
+        return ret;
+    }
+
+    bug!("Not enough information provided to determine how to invoke the linker");
+}
+
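
The file-stem heuristic in `linker_and_flavor` can be tried out on its own. A sketch that mirrors the stem matching above, with plain strings standing in for the `LinkerFlavor` variants (the example stems in `main` are arbitrary):

```rust
// Sketch of the stem -> flavor mapping from `linker_and_flavor`.
fn infer_flavor(stem: &str) -> &'static str {
    if stem == "emcc" {
        "Em"
    } else if stem == "gcc"
        || stem.ends_with("-gcc")
        || stem == "clang"
        || stem.ends_with("-clang")
    {
        "Gcc"
    } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
        "Ld"
    } else if stem == "link" || stem == "lld-link" {
        "Msvc"
    } else if stem == "lld" || stem == "rust-lld" {
        "Lld"
    } else {
        "target-spec default"
    }
}

fn main() {
    for stem in ["arm-none-eabi-gcc", "ld.lld", "lld-link", "mold"].iter() {
        println!("{} -> {}", stem, infer_flavor(stem));
    }
}
```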
+/// Returns a boolean indicating whether we should preserve the object files on
+/// the filesystem for their debug information. This is often useful with
+/// split-dwarf like schemes.
+fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool {
+    // If the objects don't have debuginfo there's nothing to preserve.
+    if sess.opts.debuginfo == config::DebugInfo::None {
+        return false;
+    }
+
+    // If we're only producing artifacts that are archives, no need to preserve
+    // the objects as they're losslessly contained inside the archives.
+    let output_linked =
+        sess.crate_types().iter().any(|&x| x != CrateType::Rlib && x != CrateType::Staticlib);
+    if !output_linked {
+        return false;
+    }
+
+    // If we're on OSX then the equivalent of split dwarf is turned on by
+    // default. The final executable won't actually have any debug information
+    // except it'll have pointers to elsewhere. Historically we've always run
+    // `dsymutil` to "link all the dwarf together" but this is actually sort of
+    // a bummer for incremental compilation! (the whole point of split dwarf is
+    // that you don't do this sort of dwarf link).
+    //
+    // Basically as a result this just means that if we're on OSX and we're
+    // *not* running dsymutil then the object files are the only source of truth
+    // for debug information, so we must preserve them.
+    if sess.target.target.options.is_like_osx {
+        return !sess.opts.debugging_opts.run_dsymutil;
+    }
+
+    false
+}
+
+pub fn archive_search_paths(sess: &Session) -> Vec<PathBuf> {
+    sess.target_filesearch(PathKind::Native).search_path_dirs()
+}
+
+enum RlibFlavor {
+    Normal,
+    StaticlibBase,
+}
+
+fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
+    let lib_args: Vec<_> = all_native_libs
+        .iter()
+        .filter(|l| relevant_lib(sess, l))
+        .filter_map(|lib| {
+            let name = lib.name?;
+            match lib.kind {
+                NativeLibKind::StaticNoBundle
+                | NativeLibKind::Dylib
+                | NativeLibKind::Unspecified => {
+                    if sess.target.target.options.is_like_msvc {
+                        Some(format!("{}.lib", name))
+                    } else {
+                        Some(format!("-l{}", name))
+                    }
+                }
+                NativeLibKind::Framework => {
+                    // ld-only syntax, since there are no frameworks in MSVC
+                    Some(format!("-framework {}", name))
+                }
+                // These are included, no need to print them
+                NativeLibKind::StaticBundle | NativeLibKind::RawDylib => None,
+            }
+        })
+        .collect();
+    if !lib_args.is_empty() {
+        sess.note_without_error(
+            "Link against the following native artifacts when linking \
+                                 against this static library. The order and any duplication \
+                                 can be significant on some platforms.",
+        );
+        // Prefix for greppability
+        sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" ")));
+    }
+}
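
The per-kind formatting in `print_native_static_libs` reduces to a small mapping. A sketch with a stand-in enum for `NativeLibKind` (the names in `main` are examples only):

```rust
// Stand-in for `NativeLibKind`, covering the cases matched above.
enum LibKind {
    StaticNoBundle,
    Dylib,
    Unspecified,
    Framework,
    StaticBundle,
    RawDylib,
}

// Sketch of the flag each kind contributes to the `native-static-libs` note.
fn lib_arg(kind: &LibKind, name: &str, is_like_msvc: bool) -> Option<String> {
    match kind {
        LibKind::StaticNoBundle | LibKind::Dylib | LibKind::Unspecified => Some(
            if is_like_msvc { format!("{}.lib", name) } else { format!("-l{}", name) },
        ),
        // ld-only syntax, since there are no frameworks in MSVC.
        LibKind::Framework => Some(format!("-framework {}", name)),
        // Already included in the archive, so nothing to print.
        LibKind::StaticBundle | LibKind::RawDylib => None,
    }
}

fn main() {
    assert_eq!(lib_arg(&LibKind::Dylib, "z", false), Some("-lz".to_string()));
    assert_eq!(lib_arg(&LibKind::Dylib, "z", true), Some("z.lib".to_string()));
    assert_eq!(lib_arg(&LibKind::StaticBundle, "m", false), None);
}
```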
+
+// Because the windows-gnu target is meant to be self-contained for pure Rust code, it bundles
+// its own mingw-w64 libraries. These libraries are usually not compatible with mingw-w64
+// installed in the system. This breaks many cases where Rust is mixed with other languages
+// (e.g. *-sys crates).
+// We prefer system mingw-w64 libraries if they are available to avoid this issue.
+fn get_crt_libs_path(sess: &Session) -> Option<PathBuf> {
+    fn find_exe_in_path<P>(exe_name: P) -> Option<PathBuf>
+    where
+        P: AsRef<Path>,
+    {
+        for dir in env::split_paths(&env::var_os("PATH")?) {
+            let full_path = dir.join(&exe_name);
+            if full_path.is_file() {
+                return Some(fix_windows_verbatim_for_gcc(&full_path));
+            }
+        }
+        None
+    }
+
+    fn probe(sess: &Session) -> Option<PathBuf> {
+        if let (linker, LinkerFlavor::Gcc) = linker_and_flavor(&sess) {
+            let linker_path = if cfg!(windows) && linker.extension().is_none() {
+                linker.with_extension("exe")
+            } else {
+                linker
+            };
+            if let Some(linker_path) = find_exe_in_path(linker_path) {
+                let mingw_arch = match &sess.target.target.arch {
+                    x if x == "x86" => "i686",
+                    x => x,
+                };
+                let mingw_bits = &sess.target.target.target_pointer_width;
+                let mingw_dir = format!("{}-w64-mingw32", mingw_arch);
+                // Here we have path/bin/gcc but we need path/
+                let mut path = linker_path;
+                path.pop();
+                path.pop();
+                // Loosely based on the Clang MinGW driver
+                let probe_paths = vec![
+                    path.join(&mingw_dir).join("lib"),                // Typical path
+                    path.join(&mingw_dir).join("sys-root/mingw/lib"), // Rare path
+                    path.join(format!(
+                        "lib/mingw/tools/install/mingw{}/{}/lib",
+                        &mingw_bits, &mingw_dir
+                    )), // Chocolatey is creative
+                ];
+                for probe_path in probe_paths {
+                    if probe_path.join("crt2.o").exists() {
+                        return Some(probe_path);
+                    };
+                }
+            };
+        };
+        None
+    }
+
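+    // Cache the probe result in `system_library_path`: the outer `Option` is
+    // "have we probed yet?", the inner `Option` is the probe's outcome.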
+    let mut system_library_path = sess.system_library_path.borrow_mut();
+    match &*system_library_path {
+        Some(Some(compiler_libs_path)) => Some(compiler_libs_path.clone()),
+        Some(None) => None,
+        None => {
+            let path = probe(sess);
+            *system_library_path = Some(path.clone());
+            path
+        }
+    }
+}
+
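+/// Locate an object file like `crt2.o`, checking in order: the system mingw
+/// directories (windows-gnu only), the target lib directory, the special
+/// self-contained directory (if enabled), then the `-L` search paths, finally
+/// falling back to the bare file name.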
+fn get_object_file_path(sess: &Session, name: &str, self_contained: bool) -> PathBuf {
+    // Prefer system {,dll}crt2.o objects; see the comment on `get_crt_libs_path` for details.
+    if sess.opts.debugging_opts.link_self_contained.is_none()
+        && sess.target.target.llvm_target.contains("windows-gnu")
+    {
+        if let Some(compiler_libs_path) = get_crt_libs_path(sess) {
+            let file_path = compiler_libs_path.join(name);
+            if file_path.exists() {
+                return file_path;
+            }
+        }
+    }
+    let fs = sess.target_filesearch(PathKind::Native);
+    let file_path = fs.get_lib_path().join(name);
+    if file_path.exists() {
+        return file_path;
+    }
+    // Special directory with objects used only in self-contained linkage mode
+    if self_contained {
+        let file_path = fs.get_self_contained_lib_path().join(name);
+        if file_path.exists() {
+            return file_path;
+        }
+    }
+    for search_path in fs.search_paths() {
+        let file_path = search_path.dir.join(name);
+        if file_path.exists() {
+            return file_path;
+        }
+    }
+    PathBuf::from(name)
+}
+
+fn exec_linker(
+    sess: &Session,
+    cmd: &Command,
+    out_filename: &Path,
+    tmpdir: &Path,
+) -> io::Result<Output> {
+    // When attempting to spawn the linker we run a risk of blowing out the
+    // size limits for spawning a new process with respect to the arguments
+    // we pass on the command line.
+    //
+    // Here we attempt to handle errors from the OS saying "your list of
+    // arguments is too big" by reinvoking the linker again with an `@`-file
+    // that contains all the arguments. The theory is that this is then
+    // accepted on all linkers and the linker will read all its options out of
+    // there instead of looking at the command line.
+    if !cmd.very_likely_to_exceed_some_spawn_limit() {
+        match cmd.command().stdout(Stdio::piped()).stderr(Stdio::piped()).spawn() {
+            Ok(child) => {
+                let output = child.wait_with_output();
+                flush_linked_file(&output, out_filename)?;
+                return output;
+            }
+            Err(ref e) if command_line_too_big(e) => {
+                info!("command line to linker was too big: {}", e);
+            }
+            Err(e) => return Err(e),
+        }
+    }
+
+    info!("falling back to passing arguments to linker via an @-file");
+    let mut cmd2 = cmd.clone();
+    let mut args = String::new();
+    for arg in cmd2.take_args() {
+        args.push_str(
+            &Escape {
+                arg: arg.to_str().unwrap(),
+                is_like_msvc: sess.target.target.options.is_like_msvc,
+            }
+            .to_string(),
+        );
+        args.push_str("\n");
+    }
+    let file = tmpdir.join("linker-arguments");
+    let bytes = if sess.target.target.options.is_like_msvc {
+        let mut out = Vec::with_capacity((1 + args.len()) * 2);
+        // start the stream with a UTF-16 BOM
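+        // (0xFEFF encoded little-endian yields the bytes FF FE)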
+        for c in std::iter::once(0xFEFF).chain(args.encode_utf16()) {
+            // encode in little endian
+            out.push(c as u8);
+            out.push((c >> 8) as u8);
+        }
+        out
+    } else {
+        args.into_bytes()
+    };
+    fs::write(&file, &bytes)?;
+    cmd2.arg(format!("@{}", file.display()));
+    info!("invoking linker {:?}", cmd2);
+    let output = cmd2.output();
+    flush_linked_file(&output, out_filename)?;
+    return output;
+
+    #[cfg(unix)]
+    fn flush_linked_file(_: &io::Result<Output>, _: &Path) -> io::Result<()> {
+        Ok(())
+    }
+
+    #[cfg(windows)]
+    fn flush_linked_file(
+        command_output: &io::Result<Output>,
+        out_filename: &Path,
+    ) -> io::Result<()> {
+        // On Windows, under high I/O load, output buffers are sometimes not flushed,
+        // even long after process exit, causing nasty, non-reproducible output bugs.
+        //
+        // File::sync_all() calls FlushFileBuffers() down the line, which solves the problem.
+        //
+        // A full writeup of the original Chrome bug can be found at
+        // randomascii.wordpress.com/2018/02/25/compiler-bug-linker-bug-windows-kernel-bug/amp
+
+        if let &Ok(ref out) = command_output {
+            if out.status.success() {
+                if let Ok(of) = fs::OpenOptions::new().write(true).open(out_filename) {
+                    of.sync_all()?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    #[cfg(unix)]
+    fn command_line_too_big(err: &io::Error) -> bool {
+        err.raw_os_error() == Some(::libc::E2BIG)
+    }
+
+    #[cfg(windows)]
+    fn command_line_too_big(err: &io::Error) -> bool {
+        const ERROR_FILENAME_EXCED_RANGE: i32 = 206;
+        err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE)
+    }
+
+    struct Escape<'a> {
+        arg: &'a str,
+        is_like_msvc: bool,
+    }
+
+    impl<'a> fmt::Display for Escape<'a> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            if self.is_like_msvc {
+                // This is "documented" at
+                // https://docs.microsoft.com/en-us/cpp/build/reference/at-specify-a-linker-response-file
+                //
+                // Unfortunately I couldn't find a great specification of the
+                // syntax online, but some local testing showed that this
+                // seems sufficient to catch at least a few edge cases.
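+                // For example (illustrative), `C:\dir with spaces\foo.obj`
+                // is written as `"C:\dir with spaces\foo.obj"`, with any
+                // embedded quotes backslash-escaped.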
+                write!(f, "\"")?;
+                for c in self.arg.chars() {
+                    match c {
+                        '"' => write!(f, "\\{}", c)?,
+                        c => write!(f, "{}", c)?,
+                    }
+                }
+                write!(f, "\"")?;
+            } else {
+                // This is documented at https://linux.die.net/man/1/ld, namely:
+                //
+                // > Options in file are separated by whitespace. A whitespace
+                // > character may be included in an option by surrounding the
+                // > entire option in either single or double quotes. Any
+                // > character (including a backslash) may be included by
+                // > prefixing the character to be included with a backslash.
+                //
+                // We put an argument on each line, so all we need to do is
+                // ensure the line is interpreted as one whole argument.
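+                // For example (illustrative), `foo bar` is written as
+                // `foo\ bar` so ld reads it back as a single option.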
+                for c in self.arg.chars() {
+                    match c {
+                        '\\' | ' ' => write!(f, "\\{}", c)?,
+                        c => write!(f, "{}", c)?,
+                    }
+                }
+            }
+            Ok(())
+        }
+    }
+}
+
+fn link_output_kind(sess: &Session, crate_type: CrateType) -> LinkOutputKind {
+    let kind = match (crate_type, sess.crt_static(Some(crate_type)), sess.relocation_model()) {
+        (CrateType::Executable, false, RelocModel::Pic) => LinkOutputKind::DynamicPicExe,
+        (CrateType::Executable, false, _) => LinkOutputKind::DynamicNoPicExe,
+        (CrateType::Executable, true, RelocModel::Pic) => LinkOutputKind::StaticPicExe,
+        (CrateType::Executable, true, _) => LinkOutputKind::StaticNoPicExe,
+        (_, true, _) => LinkOutputKind::StaticDylib,
+        (_, false, _) => LinkOutputKind::DynamicDylib,
+    };
+
+    // Adjust the output kind to target capabilities.
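+    // For example, a static PIC executable degrades to a static no-PIC one on
+    // targets that don't support static position-independent executables.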
+    let opts = &sess.target.target.options;
+    let pic_exe_supported = opts.position_independent_executables;
+    let static_pic_exe_supported = opts.static_position_independent_executables;
+    let static_dylib_supported = opts.crt_static_allows_dylibs;
+    match kind {
+        LinkOutputKind::DynamicPicExe if !pic_exe_supported => LinkOutputKind::DynamicNoPicExe,
+        LinkOutputKind::StaticPicExe if !static_pic_exe_supported => LinkOutputKind::StaticNoPicExe,
+        LinkOutputKind::StaticDylib if !static_dylib_supported => LinkOutputKind::DynamicDylib,
+        _ => kind,
+    }
+}
+
+/// Whether we link to our own CRT objects instead of relying on gcc to pull them in.
+/// We only provide such support for a very limited number of targets.
+fn crt_objects_fallback(sess: &Session, crate_type: CrateType) -> bool {
+    if let Some(self_contained) = sess.opts.debugging_opts.link_self_contained {
+        return self_contained;
+    }
+
+    match sess.target.target.options.crt_objects_fallback {
+        // FIXME: Find a better heuristic for "native musl toolchain is available",
+        // based on host and linker path, for example.
+        // (https://github.com/rust-lang/rust/pull/71769#issuecomment-626330237).
+        Some(CrtObjectsFallback::Musl) => sess.crt_static(Some(crate_type)),
+        // FIXME: Find some heuristic for "native mingw toolchain is available",
+        // likely based on `get_crt_libs_path` (https://github.com/rust-lang/rust/pull/67429).
+        Some(CrtObjectsFallback::Mingw) => {
+            sess.host == sess.target.target && sess.target.target.target_vendor != "uwp"
+        }
+        // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
+        Some(CrtObjectsFallback::Wasm) => true,
+        None => false,
+    }
+}
+
+/// Add pre-link object files defined by the target spec.
+fn add_pre_link_objects(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    link_output_kind: LinkOutputKind,
+    self_contained: bool,
+) {
+    let opts = &sess.target.target.options;
+    let objects =
+        if self_contained { &opts.pre_link_objects_fallback } else { &opts.pre_link_objects };
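+    // `get` returns an `Option<&Vec<_>>`; iterating it visits the list's
+    // entries when one is present and does nothing otherwise.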
+    for obj in objects.get(&link_output_kind).iter().copied().flatten() {
+        cmd.add_object(&get_object_file_path(sess, obj, self_contained));
+    }
+}
+
+/// Add post-link object files defined by the target spec.
+fn add_post_link_objects(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    link_output_kind: LinkOutputKind,
+    self_contained: bool,
+) {
+    let opts = &sess.target.target.options;
+    let objects =
+        if self_contained { &opts.post_link_objects_fallback } else { &opts.post_link_objects };
+    for obj in objects.get(&link_output_kind).iter().copied().flatten() {
+        cmd.add_object(&get_object_file_path(sess, obj, self_contained));
+    }
+}
+
+/// Add arbitrary "pre-link" args defined by the target spec or from command line.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+    if let Some(args) = sess.target.target.options.pre_link_args.get(&flavor) {
+        cmd.args(args);
+    }
+    cmd.args(&sess.opts.debugging_opts.pre_link_args);
+}
+
+/// Add a link script embedded in the target, if applicable.
+fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) {
+    match (crate_type, &sess.target.target.options.link_script) {
+        (CrateType::Cdylib | CrateType::Executable, Some(script)) => {
+            if !sess.target.target.options.linker_is_gnu {
+                sess.fatal("can only use link script when linking with GNU-like linker");
+            }
+
+            let file_name = ["rustc", &sess.target.target.llvm_target, "linkfile.ld"].join("-");
+
+            let path = tmpdir.join(file_name);
+            if let Err(e) = fs::write(&path, script) {
+                sess.fatal(&format!("failed to write link script to {}: {}", path.display(), e));
+            }
+
+            cmd.arg("--script");
+            cmd.arg(path);
+        }
+        _ => {}
+    }
+}
+
+/// Add arbitrary "user defined" args defined from command line and by `#[link_args]` attributes.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_user_defined_link_args(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    codegen_results: &CodegenResults,
+) {
+    cmd.args(&sess.opts.cg.link_args);
+    cmd.args(&*codegen_results.crate_info.link_args);
+}
+
+/// Add arbitrary "late link" args defined by the target spec.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_late_link_args(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    flavor: LinkerFlavor,
+    crate_type: CrateType,
+    codegen_results: &CodegenResults,
+) {
+    if let Some(args) = sess.target.target.options.late_link_args.get(&flavor) {
+        cmd.args(args);
+    }
+    let any_dynamic_crate = crate_type == CrateType::Dylib
+        || codegen_results.crate_info.dependency_formats.iter().any(|(ty, list)| {
+            *ty == crate_type && list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+        });
+    if any_dynamic_crate {
+        if let Some(args) = sess.target.target.options.late_link_args_dynamic.get(&flavor) {
+            cmd.args(args);
+        }
+    } else {
+        if let Some(args) = sess.target.target.options.late_link_args_static.get(&flavor) {
+            cmd.args(args);
+        }
+    }
+}
+
+/// Add arbitrary "post-link" args defined by the target spec.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+    if let Some(args) = sess.target.target.options.post_link_args.get(&flavor) {
+        cmd.args(args);
+    }
+}
+
+/// Add object files containing code from the current crate.
+fn add_local_crate_regular_objects(cmd: &mut dyn Linker, codegen_results: &CodegenResults) {
+    for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+        cmd.add_object(obj);
+    }
+}
+
+/// Add object files for allocator code linked once for the whole crate tree.
+fn add_local_crate_allocator_objects(cmd: &mut dyn Linker, codegen_results: &CodegenResults) {
+    if let Some(obj) = codegen_results.allocator_module.as_ref().and_then(|m| m.object.as_ref()) {
+        cmd.add_object(obj);
+    }
+}
+
+/// Add object files containing metadata for the current crate.
+fn add_local_crate_metadata_objects(
+    cmd: &mut dyn Linker,
+    crate_type: CrateType,
+    codegen_results: &CodegenResults,
+) {
+    // When linking a dynamic library, we put the metadata into a section of the
+    // executable. This metadata is in a separate object file from the main
+    // object file, so we link that in here.
+    if crate_type == CrateType::Dylib || crate_type == CrateType::ProcMacro {
+        if let Some(obj) = codegen_results.metadata_module.as_ref().and_then(|m| m.object.as_ref())
+        {
+            cmd.add_object(obj);
+        }
+    }
+}
+
+/// Link native libraries corresponding to the current crate and all libraries corresponding to
+/// all its dependency crates.
+/// FIXME: Consider combining this with the functions above adding object files for the local crate.
+fn link_local_crate_native_libs_and_dependent_crate_libs<'a, B: ArchiveBuilder<'a>>(
+    cmd: &mut dyn Linker,
+    sess: &'a Session,
+    crate_type: CrateType,
+    codegen_results: &CodegenResults,
+    tmpdir: &Path,
+) {
+    // Take careful note of the ordering of the arguments we pass to the linker
+    // here. Linkers will assume that things on the left depend on things to the
+    // right. Things on the right cannot depend on things on the left. This is
+    // all formally implemented in terms of resolving symbols (libs on the right
+    // resolve unknown symbols of libs on the left, but not vice versa).
+    //
+    // For this reason, we have organized the arguments we pass to the linker as
+    // such:
+    //
+    // 1. The local object that LLVM just generated
+    // 2. Local native libraries
+    // 3. Upstream rust libraries
+    // 4. Upstream native libraries
+    //
+    // The rationale behind this ordering is that those items lower down in the
+    // list can't depend on items higher up in the list. For example nothing can
+    // depend on what we just generated (e.g., that'd be a circular dependency).
+    // Upstream rust libraries are not allowed to depend on our local native
+    // libraries, as that would violate the structure of the DAG; in that
+    // scenario they are required to link to them as well in a shared fashion.
+    //
+    // Note that upstream rust libraries may contain native dependencies as
+    // well, but they also can't depend on what we just started to add to the
+    // link line. And finally upstream native libraries can't depend on anything
+    // in this DAG so far because they're only dylibs and dylibs can only depend
+    // on other dylibs (e.g., other native deps).
+    //
+    // If -Zlink-native-libraries=false is set, then the assumption is that an
+    // external build system already has the native dependencies defined, and it
+    // will provide them to the linker itself.
+    if sess.opts.debugging_opts.link_native_libraries {
+        add_local_native_libraries(cmd, sess, codegen_results);
+    }
+    add_upstream_rust_crates::<B>(cmd, sess, codegen_results, crate_type, tmpdir);
+    if sess.opts.debugging_opts.link_native_libraries {
+        add_upstream_native_libraries(cmd, sess, codegen_results, crate_type);
+    }
+}
+
+/// Add sysroot and other globally set directories to the directory search list.
+fn add_library_search_dirs(cmd: &mut dyn Linker, sess: &Session, self_contained: bool) {
+    // Prefer system mingw-w64 libs, see get_crt_libs_path comment for more details.
+    if sess.opts.debugging_opts.link_self_contained.is_none()
+        && cfg!(windows)
+        && sess.target.target.llvm_target.contains("windows-gnu")
+    {
+        if let Some(compiler_libs_path) = get_crt_libs_path(sess) {
+            cmd.include_path(&compiler_libs_path);
+        }
+    }
+
+    // The default library location, we need this to find the runtime.
+    // The location of crates will be determined as needed.
+    let lib_path = sess.target_filesearch(PathKind::All).get_lib_path();
+    cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+
+    // Special directory with libraries used only in self-contained linkage mode
+    if self_contained {
+        let lib_path = sess.target_filesearch(PathKind::All).get_self_contained_lib_path();
+        cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+    }
+}
+
+/// Add options making relocation sections in the produced ELF files read-only
+/// and suppressing lazy binding.
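+/// On GNU-style linkers, partial relro typically maps to `-z relro` and full
+/// relro to `-z relro -z now`.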
+fn add_relro_args(cmd: &mut dyn Linker, sess: &Session) {
+    match sess.opts.debugging_opts.relro_level.unwrap_or(sess.target.target.options.relro_level) {
+        RelroLevel::Full => cmd.full_relro(),
+        RelroLevel::Partial => cmd.partial_relro(),
+        RelroLevel::Off => cmd.no_relro(),
+        RelroLevel::None => {}
+    }
+}
+
+/// Add library search paths used at runtime by dynamic linkers.
+fn add_rpath_args(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    codegen_results: &CodegenResults,
+    out_filename: &Path,
+) {
+    // FIXME (#2397): At some point we want to rpath our guesses as to
+    // where extern libraries might live, based on the
+    // addl_lib_search_paths
+    if sess.opts.cg.rpath {
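+        // Typically this emits `$ORIGIN`-relative rpath flags such as
+        // `-Wl,-rpath,$ORIGIN/../lib` (illustrative).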
+        let target_triple = sess.opts.target_triple.triple();
+        let mut get_install_prefix_lib_path = || {
+            let install_prefix = option_env!("CFG_PREFIX").expect("CFG_PREFIX");
+            let tlib = filesearch::relative_target_lib_path(&sess.sysroot, target_triple);
+            let mut path = PathBuf::from(install_prefix);
+            path.push(&tlib);
+
+            path
+        };
+        let mut rpath_config = RPathConfig {
+            used_crates: &codegen_results.crate_info.used_crates_dynamic,
+            out_filename: out_filename.to_path_buf(),
+            has_rpath: sess.target.target.options.has_rpath,
+            is_like_osx: sess.target.target.options.is_like_osx,
+            linker_is_gnu: sess.target.target.options.linker_is_gnu,
+            get_install_prefix_lib_path: &mut get_install_prefix_lib_path,
+        };
+        cmd.args(&rpath::get_rpath_flags(&mut rpath_config));
+    }
+}
+
+/// Produce the linker command line containing linker path and arguments.
+/// `NO-OPT-OUT` marks the arguments that cannot be removed from the command line
+/// by the user without creating a custom target specification.
+/// `OBJECT-FILES` specifies whether the arguments can add object files.
+/// `CUSTOMIZATION-POINT` means that arbitrary arguments defined by the user
+/// or by the target spec can be inserted here.
+/// `AUDIT-ORDER` marks arguments for which we still need to determine whether
+/// they are order-dependent or not.
+fn linker_with_args<'a, B: ArchiveBuilder<'a>>(
+    path: &Path,
+    flavor: LinkerFlavor,
+    sess: &'a Session,
+    crate_type: CrateType,
+    tmpdir: &Path,
+    out_filename: &Path,
+    codegen_results: &CodegenResults,
+    target_cpu: &str,
+) -> Command {
+    let crt_objects_fallback = crt_objects_fallback(sess, crate_type);
+    let base_cmd = get_linker(sess, path, flavor, crt_objects_fallback);
+    // FIXME: Move `/LIBPATH` addition for uwp targets from the linker construction
+    // to the linker args construction.
+    assert!(base_cmd.get_args().is_empty() || sess.target.target.target_vendor == "uwp");
+    let cmd = &mut *codegen_results.linker_info.to_linker(base_cmd, &sess, flavor, target_cpu);
+    let link_output_kind = link_output_kind(sess, crate_type);
+
+    // NO-OPT-OUT, OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT
+    add_pre_link_args(cmd, sess, flavor);
+
+    // NO-OPT-OUT
+    add_link_script(cmd, sess, tmpdir, crate_type);
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    if sess.target.target.options.is_like_fuchsia && crate_type == CrateType::Executable {
+        let prefix = if sess.opts.debugging_opts.sanitizer.contains(SanitizerSet::ADDRESS) {
+            "asan/"
+        } else {
+            ""
+        };
+        cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix));
+    }
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    if sess.target.target.options.eh_frame_header {
+        cmd.add_eh_frame_header();
+    }
+
+    // NO-OPT-OUT, OBJECT-FILES-NO
+    if crt_objects_fallback {
+        cmd.no_crt_objects();
+    }
+
+    // NO-OPT-OUT, OBJECT-FILES-YES
+    add_pre_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    if sess.target.target.options.is_like_emscripten {
+        cmd.arg("-s");
+        cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort {
+            "DISABLE_EXCEPTION_CATCHING=1"
+        } else {
+            "DISABLE_EXCEPTION_CATCHING=0"
+        });
+    }
+
+    // OBJECT-FILES-YES, AUDIT-ORDER
+    link_sanitizers(sess, crate_type, cmd);
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    // Linker plugins should be specified early in the list of arguments
+    // FIXME: How "early" exactly?
+    cmd.linker_plugin_lto();
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    // FIXME: Order-dependent, at least relative to other args adding search directories.
+    add_library_search_dirs(cmd, sess, crt_objects_fallback);
+
+    // OBJECT-FILES-YES
+    add_local_crate_regular_objects(cmd, codegen_results);
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    cmd.output_filename(out_filename);
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    if crate_type == CrateType::Executable && sess.target.target.options.is_like_windows {
+        if let Some(ref s) = codegen_results.windows_subsystem {
+            cmd.subsystem(s);
+        }
+    }
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    // If we're building something like a dynamic library then some platforms
+    // need to make sure that all symbols are exported correctly from the
+    // dynamic library.
+    cmd.export_symbols(tmpdir, crate_type);
+
+    // OBJECT-FILES-YES
+    add_local_crate_metadata_objects(cmd, crate_type, codegen_results);
+
+    // OBJECT-FILES-YES
+    add_local_crate_allocator_objects(cmd, codegen_results);
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    // FIXME: Order dependent, applies to the following objects. Where should it be placed?
+    // Try to strip as much out of the generated object by removing unused
+    // sections if possible. See more comments in linker.rs
+    if sess.opts.cg.link_dead_code != Some(true) {
+        let keep_metadata = crate_type == CrateType::Dylib;
+        cmd.gc_sections(keep_metadata);
+    }
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    cmd.set_output_kind(link_output_kind, out_filename);
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    add_relro_args(cmd, sess);
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    // Pass optimization flags down to the linker.
+    cmd.optimize();
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    // Pass debuginfo and strip flags down to the linker.
+    cmd.debuginfo(sess.opts.debugging_opts.strip);
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    // We want to prevent the compiler from accidentally leaking in any system libraries,
+    // so by default we tell linkers not to link to any default libraries.
+    if !sess.opts.cg.default_linker_libraries && sess.target.target.options.no_default_libraries {
+        cmd.no_default_libraries();
+    }
+
+    // OBJECT-FILES-YES
+    link_local_crate_native_libs_and_dependent_crate_libs::<B>(
+        cmd,
+        sess,
+        crate_type,
+        codegen_results,
+        tmpdir,
+    );
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    if sess.opts.cg.profile_generate.enabled() || sess.opts.debugging_opts.instrument_coverage {
+        cmd.pgo_gen();
+    }
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    if sess.opts.cg.control_flow_guard != CFGuard::Disabled {
+        cmd.control_flow_guard();
+    }
+
+    // OBJECT-FILES-NO, AUDIT-ORDER
+    add_rpath_args(cmd, sess, codegen_results, out_filename);
+
+    // OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT
+    add_user_defined_link_args(cmd, sess, codegen_results);
+
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    cmd.finalize();
+
+    // NO-OPT-OUT, OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT
+    add_late_link_args(cmd, sess, flavor, crate_type, codegen_results);
+
+    // NO-OPT-OUT, OBJECT-FILES-YES
+    add_post_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+
+    // NO-OPT-OUT, OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT
+    add_post_link_args(cmd, sess, flavor);
+
+    cmd.take_cmd()
+}
+
+// # Native library linking
+//
+// User-supplied library search paths (-L on the command line). These are
+// the same paths used to find Rust crates, so some of them may have been
+// added already by the previous crate linking code. This only allows them
+// to be found at compile time, so it is still entirely up to outside
+// forces to make sure the library can be found at runtime.
+//
+// Also note that the native libraries linked here are only the ones located
+// in the current crate. Upstream crates with native library dependencies
+// may have their native library pulled in above.
+fn add_local_native_libraries(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    codegen_results: &CodegenResults,
+) {
+    let filesearch = sess.target_filesearch(PathKind::All);
+    for search_path in filesearch.search_paths() {
+        match search_path.kind {
+            PathKind::Framework => {
+                cmd.framework_path(&search_path.dir);
+            }
+            _ => {
+                cmd.include_path(&fix_windows_verbatim_for_gcc(&search_path.dir));
+            }
+        }
+    }
+
+    let relevant_libs =
+        codegen_results.crate_info.used_libraries.iter().filter(|l| relevant_lib(sess, l));
+
+    let search_path = archive_search_paths(sess);
+    for lib in relevant_libs {
+        let name = match lib.name {
+            Some(l) => l,
+            None => continue,
+        };
+        match lib.kind {
+            NativeLibKind::Dylib | NativeLibKind::Unspecified => cmd.link_dylib(name),
+            NativeLibKind::Framework => cmd.link_framework(name),
+            NativeLibKind::StaticNoBundle => cmd.link_staticlib(name),
+            NativeLibKind::StaticBundle => cmd.link_whole_staticlib(name, &search_path),
+            NativeLibKind::RawDylib => {
+                // FIXME(#58713): Proper handling for raw dylibs.
+                bug!("raw_dylib feature not yet implemented");
+            }
+        }
+    }
+}
+
+// # Rust Crate linking
+//
+// Rust crates are not considered at all when creating an rlib output. All
+// dependencies will be linked when producing the final output (instead of
+// the intermediate rlib version).
+fn add_upstream_rust_crates<'a, B: ArchiveBuilder<'a>>(
+    cmd: &mut dyn Linker,
+    sess: &'a Session,
+    codegen_results: &CodegenResults,
+    crate_type: CrateType,
+    tmpdir: &Path,
+) {
+    // All of the heavy lifting has previously been accomplished by the
+    // dependency_format module of the compiler. This is just crawling the
+    // output of that module, adding crates as necessary.
+    //
+    // Linking to an rlib involves just passing it to the linker (the linker
+    // will slurp up the object files inside), and linking to a dynamic library
+    // involves just passing the right -l flag.
+
+    let (_, data) = codegen_results
+        .crate_info
+        .dependency_formats
+        .iter()
+        .find(|(ty, _)| *ty == crate_type)
+        .expect("failed to find crate type in dependency format list");
+
+    // Invoke get_used_crates to ensure that we get a topological sorting of
+    // crates.
+    let deps = &codegen_results.crate_info.used_crates_dynamic;
+
+    // There are a few internal crates in the standard library (namely libcore
+    // and libstd) which actually have a circular dependency on one another.
+    // This currently arises through "weak lang items" where libcore requires
+    // things like `rust_begin_unwind` but libstd ends up defining it. To make
+    // this circular dependency work correctly in all situations we need to
+    // correctly apply the `--start-group` and `--end-group` options to GNU
+    // linkers, otherwise if we don't use any other symbol from the standard
+    // library it'll get discarded and the whole application won't link.
+    //
+    // In this loop we're calculating `group_end` (the crate after which we
+    // pass `--end-group`) and `group_start` (the crate before which we pass
+    // `--start-group`). We currently do this by passing `--end-group` after
+    // the first crate (when iterating backwards) that requires a lang item
+    // defined somewhere else. Once that's set, we pass `--start-group` once
+    // all the necessary lang items have been defined.
+    //
+    // Note that this isn't amazing logic for now but it should do the trick
+    // for the current implementation of the standard library.
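+    // On a GNU linker the net effect is a command line segment like
+    // `--start-group libstd-*.rlib libcore-*.rlib --end-group` (illustrative).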
+    let mut group_end = None;
+    let mut group_start = None;
+    // Crates available for linking thus far.
+    let mut available = FxHashSet::default();
+    // Crates required to satisfy dependencies discovered so far.
+    let mut required = FxHashSet::default();
+
+    let info = &codegen_results.crate_info;
+    for &(cnum, _) in deps.iter().rev() {
+        if let Some(missing) = info.missing_lang_items.get(&cnum) {
+            let missing_crates = missing.iter().map(|i| info.lang_item_to_crate.get(i).copied());
+            required.extend(missing_crates);
+        }
+
+        required.insert(Some(cnum));
+        available.insert(Some(cnum));
+
+        if required.len() > available.len() && group_end.is_none() {
+            group_end = Some(cnum);
+        }
+        if required.len() == available.len() && group_end.is_some() {
+            group_start = Some(cnum);
+            break;
+        }
+    }
+
+    // If we didn't end up filling in all lang items from upstream crates then
+    // we'll be filling it in with our crate. This probably means we're the
+    // standard library itself, so skip this for now.
+    if group_end.is_some() && group_start.is_none() {
+        group_end = None;
+    }
+
+    let mut compiler_builtins = None;
+
+    for &(cnum, _) in deps.iter() {
+        if group_start == Some(cnum) {
+            cmd.group_start();
+        }
+
+        // We may not pass all crates through to the linker. Some crates may
+        // appear statically in an existing dylib, meaning we'll pick up all the
+        // symbols from the dylib.
+        let src = &codegen_results.crate_info.used_crate_source[&cnum];
+        match data[cnum.as_usize() - 1] {
+            _ if codegen_results.crate_info.profiler_runtime == Some(cnum) => {
+                add_static_crate::<B>(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
+            }
+            // compiler-builtins are always placed last to ensure that they're
+            // linked correctly.
+            _ if codegen_results.crate_info.compiler_builtins == Some(cnum) => {
+                assert!(compiler_builtins.is_none());
+                compiler_builtins = Some(cnum);
+            }
+            Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+            Linkage::Static => {
+                add_static_crate::<B>(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
+            }
+            Linkage::Dynamic => add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0),
+        }
+
+        if group_end == Some(cnum) {
+            cmd.group_end();
+        }
+    }
+
+    // compiler-builtins are always placed last to ensure that they're
+    // linked correctly.
+    // We must always link the `compiler_builtins` crate statically, even if it
+    // was already "included" in a dylib (e.g., `libstd` when `-C prefer-dynamic`
+    // is used).
+    if let Some(cnum) = compiler_builtins {
+        add_static_crate::<B>(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
+    }
+
+    // Converts a library file-stem into a cc -l argument
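+    // e.g. "libfoo" becomes "foo" on every target that isn't Windows-like.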
+    fn unlib<'a>(config: &config::Config, stem: &'a str) -> &'a str {
+        if stem.starts_with("lib") && !config.target.options.is_like_windows {
+            &stem[3..]
+        } else {
+            stem
+        }
+    }
+
+    // Adds the static "rlib" versions of all crates to the command line.
+    // There's a bit of magic which happens here specifically related to LTO and
+    // dynamic libraries. Specifically:
+    //
+    // * For LTO, we remove upstream object files.
+    // * For dylibs we remove metadata and bytecode from upstream rlibs
+    //
+    // When performing LTO, almost(*) all of the bytecode from the upstream
+    // libraries has already been included in our object file output. As a
+    // result we need to remove the object files in the upstream libraries so
+    // the linker doesn't try to include them twice (or whine about duplicate
+    // symbols). We must continue to include the rest of the rlib, however, as
+    // it may contain static native libraries which must be linked in.
+    //
+    // (*) Crates marked with `#![no_builtins]` don't participate in LTO and
+    // their bytecode wasn't included. The object files in those libraries must
+    // still be passed to the linker.
+    //
+    // When making a dynamic library, linkers by default don't include any
+    // object files in an archive if they're not necessary to resolve the link.
+    // We basically want to convert the archive (rlib) to a dylib, though, so we
+    // *do* want everything included in the output, regardless of whether the
+    // linker thinks it's needed or not. As a result we must use the
+    // --whole-archive option (or the platform equivalent). When using this
+    // option the linker will fail if there are non-objects in the archive (such
+    // as our own metadata and/or bytecode). All in all, for rlibs to be
+    // entirely included in dylibs, we need to remove all non-object files.
+    //
+    // Note, however, that if we're not doing LTO or we're not producing a dylib
+    // (aka we're making an executable), we can just pass the rlib blindly to
+    // the linker (fast) because it's fine if it's not actually included as
+    // we're at the end of the dependency chain.
+    fn add_static_crate<'a, B: ArchiveBuilder<'a>>(
+        cmd: &mut dyn Linker,
+        sess: &'a Session,
+        codegen_results: &CodegenResults,
+        tmpdir: &Path,
+        crate_type: CrateType,
+        cnum: CrateNum,
+    ) {
+        let src = &codegen_results.crate_info.used_crate_source[&cnum];
+        let cratepath = &src.rlib.as_ref().unwrap().0;
+
+        // See the comment above in `link_staticlib` and `link_rlib` for why
+        // we skip all object files if there's a bundled static library that
+        // isn't relevant (i.e. its cfg doesn't match).
+        let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+        let skip_native = native_libs
+            .iter()
+            .any(|lib| lib.kind == NativeLibKind::StaticBundle && !relevant_lib(sess, lib));
+
+        if (!are_upstream_rust_objects_already_included(sess)
+            || ignored_for_lto(sess, &codegen_results.crate_info, cnum))
+            && crate_type != CrateType::Dylib
+            && !skip_native
+        {
+            cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath));
+            return;
+        }
+
+        let dst = tmpdir.join(cratepath.file_name().unwrap());
+        let name = cratepath.file_name().unwrap().to_str().unwrap();
+        let name = &name[3..name.len() - 5]; // chop off lib/.rlib
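+        // e.g. "libfoo-0123abcd.rlib" yields "foo-0123abcd" (illustrative).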
+
+        sess.prof.generic_activity_with_arg("link_altering_rlib", name).run(|| {
+            let mut archive = <B as ArchiveBuilder>::new(sess, &dst, Some(cratepath));
+            archive.update_symbols();
+
+            let mut any_objects = false;
+            for f in archive.src_files() {
+                if f == METADATA_FILENAME {
+                    archive.remove_file(&f);
+                    continue;
+                }
+
+                let canonical = f.replace("-", "_");
+                let canonical_name = name.replace("-", "_");
+
+                let is_rust_object =
+                    canonical.starts_with(&canonical_name) && looks_like_rust_object_file(&f);
+
+                // If we've been requested to skip all native object files
+                // (those not generated by the rust compiler) then we can skip
+                // this file. See above for why we may want to do this.
+                let skip_because_cfg_say_so = skip_native && !is_rust_object;
+
+                // If we're performing LTO and this is a rust-generated object
+                // file, then we don't need the object file as it's part of the
+                // LTO module. Note that `#![no_builtins]` is excluded from LTO,
+                // though, so we let that object file slide.
+                let skip_because_lto = are_upstream_rust_objects_already_included(sess)
+                    && is_rust_object
+                    && (sess.target.target.options.no_builtins
+                        || !codegen_results.crate_info.is_no_builtins.contains(&cnum));
+
+                if skip_because_cfg_say_so || skip_because_lto {
+                    archive.remove_file(&f);
+                } else {
+                    any_objects = true;
+                }
+            }
+
+            if !any_objects {
+                return;
+            }
+            archive.build();
+
+            // If we're creating a dylib, then we need to include the
+            // whole of each object in our archive into that artifact. This is
+            // because a `dylib` can be reused as an intermediate artifact.
+            //
+            // Note, though, that we don't want to include the whole of a
+            // compiler-builtins crate (e.g., compiler-rt) because it'll get
+            // repeatedly linked anyway.
+            if crate_type == CrateType::Dylib
+                && codegen_results.crate_info.compiler_builtins != Some(cnum)
+            {
+                cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst));
+            } else {
+                cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst));
+            }
+        });
+    }
+
+    // Same thing as above, but for dynamic crates instead of static crates.
+    fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) {
+        // We just need to tell the linker where the library lives and
+        // what its name is.
+        let parent = cratepath.parent();
+        if let Some(dir) = parent {
+            cmd.include_path(&fix_windows_verbatim_for_gcc(dir));
+        }
+        let filestem = cratepath.file_stem().unwrap().to_str().unwrap();
+        cmd.link_rust_dylib(
+            Symbol::intern(&unlib(&sess.target, filestem)),
+            parent.unwrap_or(Path::new("")),
+        );
+    }
+}
+
+// Link in all of our upstream crates' native dependencies. Remember that
+// all of these upstream native dependencies are non-static
+// dependencies. We've got two cases then:
+//
+// 1. The upstream crate is an rlib. In this case we *must* link in the
+// native dependency because the rlib is just an archive.
+//
+// 2. The upstream crate is a dylib. In order to use the dylib, we have to
+// have the dependency present on the system somewhere. Thus, we don't
+// gain a whole lot from not linking in the dynamic dependency to this
+// crate as well.
+//
+// The use case for this is a little subtle. In theory the native
+// dependencies of a crate are purely an implementation detail of the crate
+// itself, but the problem arises with generic and inlined functions. If a
+// generic function calls a native function, then the generic function must
+// be instantiated in the target crate, meaning that the native symbol must
+// also be resolved in the target crate.
+fn add_upstream_native_libraries(
+    cmd: &mut dyn Linker,
+    sess: &Session,
+    codegen_results: &CodegenResults,
+    crate_type: CrateType,
+) {
+    // Be sure to use a topological sorting of crates because there may be
+    // interdependencies between native libraries. When passing -nodefaultlibs,
+    // for example, almost all native libraries depend on libc, so we have to
+    // make sure that's all the way at the right (liblibc is near the base of
+    // the dependency chain).
+    //
+    // This passes RequireStatic, but the actual requirement doesn't matter;
+    // we're just getting an ordering of crate numbers, not worrying about
+    // the paths.
+    let (_, data) = codegen_results
+        .crate_info
+        .dependency_formats
+        .iter()
+        .find(|(ty, _)| *ty == crate_type)
+        .expect("failed to find crate type in dependency format list");
+
+    let crates = &codegen_results.crate_info.used_crates_static;
+    for &(cnum, _) in crates {
+        for lib in codegen_results.crate_info.native_libraries[&cnum].iter() {
+            let name = match lib.name {
+                Some(l) => l,
+                None => continue,
+            };
+            if !relevant_lib(sess, &lib) {
+                continue;
+            }
+            match lib.kind {
+                NativeLibKind::Dylib | NativeLibKind::Unspecified => cmd.link_dylib(name),
+                NativeLibKind::Framework => cmd.link_framework(name),
+                NativeLibKind::StaticNoBundle => {
+                    // Link "static-nobundle" native libs only if the crate they originate from
+                    // is being linked statically to the current crate. If it's linked dynamically
+                    // or is an rlib already included via some other dylib crate, the symbols from
+                    // native libs will have already been included in that dylib.
+                    if data[cnum.as_usize() - 1] == Linkage::Static {
+                        cmd.link_staticlib(name)
+                    }
+                }
+                // ignore statically included native libraries here as we've
+                // already included them when we included the rust library
+                // previously
+                NativeLibKind::StaticBundle => {}
+                NativeLibKind::RawDylib => {
+                    // FIXME(#58713): Proper handling for raw dylibs.
+                    bug!("raw_dylib feature not yet implemented");
+                }
+            }
+        }
+    }
+}
+
+fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool {
+    match lib.cfg {
+        Some(ref cfg) => rustc_attr::cfg_matches(cfg, &sess.parse_sess, None),
+        None => true,
+    }
+}
+
+fn are_upstream_rust_objects_already_included(sess: &Session) -> bool {
+    match sess.lto() {
+        config::Lto::Fat => true,
+        config::Lto::Thin => {
+            // If we defer LTO to the linker, we haven't run LTO ourselves, so
+            // any upstream object files have not been copied yet.
+            !sess.opts.cg.linker_plugin_lto.enabled()
+        }
+        config::Lto::No | config::Lto::ThinLocal => false,
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
new file mode 100644
index 00000000000..0ddf8bd316f
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -0,0 +1,1351 @@
+use super::archive;
+use super::command::Command;
+use super::symbol_export;
+use rustc_span::symbol::sym;
+
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io::{self, BufWriter};
+use std::mem;
+use std::path::{Path, PathBuf};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_middle::ty::TyCtxt;
+use rustc_serialize::{json, Encoder};
+use rustc_session::config::{self, CrateType, DebugInfo, LinkerPluginLto, Lto, OptLevel, Strip};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor};
+
+/// Disables non-English messages from localized linkers.
+/// Such messages may cause issues with text encoding on Windows (#35785)
+/// and prevent inspection of linker output in case of errors, which we occasionally need to do.
+/// This should be acceptable because other messages from rustc are in English anyway,
+/// and it may also improve searchability of the linker diagnostics.
+pub fn disable_localization(linker: &mut Command) {
+    // No harm in setting both env vars simultaneously.
+    // Unix-style linkers.
+    linker.env("LC_ALL", "C");
+    // MSVC's `link.exe`.
+    linker.env("VSLANG", "1033");
+}
+
+/// Information that any of the linkers we support might need
+/// out of the shared crate context before we get rid of it.
+#[derive(Encodable, Decodable)]
+pub struct LinkerInfo {
+    exports: FxHashMap<CrateType, Vec<String>>,
+}
+
+impl LinkerInfo {
+    pub fn new(tcx: TyCtxt<'_>) -> LinkerInfo {
+        LinkerInfo {
+            exports: tcx
+                .sess
+                .crate_types()
+                .iter()
+                .map(|&c| (c, exported_symbols(tcx, c)))
+                .collect(),
+        }
+    }
+
+    pub fn to_linker<'a>(
+        &'a self,
+        cmd: Command,
+        sess: &'a Session,
+        flavor: LinkerFlavor,
+        target_cpu: &'a str,
+    ) -> Box<dyn Linker + 'a> {
+        match flavor {
+            LinkerFlavor::Lld(LldFlavor::Link) | LinkerFlavor::Msvc => {
+                Box::new(MsvcLinker { cmd, sess, info: self }) as Box<dyn Linker>
+            }
+            LinkerFlavor::Em => Box::new(EmLinker { cmd, sess, info: self }) as Box<dyn Linker>,
+            LinkerFlavor::Gcc => Box::new(GccLinker {
+                cmd,
+                sess,
+                info: self,
+                hinted_static: false,
+                is_ld: false,
+                target_cpu,
+            }) as Box<dyn Linker>,
+
+            LinkerFlavor::Lld(LldFlavor::Ld)
+            | LinkerFlavor::Lld(LldFlavor::Ld64)
+            | LinkerFlavor::Ld => Box::new(GccLinker {
+                cmd,
+                sess,
+                info: self,
+                hinted_static: false,
+                is_ld: true,
+                target_cpu,
+            }) as Box<dyn Linker>,
+
+            LinkerFlavor::Lld(LldFlavor::Wasm) => {
+                Box::new(WasmLd::new(cmd, sess, self)) as Box<dyn Linker>
+            }
+
+            LinkerFlavor::PtxLinker => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>,
+        }
+    }
+}
+
+/// Linker abstraction used by `back::link` to build up the command to invoke a
+/// linker.
+///
+/// This trait is the total list of requirements needed by `back::link` and
+/// represents the meaning of each option being passed down. This trait is then
+/// used to dispatch on whether a GNU-like linker (generally `ld`) or an
+/// MSVC linker (e.g., `link.exe`) is being used.
+pub trait Linker {
+    fn cmd(&mut self) -> &mut Command;
+    fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path);
+    fn link_dylib(&mut self, lib: Symbol);
+    fn link_rust_dylib(&mut self, lib: Symbol, path: &Path);
+    fn link_framework(&mut self, framework: Symbol);
+    fn link_staticlib(&mut self, lib: Symbol);
+    fn link_rlib(&mut self, lib: &Path);
+    fn link_whole_rlib(&mut self, lib: &Path);
+    fn link_whole_staticlib(&mut self, lib: Symbol, search_path: &[PathBuf]);
+    fn include_path(&mut self, path: &Path);
+    fn framework_path(&mut self, path: &Path);
+    fn output_filename(&mut self, path: &Path);
+    fn add_object(&mut self, path: &Path);
+    fn gc_sections(&mut self, keep_metadata: bool);
+    fn full_relro(&mut self);
+    fn partial_relro(&mut self);
+    fn no_relro(&mut self);
+    fn optimize(&mut self);
+    fn pgo_gen(&mut self);
+    fn control_flow_guard(&mut self);
+    fn debuginfo(&mut self, strip: Strip);
+    fn no_crt_objects(&mut self);
+    fn no_default_libraries(&mut self);
+    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType);
+    fn subsystem(&mut self, subsystem: &str);
+    fn group_start(&mut self);
+    fn group_end(&mut self);
+    fn linker_plugin_lto(&mut self);
+    fn add_eh_frame_header(&mut self) {}
+    fn finalize(&mut self);
+}
+
+impl dyn Linker + '_ {
+    pub fn arg(&mut self, arg: impl AsRef<OsStr>) {
+        self.cmd().arg(arg);
+    }
+
+    pub fn args(&mut self, args: impl IntoIterator<Item: AsRef<OsStr>>) {
+        self.cmd().args(args);
+    }
+
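+    /// Take ownership of the assembled command, leaving an empty command
+    /// behind in its place.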
+    pub fn take_cmd(&mut self) -> Command {
+        mem::replace(self.cmd(), Command::new(""))
+    }
+}
+
+pub struct GccLinker<'a> {
+    cmd: Command,
+    sess: &'a Session,
+    info: &'a LinkerInfo,
+    hinted_static: bool, // Keeps track of the current hinting mode.
+    // True if the command invokes `ld` directly rather than a compiler driver like `gcc`.
+    is_ld: bool,
+    target_cpu: &'a str,
+}
+
+impl<'a> GccLinker<'a> {
+    /// Argument that must be passed *directly* to the linker
+    ///
+    /// These arguments need to be prefixed with `-Wl,` when a GCC-style linker is used.
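+    /// For example, `--gc-sections` is passed as `-Wl,--gc-sections` when gcc
+    /// drives the link, and verbatim when `ld` is invoked directly.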
+    fn linker_arg<S>(&mut self, arg: S) -> &mut Self
+    where
+        S: AsRef<OsStr>,
+    {
+        if !self.is_ld {
+            let mut os = OsString::from("-Wl,");
+            os.push(arg.as_ref());
+            self.cmd.arg(os);
+        } else {
+            self.cmd.arg(arg);
+        }
+        self
+    }
+
+    fn takes_hints(&self) -> bool {
+        // Really this function should only return true if the underlying
+        // linker is binutils' `ld.bfd` or `ld.gold`. We
+        // don't really have a foolproof way to detect that, so rule out some
+        // platforms where currently this is guaranteed to *not* be the case:
+        //
+        // * On OSX they have their own linker, not binutils'
+        // * For WebAssembly the only functional linker is LLD, which doesn't
+        //   support hint flags
+        !self.sess.target.target.options.is_like_osx && self.sess.target.target.arch != "wasm32"
+    }
+
+    // Some platforms take hints about whether a library is static or dynamic.
+    // For those that support this, we pass the option if the library was
+    // flagged "static" (the default is usually dynamic) so that if both
+    // libfoo.a and libfoo.so exist, the right one is chosen.
+    fn hint_static(&mut self) {
+        if !self.takes_hints() {
+            return;
+        }
+        if !self.hinted_static {
+            self.linker_arg("-Bstatic");
+            self.hinted_static = true;
+        }
+    }
+
+    fn hint_dynamic(&mut self) {
+        if !self.takes_hints() {
+            return;
+        }
+        if self.hinted_static {
+            self.linker_arg("-Bdynamic");
+            self.hinted_static = false;
+        }
+    }
+
+    fn push_linker_plugin_lto_args(&mut self, plugin_path: Option<&OsStr>) {
+        if let Some(plugin_path) = plugin_path {
+            let mut arg = OsString::from("-plugin=");
+            arg.push(plugin_path);
+            self.linker_arg(&arg);
+        }
+
+        let opt_level = match self.sess.opts.optimize {
+            config::OptLevel::No => "O0",
+            config::OptLevel::Less => "O1",
+            config::OptLevel::Default => "O2",
+            config::OptLevel::Aggressive => "O3",
+            config::OptLevel::Size => "Os",
+            config::OptLevel::SizeMin => "Oz",
+        };
+
+        self.linker_arg(&format!("-plugin-opt={}", opt_level));
+        let target_cpu = self.target_cpu;
+        self.linker_arg(&format!("-plugin-opt=mcpu={}", target_cpu));
+    }
+
+    fn build_dylib(&mut self, out_filename: &Path) {
+        // On mac we need to tell the linker to let this library be rpathed
+        if self.sess.target.target.options.is_like_osx {
+            self.cmd.arg("-dynamiclib");
+            self.linker_arg("-dylib");
+
+            // Note that the `osx_rpath_install_name` option here is a hack
+            // purely to support rustbuild right now; we should get a more
+            // principled solution at some point to force the compiler to pass
+            // the right `-Wl,-install_name` with an `@rpath` in it.
+            if self.sess.opts.cg.rpath || self.sess.opts.debugging_opts.osx_rpath_install_name {
+                self.linker_arg("-install_name");
+                let mut v = OsString::from("@rpath/");
+                v.push(out_filename.file_name().unwrap());
+                self.linker_arg(&v);
+            }
+        } else {
+            self.cmd.arg("-shared");
+            if self.sess.target.target.options.is_like_windows {
+                // The output filename already contains `dll_suffix` so
+                // the resulting import library will have a name in the
+                // form of libfoo.dll.a
+                let implib_name =
+                    out_filename.file_name().and_then(|file| file.to_str()).map(|file| {
+                        format!(
+                            "{}{}{}",
+                            self.sess.target.target.options.staticlib_prefix,
+                            file,
+                            self.sess.target.target.options.staticlib_suffix
+                        )
+                    });
+                if let Some(implib_name) = implib_name {
+                    let implib = out_filename.parent().map(|dir| dir.join(&implib_name));
+                    if let Some(implib) = implib {
+                        self.linker_arg(&format!("--out-implib={}", (*implib).to_str().unwrap()));
+                    }
+                }
+            }
+        }
+    }
+}
+
+impl<'a> Linker for GccLinker<'a> {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+        match output_kind {
+            LinkOutputKind::DynamicNoPicExe => {
+                if !self.is_ld && self.sess.target.target.options.linker_is_gnu {
+                    self.cmd.arg("-no-pie");
+                }
+            }
+            LinkOutputKind::DynamicPicExe => {
+                // `-pie` works for both gcc wrapper and ld.
+                self.cmd.arg("-pie");
+            }
+            LinkOutputKind::StaticNoPicExe => {
+                // `-static` works for both gcc wrapper and ld.
+                self.cmd.arg("-static");
+                if !self.is_ld && self.sess.target.target.options.linker_is_gnu {
+                    self.cmd.arg("-no-pie");
+                }
+            }
+            LinkOutputKind::StaticPicExe => {
+                if !self.is_ld {
+                    // Note that the combination `-static -pie` doesn't work as
+                    // expected for the gcc wrapper; `-static` in that case
+                    // suppresses `-pie`.
+                    self.cmd.arg("-static-pie");
+                } else {
+                    // `--no-dynamic-linker` and `-z text` are not strictly necessary for producing
+                    // a static pie, but currently passed because gcc and clang pass them.
+                    // The former suppresses the `INTERP` ELF header specifying dynamic linker,
+                    // which is otherwise implicitly injected by ld (but not lld).
+                    // The latter doesn't change anything, only ensures that everything is pic.
+                    self.cmd.args(&["-static", "-pie", "--no-dynamic-linker", "-z", "text"]);
+                }
+            }
+            LinkOutputKind::DynamicDylib => self.build_dylib(out_filename),
+            LinkOutputKind::StaticDylib => {
+                self.cmd.arg("-static");
+                self.build_dylib(out_filename);
+            }
+        }
+        // The VxWorks compiler driver introduced the `--static-crt` flag specifically for rustc;
+        // it switches linking for libc and similar system libraries to static without using
+        // any `#[link]` attributes in the `libc` crate, see #72782 for details.
+        // FIXME: Switch to using `#[link]` attributes in the `libc` crate
+        // similarly to other targets.
+        if self.sess.target.target.target_os == "vxworks"
+            && matches!(
+                output_kind,
+                LinkOutputKind::StaticNoPicExe
+                    | LinkOutputKind::StaticPicExe
+                    | LinkOutputKind::StaticDylib
+            )
+        {
+            self.cmd.arg("--static-crt");
+        }
+    }
+
+    fn link_dylib(&mut self, lib: Symbol) {
+        self.hint_dynamic();
+        self.cmd.arg(format!("-l{}", lib));
+    }
+    fn link_staticlib(&mut self, lib: Symbol) {
+        self.hint_static();
+        self.cmd.arg(format!("-l{}", lib));
+    }
+    fn link_rlib(&mut self, lib: &Path) {
+        self.hint_static();
+        self.cmd.arg(lib);
+    }
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
+    }
+    fn framework_path(&mut self, path: &Path) {
+        self.cmd.arg("-F").arg(path);
+    }
+    fn output_filename(&mut self, path: &Path) {
+        self.cmd.arg("-o").arg(path);
+    }
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg(path);
+    }
+    fn full_relro(&mut self) {
+        self.linker_arg("-zrelro");
+        self.linker_arg("-znow");
+    }
+    fn partial_relro(&mut self) {
+        self.linker_arg("-zrelro");
+    }
+    fn no_relro(&mut self) {
+        self.linker_arg("-znorelro");
+    }
+
+    fn link_rust_dylib(&mut self, lib: Symbol, _path: &Path) {
+        self.hint_dynamic();
+        self.cmd.arg(format!("-l{}", lib));
+    }
+
+    fn link_framework(&mut self, framework: Symbol) {
+        self.hint_dynamic();
+        self.cmd.arg("-framework").sym_arg(framework);
+    }
+
+    // Here we explicitly ask that the entire archive is included in the
+    // resulting artifact. For more details see #15460, but the gist is that
+    // the linker will strip away any unused objects in the archive if we
+    // don't otherwise explicitly reference them. This can occur for
+    // libraries which are just providing bindings, libraries with generic
+    // functions, etc.
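+    //
+    // On non-macOS targets this expands to roughly
+    // `-Wl,--whole-archive -lfoo -Wl,--no-whole-archive` (library name
+    // hypothetical), bracketing just the one library.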
+    fn link_whole_staticlib(&mut self, lib: Symbol, search_path: &[PathBuf]) {
+        self.hint_static();
+        let target = &self.sess.target.target;
+        if !target.options.is_like_osx {
+            self.linker_arg("--whole-archive").cmd.arg(format!("-l{}", lib));
+            self.linker_arg("--no-whole-archive");
+        } else {
+            // -force_load is the macOS equivalent of --whole-archive, but it
+            // involves passing the full path to the library to link.
+            self.linker_arg("-force_load");
+            let lib = archive::find_library(lib, search_path, &self.sess);
+            self.linker_arg(&lib);
+        }
+    }
+
+    fn link_whole_rlib(&mut self, lib: &Path) {
+        self.hint_static();
+        if self.sess.target.target.options.is_like_osx {
+            self.linker_arg("-force_load");
+            self.linker_arg(&lib);
+        } else {
+            self.linker_arg("--whole-archive").cmd.arg(lib);
+            self.linker_arg("--no-whole-archive");
+        }
+    }
+
+    fn gc_sections(&mut self, keep_metadata: bool) {
+        // The dead_strip option to the linker specifies that functions and data
+        // unreachable by the entry point will be removed. This is quite useful
+        // with Rust's compilation model of compiling entire libraries at a time
+        // into one object file. For example, this brings hello world from 1.7MB
+        // down to 458K.
+        //
+        // Note that this is done for both executables and dynamic libraries. We
+        // won't get much benefit from dylibs because LLVM will have already
+        // stripped away as much as it could. This has not been seen to impact
+        // link times negatively.
+        //
+        // -dead_strip can't be part of the pre_link_args because the linker
+        // is also used for partial linking when using multiple codegen units
+        // (-r). So we insert it here instead.
+        if self.sess.target.target.options.is_like_osx {
+            self.linker_arg("-dead_strip");
+        } else if self.sess.target.target.options.is_like_solaris {
+            self.linker_arg("-zignore");
+
+        // If we're building a dylib, we don't use --gc-sections because LLVM
+        // has already done the best it can do, and we also don't want to
+        // eliminate the metadata. If we're building an executable, however,
+        // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
+        // reduction.
+        } else if !keep_metadata {
+            self.linker_arg("--gc-sections");
+        }
+    }
+
+    fn optimize(&mut self) {
+        if !self.sess.target.target.options.linker_is_gnu {
+            return;
+        }
+
+        // GNU-style linkers support optimization with -O. GNU ld doesn't
+        // need a numeric argument, but other linkers do.
+        if self.sess.opts.optimize == config::OptLevel::Default
+            || self.sess.opts.optimize == config::OptLevel::Aggressive
+        {
+            self.linker_arg("-O1");
+        }
+    }
+
+    fn pgo_gen(&mut self) {
+        if !self.sess.target.target.options.linker_is_gnu {
+            return;
+        }
+
+        // If we're doing PGO generation stuff and on a GNU-like linker, use the
+        // "-u" flag to properly pull in the profiler runtime bits.
+        //
+        // This is because LLVM otherwise won't add the needed initialization
+        // for us on Linux (though the extra flag should be harmless if it
+        // does).
+        //
+        // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030.
+        //
+        // Though it may be worth trying to revert those changes upstream, since
+        // the overhead of the initialization should be minor.
+        self.cmd.arg("-u");
+        self.cmd.arg("__llvm_profile_runtime");
+    }
+
+    fn control_flow_guard(&mut self) {}
+
+    fn debuginfo(&mut self, strip: Strip) {
+        match strip {
+            Strip::None => {}
+            Strip::Debuginfo => {
+                // The MacOS linker does not support the longhand argument --strip-debug
+                self.linker_arg("-S");
+            }
+            Strip::Symbols => {
+                // The MacOS linker does not support the longhand argument --strip-all
+                self.linker_arg("-s");
+            }
+        }
+    }
+
+    fn no_crt_objects(&mut self) {
+        if !self.is_ld {
+            self.cmd.arg("-nostartfiles");
+        }
+    }
+
+    fn no_default_libraries(&mut self) {
+        if !self.is_ld {
+            self.cmd.arg("-nodefaultlibs");
+        }
+    }
+
+    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) {
+        // Symbol visibility in object files typically takes care of this.
+        if crate_type == CrateType::Executable
+            && self.sess.target.target.options.override_export_symbols.is_none()
+        {
+            return;
+        }
+
+        // We manually create a list of exported symbols to ensure we don't expose any more.
+        // The object files have far more public symbols than we actually want to export,
+        // so we hide them all here.
+
+        if !self.sess.target.target.options.limit_rdylib_exports {
+            return;
+        }
+
+        if crate_type == CrateType::ProcMacro {
+            return;
+        }
+
+        let is_windows = self.sess.target.target.options.is_like_windows;
+        let mut arg = OsString::new();
+        let path = tmpdir.join(if is_windows { "list.def" } else { "list" });
+
+        debug!("EXPORTED SYMBOLS:");
+
+        if self.sess.target.target.options.is_like_osx {
+            // Write a plain, newline-separated list of symbols
+            let res: io::Result<()> = try {
+                let mut f = BufWriter::new(File::create(&path)?);
+                for sym in self.info.exports[&crate_type].iter() {
+                    debug!("  _{}", sym);
+                    writeln!(f, "_{}", sym)?;
+                }
+            };
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+            }
+        } else if is_windows {
+            let res: io::Result<()> = try {
+                let mut f = BufWriter::new(File::create(&path)?);
+
+                // A .def file similar to the MSVC one, but without the LIBRARY
+                // section because LD doesn't like it when that section is empty.
+                writeln!(f, "EXPORTS")?;
+                for symbol in self.info.exports[&crate_type].iter() {
+                    debug!("  _{}", symbol);
+                    writeln!(f, "  {}", symbol)?;
+                }
+            };
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to write list.def file: {}", e));
+            }
+        } else {
+            // Write an LD version script
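+            // which, for two hypothetical exported symbols `foo` and `bar`,
+            // ends up looking like:
+            //
+            //     {
+            //       global:
+            //         foo;
+            //         bar;
+            //
+            //       local:
+            //         *;
+            //     };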
+            let res: io::Result<()> = try {
+                let mut f = BufWriter::new(File::create(&path)?);
+                writeln!(f, "{{")?;
+                if !self.info.exports[&crate_type].is_empty() {
+                    writeln!(f, "  global:")?;
+                    for sym in self.info.exports[&crate_type].iter() {
+                        debug!("    {};", sym);
+                        writeln!(f, "    {};", sym)?;
+                    }
+                }
+                writeln!(f, "\n  local:\n    *;\n}};")?;
+            };
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to write version script: {}", e));
+            }
+        }
+
+        if self.sess.target.target.options.is_like_osx {
+            if !self.is_ld {
+                arg.push("-Wl,")
+            }
+            arg.push("-exported_symbols_list,");
+        } else if self.sess.target.target.options.is_like_solaris {
+            if !self.is_ld {
+                arg.push("-Wl,")
+            }
+            arg.push("-M,");
+        } else {
+            if !self.is_ld {
+                arg.push("-Wl,")
+            }
+            // Both LD and LLD accept export list in *.def file form, there are no flags required
+            if !is_windows {
+                arg.push("--version-script=")
+            }
+        }
+
+        arg.push(&path);
+        self.cmd.arg(arg);
+    }
+
+    fn subsystem(&mut self, subsystem: &str) {
+        self.linker_arg("--subsystem");
+        self.linker_arg(&subsystem);
+    }
+
+    fn finalize(&mut self) {
+        self.hint_dynamic(); // Reset to default before returning the composed command line.
+    }
+
+    fn group_start(&mut self) {
+        if self.takes_hints() {
+            self.linker_arg("--start-group");
+        }
+    }
+
+    fn group_end(&mut self) {
+        if self.takes_hints() {
+            self.linker_arg("--end-group");
+        }
+    }
+
+    fn linker_plugin_lto(&mut self) {
+        match self.sess.opts.cg.linker_plugin_lto {
+            LinkerPluginLto::Disabled => {
+                // Nothing to do
+            }
+            LinkerPluginLto::LinkerPluginAuto => {
+                self.push_linker_plugin_lto_args(None);
+            }
+            LinkerPluginLto::LinkerPlugin(ref path) => {
+                self.push_linker_plugin_lto_args(Some(path.as_os_str()));
+            }
+        }
+    }
+
+    // Add the `GNU_EH_FRAME` program header which is required to locate unwinding information.
+    // Some versions of `gcc` add it implicitly, some (e.g. `musl-gcc`) don't,
+    // so we just always add it.
+    fn add_eh_frame_header(&mut self) {
+        self.linker_arg("--eh-frame-hdr");
+    }
+}
+
+pub struct MsvcLinker<'a> {
+    cmd: Command,
+    sess: &'a Session,
+    info: &'a LinkerInfo,
+}
+
+impl<'a> Linker for MsvcLinker<'a> {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+        match output_kind {
+            LinkOutputKind::DynamicNoPicExe
+            | LinkOutputKind::DynamicPicExe
+            | LinkOutputKind::StaticNoPicExe
+            | LinkOutputKind::StaticPicExe => {}
+            LinkOutputKind::DynamicDylib | LinkOutputKind::StaticDylib => {
+                self.cmd.arg("/DLL");
+                let mut arg: OsString = "/IMPLIB:".into();
+                arg.push(out_filename.with_extension("dll.lib"));
+                self.cmd.arg(arg);
+            }
+        }
+    }
+
+    fn link_rlib(&mut self, lib: &Path) {
+        self.cmd.arg(lib);
+    }
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg(path);
+    }
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {
+        // MSVC's ICF (Identical COMDAT Folding) link optimization is
+        // slow for Rust, so we disable it by default in non-optimized
+        // builds.
+        if self.sess.opts.optimize != config::OptLevel::No {
+            self.cmd.arg("/OPT:REF,ICF");
+        } else {
+            // It is necessary to specify NOICF here, because /OPT:REF
+            // implies ICF by default.
+            self.cmd.arg("/OPT:REF,NOICF");
+        }
+    }
+
+    fn link_dylib(&mut self, lib: Symbol) {
+        self.cmd.arg(&format!("{}.lib", lib));
+    }
+
+    fn link_rust_dylib(&mut self, lib: Symbol, path: &Path) {
+        // When producing a dll, the MSVC linker may not emit a `foo.lib`
+        // file if the dll doesn't actually export any symbols, so we check
+        // whether the file is there and omit linking to it if it's not
+        // present.
+        let name = format!("{}.dll.lib", lib);
+        if fs::metadata(&path.join(&name)).is_ok() {
+            self.cmd.arg(name);
+        }
+    }
+
+    fn link_staticlib(&mut self, lib: Symbol) {
+        self.cmd.arg(&format!("{}.lib", lib));
+    }
+
+    fn full_relro(&mut self) {
+        // noop
+    }
+
+    fn partial_relro(&mut self) {
+        // noop
+    }
+
+    fn no_relro(&mut self) {
+        // noop
+    }
+
+    fn no_crt_objects(&mut self) {
+        // noop
+    }
+
+    fn no_default_libraries(&mut self) {
+        self.cmd.arg("/NODEFAULTLIB");
+    }
+
+    fn include_path(&mut self, path: &Path) {
+        let mut arg = OsString::from("/LIBPATH:");
+        arg.push(path);
+        self.cmd.arg(&arg);
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        let mut arg = OsString::from("/OUT:");
+        arg.push(path);
+        self.cmd.arg(&arg);
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        bug!("frameworks are not supported on windows")
+    }
+    fn link_framework(&mut self, _framework: Symbol) {
+        bug!("frameworks are not supported on windows")
+    }
+
+    fn link_whole_staticlib(&mut self, lib: Symbol, _search_path: &[PathBuf]) {
+        self.link_staticlib(lib);
+        self.cmd.arg(format!("/WHOLEARCHIVE:{}.lib", lib));
+    }
+    fn link_whole_rlib(&mut self, path: &Path) {
+        self.link_rlib(path);
+        let mut arg = OsString::from("/WHOLEARCHIVE:");
+        arg.push(path);
+        self.cmd.arg(arg);
+    }
+    fn optimize(&mut self) {
+        // Needs more investigation of `/OPT` arguments
+    }
+
+    fn pgo_gen(&mut self) {
+        // Nothing needed here.
+    }
+
+    fn control_flow_guard(&mut self) {
+        self.cmd.arg("/guard:cf");
+    }
+
+    fn debuginfo(&mut self, strip: Strip) {
+        match strip {
+            Strip::None => {
+                // This will cause the Microsoft linker to generate a PDB file
+                // from the CodeView line tables in the object files.
+                self.cmd.arg("/DEBUG");
+
+                // This will cause the Microsoft linker to embed .natvis info into the PDB file
+                let natvis_dir_path = self.sess.sysroot.join("lib\\rustlib\\etc");
+                if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) {
+                    for entry in natvis_dir {
+                        match entry {
+                            Ok(entry) => {
+                                let path = entry.path();
+                                if path.extension() == Some("natvis".as_ref()) {
+                                    let mut arg = OsString::from("/NATVIS:");
+                                    arg.push(path);
+                                    self.cmd.arg(arg);
+                                }
+                            }
+                            Err(err) => {
+                                self.sess
+                                    .warn(&format!("error enumerating natvis directory: {}", err));
+                            }
+                        }
+                    }
+                }
+            }
+            Strip::Debuginfo | Strip::Symbols => {
+                self.cmd.arg("/DEBUG:NONE");
+            }
+        }
+    }
+
+    // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to
+    // export symbols from a dynamic library. When building a dynamic library,
+    // however, we're going to want some symbols exported, so this function
+    // generates a DEF file which lists all the symbols.
+    //
+    // The linker will read this `*.def` file and export all the symbols from
+    // the dynamic library. Note that this is not as simple as just exporting
+    // all the symbols in the current crate (as specified by `codegen.reachable`)
+    // but rather we also need to possibly export the symbols of upstream
+    // crates. Upstream rlibs may be linked statically to this dynamic library,
+    // in which case they may continue to transitively be used and hence need
+    // their symbols exported.
+    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) {
+        // Symbol visibility typically takes care of this.
+        if crate_type == CrateType::Executable {
+            return;
+        }
+
+        let path = tmpdir.join("lib.def");
+        let res: io::Result<()> = try {
+            let mut f = BufWriter::new(File::create(&path)?);
+
+            // Start off with the standard module name header and then go
+            // straight to exports.
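+            //
+            // For two hypothetical exports `foo` and `bar` the generated
+            // file reads:
+            //
+            //     LIBRARY
+            //     EXPORTS
+            //       foo
+            //       bar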
+            writeln!(f, "LIBRARY")?;
+            writeln!(f, "EXPORTS")?;
+            for symbol in self.info.exports[&crate_type].iter() {
+                debug!("  _{}", symbol);
+                writeln!(f, "  {}", symbol)?;
+            }
+        };
+        if let Err(e) = res {
+            self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+        }
+        let mut arg = OsString::from("/DEF:");
+        arg.push(path);
+        self.cmd.arg(&arg);
+    }
+
+    fn subsystem(&mut self, subsystem: &str) {
+        // Note that previous passes of the compiler validated this subsystem,
+        // so we just blindly pass it to the linker.
+        self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
+
+        // Windows has two subsystems we're interested in right now, the console
+        // and windows subsystems. These both implicitly have different entry
+        // points (starting symbols). The console entry point starts with
+        // `mainCRTStartup` and the windows entry point starts with
+        // `WinMainCRTStartup`. These entry points, defined in system libraries,
+        // will then later probe for either `main` or `WinMain`, respectively,
+        // to start the application.
+        //
+        // In Rust we just always generate a `main` function so we want control
+        // to always start there, so we force the entry point on the windows
+        // subsystem to be `mainCRTStartup` to get everything booted up
+        // correctly.
+        //
+        // For more information see RFC #1665
+        if subsystem == "windows" {
+            self.cmd.arg("/ENTRY:mainCRTStartup");
+        }
+    }
+
+    fn finalize(&mut self) {}
+
+    // MSVC doesn't need group indicators
+    fn group_start(&mut self) {}
+    fn group_end(&mut self) {}
+
+    fn linker_plugin_lto(&mut self) {
+        // Do nothing
+    }
+}
+
+pub struct EmLinker<'a> {
+    cmd: Command,
+    sess: &'a Session,
+    info: &'a LinkerInfo,
+}
+
+impl<'a> Linker for EmLinker<'a> {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
+    }
+
+    fn link_staticlib(&mut self, lib: Symbol) {
+        self.cmd.arg("-l").sym_arg(lib);
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        self.cmd.arg("-o").arg(path);
+    }
+
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg(path);
+    }
+
+    fn link_dylib(&mut self, lib: Symbol) {
+        // Emscripten always links statically
+        self.link_staticlib(lib);
+    }
+
+    fn link_whole_staticlib(&mut self, lib: Symbol, _search_path: &[PathBuf]) {
+        // not supported?
+        self.link_staticlib(lib);
+    }
+
+    fn link_whole_rlib(&mut self, lib: &Path) {
+        // not supported?
+        self.link_rlib(lib);
+    }
+
+    fn link_rust_dylib(&mut self, lib: Symbol, _path: &Path) {
+        self.link_dylib(lib);
+    }
+
+    fn link_rlib(&mut self, lib: &Path) {
+        self.add_object(lib);
+    }
+
+    fn full_relro(&mut self) {
+        // noop
+    }
+
+    fn partial_relro(&mut self) {
+        // noop
+    }
+
+    fn no_relro(&mut self) {
+        // noop
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        bug!("frameworks are not supported on Emscripten")
+    }
+
+    fn link_framework(&mut self, _framework: Symbol) {
+        bug!("frameworks are not supported on Emscripten")
+    }
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {
+        // noop
+    }
+
+    fn optimize(&mut self) {
+        // Emscripten performs its own optimizations
+        self.cmd.arg(match self.sess.opts.optimize {
+            OptLevel::No => "-O0",
+            OptLevel::Less => "-O1",
+            OptLevel::Default => "-O2",
+            OptLevel::Aggressive => "-O3",
+            OptLevel::Size => "-Os",
+            OptLevel::SizeMin => "-Oz",
+        });
+        // Unusable until https://github.com/rust-lang/rust/issues/38454 is resolved
+        self.cmd.args(&["--memory-init-file", "0"]);
+    }
+
+    fn pgo_gen(&mut self) {
+        // noop, but maybe we need something like the gnu linker?
+    }
+
+    fn control_flow_guard(&mut self) {}
+
+    fn debuginfo(&mut self, _strip: Strip) {
+        // Preserve names or generate source maps depending on debug info
+        self.cmd.arg(match self.sess.opts.debuginfo {
+            DebugInfo::None => "-g0",
+            DebugInfo::Limited => "-g3",
+            DebugInfo::Full => "-g4",
+        });
+    }
+
+    fn no_crt_objects(&mut self) {}
+
+    fn no_default_libraries(&mut self) {
+        self.cmd.args(&["-s", "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[]"]);
+    }
+
+    fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
+        let symbols = &self.info.exports[&crate_type];
+
+        debug!("EXPORTED SYMBOLS:");
+
+        self.cmd.arg("-s");
+
+        let mut arg = OsString::from("EXPORTED_FUNCTIONS=");
+        let mut encoded = String::new();
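+
+        // The symbol list is JSON-encoded, so the final linker argument ends
+        // up looking like `-s EXPORTED_FUNCTIONS=["_foo","_bar"]` (symbol
+        // names here are illustrative; the `_` prefix matches what the
+        // encoder below prepends).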
+
+        {
+            let mut encoder = json::Encoder::new(&mut encoded);
+            let res = encoder.emit_seq(symbols.len(), |encoder| {
+                for (i, sym) in symbols.iter().enumerate() {
+                    encoder.emit_seq_elt(i, |encoder| encoder.emit_str(&("_".to_owned() + sym)))?;
+                }
+                Ok(())
+            });
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to encode exported symbols: {}", e));
+            }
+        }
+        debug!("{}", encoded);
+        arg.push(encoded);
+
+        self.cmd.arg(arg);
+    }
+
+    fn subsystem(&mut self, _subsystem: &str) {
+        // noop
+    }
+
+    fn finalize(&mut self) {}
+
+    // Appears to be unnecessary on Emscripten
+    fn group_start(&mut self) {}
+    fn group_end(&mut self) {}
+
+    fn linker_plugin_lto(&mut self) {
+        // Do nothing
+    }
+}
+
+pub struct WasmLd<'a> {
+    cmd: Command,
+    sess: &'a Session,
+    info: &'a LinkerInfo,
+}
+
+impl<'a> WasmLd<'a> {
+    fn new(mut cmd: Command, sess: &'a Session, info: &'a LinkerInfo) -> WasmLd<'a> {
+        // If the atomics feature is enabled for wasm then we need a whole bunch
+        // of flags:
+        //
+        // * `--shared-memory` - the link won't even succeed without this, flags
+        //   the one linear memory as `shared`
+        //
+        // * `--max-memory=1G` - when specifying a shared memory this must also
+        //   be specified. We conservatively choose 1GB but users should be able
+        //   to override this with `-C link-arg`.
+        //
+        // * `--import-memory` - it doesn't make much sense for memory to be
+        //   exported in a threaded module because typically you're
+        //   sharing memory and instantiating the module multiple times. As a
+        //   result if it were exported then we'd just have no sharing.
+        //
+        // * `--export=__wasm_init_memory` - when using `--passive-segments` the
+        //   linker will synthesize this function, and so we need to make sure
+        //   that our usage of `--export` below won't accidentally cause this
+        //   function to get deleted.
+        //
+        // * `--export=*tls*` - when `#[thread_local]` symbols are used these
+        //   symbols are how the TLS segments are initialized and configured.
+        if sess.target_features.contains(&sym::atomics) {
+            cmd.arg("--shared-memory");
+            cmd.arg("--max-memory=1073741824");
+            cmd.arg("--import-memory");
+            cmd.arg("--export=__wasm_init_memory");
+            cmd.arg("--export=__wasm_init_tls");
+            cmd.arg("--export=__tls_size");
+            cmd.arg("--export=__tls_align");
+            cmd.arg("--export=__tls_base");
+        }
+        WasmLd { cmd, sess, info }
+    }
+}
+
+impl<'a> Linker for WasmLd<'a> {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, output_kind: LinkOutputKind, _out_filename: &Path) {
+        match output_kind {
+            LinkOutputKind::DynamicNoPicExe
+            | LinkOutputKind::DynamicPicExe
+            | LinkOutputKind::StaticNoPicExe
+            | LinkOutputKind::StaticPicExe => {}
+            LinkOutputKind::DynamicDylib | LinkOutputKind::StaticDylib => {
+                self.cmd.arg("--no-entry");
+            }
+        }
+    }
+
+    fn link_dylib(&mut self, lib: Symbol) {
+        self.cmd.arg("-l").sym_arg(lib);
+    }
+
+    fn link_staticlib(&mut self, lib: Symbol) {
+        self.cmd.arg("-l").sym_arg(lib);
+    }
+
+    fn link_rlib(&mut self, lib: &Path) {
+        self.cmd.arg(lib);
+    }
+
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        panic!("frameworks not supported")
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        self.cmd.arg("-o").arg(path);
+    }
+
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg(path);
+    }
+
+    fn full_relro(&mut self) {}
+
+    fn partial_relro(&mut self) {}
+
+    fn no_relro(&mut self) {}
+
+    fn link_rust_dylib(&mut self, lib: Symbol, _path: &Path) {
+        self.cmd.arg("-l").sym_arg(lib);
+    }
+
+    fn link_framework(&mut self, _framework: Symbol) {
+        panic!("frameworks not supported")
+    }
+
+    fn link_whole_staticlib(&mut self, lib: Symbol, _search_path: &[PathBuf]) {
+        self.cmd.arg("-l").sym_arg(lib);
+    }
+
+    fn link_whole_rlib(&mut self, lib: &Path) {
+        self.cmd.arg(lib);
+    }
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {
+        self.cmd.arg("--gc-sections");
+    }
+
+    fn optimize(&mut self) {
+        self.cmd.arg(match self.sess.opts.optimize {
+            OptLevel::No => "-O0",
+            OptLevel::Less => "-O1",
+            OptLevel::Default => "-O2",
+            OptLevel::Aggressive => "-O3",
+            // Currently LLD doesn't support `Os` and `Oz`, so pass through `O2`
+            // instead.
+            OptLevel::Size => "-O2",
+            OptLevel::SizeMin => "-O2",
+        });
+    }
+
+    fn pgo_gen(&mut self) {}
+
+    fn debuginfo(&mut self, strip: Strip) {
+        match strip {
+            Strip::None => {}
+            Strip::Debuginfo => {
+                self.cmd.arg("--strip-debug");
+            }
+            Strip::Symbols => {
+                self.cmd.arg("--strip-all");
+            }
+        }
+    }
+
+    fn control_flow_guard(&mut self) {}
+
+    fn no_crt_objects(&mut self) {}
+
+    fn no_default_libraries(&mut self) {}
+
+    fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
+        for sym in self.info.exports[&crate_type].iter() {
+            self.cmd.arg("--export").arg(&sym);
+        }
+
+        // LLD will hide these otherwise-internal symbols since it only exports
+        // symbols explicitly passed via the `--export` flags above and hides all
+        // others. Various bits and pieces of tooling use this, so be sure these
+        // symbols make their way out of the linker as well.
+        self.cmd.arg("--export=__heap_base");
+        self.cmd.arg("--export=__data_end");
+    }
+
+    fn subsystem(&mut self, _subsystem: &str) {}
+
+    fn finalize(&mut self) {}
+
+    // Not needed for now with LLD
+    fn group_start(&mut self) {}
+    fn group_end(&mut self) {}
+
+    fn linker_plugin_lto(&mut self) {
+        // Do nothing for now
+    }
+}
+
+fn exported_symbols(tcx: TyCtxt<'_>, crate_type: CrateType) -> Vec<String> {
+    if let Some(ref exports) = tcx.sess.target.target.options.override_export_symbols {
+        return exports.clone();
+    }
+
+    let mut symbols = Vec::new();
+
+    let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
+    for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() {
+        if level.is_below_threshold(export_threshold) {
+            symbols.push(symbol_export::symbol_name_for_instance_in_crate(
+                tcx,
+                symbol,
+                LOCAL_CRATE,
+            ));
+        }
+    }
+
+    let formats = tcx.dependency_formats(LOCAL_CRATE);
+    let deps = formats.iter().find_map(|(t, list)| (*t == crate_type).then_some(list)).unwrap();
+
+    for (index, dep_format) in deps.iter().enumerate() {
+        let cnum = CrateNum::new(index + 1);
+        // For each dependency that we are linking to statically ...
+        if *dep_format == Linkage::Static {
+            // ... we add its symbol list to our export list.
+            for &(symbol, level) in tcx.exported_symbols(cnum).iter() {
+                if !level.is_below_threshold(export_threshold) {
+                    continue;
+                }
+
+                symbols.push(symbol_export::symbol_name_for_instance_in_crate(tcx, symbol, cnum));
+            }
+        }
+    }
+
+    symbols
+}
+
+/// A much simplified and explicit CLI for the NVPTX linker. The linker
+/// operates on bitcode and uses the LLVM backend to generate PTX assembly.
+pub struct PtxLinker<'a> {
+    cmd: Command,
+    sess: &'a Session,
+}
+
+impl<'a> Linker for PtxLinker<'a> {
+    fn cmd(&mut self) -> &mut Command {
+        &mut self.cmd
+    }
+
+    fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+    fn link_rlib(&mut self, path: &Path) {
+        self.cmd.arg("--rlib").arg(path);
+    }
+
+    fn link_whole_rlib(&mut self, path: &Path) {
+        self.cmd.arg("--rlib").arg(path);
+    }
+
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
+    }
+
+    fn debuginfo(&mut self, _strip: Strip) {
+        self.cmd.arg("--debug");
+    }
+
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg("--bitcode").arg(path);
+    }
+
+    fn optimize(&mut self) {
+        match self.sess.lto() {
+            Lto::Thin | Lto::Fat | Lto::ThinLocal => {
+                self.cmd.arg("-Olto");
+            }
+
+            Lto::No => {}
+        };
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        self.cmd.arg("-o").arg(path);
+    }
+
+    fn finalize(&mut self) {
+        // Provide the linker with fallback to internal `target-cpu`.
+        self.cmd.arg("--fallback-arch").arg(match self.sess.opts.cg.target_cpu {
+            Some(ref s) => s,
+            None => &self.sess.target.target.options.cpu,
+        });
+    }
+
+    fn link_dylib(&mut self, _lib: Symbol) {
+        panic!("external dylibs not supported")
+    }
+
+    fn link_rust_dylib(&mut self, _lib: Symbol, _path: &Path) {
+        panic!("external dylibs not supported")
+    }
+
+    fn link_staticlib(&mut self, _lib: Symbol) {
+        panic!("staticlibs not supported")
+    }
+
+    fn link_whole_staticlib(&mut self, _lib: Symbol, _search_path: &[PathBuf]) {
+        panic!("staticlibs not supported")
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        panic!("frameworks not supported")
+    }
+
+    fn link_framework(&mut self, _framework: Symbol) {
+        panic!("frameworks not supported")
+    }
+
+    fn full_relro(&mut self) {}
+
+    fn partial_relro(&mut self) {}
+
+    fn no_relro(&mut self) {}
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {}
+
+    fn pgo_gen(&mut self) {}
+
+    fn no_crt_objects(&mut self) {}
+
+    fn no_default_libraries(&mut self) {}
+
+    fn control_flow_guard(&mut self) {}
+
+    fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType) {}
+
+    fn subsystem(&mut self, _subsystem: &str) {}
+
+    fn group_start(&mut self) {}
+
+    fn group_end(&mut self) {}
+
+    fn linker_plugin_lto(&mut self) {}
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
new file mode 100644
index 00000000000..0d7f4447696
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -0,0 +1,107 @@
+use super::write::CodegenContext;
+use crate::traits::*;
+use crate::ModuleCodegen;
+
+use rustc_errors::FatalError;
+
+use std::ffi::CString;
+use std::sync::Arc;
+
+pub struct ThinModule<B: WriteBackendMethods> {
+    pub shared: Arc<ThinShared<B>>,
+    pub idx: usize,
+}
+
+impl<B: WriteBackendMethods> ThinModule<B> {
+    pub fn name(&self) -> &str {
+        self.shared.module_names[self.idx].to_str().unwrap()
+    }
+
+    pub fn cost(&self) -> u64 {
+        // Yes, that's correct, we're using the size of the bytecode as an
+        // indicator for how costly this codegen unit is.
+        self.data().len() as u64
+    }
+
+    pub fn data(&self) -> &[u8] {
+        let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data());
+        a.unwrap_or_else(|| {
+            let len = self.shared.thin_buffers.len();
+            self.shared.serialized_modules[self.idx - len].data()
+        })
+    }
+}
+
+pub struct ThinShared<B: WriteBackendMethods> {
+    pub data: B::ThinData,
+    pub thin_buffers: Vec<B::ThinBuffer>,
+    pub serialized_modules: Vec<SerializedModule<B::ModuleBuffer>>,
+    pub module_names: Vec<CString>,
+}
+
+pub enum LtoModuleCodegen<B: WriteBackendMethods> {
+    Fat {
+        module: Option<ModuleCodegen<B::Module>>,
+        _serialized_bitcode: Vec<SerializedModule<B::ModuleBuffer>>,
+    },
+
+    Thin(ThinModule<B>),
+}
+
+impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
+    pub fn name(&self) -> &str {
+        match *self {
+            LtoModuleCodegen::Fat { .. } => "everything",
+            LtoModuleCodegen::Thin(ref m) => m.name(),
+        }
+    }
+
+    /// Optimize this module within the given codegen context.
+    ///
+    /// This function is unsafe as it'll return a `ModuleCodegen` that still
+    /// points to LLVM data structures owned by this `LtoModuleCodegen`.
+    /// It's intended that the module returned is immediately code generated and
+    /// dropped, and then this LTO module is dropped.
+    pub unsafe fn optimize(
+        &mut self,
+        cgcx: &CodegenContext<B>,
+    ) -> Result<ModuleCodegen<B::Module>, FatalError> {
+        match *self {
+            LtoModuleCodegen::Fat { ref mut module, .. } => {
+                let module = module.take().unwrap();
+                {
+                    let config = cgcx.config(module.kind);
+                    B::run_lto_pass_manager(cgcx, &module, config, false);
+                }
+                Ok(module)
+            }
+            LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin),
+        }
+    }
+
+    /// A "gauge" of how costly it is to optimize this module, used to sort
+    /// biggest modules first.
+    pub fn cost(&self) -> u64 {
+        match *self {
+            // Only one module with fat LTO, so the cost doesn't matter.
+            LtoModuleCodegen::Fat { .. } => 0,
+            LtoModuleCodegen::Thin(ref m) => m.cost(),
+        }
+    }
+}
+
+pub enum SerializedModule<M: ModuleBufferMethods> {
+    Local(M),
+    FromRlib(Vec<u8>),
+    FromUncompressedFile(memmap::Mmap),
+}
+
+impl<M: ModuleBufferMethods> SerializedModule<M> {
+    pub fn data(&self) -> &[u8] {
+        match *self {
+            SerializedModule::Local(ref m) => m.data(),
+            SerializedModule::FromRlib(ref m) => m,
+            SerializedModule::FromUncompressedFile(ref m) => m,
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/mod.rs b/compiler/rustc_codegen_ssa/src/back/mod.rs
new file mode 100644
index 00000000000..20ca503d43f
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/mod.rs
@@ -0,0 +1,8 @@
+pub mod archive;
+pub mod command;
+pub mod link;
+pub mod linker;
+pub mod lto;
+pub mod rpath;
+pub mod symbol_export;
+pub mod write;
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
new file mode 100644
index 00000000000..005d2efdd3b
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -0,0 +1,135 @@
+use pathdiff::diff_paths;
+use rustc_data_structures::fx::FxHashSet;
+use std::env;
+use std::fs;
+use std::path::{Path, PathBuf};
+
+use rustc_hir::def_id::CrateNum;
+use rustc_middle::middle::cstore::LibSource;
+
+pub struct RPathConfig<'a> {
+    pub used_crates: &'a [(CrateNum, LibSource)],
+    pub out_filename: PathBuf,
+    pub is_like_osx: bool,
+    pub has_rpath: bool,
+    pub linker_is_gnu: bool,
+    pub get_install_prefix_lib_path: &'a mut dyn FnMut() -> PathBuf,
+}
+
+pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
+    // No rpath on windows
+    if !config.has_rpath {
+        return Vec::new();
+    }
+
+    debug!("preparing the RPATH!");
+
+    let libs = config.used_crates.clone();
+    let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::<Vec<_>>();
+    let rpaths = get_rpaths(config, &libs);
+    let mut flags = rpaths_to_flags(&rpaths);
+
+    // Use DT_RUNPATH instead of DT_RPATH if available
+    if config.linker_is_gnu {
+        flags.push("-Wl,--enable-new-dtags".to_owned());
+    }
+
+    flags
+}
+
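+// A comma inside an rpath can't be passed via `-Wl,-rpath,PATH`, since the
+// `-Wl` syntax itself splits on commas, so such paths are routed through
+// `-Xlinker` instead. For a hypothetical path `a,comma,path` this produces:
+//
+//     -Wl,-rpath -Xlinker a,comma,path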
+fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
+    let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity
+
+    for rpath in rpaths {
+        if rpath.contains(',') {
+            ret.push("-Wl,-rpath".into());
+            ret.push("-Xlinker".into());
+            ret.push(rpath.clone());
+        } else {
+            ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
+        }
+    }
+
+    ret
+}
+
+fn get_rpaths(config: &mut RPathConfig<'_>, libs: &[PathBuf]) -> Vec<String> {
+    debug!("output: {:?}", config.out_filename.display());
+    debug!("libs:");
+    for libpath in libs {
+        debug!("    {:?}", libpath.display());
+    }
+
+    // Use relative paths to the libraries. Binaries can be moved
+    // as long as they maintain the relative relationship to the
+    // crates they depend on.
+    let rel_rpaths = get_rpaths_relative_to_output(config, libs);
+
+    // And a final backup rpath to the global library location.
+    let fallback_rpaths = vec![get_install_prefix_rpath(config)];
+
+    fn log_rpaths(desc: &str, rpaths: &[String]) {
+        debug!("{} rpaths:", desc);
+        for rpath in rpaths {
+            debug!("    {}", *rpath);
+        }
+    }
+
+    log_rpaths("relative", &rel_rpaths);
+    log_rpaths("fallback", &fallback_rpaths);
+
+    let mut rpaths = rel_rpaths;
+    rpaths.extend_from_slice(&fallback_rpaths);
+
+    // Remove duplicates
+    minimize_rpaths(&rpaths)
+}
+
+fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>, libs: &[PathBuf]) -> Vec<String> {
+    libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
+}
+
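+// As a sketch of the expected output: for a library at `lib/libstd.so` and
+// an output of `bin/rustc`, this computes `$ORIGIN/../lib` (or
+// `@loader_path/../lib` on macOS), as exercised by the `tests` module below.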
+fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> String {
+    // Mac doesn't appear to support $ORIGIN
+    let prefix = if config.is_like_osx { "@loader_path" } else { "$ORIGIN" };
+
+    let cwd = env::current_dir().unwrap();
+    let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or_else(|_| cwd.join(lib));
+    lib.pop(); // strip filename
+    let mut output = cwd.join(&config.out_filename);
+    output.pop(); // strip filename
+    let output = fs::canonicalize(&output).unwrap_or(output);
+    let relative = path_relative_from(&lib, &output)
+        .unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib));
+    // FIXME (#9639): This needs to handle non-utf8 paths
+    format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path"))
+}
+
+// This routine is adapted from the *old* Path's `path_relative_from`
+// function, which works differently from the new `relative_from` function.
+// In particular, this handles the case on unix where both paths are
+// absolute but with only the root as the common directory.
+fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
+    diff_paths(path, base)
+}
+
+fn get_install_prefix_rpath(config: &mut RPathConfig<'_>) -> String {
+    let path = (config.get_install_prefix_lib_path)();
+    let path = env::current_dir().unwrap().join(&path);
+    // FIXME (#9639): This needs to handle non-utf8 paths
+    path.to_str().expect("non-utf8 component in rpath").to_owned()
+}
+
+fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
+    let mut set = FxHashSet::default();
+    let mut minimized = Vec::new();
+    for rpath in rpaths {
+        if set.insert(rpath) {
+            minimized.push(rpath.clone());
+        }
+    }
+    minimized
+}
+
+#[cfg(all(unix, test))]
+mod tests;
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
new file mode 100644
index 00000000000..35836ae719b
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
@@ -0,0 +1,74 @@
+use super::RPathConfig;
+use super::{get_rpath_relative_to_output, minimize_rpaths, rpaths_to_flags};
+use std::path::{Path, PathBuf};
+
+#[test]
+fn test_rpaths_to_flags() {
+    let flags = rpaths_to_flags(&["path1".to_string(), "path2".to_string()]);
+    assert_eq!(flags, ["-Wl,-rpath,path1", "-Wl,-rpath,path2"]);
+}
+
+#[test]
+fn test_minimize1() {
+    let res = minimize_rpaths(&["rpath1".to_string(), "rpath2".to_string(), "rpath1".to_string()]);
+    assert!(res == ["rpath1", "rpath2",]);
+}
+
+#[test]
+fn test_minimize2() {
+    let res = minimize_rpaths(&[
+        "1a".to_string(),
+        "2".to_string(),
+        "2".to_string(),
+        "1a".to_string(),
+        "4a".to_string(),
+        "1a".to_string(),
+        "2".to_string(),
+        "3".to_string(),
+        "4a".to_string(),
+        "3".to_string(),
+    ]);
+    assert!(res == ["1a", "2", "4a", "3",]);
+}
+
+#[test]
+fn test_rpath_relative() {
+    if cfg!(target_os = "macos") {
+        let config = &mut RPathConfig {
+            used_crates: &[],
+            has_rpath: true,
+            is_like_osx: true,
+            linker_is_gnu: false,
+            out_filename: PathBuf::from("bin/rustc"),
+            get_install_prefix_lib_path: &mut || panic!(),
+        };
+        let res = get_rpath_relative_to_output(config, Path::new("lib/libstd.so"));
+        assert_eq!(res, "@loader_path/../lib");
+    } else {
+        let config = &mut RPathConfig {
+            used_crates: &[],
+            out_filename: PathBuf::from("bin/rustc"),
+            get_install_prefix_lib_path: &mut || panic!(),
+            has_rpath: true,
+            is_like_osx: false,
+            linker_is_gnu: true,
+        };
+        let res = get_rpath_relative_to_output(config, Path::new("lib/libstd.so"));
+        assert_eq!(res, "$ORIGIN/../lib");
+    }
+}
+
+#[test]
+fn test_xlinker() {
+    let args = rpaths_to_flags(&["a/normal/path".to_string(), "a,comma,path".to_string()]);
+
+    assert_eq!(
+        args,
+        vec![
+            "-Wl,-rpath,a/normal/path".to_string(),
+            "-Wl,-rpath".to_string(),
+            "-Xlinker".to_string(),
+            "a,comma,path".to_string()
+        ]
+    );
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
new file mode 100644
index 00000000000..51cc1ada432
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -0,0 +1,446 @@
+use std::collections::hash_map::Entry::*;
+
+use rustc_ast::expand::allocator::ALLOCATOR_METHODS;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::Node;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::exported_symbols::{
+    metadata_symbol_name, ExportedSymbol, SymbolExportLevel,
+};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::{SymbolName, TyCtxt};
+use rustc_session::config::{CrateType, SanitizerSet};
+
+pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
+    crates_export_threshold(&tcx.sess.crate_types())
+}
+
+fn crate_export_threshold(crate_type: CrateType) -> SymbolExportLevel {
+    match crate_type {
+        CrateType::Executable | CrateType::Staticlib | CrateType::ProcMacro | CrateType::Cdylib => {
+            SymbolExportLevel::C
+        }
+        CrateType::Rlib | CrateType::Dylib => SymbolExportLevel::Rust,
+    }
+}
+
+pub fn crates_export_threshold(crate_types: &[CrateType]) -> SymbolExportLevel {
+    if crate_types
+        .iter()
+        .any(|&crate_type| crate_export_threshold(crate_type) == SymbolExportLevel::Rust)
+    {
+        SymbolExportLevel::Rust
+    } else {
+        SymbolExportLevel::C
+    }
+}
+
+fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<SymbolExportLevel> {
+    assert_eq!(cnum, LOCAL_CRATE);
+
+    if !tcx.sess.opts.output_types.should_codegen() {
+        return Default::default();
+    }
+
+    // Check to see if this crate is a "special runtime crate". These
+    // crates, implementation details of the standard library, typically
+    // have a bunch of `pub extern` and `#[no_mangle]` functions as the
+    // ABI between them. We don't want their symbols to have a `C`
+    // export level, however, as they're just implementation details.
+    // Down below we'll hardwire all of the symbols to the `Rust` export
+    // level instead.
+    let special_runtime_crate =
+        tcx.is_panic_runtime(LOCAL_CRATE) || tcx.is_compiler_builtins(LOCAL_CRATE);
+
+    let mut reachable_non_generics: DefIdMap<_> = tcx
+        .reachable_set(LOCAL_CRATE)
+        .iter()
+        .filter_map(|&def_id| {
+            // We want to ignore some FFI functions that are not exposed from
+            // this crate. Reachable FFI functions can be lumped into two
+            // categories:
+            //
+            // 1. Those that are included statically via a static library
+            // 2. Those included otherwise (e.g., dynamically or via a framework)
+            //
+            // Although our LLVM module is not literally emitting code for the
+            // statically included symbols, it's an export of our library which
+            // needs to be passed on to the linker and encoded in the metadata.
+            //
+            // As a result, if this id is an FFI item (foreign item) then we only
+            // let it through if it's included statically.
+            match tcx.hir().get(tcx.hir().local_def_id_to_hir_id(def_id)) {
+                Node::ForeignItem(..) => {
+                    tcx.is_statically_included_foreign_item(def_id).then_some(def_id)
+                }
+
+                // Only consider nodes that actually have exported symbols.
+                Node::Item(&hir::Item {
+                    kind: hir::ItemKind::Static(..) | hir::ItemKind::Fn(..),
+                    ..
+                })
+                | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) => {
+                    let generics = tcx.generics_of(def_id);
+                    if !generics.requires_monomorphization(tcx)
+                        // Functions marked with #[inline] are codegened with "internal"
+                        // linkage and are not exported unless marked with an extern
+                        // indicator
+                        && (!Instance::mono(tcx, def_id.to_def_id()).def.generates_cgu_internal_copy(tcx)
+                            || tcx.codegen_fn_attrs(def_id.to_def_id()).contains_extern_indicator())
+                    {
+                        Some(def_id)
+                    } else {
+                        None
+                    }
+                }
+
+                _ => None,
+            }
+        })
+        .map(|def_id| {
+            let export_level = if special_runtime_crate {
+                let name = tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())).name;
+                // We can probably do better here by just ensuring that
+                // it has hidden visibility rather than public
+                // visibility, as this is primarily here to ensure it's
+                // not stripped during LTO.
+                //
+                // In general though we won't link right if these
+                // symbols are stripped, and LTO currently strips them.
+                match name {
+                    "rust_eh_personality"
+                    | "rust_eh_register_frames"
+                    | "rust_eh_unregister_frames" =>
+                        SymbolExportLevel::C,
+                    _ => SymbolExportLevel::Rust,
+                }
+            } else {
+                symbol_export_level(tcx, def_id.to_def_id())
+            };
+            debug!(
+                "EXPORTED SYMBOL (local): {} ({:?})",
+                tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())),
+                export_level
+            );
+            (def_id.to_def_id(), export_level)
+        })
+        .collect();
+
+    if let Some(id) = tcx.proc_macro_decls_static(LOCAL_CRATE) {
+        reachable_non_generics.insert(id, SymbolExportLevel::C);
+    }
+
+    if let Some(id) = tcx.plugin_registrar_fn(LOCAL_CRATE) {
+        reachable_non_generics.insert(id, SymbolExportLevel::C);
+    }
+
+    reachable_non_generics
+}
+
+fn is_reachable_non_generic_provider_local(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    let export_threshold = threshold(tcx);
+
+    if let Some(&level) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
+        level.is_below_threshold(export_threshold)
+    } else {
+        false
+    }
+}
+
+fn is_reachable_non_generic_provider_extern(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
+}
+
+fn exported_symbols_provider_local(
+    tcx: TyCtxt<'tcx>,
+    cnum: CrateNum,
+) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportLevel)] {
+    assert_eq!(cnum, LOCAL_CRATE);
+
+    if !tcx.sess.opts.output_types.should_codegen() {
+        return &[];
+    }
+
+    let mut symbols: Vec<_> = tcx
+        .reachable_non_generics(LOCAL_CRATE)
+        .iter()
+        .map(|(&def_id, &level)| (ExportedSymbol::NonGeneric(def_id), level))
+        .collect();
+
+    if tcx.entry_fn(LOCAL_CRATE).is_some() {
+        let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, "main"));
+
+        symbols.push((exported_symbol, SymbolExportLevel::C));
+    }
+
+    if tcx.allocator_kind().is_some() {
+        for method in ALLOCATOR_METHODS {
+            let symbol_name = format!("__rust_{}", method.name);
+            let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
+
+            symbols.push((exported_symbol, SymbolExportLevel::Rust));
+        }
+    }
+
+    if tcx.sess.opts.debugging_opts.instrument_coverage
+        || tcx.sess.opts.cg.profile_generate.enabled()
+    {
+        // These are weak symbols that point to the profile version and the
+        // profile name, which need to be treated as exported so LTO doesn't nix
+        // them.
+        const PROFILER_WEAK_SYMBOLS: [&str; 2] =
+            ["__llvm_profile_raw_version", "__llvm_profile_filename"];
+
+        symbols.extend(PROFILER_WEAK_SYMBOLS.iter().map(|sym| {
+            let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, sym));
+            (exported_symbol, SymbolExportLevel::C)
+        }));
+    }
+
+    if tcx.sess.opts.debugging_opts.sanitizer.contains(SanitizerSet::MEMORY) {
+        // Similar to profiling, preserve weak msan symbols during LTO.
+        const MSAN_WEAK_SYMBOLS: [&str; 2] = ["__msan_track_origins", "__msan_keep_going"];
+
+        symbols.extend(MSAN_WEAK_SYMBOLS.iter().map(|sym| {
+            let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, sym));
+            (exported_symbol, SymbolExportLevel::C)
+        }));
+    }
+
+    if tcx.sess.crate_types().contains(&CrateType::Dylib) {
+        let symbol_name = metadata_symbol_name(tcx);
+        let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
+
+        symbols.push((exported_symbol, SymbolExportLevel::Rust));
+    }
+
+    if tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics() {
+        use rustc_middle::mir::mono::{Linkage, MonoItem, Visibility};
+        use rustc_middle::ty::InstanceDef;
+
+        // Normally, we require that shared monomorphizations are not hidden,
+        // because if we want to re-use a monomorphization from a Rust dylib, it
+        // needs to be exported.
+        // However, on platforms that don't allow for Rust dylibs, having
+        // external linkage is enough for a monomorphization to be linked to.
+        let need_visibility = tcx.sess.target.target.options.dynamic_linking
+            && !tcx.sess.target.target.options.only_cdylib;
+
+        let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+
+        for (mono_item, &(linkage, visibility)) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
+            if linkage != Linkage::External {
+                // We can only re-use things with external linkage, otherwise
+                // we'll get a linker error
+                continue;
+            }
+
+            if need_visibility && visibility == Visibility::Hidden {
+                // If we potentially share things from Rust dylibs, they must
+                // not be hidden
+                continue;
+            }
+
+            match *mono_item {
+                MonoItem::Fn(Instance { def: InstanceDef::Item(def), substs }) => {
+                    if substs.non_erasable_generics().next().is_some() {
+                        let symbol = ExportedSymbol::Generic(def.did, substs);
+                        symbols.push((symbol, SymbolExportLevel::Rust));
+                    }
+                }
+                MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), substs }) => {
+                    // A little sanity-check
+                    debug_assert_eq!(
+                        substs.non_erasable_generics().next(),
+                        Some(GenericArgKind::Type(ty))
+                    );
+                    symbols.push((ExportedSymbol::DropGlue(ty), SymbolExportLevel::Rust));
+                }
+                _ => {
+                    // Any other symbols don't qualify for sharing
+                }
+            }
+        }
+    }
+
+    // Sort so we get a stable incr. comp. hash.
+    symbols.sort_by_cached_key(|s| s.0.symbol_name_for_local_instance(tcx));
+
+    tcx.arena.alloc_from_iter(symbols)
+}
+
+fn upstream_monomorphizations_provider(
+    tcx: TyCtxt<'_>,
+    cnum: CrateNum,
+) -> DefIdMap<FxHashMap<SubstsRef<'_>, CrateNum>> {
+    debug_assert!(cnum == LOCAL_CRATE);
+
+    let cnums = tcx.all_crate_nums(LOCAL_CRATE);
+
+    let mut instances: DefIdMap<FxHashMap<_, _>> = Default::default();
+
+    let cnum_stable_ids: IndexVec<CrateNum, Fingerprint> = {
+        let mut cnum_stable_ids = IndexVec::from_elem_n(Fingerprint::ZERO, cnums.len() + 1);
+
+        for &cnum in cnums.iter() {
+            cnum_stable_ids[cnum] =
+                tcx.def_path_hash(DefId { krate: cnum, index: CRATE_DEF_INDEX }).0;
+        }
+
+        cnum_stable_ids
+    };
+
+    let drop_in_place_fn_def_id = tcx.lang_items().drop_in_place_fn();
+
+    for &cnum in cnums.iter() {
+        for (exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
+            let (def_id, substs) = match *exported_symbol {
+                ExportedSymbol::Generic(def_id, substs) => (def_id, substs),
+                ExportedSymbol::DropGlue(ty) => {
+                    if let Some(drop_in_place_fn_def_id) = drop_in_place_fn_def_id {
+                        (drop_in_place_fn_def_id, tcx.intern_substs(&[ty.into()]))
+                    } else {
+                        // `drop_in_place` does not exist; don't try
+                        // to use it.
+                        continue;
+                    }
+                }
+                ExportedSymbol::NonGeneric(..) | ExportedSymbol::NoDefId(..) => {
+                    // These are not monomorphizations
+                    continue;
+                }
+            };
+
+            let substs_map = instances.entry(def_id).or_default();
+
+            match substs_map.entry(substs) {
+                Occupied(mut e) => {
+                    // If there are multiple monomorphizations available,
+                    // we select one deterministically.
+                    let other_cnum = *e.get();
+                    if cnum_stable_ids[other_cnum] > cnum_stable_ids[cnum] {
+                        e.insert(cnum);
+                    }
+                }
+                Vacant(e) => {
+                    e.insert(cnum);
+                }
+            }
+        }
+    }
+
+    instances
+}
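+
+// A worked illustration of the tie-break above (hypothetical crates, not
+// part of this change): if crates `a` and `b` both export a monomorphization
+// of `drop_in_place::<String>`, the entry for that (DefId, SubstsRef) pair
+// ends up pointing at whichever crate has the smaller `DefPathHash`
+// fingerprint, independently of the order in which the crates were visited.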
+
+fn upstream_monomorphizations_for_provider(
+    tcx: TyCtxt<'_>,
+    def_id: DefId,
+) -> Option<&FxHashMap<SubstsRef<'_>, CrateNum>> {
+    debug_assert!(!def_id.is_local());
+    tcx.upstream_monomorphizations(LOCAL_CRATE).get(&def_id)
+}
+
+fn upstream_drop_glue_for_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    substs: SubstsRef<'tcx>,
+) -> Option<CrateNum> {
+    if let Some(def_id) = tcx.lang_items().drop_in_place_fn() {
+        tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&substs).cloned())
+    } else {
+        None
+    }
+}
+
+fn is_unreachable_local_definition_provider(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    if let Some(def_id) = def_id.as_local() {
+        !tcx.reachable_set(LOCAL_CRATE).contains(&def_id)
+    } else {
+        bug!("is_unreachable_local_definition called with non-local DefId: {:?}", def_id)
+    }
+}
+
+pub fn provide(providers: &mut Providers) {
+    providers.reachable_non_generics = reachable_non_generics_provider;
+    providers.is_reachable_non_generic = is_reachable_non_generic_provider_local;
+    providers.exported_symbols = exported_symbols_provider_local;
+    providers.upstream_monomorphizations = upstream_monomorphizations_provider;
+    providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
+    providers.upstream_drop_glue_for = upstream_drop_glue_for_provider;
+}
+
+pub fn provide_extern(providers: &mut Providers) {
+    providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
+    providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
+}
+
+fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel {
+    // We export anything that's not mangled at the "C" layer as it probably has
+    // to do with ABI concerns. We do not, however, apply such treatment to
+    // special symbols in the standard library for various plumbing between
+    // core/std/allocators/etc. For example, symbols used to hook up allocation
+    // are not considered for export.
+    let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
+    let is_extern = codegen_fn_attrs.contains_extern_indicator();
+    let std_internal =
+        codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
+
+    if is_extern && !std_internal {
+        let target = &tcx.sess.target.target.llvm_target;
+        // WebAssembly cannot export data symbols, so reduce their export level
+        if target.contains("emscripten") {
+            if let Some(Node::Item(&hir::Item { kind: hir::ItemKind::Static(..), .. })) =
+                tcx.hir().get_if_local(sym_def_id)
+            {
+                return SymbolExportLevel::Rust;
+            }
+        }
+
+        SymbolExportLevel::C
+    } else {
+        SymbolExportLevel::Rust
+    }
+}
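+
+// Illustrative inputs to `symbol_export_level` above (assumed attributes and
+// names, not taken from this change):
+//
+//     #[no_mangle]
+//     pub extern "C" fn my_entry_point() {} // -> SymbolExportLevel::C
+//
+//     #[rustc_std_internal_symbol]
+//     fn __example_std_internal() {}        // -> SymbolExportLevel::Rust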
+
+/// This is the symbol name of the given instance instantiated in a specific crate.
+pub fn symbol_name_for_instance_in_crate<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    symbol: ExportedSymbol<'tcx>,
+    instantiating_crate: CrateNum,
+) -> String {
+    // If this is something instantiated in the local crate then we might
+    // already have cached the name as a query result.
+    if instantiating_crate == LOCAL_CRATE {
+        return symbol.symbol_name_for_local_instance(tcx).to_string();
+    }
+
+    // This is something instantiated in an upstream crate, so we have to use
+    // the slower (because uncached) version of computing the symbol name.
+    match symbol {
+        ExportedSymbol::NonGeneric(def_id) => {
+            rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+                tcx,
+                Instance::mono(tcx, def_id),
+                instantiating_crate,
+            )
+        }
+        ExportedSymbol::Generic(def_id, substs) => {
+            rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+                tcx,
+                Instance::new(def_id, substs),
+                instantiating_crate,
+            )
+        }
+        ExportedSymbol::DropGlue(ty) => rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+            tcx,
+            Instance::resolve_drop_in_place(tcx, ty),
+            instantiating_crate,
+        ),
+        ExportedSymbol::NoDefId(symbol_name) => symbol_name.to_string(),
+    }
+}
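+
+// A minimal usage sketch of the function above: this mirrors how
+// `back::write::start_executing_work` builds the LTO symbol-retention list
+// for a crate:
+//
+//     let symbols: Vec<(String, SymbolExportLevel)> = tcx
+//         .exported_symbols(cnum)
+//         .iter()
+//         .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
+//         .collect();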
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
new file mode 100644
index 00000000000..7d69bb983dd
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -0,0 +1,1859 @@
+use super::link::{self, remove};
+use super::linker::LinkerInfo;
+use super::lto::{self, SerializedModule};
+use super::symbol_export::symbol_name_for_instance_in_crate;
+
+use crate::{
+    CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
+};
+
+use crate::traits::*;
+use jobserver::{Acquired, Client};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::profiling::TimingGuard;
+use rustc_data_structures::profiling::VerboseTimingGuard;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::emitter::Emitter;
+use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
+use rustc_fs_util::link_or_copy;
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_incremental::{
+    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
+};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::middle::exported_symbols::SymbolExportLevel;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cgu_reuse_tracker::CguReuseTracker;
+use rustc_session::config::{self, CrateType, Lto, OutputFilenames, OutputType};
+use rustc_session::config::{Passes, SanitizerSet, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{BytePos, FileName, InnerSpan, Pos, Span};
+use rustc_target::spec::{MergeFunctions, PanicStrategy};
+
+use std::any::Any;
+use std::fs;
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::mpsc::{channel, Receiver, Sender};
+use std::sync::Arc;
+use std::thread;
+
+const PRE_LTO_BC_EXT: &str = "pre-lto.bc";
+
+/// What kind of object file to emit.
+#[derive(Clone, Copy, PartialEq)]
+pub enum EmitObj {
+    // No object file.
+    None,
+
+    // Just uncompressed llvm bitcode. Provides easy compatibility with
+    // emscripten's emcc compiler, when used as the linker.
+    Bitcode,
+
+    // Object code, possibly augmented with a bitcode section.
+    ObjectCode(BitcodeSection),
+}
+
+/// What kind of llvm bitcode section to embed in an object file.
+#[derive(Clone, Copy, PartialEq)]
+pub enum BitcodeSection {
+    // No bitcode section.
+    None,
+
+    // A full, uncompressed bitcode section.
+    Full,
+}
+
+/// Module-specific configuration for `optimize_and_codegen`.
+pub struct ModuleConfig {
+    /// Names of additional optimization passes to run.
+    pub passes: Vec<String>,
+    /// Some(level) to optimize at a certain level, or None to run
+    /// absolutely no optimizations (used for the metadata module).
+    pub opt_level: Option<config::OptLevel>,
+
+    /// Some(level) to optimize binary size, or None to not affect program size.
+    pub opt_size: Option<config::OptLevel>,
+
+    pub pgo_gen: SwitchWithOptPath,
+    pub pgo_use: Option<PathBuf>,
+
+    pub sanitizer: SanitizerSet,
+    pub sanitizer_recover: SanitizerSet,
+    pub sanitizer_memory_track_origins: usize,
+
+    // Flags indicating which outputs to produce.
+    pub emit_pre_lto_bc: bool,
+    pub emit_no_opt_bc: bool,
+    pub emit_bc: bool,
+    pub emit_ir: bool,
+    pub emit_asm: bool,
+    pub emit_obj: EmitObj,
+    pub bc_cmdline: String,
+
+    // Miscellaneous flags.  These are mostly copied from command-line
+    // options.
+    pub verify_llvm_ir: bool,
+    pub no_prepopulate_passes: bool,
+    pub no_builtins: bool,
+    pub time_module: bool,
+    pub vectorize_loop: bool,
+    pub vectorize_slp: bool,
+    pub merge_functions: bool,
+    pub inline_threshold: Option<usize>,
+    pub new_llvm_pass_manager: bool,
+    pub emit_lifetime_markers: bool,
+}
+
+impl ModuleConfig {
+    fn new(
+        kind: ModuleKind,
+        sess: &Session,
+        no_builtins: bool,
+        is_compiler_builtins: bool,
+    ) -> ModuleConfig {
+        // If it's a regular module, use `$regular`, otherwise use `$other`.
+        // `$regular` and `$other` are evaluated lazily.
+        macro_rules! if_regular {
+            ($regular: expr, $other: expr) => {
+                if let ModuleKind::Regular = kind { $regular } else { $other }
+            };
+        }
+
+        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
+
+        let save_temps = sess.opts.cg.save_temps;
+
+        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
+            || match kind {
+                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
+                ModuleKind::Allocator => false,
+                ModuleKind::Metadata => sess.opts.output_types.contains_key(&OutputType::Metadata),
+            };
+
+        let emit_obj = if !should_emit_obj {
+            EmitObj::None
+        } else if sess.target.target.options.obj_is_bitcode
+            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
+        {
+            // This case is selected if the target uses objects as bitcode, or
+            // if linker plugin LTO is enabled. In the linker plugin LTO case
+            // the assumption is that the final link-step will read the bitcode
+            // and convert it to object code. This may be done by either the
+            // native linker or rustc itself.
+            //
+            // Note, however, that the linker-plugin-lto requested here is
+            // explicitly ignored for `#![no_builtins]` crates. These crates are
+            // specifically ignored by rustc's LTO passes and wouldn't work if
+            // loaded into the linker. These crates define symbols that LLVM
+            // lowers intrinsics to, and these symbol dependencies aren't known
+            // until after codegen. As a result any crate marked
+            // `#![no_builtins]` is assumed to not participate in LTO and
+            // instead goes on to generate object code.
+            EmitObj::Bitcode
+        } else if need_bitcode_in_object(sess) {
+            EmitObj::ObjectCode(BitcodeSection::Full)
+        } else {
+            EmitObj::ObjectCode(BitcodeSection::None)
+        };
+
+        ModuleConfig {
+            passes: if_regular!(
+                {
+                    let mut passes = sess.opts.cg.passes.clone();
+                    // compiler_builtins overrides the codegen-units settings,
+                    // which is incompatible with -Zprofile which requires that
+                    // only a single codegen unit is used per crate.
+                    if sess.opts.debugging_opts.profile && !is_compiler_builtins {
+                        passes.push("insert-gcov-profiling".to_owned());
+                    }
+
+                    // The rustc option `-Zinstrument-coverage` injects intrinsic calls to
+                    // `llvm.instrprof.increment()`, which requires the LLVM `instrprof` pass.
+                    if sess.opts.debugging_opts.instrument_coverage {
+                        passes.push("instrprof".to_owned());
+                    }
+                    passes
+                },
+                vec![]
+            ),
+
+            opt_level: opt_level_and_size,
+            opt_size: opt_level_and_size,
+
+            pgo_gen: if_regular!(
+                sess.opts.cg.profile_generate.clone(),
+                SwitchWithOptPath::Disabled
+            ),
+            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
+
+            sanitizer: if_regular!(sess.opts.debugging_opts.sanitizer, SanitizerSet::empty()),
+            sanitizer_recover: if_regular!(
+                sess.opts.debugging_opts.sanitizer_recover,
+                SanitizerSet::empty()
+            ),
+            sanitizer_memory_track_origins: if_regular!(
+                sess.opts.debugging_opts.sanitizer_memory_track_origins,
+                0
+            ),
+
+            emit_pre_lto_bc: if_regular!(
+                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
+                false
+            ),
+            emit_no_opt_bc: if_regular!(save_temps, false),
+            emit_bc: if_regular!(
+                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
+                save_temps
+            ),
+            emit_ir: if_regular!(
+                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
+                false
+            ),
+            emit_asm: if_regular!(
+                sess.opts.output_types.contains_key(&OutputType::Assembly),
+                false
+            ),
+            emit_obj,
+            bc_cmdline: sess.target.target.options.bitcode_llvm_cmdline.clone(),
+
+            verify_llvm_ir: sess.verify_llvm_ir(),
+            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
+            no_builtins: no_builtins || sess.target.target.options.no_builtins,
+
+            // Exclude metadata and allocator modules from time_passes output,
+            // since they throw off the "LLVM passes" measurement.
+            time_module: if_regular!(true, false),
+
+            // Copy what clang does by turning on loop vectorization at O2 and
+            // slp vectorization at O3.
+            vectorize_loop: !sess.opts.cg.no_vectorize_loops
+                && (sess.opts.optimize == config::OptLevel::Default
+                    || sess.opts.optimize == config::OptLevel::Aggressive),
+            vectorize_slp: !sess.opts.cg.no_vectorize_slp
+                && sess.opts.optimize == config::OptLevel::Aggressive,
+
+            // Some targets (namely, NVPTX) interact badly with the
+            // MergeFunctions pass. This is because MergeFunctions can generate
+            // new function calls which may interfere with the target calling
+            // convention; e.g. for the NVPTX target, PTX kernels should not
+            // call other PTX kernels. MergeFunctions can also be configured to
+            // generate aliases instead, but aliases are not supported by some
+            // backends (again, NVPTX). Therefore, allow targets to opt out of
+            // the MergeFunctions pass, but otherwise keep the pass enabled (at
+            // O2 and O3) since it can be useful for reducing code size.
+            merge_functions: match sess
+                .opts
+                .debugging_opts
+                .merge_functions
+                .unwrap_or(sess.target.target.options.merge_functions)
+            {
+                MergeFunctions::Disabled => false,
+                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
+                    sess.opts.optimize == config::OptLevel::Default
+                        || sess.opts.optimize == config::OptLevel::Aggressive
+                }
+            },
+
+            inline_threshold: sess.opts.cg.inline_threshold,
+            new_llvm_pass_manager: sess.opts.debugging_opts.new_llvm_pass_manager,
+            emit_lifetime_markers: sess.emit_lifetime_markers(),
+        }
+    }
+
+    pub fn bitcode_needed(&self) -> bool {
+        self.emit_bc
+            || self.emit_obj == EmitObj::Bitcode
+            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
+    }
+}
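+
+// For instance (illustrative, assuming a typical native target and default
+// flags): plain `--emit=obj` produces `EmitObj::ObjectCode(BitcodeSection::None)`
+// with `bitcode_needed()` false, while `-Clinker-plugin-lto` on a crate
+// without `#![no_builtins]` produces `EmitObj::Bitcode`, so `bitcode_needed()`
+// returns true.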
+
+// HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`.
+pub struct TargetMachineFactory<B: WriteBackendMethods>(
+    pub Arc<dyn Fn() -> Result<B::TargetMachine, String> + Send + Sync>,
+);
+
+impl<B: WriteBackendMethods> Clone for TargetMachineFactory<B> {
+    fn clone(&self) -> Self {
+        TargetMachineFactory(self.0.clone())
+    }
+}
+
+pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportLevel)>>>;
+
+/// Additional resources used by optimize_and_codegen (not module specific)
+#[derive(Clone)]
+pub struct CodegenContext<B: WriteBackendMethods> {
+    // Resources needed when running LTO
+    pub backend: B,
+    pub prof: SelfProfilerRef,
+    pub lto: Lto,
+    pub no_landing_pads: bool,
+    pub save_temps: bool,
+    pub fewer_names: bool,
+    pub exported_symbols: Option<Arc<ExportedSymbols>>,
+    pub opts: Arc<config::Options>,
+    pub crate_types: Vec<CrateType>,
+    pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
+    pub output_filenames: Arc<OutputFilenames>,
+    pub regular_module_config: Arc<ModuleConfig>,
+    pub metadata_module_config: Arc<ModuleConfig>,
+    pub allocator_module_config: Arc<ModuleConfig>,
+    pub tm_factory: TargetMachineFactory<B>,
+    pub msvc_imps_needed: bool,
+    pub target_pointer_width: String,
+    pub target_arch: String,
+    pub debuginfo: config::DebugInfo,
+
+    // Number of cgus excluding the allocator/metadata modules
+    pub total_cgus: usize,
+    // Handler to use for diagnostics produced during codegen.
+    pub diag_emitter: SharedEmitter,
+    // LLVM optimizations for which we want to print remarks.
+    pub remark: Passes,
+    // Worker thread number
+    pub worker: usize,
+    // The incremental compilation session directory, or None if we are not
+    // compiling incrementally
+    pub incr_comp_session_dir: Option<PathBuf>,
+    // Used to update CGU re-use information during the thinlto phase.
+    pub cgu_reuse_tracker: CguReuseTracker,
+    // Channel back to the main control thread to send messages to
+    pub coordinator_send: Sender<Box<dyn Any + Send>>,
+}
+
+impl<B: WriteBackendMethods> CodegenContext<B> {
+    pub fn create_diag_handler(&self) -> Handler {
+        Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
+    }
+
+    pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
+        match kind {
+            ModuleKind::Regular => &self.regular_module_config,
+            ModuleKind::Metadata => &self.metadata_module_config,
+            ModuleKind::Allocator => &self.allocator_module_config,
+        }
+    }
+}
+
+fn generate_lto_work<B: ExtraBackendMethods>(
+    cgcx: &CodegenContext<B>,
+    needs_fat_lto: Vec<FatLTOInput<B>>,
+    needs_thin_lto: Vec<(String, B::ThinBuffer)>,
+    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
+) -> Vec<(WorkItem<B>, u64)> {
+    let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
+
+    let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
+        assert!(needs_thin_lto.is_empty());
+        let lto_module =
+            B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
+        (vec![lto_module], vec![])
+    } else {
+        assert!(needs_fat_lto.is_empty());
+        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
+    };
+
+    lto_modules
+        .into_iter()
+        .map(|module| {
+            let cost = module.cost();
+            (WorkItem::LTO(module), cost)
+        })
+        .chain(copy_jobs.into_iter().map(|wp| {
+            (
+                WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
+                    name: wp.cgu_name.clone(),
+                    source: wp,
+                }),
+                0,
+            )
+        }))
+        .collect()
+}
+
+pub struct CompiledModules {
+    pub modules: Vec<CompiledModule>,
+    pub metadata_module: Option<CompiledModule>,
+    pub allocator_module: Option<CompiledModule>,
+}
+
+fn need_bitcode_in_object(sess: &Session) -> bool {
+    let requested_for_rlib = sess.opts.cg.embed_bitcode
+        && sess.crate_types().contains(&CrateType::Rlib)
+        && sess.opts.output_types.contains_key(&OutputType::Exe);
+    let forced_by_target = sess.target.target.options.forces_embed_bitcode;
+    requested_for_rlib || forced_by_target
+}
+
+fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
+    if sess.opts.incremental.is_none() {
+        return false;
+    }
+
+    match sess.lto() {
+        Lto::No => false,
+        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
+    }
+}
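+
+// E.g. (illustrative): `-Cincremental=<dir> -Clto=thin` requires the pre-LTO
+// bitcode to be written to disk so a later incremental build can re-import
+// it, whereas a non-incremental `-Clto=thin` build does not.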
+
+pub fn start_async_codegen<B: ExtraBackendMethods>(
+    backend: B,
+    tcx: TyCtxt<'_>,
+    metadata: EncodedMetadata,
+    total_cgus: usize,
+) -> OngoingCodegen<B> {
+    let (coordinator_send, coordinator_receive) = channel();
+    let sess = tcx.sess;
+
+    let crate_name = tcx.crate_name(LOCAL_CRATE);
+    let crate_hash = tcx.crate_hash(LOCAL_CRATE);
+    let no_builtins = tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::no_builtins);
+    let is_compiler_builtins =
+        tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::compiler_builtins);
+    let subsystem = tcx
+        .sess
+        .first_attr_value_str_by_name(&tcx.hir().krate().item.attrs, sym::windows_subsystem);
+    let windows_subsystem = subsystem.map(|subsystem| {
+        if subsystem != sym::windows && subsystem != sym::console {
+            tcx.sess.fatal(&format!(
+                "invalid windows subsystem `{}`, only \
+                                     `windows` and `console` are allowed",
+                subsystem
+            ));
+        }
+        subsystem.to_string()
+    });
+
+    let linker_info = LinkerInfo::new(tcx);
+    let crate_info = CrateInfo::new(tcx);
+
+    let regular_config =
+        ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
+    let metadata_config =
+        ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
+    let allocator_config =
+        ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
+
+    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
+    let (codegen_worker_send, codegen_worker_receive) = channel();
+
+    let coordinator_thread = start_executing_work(
+        backend.clone(),
+        tcx,
+        &crate_info,
+        shared_emitter,
+        codegen_worker_send,
+        coordinator_receive,
+        total_cgus,
+        sess.jobserver.clone(),
+        Arc::new(regular_config),
+        Arc::new(metadata_config),
+        Arc::new(allocator_config),
+        coordinator_send.clone(),
+    );
+
+    OngoingCodegen {
+        backend,
+        crate_name,
+        crate_hash,
+        metadata,
+        windows_subsystem,
+        linker_info,
+        crate_info,
+
+        coordinator_send,
+        codegen_worker_receive,
+        shared_emitter_main,
+        future: coordinator_thread,
+        output_filenames: tcx.output_filenames(LOCAL_CRATE),
+    }
+}
+
+fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
+    sess: &Session,
+    compiled_modules: &CompiledModules,
+) -> FxHashMap<WorkProductId, WorkProduct> {
+    let mut work_products = FxHashMap::default();
+
+    if sess.opts.incremental.is_none() {
+        return work_products;
+    }
+
+    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");
+
+    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
+        let path = module.object.as_ref().cloned();
+
+        if let Some((id, product)) =
+            copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, &path)
+        {
+            work_products.insert(id, product);
+        }
+    }
+
+    work_products
+}
+
+fn produce_final_output_artifacts(
+    sess: &Session,
+    compiled_modules: &CompiledModules,
+    crate_output: &OutputFilenames,
+) {
+    let mut user_wants_bitcode = false;
+    let mut user_wants_objects = false;
+
+    // Produce final compile outputs.
+    let copy_gracefully = |from: &Path, to: &Path| {
+        if let Err(e) = fs::copy(from, to) {
+            sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
+        }
+    };
+
+    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
+        if compiled_modules.modules.len() == 1 {
+            // 1) Only one codegen unit.  In this case it's trivial
+            //    to copy `foo.0.x` to `foo.x`.
+            let module_name = Some(&compiled_modules.modules[0].name[..]);
+            let path = crate_output.temp_path(output_type, module_name);
+            copy_gracefully(&path, &crate_output.path(output_type));
+            if !sess.opts.cg.save_temps && !keep_numbered {
+                // The user just wants `foo.x`, not `foo.#module-name#.x`.
+                remove(sess, &path);
+            }
+        } else {
+            let ext = crate_output
+                .temp_path(output_type, None)
+                .extension()
+                .unwrap()
+                .to_str()
+                .unwrap()
+                .to_owned();
+
+            if crate_output.outputs.contains_key(&output_type) {
+                // 2) Multiple codegen units, with `--emit foo=some_name`.  We have
+                //    no good solution for this case, so warn the user.
+                sess.warn(&format!(
+                    "ignoring emit path because multiple .{} files \
+                                    were produced",
+                    ext
+                ));
+            } else if crate_output.single_output_file.is_some() {
+                // 3) Multiple codegen units, with `-o some_name`.  We have
+                //    no good solution for this case, so warn the user.
+                sess.warn(&format!(
+                    "ignoring -o because multiple .{} files \
+                                    were produced",
+                    ext
+                ));
+            } else {
+                // 4) Multiple codegen units, but no explicit name.  We
+                //    just leave the `foo.0.x` files in place.
+                // (We don't have to do any work in this case.)
+            }
+        }
+    };
+
+    // Flag to indicate whether the user explicitly requested bitcode.
+    // Otherwise, we produced it only as a temporary output, and will need
+    // to get rid of it.
+    for output_type in crate_output.outputs.keys() {
+        match *output_type {
+            OutputType::Bitcode => {
+                user_wants_bitcode = true;
+                // Copy to .bc, but always keep the .0.bc.  There is a later
+                // check to figure out if we should delete .0.bc files, or keep
+                // them for making an rlib.
+                copy_if_one_unit(OutputType::Bitcode, true);
+            }
+            OutputType::LlvmAssembly => {
+                copy_if_one_unit(OutputType::LlvmAssembly, false);
+            }
+            OutputType::Assembly => {
+                copy_if_one_unit(OutputType::Assembly, false);
+            }
+            OutputType::Object => {
+                user_wants_objects = true;
+                copy_if_one_unit(OutputType::Object, true);
+            }
+            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
+        }
+    }
+
+    // Clean up unwanted temporary files.
+
+    // We create the following files by default:
+    //  - #crate#.#module-name#.bc
+    //  - #crate#.#module-name#.o
+    //  - #crate#.crate.metadata.bc
+    //  - #crate#.crate.metadata.o
+    //  - #crate#.o (linked from crate.##.o)
+    //  - #crate#.bc (copied from crate.##.bc)
+    // We may create additional files if requested by the user (through
+    // `-C save-temps` or `--emit=` flags).
+
+    if !sess.opts.cg.save_temps {
+        // Remove the temporary .#module-name#.o objects.  If the user didn't
+        // explicitly request bitcode (with --emit=bc), and the bitcode is not
+        // needed for building an rlib, then we must remove .#module-name#.bc as
+        // well.
+
+        // Specific rules for keeping .#module-name#.bc:
+        //  - If the user requested bitcode (`user_wants_bitcode`), and
+        //    codegen_units > 1, then keep it.
+        //  - If the user requested bitcode but codegen_units == 1, then we
+        //    can toss .#module-name#.bc because we copied it to .bc earlier.
+        //  - If we're not building an rlib and the user didn't request
+        //    bitcode, then delete .#module-name#.bc.
+        // If you change how this works, also update back::link::link_rlib,
+        // where .#module-name#.bc files are (maybe) deleted after making an
+        // rlib.
+        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);
+
+        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;
+
+        let keep_numbered_objects =
+            needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);
+
+        for module in compiled_modules.modules.iter() {
+            if let Some(ref path) = module.object {
+                if !keep_numbered_objects {
+                    remove(sess, path);
+                }
+            }
+
+            if let Some(ref path) = module.bytecode {
+                if !keep_numbered_bitcode {
+                    remove(sess, path);
+                }
+            }
+        }
+
+        if !user_wants_bitcode {
+            if let Some(ref metadata_module) = compiled_modules.metadata_module {
+                if let Some(ref path) = metadata_module.bytecode {
+                    remove(sess, &path);
+                }
+            }
+
+            if let Some(ref allocator_module) = compiled_modules.allocator_module {
+                if let Some(ref path) = allocator_module.bytecode {
+                    remove(sess, path);
+                }
+            }
+        }
+    }
+
+    // We leave the following files around by default:
+    //  - #crate#.o
+    //  - #crate#.crate.metadata.o
+    //  - #crate#.bc
+    // These are used in linking steps and will be cleaned up afterward.
+}
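+
+// Worked example (illustrative): compiling crate `foo` with a single codegen
+// unit and `--emit=asm` leaves `foo.#module-name#.s` among the temporary
+// outputs; `copy_if_one_unit` copies it to `foo.s` and then removes the
+// numbered temporary unless `-Csave-temps` was given.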
+
+pub fn dump_incremental_data(_codegen_results: &CodegenResults) {
+    // FIXME(mw): This does not work at the moment because the situation has
+    //            become more complicated due to incremental LTO. Now a CGU
+    //            can have more than two caching states.
+    // println!("[incremental] Re-using {} out of {} modules",
+    //           codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
+    //           codegen_results.modules.len());
+}
+
+pub enum WorkItem<B: WriteBackendMethods> {
+    /// Optimize a newly codegened, totally unoptimized module.
+    Optimize(ModuleCodegen<B::Module>),
+    /// Copy the post-LTO artifacts from the incremental cache to the output
+    /// directory.
+    CopyPostLtoArtifacts(CachedModuleCodegen),
+    /// Performs (Thin)LTO on the given module.
+    LTO(lto::LtoModuleCodegen<B>),
+}
+
+impl<B: WriteBackendMethods> WorkItem<B> {
+    pub fn module_kind(&self) -> ModuleKind {
+        match *self {
+            WorkItem::Optimize(ref m) => m.kind,
+            WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
+        }
+    }
+
+    fn start_profiling<'a>(&self, cgcx: &'a CodegenContext<B>) -> TimingGuard<'a> {
+        match *self {
+            WorkItem::Optimize(ref m) => {
+                cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &m.name[..])
+            }
+            WorkItem::CopyPostLtoArtifacts(ref m) => cgcx
+                .prof
+                .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &m.name[..]),
+            WorkItem::LTO(ref m) => {
+                cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name())
+            }
+        }
+    }
+}
+
+enum WorkItemResult<B: WriteBackendMethods> {
+    Compiled(CompiledModule),
+    NeedsFatLTO(FatLTOInput<B>),
+    NeedsThinLTO(String, B::ThinBuffer),
+}
+
+pub enum FatLTOInput<B: WriteBackendMethods> {
+    Serialized { name: String, buffer: B::ModuleBuffer },
+    InMemory(ModuleCodegen<B::Module>),
+}
+
+fn execute_work_item<B: ExtraBackendMethods>(
+    cgcx: &CodegenContext<B>,
+    work_item: WorkItem<B>,
+) -> Result<WorkItemResult<B>, FatalError> {
+    let module_config = cgcx.config(work_item.module_kind());
+
+    match work_item {
+        WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
+        WorkItem::CopyPostLtoArtifacts(module) => {
+            execute_copy_from_cache_work_item(cgcx, module, module_config)
+        }
+        WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
+    }
+}
+
+// Actual LTO type we end up choosing based on multiple factors.
+pub enum ComputedLtoType {
+    No,
+    Thin,
+    Fat,
+}
+
+pub fn compute_per_cgu_lto_type(
+    sess_lto: &Lto,
+    opts: &config::Options,
+    sess_crate_types: &[CrateType],
+    module_kind: ModuleKind,
+) -> ComputedLtoType {
+    // Metadata modules never participate in LTO regardless of the lto
+    // settings.
+    if module_kind == ModuleKind::Metadata {
+        return ComputedLtoType::No;
+    }
+
+    // If the linker does LTO, we don't have to do it. Note that we
+    // keep doing full LTO, if it is requested, so as not to break the
+    // assumption that the output will be a single module.
+    let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
+
+    // When we're automatically doing ThinLTO for multi-codegen-unit
+    // builds we don't actually want to LTO the allocator module if
+    // it shows up. This is due to various linker shenanigans that
+    // we'll encounter later.
+    let is_allocator = module_kind == ModuleKind::Allocator;
+
+    // We ignore a request for full crate graph LTO if the crate type
+    // is only an rlib, as there is no full crate graph to process;
+    // that'll happen later.
+    //
+    // This use case currently comes up primarily for targets that
+    // require LTO so the request for LTO is always unconditionally
+    // passed down to the backend, but we don't actually want to do
+    // anything about it yet until we've got a final product.
+    let is_rlib = sess_crate_types.len() == 1 && sess_crate_types[0] == CrateType::Rlib;
+
+    match sess_lto {
+        Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
+        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
+        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
+        _ => ComputedLtoType::No,
+    }
+}
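+
+// Some concrete readings of the match above (illustrative; assumes
+// linker-plugin-LTO is disabled in `opts`):
+//
+//     // `-Clto=thin` while building a binary: ThinLTO runs here.
+//     compute_per_cgu_lto_type(&Lto::Thin, &opts, &[CrateType::Executable],
+//                              ModuleKind::Regular); // => ComputedLtoType::Thin
+//
+//     // `-Clto=thin` while building only an rlib: deferred to the final link.
+//     compute_per_cgu_lto_type(&Lto::Thin, &opts, &[CrateType::Rlib],
+//                              ModuleKind::Regular); // => ComputedLtoType::No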
+
+fn execute_optimize_work_item<B: ExtraBackendMethods>(
+    cgcx: &CodegenContext<B>,
+    module: ModuleCodegen<B::Module>,
+    module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+    let diag_handler = cgcx.create_diag_handler();
+
+    unsafe {
+        B::optimize(cgcx, &diag_handler, &module, module_config)?;
+    }
+
+    // After we've done the initial round of optimizations we need to
+    // decide whether to synchronously codegen this module or ship it
+    // back to the coordinator thread for further LTO processing (which
+    // has to wait for all the initial modules to be optimized).
+
+    let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
+
+    // If we're doing some form of incremental LTO then we need to be sure to
+    // save our module to disk first.
+    let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
+        let filename = pre_lto_bitcode_filename(&module.name);
+        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
+    } else {
+        None
+    };
+
+    Ok(match lto_type {
+        ComputedLtoType::No => {
+            let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
+            WorkItemResult::Compiled(module)
+        }
+        ComputedLtoType::Thin => {
+            let (name, thin_buffer) = B::prepare_thin(module);
+            if let Some(path) = bitcode {
+                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
+                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
+                });
+            }
+            WorkItemResult::NeedsThinLTO(name, thin_buffer)
+        }
+        ComputedLtoType::Fat => match bitcode {
+            Some(path) => {
+                let (name, buffer) = B::serialize_module(module);
+                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
+                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
+                });
+                WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer })
+            }
+            None => WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module)),
+        },
+    })
+}
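+
+// To summarize the flow above (illustrative): with `-Clto=no` a module goes
+// straight through `B::codegen` to a `CompiledModule`; with ThinLTO it is
+// handed back as a named `ThinBuffer`; with fat LTO it is either kept in
+// memory or, when incremental LTO needs it on disk, serialized first.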
+
+fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
+    cgcx: &CodegenContext<B>,
+    module: CachedModuleCodegen,
+    module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
+    let mut object = None;
+    if let Some(saved_file) = module.source.saved_file {
+        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name));
+        object = Some(obj_out.clone());
+        let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
+        debug!(
+            "copying pre-existing module `{}` from {:?} to {}",
+            module.name,
+            source_file,
+            obj_out.display()
+        );
+        if let Err(err) = link_or_copy(&source_file, &obj_out) {
+            let diag_handler = cgcx.create_diag_handler();
+            diag_handler.err(&format!(
+                "unable to copy {} to {}: {}",
+                source_file.display(),
+                obj_out.display(),
+                err
+            ));
+        }
+    }
+
+    assert_eq!(object.is_some(), module_config.emit_obj != EmitObj::None);
+
+    Ok(WorkItemResult::Compiled(CompiledModule {
+        name: module.name,
+        kind: ModuleKind::Regular,
+        object,
+        bytecode: None,
+    }))
+}
+
+fn execute_lto_work_item<B: ExtraBackendMethods>(
+    cgcx: &CodegenContext<B>,
+    mut module: lto::LtoModuleCodegen<B>,
+    module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+    let diag_handler = cgcx.create_diag_handler();
+
+    unsafe {
+        let module = module.optimize(cgcx)?;
+        let module = B::codegen(cgcx, &diag_handler, module, module_config)?;
+        Ok(WorkItemResult::Compiled(module))
+    }
+}
+
+pub enum Message<B: WriteBackendMethods> {
+    Token(io::Result<Acquired>),
+    NeedsFatLTO {
+        result: FatLTOInput<B>,
+        worker_id: usize,
+    },
+    NeedsThinLTO {
+        name: String,
+        thin_buffer: B::ThinBuffer,
+        worker_id: usize,
+    },
+    Done {
+        result: Result<CompiledModule, Option<WorkerFatalError>>,
+        worker_id: usize,
+    },
+    CodegenDone {
+        llvm_work_item: WorkItem<B>,
+        cost: u64,
+    },
+    AddImportOnlyModule {
+        module_data: SerializedModule<B::ModuleBuffer>,
+        work_product: WorkProduct,
+    },
+    CodegenComplete,
+    CodegenItem,
+    CodegenAborted,
+}
+
+struct Diagnostic {
+    msg: String,
+    code: Option<DiagnosticId>,
+    lvl: Level,
+}
+
+#[derive(PartialEq, Clone, Copy, Debug)]
+enum MainThreadWorkerState {
+    Idle,
+    Codegenning,
+    LLVMing,
+}
+
+fn start_executing_work<B: ExtraBackendMethods>(
+    backend: B,
+    tcx: TyCtxt<'_>,
+    crate_info: &CrateInfo,
+    shared_emitter: SharedEmitter,
+    codegen_worker_send: Sender<Message<B>>,
+    coordinator_receive: Receiver<Box<dyn Any + Send>>,
+    total_cgus: usize,
+    jobserver: Client,
+    regular_config: Arc<ModuleConfig>,
+    metadata_config: Arc<ModuleConfig>,
+    allocator_config: Arc<ModuleConfig>,
+    tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
+) -> thread::JoinHandle<Result<CompiledModules, ()>> {
+    let coordinator_send = tx_to_llvm_workers;
+    let sess = tcx.sess;
+
+    // Compute the set of symbols we need to retain when doing LTO (if we need to)
+    let exported_symbols = {
+        let mut exported_symbols = FxHashMap::default();
+
+        let copy_symbols = |cnum| {
+            let symbols = tcx
+                .exported_symbols(cnum)
+                .iter()
+                .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
+                .collect();
+            Arc::new(symbols)
+        };
+
+        match sess.lto() {
+            Lto::No => None,
+            Lto::ThinLocal => {
+                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+                Some(Arc::new(exported_symbols))
+            }
+            Lto::Fat | Lto::Thin => {
+                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+                for &cnum in tcx.crates().iter() {
+                    exported_symbols.insert(cnum, copy_symbols(cnum));
+                }
+                Some(Arc::new(exported_symbols))
+            }
+        }
+    };
+
+    // First up, convert our jobserver into a helper thread so we can use normal
+    // mpsc channels to manage our messages and such.
+    // Once we've requested tokens, they will arrive on `coordinator_receive`
+    // and get managed in the main loop below.
+    let coordinator_send2 = coordinator_send.clone();
+    let helper = jobserver
+        .into_helper_thread(move |token| {
+            drop(coordinator_send2.send(Box::new(Message::Token::<B>(token))));
+        })
+        .expect("failed to spawn helper thread");
+
+    let mut each_linked_rlib_for_lto = Vec::new();
+    drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
+        if link::ignored_for_lto(sess, crate_info, cnum) {
+            return;
+        }
+        each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
+    }));
+
+    let ol = if tcx.sess.opts.debugging_opts.no_codegen
+        || !tcx.sess.opts.output_types.should_codegen()
+    {
+        // If we know that we won’t be doing codegen, create target machines without optimisation.
+        config::OptLevel::No
+    } else {
+        tcx.backend_optimization_level(LOCAL_CRATE)
+    };
+    let cgcx = CodegenContext::<B> {
+        backend: backend.clone(),
+        crate_types: sess.crate_types().to_vec(),
+        each_linked_rlib_for_lto,
+        lto: sess.lto(),
+        no_landing_pads: sess.panic_strategy() == PanicStrategy::Abort,
+        fewer_names: sess.fewer_names(),
+        save_temps: sess.opts.cg.save_temps,
+        opts: Arc::new(sess.opts.clone()),
+        prof: sess.prof.clone(),
+        exported_symbols,
+        remark: sess.opts.cg.remark.clone(),
+        worker: 0,
+        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
+        cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
+        coordinator_send,
+        diag_emitter: shared_emitter.clone(),
+        output_filenames: tcx.output_filenames(LOCAL_CRATE),
+        regular_module_config: regular_config,
+        metadata_module_config: metadata_config,
+        allocator_module_config: allocator_config,
+        tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol)),
+        total_cgus,
+        msvc_imps_needed: msvc_imps_needed(tcx),
+        target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
+        target_arch: tcx.sess.target.target.arch.clone(),
+        debuginfo: tcx.sess.opts.debuginfo,
+    };
+
+    // This is the "main loop" of parallel work happening for parallel codegen.
+    // It's here that we manage parallelism, schedule work, and work with
+    // messages coming from clients.
+    //
+    // There are a few environmental pre-conditions that shape how the system
+    // is set up:
+    //
+    // - Error reporting can only happen on the main thread because that's the
+    //   only place where we have access to the compiler `Session`.
+    // - LLVM work can be done on any thread.
+    // - Codegen can only happen on the main thread.
+    // - Each thread doing substantial work must be in possession of a `Token`
+    //   from the `Jobserver`.
+    // - The compiler process always holds one `Token`. Any additional `Tokens`
+    //   have to be requested from the `Jobserver`.
+    //
+    // Error Reporting
+    // ===============
+    // The error reporting restriction is handled separately from the rest: We
+    // set up a `SharedEmitter` that holds an open channel to the main thread.
+    // When an error occurs on any thread, the shared emitter will send the
+    // error message to the receiving main thread (`SharedEmitterMain`). The
+    // main thread will periodically query this error message queue and emit
+    // any error messages it has received. It might even abort compilation if
+    // it has received a fatal error. In this case we rely on all other threads
+    // being torn down automatically with the main thread.
+    // Since the main thread will often be busy doing codegen work, error
+    // reporting will be somewhat delayed, since the message queue can only be
+    // checked in between two work packages.
+    //
+    // Work Processing Infrastructure
+    // ==============================
+    // The work processing infrastructure knows three major actors:
+    //
+    // - the coordinator thread,
+    // - the main thread, and
+    // - LLVM worker threads
+    //
+    // The coordinator thread is running a message loop. It instructs the main
+    // thread about what work to do when, and it will spawn off LLVM worker
+    // threads as open LLVM WorkItems become available.
+    //
+    // The job of the main thread is to codegen CGUs into LLVM work packages
+    // (since the main thread is the only thread that can do this). The main
+    // thread will block until it receives a message from the coordinator, upon
+    // which it will codegen one CGU, send it to the coordinator and block
+    // again. This way the coordinator can control what the main thread is
+    // doing.
+    //
+    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
+    // available, it will spawn off a new LLVM worker thread and let it process
+    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
+    // it will just shut down, which also frees all resources associated with
+    // the given LLVM module, and sends a message to the coordinator that the
+    // WorkItem has been completed.
+    //
+    // Work Scheduling
+    // ===============
+    // The scheduler's goal is to minimize the time it takes to complete all
+    // work there is; however, we also want to keep memory consumption low
+    // if possible. These two goals are at odds with each other: If memory
+    // consumption were not an issue, we could just let the main thread produce
+    // LLVM WorkItems at full speed, assuring maximal utilization of
+    // Tokens/LLVM worker threads. However, since codegen is usually faster
+    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
+    // WorkItem potentially holds on to a substantial amount of memory.
+    //
+    // So the actual goal is to always produce just enough LLVM WorkItems so as
+    // not to starve our LLVM worker threads. That means, once we have enough
+    // WorkItems in our queue, we can block the main thread, so it does not
+    // produce more until we need them.
+    //
+    // Doing LLVM Work on the Main Thread
+    // ----------------------------------
+    // Since the main thread owns the compiler process's implicit `Token`, it is
+    // wasteful to keep it blocked without doing any work. Therefore, what we do
+    // in this case is: We spawn off an additional LLVM worker thread that helps
+    // reduce the queue. The work it is doing corresponds to the implicit
+    // `Token`. The coordinator will mark the main thread as being busy with
+    // LLVM work. (The actual work happens on another OS thread but we just care
+    // about `Tokens`, not actual threads).
+    //
+    // When any LLVM worker thread finishes while the main thread is marked as
+    // "busy with LLVM work", we can do a little switcheroo: We give the Token
+    // of the just finished thread to the LLVM worker thread that is working on
+    // behalf of the main thread's implicit Token, thus freeing up the main
+    // thread again. The coordinator can then again decide what the main thread
+    // should do. This allows the coordinator to make decisions at more points
+    // in time.
+    //
+    // Striking a Balance between Throughput and Memory Consumption
+    // ------------------------------------------------------------
+    // Since our two goals, (1) use as many Tokens as possible and (2) keep
+    // memory consumption as low as possible, are in conflict with each other,
+    // we have to find a trade off between them. Right now, the goal is to keep
+    // all workers busy, which means that no worker should find the queue empty
+    // when it is ready to start.
+    // How do we achieve this? Good question :) We actually never know how
+    // many `Tokens` are potentially available so it's hard to say how much to
+    // fill up the queue before switching the main thread to LLVM work. Also we
+    // currently don't have a means to estimate how long a running LLVM worker
+    // will still be busy with its current WorkItem. However, we know the
+    // maximal count of available Tokens that makes sense (=the number of CPU
+    // cores), so we can take a conservative guess. The heuristic we use here
+    // is implemented in the `queue_full_enough()` function.
+    //
+    // Some Background on Jobservers
+    // -----------------------------
+    // It's worth also touching on the management of parallelism here. We don't
+    // want to just spawn a thread per work item because while that's optimal
+    // parallelism it may overload a system with too many threads or violate our
+    // configuration for the maximum amount of cpu to use for this process. To
+    // manage this we use the `jobserver` crate.
+    //
+    // Job servers are an artifact of GNU make and are used to manage
+    // parallelism between processes. A jobserver is a glorified IPC semaphore
+    // basically. Whenever we want to run some work we acquire the semaphore,
+    // and whenever we're done with that work we release the semaphore. In this
+    // manner we can ensure that the maximum number of parallel workers is
+    // capped at any one point in time.
+    //
+    // LTO and the coordinator thread
+    // ------------------------------
+    //
+    // The final job the coordinator thread is responsible for is managing LTO
+    // and how that works. When LTO is requested what we'll do is collect all
+    // optimized LLVM modules into a local vector on the coordinator. Once all
+    // modules have been codegened and optimized we hand this to the `lto`
+    // module for further optimization. The `lto` module will return a list
+    // of more modules to work on, which the coordinator will continue to spawn
+    // work for.
+    //
+    // Each LLVM module is automatically sent back to the coordinator for LTO if
+    // necessary. There are already optimizations in place to avoid sending work
+    // back to the coordinator if LTO isn't requested.
+    return thread::spawn(move || {
+        let max_workers = ::num_cpus::get();
+        let mut worker_id_counter = 0;
+        let mut free_worker_ids = Vec::new();
+        let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
+            if let Some(id) = free_worker_ids.pop() {
+                id
+            } else {
+                let id = worker_id_counter;
+                worker_id_counter += 1;
+                id
+            }
+        };
+
+        // This is where we collect codegen units that have gone all the way
+        // through codegen and LLVM.
+        let mut compiled_modules = vec![];
+        let mut compiled_metadata_module = None;
+        let mut compiled_allocator_module = None;
+        let mut needs_fat_lto = Vec::new();
+        let mut needs_thin_lto = Vec::new();
+        let mut lto_import_only_modules = Vec::new();
+        let mut started_lto = false;
+        let mut codegen_aborted = false;
+
+        // This flag tracks whether all items have gone through codegen.
+        let mut codegen_done = false;
+
+        // This is the queue of LLVM work items that still need processing.
+        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
+
+        // These are the jobserver Tokens we currently hold. This does not
+        // include the implicit Token that the compiler process always owns.
+        let mut tokens = Vec::new();
+
+        let mut main_thread_worker_state = MainThreadWorkerState::Idle;
+        let mut running = 0;
+
+        let prof = &cgcx.prof;
+        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
+
+        // Run the message loop while there's still anything that needs message
+        // processing. Note that as soon as codegen is aborted we simply want to
+        // wait for all existing work to finish, so many of the conditions here
+        // only apply if codegen hasn't been aborted as they represent pending
+        // work to be done.
+        while !codegen_done
+            || running > 0
+            || (!codegen_aborted
+                && !(work_items.is_empty()
+                    && needs_fat_lto.is_empty()
+                    && needs_thin_lto.is_empty()
+                    && lto_import_only_modules.is_empty()
+                    && main_thread_worker_state == MainThreadWorkerState::Idle))
+        {
+            // While there are still CGUs to be codegened, the coordinator has
+            // to decide how to utilize the compiler process's implicit Token:
+            // for codegenning more CGUs or for running them through LLVM.
+            if !codegen_done {
+                if main_thread_worker_state == MainThreadWorkerState::Idle {
+                    if !queue_full_enough(work_items.len(), running, max_workers) {
+                        // The queue is not full enough, codegen more items:
+                        if codegen_worker_send.send(Message::CodegenItem).is_err() {
+                            panic!("Could not send Message::CodegenItem to main thread")
+                        }
+                        main_thread_worker_state = MainThreadWorkerState::Codegenning;
+                    } else {
+                        // The queue is full enough to not let the worker
+                        // threads starve. Use the implicit Token to do some
+                        // LLVM work too.
+                        let (item, _) =
+                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
+                        let cgcx = CodegenContext {
+                            worker: get_worker_id(&mut free_worker_ids),
+                            ..cgcx.clone()
+                        };
+                        maybe_start_llvm_timer(
+                            prof,
+                            cgcx.config(item.module_kind()),
+                            &mut llvm_start_time,
+                        );
+                        main_thread_worker_state = MainThreadWorkerState::LLVMing;
+                        spawn_work(cgcx, item);
+                    }
+                }
+            } else if codegen_aborted {
+                // don't queue up any more work if codegen was aborted, we're
+                // just waiting for our existing children to finish
+            } else {
+                // If we've finished everything related to normal codegen
+                // then it must be the case that we've got some LTO work to do.
+                // Perform the serial work here of figuring out what we're
+                // going to LTO and then push a bunch of work items onto our
+                // queue to do LTO
+                if work_items.is_empty()
+                    && running == 0
+                    && main_thread_worker_state == MainThreadWorkerState::Idle
+                {
+                    assert!(!started_lto);
+                    started_lto = true;
+
+                    let needs_fat_lto = mem::take(&mut needs_fat_lto);
+                    let needs_thin_lto = mem::take(&mut needs_thin_lto);
+                    let import_only_modules = mem::take(&mut lto_import_only_modules);
+
+                    for (work, cost) in
+                        generate_lto_work(&cgcx, needs_fat_lto, needs_thin_lto, import_only_modules)
+                    {
+                        let insertion_index = work_items
+                            .binary_search_by_key(&cost, |&(_, cost)| cost)
+                            .unwrap_or_else(|e| e);
+                        work_items.insert(insertion_index, (work, cost));
+                        if !cgcx.opts.debugging_opts.no_parallel_llvm {
+                            helper.request_token();
+                        }
+                    }
+                }
+
+                // In this branch, we know that everything has been codegened,
+                // so it's just a matter of determining whether the implicit
+                // Token is free to use for LLVM work.
+                match main_thread_worker_state {
+                    MainThreadWorkerState::Idle => {
+                        if let Some((item, _)) = work_items.pop() {
+                            let cgcx = CodegenContext {
+                                worker: get_worker_id(&mut free_worker_ids),
+                                ..cgcx.clone()
+                            };
+                            maybe_start_llvm_timer(
+                                prof,
+                                cgcx.config(item.module_kind()),
+                                &mut llvm_start_time,
+                            );
+                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
+                            spawn_work(cgcx, item);
+                        } else {
+                            // There is no unstarted work, so let the main thread
+                            // take over for a running worker. Otherwise the
+                            // implicit token would just go to waste.
+                            // We reduce the `running` counter by one. The
+                            // `tokens.truncate()` below will take care of
+                            // giving the Token back.
+                            debug_assert!(running > 0);
+                            running -= 1;
+                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
+                        }
+                    }
+                    MainThreadWorkerState::Codegenning => bug!(
+                        "codegen worker should not be codegenning after \
+                              codegen was already completed"
+                    ),
+                    MainThreadWorkerState::LLVMing => {
+                        // Already making good use of that token
+                    }
+                }
+            }
+
+            // Spin up what work we can, only doing this while we've got available
+            // parallelism slots and work left to spawn.
+            while !codegen_aborted && !work_items.is_empty() && running < tokens.len() {
+                let (item, _) = work_items.pop().unwrap();
+
+                maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);
+
+                let cgcx =
+                    CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };
+
+                spawn_work(cgcx, item);
+                running += 1;
+            }
+
+            // Relinquish accidentally acquired extra tokens
+            tokens.truncate(running);
+
+            // If a thread exits successfully then we drop a token associated
+            // with that worker and update our `running` count. We may later
+            // re-acquire a token to continue running more work. We may also not
+            // actually drop a token here if the worker was running with an
+            // "ephemeral token"
+            let mut free_worker = |worker_id| {
+                if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+                    main_thread_worker_state = MainThreadWorkerState::Idle;
+                } else {
+                    running -= 1;
+                }
+
+                free_worker_ids.push(worker_id);
+            };
+
+            let msg = coordinator_receive.recv().unwrap();
+            match *msg.downcast::<Message<B>>().ok().unwrap() {
+                // Save the token locally and the next turn of the loop will use
+                // this to spawn a new unit of work, or it may get dropped
+                // immediately if we have no more work to spawn.
+                Message::Token(token) => {
+                    match token {
+                        Ok(token) => {
+                            tokens.push(token);
+
+                            if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+                                // If the main thread token is used for LLVM work
+                                // at the moment, we turn that thread into a regular
+                                // LLVM worker thread, so the main thread is free
+                                // to react to codegen demand.
+                                main_thread_worker_state = MainThreadWorkerState::Idle;
+                                running += 1;
+                            }
+                        }
+                        Err(e) => {
+                            let msg = &format!("failed to acquire jobserver token: {}", e);
+                            shared_emitter.fatal(msg);
+                            // Exit the coordinator thread
+                            panic!("{}", msg)
+                        }
+                    }
+                }
+
+                Message::CodegenDone { llvm_work_item, cost } => {
+                    // We keep the queue sorted by estimated processing cost,
+                    // so that more expensive items are processed earlier. This
+                    // is good for throughput as it gives the main thread more
+                    // time to fill up the queue and it avoids scheduling
+                    // expensive items to the end.
+                    // Note, however, that this is not ideal for memory
+                    // consumption, as LLVM module sizes are not evenly
+                    // distributed.
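+                    // (The vector is kept sorted in ascending order of cost
+                    // and `pop()` takes from the back, so the most expensive
+                    // item is started first.)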
+                    let insertion_index = work_items
+                        .binary_search_by_key(&cost, |&(_, cost)| cost)
+                        .unwrap_or_else(|e| e);
+                    work_items.insert(insertion_index, (llvm_work_item, cost));
+
+                    if !cgcx.opts.debugging_opts.no_parallel_llvm {
+                        helper.request_token();
+                    }
+                    assert!(!codegen_aborted);
+                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+                    main_thread_worker_state = MainThreadWorkerState::Idle;
+                }
+
+                Message::CodegenComplete => {
+                    codegen_done = true;
+                    assert!(!codegen_aborted);
+                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+                    main_thread_worker_state = MainThreadWorkerState::Idle;
+                }
+
+                // If codegen is aborted, that means codegen was cut short due
+                // to some normal-ish compiler error. In this situation we want
+                // to exit as soon as possible, but we want to make sure all
+                // existing work has finished. Flag codegen as being done, and
+                // then conditions above will ensure no more work is spawned but
+                // we'll keep executing this loop until `running` hits 0.
+                Message::CodegenAborted => {
+                    assert!(!codegen_aborted);
+                    codegen_done = true;
+                    codegen_aborted = true;
+                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+                }
+                Message::Done { result: Ok(compiled_module), worker_id } => {
+                    free_worker(worker_id);
+                    match compiled_module.kind {
+                        ModuleKind::Regular => {
+                            compiled_modules.push(compiled_module);
+                        }
+                        ModuleKind::Metadata => {
+                            assert!(compiled_metadata_module.is_none());
+                            compiled_metadata_module = Some(compiled_module);
+                        }
+                        ModuleKind::Allocator => {
+                            assert!(compiled_allocator_module.is_none());
+                            compiled_allocator_module = Some(compiled_module);
+                        }
+                    }
+                }
+                Message::NeedsFatLTO { result, worker_id } => {
+                    assert!(!started_lto);
+                    free_worker(worker_id);
+                    needs_fat_lto.push(result);
+                }
+                Message::NeedsThinLTO { name, thin_buffer, worker_id } => {
+                    assert!(!started_lto);
+                    free_worker(worker_id);
+                    needs_thin_lto.push((name, thin_buffer));
+                }
+                Message::AddImportOnlyModule { module_data, work_product } => {
+                    assert!(!started_lto);
+                    assert!(!codegen_done);
+                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+                    lto_import_only_modules.push((module_data, work_product));
+                    main_thread_worker_state = MainThreadWorkerState::Idle;
+                }
+                // If the thread failed that means it panicked, so we abort immediately.
+                Message::Done { result: Err(None), worker_id: _ } => {
+                    bug!("worker thread panicked");
+                }
+                Message::Done { result: Err(Some(WorkerFatalError)), worker_id: _ } => {
+                    return Err(());
+                }
+                Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
+            }
+        }
+
+        // Drop to print timings
+        drop(llvm_start_time);
+
+        // Regardless of what order these modules completed in, report them to
+        // the backend in the same order every time to ensure that we're handing
+        // out deterministic results.
+        compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
+
+        Ok(CompiledModules {
+            modules: compiled_modules,
+            metadata_module: compiled_metadata_module,
+            allocator_module: compiled_allocator_module,
+        })
+    });
+
+    // A heuristic that determines if we have enough LLVM WorkItems in the
+    // queue so that the main thread can do LLVM work instead of codegen
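+    // (For example, with `max_workers = 8` and `workers_running = 4`, the
+    // queue counts as full enough once it holds at least 8 - 4 / 2 = 6 items.)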
+    fn queue_full_enough(
+        items_in_queue: usize,
+        workers_running: usize,
+        max_workers: usize,
+    ) -> bool {
+        // Tune me, plz.
+        items_in_queue > 0 && items_in_queue >= max_workers.saturating_sub(workers_running / 2)
+    }
+
+    fn maybe_start_llvm_timer<'a>(
+        prof: &'a SelfProfilerRef,
+        config: &ModuleConfig,
+        llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
+    ) {
+        if config.time_module && llvm_start_time.is_none() {
+            *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes", "crate"));
+        }
+    }
+}
+
+pub const CODEGEN_WORKER_ID: usize = usize::MAX;
+
+/// `FatalError` is explicitly not `Send`.
+#[must_use]
+pub struct WorkerFatalError;
+
+fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
+    thread::spawn(move || {
+        // Set up a destructor which will fire off a message that we're done as
+        // we exit.
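+        // Because `Drop` also runs during unwinding, the coordinator hears
+        // about panicking workers as well: a result of `None` below is
+        // reported as "worker thread panicked" on the coordinator side.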
+        struct Bomb<B: ExtraBackendMethods> {
+            coordinator_send: Sender<Box<dyn Any + Send>>,
+            result: Option<Result<WorkItemResult<B>, FatalError>>,
+            worker_id: usize,
+        }
+        impl<B: ExtraBackendMethods> Drop for Bomb<B> {
+            fn drop(&mut self) {
+                let worker_id = self.worker_id;
+                let msg = match self.result.take() {
+                    Some(Ok(WorkItemResult::Compiled(m))) => {
+                        Message::Done::<B> { result: Ok(m), worker_id }
+                    }
+                    Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
+                        Message::NeedsFatLTO::<B> { result: m, worker_id }
+                    }
+                    Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
+                        Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
+                    }
+                    Some(Err(FatalError)) => {
+                        Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
+                    }
+                    None => Message::Done::<B> { result: Err(None), worker_id },
+                };
+                drop(self.coordinator_send.send(Box::new(msg)));
+            }
+        }
+
+        let mut bomb = Bomb::<B> {
+            coordinator_send: cgcx.coordinator_send.clone(),
+            result: None,
+            worker_id: cgcx.worker,
+        };
+
+        // Execute the work itself, and if it finishes successfully then flag
+        // ourselves as a success as well.
+        //
+        // Note that we ignore any `FatalError` coming out of `execute_work_item`,
+        // as a diagnostic was already sent off to the main thread; we just
+        // surface that there was an error in this worker.
+        bomb.result = {
+            let _prof_timer = work.start_profiling(&cgcx);
+            Some(execute_work_item(&cgcx, work))
+        };
+    });
+}
+
+enum SharedEmitterMessage {
+    Diagnostic(Diagnostic),
+    InlineAsmError(u32, String, Level, Option<(String, Vec<InnerSpan>)>),
+    AbortIfErrors,
+    Fatal(String),
+}
+
+#[derive(Clone)]
+pub struct SharedEmitter {
+    sender: Sender<SharedEmitterMessage>,
+}
+
+pub struct SharedEmitterMain {
+    receiver: Receiver<SharedEmitterMessage>,
+}
+
+impl SharedEmitter {
+    pub fn new() -> (SharedEmitter, SharedEmitterMain) {
+        let (sender, receiver) = channel();
+
+        (SharedEmitter { sender }, SharedEmitterMain { receiver })
+    }
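+
+    // A minimal usage sketch (hypothetical caller; `sess` is a `&Session`):
+    //
+    //     let (emitter, emitter_main) = SharedEmitter::new();
+    //     // `emitter` is cloned into worker threads, which send diagnostics
+    //     // through it; the main thread drains them periodically with:
+    //     emitter_main.check(sess, /* blocking */ false);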
+
+    pub fn inline_asm_error(
+        &self,
+        cookie: u32,
+        msg: String,
+        level: Level,
+        source: Option<(String, Vec<InnerSpan>)>,
+    ) {
+        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)));
+    }
+
+    pub fn fatal(&self, msg: &str) {
+        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
+    }
+}
+
+impl Emitter for SharedEmitter {
+    fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
+        drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+            msg: diag.message(),
+            code: diag.code.clone(),
+            lvl: diag.level,
+        })));
+        for child in &diag.children {
+            drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+                msg: child.message(),
+                code: None,
+                lvl: child.level,
+            })));
+        }
+        drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
+    }
+    fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+        None
+    }
+}
+
+impl SharedEmitterMain {
+    pub fn check(&self, sess: &Session, blocking: bool) {
+        loop {
+            let message = if blocking {
+                match self.receiver.recv() {
+                    Ok(message) => Ok(message),
+                    Err(_) => Err(()),
+                }
+            } else {
+                match self.receiver.try_recv() {
+                    Ok(message) => Ok(message),
+                    Err(_) => Err(()),
+                }
+            };
+
+            match message {
+                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
+                    let handler = sess.diagnostic();
+                    let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg);
+                    if let Some(code) = diag.code {
+                        d.code(code);
+                    }
+                    handler.emit_diagnostic(&d);
+                }
+                Ok(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)) => {
+                    let msg = msg.strip_prefix("error: ").unwrap_or(&msg);
+
+                    let mut err = match level {
+                        Level::Error => sess.struct_err(&msg),
+                        Level::Warning => sess.struct_warn(&msg),
+                        Level::Note => sess.struct_note_without_error(&msg),
+                        _ => bug!("Invalid inline asm diagnostic level"),
+                    };
+
+                    // If the cookie is 0 then we don't have span information.
+                    if cookie != 0 {
+                        let pos = BytePos::from_u32(cookie);
+                        let span = Span::with_root_ctxt(pos, pos);
+                        err.set_span(span);
+                    };
+
+                    // Point to the generated assembly if it is available.
+                    if let Some((buffer, spans)) = source {
+                        let source = sess
+                            .source_map()
+                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
+                        let source_span = Span::with_root_ctxt(source.start_pos, source.end_pos);
+                        let spans: Vec<_> =
+                            spans.iter().map(|sp| source_span.from_inner(*sp)).collect();
+                        err.span_note(spans, "instantiated into assembly here");
+                    }
+
+                    err.emit();
+                }
+                Ok(SharedEmitterMessage::AbortIfErrors) => {
+                    sess.abort_if_errors();
+                }
+                Ok(SharedEmitterMessage::Fatal(msg)) => {
+                    sess.fatal(&msg);
+                }
+                Err(_) => {
+                    break;
+                }
+            }
+        }
+    }
+}
+
+pub struct OngoingCodegen<B: ExtraBackendMethods> {
+    pub backend: B,
+    pub crate_name: Symbol,
+    pub crate_hash: Svh,
+    pub metadata: EncodedMetadata,
+    pub windows_subsystem: Option<String>,
+    pub linker_info: LinkerInfo,
+    pub crate_info: CrateInfo,
+    pub coordinator_send: Sender<Box<dyn Any + Send>>,
+    pub codegen_worker_receive: Receiver<Message<B>>,
+    pub shared_emitter_main: SharedEmitterMain,
+    pub future: thread::JoinHandle<Result<CompiledModules, ()>>,
+    pub output_filenames: Arc<OutputFilenames>,
+}
+
+impl<B: ExtraBackendMethods> OngoingCodegen<B> {
+    pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+        let _timer = sess.timer("finish_ongoing_codegen");
+
+        self.shared_emitter_main.check(sess, true);
+        let future = self.future;
+        let compiled_modules = sess.time("join_worker_thread", || match future.join() {
+            Ok(Ok(compiled_modules)) => compiled_modules,
+            Ok(Err(())) => {
+                sess.abort_if_errors();
+                panic!("expected abort due to worker thread errors")
+            }
+            Err(_) => {
+                bug!("panic during codegen/LLVM phase");
+            }
+        });
+
+        sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());
+
+        sess.abort_if_errors();
+
+        let work_products =
+            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
+        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
+
+        // FIXME: time_llvm_passes support - does this use a global context or
+        // something?
+        if sess.codegen_units() == 1 && sess.time_llvm_passes() {
+            self.backend.print_pass_timings()
+        }
+
+        (
+            CodegenResults {
+                crate_name: self.crate_name,
+                crate_hash: self.crate_hash,
+                metadata: self.metadata,
+                windows_subsystem: self.windows_subsystem,
+                linker_info: self.linker_info,
+                crate_info: self.crate_info,
+
+                modules: compiled_modules.modules,
+                allocator_module: compiled_modules.allocator_module,
+                metadata_module: compiled_modules.metadata_module,
+            },
+            work_products,
+        )
+    }
+
+    pub fn submit_pre_codegened_module_to_llvm(
+        &self,
+        tcx: TyCtxt<'_>,
+        module: ModuleCodegen<B::Module>,
+    ) {
+        self.wait_for_signal_to_codegen_item();
+        self.check_for_errors(tcx.sess);
+
+        // These are generally cheap and won't throw off scheduling.
+        let cost = 0;
+        submit_codegened_module_to_llvm(&self.backend, &self.coordinator_send, module, cost);
+    }
+
+    pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
+        self.wait_for_signal_to_codegen_item();
+        self.check_for_errors(tcx.sess);
+        drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::<B>)));
+    }
+
+    /// Consumes this context indicating that codegen was entirely aborted, and
+    /// we need to exit as quickly as possible.
+    ///
+    /// This method blocks the current thread until all worker threads have
+    /// finished, and all worker threads should have exited or be very close to
+    /// exiting at this point.
+    pub fn codegen_aborted(self) {
+        // Signal to the coordinator it should spawn no more work and start
+        // shutdown.
+        drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::<B>)));
+        drop(self.future.join());
+    }
+
+    pub fn check_for_errors(&self, sess: &Session) {
+        self.shared_emitter_main.check(sess, false);
+    }
+
+    pub fn wait_for_signal_to_codegen_item(&self) {
+        match self.codegen_worker_receive.recv() {
+            Ok(Message::CodegenItem) => {
+                // Nothing to do
+            }
+            Ok(_) => panic!("unexpected message"),
+            Err(_) => {
+                // One of the LLVM threads must have panicked, fall through so
+                // error handling can be reached.
+            }
+        }
+    }
+}
+
+pub fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
+    _backend: &B,
+    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+    module: ModuleCodegen<B::Module>,
+    cost: u64,
+) {
+    let llvm_work_item = WorkItem::Optimize(module);
+    drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost })));
+}
+
+pub fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
+    _backend: &B,
+    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+    module: CachedModuleCodegen,
+) {
+    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
+    drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost: 0 })));
+}
+
+pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
+    _backend: &B,
+    tcx: TyCtxt<'_>,
+    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+    module: CachedModuleCodegen,
+) {
+    let filename = pre_lto_bitcode_filename(&module.name);
+    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
+    let file = fs::File::open(&bc_path)
+        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
+
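+    // `Mmap::map` is `unsafe` because modifying the underlying file while it
+    // is mapped is undefined behavior; we assume the incremental compilation
+    // artifact is not touched while the compiler is running.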
+    let mmap = unsafe {
+        memmap::Mmap::map(&file).unwrap_or_else(|e| {
+            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
+        })
+    };
+    // Schedule the module to be loaded
+    drop(tx_to_llvm_workers.send(Box::new(Message::AddImportOnlyModule::<B> {
+        module_data: SerializedModule::FromUncompressedFile(mmap),
+        work_product: module.source,
+    })));
+}
+
+pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
+    format!("{}.{}", module_name, PRE_LTO_BC_EXT)
+}
+
+fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
+    // Linker-plugin LTO together with `prefer_dynamic` on Windows-like
+    // targets is not supported, so this combination should never occur. If it
+    // does, something is wrong with command-line argument validation.
+    assert!(
+        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
+            && tcx.sess.target.target.options.is_like_windows
+            && tcx.sess.opts.cg.prefer_dynamic)
+    );
+
+    tcx.sess.target.target.options.is_like_windows &&
+        tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
+    // ThinLTO can't handle this workaround in all cases, so we don't
+    // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
+    // dynamic linking when linker plugin LTO is enabled.
+    !tcx.sess.opts.cg.linker_plugin_lto.enabled()
+}
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
new file mode 100644
index 00000000000..77c12c410d5
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -0,0 +1,959 @@
+//! Codegen the completed AST to the LLVM IR.
+//!
+//! Some functions here, such as `codegen_block` and `codegen_expr`, return a value --
+//! the result of the codegen to LLVM -- while others, such as `codegen_fn`
+//! and `mono_item`, are called only for the side effect of adding a
+//! particular definition to the LLVM IR output we're producing.
+//!
+//! Hopefully useful general knowledge about codegen:
+//!
+//! * There's no way to find out the `Ty` type of a `Value`. Doing so
+//!   would be "trying to get the eggs out of an omelette" (credit:
+//!   pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
+//!   but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
+//!   int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
+
+use crate::back::write::{
+    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
+    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
+};
+use crate::common::{IntPredicate, RealPredicate, TypeKind};
+use crate::meth;
+use crate::mir;
+use crate::mir::operand::OperandValue;
+use crate::mir::place::PlaceRef;
+use crate::traits::*;
+use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
+
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::print_time_passes_entry;
+use rustc_data_structures::sync::{par_iter, Lock, ParallelIterator};
+use rustc_hir as hir;
+use rustc_hir::def_id::{LocalDefId, LOCAL_CRATE};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::middle::cstore::{self, LinkagePreference};
+use rustc_middle::middle::lang_items;
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
+use rustc_middle::ty::layout::{self, HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, EntryFnType};
+use rustc_session::utils::NativeLibKind;
+use rustc_session::Session;
+use rustc_span::Span;
+use rustc_symbol_mangling::test as symbol_names_test;
+use rustc_target::abi::{Abi, Align, LayoutOf, Scalar, VariantIdx};
+
+use std::cmp;
+use std::ops::{Deref, DerefMut};
+use std::time::{Duration, Instant};
+
+pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
+    match op {
+        hir::BinOpKind::Eq => IntPredicate::IntEQ,
+        hir::BinOpKind::Ne => IntPredicate::IntNE,
+        hir::BinOpKind::Lt => {
+            if signed {
+                IntPredicate::IntSLT
+            } else {
+                IntPredicate::IntULT
+            }
+        }
+        hir::BinOpKind::Le => {
+            if signed {
+                IntPredicate::IntSLE
+            } else {
+                IntPredicate::IntULE
+            }
+        }
+        hir::BinOpKind::Gt => {
+            if signed {
+                IntPredicate::IntSGT
+            } else {
+                IntPredicate::IntUGT
+            }
+        }
+        hir::BinOpKind::Ge => {
+            if signed {
+                IntPredicate::IntSGE
+            } else {
+                IntPredicate::IntUGE
+            }
+        }
+        op => bug!(
+            "bin_op_to_icmp_predicate: expected comparison operator, \
+             found {:?}",
+            op
+        ),
+    }
+}
+
+pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
+    match op {
+        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
+        hir::BinOpKind::Ne => RealPredicate::RealUNE,
+        hir::BinOpKind::Lt => RealPredicate::RealOLT,
+        hir::BinOpKind::Le => RealPredicate::RealOLE,
+        hir::BinOpKind::Gt => RealPredicate::RealOGT,
+        hir::BinOpKind::Ge => RealPredicate::RealOGE,
+        op => {
+            bug!(
+                "bin_op_to_fcmp_predicate: expected comparison operator, \
+                 found {:?}",
+                op
+            );
+        }
+    }
+}
+
+pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    lhs: Bx::Value,
+    rhs: Bx::Value,
+    t: Ty<'tcx>,
+    ret_ty: Bx::Type,
+    op: hir::BinOpKind,
+) -> Bx::Value {
+    let signed = match t.kind {
+        ty::Float(_) => {
+            let cmp = bin_op_to_fcmp_predicate(op);
+            let cmp = bx.fcmp(cmp, lhs, rhs);
+            return bx.sext(cmp, ret_ty);
+        }
+        ty::Uint(_) => false,
+        ty::Int(_) => true,
+        _ => bug!("compare_simd_types: invalid SIMD type"),
+    };
+
+    let cmp = bin_op_to_icmp_predicate(op, signed);
+    let cmp = bx.icmp(cmp, lhs, rhs);
+    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
+    // to get the correctly sized type. This will compile to a single instruction
+    // once the IR is converted to assembly if the SIMD instruction is supported
+    // by the target architecture.
+    bx.sext(cmp, ret_ty)
+}
+
+/// Retrieves the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
+/// where the new vtable for an object will be derived from the old one.
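+///
+/// For example, unsizing `[u8; 4]` to `[u8]` produces the constant length `4`,
+/// while unsizing a sized type to `dyn Trait` produces a pointer to the
+/// appropriate vtable.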
+pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
+    cx: &Cx,
+    source: Ty<'tcx>,
+    target: Ty<'tcx>,
+    old_info: Option<Cx::Value>,
+) -> Cx::Value {
+    let (source, target) =
+        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, cx.param_env());
+    match (&source.kind, &target.kind) {
+        (&ty::Array(_, len), &ty::Slice(_)) => {
+            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
+        }
+        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+            // For now, upcasts are limited to changes in marker
+            // traits, and hence never actually require a change
+            // to the vtable.
+            old_info.expect("unsized_info: missing old info for trait upcast")
+        }
+        (_, &ty::Dynamic(ref data, ..)) => {
+            let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)).field(cx, FAT_PTR_EXTRA);
+            cx.const_ptrcast(
+                meth::get_vtable(cx, source, data.principal()),
+                cx.backend_type(vtable_ptr),
+            )
+        }
+        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+    }
+}
+
+/// Coerces `src` to `dst_ty`. `src_ty` must be a thin pointer.
+pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    src: Bx::Value,
+    src_ty: Ty<'tcx>,
+    dst_ty: Ty<'tcx>,
+) -> (Bx::Value, Bx::Value) {
+    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
+    match (&src_ty.kind, &dst_ty.kind) {
+        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+            assert!(bx.cx().type_is_sized(a));
+            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
+        }
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            let src_layout = bx.cx().layout_of(src_ty);
+            let dst_layout = bx.cx().layout_of(dst_ty);
+            let mut result = None;
+            for i in 0..src_layout.fields.count() {
+                let src_f = src_layout.field(bx.cx(), i);
+                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+                if src_f.is_zst() {
+                    continue;
+                }
+                assert_eq!(src_layout.size, src_f.size);
+
+                let dst_f = dst_layout.field(bx.cx(), i);
+                assert_ne!(src_f.ty, dst_f.ty);
+                assert_eq!(result, None);
+                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
+            }
+            let (lldata, llextra) = result.unwrap();
+            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+            // FIXME(eddyb) move these out of this `match` arm, so they're always
+            // applied, uniformly, no matter the source/destination types.
+            (
+                bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
+                bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)),
+            )
+        }
+        _ => bug!("unsize_thin_ptr: called on bad types"),
+    }
+}
+
+/// Coerces `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty`, and stores the result in `dst`.
+pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    src: PlaceRef<'tcx, Bx::Value>,
+    dst: PlaceRef<'tcx, Bx::Value>,
+) {
+    let src_ty = src.layout.ty;
+    let dst_ty = dst.layout.ty;
+    match (&src_ty.kind, &dst_ty.kind) {
+        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
+            let (base, info) = match bx.load_operand(src).val {
+                OperandValue::Pair(base, info) => {
+                    // fat-ptr to fat-ptr unsize preserves the vtable
+                    // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
+                    // So we need to pointercast the base to ensure
+                    // the types match up.
+                    // FIXME(eddyb) use `scalar_pair_element_backend_type` here,
+                    // like `unsize_thin_ptr` does.
+                    let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
+                    (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
+                }
+                OperandValue::Immediate(base) => unsize_thin_ptr(bx, base, src_ty, dst_ty),
+                OperandValue::Ref(..) => bug!(),
+            };
+            OperandValue::Pair(base, info).store(bx, dst);
+        }
+
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+                let src_f = src.project_field(bx, i);
+                let dst_f = dst.project_field(bx, i);
+
+                if dst_f.layout.is_zst() {
+                    continue;
+                }
+
+                if src_f.layout.ty == dst_f.layout.ty {
+                    memcpy_ty(
+                        bx,
+                        dst_f.llval,
+                        dst_f.align,
+                        src_f.llval,
+                        src_f.align,
+                        src_f.layout,
+                        MemFlags::empty(),
+                    );
+                } else {
+                    coerce_unsized_into(bx, src_f, dst_f);
+                }
+            }
+        }
+        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty,),
+    }
+}
+
+pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    op: hir::BinOpKind,
+    lhs: Bx::Value,
+    rhs: Bx::Value,
+) -> Bx::Value {
+    cast_shift_rhs(bx, op, lhs, rhs)
+}
+
+fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    op: hir::BinOpKind,
+    lhs: Bx::Value,
+    rhs: Bx::Value,
+) -> Bx::Value {
+    // Shifts may have any size int on the rhs
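+    // (e.g. `1u64 << 2u8` is legal, and the `u8` shift amount is
+    // zero-extended to the width of the `u64` before the shift is emitted).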
+    if op.is_shift() {
+        let mut rhs_llty = bx.cx().val_ty(rhs);
+        let mut lhs_llty = bx.cx().val_ty(lhs);
+        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
+            rhs_llty = bx.cx().element_type(rhs_llty)
+        }
+        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
+            lhs_llty = bx.cx().element_type(lhs_llty)
+        }
+        let rhs_sz = bx.cx().int_width(rhs_llty);
+        let lhs_sz = bx.cx().int_width(lhs_llty);
+        if lhs_sz < rhs_sz {
+            bx.trunc(rhs, lhs_llty)
+        } else if lhs_sz > rhs_sz {
+            // FIXME(#1877): If in the future shifting by negative
+            // values is no longer undefined, then this is wrong.
+            bx.zext(rhs, lhs_llty)
+        } else {
+            rhs
+        }
+    } else {
+        rhs
+    }
+}
+
+/// Returns `true` if this session's target will use SEH-based unwinding.
+///
+/// This is only true for MSVC targets, and even then the 64-bit MSVC target
+/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
+/// 64-bit MinGW) instead of "full SEH".
+pub fn wants_msvc_seh(sess: &Session) -> bool {
+    sess.target.target.options.is_like_msvc
+}
+
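+// `bool` is represented as `i1` in immediate (SSA value) position but as `i8`
+// in memory; `from_immediate` and `to_immediate*` convert between these two
+// representations.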
+pub fn from_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    val: Bx::Value,
+) -> Bx::Value {
+    if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { val }
+}
+
+pub fn to_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    val: Bx::Value,
+    layout: layout::TyAndLayout<'_>,
+) -> Bx::Value {
+    if let Abi::Scalar(ref scalar) = layout.abi {
+        return to_immediate_scalar(bx, val, scalar);
+    }
+    val
+}
+
+pub fn to_immediate_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    val: Bx::Value,
+    scalar: &Scalar,
+) -> Bx::Value {
+    if scalar.is_bool() {
+        return bx.trunc(val, bx.cx().type_i1());
+    }
+    val
+}
+
+pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    dst: Bx::Value,
+    dst_align: Align,
+    src: Bx::Value,
+    src_align: Align,
+    layout: TyAndLayout<'tcx>,
+    flags: MemFlags,
+) {
+    let size = layout.size.bytes();
+    if size == 0 {
+        return;
+    }
+
+    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
+}
+
+pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+    cx: &'a Bx::CodegenCx,
+    instance: Instance<'tcx>,
+) {
+    // this is an info! to allow collecting monomorphization statistics
+    // and to allow finding the last function before LLVM aborts in
+    // release builds.
+    info!("codegen_instance({})", instance);
+
+    mir::codegen_mir::<Bx>(cx, instance);
+}
+
+/// Creates the `main` function which will initialize the Rust runtime and call
+/// the user's `main` function.
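+///
+/// Roughly, on targets where `main` takes `argc`/`argv`, the generated wrapper
+/// behaves like the following sketch (not the actual emitted code; `lang_start`
+/// is the `start` lang item and `user_main` stands in for the compiled Rust
+/// `main`):
+///
+/// ```ignore (illustrative)
+/// fn main(argc: isize, argv: *const *const u8) -> isize {
+///     lang_start(user_main, argc, argv)
+/// }
+/// ```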
+pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    cx: &'a Bx::CodegenCx,
+) -> Option<Bx::Function> {
+    let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
+        Some((def_id, _)) => (def_id, cx.tcx().def_span(def_id)),
+        None => return None,
+    };
+
+    let instance = Instance::mono(cx.tcx(), main_def_id.to_def_id());
+
+    if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
+        // We want to create the wrapper in the same codegen unit as Rust's main
+        // function.
+        return None;
+    }
+
+    let main_llfn = cx.get_fn_addr(instance);
+
+    return cx.tcx().entry_fn(LOCAL_CRATE).map(|(_, et)| {
+        let use_start_lang_item = EntryFnType::Start != et;
+        create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, use_start_lang_item)
+    });
+
+    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+        cx: &'a Bx::CodegenCx,
+        sp: Span,
+        rust_main: Bx::Value,
+        rust_main_def_id: LocalDefId,
+        use_start_lang_item: bool,
+    ) -> Bx::Function {
+        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
+        // depending on whether the target needs `argc` and `argv` to be passed in.
+        let llfty = if cx.sess().target.target.options.main_needs_argc_argv {
+            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+        } else {
+            cx.type_func(&[], cx.type_int())
+        };
+
+        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
+        // Given that `main()` has no arguments, its return type cannot have
+        // late-bound regions, since late-bound regions must appear in the
+        // argument listing.
+        let main_ret_ty = cx.tcx().erase_regions(&main_ret_ty.no_bound_vars().unwrap());
+
+        if cx.get_declared_value("main").is_some() {
+            // FIXME: We should be smart and show a better diagnostic here.
+            cx.sess()
+                .struct_span_err(sp, "entry symbol `main` declared multiple times")
+                .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
+                .emit();
+            cx.sess().abort_if_errors();
+            bug!();
+        }
+        let llfn = cx.declare_cfn("main", llfty);
+
+        // `main` should respect the same config for frame pointer elimination
+        // as the rest of the code.
+        cx.set_frame_pointer_elimination(llfn);
+        cx.apply_target_cpu_attr(llfn);
+
+        let mut bx = Bx::new_block(&cx, llfn, "top");
+
+        bx.insert_reference_to_gdb_debug_scripts_section_global();
+
+        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
+
+        let (start_fn, args) = if use_start_lang_item {
+            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
+            let start_fn = cx.get_fn_addr(
+                ty::Instance::resolve(
+                    cx.tcx(),
+                    ty::ParamEnv::reveal_all(),
+                    start_def_id,
+                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
+                )
+                .unwrap()
+                .unwrap(),
+            );
+            (
+                start_fn,
+                vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
+            )
+        } else {
+            debug!("using user-defined start fn");
+            (rust_main, vec![arg_argc, arg_argv])
+        };
+
+        let result = bx.call(start_fn, &args, None);
+        let cast = bx.intcast(result, cx.type_int(), true);
+        bx.ret(cast);
+
+        llfn
+    }
+}
+
+/// Obtains the `argc` and `argv` values to pass to the Rust start function.
+fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    cx: &'a Bx::CodegenCx,
+    bx: &mut Bx,
+) -> (Bx::Value, Bx::Value) {
+    if cx.sess().target.target.options.main_needs_argc_argv {
+        // Params from native `main()` used as args for the Rust start function
+        let param_argc = bx.get_param(0);
+        let param_argv = bx.get_param(1);
+        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
+        let arg_argv = param_argv;
+        (arg_argc, arg_argv)
+    } else {
+        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
+        let arg_argc = bx.const_int(cx.type_int(), 0);
+        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+        (arg_argc, arg_argv)
+    }
+}
+
+pub const CODEGEN_WORKER_ID: usize = usize::MAX;
+
+pub fn codegen_crate<B: ExtraBackendMethods>(
+    backend: B,
+    tcx: TyCtxt<'tcx>,
+    metadata: EncodedMetadata,
+    need_metadata_module: bool,
+) -> OngoingCodegen<B> {
+    // Skip crate items and just output metadata in -Z no-codegen mode.
+    if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+        let ongoing_codegen = start_async_codegen(backend, tcx, metadata, 1);
+
+        ongoing_codegen.codegen_finished(tcx);
+
+        finalize_tcx(tcx);
+
+        ongoing_codegen.check_for_errors(tcx.sess);
+
+        return ongoing_codegen;
+    }
+
+    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+
+    // Run the monomorphization collector and partition the collected items into
+    // codegen units.
+    let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
+
+    // Force all codegen_unit queries so they are already either red or green
+    // when compile_codegen_unit accesses them. We are not able to re-execute
+    // the codegen_unit query from just the DepNode, so an unknown color would
+    // lead to having to re-execute compile_codegen_unit, possibly
+    // unnecessarily.
+    if tcx.dep_graph.is_fully_enabled() {
+        for cgu in codegen_units {
+            tcx.ensure().codegen_unit(cgu.name());
+        }
+    }
+
+    let ongoing_codegen = start_async_codegen(backend.clone(), tcx, metadata, codegen_units.len());
+    let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
+
+    // Codegen an allocator shim, if necessary.
+    //
+    // If the crate doesn't have an `allocator_kind` set then there's definitely
+    // no shim to generate. Otherwise we also check our dependency graph for all
+    // our output crate types. If anything there looks like it's a `Dynamic`
+    // linkage, then it's already got an allocator shim and we'll be using that
+    // one instead. If nothing exists then it's our job to generate the
+    // allocator!
+    let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
+        use rustc_middle::middle::dependency_format::Linkage;
+        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+    });
+    let allocator_module = if any_dynamic_crate {
+        None
+    } else if let Some(kind) = tcx.allocator_kind() {
+        let llmod_id =
+            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
+        let mut modules = backend.new_metadata(tcx, &llmod_id);
+        tcx.sess
+            .time("write_allocator_module", || backend.codegen_allocator(tcx, &mut modules, kind));
+
+        Some(ModuleCodegen { name: llmod_id, module_llvm: modules, kind: ModuleKind::Allocator })
+    } else {
+        None
+    };
+
+    if let Some(allocator_module) = allocator_module {
+        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
+    }
+
+    if need_metadata_module {
+        // Codegen the encoded metadata.
+        let metadata_cgu_name =
+            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
+        let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
+        tcx.sess.time("write_compressed_metadata", || {
+            backend.write_compressed_metadata(
+                tcx,
+                &ongoing_codegen.metadata,
+                &mut metadata_llvm_module,
+            );
+        });
+
+        let metadata_module = ModuleCodegen {
+            name: metadata_cgu_name,
+            module_llvm: metadata_llvm_module,
+            kind: ModuleKind::Metadata,
+        };
+        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
+    }
+
+    // We sort the codegen units by size. This way we can schedule work for LLVM
+    // a bit more efficiently.
+    let codegen_units = {
+        let mut codegen_units = codegen_units.iter().collect::<Vec<_>>();
+        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
+        codegen_units
+    };
+
+    let total_codegen_time = Lock::new(Duration::new(0, 0));
+
+    // The non-parallel compiler can only translate codegen units to LLVM IR
+    // on a single thread, leading to a staircase effect where the N LLVM
+    // threads have to wait on the single codegen thread to generate work
+    // for them. The parallel compiler does not have this restriction, so
+    // we can pre-load the LLVM queue in parallel before handing off
+    // coordination to the OngoingCodegen scheduler.
+    //
+    // This likely is a temporary measure. Once we don't have to support the
+    // non-parallel compiler anymore, we can compile CGUs end-to-end in
+    // parallel and get rid of the complicated scheduling logic.
+    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
+        if cfg!(parallel_compiler) {
+            tcx.sess.time("compile_first_CGU_batch", || {
+                // Try to find one CGU to compile per thread.
+                let cgus: Vec<_> = cgu_reuse
+                    .iter()
+                    .enumerate()
+                    .filter(|&(_, reuse)| reuse == &CguReuse::No)
+                    .take(tcx.sess.threads())
+                    .collect();
+
+                // Compile the found CGUs in parallel.
+                par_iter(cgus)
+                    .map(|(i, _)| {
+                        let start_time = Instant::now();
+                        let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
+                        let mut time = total_codegen_time.lock();
+                        *time += start_time.elapsed();
+                        (i, module)
+                    })
+                    .collect()
+            })
+        } else {
+            FxHashMap::default()
+        }
+    };
+
+    let mut cgu_reuse = Vec::new();
+    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
+
+    for (i, cgu) in codegen_units.iter().enumerate() {
+        ongoing_codegen.wait_for_signal_to_codegen_item();
+        ongoing_codegen.check_for_errors(tcx.sess);
+
+        // Do some setup work in the first iteration
+        if pre_compiled_cgus.is_none() {
+            // Calculate the CGU reuse
+            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
+                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
+            });
+            // Pre-compile some CGUs
+            pre_compiled_cgus = Some(pre_compile_cgus(&cgu_reuse));
+        }
+
+        let cgu_reuse = cgu_reuse[i];
+        tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+
+        match cgu_reuse {
+            CguReuse::No => {
+                let (module, cost) =
+                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
+                        cgu
+                    } else {
+                        let start_time = Instant::now();
+                        let module = backend.compile_codegen_unit(tcx, cgu.name());
+                        let mut time = total_codegen_time.lock();
+                        *time += start_time.elapsed();
+                        module
+                    };
+                submit_codegened_module_to_llvm(
+                    &backend,
+                    &ongoing_codegen.coordinator_send,
+                    module,
+                    cost,
+                );
+                false
+            }
+            CguReuse::PreLto => {
+                submit_pre_lto_module_to_llvm(
+                    &backend,
+                    tcx,
+                    &ongoing_codegen.coordinator_send,
+                    CachedModuleCodegen {
+                        name: cgu.name().to_string(),
+                        source: cgu.work_product(tcx),
+                    },
+                );
+                true
+            }
+            CguReuse::PostLto => {
+                submit_post_lto_module_to_llvm(
+                    &backend,
+                    &ongoing_codegen.coordinator_send,
+                    CachedModuleCodegen {
+                        name: cgu.name().to_string(),
+                        source: cgu.work_product(tcx),
+                    },
+                );
+                true
+            }
+        };
+    }
+
+    ongoing_codegen.codegen_finished(tcx);
+
+    // Since the main thread is sometimes blocked during codegen, we keep track
+    // of the -Ztime-passes output manually.
+    print_time_passes_entry(
+        tcx.sess.time_passes(),
+        "codegen_to_LLVM_IR",
+        total_codegen_time.into_inner(),
+    );
+
+    ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
+
+    symbol_names_test::report_symbol_names(tcx);
+
+    ongoing_codegen.check_for_errors(tcx.sess);
+
+    finalize_tcx(tcx);
+
+    ongoing_codegen.into_inner()
+}
+
+/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
+/// when it's dropped abnormally.
+///
+/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
+/// stumbled upon. The segfault was never reproduced locally, but it was
+/// suspected to be related to the fact that codegen worker threads were
+/// sticking around by the time the main thread was exiting, causing issues.
+///
+/// This structure is an attempt to fix that issue where the `codegen_aborted`
+/// message will block until all workers have finished. This should ensure that
+/// even if the main codegen thread panics we'll wait for pending work to
+/// complete before returning from the main thread, hopefully avoiding
+/// segfaults.
+///
+/// If you see this comment in the code, then it means that this workaround
+/// worked! We may yet one day track down the mysterious cause of that
+/// segfault...
+struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);
+
+impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
+    fn into_inner(mut self) -> OngoingCodegen<B> {
+        self.0.take().unwrap()
+    }
+}
+
+impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
+    type Target = OngoingCodegen<B>;
+
+    fn deref(&self) -> &OngoingCodegen<B> {
+        self.0.as_ref().unwrap()
+    }
+}
+
+impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
+    fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
+        self.0.as_mut().unwrap()
+    }
+}
+
+impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
+    fn drop(&mut self) {
+        if let Some(codegen) = self.0.take() {
+            codegen.codegen_aborted();
+        }
+    }
+}
+
+fn finalize_tcx(tcx: TyCtxt<'_>) {
+    tcx.sess.time("assert_dep_graph", || ::rustc_incremental::assert_dep_graph(tcx));
+    tcx.sess.time("serialize_dep_graph", || ::rustc_incremental::save_dep_graph(tcx));
+
+    // We assume that no queries are run past here. If there are new queries
+    // after this point, they'll show up as "<unknown>" in self-profiling data.
+    {
+        let _prof_timer = tcx.prof.generic_activity("self_profile_alloc_query_strings");
+        tcx.alloc_self_profile_query_strings();
+    }
+}
+
+impl CrateInfo {
+    pub fn new(tcx: TyCtxt<'_>) -> CrateInfo {
+        let mut info = CrateInfo {
+            panic_runtime: None,
+            compiler_builtins: None,
+            profiler_runtime: None,
+            is_no_builtins: Default::default(),
+            native_libraries: Default::default(),
+            used_libraries: tcx.native_libraries(LOCAL_CRATE),
+            link_args: tcx.link_args(LOCAL_CRATE),
+            crate_name: Default::default(),
+            used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
+            used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
+            used_crate_source: Default::default(),
+            lang_item_to_crate: Default::default(),
+            missing_lang_items: Default::default(),
+            dependency_formats: tcx.dependency_formats(LOCAL_CRATE),
+        };
+        let lang_items = tcx.lang_items();
+
+        let crates = tcx.crates();
+
+        let n_crates = crates.len();
+        info.native_libraries.reserve(n_crates);
+        info.crate_name.reserve(n_crates);
+        info.used_crate_source.reserve(n_crates);
+        info.missing_lang_items.reserve(n_crates);
+
+        for &cnum in crates.iter() {
+            info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
+            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
+            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
+            if tcx.is_panic_runtime(cnum) {
+                info.panic_runtime = Some(cnum);
+            }
+            if tcx.is_compiler_builtins(cnum) {
+                info.compiler_builtins = Some(cnum);
+            }
+            if tcx.is_profiler_runtime(cnum) {
+                info.profiler_runtime = Some(cnum);
+            }
+            if tcx.is_no_builtins(cnum) {
+                info.is_no_builtins.insert(cnum);
+            }
+            let missing = tcx.missing_lang_items(cnum);
+            for &item in missing.iter() {
+                if let Ok(id) = lang_items.require(item) {
+                    info.lang_item_to_crate.insert(item, id.krate);
+                }
+            }
+
+            // No need to look for lang items that don't actually need to exist.
+            let missing =
+                missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
+            info.missing_lang_items.insert(cnum, missing);
+        }
+
+        info
+    }
+}
+
+pub fn provide_both(providers: &mut Providers) {
+    providers.backend_optimization_level = |tcx, cratenum| {
+        let for_speed = match tcx.sess.opts.optimize {
+            // If globally no optimisation is done, #[optimize] has no effect.
+            //
+            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
+            // pass manager and it is likely that some module-wide passes (such as inliner or
+            // cross-function constant propagation) would ignore the `optnone` annotation we put
+            // on the functions, thus necessarily involving these functions into optimisations.
+            config::OptLevel::No => return config::OptLevel::No,
+            // If globally optimise-speed is already specified, just use that level.
+            config::OptLevel::Less => return config::OptLevel::Less,
+            config::OptLevel::Default => return config::OptLevel::Default,
+            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
+            // If globally optimize-for-size has been requested, use -O2 instead (for
+            // functions annotated with `#[optimize(speed)]`).
+            config::OptLevel::Size => config::OptLevel::Default,
+            config::OptLevel::SizeMin => config::OptLevel::Default,
+        };
+
+        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
+        for id in &*defids {
+            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
+            match optimize {
+                attr::OptimizeAttr::None => continue,
+                attr::OptimizeAttr::Size => continue,
+                attr::OptimizeAttr::Speed => {
+                    return for_speed;
+                }
+            }
+        }
+        tcx.sess.opts.optimize
+    };
+
+    providers.dllimport_foreign_items = |tcx, krate| {
+        let module_map = tcx.foreign_modules(krate);
+        let module_map =
+            module_map.iter().map(|lib| (lib.def_id, lib)).collect::<FxHashMap<_, _>>();
+
+        let dllimports = tcx
+            .native_libraries(krate)
+            .iter()
+            .filter(|lib| {
+                if !matches!(lib.kind, NativeLibKind::Dylib | NativeLibKind::Unspecified) {
+                    return false;
+                }
+                let cfg = match lib.cfg {
+                    Some(ref cfg) => cfg,
+                    None => return true,
+                };
+                attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
+            })
+            .filter_map(|lib| lib.foreign_module)
+            .map(|id| &module_map[&id])
+            .flat_map(|module| module.foreign_items.iter().cloned())
+            .collect();
+        dllimports
+    };
+
+    providers.is_dllimport_foreign_item =
+        |tcx, def_id| tcx.dllimport_foreign_items(def_id.krate).contains(&def_id);
+}
+
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+    if !tcx.dep_graph.is_fully_enabled() {
+        return CguReuse::No;
+    }
+
+    let work_product_id = &cgu.work_product_id();
+    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+        // We don't have anything cached for this CGU. This can happen
+        // if the CGU did not exist in the previous session.
+        return CguReuse::No;
+    }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+    // affecting the LLVM module has changed and we can re-use a cached version.
+    // If we compile with any kind of LTO, this means we can re-use the bitcode
+    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+    // know that later). If we are not doing LTO, there is only one optimized
+    // version of each module, so we re-use that.
+    let dep_node = cgu.codegen_dep_node(tcx);
+    assert!(
+        !tcx.dep_graph.dep_node_exists(&dep_node),
+        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+        cgu.name()
+    );
+
+    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+        // We can re-use either the pre- or the post-ThinLTO state. If no LTO is
+        // being performed then we can use post-LTO artifacts, otherwise we must
+        // reuse pre-LTO artifacts.
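+        //
+        // For example (illustrative, not from the original source): under
+        // `-C lto=thin` a green CGU is reused as `CguReuse::PreLto`, while a
+        // build without LTO reuses it as `CguReuse::PostLto`.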
+        match compute_per_cgu_lto_type(
+            &tcx.sess.lto(),
+            &tcx.sess.opts,
+            &tcx.sess.crate_types(),
+            ModuleKind::Regular,
+        ) {
+            ComputedLtoType::No => CguReuse::PostLto,
+            _ => CguReuse::PreLto,
+        }
+    } else {
+        CguReuse::No
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
new file mode 100644
index 00000000000..e04ed531bbf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -0,0 +1,197 @@
+#![allow(non_camel_case_types, non_snake_case)]
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::LangItem;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::Span;
+
+use crate::base;
+use crate::traits::BuilderMethods;
+use crate::traits::*;
+
+pub enum IntPredicate {
+    IntEQ,
+    IntNE,
+    IntUGT,
+    IntUGE,
+    IntULT,
+    IntULE,
+    IntSGT,
+    IntSGE,
+    IntSLT,
+    IntSLE,
+}
+
+#[allow(dead_code)]
+pub enum RealPredicate {
+    RealPredicateFalse,
+    RealOEQ,
+    RealOGT,
+    RealOGE,
+    RealOLT,
+    RealOLE,
+    RealONE,
+    RealORD,
+    RealUNO,
+    RealUEQ,
+    RealUGT,
+    RealUGE,
+    RealULT,
+    RealULE,
+    RealUNE,
+    RealPredicateTrue,
+}
+
+pub enum AtomicRmwBinOp {
+    AtomicXchg,
+    AtomicAdd,
+    AtomicSub,
+    AtomicAnd,
+    AtomicNand,
+    AtomicOr,
+    AtomicXor,
+    AtomicMax,
+    AtomicMin,
+    AtomicUMax,
+    AtomicUMin,
+}
+
+pub enum AtomicOrdering {
+    #[allow(dead_code)]
+    NotAtomic,
+    Unordered,
+    Monotonic,
+    // Consume,  // Not specified yet.
+    Acquire,
+    Release,
+    AcquireRelease,
+    SequentiallyConsistent,
+}
+
+pub enum SynchronizationScope {
+    SingleThread,
+    CrossThread,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum TypeKind {
+    Void,
+    Half,
+    Float,
+    Double,
+    X86_FP80,
+    FP128,
+    PPC_FP128,
+    Label,
+    Integer,
+    Function,
+    Struct,
+    Array,
+    Pointer,
+    Vector,
+    Metadata,
+    X86_MMX,
+    Token,
+    ScalableVector,
+    BFloat,
+}
+
+// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement
+//            the HashStable trait. Normally DepGraph::with_task() calls are
+//            hidden behind queries, but CGU creation is a special case in two
+//            ways: (1) it's not a query and (2) CGU are output nodes, so their
+//            Fingerprints are not actually needed. It remains to be clarified
+//            how exactly this case will be handled in the red/green system but
+//            for now we content ourselves with providing a no-op HashStable
+//            implementation for CGUs.
+mod temp_stable_hash_impls {
+    use crate::ModuleCodegen;
+    use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+
+    impl<HCX, M> HashStable<HCX> for ModuleCodegen<M> {
+        fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+            // do nothing
+        }
+    }
+}
+
+pub fn langcall(tcx: TyCtxt<'_>, span: Option<Span>, msg: &str, li: LangItem) -> DefId {
+    tcx.lang_items().require(li).unwrap_or_else(|s| {
+        let msg = format!("{} {}", msg, s);
+        match span {
+            Some(span) => tcx.sess.span_fatal(span, &msg[..]),
+            None => tcx.sess.fatal(&msg[..]),
+        }
+    })
+}
+
+// To avoid UB from LLVM, these two functions mask RHS with an
+// appropriate mask unconditionally (i.e., the fallback behavior for
+// all shifts). For 32- and 64-bit types, this matches the semantics
+// of Java. (See related discussion on #1877 and #10183.)
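+//
+// A sketch of the effect (illustrative, not from the original source): with a
+// 32-bit shift amount the mask is 31, so `x << 37` is emitted as
+// `x << (37 & 31)`, i.e. `x << 5`, rather than hitting LLVM's undefined
+// behavior for out-of-range shift amounts.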
+
+pub fn build_unchecked_lshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    lhs: Bx::Value,
+    rhs: Bx::Value,
+) -> Bx::Value {
+    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
+    // #1877, #10183: Ensure that input is always valid
+    let rhs = shift_mask_rhs(bx, rhs);
+    bx.shl(lhs, rhs)
+}
+
+pub fn build_unchecked_rshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    lhs_t: Ty<'tcx>,
+    lhs: Bx::Value,
+    rhs: Bx::Value,
+) -> Bx::Value {
+    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
+    // #1877, #10183: Ensure that input is always valid
+    let rhs = shift_mask_rhs(bx, rhs);
+    let is_signed = lhs_t.is_signed();
+    if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
+}
+
+fn shift_mask_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    rhs: Bx::Value,
+) -> Bx::Value {
+    let rhs_llty = bx.val_ty(rhs);
+    let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
+    bx.and(rhs, shift_val)
+}
+
+pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    llty: Bx::Type,
+    mask_llty: Bx::Type,
+    invert: bool,
+) -> Bx::Value {
+    let kind = bx.type_kind(llty);
+    match kind {
+        TypeKind::Integer => {
+            // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
+            let val = bx.int_width(llty) - 1;
+            if invert {
+                bx.const_int(mask_llty, !val as i64)
+            } else {
+                bx.const_uint(mask_llty, val)
+            }
+        }
+        TypeKind::Vector => {
+            let mask =
+                shift_mask_val(bx, bx.element_type(llty), bx.element_type(mask_llty), invert);
+            bx.vector_splat(bx.vector_length(mask_llty), mask)
+        }
+        _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
+    }
+}
+
+pub fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
+    struct_span_err!(a, b, E0511, "{}", c).emit();
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
new file mode 100644
index 00000000000..a266d179a42
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
@@ -0,0 +1,67 @@
+use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+
+/// Aligns with [llvm::coverage::Counter::CounterKind](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L91)
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+enum CounterKind {
+    Zero = 0,
+    CounterValueReference = 1,
+    Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+///   * For `CounterKind::Zero`, `id` is assumed to be `0`
+///   * For `CounterKind::CounterValueReference`,  `id` matches the `counter_id` of the injected
+///     instrumentation counter (the `index` argument to the LLVM intrinsic
+///     `instrprof.increment()`)
+///   * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+///     counter expressions.
+/// Aligns with [llvm::coverage::Counter](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L98-L99)
+/// Important: The Rust struct layout (order and types of fields) must match its C++ counterpart.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+    // Important: The layout (order and types of fields) must match its C++ counterpart.
+    kind: CounterKind,
+    id: u32,
+}
+
+impl Counter {
+    pub fn zero() -> Self {
+        Self { kind: CounterKind::Zero, id: 0 }
+    }
+
+    pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
+        Self { kind: CounterKind::CounterValueReference, id: counter_id.into() }
+    }
+
+    pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
+        Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
+    }
+}
+
+/// Aligns with [llvm::coverage::CounterExpression::ExprKind](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L146)
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+    Subtract = 0,
+    Add = 1,
+}
+
+/// Aligns with [llvm::coverage::CounterExpression](https://github.com/rust-lang/llvm-project/blob/rustc/10.0-2020-05-05/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L147-L148)
+/// Important: The Rust struct layout (order and types of fields) must match its C++
+/// counterpart.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+    kind: ExprKind,
+    lhs: Counter,
+    rhs: Counter,
+}
+
+impl CounterExpression {
+    pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
+        Self { kind, lhs, rhs }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
new file mode 100644
index 00000000000..814e43c5fa5
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -0,0 +1,205 @@
+pub use super::ffi::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::coverage::{
+    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionIndex,
+    MappedExpressionIndex, Op,
+};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(Clone, Debug)]
+pub struct ExpressionRegion {
+    lhs: ExpressionOperandId,
+    op: Op,
+    rhs: ExpressionOperandId,
+    region: CodeRegion,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (additions or subtraction), and (c) unreachable regions (always counted as zero),
+/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
+/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// computed during instrumentation, and forwarded with counters.
+///
+/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
+/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
+/// or expression), but the line or lines in the gap region are not executable (such as lines with
+/// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
+/// for a gap area is only used as the line execution count if there are no other regions on a
+/// line."
+pub struct FunctionCoverage {
+    source_hash: u64,
+    counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
+    expressions: IndexVec<InjectedExpressionIndex, Option<ExpressionRegion>>,
+    unreachable_regions: Vec<CodeRegion>,
+}
+
+impl FunctionCoverage {
+    pub fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        let coverageinfo = tcx.coverageinfo(instance.def_id());
+        Self {
+            source_hash: 0, // will be set with the first `add_counter()`
+            counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
+            expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
+            unreachable_regions: Vec::new(),
+        }
+    }
+
+    /// Adds a code region to be counted by an injected counter intrinsic.
+    /// The source_hash (computed during coverage instrumentation) should also be provided, and
+    /// should be the same for all counters in a given function.
+    pub fn add_counter(&mut self, source_hash: u64, id: CounterValueReference, region: CodeRegion) {
+        if self.source_hash == 0 {
+            self.source_hash = source_hash;
+        } else {
+            debug_assert_eq!(source_hash, self.source_hash);
+        }
+        self.counters[id].replace(region).expect_none("add_counter called with duplicate `id`");
+    }
+
+    /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+    /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
+    /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
+    /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
+    /// knowing what the last counter ID will be.
+    ///
+    /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
+    /// struct, its vector index is computed, from the given expression ID, by subtracting from
+    /// `u32::MAX`.
+    ///
+    /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
+    /// expressions, an operand that references an expression also uses its original ID, descending
+    /// from `u32::MAX`. These operands are translated only during code generation, after all
+    /// counters and expressions have been added.
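+    ///
+    /// A worked example (illustrative, not from the original source): with two
+    /// counters (IDs `0` and `1`), the first two expressions receive the IDs
+    /// `u32::MAX` and `u32::MAX - 1`, which map to `expressions` vector indexes
+    /// `0` and `1` respectively.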
+    pub fn add_counter_expression(
+        &mut self,
+        expression_id: InjectedExpressionIndex,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+        region: CodeRegion,
+    ) {
+        let expression_index = self.expression_index(u32::from(expression_id));
+        self.expressions[expression_index]
+            .replace(ExpressionRegion { lhs, op, rhs, region })
+            .expect_none("add_counter_expression called with duplicate `id_descending_from_max`");
+    }
+
+    /// Add a region that will be marked as "unreachable", with a constant "zero counter".
+    pub fn add_unreachable_region(&mut self, region: CodeRegion) {
+        self.unreachable_regions.push(region)
+    }
+
+    /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+    /// or not the source code structure changed between different compilations.
+    pub fn source_hash(&self) -> u64 {
+        self.source_hash
+    }
+
+    /// Generate an array of CounterExpressions, and an iterator over all `Counter`s and their
+    /// associated `Regions` (from which the LLVM-specific `CoverageMapGenerator` will create
+    /// `CounterMappingRegion`s).
+    pub fn get_expressions_and_counter_regions<'a>(
+        &'a self,
+    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &'a CodeRegion)>) {
+        assert!(self.source_hash != 0);
+
+        let counter_regions = self.counter_regions();
+        let (counter_expressions, expression_regions) = self.expressions_with_regions();
+        let unreachable_regions = self.unreachable_regions();
+
+        let counter_regions =
+            counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
+        (counter_expressions, counter_regions)
+    }
+
+    fn counter_regions<'a>(&'a self) -> impl Iterator<Item = (Counter, &'a CodeRegion)> {
+        self.counters.iter_enumerated().filter_map(|(index, entry)| {
+            // Option::map() will return None to filter out missing counters. This may happen
+            // if, for example, a MIR-instrumented counter is removed during an optimization.
+            entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
+        })
+    }
+
+    fn expressions_with_regions(
+        &'a self,
+    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &'a CodeRegion)>) {
+        let mut counter_expressions = Vec::with_capacity(self.expressions.len());
+        let mut expression_regions = Vec::with_capacity(self.expressions.len());
+        let mut new_indexes =
+            IndexVec::from_elem_n(MappedExpressionIndex::from(u32::MAX), self.expressions.len());
+        // Note, the initial value shouldn't matter since every index in use in `self.expressions`
+        // will be set, and after that, `new_indexes` will only be accessed using those same
+        // indexes.
+
+        // Note that an `ExpressionRegion` at any given index can include other expressions as
+        // operands, but expression operands can only come from the subset of expressions having
+        // `expression_index`s lower than the referencing `ExpressionRegion`. Therefore, it is
+        // reasonable to look up the new index of an expression operand while the `new_indexes`
+        // vector is only complete up to the current `ExpressionIndex`.
+        let id_to_counter =
+            |new_indexes: &IndexVec<InjectedExpressionIndex, MappedExpressionIndex>,
+             id: ExpressionOperandId| {
+                if id.index() < self.counters.len() {
+                    let index = CounterValueReference::from(id.index());
+                    self.counters
+                        .get(index)
+                        .unwrap() // pre-validated
+                        .as_ref()
+                        .map(|_| Counter::counter_value_reference(index))
+                } else {
+                    let index = self.expression_index(u32::from(id));
+                    self.expressions
+                        .get(index)
+                        .expect("expression id is out of range")
+                        .as_ref()
+                        .map(|_| Counter::expression(new_indexes[index]))
+                }
+            };
+
+        for (original_index, expression_region) in
+            self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
+                // Option::map() will return None to filter out missing expressions. This may happen
+                // if, for example, a MIR-instrumented expression is removed during an optimization.
+                entry.as_ref().map(|region| (original_index, region))
+            })
+        {
+            let region = &expression_region.region;
+            let ExpressionRegion { lhs, op, rhs, .. } = *expression_region;
+
+            if let Some(Some((lhs_counter, rhs_counter))) =
+                id_to_counter(&new_indexes, lhs).map(|lhs_counter| {
+                    id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
+                })
+            {
+                // Both operands exist. `Expression` operands exist in `self.expressions` and have
+                // been assigned a `new_index`.
+                let mapped_expression_index =
+                    MappedExpressionIndex::from(counter_expressions.len());
+                counter_expressions.push(CounterExpression::new(
+                    lhs_counter,
+                    match op {
+                        Op::Add => ExprKind::Add,
+                        Op::Subtract => ExprKind::Subtract,
+                    },
+                    rhs_counter,
+                ));
+                new_indexes[original_index] = mapped_expression_index;
+                expression_regions.push((Counter::expression(mapped_expression_index), region));
+            }
+        }
+        (counter_expressions, expression_regions.into_iter())
+    }
+
+    fn unreachable_regions<'a>(&'a self) -> impl Iterator<Item = (Counter, &'a CodeRegion)> {
+        self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+    }
+
+    fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
+        debug_assert!(id_descending_from_max >= self.counters.len() as u32);
+        InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs
new file mode 100644
index 00000000000..569fd3f1a51
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs
@@ -0,0 +1,2 @@
+pub mod ffi;
+pub mod map;
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs b/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs
new file mode 100644
index 00000000000..d1a0cf78d6a
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs
@@ -0,0 +1,2 @@
+// FIXME(eddyb) find a place for this (or a way to replace it).
+pub mod type_names;
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
new file mode 100644
index 00000000000..fb8f5a62989
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -0,0 +1,266 @@
+// Type Names for Debug Info.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, subst::SubstsRef, Ty, TyCtxt};
+
+// Compute the name of the type as it should be stored in debuginfo. Does not do
+// any caching, i.e., calling the function twice with the same type will also do
+// the work twice. The `qualified` parameter only affects the first level of the
+// type name, further levels (i.e., type parameters) are always fully qualified.
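+//
+// For example (illustrative): `&u32` is rendered as "&u32", while on MSVC-like
+// targets a C++-style "u32*" is produced instead (see `cpp_like_names` in
+// `push_debuginfo_type_name` below).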
+pub fn compute_debuginfo_type_name<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    t: Ty<'tcx>,
+    qualified: bool,
+) -> String {
+    let mut result = String::with_capacity(64);
+    let mut visited = FxHashSet::default();
+    push_debuginfo_type_name(tcx, t, qualified, &mut result, &mut visited);
+    result
+}
+
+// Pushes the name of the type as it should be stored in debuginfo onto the
+// `output` String. See also compute_debuginfo_type_name().
+pub fn push_debuginfo_type_name<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    t: Ty<'tcx>,
+    qualified: bool,
+    output: &mut String,
+    visited: &mut FxHashSet<Ty<'tcx>>,
+) {
+    // When targeting MSVC, emit C++ style type names for compatibility with
+    // .natvis visualizers (and perhaps other existing native debuggers?)
+    let cpp_like_names = tcx.sess.target.target.options.is_like_msvc;
+
+    match t.kind {
+        ty::Bool => output.push_str("bool"),
+        ty::Char => output.push_str("char"),
+        ty::Str => output.push_str("str"),
+        ty::Never => output.push_str("!"),
+        ty::Int(int_ty) => output.push_str(int_ty.name_str()),
+        ty::Uint(uint_ty) => output.push_str(uint_ty.name_str()),
+        ty::Float(float_ty) => output.push_str(float_ty.name_str()),
+        ty::Foreign(def_id) => push_item_name(tcx, def_id, qualified, output),
+        ty::Adt(def, substs) => {
+            push_item_name(tcx, def.did, qualified, output);
+            push_type_params(tcx, substs, output, visited);
+        }
+        ty::Tuple(component_types) => {
+            if cpp_like_names {
+                output.push_str("tuple<");
+            } else {
+                output.push('(');
+            }
+
+            for component_type in component_types {
+                push_debuginfo_type_name(tcx, component_type.expect_ty(), true, output, visited);
+                output.push_str(", ");
+            }
+            if !component_types.is_empty() {
+                output.pop();
+                output.pop();
+            }
+
+            if cpp_like_names {
+                output.push('>');
+            } else {
+                output.push(')');
+            }
+        }
+        ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl }) => {
+            if !cpp_like_names {
+                output.push('*');
+            }
+            match mutbl {
+                hir::Mutability::Not => output.push_str("const "),
+                hir::Mutability::Mut => output.push_str("mut "),
+            }
+
+            push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+
+            if cpp_like_names {
+                output.push('*');
+            }
+        }
+        ty::Ref(_, inner_type, mutbl) => {
+            if !cpp_like_names {
+                output.push('&');
+            }
+            output.push_str(mutbl.prefix_str());
+
+            push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+
+            if cpp_like_names {
+                output.push('*');
+            }
+        }
+        ty::Array(inner_type, len) => {
+            output.push('[');
+            push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+            output.push_str(&format!("; {}", len.eval_usize(tcx, ty::ParamEnv::reveal_all())));
+            output.push(']');
+        }
+        ty::Slice(inner_type) => {
+            if cpp_like_names {
+                output.push_str("slice<");
+            } else {
+                output.push('[');
+            }
+
+            push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+
+            if cpp_like_names {
+                output.push('>');
+            } else {
+                output.push(']');
+            }
+        }
+        ty::Dynamic(ref trait_data, ..) => {
+            if let Some(principal) = trait_data.principal() {
+                let principal = tcx
+                    .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &principal);
+                push_item_name(tcx, principal.def_id, false, output);
+                push_type_params(tcx, principal.substs, output, visited);
+            } else {
+                output.push_str("dyn '_");
+            }
+        }
+        ty::FnDef(..) | ty::FnPtr(_) => {
+            // We've encountered a weird 'recursive type'
+            // Currently, the only way to generate such a type
+            // is by using 'impl trait':
+            //
+            // fn foo() -> impl Copy { foo }
+            //
+            // There's not really a sensible name we can generate,
+            // since we don't include 'impl trait' types (e.g. ty::Opaque)
+            // in the output
+            //
+            // Since we need to generate *something*, we just
+            // use a dummy string that should make it clear
+            // that something unusual is going on
+            if !visited.insert(t) {
+                output.push_str("<recursive_type>");
+                return;
+            }
+
+            let sig = t.fn_sig(tcx);
+            output.push_str(sig.unsafety().prefix_str());
+
+            let abi = sig.abi();
+            if abi != rustc_target::spec::abi::Abi::Rust {
+                output.push_str("extern \"");
+                output.push_str(abi.name());
+                output.push_str("\" ");
+            }
+
+            output.push_str("fn(");
+
+            let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
+            if !sig.inputs().is_empty() {
+                for &parameter_type in sig.inputs() {
+                    push_debuginfo_type_name(tcx, parameter_type, true, output, visited);
+                    output.push_str(", ");
+                }
+                output.pop();
+                output.pop();
+            }
+
+            if sig.c_variadic {
+                if !sig.inputs().is_empty() {
+                    output.push_str(", ...");
+                } else {
+                    output.push_str("...");
+                }
+            }
+
+            output.push(')');
+
+            if !sig.output().is_unit() {
+                output.push_str(" -> ");
+                push_debuginfo_type_name(tcx, sig.output(), true, output, visited);
+            }
+
+            // We only keep the type in 'visited'
+            // for the duration of the body of this method.
+            // It's fine for a particular function type
+            // to show up multiple times in one overall type
+            // (e.g. MyType<fn() -> u8, fn() -> u8>)
+            //
+            // We only care about avoiding recursing
+            // directly back to the type we're currently
+            // processing
+            visited.remove(t);
+        }
+        ty::Closure(def_id, ..) => {
+            output.push_str(&format!(
+                "closure-{}",
+                tcx.def_key(def_id).disambiguated_data.disambiguator
+            ));
+        }
+        ty::Generator(def_id, ..) => {
+            output.push_str(&format!(
+                "generator-{}",
+                tcx.def_key(def_id).disambiguated_data.disambiguator
+            ));
+        }
+        // Type parameters from polymorphized functions.
+        ty::Param(_) => {
+            output.push_str(&format!("{:?}", t));
+        }
+        ty::Error(_)
+        | ty::Infer(_)
+        | ty::Placeholder(..)
+        | ty::Projection(..)
+        | ty::Bound(..)
+        | ty::Opaque(..)
+        | ty::GeneratorWitness(..) => {
+            bug!(
+                "debuginfo: Trying to create type name for \
+                  unexpected type: {:?}",
+                t
+            );
+        }
+    }
+
+    fn push_item_name(tcx: TyCtxt<'tcx>, def_id: DefId, qualified: bool, output: &mut String) {
+        if qualified {
+            output.push_str(&tcx.crate_name(def_id.krate).as_str());
+            for path_element in tcx.def_path(def_id).data {
+                output.push_str("::");
+                output.push_str(&path_element.data.as_symbol().as_str());
+            }
+        } else {
+            output.push_str(&tcx.item_name(def_id).as_str());
+        }
+    }
+
+    // Pushes the type parameters in the given `InternalSubsts` to the output string.
+    // This ignores region parameters, since they can't reliably be
+    // reconstructed for items from non-local crates. For local crates, this
+    // would be possible but with inlining and LTO we have to use the least
+    // common denominator - otherwise we would run into conflicts.
+    fn push_type_params<'tcx>(
+        tcx: TyCtxt<'tcx>,
+        substs: SubstsRef<'tcx>,
+        output: &mut String,
+        visited: &mut FxHashSet<Ty<'tcx>>,
+    ) {
+        if substs.types().next().is_none() {
+            return;
+        }
+
+        output.push('<');
+
+        for type_parameter in substs.types() {
+            push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
+            output.push_str(", ");
+        }
+
+        output.pop();
+        output.pop();
+
+        output.push('>');
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/glue.rs b/compiler/rustc_codegen_ssa/src/glue.rs
new file mode 100644
index 00000000000..5b086bc43ff
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/glue.rs
@@ -0,0 +1,109 @@
+//! Code relating to drop glue.
+
+use crate::common::IntPredicate;
+use crate::meth;
+use crate::traits::*;
+use rustc_middle::ty::{self, Ty};
+
+pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    t: Ty<'tcx>,
+    info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+    let layout = bx.layout_of(t);
+    debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout);
+    if !layout.is_unsized() {
+        let size = bx.const_usize(layout.size.bytes());
+        let align = bx.const_usize(layout.align.abi.bytes());
+        return (size, align);
+    }
+    match t.kind {
+        ty::Dynamic(..) => {
+            // load size/align from vtable
+            let vtable = info.unwrap();
+            (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
+        }
+        ty::Slice(_) | ty::Str => {
+            let unit = layout.field(bx, 0);
+            // The info in this case is the element count (the length of the
+            // slice or str), so the size is that times the unit size.
+            (
+                bx.mul(info.unwrap(), bx.const_usize(unit.size.bytes())),
+                bx.const_usize(unit.align.abi.bytes()),
+            )
+        }
+        _ => {
+            // First get the size of all statically known fields.
+            // Don't use size_of because it also rounds up to alignment, which we
+            // want to avoid, as the unsized field's alignment could be smaller.
+            assert!(!t.is_simd());
+            debug!("DST {} layout: {:?}", t, layout);
+
+            let i = layout.fields.count() - 1;
+            let sized_size = layout.fields.offset(i).bytes();
+            let sized_align = layout.align.abi.bytes();
+            debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align);
+            let sized_size = bx.const_usize(sized_size);
+            let sized_align = bx.const_usize(sized_align);
+
+            // Recurse to get the size of the dynamically sized field (must be
+            // the last field).
+            let field_ty = layout.field(bx, i).ty;
+            let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
+
+            // FIXME (#26403, #27023): We should be adding padding
+            // to `sized_size` (to accommodate the `unsized_align`
+            // required of the unsized field that follows) before
+            // summing it with `sized_size`. (Note that since #26403
+            // is unfixed, we do not yet add the necessary padding
+            // here. But this is where the add would go.)
+
+            // Return the sum of sizes and max of aligns.
+            let size = bx.add(sized_size, unsized_size);
+
+            // Packed types ignore the alignment of their fields.
+            if let ty::Adt(def, _) = t.kind {
+                if def.repr.packed() {
+                    unsized_align = sized_align;
+                }
+            }
+
+            // Choose max of two known alignments (combined value must
+            // be aligned according to more restrictive of the two).
+            let align = match (
+                bx.const_to_opt_u128(sized_align, false),
+                bx.const_to_opt_u128(unsized_align, false),
+            ) {
+                (Some(sized_align), Some(unsized_align)) => {
+                    // If both alignments are constant (the sized_align should always be), then
+                    // pick the correct alignment statically.
+                    bx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+                }
+                _ => {
+                    let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);
+                    bx.select(cmp, sized_align, unsized_align)
+                }
+            };
+
+            // Issue #27023: must add any necessary padding to `size`
+            // (to make it a multiple of `align`) before returning it.
+            //
+            // Namely, the returned size should be, in C notation:
+            //
+            //   `size + ((size & (align-1)) ? align : 0)`
+            //
+            // emulated via the semi-standard fast bit trick:
+            //
+            //   `(size + (align-1)) & -align`
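+            //
+            // A worked instance (illustrative): size = 5, align = 4 gives
+            // (5 + 3) & -4 = 8 & ...11111100 = 8.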
+            let one = bx.const_usize(1);
+            let addend = bx.sub(align, one);
+            let add = bx.add(size, addend);
+            let neg = bx.neg(align);
+            let size = bx.and(add, neg);
+
+            (size, align)
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
new file mode 100644
index 00000000000..73e33369175
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -0,0 +1,171 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
+#![feature(bool_to_option)]
+#![feature(option_expect_none)]
+#![feature(box_patterns)]
+#![feature(try_blocks)]
+#![feature(in_band_lifetimes)]
+#![feature(nll)]
+#![feature(or_patterns)]
+#![feature(trusted_len)]
+#![feature(associated_type_bounds)]
+#![feature(const_fn)] // for rustc_index::newtype_index
+#![feature(const_panic)] // for rustc_index::newtype_index
+#![recursion_limit = "256"]
+
+//! This crate contains codegen code that is used by all codegen backends (LLVM and others).
+//! The backend-agnostic functions of this crate use functions defined in various traits that
+//! have to be implemented by each backend.
+
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::LangItem;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::cstore::{CrateSource, LibSource, NativeLib};
+use rustc_middle::middle::dependency_format::Dependencies;
+use rustc_middle::ty::query::Providers;
+use rustc_session::config::{OutputFilenames, OutputType, RUST_CGU_EXT};
+use rustc_span::symbol::Symbol;
+use std::path::{Path, PathBuf};
+
+pub mod back;
+pub mod base;
+pub mod common;
+pub mod coverageinfo;
+pub mod debuginfo;
+pub mod glue;
+pub mod meth;
+pub mod mir;
+pub mod mono_item;
+pub mod traits;
+
+pub struct ModuleCodegen<M> {
+    /// The name of the module. When the crate may be saved between
+    /// compilations, incremental compilation requires that the name be
+    /// unique amongst **all** crates. Therefore, it should contain
+    /// something unique to this crate (e.g., a module path) as well
+    /// as the crate name and disambiguator.
+    /// We currently generate these names via CodegenUnit::build_cgu_name().
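+    /// (An illustrative example of such a name: `mycrate.7rcbfp3g-cgu.0`.)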
+    pub name: String,
+    pub module_llvm: M,
+    pub kind: ModuleKind,
+}
+
+// FIXME(eddyb) maybe include the crate name in this?
+pub const METADATA_FILENAME: &str = "lib.rmeta";
+
+impl<M> ModuleCodegen<M> {
+    pub fn into_compiled_module(
+        self,
+        emit_obj: bool,
+        emit_bc: bool,
+        outputs: &OutputFilenames,
+    ) -> CompiledModule {
+        let object = emit_obj.then(|| outputs.temp_path(OutputType::Object, Some(&self.name)));
+        let bytecode = emit_bc.then(|| outputs.temp_path(OutputType::Bitcode, Some(&self.name)));
+
+        CompiledModule { name: self.name.clone(), kind: self.kind, object, bytecode }
+    }
+}
+
+#[derive(Debug, Encodable, Decodable)]
+pub struct CompiledModule {
+    pub name: String,
+    pub kind: ModuleKind,
+    pub object: Option<PathBuf>,
+    pub bytecode: Option<PathBuf>,
+}
+
+pub struct CachedModuleCodegen {
+    pub name: String,
+    pub source: WorkProduct,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Encodable, Decodable)]
+pub enum ModuleKind {
+    Regular,
+    Metadata,
+    Allocator,
+}
+
+bitflags::bitflags! {
+    pub struct MemFlags: u8 {
+        const VOLATILE = 1 << 0;
+        const NONTEMPORAL = 1 << 1;
+        const UNALIGNED = 1 << 2;
+    }
+}
+
+/// Misc info we load from metadata to persist beyond the tcx.
+///
+/// Note: though `CrateNum` is only meaningful within the same tcx, information within `CrateInfo`
+/// is self-contained. `CrateNum` can be viewed as a unique identifier within a `CrateInfo`, where
+/// `used_crate_source` contains all `CrateSource` of the dependents, and maintains a mapping from
+/// identifiers (`CrateNum`) to `CrateSource`. The other fields map `CrateNum` to the crate's own
+/// additional properties, so that effectively we can retrieve each dependent crate's `CrateSource`
+/// and the corresponding properties without referencing information outside of a `CrateInfo`.
+#[derive(Debug, Encodable, Decodable)]
+pub struct CrateInfo {
+    pub panic_runtime: Option<CrateNum>,
+    pub compiler_builtins: Option<CrateNum>,
+    pub profiler_runtime: Option<CrateNum>,
+    pub is_no_builtins: FxHashSet<CrateNum>,
+    pub native_libraries: FxHashMap<CrateNum, Lrc<Vec<NativeLib>>>,
+    pub crate_name: FxHashMap<CrateNum, String>,
+    pub used_libraries: Lrc<Vec<NativeLib>>,
+    pub link_args: Lrc<Vec<String>>,
+    pub used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
+    pub used_crates_static: Vec<(CrateNum, LibSource)>,
+    pub used_crates_dynamic: Vec<(CrateNum, LibSource)>,
+    pub lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
+    pub missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
+    pub dependency_formats: Lrc<Dependencies>,
+}
+
+#[derive(Encodable, Decodable)]
+pub struct CodegenResults {
+    pub crate_name: Symbol,
+    pub modules: Vec<CompiledModule>,
+    pub allocator_module: Option<CompiledModule>,
+    pub metadata_module: Option<CompiledModule>,
+    pub crate_hash: Svh,
+    pub metadata: rustc_middle::middle::cstore::EncodedMetadata,
+    pub windows_subsystem: Option<String>,
+    pub linker_info: back::linker::LinkerInfo,
+    pub crate_info: CrateInfo,
+}
+
+pub fn provide(providers: &mut Providers) {
+    crate::back::symbol_export::provide(providers);
+    crate::base::provide_both(providers);
+}
+
+pub fn provide_extern(providers: &mut Providers) {
+    crate::back::symbol_export::provide_extern(providers);
+    crate::base::provide_both(providers);
+}
+
+/// Checks if the given filename ends with the `.rcgu.o` extension that `rustc`
+/// uses for the object files it generates.
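+///
+/// For example (illustrative): `foo.bar.rcgu.o` matches, while `foo.o` and
+/// `foo.rcgu.rlib` do not.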
+pub fn looks_like_rust_object_file(filename: &str) -> bool {
+    let path = Path::new(filename);
+    let ext = path.extension().and_then(|s| s.to_str());
+    if ext != Some(OutputType::Object.extension()) {
+        // The file name does not end with ".o", so it can't be an object file.
+        return false;
+    }
+
+    // Strip the ".o" at the end
+    let ext2 = path.file_stem().and_then(|s| Path::new(s).extension()).and_then(|s| s.to_str());
+
+    // Check if the "inner" extension
+    ext2 == Some(RUST_CGU_EXT)
+}
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
new file mode 100644
index 00000000000..bcc19c6a44b
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -0,0 +1,126 @@
+use crate::traits::*;
+
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_target::abi::call::FnAbi;
+
+#[derive(Copy, Clone, Debug)]
+pub struct VirtualIndex(u64);
+
+pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
+pub const SIZE: VirtualIndex = VirtualIndex(1);
+pub const ALIGN: VirtualIndex = VirtualIndex(2);
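+
+// The three constants above name the fixed vtable slots: the drop-in-place
+// pointer (slot 0), the size (slot 1), and the alignment (slot 2). Trait
+// method entries start at slot 3, which is why `from_index` below adds 3.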
+
+impl<'a, 'tcx> VirtualIndex {
+    pub fn from_index(index: usize) -> Self {
+        VirtualIndex(index as u64 + 3)
+    }
+
+    pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
+        self,
+        bx: &mut Bx,
+        llvtable: Bx::Value,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+    ) -> Bx::Value {
+        // Load the function pointer from the vtable.
+        debug!("get_fn({:?}, {:?})", llvtable, self);
+
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
+        let ptr = bx.load(gep, ptr_align);
+        bx.nonnull_metadata(ptr);
+        // Vtable loads are invariant.
+        bx.set_invariant_load(ptr);
+        ptr
+    }
+
+    pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
+        self,
+        bx: &mut Bx,
+        llvtable: Bx::Value,
+    ) -> Bx::Value {
+        // Load the usize value from the vtable.
+        debug!("get_usize({:?}, {:?})", llvtable, self);
+
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+        let usize_align = bx.tcx().data_layout.pointer_align.abi;
+        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
+        let ptr = bx.load(gep, usize_align);
+        // Vtable loads are invariant.
+        bx.set_invariant_load(ptr);
+        ptr
+    }
+}
+
+/// Creates a dynamic vtable for the given type and vtable origin.
+/// This is used only for objects.
+///
+/// The vtables are cached instead of created on every call.
+///
+/// The `trait_ref` encodes the erased self type. Hence if we are
+/// making an object `Foo<dyn Trait>` from a value of type `Foo<T>`, then
+/// `trait_ref` would map `T: Trait`.
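+///
+/// Schematically, the vtable built below is (illustrative):
+///
+///     [drop_in_place_fn, size, align, method_0, method_1, ...]
+///
+/// with a null pointer in any method slot that cannot be called on the trait
+/// object.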
+pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
+    cx: &Cx,
+    ty: Ty<'tcx>,
+    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Cx::Value {
+    let tcx = cx.tcx();
+
+    debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
+
+    // Check the cache.
+    if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) {
+        return val;
+    }
+
+    // Not in the cache; build it.
+    let nullptr = cx.const_null(cx.type_i8p_ext(cx.data_layout().instruction_address_space));
+
+    let methods_root;
+    let methods = if let Some(trait_ref) = trait_ref {
+        methods_root = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
+        methods_root.iter()
+    } else {
+        (&[]).iter()
+    };
+
+    let methods = methods.cloned().map(|opt_mth| {
+        opt_mth.map_or(nullptr, |(def_id, substs)| {
+            cx.get_fn_addr(
+                ty::Instance::resolve_for_vtable(
+                    cx.tcx(),
+                    ty::ParamEnv::reveal_all(),
+                    def_id,
+                    substs,
+                )
+                .unwrap()
+                .polymorphize(cx.tcx()),
+            )
+        })
+    });
+
+    let layout = cx.layout_of(ty);
+    // /////////////////////////////////////////////////////////////////////////////////////////////
+    // If you touch this code, be sure to also make the corresponding changes to
+    // `get_vtable` in `rustc_mir/interpret/traits.rs`.
+    // /////////////////////////////////////////////////////////////////////////////////////////////
+    let components: Vec<_> = [
+        cx.get_fn_addr(Instance::resolve_drop_in_place(cx.tcx(), ty)),
+        cx.const_usize(layout.size.bytes()),
+        cx.const_usize(layout.align.abi.bytes()),
+    ]
+    .iter()
+    .cloned()
+    .chain(methods)
+    .collect();
+
+    let vtable_const = cx.const_struct(&components, false);
+    let align = cx.data_layout().pointer_align.abi;
+    let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
+
+    cx.create_vtable_metadata(ty, vtable);
+
+    cx.vtables().borrow_mut().insert((ty, trait_ref), vtable);
+    vtable
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
new file mode 100644
index 00000000000..2e386c1e594
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -0,0 +1,448 @@
+//! An analysis to determine which locals require allocas and
+//! which do not.
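+//!
+//! As an illustrative sketch, given source like the following, `x` can stay
+//! an SSA value, while `y` is forced into an alloca because its address is
+//! taken:
+//!
+//! ```rust
+//! fn f() -> i32 {
+//!     let x = 1;      // only read by copy: can stay an SSA value
+//!     let mut y = 2;
+//!     let r = &mut y; // borrowed: `y` must live in memory
+//!     *r += x;
+//!     y
+//! }
+//! ```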
+
+use super::FunctionCx;
+use crate::traits::*;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{
+    MutatingUseContext, NonMutatingUseContext, NonUseContext, PlaceContext, Visitor,
+};
+use rustc_middle::mir::{self, Location, TerminatorKind};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_target::abi::LayoutOf;
+
+pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    fx: &FunctionCx<'a, 'tcx, Bx>,
+) -> BitSet<mir::Local> {
+    let mir = fx.mir;
+    let mut analyzer = LocalAnalyzer::new(fx);
+
+    analyzer.visit_body(&mir);
+
+    for (local, decl) in mir.local_decls.iter_enumerated() {
+        let ty = fx.monomorphize(&decl.ty);
+        debug!("local {:?} has type `{}`", local, ty);
+        let layout = fx.cx.spanned_layout_of(ty, decl.source_info.span);
+        if fx.cx.is_backend_immediate(layout) {
+            // These sorts of types are immediates that we can store
+            // in a `Value` without an alloca.
+        } else if fx.cx.is_backend_scalar_pair(layout) {
+            // We allow pairs and uses of either of their two fields.
+        } else {
+            // These sorts of types require an alloca. Note that
+            // `is_backend_immediate()` may *still* be true, particularly
+            // for newtypes, but we currently force some types
+            // (e.g., structs) into an alloca unconditionally, just so
+            // that we don't have to deal with having two pathways
+            // (gep vs extractvalue etc).
+            analyzer.not_ssa(local);
+        }
+    }
+
+    analyzer.non_ssa_locals
+}
+
+struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+    fx: &'mir FunctionCx<'a, 'tcx, Bx>,
+    dominators: Dominators<mir::BasicBlock>,
+    non_ssa_locals: BitSet<mir::Local>,
+    // The location of the first visited direct assignment to each
+    // local, or an invalid location (out of bounds `block` index).
+    first_assignment: IndexVec<mir::Local, Location>,
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
+    fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self {
+        let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location();
+        let dominators = fx.mir.dominators();
+        let mut analyzer = LocalAnalyzer {
+            fx,
+            dominators,
+            non_ssa_locals: BitSet::new_empty(fx.mir.local_decls.len()),
+            first_assignment: IndexVec::from_elem(invalid_location, &fx.mir.local_decls),
+        };
+
+        // Arguments get assigned to by means of the function being called
+        for arg in fx.mir.args_iter() {
+            analyzer.first_assignment[arg] = mir::START_BLOCK.start_location();
+        }
+
+        analyzer
+    }
+
+    fn first_assignment(&self, local: mir::Local) -> Option<Location> {
+        let location = self.first_assignment[local];
+        if location.block.index() < self.fx.mir.basic_blocks().len() {
+            Some(location)
+        } else {
+            None
+        }
+    }
+
+    fn not_ssa(&mut self, local: mir::Local) {
+        debug!("marking {:?} as non-SSA", local);
+        self.non_ssa_locals.insert(local);
+    }
+
+    fn assign(&mut self, local: mir::Local, location: Location) {
+        if self.first_assignment(local).is_some() {
+            self.not_ssa(local);
+        } else {
+            self.first_assignment[local] = location;
+        }
+    }
+
+    fn process_place(
+        &mut self,
+        place_ref: &mir::PlaceRef<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        let cx = self.fx.cx;
+
+        if let &[ref proj_base @ .., elem] = place_ref.projection {
+            let mut base_context = if context.is_mutating_use() {
+                PlaceContext::MutatingUse(MutatingUseContext::Projection)
+            } else {
+                PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+            };
+
+            // Allow uses of projections that are ZSTs or from scalar fields.
+            let is_consume = match context {
+                PlaceContext::NonMutatingUse(
+                    NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+                ) => true,
+                _ => false,
+            };
+            if is_consume {
+                let base_ty =
+                    mir::Place::ty_from(place_ref.local, proj_base, self.fx.mir, cx.tcx());
+                let base_ty = self.fx.monomorphize(&base_ty);
+
+                // ZSTs don't require any actual memory access.
+                let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(&elem)).ty;
+                let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
+                if cx.spanned_layout_of(elem_ty, span).is_zst() {
+                    return;
+                }
+
+                if let mir::ProjectionElem::Field(..) = elem {
+                    let layout = cx.spanned_layout_of(base_ty.ty, span);
+                    if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
+                        // Recurse with the same context, instead of `Projection`,
+                        // potentially stopping at non-operand projections,
+                        // which would trigger `not_ssa` on locals.
+                        base_context = context;
+                    }
+                }
+            }
+
+            if let mir::ProjectionElem::Deref = elem {
+                // Deref projections typically only read the pointer.
+                // (the exception being `VarDebugInfo` contexts, handled below)
+                base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+
+                // Indirect debuginfo requires going through memory that only
+                // the debugger accesses, following our emitted DWARF pointer ops.
+                //
+                // FIXME(eddyb) Investigate the possibility of relaxing this, but
+                // note that `llvm.dbg.declare` *must* be used for indirect places,
+                // even if we start using `llvm.dbg.value` for all other cases,
+                // as we don't necessarily know when the value changes, but only
+                // where it lives in memory.
+                //
+                // It's possible `llvm.dbg.declare` could support starting from
+                // a pointer that doesn't point to an `alloca`, but this would
+                // only be useful if we know the pointer being `Deref`'d comes
+                // from an immutable place, and if `llvm.dbg.declare` calls
+                // must be at the very start of the function, then only function
+                // arguments could contain such pointers.
+                if context == PlaceContext::NonUse(NonUseContext::VarDebugInfo) {
+                    // We use `NonUseContext::VarDebugInfo` for the base,
+                    // which might not force the base local to memory,
+                    // so we have to do it manually.
+                    self.visit_local(&place_ref.local, context, location);
+                }
+            }
+
+            // `NonUseContext::VarDebugInfo` needs to flow all the
+            // way down to the base local (see `visit_local`).
+            if context == PlaceContext::NonUse(NonUseContext::VarDebugInfo) {
+                base_context = context;
+            }
+
+            self.process_place(
+                &mir::PlaceRef { local: place_ref.local, projection: proj_base },
+                base_context,
+                location,
+            );
+            // HACK(eddyb) this emulates the old `visit_projection_elem`; this
+            // entire `visit_place`-like `process_place` method should be rewritten
+            // now that we have moved to the "slice of projections" representation.
+            if let mir::ProjectionElem::Index(local) = elem {
+                self.visit_local(
+                    &local,
+                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+                    location,
+                );
+            }
+        } else {
+            // FIXME: this duplicates `super_place`'s logic; it is repeated here
+            // to avoid cloning the place or changing the `visit_place` API.
+            let mut context = context;
+
+            if !place_ref.projection.is_empty() {
+                context = if context.is_mutating_use() {
+                    PlaceContext::MutatingUse(MutatingUseContext::Projection)
+                } else {
+                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+                };
+            }
+
+            self.visit_local(&place_ref.local, context, location);
+            self.visit_projection(place_ref.local, place_ref.projection, context, location);
+        }
+    }
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
+    for LocalAnalyzer<'mir, 'a, 'tcx, Bx>
+{
+    fn visit_assign(
+        &mut self,
+        place: &mir::Place<'tcx>,
+        rvalue: &mir::Rvalue<'tcx>,
+        location: Location,
+    ) {
+        debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
+
+        if let Some(index) = place.as_local() {
+            self.assign(index, location);
+            let decl_span = self.fx.mir.local_decls[index].source_info.span;
+            if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+                self.not_ssa(index);
+            }
+        } else {
+            self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+        }
+
+        self.visit_rvalue(rvalue, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        let check = match terminator.kind {
+            mir::TerminatorKind::Call { func: mir::Operand::Constant(ref c), ref args, .. } => {
+                match c.literal.ty.kind {
+                    ty::FnDef(did, _) => Some((did, args)),
+                    _ => None,
+                }
+            }
+            _ => None,
+        };
+        if let Some((def_id, args)) = check {
+            if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() {
+                // box_free(x) shares with `drop x` the property that it
+                // is not guaranteed to be statically dominated by the
+                // definition of x, so x must always be in an alloca.
+                if let mir::Operand::Move(ref place) = args[0] {
+                    self.visit_place(
+                        place,
+                        PlaceContext::MutatingUse(MutatingUseContext::Drop),
+                        location,
+                    );
+                }
+            }
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+        debug!("visit_place(place={:?}, context={:?})", place, context);
+        self.process_place(&place.as_ref(), context, location);
+    }
+
+    fn visit_local(&mut self, &local: &mir::Local, context: PlaceContext, location: Location) {
+        match context {
+            PlaceContext::MutatingUse(MutatingUseContext::Call)
+            | PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
+                self.assign(local, location);
+            }
+
+            PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
+
+            PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+            ) => {
+                // Reads from uninitialized variables (e.g., in dead code, after
+                // optimizations) require locals to be in (uninitialized) memory.
+                // N.B., there can be uninitialized reads of a local visited after
+                // an assignment to that local, if they happen on disjoint paths.
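+                // Sketch: with edges `bb0 -> bb1` and `bb0 -> bb2`, an
+                // assignment in bb1 does not dominate a read in bb2, so that
+                // read cannot be treated as an SSA read.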
+                let ssa_read = match self.first_assignment(local) {
+                    Some(assignment_location) => {
+                        assignment_location.dominates(location, &self.dominators)
+                    }
+                    None => false,
+                };
+                if !ssa_read {
+                    self.not_ssa(local);
+                }
+            }
+
+            PlaceContext::MutatingUse(
+                MutatingUseContext::Store
+                | MutatingUseContext::AsmOutput
+                | MutatingUseContext::Borrow
+                | MutatingUseContext::AddressOf
+                | MutatingUseContext::Projection,
+            )
+            | PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::Inspect
+                | NonMutatingUseContext::SharedBorrow
+                | NonMutatingUseContext::UniqueBorrow
+                | NonMutatingUseContext::ShallowBorrow
+                | NonMutatingUseContext::AddressOf
+                | NonMutatingUseContext::Projection,
+            ) => {
+                self.not_ssa(local);
+            }
+
+            PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
+                let ty = self.fx.mir.local_decls[local].ty;
+                let ty = self.fx.monomorphize(&ty);
+
+                // Only need the place if we're actually dropping it.
+                if self.fx.cx.type_needs_drop(ty) {
+                    self.not_ssa(local);
+                }
+            }
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CleanupKind {
+    NotCleanup,
+    Funclet,
+    Internal { funclet: mir::BasicBlock },
+}
+
+impl CleanupKind {
+    pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option<mir::BasicBlock> {
+        match self {
+            CleanupKind::NotCleanup => None,
+            CleanupKind::Funclet => Some(for_bb),
+            CleanupKind::Internal { funclet } => Some(funclet),
+        }
+    }
+}
+
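+// A sketch of the classification on a hypothetical CFG:
+//
+//     bb0: call f() -> [return: bb1, unwind: bb2]   => bb0: NotCleanup
+//     bb2 (cleanup): drop x -> bb3                  => bb2: Funclet
+//     bb3 (cleanup): resume                         => bb3: Internal { funclet: bb2 }
+//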
+pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+    fn discover_masters<'tcx>(
+        result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
+        mir: &mir::Body<'tcx>,
+    ) {
+        for (bb, data) in mir.basic_blocks().iter_enumerated() {
+            match data.terminator().kind {
+                TerminatorKind::Goto { .. }
+                | TerminatorKind::Resume
+                | TerminatorKind::Abort
+                | TerminatorKind::Return
+                | TerminatorKind::GeneratorDrop
+                | TerminatorKind::Unreachable
+                | TerminatorKind::SwitchInt { .. }
+                | TerminatorKind::Yield { .. }
+                | TerminatorKind::FalseEdge { .. }
+                | TerminatorKind::FalseUnwind { .. }
+                | TerminatorKind::InlineAsm { .. } => { /* nothing to do */ }
+                TerminatorKind::Call { cleanup: unwind, .. }
+                | TerminatorKind::Assert { cleanup: unwind, .. }
+                | TerminatorKind::DropAndReplace { unwind, .. }
+                | TerminatorKind::Drop { unwind, .. } => {
+                    if let Some(unwind) = unwind {
+                        debug!(
+                            "cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
+                            bb, data, unwind
+                        );
+                        result[unwind] = CleanupKind::Funclet;
+                    }
+                }
+            }
+        }
+    }
+
+    fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
+        let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+
+        let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
+            ref mut s @ None => {
+                debug!("set_successor: updating successor of {:?} to {:?}", funclet, succ);
+                *s = Some(succ);
+            }
+            Some(s) => {
+                if s != succ {
+                    span_bug!(
+                        mir.span,
+                        "funclet {:?} has 2 parents - {:?} and {:?}",
+                        funclet,
+                        s,
+                        succ
+                    );
+                }
+            }
+        };
+
+        for (bb, data) in traversal::reverse_postorder(mir) {
+            let funclet = match result[bb] {
+                CleanupKind::NotCleanup => continue,
+                CleanupKind::Funclet => bb,
+                CleanupKind::Internal { funclet } => funclet,
+            };
+
+            debug!(
+                "cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
+                bb, data, result[bb], funclet
+            );
+
+            for &succ in data.terminator().successors() {
+                let kind = result[succ];
+                debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", funclet, succ, kind);
+                match kind {
+                    CleanupKind::NotCleanup => {
+                        result[succ] = CleanupKind::Internal { funclet };
+                    }
+                    CleanupKind::Funclet => {
+                        if funclet != succ {
+                            set_successor(funclet, succ);
+                        }
+                    }
+                    CleanupKind::Internal { funclet: succ_funclet } => {
+                        if funclet != succ_funclet {
+                            // `succ` has 2 different funclets going into it, so it must
+                            // be a funclet by itself.
+
+                            debug!(
+                                "promoting {:?} to a funclet and updating {:?}",
+                                succ, succ_funclet
+                            );
+                            result[succ] = CleanupKind::Funclet;
+                            set_successor(succ_funclet, succ);
+                            set_successor(funclet, succ);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+
+    discover_masters(&mut result, mir);
+    propagate(&mut result, mir);
+    debug!("cleanup_kinds: result={:?}", result);
+    result
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
new file mode 100644
index 00000000000..8048a569f79
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -0,0 +1,1416 @@
+use super::operand::OperandRef;
+use super::operand::OperandValue::{Immediate, Pair, Ref};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_ast as ast;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{AllocId, ConstValue, Pointer, Scalar};
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
+use rustc_span::source_map::Span;
+use rustc_span::{sym, Symbol};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::{self, LayoutOf};
+use rustc_target::spec::abi::Abi;
+
+use std::borrow::Cow;
+
+/// Used by `FunctionCx::codegen_terminator` for emitting common patterns,
+/// e.g., creating a basic block, calling a function, etc.
+struct TerminatorCodegenHelper<'tcx> {
+    bb: mir::BasicBlock,
+    terminator: &'tcx mir::Terminator<'tcx>,
+    funclet_bb: Option<mir::BasicBlock>,
+}
+
+impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
+    /// Returns the associated funclet from `FunctionCx::funclets` for the
+    /// `funclet_bb` member if it is not `None`.
+    fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
+    ) -> Option<&'b Bx::Funclet> {
+        match self.funclet_bb {
+            Some(funcl) => fx.funclets[funcl].as_ref(),
+            None => None,
+        }
+    }
+
+    fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        target: mir::BasicBlock,
+    ) -> (Bx::BasicBlock, bool) {
+        let span = self.terminator.source_info.span;
+        let lltarget = fx.blocks[target];
+        let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+        match (self.funclet_bb, target_funclet) {
+            (None, None) => (lltarget, false),
+            (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
+                (lltarget, false)
+            }
+            // jump *into* cleanup - need a landing pad on GNU (non-MSVC) targets
+            (None, Some(_)) => (fx.landing_pad_to(target), false),
+            (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
+            (Some(_), Some(_)) => (fx.landing_pad_to(target), true),
+        }
+    }
+
+    /// Gets the branch target for `target`, creating a cleanup trampoline if needed.
+    fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        target: mir::BasicBlock,
+    ) -> Bx::BasicBlock {
+        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        if is_cleanupret {
+            // MSVC cross-funclet jump - need a trampoline
+
+            debug!("llblock: creating cleanup trampoline for {:?}", target);
+            let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
+            let mut trampoline = fx.new_block(name);
+            trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+            trampoline.llbb()
+        } else {
+            lltarget
+        }
+    }
+
+    fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        bx: &mut Bx,
+        target: mir::BasicBlock,
+    ) {
+        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        if is_cleanupret {
+            // micro-optimization: generate a `ret` rather than a jump
+            // to a trampoline.
+            bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+        } else {
+            bx.br(lltarget);
+        }
+    }
+
+    /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
+    /// return destination `destination`, and the cleanup block `cleanup`.
+    fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        bx: &mut Bx,
+        fn_abi: FnAbi<'tcx, Ty<'tcx>>,
+        fn_ptr: Bx::Value,
+        llargs: &[Bx::Value],
+        destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
+        cleanup: Option<mir::BasicBlock>,
+    ) {
+        // If there is a cleanup block and the function we're calling can unwind, then
+        // do an invoke, otherwise do a call.
+        if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
+            let ret_bx = if let Some((_, target)) = destination {
+                fx.blocks[target]
+            } else {
+                fx.unreachable_block()
+            };
+            let invokeret =
+                bx.invoke(fn_ptr, &llargs, ret_bx, self.llblock(fx, cleanup), self.funclet(fx));
+            bx.apply_attrs_callsite(&fn_abi, invokeret);
+
+            if let Some((ret_dest, target)) = destination {
+                let mut ret_bx = fx.build_block(target);
+                fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
+                fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
+            }
+        } else {
+            let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
+            bx.apply_attrs_callsite(&fn_abi, llret);
+            if fx.mir[self.bb].is_cleanup {
+                // Cleanup is always the cold path. Don't inline
+                // drop glue. Also, when there is a deeply-nested
+                // struct, there are "symmetry" issues that cause
+                // exponential inlining - see issue #41696.
+                bx.do_not_inline(llret);
+            }
+
+            if let Some((ret_dest, target)) = destination {
+                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
+                self.funclet_br(fx, bx, target);
+            } else {
+                bx.unreachable();
+            }
+        }
+    }
+
+    /// Generates the `sideeffect` intrinsic if jumping to any of the targets
+    /// can form a loop.
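+    /// E.g. the back edge of a `loop {}` jumps to a block at or before the
+    /// current one, so we emit the intrinsic there to keep such infinite
+    /// loops from being optimized away (a sketch of the intent behind
+    /// `-Zinsert-sideeffect`).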
+    fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        mir: &'tcx mir::Body<'tcx>,
+        bx: &mut Bx,
+        targets: &[mir::BasicBlock],
+    ) {
+        if bx.tcx().sess.opts.debugging_opts.insert_sideeffect {
+            if targets.iter().any(|&target| {
+                target <= self.bb
+                    && target.start_location().is_predecessor_of(self.bb.start_location(), mir)
+            }) {
+                bx.sideeffect();
+            }
+        }
+    }
+}
+
+/// Codegen implementations for some terminator variants.
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// Generates code for a `Resume` terminator.
+    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+        if let Some(funclet) = helper.funclet(self) {
+            bx.cleanup_ret(funclet, None);
+        } else {
+            let slot = self.get_personality_slot(&mut bx);
+            let lp0 = slot.project_field(&mut bx, 0);
+            let lp0 = bx.load_operand(lp0).immediate();
+            let lp1 = slot.project_field(&mut bx, 1);
+            let lp1 = bx.load_operand(lp1).immediate();
+            slot.storage_dead(&mut bx);
+
+            let mut lp = bx.const_undef(self.landing_pad_type());
+            lp = bx.insert_value(lp, lp0, 0);
+            lp = bx.insert_value(lp, lp1, 1);
+            bx.resume(lp);
+        }
+    }
+
+    fn codegen_switchint_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        discr: &mir::Operand<'tcx>,
+        switch_ty: Ty<'tcx>,
+        values: &Cow<'tcx, [u128]>,
+        targets: &Vec<mir::BasicBlock>,
+    ) {
+        let discr = self.codegen_operand(&mut bx, &discr);
+        // `switch_ty` is redundant, sanity-check that.
+        assert_eq!(discr.layout.ty, switch_ty);
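+        // Illustrative sketch: `match b { false => bb1, true => bb2 }` on a
+        // `bool` discriminant takes the two-target path below and becomes a
+        // single conditional branch rather than a switch.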
+        if targets.len() == 2 {
+            // If there are two targets, emit br instead of switch
+            let lltrue = helper.llblock(self, targets[0]);
+            let llfalse = helper.llblock(self, targets[1]);
+            if switch_ty == bx.tcx().types.bool {
+                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+                // Don't generate trivial icmps when switching on bool
+                if let [0] = values[..] {
+                    bx.cond_br(discr.immediate(), llfalse, lltrue);
+                } else {
+                    assert_eq!(&values[..], &[1]);
+                    bx.cond_br(discr.immediate(), lltrue, llfalse);
+                }
+            } else {
+                let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
+                let llval = bx.const_uint_big(switch_llty, values[0]);
+                let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+                bx.cond_br(cmp, lltrue, llfalse);
+            }
+        } else {
+            helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+            let (otherwise, targets) = targets.split_last().unwrap();
+            bx.switch(
+                discr.immediate(),
+                helper.llblock(self, *otherwise),
+                values
+                    .iter()
+                    .zip(targets)
+                    .map(|(&value, target)| (value, helper.llblock(self, *target))),
+            );
+        }
+    }
+
+    fn codegen_return_terminator(&mut self, mut bx: Bx) {
+        // Call `va_end` if this is the definition of a C-variadic function.
+        if self.fn_abi.c_variadic {
+            // The `VaList` "spoofed" argument is just after all the real arguments.
+            let va_list_arg_idx = self.fn_abi.args.len();
+            match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
+                LocalRef::Place(va_list) => {
+                    bx.va_end(va_list.llval);
+                }
+                _ => bug!("C-variadic function must have a `VaList` place"),
+            }
+        }
+        if self.fn_abi.ret.layout.abi.is_uninhabited() {
+            // Functions with uninhabited return values are marked `noreturn`,
+            // so we should make sure that we never actually return.
+            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+            // if that turns out to be helpful.
+            bx.abort();
+            // `abort` does not terminate the block, so we still need to generate
+            // an `unreachable` terminator after it.
+            bx.unreachable();
+            return;
+        }
+        let llval = match self.fn_abi.ret.mode {
+            PassMode::Ignore | PassMode::Indirect(..) => {
+                bx.ret_void();
+                return;
+            }
+
+            PassMode::Direct(_) | PassMode::Pair(..) => {
+                let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+                if let Ref(llval, _, align) = op.val {
+                    bx.load(llval, align)
+                } else {
+                    op.immediate_or_packed_pair(&mut bx)
+                }
+            }
+
+            PassMode::Cast(cast_ty) => {
+                let op = match self.locals[mir::RETURN_PLACE] {
+                    LocalRef::Operand(Some(op)) => op,
+                    LocalRef::Operand(None) => bug!("use of return before def"),
+                    LocalRef::Place(cg_place) => OperandRef {
+                        val: Ref(cg_place.llval, None, cg_place.align),
+                        layout: cg_place.layout,
+                    },
+                    LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+                };
+                let llslot = match op.val {
+                    Immediate(_) | Pair(..) => {
+                        let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
+                        op.val.store(&mut bx, scratch);
+                        scratch.llval
+                    }
+                    Ref(llval, _, align) => {
+                        assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
+                        llval
+                    }
+                };
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
+                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+            }
+        };
+        bx.ret(llval);
+    }
+
+    fn codegen_drop_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        location: mir::Place<'tcx>,
+        target: mir::BasicBlock,
+        unwind: Option<mir::BasicBlock>,
+    ) {
+        let ty = location.ty(self.mir, bx.tcx()).ty;
+        let ty = self.monomorphize(&ty);
+        let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
+
+        if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+            // we don't actually need to drop anything.
+            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+            helper.funclet_br(self, &mut bx, target);
+            return;
+        }
+
+        let place = self.codegen_place(&mut bx, location.as_ref());
+        let (args1, args2);
+        let mut args = if let Some(llextra) = place.llextra {
+            args2 = [place.llval, llextra];
+            &args2[..]
+        } else {
+            args1 = [place.llval];
+            &args1[..]
+        };
+        let (drop_fn, fn_abi) = match ty.kind {
+            // FIXME(eddyb) perhaps move some of this logic into
+            // `Instance::resolve_drop_in_place`?
+            ty::Dynamic(..) => {
+                let virtual_drop = Instance {
+                    def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+                    substs: drop_fn.substs,
+                };
+                let fn_abi = FnAbi::of_instance(&bx, virtual_drop, &[]);
+                let vtable = args[1];
+                args = &args[..1];
+                (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_abi), fn_abi)
+            }
+            _ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])),
+        };
+        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+        helper.do_call(
+            self,
+            &mut bx,
+            fn_abi,
+            drop_fn,
+            args,
+            Some((ReturnDest::Nothing, target)),
+            unwind,
+        );
+    }
+
+    fn codegen_assert_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        terminator: &mir::Terminator<'tcx>,
+        cond: &mir::Operand<'tcx>,
+        expected: bool,
+        msg: &mir::AssertMessage<'tcx>,
+        target: mir::BasicBlock,
+        cleanup: Option<mir::BasicBlock>,
+    ) {
+        let span = terminator.source_info.span;
+        let cond = self.codegen_operand(&mut bx, cond).immediate();
+        let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
+
+        // This case can currently arise only from functions marked
+        // with #[rustc_inherit_overflow_checks] and inlined from
+        // another crate (mostly core::num generic/#[inline] fns),
+        // while the current crate doesn't use overflow checks.
+        // NOTE: Unlike binops, negation doesn't have its own
+        // checked operation, just a comparison with the minimum
+        // value, so we have to check for the assert message.
+        if !bx.check_overflow() {
+            if let AssertKind::OverflowNeg(_) = *msg {
+                const_cond = Some(expected);
+            }
+        }
+
+        // Don't codegen the panic block if success is known.
+        if const_cond == Some(expected) {
+            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+            helper.funclet_br(self, &mut bx, target);
+            return;
+        }
+
+        // Pass the condition through llvm.expect for branch hinting.
+        let cond = bx.expect(cond, expected);
+
+        // Create the failure block and the conditional branch to it.
+        let lltarget = helper.llblock(self, target);
+        let panic_block = self.new_block("panic");
+        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+        if expected {
+            bx.cond_br(cond, lltarget, panic_block.llbb());
+        } else {
+            bx.cond_br(cond, panic_block.llbb(), lltarget);
+        }
+
+        // After this point, bx is the block for the call to panic.
+        bx = panic_block;
+        self.set_debug_loc(&mut bx, terminator.source_info);
+
+        // Get the location information.
+        let location = self.get_caller_location(&mut bx, span).immediate();
+
+        // Put together the arguments to the panic entry point.
+        let (lang_item, args) = match msg {
+            AssertKind::BoundsCheck { ref len, ref index } => {
+                let len = self.codegen_operand(&mut bx, len).immediate();
+                let index = self.codegen_operand(&mut bx, index).immediate();
+                // It's `fn panic_bounds_check(index: usize, len: usize)`,
+                // and `#[track_caller]` adds an implicit third argument.
+                (LangItem::PanicBoundsCheck, vec![index, len, location])
+            }
+            _ => {
+                let msg_str = Symbol::intern(msg.description());
+                let msg = bx.const_str(msg_str);
+                // It's `pub fn panic(expr: &str)`, with the wide reference being passed
+                // as two arguments, and `#[track_caller]` adds an implicit third argument.
+                (LangItem::Panic, vec![msg.0, msg.1, location])
+            }
+        };
+
+        // Obtain the panic entry point.
+        let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
+        let instance = ty::Instance::mono(bx.tcx(), def_id);
+        let fn_abi = FnAbi::of_instance(&bx, instance, &[]);
+        let llfn = bx.get_fn_addr(instance);
+
+        // Codegen the actual panic invoke/call.
+        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
+    }
+
+    /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+    fn codegen_panic_intrinsic(
+        &mut self,
+        helper: &TerminatorCodegenHelper<'tcx>,
+        bx: &mut Bx,
+        intrinsic: Option<Symbol>,
+        instance: Option<Instance<'tcx>>,
+        span: Span,
+        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
+        cleanup: Option<mir::BasicBlock>,
+    ) -> bool {
+        // Emit a panic or a no-op for `assert_*` intrinsics.
+        // These are intrinsics that compile to panics so that we can get a message
+        // which mentions the offending type, even from a const context.
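+        // (As a sketch of where these come from: `mem::zeroed::<T>()`, for
+        // instance, guards itself with a call to `assert_zero_valid::<T>`.)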
+        #[derive(Debug, PartialEq)]
+        enum AssertIntrinsic {
+            Inhabited,
+            ZeroValid,
+            UninitValid,
+        }
+        let panic_intrinsic = intrinsic.and_then(|i| match i {
+            sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
+            sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
+            sym::assert_uninit_valid => Some(AssertIntrinsic::UninitValid),
+            _ => None,
+        });
+        if let Some(intrinsic) = panic_intrinsic {
+            use AssertIntrinsic::*;
+            let ty = instance.unwrap().substs.type_at(0);
+            let layout = bx.layout_of(ty);
+            let do_panic = match intrinsic {
+                Inhabited => layout.abi.is_uninhabited(),
+                // We unwrap as the error type is `!`.
+                ZeroValid => !layout.might_permit_raw_init(bx, /*zero:*/ true).unwrap(),
+                // We unwrap as the error type is `!`.
+                UninitValid => !layout.might_permit_raw_init(bx, /*zero:*/ false).unwrap(),
+            };
+            if do_panic {
+                let msg_str = if layout.abi.is_uninhabited() {
+                    // Use this error even for the other intrinsics as it is more precise.
+                    format!("attempted to instantiate uninhabited type `{}`", ty)
+                } else if intrinsic == ZeroValid {
+                    format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+                } else {
+                    format!("attempted to leave type `{}` uninitialized, which is invalid", ty)
+                };
+                let msg = bx.const_str(Symbol::intern(&msg_str));
+                let location = self.get_caller_location(bx, span).immediate();
+
+                // Obtain the panic entry point.
+                // FIXME: dedup this with `codegen_assert_terminator` above.
+                let def_id = common::langcall(bx.tcx(), Some(span), "", LangItem::Panic);
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let fn_abi = FnAbi::of_instance(bx, instance, &[]);
+                let llfn = bx.get_fn_addr(instance);
+
+                if let Some((_, target)) = destination.as_ref() {
+                    helper.maybe_sideeffect(self.mir, bx, &[*target]);
+                }
+                // Codegen the actual panic invoke/call.
+                helper.do_call(
+                    self,
+                    bx,
+                    fn_abi,
+                    llfn,
+                    &[msg.0, msg.1, location],
+                    destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
+                    cleanup,
+                );
+            } else {
+                // a NOP
+                let target = destination.as_ref().unwrap().1;
+                helper.maybe_sideeffect(self.mir, bx, &[target]);
+                helper.funclet_br(self, bx, target)
+            }
+            true
+        } else {
+            false
+        }
+    }
+
+    fn codegen_call_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        terminator: &mir::Terminator<'tcx>,
+        func: &mir::Operand<'tcx>,
+        args: &Vec<mir::Operand<'tcx>>,
+        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
+        cleanup: Option<mir::BasicBlock>,
+        fn_span: Span,
+    ) {
+        let span = terminator.source_info.span;
+        // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+        let callee = self.codegen_operand(&mut bx, func);
+
+        let (instance, mut llfn) = match callee.layout.ty.kind {
+            ty::FnDef(def_id, substs) => (
+                Some(
+                    ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs)
+                        .unwrap()
+                        .unwrap()
+                        .polymorphize(bx.tcx()),
+                ),
+                None,
+            ),
+            ty::FnPtr(_) => (None, Some(callee.immediate())),
+            _ => bug!("{} is not callable", callee.layout.ty),
+        };
+        let def = instance.map(|i| i.def);
+
+        if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
+            // Empty drop glue; a no-op.
+            let &(_, target) = destination.as_ref().unwrap();
+            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+            helper.funclet_br(self, &mut bx, target);
+            return;
+        }
+
+        // FIXME(eddyb) avoid computing this if possible, when `instance` is
+        // available - right now `sig` is only needed for getting the `abi`
+        // and figuring out how many extra args were passed to a C-variadic `fn`.
+        let sig = callee.layout.ty.fn_sig(bx.tcx());
+        let abi = sig.abi();
+
+        // Handle, ourselves, the intrinsics that old codegen wants `Expr`s for.
+        let intrinsic = match def {
+            Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
+            _ => None,
+        };
+
+        let extra_args = &args[sig.inputs().skip_binder().len()..];
+        let extra_args = extra_args
+            .iter()
+            .map(|op_arg| {
+                let op_ty = op_arg.ty(self.mir, bx.tcx());
+                self.monomorphize(&op_ty)
+            })
+            .collect::<Vec<_>>();
+
+        let fn_abi = match instance {
+            Some(instance) => FnAbi::of_instance(&bx, instance, &extra_args),
+            None => FnAbi::of_fn_ptr(&bx, sig, &extra_args),
+        };
+
+        if intrinsic == Some(sym::transmute) {
+            if let Some(destination_ref) = destination.as_ref() {
+                let &(dest, target) = destination_ref;
+                self.codegen_transmute(&mut bx, &args[0], dest);
+                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, target);
+            } else {
+                // If we are trying to transmute to an uninhabited type,
+                // it is likely there is no allotted destination. In fact,
+                // transmuting to an uninhabited type is UB, which means
+                // we can do what we like. Here, we declare that transmuting
+                // into an uninhabited type is impossible, so anything following
+                // it must be unreachable.
+                assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
+                bx.unreachable();
+            }
+            return;
+        }
+
+        if self.codegen_panic_intrinsic(
+            &helper,
+            &mut bx,
+            intrinsic,
+            instance,
+            span,
+            destination,
+            cleanup,
+        ) {
+            return;
+        }
+
+        // The arguments we'll be passing. Plus one to account for outptr, if used.
+        let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
+        let mut llargs = Vec::with_capacity(arg_count);
+
+        // Prepare the return value destination
+        let ret_dest = if let Some((dest, _)) = *destination {
+            let is_intrinsic = intrinsic.is_some();
+            self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs, is_intrinsic)
+        } else {
+            ReturnDest::Nothing
+        };
+
+        if intrinsic == Some(sym::caller_location) {
+            if let Some((_, target)) = destination.as_ref() {
+                let location = self.get_caller_location(&mut bx, fn_span);
+
+                if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
+                    location.val.store(&mut bx, tmp);
+                }
+                self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
+
+                helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
+                helper.funclet_br(self, &mut bx, *target);
+            }
+            return;
+        }
+
+        if intrinsic.is_some() && intrinsic != Some(sym::drop_in_place) {
+            let intrinsic = intrinsic.unwrap();
+            let dest = match ret_dest {
+                _ if fn_abi.ret.is_indirect() => llargs[0],
+                ReturnDest::Nothing => {
+                    bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
+                }
+                ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+                ReturnDest::DirectOperand(_) => {
+                    bug!("Cannot use direct operand with an intrinsic call")
+                }
+            };
+
+            let args: Vec<_> = args
+                .iter()
+                .enumerate()
+                .map(|(i, arg)| {
+                    // The indices passed to simd_shuffle* in the
+                    // third argument must be constant. This is
+                    // checked by const-qualification, which also
+                    // promotes any complex rvalues to constants.
+                    if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+                        if let mir::Operand::Constant(constant) = arg {
+                            let c = self.eval_mir_constant(constant);
+                            let (llval, ty) = self.simd_shuffle_indices(
+                                &bx,
+                                constant.span,
+                                constant.literal.ty,
+                                c,
+                            );
+                            return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty) };
+                        } else {
+                            span_bug!(span, "shuffle indices must be constant");
+                        }
+                    }
+
+                    self.codegen_operand(&mut bx, arg)
+                })
+                .collect();
+
+            bx.codegen_intrinsic_call(
+                *instance.as_ref().unwrap(),
+                &fn_abi,
+                &args,
+                dest,
+                terminator.source_info.span,
+            );
+
+            if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+            }
+
+            if let Some((_, target)) = *destination {
+                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, target);
+            } else {
+                bx.unreachable();
+            }
+
+            return;
+        }
+
+        // Split the rust-call tupled arguments off.
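+        // (With the `extern "rust-call"` ABI, e.g. closure calls, MIR passes
+        // the non-self arguments as a single tuple; `codegen_arguments_untupled`
+        // below flattens it back into individual ABI arguments.)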
+        let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+            let (tup, args) = args.split_last().unwrap();
+            (args, Some(tup))
+        } else {
+            (&args[..], None)
+        };
+
+        'make_args: for (i, arg) in first_args.iter().enumerate() {
+            let mut op = self.codegen_operand(&mut bx, arg);
+
+            if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+                if let Pair(..) = op.val {
+                    // In the case of Rc<Self>, we need to explicitly pass a
+                    // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+                    // that is understood elsewhere in the compiler as a method on
+                    // `dyn Trait`.
+                    // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+                    // we get a value of a built-in pointer type
+                    'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+                        && !op.layout.ty.is_region_ptr()
+                    {
+                        for i in 0..op.layout.fields.count() {
+                            let field = op.extract_field(&mut bx, i);
+                            if !field.layout.is_zst() {
+                                // We found the one non-zero-sized field that is allowed;
+                                // now find *its* non-zero-sized field, or stop if it's a
+                                // pointer.
+                                op = field;
+                                continue 'descend_newtypes;
+                            }
+                        }
+
+                        span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+                    }
+
+                    // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+                    // data pointer and vtable. Look up the method in the vtable, and pass
+                    // the data pointer as the first argument
+                    match op.val {
+                        Pair(data_ptr, meta) => {
+                            llfn = Some(
+                                meth::VirtualIndex::from_index(idx).get_fn(&mut bx, meta, &fn_abi),
+                            );
+                            llargs.push(data_ptr);
+                            continue 'make_args;
+                        }
+                        other => bug!("expected a Pair, got {:?}", other),
+                    }
+                } else if let Ref(data_ptr, Some(meta), _) = op.val {
+                    // by-value dynamic dispatch
+                    llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(&mut bx, meta, &fn_abi));
+                    llargs.push(data_ptr);
+                    continue;
+                } else {
+                    span_bug!(span, "can't codegen a virtual call on {:?}", op);
+                }
+            }
+
+            // The callee needs to own the argument memory if we pass it
+            // by-ref, so make a local copy of non-immediate constants.
+            match (arg, op.val) {
+                (&mir::Operand::Copy(_), Ref(_, None, _))
+                | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+                    let tmp = PlaceRef::alloca(&mut bx, op.layout);
+                    op.val.store(&mut bx, tmp);
+                    op.val = Ref(tmp.llval, None, tmp.align);
+                }
+                _ => {}
+            }
+
+            self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+        }
+        if let Some(tup) = untuple {
+            self.codegen_arguments_untupled(
+                &mut bx,
+                tup,
+                &mut llargs,
+                &fn_abi.args[first_args.len()..],
+            )
+        }
+
+        let needs_location =
+            instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
+        if needs_location {
+            assert_eq!(
+                fn_abi.args.len(),
+                args.len() + 1,
+                "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
+            );
+            let location = self.get_caller_location(&mut bx, fn_span);
+            debug!(
+                "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
+                terminator, location, fn_span
+            );
+
+            let last_arg = fn_abi.args.last().unwrap();
+            self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+        }
+
+        let fn_ptr = match (llfn, instance) {
+            (Some(llfn), _) => llfn,
+            (None, Some(instance)) => bx.get_fn_addr(instance),
+            _ => span_bug!(span, "no llfn for call"),
+        };
+
+        if let Some((_, target)) = destination.as_ref() {
+            helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
+        }
+        helper.do_call(
+            self,
+            &mut bx,
+            fn_abi,
+            fn_ptr,
+            &llargs,
+            destination.as_ref().map(|&(_, target)| (ret_dest, target)),
+            cleanup,
+        );
+    }
+
+    fn codegen_asm_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+        terminator: &mir::Terminator<'tcx>,
+        template: &[ast::InlineAsmTemplatePiece],
+        operands: &[mir::InlineAsmOperand<'tcx>],
+        options: ast::InlineAsmOptions,
+        line_spans: &[Span],
+        destination: Option<mir::BasicBlock>,
+    ) {
+        let span = terminator.source_info.span;
+
+        let operands: Vec<_> = operands
+            .iter()
+            .map(|op| match *op {
+                mir::InlineAsmOperand::In { reg, ref value } => {
+                    let value = self.codegen_operand(&mut bx, value);
+                    InlineAsmOperandRef::In { reg, value }
+                }
+                mir::InlineAsmOperand::Out { reg, late, ref place } => {
+                    let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+                    InlineAsmOperandRef::Out { reg, late, place }
+                }
+                mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
+                    let in_value = self.codegen_operand(&mut bx, in_value);
+                    let out_place =
+                        out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+                    InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
+                }
+                mir::InlineAsmOperand::Const { ref value } => {
+                    if let mir::Operand::Constant(constant) = value {
+                        let const_value = self
+                            .eval_mir_constant(constant)
+                            .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+                        let ty = constant.literal.ty;
+                        let size = bx.layout_of(ty).size;
+                        let scalar = match const_value {
+                            // Promoted constants are evaluated into a ByRef instead of a Scalar,
+                            // but we want the scalar value here.
+                            ConstValue::ByRef { alloc, offset } => {
+                                let ptr = Pointer::new(AllocId(0), offset);
+                                alloc
+                                    .read_scalar(&bx, ptr, size)
+                                    .and_then(|s| s.check_init())
+                                    .unwrap_or_else(|e| {
+                                        bx.tcx().sess.span_err(
+                                            span,
+                                            &format!("Could not evaluate asm const: {}", e),
+                                        );
+
+                                        // We are erroring out, just emit a dummy constant.
+                                        Scalar::from_u64(0)
+                                    })
+                            }
+                            _ => span_bug!(span, "expected ByRef for promoted asm const"),
+                        };
+                        let value = scalar.assert_bits(size);
+                        let string = match ty.kind {
+                            ty::Uint(_) => value.to_string(),
+                            ty::Int(int_ty) => {
+                                match int_ty.normalize(bx.tcx().sess.target.ptr_width) {
+                                    ast::IntTy::I8 => (value as i8).to_string(),
+                                    ast::IntTy::I16 => (value as i16).to_string(),
+                                    ast::IntTy::I32 => (value as i32).to_string(),
+                                    ast::IntTy::I64 => (value as i64).to_string(),
+                                    ast::IntTy::I128 => (value as i128).to_string(),
+                                    ast::IntTy::Isize => unreachable!(),
+                                }
+                            }
+                            ty::Float(ast::FloatTy::F32) => {
+                                f32::from_bits(value as u32).to_string()
+                            }
+                            ty::Float(ast::FloatTy::F64) => {
+                                f64::from_bits(value as u64).to_string()
+                            }
+                            _ => span_bug!(span, "asm const has bad type {}", ty),
+                        };
+                        InlineAsmOperandRef::Const { string }
+                    } else {
+                        span_bug!(span, "asm const is not a constant");
+                    }
+                }
+                mir::InlineAsmOperand::SymFn { ref value } => {
+                    let literal = self.monomorphize(&value.literal);
+                    if let ty::FnDef(def_id, substs) = literal.ty.kind {
+                        let instance = ty::Instance::resolve_for_fn_ptr(
+                            bx.tcx(),
+                            ty::ParamEnv::reveal_all(),
+                            def_id,
+                            substs,
+                        )
+                        .unwrap();
+                        InlineAsmOperandRef::SymFn { instance }
+                    } else {
+                        span_bug!(span, "invalid type for asm sym (fn)");
+                    }
+                }
+                mir::InlineAsmOperand::SymStatic { def_id } => {
+                    InlineAsmOperandRef::SymStatic { def_id }
+                }
+            })
+            .collect();
+
+        bx.codegen_inline_asm(template, &operands, options, line_spans);
+
+        if let Some(target) = destination {
+            helper.funclet_br(self, &mut bx, target);
+        } else {
+            bx.unreachable();
+        }
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
+        let mut bx = self.build_block(bb);
+        let mir = self.mir;
+        let data = &mir[bb];
+
+        debug!("codegen_block({:?}={:?})", bb, data);
+
+        for statement in &data.statements {
+            bx = self.codegen_statement(bx, statement);
+        }
+
+        self.codegen_terminator(bx, bb, data.terminator());
+    }
+
+    fn codegen_terminator(
+        &mut self,
+        mut bx: Bx,
+        bb: mir::BasicBlock,
+        terminator: &'tcx mir::Terminator<'tcx>,
+    ) {
+        debug!("codegen_terminator: {:?}", terminator);
+
+        // Create the cleanup bundle, if needed.
+        let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+        let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
+
+        self.set_debug_loc(&mut bx, terminator.source_info);
+        match terminator.kind {
+            mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+
+            mir::TerminatorKind::Abort => {
+                bx.abort();
+                // `abort` does not terminate the block, so we still need to generate
+                // an `unreachable` terminator after it.
+                bx.unreachable();
+            }
+
+            mir::TerminatorKind::Goto { target } => {
+                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, target);
+            }
+
+            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
+                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, values, targets);
+            }
+
+            mir::TerminatorKind::Return => {
+                self.codegen_return_terminator(bx);
+            }
+
+            mir::TerminatorKind::Unreachable => {
+                bx.unreachable();
+            }
+
+            mir::TerminatorKind::Drop { place, target, unwind } => {
+                self.codegen_drop_terminator(helper, bx, place, target, unwind);
+            }
+
+            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
+                self.codegen_assert_terminator(
+                    helper, bx, terminator, cond, expected, msg, target, cleanup,
+                );
+            }
+
+            mir::TerminatorKind::DropAndReplace { .. } => {
+                bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+            }
+
+            mir::TerminatorKind::Call {
+                ref func,
+                ref args,
+                ref destination,
+                cleanup,
+                from_hir_call: _,
+                fn_span,
+            } => {
+                self.codegen_call_terminator(
+                    helper,
+                    bx,
+                    terminator,
+                    func,
+                    args,
+                    destination,
+                    cleanup,
+                    fn_span,
+                );
+            }
+            mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
+                bug!("generator ops in codegen")
+            }
+            mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
+                bug!("borrowck false edges in codegen")
+            }
+
+            mir::TerminatorKind::InlineAsm {
+                template,
+                ref operands,
+                options,
+                line_spans,
+                destination,
+            } => {
+                self.codegen_asm_terminator(
+                    helper,
+                    bx,
+                    terminator,
+                    template,
+                    operands,
+                    options,
+                    line_spans,
+                    destination,
+                );
+            }
+        }
+    }
+
+    fn codegen_argument(
+        &mut self,
+        bx: &mut Bx,
+        op: OperandRef<'tcx, Bx::Value>,
+        llargs: &mut Vec<Bx::Value>,
+        arg: &ArgAbi<'tcx, Ty<'tcx>>,
+    ) {
+        // Fill padding with undef value, where applicable.
+        if let Some(ty) = arg.pad {
+            llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
+        }
+
+        if arg.is_ignore() {
+            return;
+        }
+
+        if let PassMode::Pair(..) = arg.mode {
+            match op.val {
+                Pair(a, b) => {
+                    llargs.push(a);
+                    llargs.push(b);
+                    return;
+                }
+                _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
+            }
+        } else if arg.is_unsized_indirect() {
+            match op.val {
+                Ref(a, Some(b), _) => {
+                    llargs.push(a);
+                    llargs.push(b);
+                    return;
+                }
+                _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
+            }
+        }
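+        // E.g. a scalar pair such as `(i32, i64)` contributes its two scalar
+        // halves to `llargs` above, while an unsized indirect argument
+        // contributes its data pointer plus its metadata (length or vtable).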
+
+        // Force by-ref if we have to load through a cast pointer.
+        let (mut llval, align, by_ref) = match op.val {
+            Immediate(_) | Pair(..) => match arg.mode {
+                PassMode::Indirect(..) | PassMode::Cast(_) => {
+                    let scratch = PlaceRef::alloca(bx, arg.layout);
+                    op.val.store(bx, scratch);
+                    (scratch.llval, scratch.align, true)
+                }
+                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
+            },
+            Ref(llval, _, align) => {
+                if arg.is_indirect() && align < arg.layout.align.abi {
+                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly,
+                    // so copy it into a properly aligned temporary first. As of Rust 1.16 we
+                    // believe only temporaries are passed here anyway, but we shouldn't leave
+                    // scary latent bugs around.
+
+                    let scratch = PlaceRef::alloca(bx, arg.layout);
+                    base::memcpy_ty(
+                        bx,
+                        scratch.llval,
+                        scratch.align,
+                        llval,
+                        align,
+                        op.layout,
+                        MemFlags::empty(),
+                    );
+                    (scratch.llval, scratch.align, true)
+                } else {
+                    (llval, align, true)
+                }
+            }
+        };
+
+        if by_ref && !arg.is_indirect() {
+            // Have to load the argument, maybe while casting it.
+            if let PassMode::Cast(ty) = arg.mode {
+                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
+                llval = bx.load(addr, align.min(arg.layout.align.abi));
+            } else {
+                // We can't use `PlaceRef::load` here because the argument
+                // may have a type we don't treat as immediate, but the ABI
+                // used for this call is passing it by-value. In that case,
+                // the load would just produce `OperandValue::Ref` instead
+                // of the `OperandValue::Immediate` we need for the call.
+                llval = bx.load(llval, align);
+                if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+                    if scalar.is_bool() {
+                        bx.range_metadata(llval, 0..2);
+                    }
+                }
+                // We store bools as `i8` so we need to truncate to `i1`.
+                llval = base::to_immediate(bx, llval, arg.layout);
+            }
+        }
+
+        llargs.push(llval);
+    }
+
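+    /// Flattens a tuple operand into individual call arguments. This is used
+    /// for the "rust-call" ABI: a call through `Fn(A, B)` passes `(a, b)` as
+    /// a single tuple operand, which is spread back into separate ABI
+    /// arguments here.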
+    fn codegen_arguments_untupled(
+        &mut self,
+        bx: &mut Bx,
+        operand: &mir::Operand<'tcx>,
+        llargs: &mut Vec<Bx::Value>,
+        args: &[ArgAbi<'tcx, Ty<'tcx>>],
+    ) {
+        let tuple = self.codegen_operand(bx, operand);
+
+        // Handle both by-ref and immediate tuples.
+        if let Ref(llval, None, align) = tuple.val {
+            let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+            for i in 0..tuple.layout.fields.count() {
+                let field_ptr = tuple_ptr.project_field(bx, i);
+                let field = bx.load_operand(field_ptr);
+                self.codegen_argument(bx, field, llargs, &args[i]);
+            }
+        } else if let Ref(_, Some(_), _) = tuple.val {
+            bug!("closure arguments must be sized")
+        } else {
+            // If the tuple is immediate, the elements are as well.
+            for i in 0..tuple.layout.fields.count() {
+                let op = tuple.extract_field(bx, i);
+                self.codegen_argument(bx, op, llargs, &args[i]);
+            }
+        }
+    }
+
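+    /// Returns the `Location` operand for a `#[track_caller]` call: the
+    /// caller location propagated into this function if it is itself
+    /// `#[track_caller]`, and otherwise a constant location built from
+    /// `span`.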
+    fn get_caller_location(&mut self, bx: &mut Bx, span: Span) -> OperandRef<'tcx, Bx::Value> {
+        self.caller_location.unwrap_or_else(|| {
+            let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+            let caller = bx.tcx().sess.source_map().lookup_char_pos(topmost.lo());
+            let const_loc = bx.tcx().const_caller_location((
+                Symbol::intern(&caller.file.name.to_string()),
+                caller.line as u32,
+                caller.col_display as u32 + 1,
+            ));
+            OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
+        })
+    }
+
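+    /// Returns the alloca used to stash the `(*mut u8, i32)` pair produced
+    /// by a landing pad, creating it on first use.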
+    fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
+        let cx = bx.cx();
+        if let Some(slot) = self.personality_slot {
+            slot
+        } else {
+            let layout = cx.layout_of(
+                cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
+            );
+            let slot = PlaceRef::alloca(bx, layout);
+            self.personality_slot = Some(slot);
+            slot
+        }
+    }
+
+    /// Returns the landing-pad wrapper around the given basic block.
+    ///
+    /// No-op in MSVC SEH scheme.
+    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Bx::BasicBlock {
+        if let Some(block) = self.landing_pads[target_bb] {
+            return block;
+        }
+
+        let block = self.blocks[target_bb];
+        let landing_pad = self.landing_pad_uncached(block);
+        self.landing_pads[target_bb] = Some(landing_pad);
+        landing_pad
+    }
+
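+    /// Builds a GNU-style cleanup pad: a `landingpad { i8*, i32 }` whose two
+    /// components are stored into the personality slot before branching on
+    /// to the target block.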
+    fn landing_pad_uncached(&mut self, target_bb: Bx::BasicBlock) -> Bx::BasicBlock {
+        if base::wants_msvc_seh(self.cx.sess()) {
+            span_bug!(self.mir.span, "landing pad was not inserted?")
+        }
+
+        let mut bx = self.new_block("cleanup");
+
+        let llpersonality = self.cx.eh_personality();
+        let llretty = self.landing_pad_type();
+        let lp = bx.landing_pad(llretty, llpersonality, 1);
+        bx.set_cleanup(lp);
+
+        let slot = self.get_personality_slot(&mut bx);
+        slot.storage_live(&mut bx);
+        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+
+        bx.br(target_bb);
+        bx.llbb()
+    }
+
+    fn landing_pad_type(&self) -> Bx::Type {
+        let cx = self.cx;
+        cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
+    }
+
+    fn unreachable_block(&mut self) -> Bx::BasicBlock {
+        self.unreachable_block.unwrap_or_else(|| {
+            let mut bx = self.new_block("unreachable");
+            bx.unreachable();
+            self.unreachable_block = Some(bx.llbb());
+            bx.llbb()
+        })
+    }
+
+    pub fn new_block(&self, name: &str) -> Bx {
+        Bx::new_block(self.cx, self.llfn, name)
+    }
+
+    pub fn build_block(&self, bb: mir::BasicBlock) -> Bx {
+        let mut bx = Bx::with_cx(self.cx);
+        bx.position_at_end(self.blocks[bb]);
+        bx
+    }
+
+    fn make_return_dest(
+        &mut self,
+        bx: &mut Bx,
+        dest: mir::Place<'tcx>,
+        fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
+        llargs: &mut Vec<Bx::Value>,
+        is_intrinsic: bool,
+    ) -> ReturnDest<'tcx, Bx::Value> {
+        // If the return is ignored, we can just return a do-nothing `ReturnDest`.
+        if fn_ret.is_ignore() {
+            return ReturnDest::Nothing;
+        }
+        let dest = if let Some(index) = dest.as_local() {
+            match self.locals[index] {
+                LocalRef::Place(dest) => dest,
+                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+                LocalRef::Operand(None) => {
+                    // Handle temporary places, specifically `Operand` ones, as
+                    // they don't have `alloca`s.
+                    return if fn_ret.is_indirect() {
+                        // An odd, but possible, case: we have an operand temporary,
+                        // but the calling convention has an indirect return.
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+                        tmp.storage_live(bx);
+                        llargs.push(tmp.llval);
+                        ReturnDest::IndirectOperand(tmp, index)
+                    } else if is_intrinsic {
+                        // Currently, intrinsics always need a location to store
+                        // the result, so we create a temporary `alloca` for the
+                        // result.
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+                        tmp.storage_live(bx);
+                        ReturnDest::IndirectOperand(tmp, index)
+                    } else {
+                        ReturnDest::DirectOperand(index)
+                    };
+                }
+                LocalRef::Operand(Some(_)) => {
+                    bug!("place local already assigned to");
+                }
+            }
+        } else {
+            self.codegen_place(
+                bx,
+                mir::PlaceRef { local: dest.local, projection: &dest.projection },
+            )
+        };
+        if fn_ret.is_indirect() {
+            if dest.align < dest.layout.align.abi {
+                // Currently, MIR code generation does not create calls
+                // that store directly to fields of packed structs (in
+                // fact, the calls it creates write only to temps).
+                //
+                // If someone changes that, please update this code path
+                // to create a temporary.
+                span_bug!(self.mir.span, "can't directly store to unaligned value");
+            }
+            llargs.push(dest.llval);
+            ReturnDest::Nothing
+        } else {
+            ReturnDest::Store(dest)
+        }
+    }
+
+    fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
+        if let Some(index) = dst.as_local() {
+            match self.locals[index] {
+                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
+                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
+                LocalRef::Operand(None) => {
+                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
+                    assert!(!dst_layout.ty.has_erasable_regions());
+                    let place = PlaceRef::alloca(bx, dst_layout);
+                    place.storage_live(bx);
+                    self.codegen_transmute_into(bx, src, place);
+                    let op = bx.load_operand(place);
+                    place.storage_dead(bx);
+                    self.locals[index] = LocalRef::Operand(Some(op));
+                    self.debug_introduce_local(bx, index);
+                }
+                LocalRef::Operand(Some(op)) => {
+                    assert!(op.layout.is_zst(), "assigning to initialized SSA temp");
+                }
+            }
+        } else {
+            let dst = self.codegen_place(bx, dst.as_ref());
+            self.codegen_transmute_into(bx, src, dst);
+        }
+    }
+
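+    /// Implements transmute as a store through a pointer cast: `dst.llval`
+    /// is reinterpreted as a pointer to `src`'s backend type, and the store
+    /// uses the smaller of the source and destination alignments.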
+    fn codegen_transmute_into(
+        &mut self,
+        bx: &mut Bx,
+        src: &mir::Operand<'tcx>,
+        dst: PlaceRef<'tcx, Bx::Value>,
+    ) {
+        let src = self.codegen_operand(bx, src);
+        let llty = bx.backend_type(src.layout);
+        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+        let align = src.layout.align.abi.min(dst.align);
+        src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
+    }
+
+    // Stores the return value of a function call into its final location.
+    fn store_return(
+        &mut self,
+        bx: &mut Bx,
+        dest: ReturnDest<'tcx, Bx::Value>,
+        ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        llval: Bx::Value,
+    ) {
+        use self::ReturnDest::*;
+
+        match dest {
+            Nothing => (),
+            Store(dst) => bx.store_arg(&ret_abi, llval, dst),
+            IndirectOperand(tmp, index) => {
+                let op = bx.load_operand(tmp);
+                tmp.storage_dead(bx);
+                self.locals[index] = LocalRef::Operand(Some(op));
+                self.debug_introduce_local(bx, index);
+            }
+            DirectOperand(index) => {
+                // If there is a cast, we have to store and reload.
+                let op = if let PassMode::Cast(_) = ret_abi.mode {
+                    let tmp = PlaceRef::alloca(bx, ret_abi.layout);
+                    tmp.storage_live(bx);
+                    bx.store_arg(&ret_abi, llval, tmp);
+                    let op = bx.load_operand(tmp);
+                    tmp.storage_dead(bx);
+                    op
+                } else {
+                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
+                };
+                self.locals[index] = LocalRef::Operand(Some(op));
+                self.debug_introduce_local(bx, index);
+            }
+        }
+    }
+}
+
+enum ReturnDest<'tcx, V> {
+    // Do nothing; the return value is indirect or ignored.
+    Nothing,
+    // Store the return value to the pointer.
+    Store(PlaceRef<'tcx, V>),
+    // Store an indirect return value to an operand local place.
+    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
+    // Store a direct return value to an operand local place.
+    DirectOperand(mir::Local),
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
new file mode 100644
index 00000000000..4943e279c7e
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -0,0 +1,91 @@
+use crate::mir::operand::OperandRef;
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::abi::Abi;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn eval_mir_constant_to_operand(
+        &mut self,
+        bx: &mut Bx,
+        constant: &mir::Constant<'tcx>,
+    ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
+        let val = self.eval_mir_constant(constant)?;
+        let ty = self.monomorphize(&constant.literal.ty);
+        Ok(OperandRef::from_const(bx, val, ty))
+    }
+
+    pub fn eval_mir_constant(
+        &mut self,
+        constant: &mir::Constant<'tcx>,
+    ) -> Result<ConstValue<'tcx>, ErrorHandled> {
+        match self.monomorphize(&constant.literal).val {
+            ty::ConstKind::Unevaluated(def, substs, promoted) => self
+                .cx
+                .tcx()
+                .const_eval_resolve(ty::ParamEnv::reveal_all(), def, substs, promoted, None)
+                .map_err(|err| {
+                    if promoted.is_none() {
+                        self.cx
+                            .tcx()
+                            .sess
+                            .span_err(constant.span, "erroneous constant encountered");
+                    }
+                    err
+                }),
+            ty::ConstKind::Value(value) => Ok(value),
+            err => span_bug!(
+                constant.span,
+                "encountered bad ConstKind after monomorphizing: {:?}",
+                err
+            ),
+        }
+    }
+
+    /// Processes a constant containing SIMD shuffle indices.
+    pub fn simd_shuffle_indices(
+        &mut self,
+        bx: &Bx,
+        span: Span,
+        ty: Ty<'tcx>,
+        constant: Result<ConstValue<'tcx>, ErrorHandled>,
+    ) -> (Bx::Value, Ty<'tcx>) {
+        constant
+            .map(|val| {
+                let field_ty = ty.builtin_index().unwrap();
+                let c = ty::Const::from_value(bx.tcx(), val, ty);
+                let values: Vec<_> = bx
+                    .tcx()
+                    .destructure_const(ty::ParamEnv::reveal_all().and(&c))
+                    .fields
+                    .iter()
+                    .map(|field| {
+                        if let Some(prim) = field.val.try_to_scalar() {
+                            let layout = bx.layout_of(field_ty);
+                            let scalar = match layout.abi {
+                                Abi::Scalar(ref x) => x,
+                                _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
+                            };
+                            bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
+                        } else {
+                            bug!("simd shuffle field {:?}", field)
+                        }
+                    })
+                    .collect();
+                let llval = bx.const_struct(&values, false);
+                (llval, c.ty)
+            })
+            .unwrap_or_else(|_| {
+                bx.tcx().sess.span_err(span, "could not evaluate shuffle_indices at compile time");
+                // We've errored, so we don't have to produce working code.
+                let ty = self.monomorphize(&ty);
+                let llty = bx.backend_type(bx.layout_of(ty));
+                (bx.const_undef(llty), ty)
+            })
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
new file mode 100644
index 00000000000..a2ad27b925c
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -0,0 +1,35 @@
+use crate::traits::*;
+
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::Coverage;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
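+    /// Lowers a MIR `Coverage` statement. `Counter`s emit an
+    /// `instrprof.increment` intrinsic call in addition to being recorded in
+    /// the coverage map; `Expression`s and `Unreachable` regions are only
+    /// recorded in the map.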
+    pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage) {
+        let Coverage { kind, code_region } = coverage;
+        match kind {
+            CoverageKind::Counter { function_source_hash, id } => {
+                bx.add_counter_region(self.instance, function_source_hash, id, code_region);
+
+                let coverageinfo = bx.tcx().coverageinfo(self.instance.def_id());
+
+                let fn_name = bx.create_pgo_func_name_var(self.instance);
+                let hash = bx.const_u64(function_source_hash);
+                let num_counters = bx.const_u32(coverageinfo.num_counters);
+                let id = bx.const_u32(u32::from(id));
+                debug!(
+                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                    fn_name, hash, num_counters, id,
+                );
+                bx.instrprof_increment(fn_name, hash, num_counters, id);
+            }
+            CoverageKind::Expression { id, lhs, op, rhs } => {
+                bx.add_counter_expression_region(self.instance, id, lhs, op, rhs, code_region);
+            }
+            CoverageKind::Unreachable => {
+                bx.add_unreachable_region(self.instance, code_region);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
new file mode 100644
index 00000000000..d8a530d98fa
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -0,0 +1,361 @@
+use crate::traits::*;
+use rustc_hir::def_id::CrateNum;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::abi::{LayoutOf, Size};
+
+use super::operand::OperandValue;
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+pub struct FunctionDebugContext<D> {
+    pub scopes: IndexVec<mir::SourceScope, DebugScope<D>>,
+    pub defining_crate: CrateNum,
+}
+
+#[derive(Copy, Clone)]
+pub enum VariableKind {
+    ArgumentVariable(usize /*index*/),
+    LocalVariable,
+}
+
+/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
+#[derive(Copy, Clone)]
+pub struct PerLocalVarDebugInfo<'tcx, D> {
+    pub name: Symbol,
+    pub source_info: mir::SourceInfo,
+
+    /// `DIVariable` returned by `create_dbg_var`.
+    pub dbg_var: Option<D>,
+
+    /// `.place.projection` from `mir::VarDebugInfo`.
+    pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct DebugScope<D> {
+    pub scope_metadata: Option<D>,
+    // Start and end offsets of the file to which this DIScope belongs.
+    // These are used to quickly determine whether some span refers to the same file.
+    pub file_start_pos: BytePos,
+    pub file_end_pos: BytePos,
+}
+
+impl<D> DebugScope<D> {
+    pub fn is_valid(&self) -> bool {
+        self.scope_metadata.is_some()
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
+        let (scope, span) = self.debug_loc(source_info);
+        if let Some(scope) = scope {
+            bx.set_source_location(scope, span);
+        }
+    }
+
+    pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
+        // Bail out if debug info emission is not enabled.
+        match self.debug_context {
+            None => return (None, source_info.span),
+            Some(_) => {}
+        }
+
+        // In order to have good line-stepping behavior in the debugger, we overwrite debug
+        // locations of macro expansions with that of the outermost expansion site
+        // (unless the crate is being compiled with `-Z debug-macros`).
+        if !source_info.span.from_expansion() || self.cx.sess().opts.debugging_opts.debug_macros {
+            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
+            (scope, source_info.span)
+        } else {
+            // Walk up the macro expansion chain until we reach a non-expanded span.
+            // We also stop at the function body level because no line stepping can occur
+            // at the level above that.
+            let span = rustc_span::hygiene::walk_chain(source_info.span, self.mir.span.ctxt());
+            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo());
+            // Use span of the outermost expansion site, while keeping the original lexical scope.
+            (scope, span)
+        }
+    }
+
+    // DILocations inherit source file name from the parent DIScope.  Due to macro expansions
+    // it may so happen that the current span belongs to a different file than the DIScope
+    // corresponding to span's containing source scope.  If so, we need to create a DIScope
+    // "extension" into that file.
+    fn scope_metadata_for_loc(
+        &self,
+        scope_id: mir::SourceScope,
+        pos: BytePos,
+    ) -> Option<Bx::DIScope> {
+        let debug_context = self.debug_context.as_ref()?;
+        let scope_metadata = debug_context.scopes[scope_id].scope_metadata;
+        if pos < debug_context.scopes[scope_id].file_start_pos
+            || pos >= debug_context.scopes[scope_id].file_end_pos
+        {
+            let sm = self.cx.sess().source_map();
+            let defining_crate = debug_context.defining_crate;
+            Some(self.cx.extend_scope_to_file(
+                scope_metadata.unwrap(),
+                &sm.lookup_char_pos(pos).file,
+                defining_crate,
+            ))
+        } else {
+            scope_metadata
+        }
+    }
+
+    /// Apply debuginfo and/or name, after creating the `alloca` for a local,
+    /// or initializing the local with an operand (whichever applies).
+    pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
+        let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
+
+        // FIXME(eddyb) maybe name the return place as `_0` or `return`?
+        if local == mir::RETURN_PLACE && !self.mir.local_decls[mir::RETURN_PLACE].is_user_variable()
+        {
+            return;
+        }
+
+        let vars = match &self.per_local_var_debug_info {
+            Some(per_local) => &per_local[local],
+            None => return,
+        };
+        let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied();
+        let has_proj = || vars.iter().any(|var| !var.projection.is_empty());
+
+        let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
+            let arg_index = local.index() - 1;
+
+            // Add debuginfo even to unnamed arguments.
+            // FIXME(eddyb) is this really needed?
+            if arg_index == 0 && has_proj() {
+                // Hide closure environments from debuginfo.
+                // FIXME(eddyb) shouldn't `ArgumentVariable` indices
+                // be offset to account for the hidden environment?
+                None
+            } else if whole_local_var.is_some() {
+                // No need to make up anything, there is a `mir::VarDebugInfo`
+                // covering the whole local.
+                // FIXME(eddyb) take `whole_local_var.source_info.scope` into
+                // account, just in case it doesn't use `ArgumentVariable`
+                // (after #67586 gets fixed).
+                None
+            } else {
+                let name = kw::Invalid;
+                let decl = &self.mir.local_decls[local];
+                let (scope, span) = if full_debug_info {
+                    self.debug_loc(decl.source_info)
+                } else {
+                    (None, decl.source_info.span)
+                };
+                let dbg_var = scope.map(|scope| {
+                    // FIXME(eddyb) is this `+ 1` needed at all?
+                    let kind = VariableKind::ArgumentVariable(arg_index + 1);
+
+                    self.cx.create_dbg_var(
+                        self.debug_context.as_ref().unwrap(),
+                        name,
+                        self.monomorphize(&decl.ty),
+                        scope,
+                        kind,
+                        span,
+                    )
+                });
+
+                Some(PerLocalVarDebugInfo {
+                    name,
+                    source_info: decl.source_info,
+                    dbg_var,
+                    projection: ty::List::empty(),
+                })
+            }
+        } else {
+            None
+        };
+
+        let local_ref = &self.locals[local];
+
+        let name = if bx.sess().fewer_names() {
+            None
+        } else {
+            Some(match whole_local_var.or(fallback_var) {
+                Some(var) if var.name != kw::Invalid => var.name.to_string(),
+                _ => format!("{:?}", local),
+            })
+        };
+
+        if let Some(name) = &name {
+            match local_ref {
+                LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
+                    bx.set_var_name(place.llval, name);
+                }
+                LocalRef::Operand(Some(operand)) => match operand.val {
+                    OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
+                        bx.set_var_name(x, name);
+                    }
+                    OperandValue::Pair(a, b) => {
+                        // FIXME(eddyb) these are scalar components,
+                        // maybe extract the high-level fields?
+                        bx.set_var_name(a, &(name.clone() + ".0"));
+                        bx.set_var_name(b, &(name.clone() + ".1"));
+                    }
+                },
+                LocalRef::Operand(None) => {}
+            }
+        }
+
+        if !full_debug_info || vars.is_empty() && fallback_var.is_none() {
+            return;
+        }
+
+        let base = match local_ref {
+            LocalRef::Operand(None) => return,
+
+            LocalRef::Operand(Some(operand)) => {
+                // Don't spill operands onto the stack in naked functions.
+                // See: https://github.com/rust-lang/rust/issues/42779
+                let attrs = bx.tcx().codegen_fn_attrs(self.instance.def_id());
+                if attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+                    return;
+                }
+
+                // "Spill" the value onto the stack, for debuginfo,
+                // without forcing non-debuginfo uses of the local
+                // to also load from the stack every single time.
+                // FIXME(#68817) use `llvm.dbg.value` instead,
+                // at least for the cases which LLVM handles correctly.
+                let spill_slot = PlaceRef::alloca(bx, operand.layout);
+                if let Some(name) = name {
+                    bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+                }
+                operand.val.store(bx, spill_slot);
+                spill_slot
+            }
+
+            LocalRef::Place(place) => *place,
+
+            // FIXME(eddyb) add debuginfo for unsized places too.
+            LocalRef::UnsizedPlace(_) => return,
+        };
+
+        let vars = vars.iter().copied().chain(fallback_var);
+
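+        // Translate each var's projection into debuginfo offsets: `Deref`
+        // starts a new indirect offset, `Field` adds the field's byte offset
+        // to the innermost one, and `Downcast` only refines the layout.
+        // E.g. `(*x).1` yields a single indirect offset at field 1's offset.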
+        for var in vars {
+            let mut layout = base.layout;
+            let mut direct_offset = Size::ZERO;
+            // FIXME(eddyb) use smallvec here.
+            let mut indirect_offsets = vec![];
+
+            for elem in &var.projection[..] {
+                match *elem {
+                    mir::ProjectionElem::Deref => {
+                        indirect_offsets.push(Size::ZERO);
+                        layout = bx.cx().layout_of(
+                            layout
+                                .ty
+                                .builtin_deref(true)
+                                .unwrap_or_else(|| {
+                                    span_bug!(var.source_info.span, "cannot deref `{}`", layout.ty)
+                                })
+                                .ty,
+                        );
+                    }
+                    mir::ProjectionElem::Field(field, _) => {
+                        let i = field.index();
+                        let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
+                        *offset += layout.fields.offset(i);
+                        layout = layout.field(bx.cx(), i);
+                    }
+                    mir::ProjectionElem::Downcast(_, variant) => {
+                        layout = layout.for_variant(bx.cx(), variant);
+                    }
+                    _ => span_bug!(
+                        var.source_info.span,
+                        "unsupported var debuginfo place `{:?}`",
+                        mir::Place { local, projection: var.projection },
+                    ),
+                }
+            }
+
+            let (scope, span) = self.debug_loc(var.source_info);
+            if let Some(scope) = scope {
+                if let Some(dbg_var) = var.dbg_var {
+                    bx.dbg_var_addr(
+                        dbg_var,
+                        scope,
+                        base.llval,
+                        direct_offset,
+                        &indirect_offsets,
+                        span,
+                    );
+                }
+            }
+        }
+    }
+
+    pub fn debug_introduce_locals(&self, bx: &mut Bx) {
+        if bx.sess().opts.debuginfo == DebugInfo::Full || !bx.sess().fewer_names() {
+            for local in self.locals.indices() {
+                self.debug_introduce_local(bx, local);
+            }
+        }
+    }
+
+    /// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
+    pub fn compute_per_local_var_debug_info(
+        &self,
+    ) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
+        let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
+
+        if !full_debug_info && self.cx.sess().fewer_names() {
+            return None;
+        }
+
+        let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
+        for var in &self.mir.var_debug_info {
+            let (scope, span) = if full_debug_info {
+                self.debug_loc(var.source_info)
+            } else {
+                (None, var.source_info.span)
+            };
+            let dbg_var = scope.map(|scope| {
+                let place = var.place;
+                let var_ty = self.monomorphized_place_ty(place.as_ref());
+                let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+                    && place.projection.is_empty()
+                    && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
+                {
+                    let arg_index = place.local.index() - 1;
+
+                    // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+                    // offset in closures to account for the hidden environment?
+                    // Also, is this `+ 1` needed at all?
+                    VariableKind::ArgumentVariable(arg_index + 1)
+                } else {
+                    VariableKind::LocalVariable
+                };
+                self.cx.create_dbg_var(
+                    self.debug_context.as_ref().unwrap(),
+                    var.name,
+                    var_ty,
+                    scope,
+                    var_kind,
+                    span,
+                )
+            });
+
+            per_local[var.place.local].push(PerLocalVarDebugInfo {
+                name: var.name,
+                source_info: var.source_info,
+                dbg_var,
+                projection: var.place.projection,
+            });
+        }
+        Some(per_local)
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
new file mode 100644
index 00000000000..26e6c354702
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -0,0 +1,492 @@
+use crate::base;
+use crate::traits::*;
+use rustc_errors::ErrorReported;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
+use rustc_target::abi::call::{FnAbi, PassMode};
+use rustc_target::abi::HasDataLayout;
+
+use std::iter;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+
+use self::analyze::CleanupKind;
+use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
+use self::place::PlaceRef;
+use rustc_middle::mir::traversal;
+
+use self::operand::{OperandRef, OperandValue};
+
+/// Master context for codegenning from MIR.
+pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+    instance: Instance<'tcx>,
+
+    mir: &'tcx mir::Body<'tcx>,
+
+    debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
+
+    llfn: Bx::Function,
+
+    cx: &'a Bx::CodegenCx,
+
+    fn_abi: FnAbi<'tcx, Ty<'tcx>>,
+
+    /// When unwinding is initiated, we have to store this personality
+    /// value somewhere so that we can load it and re-use it in the
+    /// resume instruction. The personality is a value that the C++-style
+    /// unwinding machinery uses to filter exceptions by type; we don't
+    /// care about the filtering itself. This field holds an alloca into
+    /// which the personality is stored and then later loaded when
+    /// generating the DIVERGE_BLOCK.
+    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
+
+    /// A `Block` for each MIR `BasicBlock`
+    blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,
+
+    /// The funclet status of each basic block
+    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
+
+    /// When targeting MSVC, this stores the cleanup info for each funclet
+    /// BB. This is initialized as we compute the funclets' head block in RPO.
+    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+
+    /// This stores the landing-pad block for a given BB, computed lazily on GNU
+    /// and eagerly on MSVC.
+    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+    /// Cached unreachable block
+    unreachable_block: Option<Bx::BasicBlock>,
+
+    /// The location where each MIR arg/var/tmp/ret is stored. This is
+    /// usually a `PlaceRef` representing an alloca, but not always:
+    /// sometimes we can skip the alloca and just store the value
+    /// directly using an `OperandRef`, which makes for tighter LLVM
+    /// IR. The conditions for using an `OperandRef` are as follows:
+    ///
+    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
+    /// - the operand must never be referenced indirectly
+    ///     - we should not take its address using the `&` operator
+    ///     - nor should it appear in a place path like `tmp.a`
+    /// - the operand must be defined by an rvalue that can generate immediate
+    ///   values
+    ///
+    /// Avoiding allocs can also be important for certain intrinsics,
+    /// notably `expect`.
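+    ///
+    /// For example, in `let y = x + 1;`, if `y`'s address is never taken,
+    /// `y` can live as an `OperandRef` and no alloca is emitted for it.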
+    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
+
+    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
+    /// This is `None` if no variable debuginfo/names are needed.
+    per_local_var_debug_info:
+        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
+
+    /// Caller location propagated if this function has `#[track_caller]`.
+    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn monomorphize<T>(&self, value: &T) -> T
+    where
+        T: Copy + TypeFoldable<'tcx>,
+    {
+        debug!("monomorphize: self.instance={:?}", self.instance);
+        if let Some(substs) = self.instance.substs_for_mir_body() {
+            self.cx.tcx().subst_and_normalize_erasing_regions(
+                substs,
+                ty::ParamEnv::reveal_all(),
+                &value,
+            )
+        } else {
+            self.cx.tcx().normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
+        }
+    }
+}
+
+enum LocalRef<'tcx, V> {
+    Place(PlaceRef<'tcx, V>),
+    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
+    /// `*p` is the fat pointer that references the actual unsized place.
+    /// Every time it is initialized, we have to reallocate the place
+    /// and update the fat pointer. That's the reason why it is indirect.
+    UnsizedPlace(PlaceRef<'tcx, V>),
+    Operand(Option<OperandRef<'tcx, V>>),
+}
+
+impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
+    fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> LocalRef<'tcx, V> {
+        if layout.is_zst() {
+            // Zero-size temporaries aren't always initialized, which
+            // doesn't matter because they don't contain data, but
+            // we need something in the operand.
+            LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
+        } else {
+            LocalRef::Operand(None)
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    cx: &'a Bx::CodegenCx,
+    instance: Instance<'tcx>,
+) {
+    assert!(!instance.substs.needs_infer());
+
+    let llfn = cx.get_fn(instance);
+
+    let mir = cx.tcx().instance_mir(instance.def);
+
+    let fn_abi = FnAbi::of_instance(cx, instance, &[]);
+    debug!("fn_abi: {:?}", fn_abi);
+
+    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
+
+    let mut bx = Bx::new_block(cx, llfn, "start");
+
+    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
+        bx.set_personality_fn(cx.eh_personality());
+    }
+
+    bx.sideeffect();
+
+    let cleanup_kinds = analyze::cleanup_kinds(&mir);
+    // Allocate a `Block` for every basic block, except
+    // the start block, if nothing loops back to it.
+    let reentrant_start_block = !mir.predecessors()[mir::START_BLOCK].is_empty();
+    let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> = mir
+        .basic_blocks()
+        .indices()
+        .map(|bb| {
+            if bb == mir::START_BLOCK && !reentrant_start_block {
+                bx.llbb()
+            } else {
+                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
+            }
+        })
+        .collect();
+
+    let (landing_pads, funclets) = create_funclets(&mir, &mut bx, &cleanup_kinds, &block_bxs);
+    let mut fx = FunctionCx {
+        instance,
+        mir,
+        llfn,
+        fn_abi,
+        cx,
+        personality_slot: None,
+        blocks: block_bxs,
+        unreachable_block: None,
+        cleanup_kinds,
+        landing_pads,
+        funclets,
+        locals: IndexVec::new(),
+        debug_context,
+        per_local_var_debug_info: None,
+        caller_location: None,
+    };
+
+    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info();
+
+    for const_ in &mir.required_consts {
+        if let Err(err) = fx.eval_mir_constant(const_) {
+            match err {
+                // errored or at least linted
+                ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {}
+                ErrorHandled::TooGeneric => {
+                    span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
+                }
+            }
+        }
+    }
+
+    let memory_locals = analyze::non_ssa_locals(&fx);
+
+    // Allocate variable and temp allocas
+    fx.locals = {
+        let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+
+        let mut allocate_local = |local| {
+            let decl = &mir.local_decls[local];
+            let layout = bx.layout_of(fx.monomorphize(&decl.ty));
+            assert!(!layout.ty.has_erasable_regions());
+
+            if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
+                debug!("alloc: {:?} (return place) -> place", local);
+                let llretptr = bx.get_param(0);
+                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
+            }
+
+            if memory_locals.contains(local) {
+                debug!("alloc: {:?} -> place", local);
+                if layout.is_unsized() {
+                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+                } else {
+                    LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+                }
+            } else {
+                debug!("alloc: {:?} -> operand", local);
+                LocalRef::new_operand(&mut bx, layout)
+            }
+        };
+
+        let retptr = allocate_local(mir::RETURN_PLACE);
+        iter::once(retptr)
+            .chain(args.into_iter())
+            .chain(mir.vars_and_temps_iter().map(allocate_local))
+            .collect()
+    };
+
+    // Apply debuginfo to the newly allocated locals.
+    fx.debug_introduce_locals(&mut bx);
+
+    // Branch to the START block, if it's not the entry block.
+    if reentrant_start_block {
+        bx.br(fx.blocks[mir::START_BLOCK]);
+    }
+
+    let rpo = traversal::reverse_postorder(&mir);
+    let mut visited = BitSet::new_empty(mir.basic_blocks().len());
+
+    // Codegen the body of each block using reverse postorder
+    for (bb, _) in rpo {
+        visited.insert(bb.index());
+        fx.codegen_block(bb);
+    }
+
+    // Remove blocks that haven't been visited during the reverse-postorder
+    // traversal above; they are unreachable.
+    for bb in mir.basic_blocks().indices() {
+        // Unreachable block
+        if !visited.contains(bb.index()) {
+            debug!("codegen_mir: block {:?} was not visited", bb);
+            unsafe {
+                bx.delete_basic_block(fx.blocks[bb]);
+            }
+        }
+    }
+}
+
+fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    mir: &'tcx mir::Body<'tcx>,
+    bx: &mut Bx,
+    cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
+    block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>,
+) -> (
+    IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+    IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+) {
+    block_bxs
+        .iter_enumerated()
+        .zip(cleanup_kinds)
+        .map(|((bb, &llbb), cleanup_kind)| {
+            match *cleanup_kind {
+                CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
+                _ => return (None, None),
+            }
+
+            let funclet;
+            let ret_llbb;
+            match mir[bb].terminator.as_ref().map(|t| &t.kind) {
+                // This is a basic block that we're aborting the program for,
+                // notably in an `extern` function. These basic blocks are inserted
+                // so that we assert that `extern` functions do indeed not panic,
+                // and if they do we abort the process.
+                //
+                // On MSVC these are tricky though (where we're doing funclets). If
+                // we were to do a cleanuppad (like below) the normal functions like
+                // `longjmp` would trigger the abort logic, terminating the
+                // program. Instead we insert the equivalent of `catch(...)` for C++
+                // which magically doesn't trigger when `longjmp` files over this
+                // frame.
+                //
+                // Lots more discussion can be found on #48251 but this codegen is
+                // modeled after clang's for:
+                //
+                //      try {
+                //          foo();
+                //      } catch (...) {
+                //          bar();
+                //      }
+                Some(&mir::TerminatorKind::Abort) => {
+                    let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
+                    let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
+                    ret_llbb = cs_bx.llbb();
+
+                    let cs = cs_bx.catch_switch(None, None, 1);
+                    cs_bx.add_handler(cs, cp_bx.llbb());
+
+                    // The "null" here is actually a RTTI type descriptor for the
+                    // C++ personality function, but `catch (...)` has no type so
+                    // it's null. The 64 here is actually a bitfield which
+                    // represents that this is a catch-all block.
+                    let null = bx.const_null(
+                        bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space),
+                    );
+                    let sixty_four = bx.const_i32(64);
+                    funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
+                    cp_bx.br(llbb);
+                }
+                _ => {
+                    let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
+                    ret_llbb = cleanup_bx.llbb();
+                    funclet = cleanup_bx.cleanup_pad(None, &[]);
+                    cleanup_bx.br(llbb);
+                }
+            };
+
+            (Some(ret_llbb), Some(funclet))
+        })
+        .unzip()
+}
+
+/// Produces, for each argument, a `Value` pointing at the
+/// argument's value. As arguments are places, these are always
+/// indirect.
+fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    fx: &mut FunctionCx<'a, 'tcx, Bx>,
+    memory_locals: &BitSet<mir::Local>,
+) -> Vec<LocalRef<'tcx, Bx::Value>> {
+    let mir = fx.mir;
+    let mut idx = 0;
+    let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
+
+    let args = mir
+        .args_iter()
+        .enumerate()
+        .map(|(arg_index, local)| {
+            let arg_decl = &mir.local_decls[local];
+
+            if Some(local) == mir.spread_arg {
+                // This argument (e.g., the last argument in the "rust-call" ABI)
+                // is a tuple that was spread at the ABI level and now we have
+                // to reconstruct it into a tuple local variable, from multiple
+                // individual LLVM function arguments.
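+                // E.g. a closure's `extern "rust-call"` shim receives the
+                // tuple's fields as separate ABI arguments and stores them
+                // back into a single tuple alloca here.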
+
+                let arg_ty = fx.monomorphize(&arg_decl.ty);
+                let tupled_arg_tys = match arg_ty.kind {
+                    ty::Tuple(ref tys) => tys,
+                    _ => bug!("spread argument isn't a tuple?!"),
+                };
+
+                let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+                for i in 0..tupled_arg_tys.len() {
+                    let arg = &fx.fn_abi.args[idx];
+                    idx += 1;
+                    if arg.pad.is_some() {
+                        llarg_idx += 1;
+                    }
+                    let pr_field = place.project_field(bx, i);
+                    bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
+                }
+
+                return LocalRef::Place(place);
+            }
+
+            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
+                let arg_ty = fx.monomorphize(&arg_decl.ty);
+
+                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+                bx.va_start(va_list.llval);
+
+                return LocalRef::Place(va_list);
+            }
+
+            let arg = &fx.fn_abi.args[idx];
+            idx += 1;
+            if arg.pad.is_some() {
+                llarg_idx += 1;
+            }
+
+            if !memory_locals.contains(local) {
+                // We don't have to cast or keep the argument in the alloca.
+                // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
+                // of putting everything in allocas just so we can use llvm.dbg.declare.
+                let local = |op| LocalRef::Operand(Some(op));
+                match arg.mode {
+                    PassMode::Ignore => {
+                        return local(OperandRef::new_zst(bx, arg.layout));
+                    }
+                    PassMode::Direct(_) => {
+                        let llarg = bx.get_param(llarg_idx);
+                        llarg_idx += 1;
+                        return local(OperandRef::from_immediate_or_packed_pair(
+                            bx, llarg, arg.layout,
+                        ));
+                    }
+                    PassMode::Pair(..) => {
+                        let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
+                        llarg_idx += 2;
+
+                        return local(OperandRef {
+                            val: OperandValue::Pair(a, b),
+                            layout: arg.layout,
+                        });
+                    }
+                    _ => {}
+                }
+            }
+
+            if arg.is_sized_indirect() {
+                // Don't copy an indirect argument to an alloca; the caller
+                // already put it in a temporary alloca and gave it up.
+                // FIXME: lifetimes
+                let llarg = bx.get_param(llarg_idx);
+                llarg_idx += 1;
+                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
+            } else if arg.is_unsized_indirect() {
+                // As the storage for the indirect argument lives for the
+                // duration of the function call, we just copy the fat pointer.
+                let llarg = bx.get_param(llarg_idx);
+                llarg_idx += 1;
+                let llextra = bx.get_param(llarg_idx);
+                llarg_idx += 1;
+                let indirect_operand = OperandValue::Pair(llarg, llextra);
+
+                let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
+                indirect_operand.store(bx, tmp);
+                LocalRef::UnsizedPlace(tmp)
+            } else {
+                let tmp = PlaceRef::alloca(bx, arg.layout);
+                bx.store_fn_arg(arg, &mut llarg_idx, tmp);
+                LocalRef::Place(tmp)
+            }
+        })
+        .collect::<Vec<_>>();
+
+    if fx.instance.def.requires_caller_location(bx.tcx()) {
+        assert_eq!(
+            fx.fn_abi.args.len(),
+            args.len() + 1,
+            "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
+        );
+
+        let arg = fx.fn_abi.args.last().unwrap();
+        match arg.mode {
+            PassMode::Direct(_) => (),
+            _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
+        }
+
+        fx.caller_location = Some(OperandRef {
+            val: OperandValue::Immediate(bx.get_param(llarg_idx)),
+            layout: arg.layout,
+        });
+    }
+
+    args
+}
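
A minimal sketch of where a `spread_arg` comes from (assuming the usual `Fn` trait lowering): calling a closure through the `Fn*` traits passes the arguments as one tuple at the type level, but the "rust-call" ABI spreads them into individual parameters, which the code above reassembles into a single tuple local.

    // Sketch: `f(a, b)` goes through `Fn::call(&f, (a, b))`, whose
    // "rust-call" shim has a spread tuple argument of type `(u32, u64)`.
    fn call_with(f: impl Fn(u32, u64) -> u64, a: u32, b: u64) -> u64 {
        f(a, b)
    }

    fn main() {
        assert_eq!(call_with(|a, b| u64::from(a) + b, 2, 40), 42);
    }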
+
+mod analyze;
+mod block;
+pub mod constant;
+pub mod coverageinfo;
+pub mod debuginfo;
+pub mod operand;
+pub mod place;
+mod rvalue;
+mod statement;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
new file mode 100644
index 00000000000..937c7457c63
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -0,0 +1,471 @@
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_errors::ErrorReported;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled, Pointer, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Abi, Align, LayoutOf, Size};
+
+use std::fmt;
+
+/// The representation of a Rust value. The enum variant is in fact
+/// uniquely determined by the value's type, but is kept as a
+/// safety check.
+#[derive(Copy, Clone, Debug)]
+pub enum OperandValue<V> {
+    /// A reference to the actual operand. The data is guaranteed
+    /// to be valid for the operand's lifetime.
+    /// The second value, if any, is the extra data (vtable or length)
+    /// which indicates that it refers to an unsized rvalue.
+    Ref(V, Option<V>, Align),
+    /// A single LLVM value.
+    Immediate(V),
+    /// A pair of immediate LLVM values. Used by fat pointers too.
+    Pair(V, V),
+}
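
As a concrete illustration of how the variant is determined by the type (a sketch; the actual choice is made from the monomorphized layout, so the comments below are assumptions about common targets):

    fn main() {
        let imm: u64 = 1;             // single scalar: `Immediate`
        let pair: &[u8] = &[1, 2, 3]; // fat pointer (data + length): `Pair`
        let big = [0u8; 256];         // large aggregate, kept in memory: `Ref`
        let _ = (imm, pair, big);
    }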
+
+/// An `OperandRef` is an "SSA" reference to a Rust value, along with
+/// its type.
+///
+/// NOTE: unless you know a value's type exactly, you should not
+/// generate LLVM opcodes acting on it and instead act via methods,
+/// to avoid nasty edge cases. In particular, using `Builder::store`
+/// directly is sure to cause problems -- use `OperandRef::store`
+/// instead.
+#[derive(Copy, Clone)]
+pub struct OperandRef<'tcx, V> {
+    // The value.
+    pub val: OperandValue<V>,
+
+    // The layout of the value, based on its Rust type.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<V: CodegenObject> fmt::Debug for OperandRef<'tcx, V> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
+    pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> OperandRef<'tcx, V> {
+        assert!(layout.is_zst());
+        OperandRef {
+            val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))),
+            layout,
+        }
+    }
+
+    pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        val: ConstValue<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> Self {
+        let layout = bx.layout_of(ty);
+
+        if layout.is_zst() {
+            return OperandRef::new_zst(bx, layout);
+        }
+
+        let val = match val {
+            ConstValue::Scalar(x) => {
+                let scalar = match layout.abi {
+                    Abi::Scalar(ref x) => x,
+                    _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
+                };
+                let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
+                OperandValue::Immediate(llval)
+            }
+            ConstValue::Slice { data, start, end } => {
+                let a_scalar = match layout.abi {
+                    Abi::ScalarPair(ref a, _) => a,
+                    _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout),
+                };
+                let a = Scalar::from(Pointer::new(
+                    bx.tcx().create_memory_alloc(data),
+                    Size::from_bytes(start),
+                ));
+                let a_llval = bx.scalar_to_backend(
+                    a,
+                    a_scalar,
+                    bx.scalar_pair_element_backend_type(layout, 0, true),
+                );
+                let b_llval = bx.const_usize((end - start) as u64);
+                OperandValue::Pair(a_llval, b_llval)
+            }
+            ConstValue::ByRef { alloc, offset } => {
+                return bx.load_operand(bx.from_const_alloc(layout, alloc, offset));
+            }
+        };
+
+        OperandRef { val, layout }
+    }
+
+    /// Asserts that this operand refers to a scalar and returns
+    /// its value.
+    pub fn immediate(self) -> V {
+        match self.val {
+            OperandValue::Immediate(s) => s,
+            _ => bug!("not immediate: {:?}", self),
+        }
+    }
+
+    pub fn deref<Cx: LayoutTypeMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
+        let projected_ty = self
+            .layout
+            .ty
+            .builtin_deref(true)
+            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self))
+            .ty;
+        let (llptr, llextra) = match self.val {
+            OperandValue::Immediate(llptr) => (llptr, None),
+            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
+            OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
+        };
+        let layout = cx.layout_of(projected_ty);
+        PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+    }
+
+    /// If this operand is a `Pair`, we return an aggregate with the two values.
+    /// For other cases, see `immediate`.
+    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+    ) -> V {
+        if let OperandValue::Pair(a, b) = self.val {
+            let llty = bx.cx().backend_type(self.layout);
+            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
+            // Reconstruct the immediate aggregate.
+            let mut llpair = bx.cx().const_undef(llty);
+            let imm_a = base::from_immediate(bx, a);
+            let imm_b = base::from_immediate(bx, b);
+            llpair = bx.insert_value(llpair, imm_a, 0);
+            llpair = bx.insert_value(llpair, imm_b, 1);
+            llpair
+        } else {
+            self.immediate()
+        }
+    }
+
+    /// If the type is a pair, we return a `Pair`; otherwise, an `Immediate`.
+    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        llval: V,
+        layout: TyAndLayout<'tcx>,
+    ) -> Self {
+        let val = if let Abi::ScalarPair(ref a, ref b) = layout.abi {
+            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
+
+            // Deconstruct the immediate aggregate.
+            let a_llval = bx.extract_value(llval, 0);
+            let a_llval = base::to_immediate_scalar(bx, a_llval, a);
+            let b_llval = bx.extract_value(llval, 1);
+            let b_llval = base::to_immediate_scalar(bx, b_llval, b);
+            OperandValue::Pair(a_llval, b_llval)
+        } else {
+            OperandValue::Immediate(llval)
+        };
+        OperandRef { val, layout }
+    }
+
+    pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        i: usize,
+    ) -> Self {
+        let field = self.layout.field(bx.cx(), i);
+        let offset = self.layout.fields.offset(i);
+
+        let mut val = match (self.val, &self.layout.abi) {
+            // If the field is ZST, it has no data.
+            _ if field.is_zst() => {
+                return OperandRef::new_zst(bx, field);
+            }
+
+            // Newtype of a scalar, scalar pair or vector.
+            (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
+                if field.size == self.layout.size =>
+            {
+                assert_eq!(offset.bytes(), 0);
+                self.val
+            }
+
+            // Extract a scalar component from a pair.
+            (OperandValue::Pair(a_llval, b_llval), &Abi::ScalarPair(ref a, ref b)) => {
+                if offset.bytes() == 0 {
+                    assert_eq!(field.size, a.value.size(bx.cx()));
+                    OperandValue::Immediate(a_llval)
+                } else {
+                    assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
+                    assert_eq!(field.size, b.value.size(bx.cx()));
+                    OperandValue::Immediate(b_llval)
+                }
+            }
+
+            // `#[repr(simd)]` types are also immediate.
+            (OperandValue::Immediate(llval), &Abi::Vector { .. }) => {
+                OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
+            }
+
+            _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
+        };
+
+        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+        // Bools in union fields need to be truncated.
+        let to_immediate_or_cast = |bx: &mut Bx, val, ty| {
+            if ty == bx.cx().type_i1() { bx.trunc(val, ty) } else { bx.bitcast(val, ty) }
+        };
+
+        match val {
+            OperandValue::Immediate(ref mut llval) => {
+                *llval = to_immediate_or_cast(bx, *llval, bx.cx().immediate_backend_type(field));
+            }
+            OperandValue::Pair(ref mut a, ref mut b) => {
+                *a = to_immediate_or_cast(
+                    bx,
+                    *a,
+                    bx.cx().scalar_pair_element_backend_type(field, 0, true),
+                );
+                *b = to_immediate_or_cast(
+                    bx,
+                    *b,
+                    bx.cx().scalar_pair_element_backend_type(field, 1, true),
+                );
+            }
+            OperandValue::Ref(..) => bug!(),
+        }
+
+        OperandRef { val, layout: field }
+    }
+}
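
For instance (a sketch of the scalar-pair fast path above, assuming `(u32, u64)` has a `ScalarPair` layout on the target): projecting a field out of a value that lives in two SSA values never touches memory; `extract_field` just picks the matching immediate.

    // `.1` is the second immediate of the pair; no load is emitted.
    fn second(p: (u32, u64)) -> u64 {
        p.1
    }

    fn main() {
        assert_eq!(second((1, 2)), 2);
    }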
+
+impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::empty());
+    }
+
+    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
+    }
+
+    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+    }
+
+    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+    ) {
+        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
+    }
+
+    fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        dest: PlaceRef<'tcx, V>,
+        flags: MemFlags,
+    ) {
+        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
+        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+        // value is through `undef`, and store itself is useless.
+        if dest.layout.is_zst() {
+            return;
+        }
+        match self {
+            OperandValue::Ref(r, None, source_align) => {
+                base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
+            }
+            OperandValue::Ref(_, Some(_), _) => {
+                bug!("cannot directly store unsized values");
+            }
+            OperandValue::Immediate(s) => {
+                let val = base::from_immediate(bx, s);
+                bx.store_with_flags(val, dest.llval, dest.align, flags);
+            }
+            OperandValue::Pair(a, b) => {
+                let (a_scalar, b_scalar) = match dest.layout.abi {
+                    Abi::ScalarPair(ref a, ref b) => (a, b),
+                    _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
+                };
+                let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi);
+
+                let llptr = bx.struct_gep(dest.llval, 0);
+                let val = base::from_immediate(bx, a);
+                let align = dest.align;
+                bx.store_with_flags(val, llptr, align, flags);
+
+                let llptr = bx.struct_gep(dest.llval, 1);
+                let val = base::from_immediate(bx, b);
+                let align = dest.align.restrict_for_offset(b_offset);
+                bx.store_with_flags(val, llptr, align, flags);
+            }
+        }
+    }
+
+    pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        indirect_dest: PlaceRef<'tcx, V>,
+    ) {
+        debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
+        let flags = MemFlags::empty();
+
+        // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
+        let unsized_ty = indirect_dest
+            .layout
+            .ty
+            .builtin_deref(true)
+            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest))
+            .ty;
+
+        let (llptr, llextra) = if let OperandValue::Ref(llptr, Some(llextra), _) = self {
+            (llptr, llextra)
+        } else {
+            bug!("store_unsized called with a sized value")
+        };
+
+        // FIXME: choose an appropriate alignment, or use dynamic align somehow
+        let max_align = Align::from_bits(128).unwrap();
+        let min_align = Align::from_bits(8).unwrap();
+
+        // Allocate an appropriate region on the stack, and copy the value into it
+        let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+        let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+        bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
+
+        // Store the allocated region and the extra to the indirect place.
+        let indirect_operand = OperandValue::Pair(lldst, llextra);
+        indirect_operand.store(bx, indirect_dest);
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    fn maybe_codegen_consume_direct(
+        &mut self,
+        bx: &mut Bx,
+        place_ref: mir::PlaceRef<'tcx>,
+    ) -> Option<OperandRef<'tcx, Bx::Value>> {
+        debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
+
+        match self.locals[place_ref.local] {
+            LocalRef::Operand(Some(mut o)) => {
+                // Moves out of scalar and scalar pair fields are trivial.
+                for elem in place_ref.projection.iter() {
+                    match elem {
+                        mir::ProjectionElem::Field(ref f, _) => {
+                            o = o.extract_field(bx, f.index());
+                        }
+                        mir::ProjectionElem::Index(_)
+                        | mir::ProjectionElem::ConstantIndex { .. } => {
+                            // ZSTs don't require any actual memory access.
+                            // FIXME(eddyb) deduplicate this with the identical
+                            // checks in `codegen_consume` and `extract_field`.
+                            let elem = o.layout.field(bx.cx(), 0);
+                            if elem.is_zst() {
+                                o = OperandRef::new_zst(bx, elem);
+                            } else {
+                                return None;
+                            }
+                        }
+                        _ => return None,
+                    }
+                }
+
+                Some(o)
+            }
+            LocalRef::Operand(None) => {
+                bug!("use of {:?} before def", place_ref);
+            }
+            LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
+                // Watch out for locals that do not have an
+                // alloca; they are handled somewhat differently.
+                None
+            }
+        }
+    }
+
+    pub fn codegen_consume(
+        &mut self,
+        bx: &mut Bx,
+        place_ref: mir::PlaceRef<'tcx>,
+    ) -> OperandRef<'tcx, Bx::Value> {
+        debug!("codegen_consume(place_ref={:?})", place_ref);
+
+        let ty = self.monomorphized_place_ty(place_ref);
+        let layout = bx.cx().layout_of(ty);
+
+        // ZSTs don't require any actual memory access.
+        if layout.is_zst() {
+            return OperandRef::new_zst(bx, layout);
+        }
+
+        if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
+            return o;
+        }
+
+        // For most places, consuming them just means loading the
+        // value out of their home in memory.
+        let place = self.codegen_place(bx, place_ref);
+        bx.load_operand(place)
+    }
+
+    pub fn codegen_operand(
+        &mut self,
+        bx: &mut Bx,
+        operand: &mir::Operand<'tcx>,
+    ) -> OperandRef<'tcx, Bx::Value> {
+        debug!("codegen_operand(operand={:?})", operand);
+
+        match *operand {
+            mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
+                self.codegen_consume(bx, place.as_ref())
+            }
+
+            mir::Operand::Constant(ref constant) => {
+                self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|err| {
+                    match err {
+                        // errored or at least linted
+                        ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {}
+                        ErrorHandled::TooGeneric => {
+                            bug!("codegen encountered polymorphic constant")
+                        }
+                    }
+                    // Allow RalfJ to sleep soundly knowing that even refactorings that remove
+                    // the above error (or silence it under some conditions) will not cause UB.
+                    bx.abort();
+                    // We still have to return an operand but it doesn't matter,
+                    // this code is unreachable.
+                    let ty = self.monomorphize(&constant.literal.ty);
+                    let layout = bx.cx().layout_of(ty);
+                    bx.load_operand(PlaceRef::new_sized(
+                        bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
+                        layout,
+                    ))
+                })
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
new file mode 100644
index 00000000000..05656774f0e
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -0,0 +1,502 @@
+use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
+
+use crate::common::IntPredicate;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{LayoutOf, VariantIdx, Variants};
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+    /// A pointer to the contents of the place.
+    pub llval: V,
+
+    /// This place's extra data if it is unsized, or `None` if it is sized.
+    pub llextra: Option<V>,
+
+    /// The monomorphized type of this place, including variant information.
+    pub layout: TyAndLayout<'tcx>,
+
+    /// The alignment we know for this place.
+    pub align: Align,
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
+        assert!(!layout.is_unsized());
+        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+    }
+
+    pub fn new_sized_aligned(
+        llval: V,
+        layout: TyAndLayout<'tcx>,
+        align: Align,
+    ) -> PlaceRef<'tcx, V> {
+        assert!(!layout.is_unsized());
+        PlaceRef { llval, llextra: None, layout, align }
+    }
+
+    // FIXME(eddyb) pass something else for the name so no work is done
+    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> Self {
+        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+        Self::new_sized(tmp, layout)
+    }
+
+    /// Returns a place for an indirect reference to an unsized place.
+    // FIXME(eddyb) pass something else for the name so no work is done
+    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> Self {
+        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
+        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
+        let ptr_layout = bx.cx().layout_of(ptr_ty);
+        Self::alloca(bx, ptr_layout)
+    }
+
+    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
+        if let FieldsShape::Array { count, .. } = self.layout.fields {
+            if self.layout.is_unsized() {
+                assert_eq!(count, 0);
+                self.llextra.unwrap()
+            } else {
+                cx.const_usize(count)
+            }
+        } else {
+            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
+        }
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+    /// Access a field at a point when the value's variant is known.
+    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        ix: usize,
+    ) -> Self {
+        let field = self.layout.field(bx.cx(), ix);
+        let offset = self.layout.fields.offset(ix);
+        let effective_field_align = self.align.restrict_for_offset(offset);
+
+        let mut simple = || {
+            // Unions and newtypes only use an offset of 0.
+            let llval = if offset.bytes() == 0 {
+                self.llval
+            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
+                // Offsets have to match either first or second field.
+                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
+                bx.struct_gep(self.llval, 1)
+            } else {
+                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            };
+            PlaceRef {
+                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
+                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+                layout: field,
+                align: effective_field_align,
+            }
+        };
+
+        // Simple cases, which don't need DST adjustment:
+        //   * no metadata available - just log the case
+        //   * known alignment - sized types, `[T]`, `str` or a foreign type
+        //   * packed struct - there is no alignment padding
+        match field.ty.kind {
+            _ if self.llextra.is_none() => {
+                debug!(
+                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
+                    ix, self.llval
+                );
+                return simple();
+            }
+            _ if !field.is_unsized() => return simple(),
+            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+            ty::Adt(def, _) => {
+                if def.repr.packed() {
+                    // FIXME(eddyb) generalize the adjustment when we
+                    // start supporting packing to larger alignments.
+                    assert_eq!(self.layout.align.abi.bytes(), 1);
+                    return simple();
+                }
+            }
+            _ => {}
+        }
+
+        // We need to get the pointer manually now.
+        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
+        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+        // because the field may have an arbitrary alignment in the LLVM representation
+        // anyway.
+        //
+        // To demonstrate:
+        //
+        //     struct Foo<T: ?Sized> {
+        //         x: u16,
+        //         y: T
+        //     }
+        //
+        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
+        // the `y` field has 16-bit alignment.
+
+        let meta = self.llextra;
+
+        let unaligned_offset = bx.cx().const_usize(offset.bytes());
+
+        // Get the alignment of the field
+        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
+
+        // Bump the unaligned offset up to the appropriate alignment using the
+        // following expression:
+        //
+        //     (unaligned offset + (align - 1)) & -align
+
+        // Calculate offset.
+        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
+        let and_lhs = bx.add(unaligned_offset, align_sub_1);
+        let and_rhs = bx.neg(unsized_align);
+        let offset = bx.and(and_lhs, and_rhs);
+
+        debug!("struct_field_ptr: DST field offset: {:?}", offset);
+
+        // Cast and adjust pointer.
+        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+        let byte_ptr = bx.gep(byte_ptr, &[offset]);
+
+        // Finally, cast back to the type expected.
+        let ll_fty = bx.cx().backend_type(field);
+        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+
+        PlaceRef {
+            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
+            llextra: self.llextra,
+            layout: field,
+            align: effective_field_align,
+        }
+    }
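
A worked instance of the rounding expression above (a standalone sketch, not compiler code, and valid only for power-of-two alignments): bumping an unaligned offset of 2 up to 8-byte alignment.

    fn round_up(offset: u64, align: u64) -> u64 {
        // (unaligned offset + (align - 1)) & -align
        (offset + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(round_up(2, 8), 8); // 2 rounds up to the next multiple of 8
        assert_eq!(round_up(8, 8), 8); // already-aligned offsets are unchanged
    }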
+
+    /// Obtain the actual discriminant of a value.
+    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self,
+        bx: &mut Bx,
+        cast_to: Ty<'tcx>,
+    ) -> V {
+        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
+        if self.layout.abi.is_uninhabited() {
+            return bx.cx().const_undef(cast_to);
+        }
+        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
+            Variants::Single { index } => {
+                let discr_val = self
+                    .layout
+                    .ty
+                    .discriminant_for_variant(bx.cx().tcx(), index)
+                    .map_or(index.as_u32() as u128, |discr| discr.val);
+                return bx.cx().const_uint_big(cast_to, discr_val);
+            }
+            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // Read the tag/niche-encoded discriminant from memory.
+        let tag = self.project_field(bx, tag_field);
+        let tag = bx.load_operand(tag);
+
+        // Decode the discriminant (specifically if it's niche-encoded).
+        match *tag_encoding {
+            TagEncoding::Direct => {
+                let signed = match tag_scalar.value {
+                    // We use `i1` for bytes that are always `0` or `1`,
+                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+                    // let LLVM interpret the `i1` as signed, because
+                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
+                    Int(_, signed) => !tag_scalar.is_bool() && signed,
+                    _ => false,
+                };
+                bx.intcast(tag.immediate(), cast_to, signed)
+            }
+            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+                // Rebase from niche values to discriminants, and check
+                // whether the result is in range for the niche variants.
+                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
+                let tag = tag.immediate();
+
+                // We first compute the "relative discriminant" (wrt `niche_variants`),
+                // that is, if `n = niche_variants.end() - niche_variants.start()`,
+                // we remap `niche_start..=niche_start + n` (which may wrap around)
+                // to (non-wrap-around) `0..=n`, to be able to check whether the
+                // discriminant corresponds to a niche variant with one comparison.
+                // We also can't go directly to the (variant index) discriminant
+                // and check that it is in the range `niche_variants`, because
+                // that might not fit in the same type, on top of needing an extra
+                // comparison (see also the comment on `let niche_discr`).
+                let relative_discr = if niche_start == 0 {
+                    // Avoid subtracting `0`, which wouldn't work for pointers.
+                    // FIXME(eddyb) check the actual primitive type here.
+                    tag
+                } else {
+                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
+                };
+                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+                let is_niche = if relative_max == 0 {
+                    // Avoid calling `const_uint`, which wouldn't work for pointers.
+                    // Also use canonical == 0 instead of non-canonical u<= 0.
+                    // FIXME(eddyb) check the actual primitive type here.
+                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
+                } else {
+                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
+                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+                };
+
+                // NOTE(eddyb) this addition needs to be performed on the final
+                // type, in case the niche itself can't represent all variant
+                // indices (e.g. `u8` niche with more than `256` variants,
+                // but enough uninhabited variants so that the remaining variants
+                // fit in the niche).
+                // In other words, `niche_variants.end - niche_variants.start`
+                // is representable in the niche, but `niche_variants.end`
+                // might not be, in extreme cases.
+                let niche_discr = {
+                    let relative_discr = if relative_max == 0 {
+                        // HACK(eddyb) since we have only one niche, we know which
+                        // one it is, and we can avoid having a dynamic value here.
+                        bx.cx().const_uint(cast_to, 0)
+                    } else {
+                        bx.intcast(relative_discr, cast_to, false)
+                    };
+                    bx.add(
+                        relative_discr,
+                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
+                    )
+                };
+
+                bx.select(
+                    is_niche,
+                    niche_discr,
+                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+                )
+            }
+        }
+    }
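
To make the niche decoding concrete, here is a hedged sketch for `Option<&u8>` (assuming the usual layout: `niche_start = 0`, the single niche variant `None` at index 0, and the dataful variant `Some` at index 1), mirroring the `relative_discr`/`is_niche`/`select` steps above:

    fn decode_option_ref_discr(tag: u64) -> u32 {
        let niche_start = 0u64;     // `None` is encoded as the null pointer
        let dataful_variant = 1u32; // `Some`
        let relative_discr = tag.wrapping_sub(niche_start);
        let is_niche = relative_discr == 0; // relative_max == 0: canonical `== 0`
        let niche_discr = 0u32;     // niche_variants.start() == 0
        if is_niche { niche_discr } else { dataful_variant }
    }

    fn main() {
        assert_eq!(decode_option_ref_discr(0), 0);      // null     => None
        assert_eq!(decode_option_ref_discr(0x1000), 1); // non-null => Some
    }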
+
+    /// Sets the discriminant for a new value of the given case of the given
+    /// representation.
+    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        variant_index: VariantIdx,
+    ) {
+        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
+            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+            // if that turns out to be helpful.
+            bx.abort();
+            return;
+        }
+        match self.layout.variants {
+            Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+                let ptr = self.project_field(bx, tag_field);
+                let to =
+                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
+                bx.store(
+                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
+                    ptr.llval,
+                    ptr.align,
+                );
+            }
+            Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+                tag_field,
+                ..
+            } => {
+                if variant_index != dataful_variant {
+                    if bx.cx().sess().target.target.arch == "arm"
+                        || bx.cx().sess().target.target.arch == "aarch64"
+                    {
+                        // FIXME(#34427): as workaround for LLVM bug on ARM,
+                        // use memset of 0 before assigning niche value.
+                        let fill_byte = bx.cx().const_u8(0);
+                        let size = bx.cx().const_usize(self.layout.size.bytes());
+                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
+                    }
+
+                    let niche = self.project_field(bx, tag_field);
+                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
+                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
+                    // FIXME(eddyb): check the actual primitive type here.
+                    let niche_llval = if niche_value == 0 {
+                        // HACK(eddyb): using `c_null` as it works on all types.
+                        bx.cx().const_null(niche_llty)
+                    } else {
+                        bx.cx().const_uint_big(niche_llty, niche_value)
+                    };
+                    OperandValue::Immediate(niche_llval).store(bx, niche);
+                }
+            }
+        }
+    }
+
+    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        llindex: V,
+    ) -> Self {
+        // Statically compute the offset if we can, otherwise just use the element size,
+        // as this will yield the lowest alignment.
+        let layout = self.layout.field(bx, 0);
+        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
+            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
+        } else {
+            layout.size
+        };
+
+        PlaceRef {
+            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
+            llextra: None,
+            layout,
+            align: self.align.restrict_for_offset(offset),
+        }
+    }
+
+    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &mut Bx,
+        variant_index: VariantIdx,
+    ) -> Self {
+        let mut downcast = *self;
+        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
+
+        // Cast to the appropriate variant struct type.
+        let variant_ty = bx.cx().backend_type(downcast.layout);
+        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+        downcast
+    }
+
+    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+        bx.lifetime_start(self.llval, self.layout.size);
+    }
+
+    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+        bx.lifetime_end(self.llval, self.layout.size);
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_place(
+        &mut self,
+        bx: &mut Bx,
+        place_ref: mir::PlaceRef<'tcx>,
+    ) -> PlaceRef<'tcx, Bx::Value> {
+        debug!("codegen_place(place_ref={:?})", place_ref);
+        let cx = self.cx;
+        let tcx = self.cx.tcx();
+
+        let result = match place_ref {
+            mir::PlaceRef { local, projection: [] } => match self.locals[local] {
+                LocalRef::Place(place) => {
+                    return place;
+                }
+                LocalRef::UnsizedPlace(place) => {
+                    return bx.load_operand(place).deref(cx);
+                }
+                LocalRef::Operand(..) => {
+                    bug!("using operand local {:?} as place", place_ref);
+                }
+            },
+            mir::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
+                // Load the pointer from its location.
+                self.codegen_consume(bx, mir::PlaceRef { local, projection: proj_base })
+                    .deref(bx.cx())
+            }
+            mir::PlaceRef { local, projection: &[ref proj_base @ .., elem] } => {
+                // FIXME turn this recursion into iteration
+                let cg_base =
+                    self.codegen_place(bx, mir::PlaceRef { local, projection: proj_base });
+
+                match elem {
+                    mir::ProjectionElem::Deref => bug!(),
+                    mir::ProjectionElem::Field(ref field, _) => {
+                        cg_base.project_field(bx, field.index())
+                    }
+                    mir::ProjectionElem::Index(index) => {
+                        let index = &mir::Operand::Copy(mir::Place::from(index));
+                        let index = self.codegen_operand(bx, index);
+                        let llindex = index.immediate();
+                        cg_base.project_index(bx, llindex)
+                    }
+                    mir::ProjectionElem::ConstantIndex {
+                        offset,
+                        from_end: false,
+                        min_length: _,
+                    } => {
+                        let lloffset = bx.cx().const_usize(offset as u64);
+                        cg_base.project_index(bx, lloffset)
+                    }
+                    mir::ProjectionElem::ConstantIndex {
+                        offset,
+                        from_end: true,
+                        min_length: _,
+                    } => {
+                        let lloffset = bx.cx().const_usize(offset as u64);
+                        let lllen = cg_base.len(bx.cx());
+                        let llindex = bx.sub(lllen, lloffset);
+                        cg_base.project_index(bx, llindex)
+                    }
+                    mir::ProjectionElem::Subslice { from, to, from_end } => {
+                        let mut subslice =
+                            cg_base.project_index(bx, bx.cx().const_usize(from as u64));
+                        let projected_ty =
+                            PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
+                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
+
+                        if subslice.layout.is_unsized() {
+                            assert!(from_end, "slice subslices should be `from_end`");
+                            subslice.llextra = Some(bx.sub(
+                                cg_base.llextra.unwrap(),
+                                bx.cx().const_usize((from as u64) + (to as u64)),
+                            ));
+                        }
+
+                        // Cast the place pointer type to the new
+                        // array or slice type (`*[%_; new_len]`).
+                        subslice.llval = bx.pointercast(
+                            subslice.llval,
+                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+                        );
+
+                        subslice
+                    }
+                    mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
+                }
+            }
+        };
+        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
+        result
+    }
+
+    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
+        let tcx = self.cx.tcx();
+        let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, self.mir, tcx);
+        self.monomorphize(&place_ty.ty)
+    }
+}
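
A small sketch of the projection chains `codegen_place` walks (the struct and names below are illustrative): the place carries `Deref`, `Field`, and `Index` elements, processed base-first by the recursion above.

    struct S { field: [u32; 4] }

    fn get(boxed: &Box<S>, idx: usize) -> u32 {
        // MIR place, roughly: (*(*boxed)).field[idx]
        // projection: [Deref, Deref, Field(field), Index(idx)]
        boxed.field[idx]
    }

    fn main() {
        assert_eq!(get(&Box::new(S { field: [1, 2, 3, 4] }), 2), 3);
    }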
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
new file mode 100644
index 00000000000..71f924df119
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -0,0 +1,1006 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate, RealPredicate};
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_apfloat::{ieee, Float, Round, Status};
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir;
+use rustc_middle::ty::cast::{CastTy, IntTy};
+use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_span::symbol::sym;
+use rustc_target::abi::{Abi, Int, LayoutOf, Variants};
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_rvalue(
+        &mut self,
+        mut bx: Bx,
+        dest: PlaceRef<'tcx, Bx::Value>,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> Bx {
+        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);
+
+        match *rvalue {
+            mir::Rvalue::Use(ref operand) => {
+                let cg_operand = self.codegen_operand(&mut bx, operand);
+                // FIXME: consider not copying constants through the stack. (Fixable by
+                // codegen'ing constants into `OperandValue::Ref`; why don't we do that yet?)
+                cg_operand.val.store(&mut bx, dest);
+                bx
+            }
+
+            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
+                // The destination necessarily contains a fat pointer, so if
+                // it's a scalar pair, it's a fat pointer or newtype thereof.
+                if bx.cx().is_backend_scalar_pair(dest.layout) {
+                    // Into-coerce of a thin pointer to a fat pointer -- just
+                    // use the operand path.
+                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                    temp.val.store(&mut bx, dest);
+                    return bx;
+                }
+
+                // Unsize of a nontrivial struct. I would prefer for
+                // this to be eliminated by MIR building, but
+                // `CoerceUnsized` can be passed by a where-clause,
+                // so the (generic) MIR may not be able to expand it.
+                let operand = self.codegen_operand(&mut bx, source);
+                match operand.val {
+                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
+                        // Unsize from an immediate structure. We don't
+                        // really need a temporary alloca here, but
+                        // avoiding it would require us to have
+                        // `coerce_unsized_into` use `extractvalue` to
+                        // index into the struct, and this case isn't
+                        // important enough for it.
+                        debug!("codegen_rvalue: creating ugly alloca");
+                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
+                        scratch.storage_live(&mut bx);
+                        operand.val.store(&mut bx, scratch);
+                        base::coerce_unsized_into(&mut bx, scratch, dest);
+                        scratch.storage_dead(&mut bx);
+                    }
+                    OperandValue::Ref(llref, None, align) => {
+                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+                        base::coerce_unsized_into(&mut bx, source, dest);
+                    }
+                    OperandValue::Ref(_, Some(_), _) => {
+                        bug!("unsized coercion on an unsized rvalue");
+                    }
+                }
+                bx
+            }
+
+            mir::Rvalue::Repeat(ref elem, count) => {
+                let cg_elem = self.codegen_operand(&mut bx, elem);
+
+                // Do not generate the loop for zero-sized elements or empty arrays.
+                if dest.layout.is_zst() {
+                    return bx;
+                }
+
+                if let OperandValue::Immediate(v) = cg_elem.val {
+                    let zero = bx.const_usize(0);
+                    let start = dest.project_index(&mut bx, zero).llval;
+                    let size = bx.const_usize(dest.layout.size.bytes());
+
+                    // Use llvm.memset.p0i8.* to initialize all zero arrays
+                    if bx.cx().const_to_opt_uint(v) == Some(0) {
+                        let fill = bx.cx().const_u8(0);
+                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
+                        return bx;
+                    }
+
+                    // Use llvm.memset.p0i8.* to initialize byte arrays
+                    let v = base::from_immediate(&mut bx, v);
+                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
+                        bx.memset(start, v, size, dest.align, MemFlags::empty());
+                        return bx;
+                    }
+                }
+
+                let count =
+                    self.monomorphize(&count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+
+                bx.write_operand_repeatedly(cg_elem, count, dest)
+            }
+
+            mir::Rvalue::Aggregate(ref kind, ref operands) => {
+                let (dest, active_field_index) = match **kind {
+                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+                        dest.codegen_set_discr(&mut bx, variant_index);
+                        if adt_def.is_enum() {
+                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
+                        } else {
+                            (dest, active_field_index)
+                        }
+                    }
+                    _ => (dest, None),
+                };
+                for (i, operand) in operands.iter().enumerate() {
+                    let op = self.codegen_operand(&mut bx, operand);
+                    // Do not generate stores and GEPs for zero-sized fields.
+                    if !op.layout.is_zst() {
+                        let field_index = active_field_index.unwrap_or(i);
+                        let field = dest.project_field(&mut bx, field_index);
+                        op.val.store(&mut bx, field);
+                    }
+                }
+                bx
+            }
+
+            _ => {
+                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                temp.val.store(&mut bx, dest);
+                bx
+            }
+        }
+    }
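
The `Rvalue::Repeat` fast paths above in a nutshell (a sketch; what actually lowers to `memset` depends on the element's codegen): all-zero repeats and single-byte repeats become one `memset`, and everything else goes through `write_operand_repeatedly`.

    fn main() {
        let zeros = [0u64; 32];   // constant-zero element => single memset of 0
        let bytes = [0xAAu8; 32]; // one-byte element      => single memset of 0xAA
        let words = [7u32; 32];   // general case          => element-by-element loop
        assert_eq!((zeros[0], bytes[0], words[0]), (0, 0xAA, 7));
    }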
+
+    pub fn codegen_rvalue_unsized(
+        &mut self,
+        mut bx: Bx,
+        indirect_dest: PlaceRef<'tcx, Bx::Value>,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> Bx {
+        debug!(
+            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
+            indirect_dest.llval, rvalue
+        );
+
+        match *rvalue {
+            mir::Rvalue::Use(ref operand) => {
+                let cg_operand = self.codegen_operand(&mut bx, operand);
+                cg_operand.val.store_unsized(&mut bx, indirect_dest);
+                bx
+            }
+
+            _ => bug!("unsized assignment other than `Rvalue::Use`"),
+        }
+    }
+
+    pub fn codegen_rvalue_operand(
+        &mut self,
+        mut bx: Bx,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+        assert!(
+            self.rvalue_creates_operand(rvalue, DUMMY_SP),
+            "cannot codegen {:?} to operand",
+            rvalue,
+        );
+
+        match *rvalue {
+            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
+                let operand = self.codegen_operand(&mut bx, source);
+                debug!("cast operand is {:?}", operand);
+                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
+
+                let val = match *kind {
+                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+                        match operand.layout.ty.kind {
+                            ty::FnDef(def_id, substs) => {
+                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
+                                    bug!("reifying a fn ptr that requires const arguments");
+                                }
+                                let instance = ty::Instance::resolve_for_fn_ptr(
+                                    bx.tcx(),
+                                    ty::ParamEnv::reveal_all(),
+                                    def_id,
+                                    substs,
+                                )
+                                .unwrap()
+                                .polymorphize(bx.cx().tcx());
+                                OperandValue::Immediate(bx.get_fn_addr(instance))
+                            }
+                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
+                        }
+                    }
+                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
+                        match operand.layout.ty.kind {
+                            ty::Closure(def_id, substs) => {
+                                let instance = Instance::resolve_closure(
+                                    bx.cx().tcx(),
+                                    def_id,
+                                    substs,
+                                    ty::ClosureKind::FnOnce,
+                                )
+                                .polymorphize(bx.cx().tcx());
+                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
+                            }
+                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
+                        }
+                    }
+                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+                        // This is a no-op at the LLVM level.
+                        operand.val
+                    }
+                    mir::CastKind::Pointer(PointerCast::Unsize) => {
+                        assert!(bx.cx().is_backend_scalar_pair(cast));
+                        match operand.val {
+                            OperandValue::Pair(lldata, llextra) => {
+                                // unsize from a fat pointer -- this is a
+                                // "trait-object-to-supertrait" coercion, for
+                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.
+
+                                // HACK(eddyb) have to bitcast pointers
+                                // until LLVM removes pointee types.
+                                let lldata = bx.pointercast(
+                                    lldata,
+                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+                                );
+                                OperandValue::Pair(lldata, llextra)
+                            }
+                            OperandValue::Immediate(lldata) => {
+                                // "standard" unsize
+                                let (lldata, llextra) = base::unsize_thin_ptr(
+                                    &mut bx,
+                                    lldata,
+                                    operand.layout.ty,
+                                    cast.ty,
+                                );
+                                OperandValue::Pair(lldata, llextra)
+                            }
+                            OperandValue::Ref(..) => {
+                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
+                            }
+                        }
+                    }
+                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
+                    | mir::CastKind::Misc
+                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
+                    {
+                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
+                            if bx.cx().is_backend_scalar_pair(cast) {
+                                let data_cast = bx.pointercast(
+                                    data_ptr,
+                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+                                );
+                                OperandValue::Pair(data_cast, meta)
+                            } else {
+                                // Cast to thin pointer: a cast of a fat
+                                // pointer to a thin pointer extracts the
+                                // data pointer and pointer-casts it to the
+                                // desired pointer type.
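+                                // e.g., `*const str as *const u8` keeps only
+                                // the data pointer and drops the length.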
+                                let llcast_ty = bx.cx().immediate_backend_type(cast);
+                                let llval = bx.pointercast(data_ptr, llcast_ty);
+                                OperandValue::Immediate(llval)
+                            }
+                        } else {
+                            bug!("unexpected non-pair operand");
+                        }
+                    }
+                    mir::CastKind::Pointer(
+                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
+                    )
+                    | mir::CastKind::Misc => {
+                        assert!(bx.cx().is_backend_immediate(cast));
+                        let ll_t_out = bx.cx().immediate_backend_type(cast);
+                        if operand.layout.abi.is_uninhabited() {
+                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
+                            return (bx, OperandRef { val, layout: cast });
+                        }
+                        let r_t_in =
+                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
+                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
+                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
+                        match operand.layout.variants {
+                            Variants::Single { index } => {
+                                if let Some(discr) =
+                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
+                                {
+                                    let discr_layout = bx.cx().layout_of(discr.ty);
+                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
+                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
+                                    let discr_val =
+                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());
+
+                                    return (
+                                        bx,
+                                        OperandRef {
+                                            val: OperandValue::Immediate(discr_val),
+                                            layout: cast,
+                                        },
+                                    );
+                                }
+                            }
+                            Variants::Multiple { .. } => {}
+                        }
+                        let llval = operand.immediate();
+
+                        let mut signed = false;
+                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
+                            if let Int(_, s) = scalar.value {
+                                // We use `i1` for bytes that are always `0` or `1`,
+                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+                                // let LLVM interpret the `i1` as signed, because
+                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
+                                signed = !scalar.is_bool() && s;
+
+                                let er = scalar.valid_range_exclusive(bx.cx());
+                                if er.end != er.start
+                                    && scalar.valid_range.end() > scalar.valid_range.start()
+                                {
+                                    // We want `table[e as usize]` to not
+                                    // have bounds checks, and this is the most
+                                    // convenient place to put the `assume`.
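+                                    // e.g., for `enum E { A, B, C }` the valid
+                                    // range ends at 2, so this emits
+                                    // `assume(e <= 2)`.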
+                                    let ll_t_in_const =
+                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
+                                    let cmp = bx.icmp(IntPredicate::IntULE, llval, ll_t_in_const);
+                                    bx.assume(cmp);
+                                }
+                            }
+                        }
+
+                        let newval = match (r_t_in, r_t_out) {
+                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
+                            (CastTy::Float, CastTy::Float) => {
+                                let srcsz = bx.cx().float_width(ll_t_in);
+                                let dstsz = bx.cx().float_width(ll_t_out);
+                                if dstsz > srcsz {
+                                    bx.fpext(llval, ll_t_out)
+                                } else if srcsz > dstsz {
+                                    bx.fptrunc(llval, ll_t_out)
+                                } else {
+                                    llval
+                                }
+                            }
+                            (CastTy::Int(_), CastTy::Float) => {
+                                if signed {
+                                    bx.sitofp(llval, ll_t_out)
+                                } else {
+                                    bx.uitofp(llval, ll_t_out)
+                                }
+                            }
+                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
+                                bx.pointercast(llval, ll_t_out)
+                            }
+                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
+                                bx.ptrtoint(llval, ll_t_out)
+                            }
+                            (CastTy::Int(_), CastTy::Ptr(_)) => {
+                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
+                                bx.inttoptr(usize_llval, ll_t_out)
+                            }
+                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
+                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out, cast)
+                            }
+                            (CastTy::Float, CastTy::Int(_)) => {
+                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out, cast)
+                            }
+                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
+                        };
+                        OperandValue::Immediate(newval)
+                    }
+                };
+                (bx, OperandRef { val, layout: cast })
+            }
+
+            mir::Rvalue::Ref(_, bk, place) => {
+                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+                    tcx.mk_ref(
+                        tcx.lifetimes.re_erased,
+                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
+                    )
+                };
+                self.codegen_place_to_pointer(bx, place, mk_ref)
+            }
+
+            mir::Rvalue::AddressOf(mutability, place) => {
+                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
+                };
+                self.codegen_place_to_pointer(bx, place, mk_ptr)
+            }
+
+            mir::Rvalue::Len(place) => {
+                let size = self.evaluate_array_len(&mut bx, place);
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(size),
+                    layout: bx.cx().layout_of(bx.tcx().types.usize),
+                };
+                (bx, operand)
+            }
+
+            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+                let lhs = self.codegen_operand(&mut bx, lhs);
+                let rhs = self.codegen_operand(&mut bx, rhs);
+                let llresult = match (lhs.val, rhs.val) {
+                    (
+                        OperandValue::Pair(lhs_addr, lhs_extra),
+                        OperandValue::Pair(rhs_addr, rhs_extra),
+                    ) => self.codegen_fat_ptr_binop(
+                        &mut bx,
+                        op,
+                        lhs_addr,
+                        lhs_extra,
+                        rhs_addr,
+                        rhs_extra,
+                        lhs.layout.ty,
+                    ),
+
+                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
+                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+                    }
+
+                    _ => bug!(),
+                };
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(llresult),
+                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+                };
+                (bx, operand)
+            }
+            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+                let lhs = self.codegen_operand(&mut bx, lhs);
+                let rhs = self.codegen_operand(&mut bx, rhs);
+                let result = self.codegen_scalar_checked_binop(
+                    &mut bx,
+                    op,
+                    lhs.immediate(),
+                    rhs.immediate(),
+                    lhs.layout.ty,
+                );
+                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
+                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
+
+                (bx, operand)
+            }
+
+            mir::Rvalue::UnaryOp(op, ref operand) => {
+                let operand = self.codegen_operand(&mut bx, operand);
+                let lloperand = operand.immediate();
+                let is_float = operand.layout.ty.is_floating_point();
+                let llval = match op {
+                    mir::UnOp::Not => bx.not(lloperand),
+                    mir::UnOp::Neg => {
+                        if is_float {
+                            bx.fneg(lloperand)
+                        } else {
+                            bx.neg(lloperand)
+                        }
+                    }
+                };
+                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+            }
+
+            mir::Rvalue::Discriminant(ref place) => {
+                let discr_ty = rvalue.ty(self.mir, bx.tcx());
+                let discr = self
+                    .codegen_place(&mut bx, place.as_ref())
+                    .codegen_get_discr(&mut bx, discr_ty);
+                (
+                    bx,
+                    OperandRef {
+                        val: OperandValue::Immediate(discr),
+                        layout: self.cx.layout_of(discr_ty),
+                    },
+                )
+            }
+
+            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
+                assert!(bx.cx().type_is_sized(ty));
+                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
+                let tcx = self.cx.tcx();
+                (
+                    bx,
+                    OperandRef {
+                        val: OperandValue::Immediate(val),
+                        layout: self.cx.layout_of(tcx.types.usize),
+                    },
+                )
+            }
+
+            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
+                let content_ty = self.monomorphize(&content_ty);
+                let content_layout = bx.cx().layout_of(content_ty);
+                let llsize = bx.cx().const_usize(content_layout.size.bytes());
+                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
+                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+                let llty_ptr = bx.cx().backend_type(box_layout);
+
+                // Allocate space:
+                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
+                    Ok(id) => id,
+                    Err(s) => {
+                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                    }
+                };
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let r = bx.cx().get_fn_addr(instance);
+                let call = bx.call(r, &[llsize, llalign], None);
+                let val = bx.pointercast(call, llty_ptr);
+
+                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
+                (bx, operand)
+            }
+            mir::Rvalue::ThreadLocalRef(def_id) => {
+                assert!(bx.cx().tcx().is_static(def_id));
+                let static_ = bx.get_static(def_id);
+                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
+                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
+                (bx, operand)
+            }
+            mir::Rvalue::Use(ref operand) => {
+                let operand = self.codegen_operand(&mut bx, operand);
+                (bx, operand)
+            }
+            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
+                // According to `rvalue_creates_operand`, only ZST
+                // aggregate rvalues are allowed to be operands.
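+                // (e.g., `()`, or a `[T; 0]` built by `Repeat`).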
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
+                let operand =
+                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
+                (bx, operand)
+            }
+        }
+    }
+
+    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
+        // ZSTs are passed as operands and require special handling
+        // because codegen_place() panics if a Local is an operand.
+        if let Some(index) = place.as_local() {
+            if let LocalRef::Operand(Some(op)) = self.locals[index] {
+                if let ty::Array(_, n) = op.layout.ty.kind {
+                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+                    return bx.cx().const_usize(n);
+                }
+            }
+        }
+        // use the common size calculation for non-zero-sized types
+        let cg_value = self.codegen_place(bx, place.as_ref());
+        cg_value.len(bx.cx())
+    }
+
+    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
+    fn codegen_place_to_pointer(
+        &mut self,
+        mut bx: Bx,
+        place: mir::Place<'tcx>,
+        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
+    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+        let cg_place = self.codegen_place(&mut bx, place.as_ref());
+
+        let ty = cg_place.layout.ty;
+
+        // Note: places are indirect, so storing the `llval` into the
+        // destination effectively creates a reference.
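+        // For a sized `ty` this is just the place's address; for an unsized
+        // place (e.g., a `[T]`) the metadata in `llextra` comes along too.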
+        let val = if !bx.cx().type_has_metadata(ty) {
+            OperandValue::Immediate(cg_place.llval)
+        } else {
+            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+        };
+        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+    }
+
+    pub fn codegen_scalar_binop(
+        &mut self,
+        bx: &mut Bx,
+        op: mir::BinOp,
+        lhs: Bx::Value,
+        rhs: Bx::Value,
+        input_ty: Ty<'tcx>,
+    ) -> Bx::Value {
+        let is_float = input_ty.is_floating_point();
+        let is_signed = input_ty.is_signed();
+        match op {
+            mir::BinOp::Add => {
+                if is_float {
+                    bx.fadd(lhs, rhs)
+                } else {
+                    bx.add(lhs, rhs)
+                }
+            }
+            mir::BinOp::Sub => {
+                if is_float {
+                    bx.fsub(lhs, rhs)
+                } else {
+                    bx.sub(lhs, rhs)
+                }
+            }
+            mir::BinOp::Mul => {
+                if is_float {
+                    bx.fmul(lhs, rhs)
+                } else {
+                    bx.mul(lhs, rhs)
+                }
+            }
+            mir::BinOp::Div => {
+                if is_float {
+                    bx.fdiv(lhs, rhs)
+                } else if is_signed {
+                    bx.sdiv(lhs, rhs)
+                } else {
+                    bx.udiv(lhs, rhs)
+                }
+            }
+            mir::BinOp::Rem => {
+                if is_float {
+                    bx.frem(lhs, rhs)
+                } else if is_signed {
+                    bx.srem(lhs, rhs)
+                } else {
+                    bx.urem(lhs, rhs)
+                }
+            }
+            mir::BinOp::BitOr => bx.or(lhs, rhs),
+            mir::BinOp::BitAnd => bx.and(lhs, rhs),
+            mir::BinOp::BitXor => bx.xor(lhs, rhs),
+            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
+            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
+            mir::BinOp::Ne
+            | mir::BinOp::Lt
+            | mir::BinOp::Gt
+            | mir::BinOp::Eq
+            | mir::BinOp::Le
+            | mir::BinOp::Ge => {
+                if is_float {
+                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
+                } else {
+                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
+                }
+            }
+        }
+    }
+
+    pub fn codegen_fat_ptr_binop(
+        &mut self,
+        bx: &mut Bx,
+        op: mir::BinOp,
+        lhs_addr: Bx::Value,
+        lhs_extra: Bx::Value,
+        rhs_addr: Bx::Value,
+        rhs_extra: Bx::Value,
+        _input_ty: Ty<'tcx>,
+    ) -> Bx::Value {
+        match op {
+            mir::BinOp::Eq => {
+                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
+                bx.and(lhs, rhs)
+            }
+            mir::BinOp::Ne => {
+                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
+                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
+                bx.or(lhs, rhs)
+            }
+            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
+                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
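+                // e.g., for `Lt` this is a lexicographic comparison of the
+                // (addr, extra) pairs: a.0 <u b.0 | (a.0 == b.0 & a.1 <u b.1).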
+                let (op, strict_op) = match op {
+                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
+                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
+                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
+                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
+                    _ => bug!(),
+                };
+                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
+                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
+                let rhs = bx.and(and_lhs, and_rhs);
+                bx.or(lhs, rhs)
+            }
+            _ => {
+                bug!("unexpected fat ptr binop");
+            }
+        }
+    }
+
+    pub fn codegen_scalar_checked_binop(
+        &mut self,
+        bx: &mut Bx,
+        op: mir::BinOp,
+        lhs: Bx::Value,
+        rhs: Bx::Value,
+        input_ty: Ty<'tcx>,
+    ) -> OperandValue<Bx::Value> {
+        // This case can currently arise only from functions marked
+        // with #[rustc_inherit_overflow_checks] and inlined from
+        // another crate (mostly core::num generic/#[inline] fns),
+        // while the current crate doesn't use overflow checks.
+        if !bx.cx().check_overflow() {
+            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+            return OperandValue::Pair(val, bx.cx().const_bool(false));
+        }
+
+        let (val, of) = match op {
+            // These are checked using intrinsics
+            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
+                let oop = match op {
+                    mir::BinOp::Add => OverflowOp::Add,
+                    mir::BinOp::Sub => OverflowOp::Sub,
+                    mir::BinOp::Mul => OverflowOp::Mul,
+                    _ => unreachable!(),
+                };
+                bx.checked_binop(oop, input_ty, lhs, rhs)
+            }
+            mir::BinOp::Shl | mir::BinOp::Shr => {
+                let lhs_llty = bx.cx().val_ty(lhs);
+                let rhs_llty = bx.cx().val_ty(rhs);
+                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
+                let outer_bits = bx.and(rhs, invert_mask);
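+                // e.g., for a 32-bit `lhs` the valid shift amounts are 0..=31,
+                // so `invert_mask` is `!0b11111`; any bit of `rhs` outside the
+                // low five bits means the shift would overflow.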
+
+                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
+                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+
+                (val, of)
+            }
+            _ => bug!("Operator `{:?}` is not a checkable operator", op),
+        };
+
+        OperandValue::Pair(val, of)
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+        match *rvalue {
+            mir::Rvalue::Ref(..) |
+            mir::Rvalue::AddressOf(..) |
+            mir::Rvalue::Len(..) |
+            mir::Rvalue::Cast(..) | // (*)
+            mir::Rvalue::BinaryOp(..) |
+            mir::Rvalue::CheckedBinaryOp(..) |
+            mir::Rvalue::UnaryOp(..) |
+            mir::Rvalue::Discriminant(..) |
+            mir::Rvalue::NullaryOp(..) |
+            mir::Rvalue::ThreadLocalRef(_) |
+            mir::Rvalue::Use(..) => // (*)
+                true,
+            mir::Rvalue::Repeat(..) |
+            mir::Rvalue::Aggregate(..) => {
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
+                let ty = self.monomorphize(&ty);
+                self.cx.spanned_layout_of(ty, span).is_zst()
+            }
+        }
+
+        // (*) this is only true if the type is suitable
+    }
+}
+
+fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    signed: bool,
+    x: Bx::Value,
+    float_ty: Bx::Type,
+    int_ty: Bx::Type,
+    int_layout: TyAndLayout<'tcx>,
+) -> Bx::Value {
+    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
+        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
+    }
+
+    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
+    if let Some(try_sat_result) = try_sat_result {
+        return try_sat_result;
+    }
+
+    let int_width = bx.cx().int_width(int_ty);
+    let float_width = bx.cx().float_width(float_ty);
+    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
+    // destination integer type after rounding towards zero. This `undef` value can cause UB in
+    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+    // Semantically, the mathematical value of the input is rounded towards zero to the next
+    // mathematical integer, and then the result is clamped into the range of the destination
+    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+    // the destination integer type. NaN is mapped to 0.
+    //
+    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
+    // a value representable in int_ty.
+    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
+    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
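+    // For example, for f32 -> i32: i32::MIN = -2^31 is exactly representable, but
+    // i32::MAX = 2^31 - 1 is not (f32 has 24 significand bits), so f_max rounds
+    // down to the nearest representable value, 2147483520.0. For f32 -> u8, both
+    // 0.0 and 255.0 are exact.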
+    let int_max = |signed: bool, int_width: u64| -> u128 {
+        let shift_amount = 128 - int_width;
+        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+    };
+    let int_min = |signed: bool, int_width: u64| -> i128 {
+        if signed { i128::MIN >> (128 - int_width) } else { 0 }
+    };
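+    // e.g., int_max(true, 8) == 127 and int_min(true, 8) == -128.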
+
+    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+        assert_eq!(rounded_min.status, Status::OK);
+        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+        assert!(rounded_max.value.is_finite());
+        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+    };
+    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+        assert_eq!(rounded_min.status, Status::OK);
+        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+        assert!(rounded_max.value.is_finite());
+        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+    };
+
+    let mut float_bits_to_llval = |bits| {
+        let bits_llval = match float_width {
+            32 => bx.cx().const_u32(bits as u32),
+            64 => bx.cx().const_u64(bits as u64),
+            n => bug!("unsupported float width {}", n),
+        };
+        bx.bitcast(bits_llval, float_ty)
+    };
+    let (f_min, f_max) = match float_width {
+        32 => compute_clamp_bounds_single(signed, int_width),
+        64 => compute_clamp_bounds_double(signed, int_width),
+        n => bug!("unsupported float width {}", n),
+    };
+    let f_min = float_bits_to_llval(f_min);
+    let f_max = float_bits_to_llval(f_max);
+    // To implement saturation, we perform the following steps:
+    //
+    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
+    // 2. Compare x to f_min and f_max, and use the comparison results to select:
+    //  a) int_ty::MIN if x < f_min or x is NaN
+    //  b) int_ty::MAX if x > f_max
+    //  c) the result of fpto[su]i otherwise
+    // 3. If x is NaN, return 0, otherwise return the result of step 2.
+    //
+    // This avoids using the undef result, because the selects only keep the fpto[su]i result for
+    // values in the range [f_min, f_max], which by definition fit into the destination type. It
+    // creates an undef temporary, but *producing* undef is not UB. Our use of undef does not
+    // introduce any non-determinism either.
+    // More importantly, the above procedure correctly implements saturating conversion.
+    // Proof (sketch):
+    // If x is NaN, 0 is returned by definition.
+    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
+    // This yields three cases to consider:
+    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+    //     saturating conversion for inputs in that range.
+    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
+    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
+    //     is correct.
+    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+    // QED.
+
+    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
+    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
+    let zero = bx.cx().const_uint(int_ty, 0);
+
+    // The codegen here differs quite a bit depending on whether our builder's
+    // `fptosi` and `fptoui` instructions may trap for out-of-bounds values. If
+    // they don't trap then we can start doing everything inline with a
+    // `select` instruction because it's ok to execute `fptosi` and `fptoui`
+    // even if we don't use the results.
+    if !bx.fptosui_may_trap(x, int_ty) {
+        // Step 1 ...
+        let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
+        let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
+        let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
+
+        // Step 2: We use two comparisons and two selects, with %s1 being the
+        // result:
+        //     %less_or_nan = fcmp ult %x, %f_min
+        //     %greater = fcmp ogt %x, %f_max
+        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+        //     %s1 = select %greater, int_ty::MAX, %s0
+        // Note that %less_or_nan uses an *unordered* comparison. This
+        // comparison is true if the operands are not comparable (i.e., if x is
+        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+        // x is NaN.
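+        // For example, with x = NaN: %less_or_nan is true, so %s0 is
+        // int_ty::MIN; %greater is false, so %s1 stays int_ty::MIN. Step 3
+        // below then turns this into 0 for signed types (for unsigned types
+        // int_ty::MIN is already 0).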
+        //
+        // Performance note: Unordered comparison can be lowered to a "flipped"
+        // comparison and a negation, and the negation can be merged into the
+        // select. Therefore, it is not necessarily any more expensive than an
+        // ordered ("normal") comparison. Whether these optimizations will be
+        // performed is ultimately up to the backend, but at least x86 does
+        // perform them.
+        let s0 = bx.select(less_or_nan, int_min, fptosui_result);
+        let s1 = bx.select(greater, int_max, s0);
+
+        // Step 3: NaN replacement.
+        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
+        // Therefore we only need to execute this step for signed integer types.
+        if signed {
+            // LLVM has no isNaN predicate, so we use (x == x) instead
+            let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
+            bx.select(cmp, s1, zero)
+        } else {
+            s1
+        }
+    } else {
+        // In this case we cannot execute `fptosi` or `fptoui` and then later
+        // discard the result. The builder is telling us that these instructions
+        // will trap on out-of-bounds values, so we need to use basic blocks and
+        // control flow to avoid executing the `fptosi` and `fptoui`
+        // instructions.
+        //
+        // The general idea of what we're constructing here is, for f64 -> i32:
+        //
+        //      ;; block so far... %0 is the argument
+        //      %result = alloca i32, align 4
+        //      %inbound_lower = fcmp oge double %0, 0xC1E0000000000000
+        //      %inbound_upper = fcmp ole double %0, 0x41DFFFFFFFC00000
+        //      ;; match (inbound_lower, inbound_upper) {
+        //      ;;     (true, true) => %0 can be converted without trapping
+        //      ;;     (false, false) => %0 is a NaN
+        //      ;;     (true, false) => %0 is too large
+        //      ;;     (false, true) => %0 is too small
+        //      ;; }
+        //      ;;
+        //      ;; The (true, true) check, go to %convert if so.
+        //      %inbounds = and i1 %inbound_lower, %inbound_upper
+        //      br i1 %inbounds, label %convert, label %specialcase
+        //
+        //  convert:
+        //      %cvt = call i32 @llvm.wasm.trunc.signed.i32.f64(double %0)
+        //      store i32 %cvt, i32* %result, align 4
+        //      br label %done
+        //
+        //  specialcase:
+        //      ;; Handle the cases where the number is NaN, too large or too small
+        //
+        //      ;; Either (true, false) or (false, true)
+        //      %is_not_nan = or i1 %inbound_lower, %inbound_upper
+        //      ;; Figure out which saturated value we are interested in if not `NaN`
+        //      %saturated = select i1 %inbound_lower, i32 2147483647, i32 -2147483648
+        //      ;; Choose between the saturated and NaN results
+        //      %result_nan = select i1 %is_not_nan, i32 %saturated, i32 0
+        //      store i32 %result_nan, i32* %result, align 4
+        //      br label %done
+        //
+        //  done:
+        //      %r = load i32, i32* %result, align 4
+        //      ;; ...
+        let done = bx.build_sibling_block("float_cast_done");
+        let mut convert = bx.build_sibling_block("float_cast_convert");
+        let mut specialcase = bx.build_sibling_block("float_cast_specialcase");
+
+        let result = PlaceRef::alloca(bx, int_layout);
+        result.storage_live(bx);
+
+        // Use control flow to figure out whether we can execute `fptosi` in a
+        // basic block, or whether we go to a different basic block to implement
+        // the saturating logic.
+        let inbound_lower = bx.fcmp(RealPredicate::RealOGE, x, f_min);
+        let inbound_upper = bx.fcmp(RealPredicate::RealOLE, x, f_max);
+        let inbounds = bx.and(inbound_lower, inbound_upper);
+        bx.cond_br(inbounds, convert.llbb(), specialcase.llbb());
+
+        // Translation of the `convert` basic block
+        let cvt = if signed { convert.fptosi(x, int_ty) } else { convert.fptoui(x, int_ty) };
+        convert.store(cvt, result.llval, result.align);
+        convert.br(done.llbb());
+
+        // Translation of the `specialcase` basic block. Note that like above
+        // we try to be a bit clever here for unsigned conversions. In those
+        // cases the `int_min` is zero so we don't need two select instructions,
+        // just one to choose whether we need `int_max` or not. If
+        // `inbound_lower` is true then we're guaranteed to not be `NaN` and
+        // since we're greater than zero we must be saturating to `int_max`. If
+        // `inbound_lower` is false then we're either NaN or less than zero, so
+        // we saturate to zero.
+        let result_nan = if signed {
+            let is_not_nan = specialcase.or(inbound_lower, inbound_upper);
+            let saturated = specialcase.select(inbound_lower, int_max, int_min);
+            specialcase.select(is_not_nan, saturated, zero)
+        } else {
+            specialcase.select(inbound_lower, int_max, int_min)
+        };
+        specialcase.store(result_nan, result.llval, result.align);
+        specialcase.br(done.llbb());
+
+        // Translation of the `done` basic block, positioning ourselves to
+        // continue from that point as well.
+        *bx = done;
+        let ret = bx.load(result.llval, result.align);
+        result.storage_dead(bx);
+        ret
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
new file mode 100644
index 00000000000..6f74ba77d4c
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -0,0 +1,124 @@
+use rustc_errors::struct_span_err;
+use rustc_middle::mir;
+
+use super::FunctionCx;
+use super::LocalRef;
+use super::OperandValue;
+use crate::traits::BuilderMethods;
+use crate::traits::*;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
+        debug!("codegen_statement(statement={:?})", statement);
+
+        self.set_debug_loc(&mut bx, statement.source_info);
+        match statement.kind {
+            mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
+                if let Some(index) = place.as_local() {
+                    match self.locals[index] {
+                        LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue),
+                        LocalRef::UnsizedPlace(cg_indirect_dest) => {
+                            self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
+                        }
+                        LocalRef::Operand(None) => {
+                            let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+                            self.locals[index] = LocalRef::Operand(Some(operand));
+                            self.debug_introduce_local(&mut bx, index);
+                            bx
+                        }
+                        LocalRef::Operand(Some(op)) => {
+                            if !op.layout.is_zst() {
+                                span_bug!(
+                                    statement.source_info.span,
+                                    "operand {:?} already assigned",
+                                    rvalue
+                                );
+                            }
+
+                            // If the type is zero-sized, the local has already
+                            // been set here, but we still need to make sure we
+                            // codegen the operand.
+                            self.codegen_rvalue_operand(bx, rvalue).0
+                        }
+                    }
+                } else {
+                    let cg_dest = self.codegen_place(&mut bx, place.as_ref());
+                    self.codegen_rvalue(bx, cg_dest, rvalue)
+                }
+            }
+            mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
+                self.codegen_place(&mut bx, place.as_ref())
+                    .codegen_set_discr(&mut bx, variant_index);
+                bx
+            }
+            mir::StatementKind::StorageLive(local) => {
+                if let LocalRef::Place(cg_place) = self.locals[local] {
+                    cg_place.storage_live(&mut bx);
+                } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+                    cg_indirect_place.storage_live(&mut bx);
+                }
+                bx
+            }
+            mir::StatementKind::StorageDead(local) => {
+                if let LocalRef::Place(cg_place) = self.locals[local] {
+                    cg_place.storage_dead(&mut bx);
+                } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+                    cg_indirect_place.storage_dead(&mut bx);
+                }
+                bx
+            }
+            mir::StatementKind::LlvmInlineAsm(ref asm) => {
+                let outputs = asm
+                    .outputs
+                    .iter()
+                    .map(|output| self.codegen_place(&mut bx, output.as_ref()))
+                    .collect();
+
+                let input_vals = asm.inputs.iter().fold(
+                    Vec::with_capacity(asm.inputs.len()),
+                    |mut acc, (span, input)| {
+                        let op = self.codegen_operand(&mut bx, input);
+                        if let OperandValue::Immediate(_) = op.val {
+                            acc.push(op.immediate());
+                        } else {
+                            struct_span_err!(
+                                bx.sess(),
+                                span.to_owned(),
+                                E0669,
+                                "invalid value for constraint in inline assembly"
+                            )
+                            .emit();
+                        }
+                        acc
+                    },
+                );
+
+                if input_vals.len() == asm.inputs.len() {
+                    let res = bx.codegen_llvm_inline_asm(
+                        &asm.asm,
+                        outputs,
+                        input_vals,
+                        statement.source_info.span,
+                    );
+                    if !res {
+                        struct_span_err!(
+                            bx.sess(),
+                            statement.source_info.span,
+                            E0668,
+                            "malformed inline assembly"
+                        )
+                        .emit();
+                    }
+                }
+                bx
+            }
+            mir::StatementKind::Coverage(box ref coverage) => {
+                self.codegen_coverage(&mut bx, coverage.clone());
+                bx
+            }
+            mir::StatementKind::FakeRead(..)
+            | mir::StatementKind::Retag { .. }
+            | mir::StatementKind::AscribeUserType(..)
+            | mir::StatementKind::Nop => bx,
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
new file mode 100644
index 00000000000..fc65149937f
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -0,0 +1,98 @@
+use crate::base;
+use crate::traits::*;
+use rustc_hir as hir;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::layout::HasTyCtxt;
+
+use rustc_middle::mir::mono::MonoItem;
+
+pub trait MonoItemExt<'a, 'tcx> {
+    fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx);
+    fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        cx: &'a Bx::CodegenCx,
+        linkage: Linkage,
+        visibility: Visibility,
+    );
+    fn to_raw_string(&self) -> String;
+}
+
+impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
+    fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
+        debug!(
+            "BEGIN IMPLEMENTING '{} ({})' in cgu {}",
+            self.to_string(cx.tcx(), true),
+            self.to_raw_string(),
+            cx.codegen_unit().name()
+        );
+
+        match *self {
+            MonoItem::Static(def_id) => {
+                cx.codegen_static(def_id, cx.tcx().is_mutable_static(def_id));
+            }
+            MonoItem::GlobalAsm(hir_id) => {
+                let item = cx.tcx().hir().expect_item(hir_id);
+                if let hir::ItemKind::GlobalAsm(ref ga) = item.kind {
+                    cx.codegen_global_asm(ga);
+                } else {
+                    span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
+                }
+            }
+            MonoItem::Fn(instance) => {
+                base::codegen_instance::<Bx>(&cx, instance);
+            }
+        }
+
+        debug!(
+            "END IMPLEMENTING '{} ({})' in cgu {}",
+            self.to_string(cx.tcx(), true),
+            self.to_raw_string(),
+            cx.codegen_unit().name()
+        );
+    }
+
+    fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        cx: &'a Bx::CodegenCx,
+        linkage: Linkage,
+        visibility: Visibility,
+    ) {
+        debug!(
+            "BEGIN PREDEFINING '{} ({})' in cgu {}",
+            self.to_string(cx.tcx(), true),
+            self.to_raw_string(),
+            cx.codegen_unit().name()
+        );
+
+        let symbol_name = self.symbol_name(cx.tcx()).name;
+
+        debug!("symbol {}", &symbol_name);
+
+        match *self {
+            MonoItem::Static(def_id) => {
+                cx.predefine_static(def_id, linkage, visibility, &symbol_name);
+            }
+            MonoItem::Fn(instance) => {
+                cx.predefine_fn(instance, linkage, visibility, &symbol_name);
+            }
+            MonoItem::GlobalAsm(..) => {}
+        }
+
+        debug!(
+            "END PREDEFINING '{} ({})' in cgu {}",
+            self.to_string(cx.tcx(), true),
+            self.to_raw_string(),
+            cx.codegen_unit().name()
+        );
+    }
+
+    fn to_raw_string(&self) -> String {
+        match *self {
+            MonoItem::Fn(instance) => {
+                format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr() as usize)
+            }
+            MonoItem::Static(id) => format!("Static({:?})", id),
+            MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id),
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs
new file mode 100644
index 00000000000..dd8495850bd
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs
@@ -0,0 +1,8 @@
+use super::BackendTypes;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::FnAbi;
+
+pub trait AbiBuilderMethods<'tcx>: BackendTypes {
+    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
+    fn get_param(&self, index: usize) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/asm.rs b/compiler/rustc_codegen_ssa/src/traits/asm.rs
new file mode 100644
index 00000000000..69931935c49
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/asm.rs
@@ -0,0 +1,61 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{GlobalAsm, LlvmInlineAsmInner};
+use rustc_middle::ty::Instance;
+use rustc_span::Span;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+#[derive(Debug)]
+pub enum InlineAsmOperandRef<'tcx, B: BackendTypes + ?Sized> {
+    In {
+        reg: InlineAsmRegOrRegClass,
+        value: OperandRef<'tcx, B::Value>,
+    },
+    Out {
+        reg: InlineAsmRegOrRegClass,
+        late: bool,
+        place: Option<PlaceRef<'tcx, B::Value>>,
+    },
+    InOut {
+        reg: InlineAsmRegOrRegClass,
+        late: bool,
+        in_value: OperandRef<'tcx, B::Value>,
+        out_place: Option<PlaceRef<'tcx, B::Value>>,
+    },
+    Const {
+        string: String,
+    },
+    SymFn {
+        instance: Instance<'tcx>,
+    },
+    SymStatic {
+        def_id: DefId,
+    },
+}
+
+pub trait AsmBuilderMethods<'tcx>: BackendTypes {
+    /// Take an inline assembly expression and splat it out via LLVM
+    fn codegen_llvm_inline_asm(
+        &mut self,
+        ia: &LlvmInlineAsmInner,
+        outputs: Vec<PlaceRef<'tcx, Self::Value>>,
+        inputs: Vec<Self::Value>,
+        span: Span,
+    ) -> bool;
+
+    /// Take an inline assembly expression and splat it out via the backend
+    fn codegen_inline_asm(
+        &mut self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[InlineAsmOperandRef<'tcx, Self>],
+        options: InlineAsmOptions,
+        line_spans: &[Span],
+    );
+}
+
+pub trait AsmMethods {
+    fn codegen_global_asm(&self, ga: &GlobalAsm);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
new file mode 100644
index 00000000000..3522ea01153
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -0,0 +1,119 @@
+use super::write::WriteBackendMethods;
+use super::CodegenObject;
+use crate::ModuleCodegen;
+
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_errors::ErrorReported;
+use rustc_middle::dep_graph::DepGraph;
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoaderDyn};
+use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_session::{
+    config::{self, OutputFilenames, PrintRequest},
+    Session,
+};
+use rustc_span::symbol::Symbol;
+use rustc_target::abi::LayoutOf;
+
+pub use rustc_data_structures::sync::MetadataRef;
+
+use std::any::Any;
+use std::sync::Arc;
+
+pub trait BackendTypes {
+    type Value: CodegenObject;
+    type Function: CodegenObject;
+
+    type BasicBlock: Copy;
+    type Type: CodegenObject;
+    type Funclet;
+
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `Dbg`, `Debug`, `DebugInfo`, `DI` etc.).
+    type DIScope: Copy;
+    type DIVariable: Copy;
+}
+
+pub trait Backend<'tcx>:
+    Sized + BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
+{
+}
+
+impl<'tcx, T> Backend<'tcx> for T where
+    Self: BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
+{
+}
+
+pub trait CodegenBackend {
+    fn init(&self, _sess: &Session) {}
+    fn print(&self, _req: PrintRequest, _sess: &Session) {}
+    fn target_features(&self, _sess: &Session) -> Vec<Symbol> {
+        vec![]
+    }
+    fn print_passes(&self) {}
+    fn print_version(&self) {}
+
+    fn metadata_loader(&self) -> Box<MetadataLoaderDyn>;
+    fn provide(&self, _providers: &mut Providers);
+    fn provide_extern(&self, _providers: &mut Providers);
+    fn codegen_crate<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any>;
+
+    /// This is called on the returned `Box<dyn Any>` from `codegen_backend`
+    ///
+    /// # Panics
+    ///
+    /// Panics when the passed `Box<dyn Any>` was not returned by `codegen_backend`.
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        sess: &Session,
+        dep_graph: &DepGraph,
+    ) -> Result<Box<dyn Any>, ErrorReported>;
+
+    /// This is called on the returned `Box<dyn Any>` from `join_codegen`
+    ///
+    /// # Panics
+    ///
+    /// Panics when the passed `Box<dyn Any>` was not returned by `join_codegen`.
+    fn link(
+        &self,
+        sess: &Session,
+        codegen_results: Box<dyn Any>,
+        outputs: &OutputFilenames,
+    ) -> Result<(), ErrorReported>;
+}
+
+pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send + Sync {
+    fn new_metadata(&self, sess: TyCtxt<'_>, mod_name: &str) -> Self::Module;
+    fn write_compressed_metadata<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        metadata: &EncodedMetadata,
+        llvm_module: &mut Self::Module,
+    );
+    fn codegen_allocator<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        mods: &mut Self::Module,
+        kind: AllocatorKind,
+    );
+    /// This generates the codegen unit and returns it along with
+    /// a `u64` giving an estimate of the unit's processing cost.
+    fn compile_codegen_unit(
+        &self,
+        tcx: TyCtxt<'_>,
+        cgu_name: Symbol,
+    ) -> (ModuleCodegen<Self::Module>, u64);
+    fn target_machine_factory(
+        &self,
+        sess: &Session,
+        opt_level: config::OptLevel,
+    ) -> Arc<dyn Fn() -> Result<Self::TargetMachine, String> + Send + Sync>;
+    fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
new file mode 100644
index 00000000000..5ffc83c5f99
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -0,0 +1,286 @@
+use super::abi::AbiBuilderMethods;
+use super::asm::AsmBuilderMethods;
+use super::coverageinfo::CoverageInfoBuilderMethods;
+use super::debuginfo::DebugInfoBuilderMethods;
+use super::intrinsic::IntrinsicCallMethods;
+use super::type_::ArgAbiMethods;
+use super::{HasCodegen, StaticBuilderMethods};
+
+use crate::common::{
+    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope,
+};
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use crate::MemFlags;
+
+use rustc_middle::ty::layout::HasParamEnv;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::HasTargetSpec;
+
+use std::iter::TrustedLen;
+use std::ops::Range;
+
+#[derive(Copy, Clone)]
+pub enum OverflowOp {
+    Add,
+    Sub,
+    Mul,
+}
+
+pub trait BuilderMethods<'a, 'tcx>:
+    HasCodegen<'tcx>
+    + CoverageInfoBuilderMethods<'tcx>
+    + DebugInfoBuilderMethods
+    + ArgAbiMethods<'tcx>
+    + AbiBuilderMethods<'tcx>
+    + IntrinsicCallMethods<'tcx>
+    + AsmBuilderMethods<'tcx>
+    + StaticBuilderMethods
+    + HasParamEnv<'tcx>
+    + HasTargetSpec
+{
+    fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &'b str) -> Self;
+    fn with_cx(cx: &'a Self::CodegenCx) -> Self;
+    fn build_sibling_block(&self, name: &str) -> Self;
+    fn cx(&self) -> &Self::CodegenCx;
+    fn llbb(&self) -> Self::BasicBlock;
+
+    fn position_at_end(&mut self, llbb: Self::BasicBlock);
+    fn ret_void(&mut self);
+    fn ret(&mut self, v: Self::Value);
+    fn br(&mut self, dest: Self::BasicBlock);
+    fn cond_br(
+        &mut self,
+        cond: Self::Value,
+        then_llbb: Self::BasicBlock,
+        else_llbb: Self::BasicBlock,
+    );
+    fn switch(
+        &mut self,
+        v: Self::Value,
+        else_llbb: Self::BasicBlock,
+        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)> + TrustedLen,
+    );
+    fn invoke(
+        &mut self,
+        llfn: Self::Value,
+        args: &[Self::Value],
+        then: Self::BasicBlock,
+        catch: Self::BasicBlock,
+        funclet: Option<&Self::Funclet>,
+    ) -> Self::Value;
+    fn unreachable(&mut self);
+
+    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn neg(&mut self, v: Self::Value) -> Self::Value;
+    fn fneg(&mut self, v: Self::Value) -> Self::Value;
+    fn not(&mut self, v: Self::Value) -> Self::Value;
+
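+    /// Emits a checked binary operation, returning the operation's result together
+    /// with a boolean overflow flag, the pair that MIR's checked operators expect
+    /// (a reading of how this crate's rvalue lowering consumes it):
+    ///
+    /// ```ignore (illustrative-only)
+    /// let (val, overflowed) = bx.checked_binop(OverflowOp::Add, ty, lhs, rhs);
+    /// ```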
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        ty: Ty<'_>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value);
+
+    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+    fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+    fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
+
+    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
+    -> OperandRef<'tcx, Self::Value>;
+
+    /// Called for `Rvalue::Repeat` when the element is neither a ZST nor optimizable using `memset`.
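+    ///
+    /// Takes and returns the builder by value because the fill loop may finish in a
+    /// fresh block; a rough call-site sketch:
+    ///
+    /// ```ignore (illustrative-only)
+    /// bx = bx.write_operand_repeatedly(elem, count, dest);
+    /// ```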
+    fn write_operand_repeatedly(
+        self,
+        elem: OperandRef<'tcx, Self::Value>,
+        count: u64,
+        dest: PlaceRef<'tcx, Self::Value>,
+    ) -> Self;
+
+    fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
+    fn nonnull_metadata(&mut self, load: Self::Value);
+
+    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
+    fn store_with_flags(
+        &mut self,
+        val: Self::Value,
+        ptr: Self::Value,
+        align: Align,
+        flags: MemFlags,
+    ) -> Self::Value;
+    fn atomic_store(
+        &mut self,
+        val: Self::Value,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    );
+
+    fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+    fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+    fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value;
+
+    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+    fn fptosui_may_trap(&self, val: Self::Value, dest_ty: Self::Type) -> bool;
+    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
+    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+
+    fn memcpy(
+        &mut self,
+        dst: Self::Value,
+        dst_align: Align,
+        src: Self::Value,
+        src_align: Align,
+        size: Self::Value,
+        flags: MemFlags,
+    );
+    fn memmove(
+        &mut self,
+        dst: Self::Value,
+        dst_align: Align,
+        src: Self::Value,
+        src_align: Align,
+        size: Self::Value,
+        flags: MemFlags,
+    );
+    fn memset(
+        &mut self,
+        ptr: Self::Value,
+        fill_byte: Self::Value,
+        size: Self::Value,
+        align: Align,
+        flags: MemFlags,
+    );
+
+    fn select(
+        &mut self,
+        cond: Self::Value,
+        then_val: Self::Value,
+        else_val: Self::Value,
+    ) -> Self::Value;
+
+    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
+    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
+    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
+    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
+    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
+
+    fn landing_pad(
+        &mut self,
+        ty: Self::Type,
+        pers_fn: Self::Value,
+        num_clauses: usize,
+    ) -> Self::Value;
+    fn set_cleanup(&mut self, landing_pad: Self::Value);
+    fn resume(&mut self, exn: Self::Value) -> Self::Value;
+    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
+    fn cleanup_ret(
+        &mut self,
+        funclet: &Self::Funclet,
+        unwind: Option<Self::BasicBlock>,
+    ) -> Self::Value;
+    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
+    fn catch_switch(
+        &mut self,
+        parent: Option<Self::Value>,
+        unwind: Option<Self::BasicBlock>,
+        num_handlers: usize,
+    ) -> Self::Value;
+    fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock);
+    fn set_personality_fn(&mut self, personality: Self::Value);
+
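+    /// Emits a compare-and-swap: stores `src` into `dst` if `dst` currently holds
+    /// `cmp`. `failure_order` applies when the comparison fails, and `weak` permits
+    /// spurious failure (as in LLVM's `cmpxchg weak`).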
+    fn atomic_cmpxchg(
+        &mut self,
+        dst: Self::Value,
+        cmp: Self::Value,
+        src: Self::Value,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> Self::Value;
+    fn atomic_rmw(
+        &mut self,
+        op: AtomicRmwBinOp,
+        dst: Self::Value,
+        src: Self::Value,
+        order: AtomicOrdering,
+    ) -> Self::Value;
+    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
+    fn set_invariant_load(&mut self, load: Self::Value);
+
+    /// Called for `StorageLive`
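+    ///
+    /// Together with `lifetime_end`, this brackets the live range of the backing
+    /// alloca so a backend (e.g. LLVM, via its lifetime intrinsics) can reuse the
+    /// stack slot outside that range.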
+    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
+
+    /// Called for `StorageDead`
+    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
+
+    fn instrprof_increment(
+        &mut self,
+        fn_name: Self::Value,
+        hash: Self::Value,
+        num_counters: Self::Value,
+        index: Self::Value,
+    );
+
+    fn call(
+        &mut self,
+        llfn: Self::Value,
+        args: &[Self::Value],
+        funclet: Option<&Self::Funclet>,
+    ) -> Self::Value;
+    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+    unsafe fn delete_basic_block(&mut self, bb: Self::BasicBlock);
+    fn do_not_inline(&mut self, llret: Self::Value);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
new file mode 100644
index 00000000000..6b58dea794b
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -0,0 +1,38 @@
+use super::BackendTypes;
+use crate::mir::place::PlaceRef;
+use rustc_middle::mir::interpret::{Allocation, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_span::Symbol;
+use rustc_target::abi::{self, Size};
+
+pub trait ConstMethods<'tcx>: BackendTypes {
+    // Constant constructors
+    fn const_null(&self, t: Self::Type) -> Self::Value;
+    fn const_undef(&self, t: Self::Type) -> Self::Value;
+    fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
+    fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
+    fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
+    fn const_bool(&self, val: bool) -> Self::Value;
+    fn const_i32(&self, i: i32) -> Self::Value;
+    fn const_u32(&self, i: u32) -> Self::Value;
+    fn const_u64(&self, i: u64) -> Self::Value;
+    fn const_usize(&self, i: u64) -> Self::Value;
+    fn const_u8(&self, i: u8) -> Self::Value;
+    fn const_real(&self, t: Self::Type, val: f64) -> Self::Value;
+
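+    /// Returns the string constant as a `(pointer, length)` pair, i.e. the two
+    /// halves of a `&str` fat pointer.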
+    fn const_str(&self, s: Symbol) -> (Self::Value, Self::Value);
+    fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
+
+    fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>;
+    fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
+
+    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: Self::Type) -> Self::Value;
+    fn from_const_alloc(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        alloc: &Allocation,
+        offset: Size,
+    ) -> PlaceRef<'tcx, Self::Value>;
+
+    fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
new file mode 100644
index 00000000000..b74e4e45901
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
@@ -0,0 +1,31 @@
+use super::BackendTypes;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::ty::Instance;
+
+pub trait CoverageInfoMethods: BackendTypes {
+    fn coverageinfo_finalize(&self);
+}
+
+pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
+    fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
+
+    fn add_counter_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        function_source_hash: u64,
+        id: CounterValueReference,
+        region: CodeRegion,
+    );
+
+    fn add_counter_expression_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        id: InjectedExpressionIndex,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+        region: CodeRegion,
+    );
+
+    fn add_unreachable_region(&mut self, instance: Instance<'tcx>, region: CodeRegion);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
new file mode 100644
index 00000000000..1ee0f489ffc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -0,0 +1,62 @@
+use super::BackendTypes;
+use crate::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_hir::def_id::CrateNum;
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::Size;
+
+pub trait DebugInfoMethods<'tcx>: BackendTypes {
+    fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value);
+
+    /// Creates the function-specific debug context.
+    ///
+    /// Returns the `FunctionDebugContext` for the function, which holds the
+    /// state needed for debug info creation, if debug info is enabled.
+    fn create_function_debug_context(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        llfn: Self::Function,
+        mir: &mir::Body<'_>,
+    ) -> Option<FunctionDebugContext<Self::DIScope>>;
+
+    fn extend_scope_to_file(
+        &self,
+        scope_metadata: Self::DIScope,
+        file: &SourceFile,
+        defining_crate: CrateNum,
+    ) -> Self::DIScope;
+    fn debuginfo_finalize(&self);
+
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn create_dbg_var(
+        &self,
+        dbg_context: &FunctionDebugContext<Self::DIScope>,
+        variable_name: Symbol,
+        variable_type: Ty<'tcx>,
+        scope_metadata: Self::DIScope,
+        variable_kind: VariableKind,
+        span: Span,
+    ) -> Self::DIVariable;
+}
+
+pub trait DebugInfoBuilderMethods: BackendTypes {
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn dbg_var_addr(
+        &mut self,
+        dbg_var: Self::DIVariable,
+        scope_metadata: Self::DIScope,
+        variable_alloca: Self::Value,
+        direct_offset: Size,
+        // NB: each offset implies a deref (i.e. they're steps in a pointer chain).
+        indirect_offsets: &[Size],
+        span: Span,
+    );
+    fn set_source_location(&mut self, scope: Self::DIScope, span: Span);
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
+    fn set_var_name(&mut self, value: Self::Value, name: &str);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/declare.rs b/compiler/rustc_codegen_ssa/src/traits/declare.rs
new file mode 100644
index 00000000000..690aacd2056
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/declare.rs
@@ -0,0 +1,65 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::{Instance, Ty};
+use rustc_target::abi::call::FnAbi;
+
+pub trait DeclareMethods<'tcx>: BackendTypes {
+    /// Declare a global value.
+    ///
+    /// If a value with the same name is already declared, the function will
+    /// return its `Value` instead.
+    fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value;
+
+    /// Declare a C ABI function.
+    ///
+    /// Only use this for foreign function ABIs and glue. For Rust functions use
+    /// `declare_fn` instead.
+    ///
+    /// If a value with the same name is already declared, the function will
+    /// update the declaration and return the existing `Value` instead.
+    fn declare_cfn(&self, name: &str, fn_type: Self::Type) -> Self::Function;
+
+    /// Declare a Rust function.
+    ///
+    /// If a value with the same name is already declared, the function will
+    /// update the declaration and return the existing `Value` instead.
+    fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Function;
+
+    /// Declare a global with an intention to define it.
+    ///
+    /// Use this function when you intend to define a global. This function will
+    /// return `None` if the name already has a definition associated with it. In that
+    /// case an error should be reported to the user, because this usually indicates
+    /// a user error (e.g., misuse of the `#[no_mangle]` or `#[export_name]` attributes).
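+    ///
+    /// A sketch of the expected call pattern (the error text here is illustrative,
+    /// not prescribed):
+    ///
+    /// ```ignore (illustrative-only)
+    /// let g = match cx.define_global(sym, llty) {
+    ///     Some(g) => g,
+    ///     None => cx.sess().fatal(&format!("symbol `{}` is already defined", sym)),
+    /// };
+    /// ```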
+    fn define_global(&self, name: &str, ty: Self::Type) -> Option<Self::Value>;
+
+    /// Declare a private global.
+    ///
+    /// Use this function when you intend to define a global without a name.
+    fn define_private_global(&self, ty: Self::Type) -> Self::Value;
+
+    /// Gets a declared value by name.
+    fn get_declared_value(&self, name: &str) -> Option<Self::Value>;
+
+    /// Gets a defined or externally defined (`AvailableExternally` linkage) value
+    /// by name.
+    fn get_defined_value(&self, name: &str) -> Option<Self::Value>;
+}
+
+pub trait PreDefineMethods<'tcx>: BackendTypes {
+    fn predefine_static(
+        &self,
+        def_id: DefId,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    );
+    fn predefine_fn(
+        &self,
+        instance: Instance<'tcx>,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    );
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
new file mode 100644
index 00000000000..9d48e233de6
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -0,0 +1,30 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use rustc_target::abi::call::FnAbi;
+
+pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
+    /// Remember to add all intrinsics here, in `librustc_typeck/check/mod.rs`,
+    /// and in `libcore/intrinsics.rs`; if you need access to any LLVM intrinsics,
+    /// add them to `librustc_codegen_llvm/context.rs`.
+    fn codegen_intrinsic_call(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[OperandRef<'tcx, Self::Value>],
+        llresult: Self::Value,
+        span: Span,
+    );
+
+    fn abort(&mut self);
+    fn assume(&mut self, val: Self::Value);
+    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
+    fn sideeffect(&mut self);
+    /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
+    /// Rust-defined C-variadic functions.
+    fn va_start(&mut self, val: Self::Value) -> Self::Value;
+    /// Trait method used to inject `va_end` on the "spoofed" `VaListImpl` before
+    /// Rust-defined C-variadic functions return.
+    fn va_end(&mut self, val: Self::Value) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/misc.rs b/compiler/rustc_codegen_ssa/src/traits/misc.rs
new file mode 100644
index 00000000000..fc57a9a80b2
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/misc.rs
@@ -0,0 +1,22 @@
+use super::BackendTypes;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_session::Session;
+use std::cell::RefCell;
+
+pub trait MiscMethods<'tcx>: BackendTypes {
+    fn vtables(
+        &self,
+    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Self::Value>>;
+    fn check_overflow(&self) -> bool;
+    fn get_fn(&self, instance: Instance<'tcx>) -> Self::Function;
+    fn get_fn_addr(&self, instance: Instance<'tcx>) -> Self::Value;
+    fn eh_personality(&self) -> Self::Value;
+    fn sess(&self) -> &Session;
+    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>;
+    fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+    fn set_frame_pointer_elimination(&self, llfn: Self::Function);
+    fn apply_target_cpu_attr(&self, llfn: Self::Function);
+    fn create_used_variable(&self);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
new file mode 100644
index 00000000000..0ac519dd0b1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -0,0 +1,102 @@
+//! Interface of a Rust codegen backend
+//!
+//! This crate defines all the traits that have to be implemented by a codegen backend in order to
+//! use the backend-agnostic codegen code in `rustc_codegen_ssa`.
+//!
+//! The interface is designed around two backend-specific data structures, the codegen context and
+//! the builder. The codegen context is meant to be read-only after its creation and during the
+//! actual codegen, while the builder stores per-function state during codegen and is used to
+//! produce the instructions of the backend IR.
+//!
+//! Finally, a third `Backend` structure has to implement methods related to how codegen information
+//! is passed to the backend, especially for asynchronous compilation.
+//!
+//! The traits contain associated types that are backend-specific, such as the backend's value or
+//! basic blocks.
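+//!
+//! As a minimal sketch (illustrative only, not part of this crate), backend-agnostic
+//! codegen is written against these traits; `bx` is the mutable builder, and methods
+//! of the read-only context are reachable through the builder's `Deref` impl:
+//!
+//! ```ignore (illustrative-only)
+//! fn emit_abs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(bx: &mut Bx, v: Bx::Value) -> Bx::Value {
+//!     let zero = bx.const_int(bx.type_isize(), 0);         // ConstMethods via Deref
+//!     let is_neg = bx.icmp(IntPredicate::IntSLT, v, zero); // v < 0 (signed)
+//!     let neg = bx.neg(v);
+//!     bx.select(is_neg, neg, v)
+//! }
+//! ```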
+
+mod abi;
+mod asm;
+mod backend;
+mod builder;
+mod consts;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+mod misc;
+mod statics;
+mod type_;
+mod write;
+
+pub use self::abi::AbiBuilderMethods;
+pub use self::asm::{AsmBuilderMethods, AsmMethods, InlineAsmOperandRef};
+pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
+pub use self::builder::{BuilderMethods, OverflowOp};
+pub use self::consts::ConstMethods;
+pub use self::coverageinfo::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
+pub use self::declare::{DeclareMethods, PreDefineMethods};
+pub use self::intrinsic::IntrinsicCallMethods;
+pub use self::misc::MiscMethods;
+pub use self::statics::{StaticBuilderMethods, StaticMethods};
+pub use self::type_::{
+    ArgAbiMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
+};
+pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt};
+use rustc_target::spec::HasTargetSpec;
+
+use std::fmt;
+
+pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
+impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}
+
+pub trait CodegenMethods<'tcx>:
+    Backend<'tcx>
+    + TypeMethods<'tcx>
+    + MiscMethods<'tcx>
+    + ConstMethods<'tcx>
+    + StaticMethods
+    + CoverageInfoMethods
+    + DebugInfoMethods<'tcx>
+    + DeclareMethods<'tcx>
+    + AsmMethods
+    + PreDefineMethods<'tcx>
+    + HasParamEnv<'tcx>
+    + HasTyCtxt<'tcx>
+    + HasTargetSpec
+{
+}
+
+impl<'tcx, T> CodegenMethods<'tcx> for T where
+    Self: Backend<'tcx>
+        + TypeMethods<'tcx>
+        + MiscMethods<'tcx>
+        + ConstMethods<'tcx>
+        + StaticMethods
+        + CoverageInfoMethods
+        + DebugInfoMethods<'tcx>
+        + DeclareMethods<'tcx>
+        + AsmMethods
+        + PreDefineMethods<'tcx>
+        + HasParamEnv<'tcx>
+        + HasTyCtxt<'tcx>
+        + HasTargetSpec
+{
+}
+
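+/// Ties a builder to its codegen context: the builder derefs to the context, and
+/// the context must share all of the builder's backend-specific associated types.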
+pub trait HasCodegen<'tcx>:
+    Backend<'tcx> + ::std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
+{
+    type CodegenCx: CodegenMethods<'tcx>
+        + BackendTypes<
+            Value = Self::Value,
+            Function = Self::Function,
+            BasicBlock = Self::BasicBlock,
+            Type = Self::Type,
+            Funclet = Self::Funclet,
+            DIScope = Self::DIScope,
+            DIVariable = Self::DIVariable,
+        >;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/statics.rs b/compiler/rustc_codegen_ssa/src/traits/statics.rs
new file mode 100644
index 00000000000..817fc02d166
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/statics.rs
@@ -0,0 +1,24 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_target::abi::Align;
+
+pub trait StaticMethods: BackendTypes {
+    fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
+    fn codegen_static(&self, def_id: DefId, is_mutable: bool);
+
+    /// Mark the given global value as "used" to prevent a backend from removing a
+    /// static variable that would otherwise appear unused.
+    ///
+    /// Static variables in Rust can be annotated with the `#[used]` attribute to direct the `rustc`
+    /// compiler to mark the variable as a "used global".
+    ///
+    /// ```no_run
+    /// #[used]
+    /// static FOO: u32 = 0;
+    /// ```
+    fn add_used_global(&self, global: Self::Value);
+}
+
+pub trait StaticBuilderMethods: BackendTypes {
+    fn get_static(&mut self, def_id: DefId) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
new file mode 100644
index 00000000000..726d948cfd4
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -0,0 +1,137 @@
+use super::misc::MiscMethods;
+use super::Backend;
+use super::HasCodegen;
+use crate::common::TypeKind;
+use crate::mir::place::PlaceRef;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::call::{ArgAbi, CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Integer};
+
+// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
+// `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves.
+pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
+    fn type_i1(&self) -> Self::Type;
+    fn type_i8(&self) -> Self::Type;
+    fn type_i16(&self) -> Self::Type;
+    fn type_i32(&self) -> Self::Type;
+    fn type_i64(&self) -> Self::Type;
+    fn type_i128(&self) -> Self::Type;
+    fn type_isize(&self) -> Self::Type;
+
+    fn type_f32(&self) -> Self::Type;
+    fn type_f64(&self) -> Self::Type;
+
+    fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+    fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+    fn type_kind(&self, ty: Self::Type) -> TypeKind;
+    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
+    fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+    fn element_type(&self, ty: Self::Type) -> Self::Type;
+
+    /// Returns the number of elements in `ty` if it is an LLVM vector type.
+    fn vector_length(&self, ty: Self::Type) -> usize;
+
+    fn float_width(&self, ty: Self::Type) -> usize;
+
+    /// Retrieves the bit width of the integer type `ty`.
+    fn int_width(&self, ty: Self::Type) -> u64;
+
+    fn val_ty(&self, v: Self::Value) -> Self::Type;
+}
+
+pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
+    fn type_i8p(&self) -> Self::Type {
+        self.type_i8p_ext(AddressSpace::DATA)
+    }
+
+    fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
+        self.type_ptr_to_ext(self.type_i8(), address_space)
+    }
+
+    fn type_int(&self) -> Self::Type {
+        match &self.sess().target.target.target_c_int_width[..] {
+            "16" => self.type_i16(),
+            "32" => self.type_i32(),
+            "64" => self.type_i64(),
+            width => bug!("Unsupported target_c_int_width: {}", width),
+        }
+    }
+
+    fn type_from_integer(&self, i: Integer) -> Self::Type {
+        use Integer::*;
+        match i {
+            I8 => self.type_i8(),
+            I16 => self.type_i16(),
+            I32 => self.type_i32(),
+            I64 => self.type_i64(),
+            I128 => self.type_i128(),
+        }
+    }
+
+    fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+        ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all())
+    }
+
+    fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+    }
+
+    fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_freeze(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+    }
+
+    fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
+        let param_env = ty::ParamEnv::reveal_all();
+        if ty.is_sized(self.tcx().at(DUMMY_SP), param_env) {
+            return false;
+        }
+
+        let tail = self.tcx().struct_tail_erasing_lifetimes(ty, param_env);
+        match tail.kind {
+            ty::Foreign(..) => false,
+            ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+            _ => bug!("unexpected unsized tail: {:?}", tail),
+        }
+    }
+}
+
+impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
+
+pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
+    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+    fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
+    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+    fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
+    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool;
+    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool;
+    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64;
+    fn scalar_pair_element_backend_type(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> Self::Type;
+}
+
+pub trait ArgAbiMethods<'tcx>: HasCodegen<'tcx> {
+    fn store_fn_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, Self::Value>,
+    );
+    fn store_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        val: Self::Value,
+        dst: PlaceRef<'tcx, Self::Value>,
+    );
+    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+}
+
+pub trait TypeMethods<'tcx>: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {}
+
+impl<T> TypeMethods<'tcx> for T where Self: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {}
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
new file mode 100644
index 00000000000..27d52e9b9c5
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -0,0 +1,64 @@
+use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use crate::{CompiledModule, ModuleCodegen};
+
+use rustc_errors::{FatalError, Handler};
+use rustc_middle::dep_graph::WorkProduct;
+
+pub trait WriteBackendMethods: 'static + Sized + Clone {
+    type Module: Send + Sync;
+    type TargetMachine;
+    type ModuleBuffer: ModuleBufferMethods;
+    type Context: ?Sized;
+    type ThinData: Send + Sync;
+    type ThinBuffer: ThinBufferMethods;
+
+    /// Performs fat LTO by merging all modules into a single one and returning it
+    /// for further optimization.
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<FatLTOInput<Self>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<LtoModuleCodegen<Self>, FatalError>;
+    /// Performs thin LTO by running the necessary global analysis and returning two
+    /// lists: one of the modules that need optimization, and another of the modules
+    /// that can simply be copied over from the incremental compilation cache.
+    fn run_thin_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<(String, Self::ThinBuffer)>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+    fn print_pass_timings(&self);
+    unsafe fn optimize(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        module: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<(), FatalError>;
+    unsafe fn optimize_thin(
+        cgcx: &CodegenContext<Self>,
+        thin: &mut ThinModule<Self>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+    unsafe fn codegen(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        module: ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<CompiledModule, FatalError>;
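+    /// Serializes the given module into a ThinLTO buffer, returning it together
+    /// with the module's name.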
+    fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer);
+    fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer);
+    fn run_lto_pass_manager(
+        cgcx: &CodegenContext<Self>,
+        llmod: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+        thin: bool,
+    );
+}
+
+pub trait ThinBufferMethods: Send + Sync {
+    fn data(&self) -> &[u8];
+}
+
+pub trait ModuleBufferMethods: Send + Sync {
+    fn data(&self) -> &[u8];
+}