about summary refs log tree commit diff
path: root/src/librustc_codegen_llvm/back
diff options
context:
space:
mode:
authorIrina Popa <irinagpopa@gmail.com>2018-05-08 16:10:16 +0300
committerIrina Popa <irinagpopa@gmail.com>2018-05-17 15:08:30 +0300
commitb63d7e2b1c4019e40051036bcb1fd5f254a8f6e2 (patch)
tree314792e2f467d17181d29d4988550058197ac029 /src/librustc_codegen_llvm/back
parente3150564f889a3bad01795d9fcb31d4f14d58a99 (diff)
downloadrust-b63d7e2b1c4019e40051036bcb1fd5f254a8f6e2.tar.gz
rust-b63d7e2b1c4019e40051036bcb1fd5f254a8f6e2.zip
Rename trans to codegen everywhere.
Diffstat (limited to 'src/librustc_codegen_llvm/back')
-rw-r--r--src/librustc_codegen_llvm/back/archive.rs325
-rw-r--r--src/librustc_codegen_llvm/back/bytecode.rs160
-rw-r--r--src/librustc_codegen_llvm/back/command.rs175
-rw-r--r--src/librustc_codegen_llvm/back/link.rs1630
-rw-r--r--src/librustc_codegen_llvm/back/linker.rs1037
-rw-r--r--src/librustc_codegen_llvm/back/lto.rs773
-rw-r--r--src/librustc_codegen_llvm/back/rpath.rs282
-rw-r--r--src/librustc_codegen_llvm/back/symbol_export.rs396
-rw-r--r--src/librustc_codegen_llvm/back/wasm.rs261
-rw-r--r--src/librustc_codegen_llvm/back/write.rs2390
10 files changed, 7429 insertions, 0 deletions
diff --git a/src/librustc_codegen_llvm/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs
new file mode 100644
index 00000000000..609629bffb9
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/archive.rs
@@ -0,0 +1,325 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A helper class for dealing with static archives
+
+use std::ffi::{CString, CStr};
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use std::str;
+
+use back::bytecode::RLIB_BYTECODE_EXTENSION;
+use libc;
+use llvm::archive_ro::{ArchiveRO, Child};
+use llvm::{self, ArchiveKind};
+use metadata::METADATA_FILENAME;
+use rustc::session::Session;
+
+/// Inputs for building or modifying an archive: the session, the output
+/// path (`dst`), an optional existing archive to seed from (`src`), and
+/// the directories searched for native libraries.
+pub struct ArchiveConfig<'a> {
+    pub sess: &'a Session,
+    pub dst: PathBuf,
+    pub src: Option<PathBuf>,
+    pub lib_search_paths: Vec<PathBuf>,
+}
+
+/// Helper for adding many files to an archive.
+#[must_use = "must call build() to finish building the archive"]
+pub struct ArchiveBuilder<'a> {
+    config: ArchiveConfig<'a>,
+    // Names of members to drop from the source archive when building.
+    removals: Vec<String>,
+    // Files and archive contents queued for addition by `build()`.
+    additions: Vec<Addition>,
+    // When true, `build()` asks LLVM to (re)generate the symbol table.
+    should_update_symbols: bool,
+    // Memoized result of opening `config.src`:
+    // `None` = not yet attempted; `Some(None)` = no src or open failed;
+    // `Some(Some(_))` = opened successfully.
+    src_archive: Option<Option<ArchiveRO>>,
+}
+
+/// A pending addition queued on an `ArchiveBuilder`.
+enum Addition {
+    /// A single file on disk, stored in the archive as `name_in_archive`.
+    File {
+        path: PathBuf,
+        name_in_archive: String,
+    },
+    /// The members of another archive, minus those for which `skip`
+    /// returns true.
+    Archive {
+        archive: ArchiveRO,
+        skip: Box<FnMut(&str) -> bool>,
+    },
+}
+
+/// Locates a native static library named `name` on disk.
+///
+/// Each entry of `search_paths` is probed for the platform-style filename
+/// (`staticlib_prefix` + name + `staticlib_suffix`) and, when different,
+/// the Unix-style `lib<name>.a`. Returns the first existing path; calls
+/// `sess.fatal` (which does not return) if nothing is found.
+pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session)
+                    -> PathBuf {
+    // On Windows, static libraries sometimes show up as libfoo.a and other
+    // times show up as foo.lib
+    let oslibname = format!("{}{}{}",
+                            sess.target.target.options.staticlib_prefix,
+                            name,
+                            sess.target.target.options.staticlib_suffix);
+    let unixlibname = format!("lib{}.a", name);
+
+    for path in search_paths {
+        debug!("looking for {} inside {:?}", name, path);
+        let test = path.join(&oslibname);
+        if test.exists() { return test }
+        // Only probe the Unix-style name when it actually differs, to
+        // avoid a redundant filesystem check.
+        if oslibname != unixlibname {
+            let test = path.join(&unixlibname);
+            if test.exists() { return test }
+        }
+    }
+    sess.fatal(&format!("could not find native static library `{}`, \
+                         perhaps an -L flag is missing?", name));
+}
+
+/// Returns true if the archive child is a real member worth keeping:
+/// it must have a name and must not be a symbol-table ("SYMDEF") entry.
+fn is_relevant_child(c: &Child) -> bool {
+    match c.name() {
+        Some(name) => !name.contains("SYMDEF"),
+        None => false,
+    }
+}
+
+impl<'a> ArchiveBuilder<'a> {
+    /// Create a new static archive, ready for modifying the archive specified
+    /// by `config`.
+    pub fn new(config: ArchiveConfig<'a>) -> ArchiveBuilder<'a> {
+        ArchiveBuilder {
+            config,
+            removals: Vec::new(),
+            additions: Vec::new(),
+            should_update_symbols: false,
+            src_archive: None,
+        }
+    }
+
+    /// Removes a file from this archive
+    pub fn remove_file(&mut self, file: &str) {
+        self.removals.push(file.to_string());
+    }
+
+    /// Lists all files in an archive
+    pub fn src_files(&mut self) -> Vec<String> {
+        if self.src_archive().is_none() {
+            return Vec::new()
+        }
+        // Both unwraps are safe: the check above guarantees the outer and
+        // inner options are populated.
+        let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap();
+        let ret = archive.iter()
+                         .filter_map(|child| child.ok())
+                         .filter(is_relevant_child)
+                         .filter_map(|child| child.name())
+                         .filter(|name| !self.removals.iter().any(|x| x == name))
+                         .map(|name| name.to_string())
+                         .collect();
+        return ret;
+    }
+
+    /// Lazily opens the source archive (`config.src`), memoizing the result
+    /// so the open is attempted at most once.
+    fn src_archive(&mut self) -> Option<&ArchiveRO> {
+        if let Some(ref a) = self.src_archive {
+            return a.as_ref()
+        }
+        let src = self.config.src.as_ref()?;
+        self.src_archive = Some(ArchiveRO::open(src).ok());
+        self.src_archive.as_ref().unwrap().as_ref()
+    }
+
+    /// Adds all of the contents of a native library to this archive. This will
+    /// search in the relevant locations for a library named `name`.
+    pub fn add_native_library(&mut self, name: &str) {
+        let location = find_library(name, &self.config.lib_search_paths,
+                                    self.config.sess);
+        // `|_| false` means: skip nothing, take every member of the library.
+        self.add_archive(&location, |_| false).unwrap_or_else(|e| {
+            self.config.sess.fatal(&format!("failed to add native library {}: {}",
+                                            location.to_string_lossy(), e));
+        });
+    }
+
+    /// Adds all of the contents of the rlib at the specified path to this
+    /// archive.
+    ///
+    /// This ignores adding the bytecode from the rlib, and if LTO is enabled
+    /// then the object file also isn't added.
+    pub fn add_rlib(&mut self,
+                    rlib: &Path,
+                    name: &str,
+                    lto: bool,
+                    skip_objects: bool) -> io::Result<()> {
+        // Ignoring obj file starting with the crate name
+        // as simple comparison is not enough - there
+        // might be also an extra name suffix
+        let obj_start = format!("{}", name);
+
+        self.add_archive(rlib, move |fname: &str| {
+            // Ignore bytecode/metadata files, no matter the name.
+            if fname.ends_with(RLIB_BYTECODE_EXTENSION) || fname == METADATA_FILENAME {
+                return true
+            }
+
+            // Don't include Rust objects if LTO is enabled
+            if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
+                return true
+            }
+
+            // Otherwise if this is *not* a rust object and we're skipping
+            // objects then skip this file
+            if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
+                return true
+            }
+
+            // ok, don't skip this
+            return false
+        })
+    }
+
+    /// Queues the members of `archive` for addition, except those for which
+    /// `skip` returns true. Nothing is written until `build()` runs.
+    fn add_archive<F>(&mut self, archive: &Path, skip: F)
+                      -> io::Result<()>
+        where F: FnMut(&str) -> bool + 'static
+    {
+        let archive = match ArchiveRO::open(archive) {
+            Ok(ar) => ar,
+            Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+        };
+        self.additions.push(Addition::Archive {
+            archive,
+            skip: Box::new(skip),
+        });
+        Ok(())
+    }
+
+    /// Adds an arbitrary file to this archive
+    pub fn add_file(&mut self, file: &Path) {
+        let name = file.file_name().unwrap().to_str().unwrap();
+        self.additions.push(Addition::File {
+            path: file.to_path_buf(),
+            name_in_archive: name.to_string(),
+        });
+    }
+
+    /// Indicate that the next call to `build` should update all symbols in
+    /// the archive (equivalent to running 'ar s' over it).
+    pub fn update_symbols(&mut self) {
+        self.should_update_symbols = true;
+    }
+
+    /// Combine the provided files, rlibs, and native libraries into a single
+    /// `Archive`.
+    pub fn build(&mut self) {
+        let kind = match self.llvm_archive_kind() {
+            Ok(kind) => kind,
+            Err(kind) => {
+                // On `Err`, `kind` is the unrecognized format string itself.
+                self.config.sess.fatal(&format!("Don't know how to build archive of type: {}",
+                                                kind));
+            }
+        };
+
+        if let Err(e) = self.build_with_llvm(kind) {
+            self.config.sess.fatal(&format!("failed to build archive: {}", e));
+        }
+
+    }
+
+    /// Parses the target's `archive_format` string into an `ArchiveKind`,
+    /// returning the raw string on failure so callers can report it.
+    fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> {
+        let kind = &*self.config.sess.target.target.options.archive_format;
+        kind.parse().map_err(|_| kind)
+    }
+
+    /// Writes the final archive via LLVM's archive writer: members kept from
+    /// the source archive (minus `removals`), then every queued addition.
+    fn build_with_llvm(&mut self, kind: ArchiveKind) -> io::Result<()> {
+        // `archives` and `strings` exist only to keep the source archives
+        // and CStrings alive until after LLVMRustWriteArchive, since the
+        // member objects hold raw pointers into them.
+        let mut archives = Vec::new();
+        let mut strings = Vec::new();
+        let mut members = Vec::new();
+        let removals = mem::replace(&mut self.removals, Vec::new());
+
+        unsafe {
+            // First, carry over members from the source archive, skipping
+            // anything listed in `removals` (and unnamed members).
+            if let Some(archive) = self.src_archive() {
+                for child in archive.iter() {
+                    let child = child.map_err(string_to_io_error)?;
+                    let child_name = match child.name() {
+                        Some(s) => s,
+                        None => continue,
+                    };
+                    if removals.iter().any(|r| r == child_name) {
+                        continue
+                    }
+
+                    let name = CString::new(child_name)?;
+                    members.push(llvm::LLVMRustArchiveMemberNew(ptr::null(),
+                                                                name.as_ptr(),
+                                                                child.raw()));
+                    strings.push(name);
+                }
+            }
+            for addition in mem::replace(&mut self.additions, Vec::new()) {
+                match addition {
+                    Addition::File { path, name_in_archive } => {
+                        let path = CString::new(path.to_str().unwrap())?;
+                        let name = CString::new(name_in_archive)?;
+                        members.push(llvm::LLVMRustArchiveMemberNew(path.as_ptr(),
+                                                                    name.as_ptr(),
+                                                                    ptr::null_mut()));
+                        strings.push(path);
+                        strings.push(name);
+                    }
+                    Addition::Archive { archive, mut skip } => {
+                        for child in archive.iter() {
+                            let child = child.map_err(string_to_io_error)?;
+                            if !is_relevant_child(&child) {
+                                continue
+                            }
+                            let child_name = child.name().unwrap();
+                            if skip(child_name) {
+                                continue
+                            }
+
+                            // It appears that LLVM's archive writer is a little
+                            // buggy if the name we pass down isn't just the
+                            // filename component, so chop that off here and
+                            // pass it in.
+                            //
+                            // See LLVM bug 25877 for more info.
+                            let child_name = Path::new(child_name)
+                                                  .file_name().unwrap()
+                                                  .to_str().unwrap();
+                            let name = CString::new(child_name)?;
+                            let m = llvm::LLVMRustArchiveMemberNew(ptr::null(),
+                                                                   name.as_ptr(),
+                                                                   child.raw());
+                            members.push(m);
+                            strings.push(name);
+                        }
+                        archives.push(archive);
+                    }
+                }
+            }
+
+            let dst = self.config.dst.to_str().unwrap().as_bytes();
+            let dst = CString::new(dst)?;
+            let r = llvm::LLVMRustWriteArchive(dst.as_ptr(),
+                                               members.len() as libc::size_t,
+                                               members.as_ptr(),
+                                               self.should_update_symbols,
+                                               kind);
+            let ret = if r.into_result().is_err() {
+                // On failure, pull a human-readable message out of LLVM if
+                // one is available.
+                let err = llvm::LLVMRustGetLastError();
+                let msg = if err.is_null() {
+                    "failed to write archive".to_string()
+                } else {
+                    String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+                            .into_owned()
+                };
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            } else {
+                Ok(())
+            };
+            // Free every member wrapper regardless of success or failure.
+            for member in members {
+                llvm::LLVMRustArchiveMemberFree(member);
+            }
+            return ret
+        }
+    }
+}
+
+/// Converts an LLVM archive error string into an `io::Error` (kind `Other`).
+fn string_to_io_error(s: String) -> io::Error {
+    io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+}
diff --git a/src/librustc_codegen_llvm/back/bytecode.rs b/src/librustc_codegen_llvm/back/bytecode.rs
new file mode 100644
index 00000000000..212d1aaf055
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/bytecode.rs
@@ -0,0 +1,160 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Management of the encoding of LLVM bytecode into rlibs
+//!
+//! This module contains the management of encoding LLVM bytecode into rlibs,
+//! primarily for the usage in LTO situations. Currently the compiler will
+//! unconditionally encode LLVM-IR into rlibs regardless of what's happening
+//! elsewhere, so we currently compress the bytecode via deflate to avoid taking
+//! up too much space on disk.
+//!
+//! After compressing the bytecode we then have the rest of the format to
+//! basically deal with various bugs in various archive implementations. The
+//! format currently is:
+//!
+//!     RLIB LLVM-BYTECODE OBJECT LAYOUT
+//!     Version 2
+//!     Bytes    Data
+//!     0..10    "RUST_OBJECT" encoded in ASCII
+//!     11..14   format version as little-endian u32
+//!     15..19   the length of the module identifier string
+//!     20..n    the module identifier string
+//!     n..n+8   size in bytes of deflate compressed LLVM bitcode as
+//!              little-endian u64
+//!     n+9..    compressed LLVM bitcode
+//!     ?        maybe a byte to make this whole thing even length
+
+use std::io::{Read, Write};
+use std::ptr;
+use std::str;
+
+use flate2::Compression;
+use flate2::read::DeflateDecoder;
+use flate2::write::DeflateEncoder;
+
+// This is the "magic number" expected at the beginning of a LLVM bytecode
+// object in an rlib.
+pub const RLIB_BYTECODE_OBJECT_MAGIC: &'static [u8] = b"RUST_OBJECT";
+
+// The version number this compiler will write to bytecode objects in rlibs
+pub const RLIB_BYTECODE_OBJECT_VERSION: u8 = 2;
+
+// File extension used for the compressed-bytecode members inside rlibs.
+pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z";
+
+/// Serializes an LLVM module (`bytecode`) and its `identifier` into the
+/// rlib bytecode object format described in the module docs above: magic,
+/// version, identifier, then deflate-compressed bitcode prefixed by its
+/// little-endian u64 length, padded to an even total length.
+pub fn encode(identifier: &str, bytecode: &[u8]) -> Vec<u8> {
+    let mut encoded = Vec::new();
+
+    // Start off with the magic string
+    encoded.extend_from_slice(RLIB_BYTECODE_OBJECT_MAGIC);
+
+    // Next up is the version
+    encoded.extend_from_slice(&[RLIB_BYTECODE_OBJECT_VERSION, 0, 0, 0]);
+
+    // Next is the LLVM module identifier length + contents
+    // (length written byte-by-byte as a little-endian u32)
+    let identifier_len = identifier.len();
+    encoded.extend_from_slice(&[
+        (identifier_len >>  0) as u8,
+        (identifier_len >>  8) as u8,
+        (identifier_len >> 16) as u8,
+        (identifier_len >> 24) as u8,
+    ]);
+    encoded.extend_from_slice(identifier.as_bytes());
+
+    // Next is the LLVM module deflate compressed, prefixed with its length. We
+    // don't know its length yet, so fill in 0s
+    let deflated_size_pos = encoded.len();
+    encoded.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
+
+    // Compress straight into `encoded`; the compressed size is the growth
+    // of the buffer across the write.
+    let before = encoded.len();
+    DeflateEncoder::new(&mut encoded, Compression::fast())
+        .write_all(bytecode)
+        .unwrap();
+    let after = encoded.len();
+
+    // Fill in the length we reserved space for before
+    // (little-endian u64, written byte-by-byte)
+    let bytecode_len = (after - before) as u64;
+    encoded[deflated_size_pos + 0] = (bytecode_len >>  0) as u8;
+    encoded[deflated_size_pos + 1] = (bytecode_len >>  8) as u8;
+    encoded[deflated_size_pos + 2] = (bytecode_len >> 16) as u8;
+    encoded[deflated_size_pos + 3] = (bytecode_len >> 24) as u8;
+    encoded[deflated_size_pos + 4] = (bytecode_len >> 32) as u8;
+    encoded[deflated_size_pos + 5] = (bytecode_len >> 40) as u8;
+    encoded[deflated_size_pos + 6] = (bytecode_len >> 48) as u8;
+    encoded[deflated_size_pos + 7] = (bytecode_len >> 56) as u8;
+
+    // If the number of bytes written to the object so far is odd, add a
+    // padding byte to make it even. This works around a crash bug in LLDB
+    // (see issue #15950)
+    if encoded.len() % 2 == 1 {
+        encoded.push(0);
+    }
+
+    return encoded
+}
+
+/// A parsed view into an rlib bytecode object; borrows from the raw bytes.
+pub struct DecodedBytecode<'a> {
+    // The LLVM module identifier recorded at encode time.
+    identifier: &'a str,
+    // The still-compressed (deflate) LLVM bitcode payload.
+    encoded_bytecode: &'a [u8],
+}
+
+impl<'a> DecodedBytecode<'a> {
+    /// Parses the header of an rlib bytecode object produced by `encode`,
+    /// validating the magic and version and bounds-checking each field
+    /// before slicing into `data`.
+    pub fn new(data: &'a [u8]) -> Result<DecodedBytecode<'a>, String> {
+        if !data.starts_with(RLIB_BYTECODE_OBJECT_MAGIC) {
+            return Err(format!("magic bytecode prefix not found"))
+        }
+        let data = &data[RLIB_BYTECODE_OBJECT_MAGIC.len()..];
+        if !data.starts_with(&[RLIB_BYTECODE_OBJECT_VERSION, 0, 0, 0]) {
+            return Err(format!("wrong version prefix found in bytecode"))
+        }
+        let data = &data[4..];
+        if data.len() < 4 {
+            return Err(format!("bytecode corrupted"))
+        }
+        // Unaligned little-endian u32: length of the identifier string.
+        // The length check above makes the 4-byte read in-bounds.
+        let identifier_len = unsafe {
+            u32::from_le(ptr::read_unaligned(data.as_ptr() as *const u32)) as usize
+        };
+        let data = &data[4..];
+        if data.len() < identifier_len {
+            return Err(format!("bytecode corrupted"))
+        }
+        let identifier = match str::from_utf8(&data[..identifier_len]) {
+            Ok(s) => s,
+            Err(_) => return Err(format!("bytecode corrupted"))
+        };
+        let data = &data[identifier_len..];
+        if data.len() < 8 {
+            return Err(format!("bytecode corrupted"))
+        }
+        // Unaligned little-endian u64: length of the compressed payload.
+        // The length check above makes the 8-byte read in-bounds.
+        let bytecode_len = unsafe {
+            u64::from_le(ptr::read_unaligned(data.as_ptr() as *const u64)) as usize
+        };
+        let data = &data[8..];
+        if data.len() < bytecode_len {
+            return Err(format!("bytecode corrupted"))
+        }
+        let encoded_bytecode = &data[..bytecode_len];
+
+        Ok(DecodedBytecode {
+            identifier,
+            encoded_bytecode,
+        })
+    }
+
+    /// Decompresses and returns the LLVM bitcode. Panics if the deflate
+    /// stream is malformed (`new` validates the header, not the payload).
+    pub fn bytecode(&self) -> Vec<u8> {
+        let mut data = Vec::new();
+        DeflateDecoder::new(self.encoded_bytecode).read_to_end(&mut data).unwrap();
+        return data
+    }
+
+    /// Returns the module identifier recorded at encode time.
+    pub fn identifier(&self) -> &'a str {
+        self.identifier
+    }
+}
diff --git a/src/librustc_codegen_llvm/back/command.rs b/src/librustc_codegen_llvm/back/command.rs
new file mode 100644
index 00000000000..9ebbdd7c3c9
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/command.rs
@@ -0,0 +1,175 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A thin wrapper around `Command` in the standard library which allows us to
+//! read the arguments that are built up.
+
+use std::ffi::{OsStr, OsString};
+use std::fmt;
+use std::io;
+use std::mem;
+use std::process::{self, Output};
+
+use rustc_target::spec::LldFlavor;
+
+/// A `process::Command` look-alike that records its program, arguments,
+/// and environment so they can be inspected (and length-estimated) before
+/// actually spawning.
+#[derive(Clone)]
+pub struct Command {
+    program: Program,
+    args: Vec<OsString>,
+    env: Vec<(OsString, OsString)>,
+}
+
+/// How the program should be invoked when the command is spawned.
+#[derive(Clone)]
+enum Program {
+    /// Run the executable directly.
+    Normal(OsString),
+    /// A Windows batch script, spawned via `cmd /c`.
+    CmdBatScript(OsString),
+    /// LLD, invoked with an explicit `-flavor` argument.
+    Lld(OsString, LldFlavor)
+}
+
+impl Command {
+    /// Creates a command that runs `program` directly.
+    pub fn new<P: AsRef<OsStr>>(program: P) -> Command {
+        Command::_new(Program::Normal(program.as_ref().to_owned()))
+    }
+
+    /// Creates a command for a Windows batch script (spawned via `cmd /c`).
+    pub fn bat_script<P: AsRef<OsStr>>(program: P) -> Command {
+        Command::_new(Program::CmdBatScript(program.as_ref().to_owned()))
+    }
+
+    /// Creates a command for LLD with the given flavor.
+    pub fn lld<P: AsRef<OsStr>>(program: P, flavor: LldFlavor) -> Command {
+        Command::_new(Program::Lld(program.as_ref().to_owned(), flavor))
+    }
+
+    // Shared constructor: starts with empty args and env.
+    fn _new(program: Program) -> Command {
+        Command {
+            program,
+            args: Vec::new(),
+            env: Vec::new(),
+        }
+    }
+
+    /// Appends a single argument; returns `self` for chaining.
+    pub fn arg<P: AsRef<OsStr>>(&mut self, arg: P) -> &mut Command {
+        self._arg(arg.as_ref());
+        self
+    }
+
+    /// Appends each argument in `args`; returns `self` for chaining.
+    pub fn args<I>(&mut self, args: I) -> &mut Command
+        where I: IntoIterator,
+              I::Item: AsRef<OsStr>,
+    {
+        for arg in args {
+            self._arg(arg.as_ref());
+        }
+        self
+    }
+
+    // Non-generic inner helper so the generic wrappers stay tiny.
+    fn _arg(&mut self, arg: &OsStr) {
+        self.args.push(arg.to_owned());
+    }
+
+    /// Records an environment variable to set when spawning.
+    pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Command
+        where K: AsRef<OsStr>,
+              V: AsRef<OsStr>
+    {
+        self._env(key.as_ref(), value.as_ref());
+        self
+    }
+
+    // Non-generic inner helper for `env`.
+    fn _env(&mut self, key: &OsStr, value: &OsStr) {
+        self.env.push((key.to_owned(), value.to_owned()));
+    }
+
+    /// Spawns the command and waits for it, capturing its output.
+    pub fn output(&mut self) -> io::Result<Output> {
+        self.command().output()
+    }
+
+    /// Materializes the recorded program/args/env into a real
+    /// `process::Command`, ready to spawn.
+    pub fn command(&self) -> process::Command {
+        let mut ret = match self.program {
+            Program::Normal(ref p) => process::Command::new(p),
+            Program::CmdBatScript(ref p) => {
+                let mut c = process::Command::new("cmd");
+                c.arg("/c").arg(p);
+                c
+            }
+            Program::Lld(ref p, flavor) => {
+                let mut c = process::Command::new(p);
+                c.arg("-flavor").arg(match flavor {
+                    LldFlavor::Wasm => "wasm",
+                    LldFlavor::Ld => "gnu",
+                    LldFlavor::Link => "link",
+                    LldFlavor::Ld64 => "darwin",
+                });
+                c
+            }
+        };
+        ret.args(&self.args);
+        ret.envs(self.env.clone());
+        return ret
+    }
+
+    // extensions
+
+    /// Returns the arguments recorded so far.
+    pub fn get_args(&self) -> &[OsString] {
+        &self.args
+    }
+
+    /// Takes ownership of the recorded arguments, leaving the list empty.
+    pub fn take_args(&mut self) -> Vec<OsString> {
+        mem::replace(&mut self.args, Vec::new())
+    }
+
+    /// Returns a `true` if we're pretty sure that this'll blow OS spawn limits,
+    /// or `false` if we should attempt to spawn and see what the OS says.
+    pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool {
+        // We mostly only care about Windows in this method, on Unix the limits
+        // can be gargantuan anyway so we're pretty unlikely to hit them
+        if cfg!(unix) {
+            return false
+        }
+
+        // Right now LLD doesn't support the `@` syntax of passing an argument
+        // through files, so regardless of the platform we try to go to the OS
+        // on this one.
+        if let Program::Lld(..) = self.program {
+            return false
+        }
+
+        // Ok so on Windows to spawn a process is 32,768 characters in its
+        // command line [1]. Unfortunately we don't actually have access to that
+        // as it's calculated just before spawning. Instead we perform a
+        // poor-man's guess as to how long our command line will be. We're
+        // assuming here that we don't have to escape every character...
+        //
+        // Turns out though that `cmd.exe` has even smaller limits, 8192
+        // characters [2]. Linkers can often be batch scripts (for example
+        // Emscripten, Gecko's current build system) which means that we're
+        // running through batch scripts. These linkers often just forward
+        // arguments elsewhere (and maybe tack on more), so if we blow 8192
+        // bytes we'll typically cause them to blow as well.
+        //
+        // Basically as a result just perform an inflated estimate of what our
+        // command line will look like and test if it's > 8192 (we actually
+        // test against 6k to artificially inflate our estimate). If all else
+        // fails we'll fall back to the normal unix logic of testing the OS
+        // error code if we fail to spawn and automatically re-spawning the
+        // linker with smaller arguments.
+        //
+        // [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx
+        // [2]: https://blogs.msdn.microsoft.com/oldnewthing/20031210-00/?p=41553
+
+        let estimated_command_line_len =
+            self.args.iter().map(|a| a.len()).sum::<usize>();
+        estimated_command_line_len > 1024 * 6
+    }
+}
+
+/// Debug-formats exactly as the equivalent `process::Command` would.
+impl fmt::Debug for Command {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.command().fmt(f)
+    }
+}
diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs
new file mode 100644
index 00000000000..dbfd430a3e2
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/link.rs
@@ -0,0 +1,1630 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::wasm;
+use cc::windows_registry;
+use super::archive::{ArchiveBuilder, ArchiveConfig};
+use super::bytecode::RLIB_BYTECODE_EXTENSION;
+use super::linker::Linker;
+use super::command::Command;
+use super::rpath::RPathConfig;
+use super::rpath;
+use metadata::METADATA_FILENAME;
+use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType, PrintRequest};
+use rustc::session::config::{RUST_CGU_EXT, Lto};
+use rustc::session::filesearch;
+use rustc::session::search_paths::PathKind;
+use rustc::session::Session;
+use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind};
+use rustc::middle::dependency_format::Linkage;
+use {CodegenResults, CrateInfo};
+use rustc::util::common::time;
+use rustc::util::fs::fix_windows_verbatim_for_gcc;
+use rustc::hir::def_id::CrateNum;
+use tempdir::TempDir;
+use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor, TargetTriple};
+use rustc_data_structures::fx::FxHashSet;
+use context::get_reloc_model;
+use llvm;
+
+use std::ascii;
+use std::char;
+use std::env;
+use std::fmt;
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+use std::process::{Output, Stdio};
+use std::str;
+use syntax::attr;
+
+/// The LLVM module name containing crate-metadata. This includes a `.` on
+/// purpose, so it cannot clash with the name of a user-defined module.
+pub const METADATA_MODULE_NAME: &'static str = "crate.metadata";
+
+// same as for metadata above, but for allocator shim
+pub const ALLOCATOR_MODULE_NAME: &'static str = "crate.allocator";
+
+// Re-export the linking helpers that are shared with other codegen backends.
+pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default_output_for_target,
+                                  invalid_output_for_target, build_link_meta, out_filename,
+                                  check_file_is_writeable};
+
+// The third parameter is for env vars, used on windows to set up the
+// path for MSVC to find its DLLs, and gcc to find its bundled
+// toolchain
+//
+// NOTE(review): the comment above looks stale for this signature — the
+// function returns `(linker_path, command)`, and the env vars are set
+// directly on the returned `Command` (see the PATH handling below).
+pub fn get_linker(sess: &Session) -> (PathBuf, Command) {
+    // If our linker looks like a batch script on Windows then to execute this
+    // we'll need to spawn `cmd` explicitly. This is primarily done to handle
+    // emscripten where the linker is `emcc.bat` and needs to be spawned as
+    // `cmd /c emcc.bat ...`.
+    //
+    // This worked historically but is needed manually since #42436 (regression
+    // was tagged as #42791) and some more info can be found on #44443 for
+    // emscripten itself.
+    let cmd = |linker: &Path| {
+        if let Some(linker) = linker.to_str() {
+            if cfg!(windows) && linker.ends_with(".bat") {
+                return Command::bat_script(linker)
+            }
+        }
+        match sess.linker_flavor() {
+            LinkerFlavor::Lld(f) => Command::lld(linker, f),
+            _ => Command::new(linker),
+
+        }
+    };
+
+    // Probe the Windows registry for MSVC's link.exe; besides supplying the
+    // default linker path, the tool's extra args and env are applied below.
+    let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe");
+
+    // Linker selection precedence: explicit `-C linker`, then the target
+    // spec's configured linker, then a per-flavor fallback binary name.
+    let linker_path = sess.opts.cg.linker.as_ref().map(|s| &**s)
+        .or(sess.target.target.options.linker.as_ref().map(|s| s.as_ref()))
+        .unwrap_or(match sess.linker_flavor() {
+            LinkerFlavor::Msvc => {
+                msvc_tool.as_ref().map(|t| t.path()).unwrap_or("link.exe".as_ref())
+            }
+            LinkerFlavor::Em if cfg!(windows) => "emcc.bat".as_ref(),
+            LinkerFlavor::Em => "emcc".as_ref(),
+            LinkerFlavor::Gcc => "cc".as_ref(),
+            LinkerFlavor::Ld => "ld".as_ref(),
+            LinkerFlavor::Lld(_) => "lld".as_ref(),
+        });
+
+    let mut cmd = cmd(linker_path);
+
+    // The compiler's sysroot often has some bundled tools, so add it to the
+    // PATH for the child.
+    let mut new_path = sess.host_filesearch(PathKind::All)
+                           .get_tools_search_paths();
+    let mut msvc_changed_path = false;
+    if sess.target.target.options.is_like_msvc {
+        if let Some(ref tool) = msvc_tool {
+            cmd.args(tool.args());
+            for &(ref k, ref v) in tool.env() {
+                if k == "PATH" {
+                    new_path.extend(env::split_paths(v));
+                    msvc_changed_path = true;
+                } else {
+                    cmd.env(k, v);
+                }
+            }
+        }
+    }
+
+    // Keep the pre-existing PATH unless the MSVC tool already supplied one.
+    if !msvc_changed_path {
+        if let Some(path) = env::var_os("PATH") {
+            new_path.extend(env::split_paths(&path));
+        }
+    }
+    cmd.env("PATH", env::join_paths(new_path).unwrap());
+
+    (linker_path.to_path_buf(), cmd)
+}
+
+/// Deletes the file at `path`, reporting a (non-fatal) session error on
+/// failure instead of panicking — used for best-effort cleanup of temporary
+/// link artifacts.
+pub fn remove(sess: &Session, path: &Path) {
+    match fs::remove_file(path) {
+        Ok(..) => {}
+        Err(e) => {
+            sess.err(&format!("failed to remove {}: {}",
+                             path.display(),
+                             e));
+        }
+    }
+}
+
+/// Perform the linkage portion of the compilation phase. This will generate all
+/// of the requested outputs for this compilation session.
+///
+/// Returns the paths of every artifact produced across all requested crate
+/// types. Temporary objects/bytecode are cleaned up afterwards unless
+/// `-C save-temps` is set.
+pub(crate) fn link_binary(sess: &Session,
+                          codegen_results: &CodegenResults,
+                          outputs: &OutputFilenames,
+                          crate_name: &str) -> Vec<PathBuf> {
+    let mut out_filenames = Vec::new();
+    // Produce outputs for each requested crate type (rlib, staticlib, etc).
+    for &crate_type in sess.crate_types.borrow().iter() {
+        // Ignore executable crates if we have -Z no-codegen, as they will error.
+        let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
+        if (sess.opts.debugging_opts.no_codegen || !sess.opts.output_types.should_codegen()) &&
+           !output_metadata &&
+           crate_type == config::CrateTypeExecutable {
+            continue;
+        }
+
+        if invalid_output_for_target(sess, crate_type) {
+           bug!("invalid output type `{:?}` for target os `{}`",
+                crate_type, sess.opts.target_triple);
+        }
+        let mut out_files = link_binary_output(sess,
+                                               codegen_results,
+                                               crate_type,
+                                               outputs,
+                                               crate_name);
+        out_filenames.append(&mut out_files);
+    }
+
+    // Remove the temporary object file and metadata if we aren't saving temps
+    if !sess.opts.cg.save_temps {
+        // Object files may still be wanted for their debuginfo (split-dwarf
+        // style schemes); see `preserve_objects_for_their_debuginfo`.
+        if sess.opts.output_types.should_codegen() &&
+            !preserve_objects_for_their_debuginfo(sess)
+        {
+            for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+                remove(sess, obj);
+            }
+        }
+        for obj in codegen_results.modules.iter().filter_map(|m| m.bytecode_compressed.as_ref()) {
+            remove(sess, obj);
+        }
+        if let Some(ref obj) = codegen_results.metadata_module.object {
+            remove(sess, obj);
+        }
+        if let Some(ref allocator) = codegen_results.allocator_module {
+            if let Some(ref obj) = allocator.object {
+                remove(sess, obj);
+            }
+            if let Some(ref bc) = allocator.bytecode_compressed {
+                remove(sess, bc);
+            }
+        }
+    }
+
+    out_filenames
+}
+
+/// Returns a boolean indicating whether we should preserve the object files on
+/// the filesystem for their debug information. This is often useful with
+/// split-dwarf like schemes.
+fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool {
+    // If the objects don't have debuginfo there's nothing to preserve.
+    if sess.opts.debuginfo == NoDebugInfo {
+        return false
+    }
+
+    // If we're only producing artifacts that are archives, no need to preserve
+    // the objects as they're losslessly contained inside the archives.
+    let output_linked = sess.crate_types.borrow()
+        .iter()
+        .any(|x| *x != config::CrateTypeRlib && *x != config::CrateTypeStaticlib);
+    if !output_linked {
+        return false
+    }
+
+    // If we're on OSX then the equivalent of split dwarf is turned on by
+    // default. The final executable won't actually have any debug information
+    // except it'll have pointers to elsewhere. Historically we've always run
+    // `dsymutil` to "link all the dwarf together" but this is actually sort of
+    // a bummer for incremental compilation! (the whole point of split dwarf is
+    // that you don't do this sort of dwarf link).
+    //
+    // Basically as a result this just means that if we're on OSX and we're
+    // *not* running dsymutil then the object files are the only source of truth
+    // for debug information, so we must preserve them.
+    if sess.target.target.options.is_like_osx {
+        match sess.opts.debugging_opts.run_dsymutil {
+            // dsymutil is not being run, preserve objects
+            Some(false) => return true,
+
+            // dsymutil is being run, no need to preserve the objects
+            Some(true) => return false,
+
+            // The default historical behavior was to always run dsymutil, so
+            // we're preserving that temporarily, but we're likely to switch the
+            // default soon.
+            None => return false,
+        }
+    }
+
+    // All other targets: no need to keep the intermediate objects around.
+    false
+}
+
+/// Computes the output path for the crate's `.rmeta` file — either the
+/// explicitly requested single output file or `lib<crate><extra>.rmeta` in
+/// the output directory — and verifies it is writeable before returning it.
+fn filename_for_metadata(sess: &Session, crate_name: &str, outputs: &OutputFilenames) -> PathBuf {
+    let out_filename = outputs.single_output_file.clone()
+        .unwrap_or(outputs
+            .out_directory
+            .join(&format!("lib{}{}.rmeta", crate_name, sess.opts.cg.extra_filename)));
+    check_file_is_writeable(&out_filename, sess);
+    out_filename
+}
+
+/// Invokes `f` with the crate number and rlib path of every upstream crate
+/// that is statically linked into the current crate, according to the
+/// dependency formats computed for the session's crate types.
+///
+/// Returns an `Err` string when no suitable dependency-format list exists or
+/// when a required rlib is missing (only an rmeta, or nothing at all).
+pub(crate) fn each_linked_rlib(sess: &Session,
+                               info: &CrateInfo,
+                               f: &mut FnMut(CrateNum, &Path)) -> Result<(), String> {
+    let crates = info.used_crates_static.iter();
+    let fmts = sess.dependency_formats.borrow();
+    // Prefer the executable format list, falling back through the other
+    // "fully static" crate types.
+    let fmts = fmts.get(&config::CrateTypeExecutable)
+                   .or_else(|| fmts.get(&config::CrateTypeStaticlib))
+                   .or_else(|| fmts.get(&config::CrateTypeCdylib))
+                   .or_else(|| fmts.get(&config::CrateTypeProcMacro));
+    let fmts = match fmts {
+        Some(f) => f,
+        None => return Err(format!("could not find formats for rlibs"))
+    };
+    for &(cnum, ref path) in crates {
+        // The format list is indexed by `cnum - 1`.
+        match fmts.get(cnum.as_usize() - 1) {
+            Some(&Linkage::NotLinked) |
+            Some(&Linkage::IncludedFromDylib) => continue,
+            Some(_) => {}
+            None => return Err(format!("could not find formats for rlibs"))
+        }
+        let name = &info.crate_name[&cnum];
+        let path = match *path {
+            LibSource::Some(ref p) => p,
+            LibSource::MetadataOnly => {
+                return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file",
+                                   name))
+            }
+            LibSource::None => {
+                return Err(format!("could not find rlib for: `{}`", name))
+            }
+        };
+        f(cnum, &path);
+    }
+    Ok(())
+}
+
+/// Returns a boolean indicating whether the specified crate should be ignored
+/// during LTO.
+///
+/// Crates ignored during LTO are not lumped together in the "massive object
+/// file" that we create and are linked in their normal rlib states. See
+/// comments below for what crates do not participate in LTO.
+///
+/// It's unusual for a crate to not participate in LTO. Typically only
+/// compiler-specific and unstable crates have a reason to not participate in
+/// LTO.
+pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool {
+    // If our target enables builtin function lowering in LLVM then the
+    // crates providing these functions don't participate in LTO (e.g.
+    // no_builtins or compiler builtins crates).
+    //
+    // Note: when the target itself sets `no_builtins`, nothing is excluded.
+    !sess.target.target.options.no_builtins &&
+        (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum))
+}
+
+/// Links a single crate type: emits the `.rmeta` file if requested, then
+/// builds the rlib/staticlib/natively-linked artifact. Returns the paths of
+/// all files produced for this crate type.
+fn link_binary_output(sess: &Session,
+                      codegen_results: &CodegenResults,
+                      crate_type: config::CrateType,
+                      outputs: &OutputFilenames,
+                      crate_name: &str) -> Vec<PathBuf> {
+    for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+        check_file_is_writeable(obj, sess);
+    }
+
+    let mut out_filenames = vec![];
+
+    if outputs.outputs.contains_key(&OutputType::Metadata) {
+        let out_filename = filename_for_metadata(sess, crate_name, outputs);
+        // To avoid races with another rustc process scanning the output directory,
+        // we need to write the file somewhere else and atomically move it to its
+        // final destination, with a `fs::rename` call. In order for the rename to
+        // always succeed, the temporary file needs to be on the same filesystem,
+        // which is why we create it inside the output directory specifically.
+        let metadata_tmpdir = match TempDir::new_in(out_filename.parent().unwrap(), "rmeta") {
+            Ok(tmpdir) => tmpdir,
+            Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)),
+        };
+        let metadata = emit_metadata(sess, codegen_results, &metadata_tmpdir);
+        if let Err(e) = fs::rename(metadata, &out_filename) {
+            sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+        }
+        out_filenames.push(out_filename);
+    }
+
+    // Scratch directory for intermediate link inputs (metadata for rlibs,
+    // linker response files, etc).
+    let tmpdir = match TempDir::new("rustc") {
+        Ok(tmpdir) => tmpdir,
+        Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)),
+    };
+
+    if outputs.outputs.should_codegen() {
+        let out_filename = out_filename(sess, crate_type, outputs, crate_name);
+        match crate_type {
+            config::CrateTypeRlib => {
+                link_rlib(sess,
+                          codegen_results,
+                          RlibFlavor::Normal,
+                          &out_filename,
+                          &tmpdir).build();
+            }
+            config::CrateTypeStaticlib => {
+                link_staticlib(sess, codegen_results, &out_filename, &tmpdir);
+            }
+            _ => {
+                link_natively(sess, crate_type, &out_filename, codegen_results, tmpdir.path());
+            }
+        }
+        out_filenames.push(out_filename);
+    }
+
+    // With -C save-temps, leak the temp dir so its contents survive.
+    if sess.opts.cg.save_temps {
+        let _ = tmpdir.into_path();
+    }
+
+    out_filenames
+}
+
+/// Collects the native-library search paths configured for the target; used
+/// by `archive_config` so the archive builder can resolve library members.
+fn archive_search_paths(sess: &Session) -> Vec<PathBuf> {
+    let mut search = Vec::new();
+    sess.target_filesearch(PathKind::Native).for_each_lib_search_path(|path, _| {
+        search.push(path.to_path_buf());
+    });
+    return search;
+}
+
+/// Builds the `ArchiveConfig` consumed by `ArchiveBuilder`: the archive is
+/// written to `output`, optionally seeded from an existing archive `input`,
+/// and searches the target's native library paths.
+fn archive_config<'a>(sess: &'a Session,
+                      output: &Path,
+                      input: Option<&Path>) -> ArchiveConfig<'a> {
+    ArchiveConfig {
+        sess,
+        dst: output.to_path_buf(),
+        src: input.map(|p| p.to_path_buf()),
+        lib_search_paths: archive_search_paths(sess),
+    }
+}
+
+/// We use a temp directory here to avoid races between concurrent rustc processes,
+/// such as builds in the same directory using the same filename for metadata while
+/// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a
+/// directory being searched for `extern crate` (observing an incomplete file).
+/// The returned path is the temporary file containing the complete metadata.
+fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir)
+                     -> PathBuf {
+    let out_filename = tmpdir.path().join(METADATA_FILENAME);
+    let result = fs::write(&out_filename, &codegen_results.metadata.raw_data);
+
+    // Failing to write the metadata is unrecoverable for this compilation.
+    if let Err(e) = result {
+        sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+    }
+
+    out_filename
+}
+
+/// Controls which extra (non-object) files `link_rlib` adds to the archive.
+enum RlibFlavor {
+    /// A regular rlib: metadata and compressed bytecode are included.
+    Normal,
+    /// The base archive for a staticlib: no metadata/bytecode, but the
+    /// allocator shim object when present.
+    StaticlibBase,
+}
+
+// Create an 'rlib'
+//
+// An rlib in its current incarnation is essentially a renamed .a file. The
+// rlib primarily contains the object file of the crate, but it also contains
+// all of the object files from native libraries. This is done by unzipping
+// native libraries and inserting all of the contents into this archive.
+//
+// The `ArchiveBuilder` is returned un-built so that callers (e.g.
+// `link_staticlib`) can add further members before calling `build()`.
+fn link_rlib<'a>(sess: &'a Session,
+                 codegen_results: &CodegenResults,
+                 flavor: RlibFlavor,
+                 out_filename: &Path,
+                 tmpdir: &TempDir) -> ArchiveBuilder<'a> {
+    info!("preparing rlib to {:?}", out_filename);
+    let mut ab = ArchiveBuilder::new(archive_config(sess, out_filename, None));
+
+    // The crate's own object files come first (architecture detection relies
+    // on this ordering; see the long comment below).
+    for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+        ab.add_file(obj);
+    }
+
+    // Note that in this loop we are ignoring the value of `lib.cfg`. That is,
+    // we may not be configured to actually include a static library if we're
+    // adding it here. That's because later when we consume this rlib we'll
+    // decide whether we actually needed the static library or not.
+    //
+    // To do this "correctly" we'd need to keep track of which libraries added
+    // which object files to the archive. We don't do that here, however. The
+    // #[link(cfg(..))] feature is unstable, though, and only intended to get
+    // liblibc working. In that sense the check below just indicates that if
+    // there are any libraries we want to omit object files for at link time we
+    // just exclude all custom object files.
+    //
+    // Eventually if we want to stabilize or flesh out the #[link(cfg(..))]
+    // feature then we'll need to figure out how to record what objects were
+    // loaded from the libraries found here and then encode that into the
+    // metadata of the rlib we're generating somehow.
+    for lib in codegen_results.crate_info.used_libraries.iter() {
+        match lib.kind {
+            // Only bundled static libraries are merged into the rlib.
+            NativeLibraryKind::NativeStatic => {}
+            NativeLibraryKind::NativeStaticNobundle |
+            NativeLibraryKind::NativeFramework |
+            NativeLibraryKind::NativeUnknown => continue,
+        }
+        ab.add_native_library(&lib.name.as_str());
+    }
+
+    // After adding all files to the archive, we need to update the
+    // symbol table of the archive.
+    ab.update_symbols();
+
+    // Note that it is important that we add all of our non-object "magical
+    // files" *after* all of the object files in the archive. The reason for
+    // this is as follows:
+    //
+    // * When performing LTO, this archive will be modified to remove
+    //   objects from above. The reason for this is described below.
+    //
+    // * When the system linker looks at an archive, it will attempt to
+    //   determine the architecture of the archive in order to see whether its
+    //   linkable.
+    //
+    //   The algorithm for this detection is: iterate over the files in the
+    //   archive. Skip magical SYMDEF names. Interpret the first file as an
+    //   object file. Read architecture from the object file.
+    //
+    // * As one can probably see, if "metadata" and "foo.bc" were placed
+    //   before all of the objects, then the architecture of this archive would
+    //   not be correctly inferred once 'foo.o' is removed.
+    //
+    // Basically, all this means is that this code should not move above the
+    // code above.
+    match flavor {
+        RlibFlavor::Normal => {
+            // Instead of putting the metadata in an object file section, rlibs
+            // contain the metadata in a separate file.
+            ab.add_file(&emit_metadata(sess, codegen_results, tmpdir));
+
+            // For LTO purposes, the bytecode of this library is also inserted
+            // into the archive.
+            for bytecode in codegen_results
+                .modules
+                .iter()
+                .filter_map(|m| m.bytecode_compressed.as_ref())
+            {
+                ab.add_file(bytecode);
+            }
+
+            // After adding all files to the archive, we need to update the
+            // symbol table of the archive. This currently dies on macOS (see
+            // #11162), and isn't necessary there anyway
+            if !sess.target.target.options.is_like_osx {
+                ab.update_symbols();
+            }
+        }
+
+        RlibFlavor::StaticlibBase => {
+            // Only the allocator shim's object (if any) is added here.
+            let obj = codegen_results.allocator_module
+                .as_ref()
+                .and_then(|m| m.object.as_ref());
+            if let Some(obj) = obj {
+                ab.add_file(obj);
+            }
+        }
+    }
+
+    ab
+}
+
+// Create a static archive
+//
+// This is essentially the same thing as an rlib, but it also involves adding
+// all of the upstream crates' objects into the archive. This will slurp in
+// all of the native libraries of upstream dependencies as well.
+//
+// Additionally, there's no way for us to link dynamic libraries, so we warn
+// about all dynamic library dependencies that they're not linked in.
+//
+// There's no need to include metadata in a static archive, so ensure to not
+// link in the metadata object file (and also don't prepare the archive with a
+// metadata file).
+fn link_staticlib(sess: &Session,
+                  codegen_results: &CodegenResults,
+                  out_filename: &Path,
+                  tempdir: &TempDir) {
+    // Start from the staticlib base (objects + allocator shim, no metadata).
+    let mut ab = link_rlib(sess,
+                           codegen_results,
+                           RlibFlavor::StaticlibBase,
+                           out_filename,
+                           tempdir);
+    let mut all_native_libs = vec![];
+
+    // Merge every statically-linked upstream rlib into the archive.
+    let res = each_linked_rlib(sess, &codegen_results.crate_info, &mut |cnum, path| {
+        let name = &codegen_results.crate_info.crate_name[&cnum];
+        let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+
+        // Here when we include the rlib into our staticlib we need to make a
+        // decision whether to include the extra object files along the way.
+        // These extra object files come from statically included native
+        // libraries, but they may be cfg'd away with #[link(cfg(..))].
+        //
+        // This unstable feature, though, only needs liblibc to work. The only
+        // use case there is where musl is statically included in liblibc.rlib,
+        // so if we don't want the included version we just need to skip it. As
+        // a result the logic here is that if *any* linked library is cfg'd away
+        // we just skip all object files.
+        //
+        // Clearly this is not sufficient for a general purpose feature, and
+        // we'd want to read from the library's metadata to determine which
+        // object files come from where and selectively skip them.
+        let skip_object_files = native_libs.iter().any(|lib| {
+            lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib)
+        });
+        ab.add_rlib(path,
+                    &name.as_str(),
+                    is_full_lto_enabled(sess) &&
+                        !ignored_for_lto(sess, &codegen_results.crate_info, cnum),
+                    skip_object_files).unwrap();
+
+        all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
+    });
+    if let Err(e) = res {
+        sess.fatal(&e);
+    }
+
+    // Finalize the archive now that all upstream rlibs have been merged in.
+    ab.update_symbols();
+    ab.build();
+
+    if !all_native_libs.is_empty() {
+        if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
+            print_native_static_libs(sess, &all_native_libs);
+        }
+    }
+}
+
+/// Emits a `native-static-libs: ...` note listing the linker arguments a
+/// consumer must pass when linking against the produced static library
+/// (triggered by `--print native-static-libs`).
+fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) {
+    let lib_args: Vec<_> = all_native_libs.iter()
+        .filter(|l| relevant_lib(sess, l))
+        .filter_map(|lib| match lib.kind {
+            NativeLibraryKind::NativeStaticNobundle |
+            NativeLibraryKind::NativeUnknown => {
+                // MSVC and GNU-style linkers spell library arguments differently.
+                if sess.target.target.options.is_like_msvc {
+                    Some(format!("{}.lib", lib.name))
+                } else {
+                    Some(format!("-l{}", lib.name))
+                }
+            },
+            NativeLibraryKind::NativeFramework => {
+                // ld-only syntax, since there are no frameworks in MSVC
+                Some(format!("-framework {}", lib.name))
+            },
+            // These are included, no need to print them
+            NativeLibraryKind::NativeStatic => None,
+        })
+        .collect();
+    if !lib_args.is_empty() {
+        sess.note_without_error("Link against the following native artifacts when linking \
+                                 against this static library. The order and any duplication \
+                                 can be significant on some platforms.");
+        // Prefix for greppability
+        sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" ")));
+    }
+}
+
+// Create a dynamic library or executable
+//
+// This will invoke the system linker/cc to create the resulting file. This
+// links to all upstream files as well.
+fn link_natively(sess: &Session,
+                 crate_type: config::CrateType,
+                 out_filename: &Path,
+                 codegen_results: &CodegenResults,
+                 tmpdir: &Path) {
+    info!("preparing {:?} to {:?}", crate_type, out_filename);
+    let flavor = sess.linker_flavor();
+
+    // The invocations of cc share some flags across platforms
+    let (pname, mut cmd) = get_linker(sess);
+
+    let root = sess.target_filesearch(PathKind::Native).get_lib_path();
+    if let Some(args) = sess.target.target.options.pre_link_args.get(&flavor) {
+        cmd.args(args);
+    }
+    if let Some(args) = sess.target.target.options.pre_link_args_crt.get(&flavor) {
+        if sess.crt_static() {
+            cmd.args(args);
+        }
+    }
+    if let Some(ref args) = sess.opts.debugging_opts.pre_link_args {
+        cmd.args(args);
+    }
+    cmd.args(&sess.opts.debugging_opts.pre_link_arg);
+
+    let pre_link_objects = if crate_type == config::CrateTypeExecutable {
+        &sess.target.target.options.pre_link_objects_exe
+    } else {
+        &sess.target.target.options.pre_link_objects_dll
+    };
+    for obj in pre_link_objects {
+        cmd.arg(root.join(obj));
+    }
+
+    if crate_type == config::CrateTypeExecutable && sess.crt_static() {
+        for obj in &sess.target.target.options.pre_link_objects_exe_crt {
+            cmd.arg(root.join(obj));
+        }
+
+        for obj in &sess.target.target.options.pre_link_objects_exe_crt_sys {
+            if flavor == LinkerFlavor::Gcc {
+                cmd.arg(format!("-l:{}", obj));
+            }
+        }
+    }
+
+    if sess.target.target.options.is_like_emscripten {
+        cmd.arg("-s");
+        cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort {
+            "DISABLE_EXCEPTION_CATCHING=1"
+        } else {
+            "DISABLE_EXCEPTION_CATCHING=0"
+        });
+    }
+
+    {
+        let mut linker = codegen_results.linker_info.to_linker(cmd, &sess);
+        link_args(&mut *linker, sess, crate_type, tmpdir,
+                  out_filename, codegen_results);
+        cmd = linker.finalize();
+    }
+    if let Some(args) = sess.target.target.options.late_link_args.get(&flavor) {
+        cmd.args(args);
+    }
+    for obj in &sess.target.target.options.post_link_objects {
+        cmd.arg(root.join(obj));
+    }
+    if sess.crt_static() {
+        for obj in &sess.target.target.options.post_link_objects_crt_sys {
+            if flavor == LinkerFlavor::Gcc {
+                cmd.arg(format!("-l:{}", obj));
+            }
+        }
+        for obj in &sess.target.target.options.post_link_objects_crt {
+            cmd.arg(root.join(obj));
+        }
+    }
+    if let Some(args) = sess.target.target.options.post_link_args.get(&flavor) {
+        cmd.args(args);
+    }
+    for &(ref k, ref v) in &sess.target.target.options.link_env {
+        cmd.env(k, v);
+    }
+
+    if sess.opts.debugging_opts.print_link_args {
+        println!("{:?}", &cmd);
+    }
+
+    // May have not found libraries in the right formats.
+    sess.abort_if_errors();
+
+    // Invoke the system linker
+    //
+    // Note that there's a terribly awful hack that really shouldn't be present
+    // in any compiler. Here an environment variable is supported to
+    // automatically retry the linker invocation if the linker looks like it
+    // segfaulted.
+    //
+    // Gee that seems odd, normally segfaults are things we want to know about!
+    // Unfortunately though in rust-lang/rust#38878 we're experiencing the
+    // linker segfaulting on Travis quite a bit which is causing quite a bit of
+    // pain to land PRs when they spuriously fail due to a segfault.
+    //
+    // The issue #38878 has some more debugging information on it as well, but
+    // this unfortunately looks like it's just a race condition in macOS's linker
+    // with some thread pool working in the background. It seems that no one
+    // currently knows a fix for this so in the meantime we're left with this...
+    info!("{:?}", &cmd);
+    let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok();
+    let mut prog;
+    let mut i = 0;
+    loop {
+        i += 1;
+        prog = time(sess, "running linker", || {
+            exec_linker(sess, &mut cmd, out_filename, tmpdir)
+        });
+        let output = match prog {
+            Ok(ref output) => output,
+            Err(_) => break,
+        };
+        if output.status.success() {
+            break
+        }
+        let mut out = output.stderr.clone();
+        out.extend(&output.stdout);
+        let out = String::from_utf8_lossy(&out);
+
+        // Check to see if the link failed with "unrecognized command line option:
+        // '-no-pie'" for gcc or "unknown argument: '-no-pie'" for clang. If so,
+        // reperform the link step without the -no-pie option. This is safe because
+        // if the linker doesn't support -no-pie then it should not default to
+        // linking executables as pie. Different versions of gcc seem to use
+        // different quotes in the error message so don't check for them.
+        if sess.target.target.options.linker_is_gnu &&
+           sess.linker_flavor() != LinkerFlavor::Ld &&
+           (out.contains("unrecognized command line option") ||
+            out.contains("unknown argument")) &&
+           out.contains("-no-pie") &&
+           cmd.get_args().iter().any(|e| e.to_string_lossy() == "-no-pie") {
+            info!("linker output: {:?}", out);
+            warn!("Linker does not support -no-pie command line option. Retrying without.");
+            for arg in cmd.take_args() {
+                if arg.to_string_lossy() != "-no-pie" {
+                    cmd.arg(arg);
+                }
+            }
+            info!("{:?}", &cmd);
+            continue;
+        }
+        if !retry_on_segfault || i > 3 {
+            break
+        }
+        let msg_segv = "clang: error: unable to execute command: Segmentation fault: 11";
+        let msg_bus  = "clang: error: unable to execute command: Bus error: 10";
+        if !(out.contains(msg_segv) || out.contains(msg_bus)) {
+            break
+        }
+
+        warn!(
+            "looks like the linker segfaulted when we tried to call it, \
+             automatically retrying again. cmd = {:?}, out = {}.",
+            cmd,
+            out,
+        );
+    }
+
+    match prog {
+        Ok(prog) => {
+            fn escape_string(s: &[u8]) -> String {
+                str::from_utf8(s).map(|s| s.to_owned())
+                    .unwrap_or_else(|_| {
+                        let mut x = "Non-UTF-8 output: ".to_string();
+                        x.extend(s.iter()
+                                 .flat_map(|&b| ascii::escape_default(b))
+                                 .map(|b| char::from_u32(b as u32).unwrap()));
+                        x
+                    })
+            }
+            if !prog.status.success() {
+                let mut output = prog.stderr.clone();
+                output.extend_from_slice(&prog.stdout);
+                sess.struct_err(&format!("linking with `{}` failed: {}",
+                                         pname.display(),
+                                         prog.status))
+                    .note(&format!("{:?}", &cmd))
+                    .note(&escape_string(&output))
+                    .emit();
+                sess.abort_if_errors();
+            }
+            info!("linker stderr:\n{}", escape_string(&prog.stderr));
+            info!("linker stdout:\n{}", escape_string(&prog.stdout));
+        },
+        Err(e) => {
+            let linker_not_found = e.kind() == io::ErrorKind::NotFound;
+
+            let mut linker_error = {
+                if linker_not_found {
+                    sess.struct_err(&format!("linker `{}` not found", pname.display()))
+                } else {
+                    sess.struct_err(&format!("could not exec the linker `{}`", pname.display()))
+                }
+            };
+
+            linker_error.note(&format!("{}", e));
+
+            if !linker_not_found {
+                linker_error.note(&format!("{:?}", &cmd));
+            }
+
+            linker_error.emit();
+
+            if sess.target.target.options.is_like_msvc && linker_not_found {
+                sess.note_without_error("the msvc targets depend on the msvc linker \
+                    but `link.exe` was not found");
+                sess.note_without_error("please ensure that VS 2013 or VS 2015 was installed \
+                    with the Visual C++ option");
+            }
+            sess.abort_if_errors();
+        }
+    }
+
+
+    // On macOS, debuggers need this utility to get run to do some munging of
+    // the symbols. Note, though, that if the object files are being preserved
+    // for their debug information there's no need for us to run dsymutil.
+    if sess.target.target.options.is_like_osx &&
+        sess.opts.debuginfo != NoDebugInfo &&
+        !preserve_objects_for_their_debuginfo(sess)
+    {
+        match Command::new("dsymutil").arg(out_filename).output() {
+            Ok(..) => {}
+            Err(e) => sess.fatal(&format!("failed to run dsymutil: {}", e)),
+        }
+    }
+
+    if sess.opts.target_triple == TargetTriple::from_triple("wasm32-unknown-unknown") {
+        wasm::rewrite_imports(&out_filename, &codegen_results.crate_info.wasm_imports);
+        wasm::add_custom_sections(&out_filename,
+                                  &codegen_results.crate_info.wasm_custom_sections);
+    }
+}
+
+/// Invoke the linker described by `cmd`, producing `out_filename`.
+///
+/// If the OS refuses to spawn the process because the argument list is too
+/// long, the arguments are written (escaped, one per line) to an `@`-file in
+/// `tmpdir` and the linker is re-invoked with that single argument instead.
+fn exec_linker(sess: &Session, cmd: &mut Command, out_filename: &Path, tmpdir: &Path)
+    -> io::Result<Output>
+{
+    // When attempting to spawn the linker we run a risk of blowing out the
+    // size limits for spawning a new process with respect to the arguments
+    // we pass on the command line.
+    //
+    // Here we attempt to handle errors from the OS saying "your list of
+    // arguments is too big" by reinvoking the linker again with an `@`-file
+    // that contains all the arguments. The theory is that this is then
+    // accepted on all linkers and the linker will read all its options out of
+    // there instead of looking at the command line.
+    if !cmd.very_likely_to_exceed_some_spawn_limit() {
+        match cmd.command().stdout(Stdio::piped()).stderr(Stdio::piped()).spawn() {
+            Ok(child) => {
+                let output = child.wait_with_output();
+                // Work around a Windows file-buffering issue (no-op on unix);
+                // see `flush_linked_file` below.
+                flush_linked_file(&output, out_filename)?;
+                return output;
+            }
+            Err(ref e) if command_line_too_big(e) => {
+                info!("command line to linker was too big: {}", e);
+            }
+            Err(e) => return Err(e)
+        }
+    }
+
+    info!("falling back to passing arguments to linker via an @-file");
+    let mut cmd2 = cmd.clone();
+    let mut args = String::new();
+    // One escaped argument per line; see the `Escape` helper below for the
+    // per-linker quoting rules.
+    for arg in cmd2.take_args() {
+        args.push_str(&Escape {
+            arg: arg.to_str().unwrap(),
+            is_like_msvc: sess.target.target.options.is_like_msvc,
+        }.to_string());
+        args.push_str("\n");
+    }
+    let file = tmpdir.join("linker-arguments");
+    // MSVC-style linkers read the response file as UTF-16LE; everything else
+    // gets the raw UTF-8 bytes.
+    let bytes = if sess.target.target.options.is_like_msvc {
+        let mut out = vec![];
+        // start the stream with a UTF-16 BOM
+        for c in vec![0xFEFF].into_iter().chain(args.encode_utf16()) {
+            // encode in little endian
+            out.push(c as u8);
+            out.push((c >> 8) as u8);
+        }
+        out
+    } else {
+        args.into_bytes()
+    };
+    fs::write(&file, &bytes)?;
+    cmd2.arg(format!("@{}", file.display()));
+    info!("invoking linker {:?}", cmd2);
+    let output = cmd2.output();
+    flush_linked_file(&output, out_filename)?;
+    return output;
+
+    #[cfg(unix)]
+    fn flush_linked_file(_: &io::Result<Output>, _: &Path) -> io::Result<()> {
+        Ok(())
+    }
+
+    #[cfg(windows)]
+    fn flush_linked_file(command_output: &io::Result<Output>, out_filename: &Path)
+        -> io::Result<()>
+    {
+        // On Windows, under high I/O load, output buffers are sometimes not flushed,
+        // even long after process exit, causing nasty, non-reproducible output bugs.
+        //
+        // File::sync_all() calls FlushFileBuffers() down the line, which solves the problem.
+        //
+        // A full writeup of the original Chrome bug can be found at
+        // randomascii.wordpress.com/2018/02/25/compiler-bug-linker-bug-windows-kernel-bug/amp
+
+        if let &Ok(ref out) = command_output {
+            if out.status.success() {
+                if let Ok(of) = fs::OpenOptions::new().write(true).open(out_filename) {
+                    of.sync_all()?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    #[cfg(unix)]
+    fn command_line_too_big(err: &io::Error) -> bool {
+        err.raw_os_error() == Some(::libc::E2BIG)
+    }
+
+    #[cfg(windows)]
+    fn command_line_too_big(err: &io::Error) -> bool {
+        // An over-long command line on Windows surfaces as this OS error code.
+        const ERROR_FILENAME_EXCED_RANGE: i32 = 206;
+        err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE)
+    }
+
+    // Display adaptor that escapes a single linker argument for inclusion in
+    // the @-file, using MSVC-style or ld-style quoting per `is_like_msvc`.
+    struct Escape<'a> {
+        arg: &'a str,
+        is_like_msvc: bool,
+    }
+
+    impl<'a> fmt::Display for Escape<'a> {
+        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            if self.is_like_msvc {
+                // This is "documented" at
+                // https://msdn.microsoft.com/en-us/library/4xdcbak7.aspx
+                //
+                // Unfortunately there's not a great specification of the
+                // syntax I could find online (at least) but some local
+                // testing showed that this seemed sufficient-ish to catch
+                // at least a few edge cases.
+                write!(f, "\"")?;
+                for c in self.arg.chars() {
+                    match c {
+                        '"' => write!(f, "\\{}", c)?,
+                        c => write!(f, "{}", c)?,
+                    }
+                }
+                write!(f, "\"")?;
+            } else {
+                // This is documented at https://linux.die.net/man/1/ld, namely:
+                //
+                // > Options in file are separated by whitespace. A whitespace
+                // > character may be included in an option by surrounding the
+                // > entire option in either single or double quotes. Any
+                // > character (including a backslash) may be included by
+                // > prefixing the character to be included with a backslash.
+                //
+                // We put an argument on each line, so all we need to do is
+                // ensure the line is interpreted as one whole argument.
+                for c in self.arg.chars() {
+                    match c {
+                        '\\' |
+                        ' ' => write!(f, "\\{}", c)?,
+                        c => write!(f, "{}", c)?,
+                    }
+                }
+            }
+            Ok(())
+        }
+    }
+}
+
+/// Accumulate the full linker command line for linking `crate_type` into
+/// `out_filename`.  The *order* in which arguments are added is semantically
+/// significant (see the large comment before the add_* calls below) — do not
+/// reorder casually.
+fn link_args(cmd: &mut Linker,
+             sess: &Session,
+             crate_type: config::CrateType,
+             tmpdir: &Path,
+             out_filename: &Path,
+             codegen_results: &CodegenResults) {
+
+    // Linker plugins should be specified early in the list of arguments
+    cmd.cross_lang_lto();
+
+    // The default library location, we need this to find the runtime.
+    // The location of crates will be determined as needed.
+    let lib_path = sess.target_filesearch(PathKind::All).get_lib_path();
+
+    // target descriptor
+    let t = &sess.target.target;
+
+    cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+    // Object files produced by codegen for the local crate come first.
+    for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+        cmd.add_object(obj);
+    }
+    cmd.output_filename(out_filename);
+
+    // Windows executables may carry an explicit subsystem (console/windows).
+    if crate_type == config::CrateTypeExecutable &&
+       sess.target.target.options.is_like_windows {
+        if let Some(ref s) = codegen_results.windows_subsystem {
+            cmd.subsystem(s);
+        }
+    }
+
+    // If we're building a dynamic library then some platforms need to make sure
+    // that all symbols are exported correctly from the dynamic library.
+    if crate_type != config::CrateTypeExecutable ||
+       sess.target.target.options.is_like_emscripten {
+        cmd.export_symbols(tmpdir, crate_type);
+    }
+
+    // When linking a dynamic library, we put the metadata into a section of the
+    // executable. This metadata is in a separate object file from the main
+    // object file, so we link that in here.
+    if crate_type == config::CrateTypeDylib ||
+       crate_type == config::CrateTypeProcMacro {
+        if let Some(obj) = codegen_results.metadata_module.object.as_ref() {
+            cmd.add_object(obj);
+        }
+    }
+
+    // The allocator shim, if one was generated for this session.
+    let obj = codegen_results.allocator_module
+        .as_ref()
+        .and_then(|m| m.object.as_ref());
+    if let Some(obj) = obj {
+        cmd.add_object(obj);
+    }
+
+    // Try to strip as much out of the generated object by removing unused
+    // sections if possible. See more comments in linker.rs
+    if !sess.opts.cg.link_dead_code {
+        let keep_metadata = crate_type == config::CrateTypeDylib;
+        cmd.gc_sections(keep_metadata);
+    }
+
+    let used_link_args = &codegen_results.crate_info.link_args;
+
+    // Decide between PIE and no-PIE for executables.  PIE is only used when
+    // the target supports it, we're compiling PIC code, and neither static
+    // linking nor an explicit `-static` link arg forbids it.
+    if crate_type == config::CrateTypeExecutable {
+        let mut position_independent_executable = false;
+
+        if t.options.position_independent_executables {
+            let empty_vec = Vec::new();
+            let args = sess.opts.cg.link_args.as_ref().unwrap_or(&empty_vec);
+            let more_args = &sess.opts.cg.link_arg;
+            let mut args = args.iter().chain(more_args.iter()).chain(used_link_args.iter());
+
+            if get_reloc_model(sess) == llvm::RelocMode::PIC
+                && !sess.crt_static() && !args.any(|x| *x == "-static") {
+                position_independent_executable = true;
+            }
+        }
+
+        if position_independent_executable {
+            cmd.position_independent_executable();
+        } else {
+            // recent versions of gcc can be configured to generate position
+            // independent executables by default. We have to pass -no-pie to
+            // explicitly turn that off. Not applicable to ld.
+            if sess.target.target.options.linker_is_gnu
+                && sess.linker_flavor() != LinkerFlavor::Ld {
+                cmd.no_position_independent_executable();
+            }
+        }
+    }
+
+    // RELRO (read-only relocations): an explicit -Z relro-level wins,
+    // otherwise use the target's default.
+    let relro_level = match sess.opts.debugging_opts.relro_level {
+        Some(level) => level,
+        None => t.options.relro_level,
+    };
+    match relro_level {
+        RelroLevel::Full => {
+            cmd.full_relro();
+        },
+        RelroLevel::Partial => {
+            cmd.partial_relro();
+        },
+        RelroLevel::Off => {
+            cmd.no_relro();
+        },
+        RelroLevel::None => {
+        },
+    }
+
+    // Pass optimization flags down to the linker.
+    cmd.optimize();
+
+    // Pass debuginfo flags down to the linker.
+    cmd.debuginfo();
+
+    // We want to prevent the compiler from accidentally leaking in any system
+    // libraries, so we explicitly ask gcc to not link to any libraries by
+    // default. Note that this does not happen for windows because windows pulls
+    // in some large number of libraries and I couldn't quite figure out which
+    // subset we wanted.
+    if t.options.no_default_libraries {
+        cmd.no_default_libraries();
+    }
+
+    // Take careful note of the ordering of the arguments we pass to the linker
+    // here. Linkers will assume that things on the left depend on things to the
+    // right. Things on the right cannot depend on things on the left. This is
+    // all formally implemented in terms of resolving symbols (libs on the right
+    // resolve unknown symbols of libs on the left, but not vice versa).
+    //
+    // For this reason, we have organized the arguments we pass to the linker as
+    // such:
+    //
+    //  1. The local object that LLVM just generated
+    //  2. Local native libraries
+    //  3. Upstream rust libraries
+    //  4. Upstream native libraries
+    //
+    // The rationale behind this ordering is that those items lower down in the
+    // list can't depend on items higher up in the list. For example nothing can
+    // depend on what we just generated (e.g. that'd be a circular dependency).
+    // Upstream rust libraries are not allowed to depend on our local native
+    // libraries as that would violate the structure of the DAG, in that
+    // scenario they are required to link to them as well in a shared fashion.
+    //
+    // Note that upstream rust libraries may contain native dependencies as
+    // well, but they also can't depend on what we just started to add to the
+    // link line. And finally upstream native libraries can't depend on anything
+    // in this DAG so far because they're only dylibs and dylibs can only depend
+    // on other dylibs (e.g. other native deps).
+    add_local_native_libraries(cmd, sess, codegen_results);
+    add_upstream_rust_crates(cmd, sess, codegen_results, crate_type, tmpdir);
+    add_upstream_native_libraries(cmd, sess, codegen_results, crate_type);
+
+    // Tell the linker what we're doing.
+    if crate_type != config::CrateTypeExecutable {
+        cmd.build_dylib(out_filename);
+    }
+    if crate_type == config::CrateTypeExecutable && sess.crt_static() {
+        cmd.build_static_executable();
+    }
+
+    // Profile-guided-optimization instrumentation flags, when -Z pgo-gen is on.
+    if sess.opts.debugging_opts.pgo_gen.is_some() {
+        cmd.pgo_gen();
+    }
+
+    // FIXME (#2397): At some point we want to rpath our guesses as to
+    // where extern libraries might live, based on the
+    // addl_lib_search_paths
+    if sess.opts.cg.rpath {
+        let sysroot = sess.sysroot();
+        let target_triple = sess.opts.target_triple.triple();
+        let mut get_install_prefix_lib_path = || {
+            let install_prefix = option_env!("CFG_PREFIX").expect("CFG_PREFIX");
+            let tlib = filesearch::relative_target_lib_path(sysroot, target_triple);
+            let mut path = PathBuf::from(install_prefix);
+            path.push(&tlib);
+
+            path
+        };
+        let mut rpath_config = RPathConfig {
+            used_crates: &codegen_results.crate_info.used_crates_dynamic,
+            out_filename: out_filename.to_path_buf(),
+            has_rpath: sess.target.target.options.has_rpath,
+            is_like_osx: sess.target.target.options.is_like_osx,
+            linker_is_gnu: sess.target.target.options.linker_is_gnu,
+            get_install_prefix_lib_path: &mut get_install_prefix_lib_path,
+        };
+        cmd.args(&rpath::get_rpath_flags(&mut rpath_config));
+    }
+
+    // Finally add all the linker arguments provided on the command line along
+    // with any #[link_args] attributes found inside the crate
+    if let Some(ref args) = sess.opts.cg.link_args {
+        cmd.args(args);
+    }
+    cmd.args(&sess.opts.cg.link_arg);
+    cmd.args(&used_link_args);
+}
+
+// # Native library linking
+//
+// User-supplied library search paths (-L on the command line). These are
+// the same paths used to find Rust crates, so some of them may have been
+// added already by the previous crate linking code. This only allows them
+// to be found at compile time so it is still entirely up to outside
+// forces to make sure that library can be found at runtime.
+//
+// Also note that the native libraries linked here are only the ones located
+// in the current crate. Upstream crates with native library dependencies
+// may have their native library pulled in above.
+/// Add search paths and `-l`-style flags for native libraries referenced by
+/// the *local* crate (see the comment block above for context).
+fn add_local_native_libraries(cmd: &mut Linker,
+                              sess: &Session,
+                              codegen_results: &CodegenResults) {
+    // Forward every library search path, distinguishing framework search
+    // paths from ordinary ones.
+    sess.target_filesearch(PathKind::All).for_each_lib_search_path(|path, k| {
+        match k {
+            PathKind::Framework => { cmd.framework_path(path); }
+            _ => { cmd.include_path(&fix_windows_verbatim_for_gcc(path)); }
+        }
+    });
+
+    // Skip libraries whose cfg-gates don't apply to this session
+    // (see `relevant_lib`).
+    let relevant_libs = codegen_results.crate_info.used_libraries.iter().filter(|l| {
+        relevant_lib(sess, l)
+    });
+
+    let search_path = archive_search_paths(sess);
+    // Emit the kind-appropriate link directive for each library.
+    for lib in relevant_libs {
+        match lib.kind {
+            NativeLibraryKind::NativeUnknown => cmd.link_dylib(&lib.name.as_str()),
+            NativeLibraryKind::NativeFramework => cmd.link_framework(&lib.name.as_str()),
+            NativeLibraryKind::NativeStaticNobundle => cmd.link_staticlib(&lib.name.as_str()),
+            NativeLibraryKind::NativeStatic => cmd.link_whole_staticlib(&lib.name.as_str(),
+                                                                        &search_path)
+        }
+    }
+}
+
+// # Rust Crate linking
+//
+// Rust crates are not considered at all when creating an rlib output. All
+// dependencies will be linked when producing the final output (instead of
+// the intermediate rlib version)
+fn add_upstream_rust_crates(cmd: &mut Linker,
+                            sess: &Session,
+                            codegen_results: &CodegenResults,
+                            crate_type: config::CrateType,
+                            tmpdir: &Path) {
+    // All of the heavy lifting has previously been accomplished by the
+    // dependency_format module of the compiler. This is just crawling the
+    // output of that module, adding crates as necessary.
+    //
+    // Linking to a rlib involves just passing it to the linker (the linker
+    // will slurp up the object files inside), and linking to a dynamic library
+    // involves just passing the right -l flag.
+
+    let formats = sess.dependency_formats.borrow();
+    let data = formats.get(&crate_type).unwrap();
+
+    // Invoke get_used_crates to ensure that we get a topological sorting of
+    // crates.
+    let deps = &codegen_results.crate_info.used_crates_dynamic;
+
+    // There's a few internal crates in the standard library (aka libcore and
+    // libstd) which actually have a circular dependence upon one another. This
+    // currently arises through "weak lang items" where libcore requires things
+    // like `rust_begin_unwind` but libstd ends up defining it. To get this
+    // circular dependence to work correctly in all situations we'll need to be
+    // sure to correctly apply the `--start-group` and `--end-group` options to
+    // GNU linkers, otherwise if we don't use any other symbol from the standard
+    // library it'll get discarded and the whole application won't link.
+    //
+    // In this loop we're calculating the `group_end`, after which crate to
+    // pass `--end-group` and `group_start`, before which crate to pass
+    // `--start-group`. We currently do this by passing `--end-group` after
+    // the first crate (when iterating backwards) that requires a lang item
+    // defined somewhere else. Once that's set then when we've defined all the
+    // necessary lang items we'll pass `--start-group`.
+    //
+    // Note that this isn't amazing logic for now but it should do the trick
+    // for the current implementation of the standard library.
+    let mut group_end = None;
+    let mut group_start = None;
+    let mut end_with = FxHashSet();
+    let info = &codegen_results.crate_info;
+    for &(cnum, _) in deps.iter().rev() {
+        if let Some(missing) = info.missing_lang_items.get(&cnum) {
+            end_with.extend(missing.iter().cloned());
+            if end_with.len() > 0 && group_end.is_none() {
+                group_end = Some(cnum);
+            }
+        }
+        end_with.retain(|item| info.lang_item_to_crate.get(item) != Some(&cnum));
+        if end_with.len() == 0 && group_end.is_some() {
+            group_start = Some(cnum);
+            break
+        }
+    }
+
+    // If we didn't end up filling in all lang items from upstream crates then
+    // we'll be filling it in with our crate. This probably means we're the
+    // standard library itself, so skip this for now.
+    if group_end.is_some() && group_start.is_none() {
+        group_end = None;
+    }
+
+    let mut compiler_builtins = None;
+
+    for &(cnum, _) in deps.iter() {
+        if group_start == Some(cnum) {
+            cmd.group_start();
+        }
+
+        // We may not pass all crates through to the linker. Some crates may
+        // appear statically in an existing dylib, meaning we'll pick up all the
+        // symbols from the dylib.
+        let src = &codegen_results.crate_info.used_crate_source[&cnum];
+        match data[cnum.as_usize() - 1] {
+            _ if codegen_results.crate_info.profiler_runtime == Some(cnum) => {
+                add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
+            }
+            _ if codegen_results.crate_info.sanitizer_runtime == Some(cnum) => {
+                link_sanitizer_runtime(cmd, sess, codegen_results, tmpdir, cnum);
+            }
+            // compiler-builtins are always placed last to ensure that they're
+            // linked correctly.
+            _ if codegen_results.crate_info.compiler_builtins == Some(cnum) => {
+                assert!(compiler_builtins.is_none());
+                compiler_builtins = Some(cnum);
+            }
+            Linkage::NotLinked |
+            Linkage::IncludedFromDylib => {}
+            Linkage::Static => {
+                add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
+            }
+            Linkage::Dynamic => {
+                add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0)
+            }
+        }
+
+        if group_end == Some(cnum) {
+            cmd.group_end();
+        }
+    }
+
+    // compiler-builtins are always placed last to ensure that they're
+    // linked correctly.
+    // We must always link the `compiler_builtins` crate statically. Even if it
+    // was already "included" in a dylib (e.g. `libstd` when `-C prefer-dynamic`
+    // is used)
+    if let Some(cnum) = compiler_builtins {
+        add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum);
+    }
+
+    // Converts a library file-stem into a cc -l argument
+    fn unlib<'a>(config: &config::Config, stem: &'a str) -> &'a str {
+        if stem.starts_with("lib") && !config.target.options.is_like_windows {
+            &stem[3..]
+        } else {
+            stem
+        }
+    }
+
+    // We must link the sanitizer runtime using -Wl,--whole-archive but since
+    // it's packed in a .rlib, it contains stuff that are not objects that will
+    // make the linker error. So we must remove those bits from the .rlib before
+    // linking it.
+    fn link_sanitizer_runtime(cmd: &mut Linker,
+                              sess: &Session,
+                              codegen_results: &CodegenResults,
+                              tmpdir: &Path,
+                              cnum: CrateNum) {
+        let src = &codegen_results.crate_info.used_crate_source[&cnum];
+        let cratepath = &src.rlib.as_ref().unwrap().0;
+
+        if sess.target.target.options.is_like_osx {
+            // On Apple platforms, the sanitizer is always built as a dylib, and
+            // LLVM will link to `@rpath/*.dylib`, so we need to specify an
+            // rpath to the library as well (the rpath should be absolute, see
+            // PR #41352 for details).
+            //
+            // FIXME: Remove this logic into librustc_*san once Cargo supports it
+            let rpath = cratepath.parent().unwrap();
+            let rpath = rpath.to_str().expect("non-utf8 component in path");
+            cmd.args(&["-Wl,-rpath".into(), "-Xlinker".into(), rpath.into()]);
+        }
+
+        let dst = tmpdir.join(cratepath.file_name().unwrap());
+        let cfg = archive_config(sess, &dst, Some(cratepath));
+        let mut archive = ArchiveBuilder::new(cfg);
+        archive.update_symbols();
+
+        for f in archive.src_files() {
+            if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME {
+                archive.remove_file(&f);
+                continue
+            }
+        }
+
+        archive.build();
+
+        cmd.link_whole_rlib(&dst);
+    }
+
+    // Adds the static "rlib" versions of all crates to the command line.
+    // There's a bit of magic which happens here specifically related to LTO and
+    // dynamic libraries. Specifically:
+    //
+    // * For LTO, we remove upstream object files.
+    // * For dylibs we remove metadata and bytecode from upstream rlibs
+    //
+    // When performing LTO, almost(*) all of the bytecode from the upstream
+    // libraries has already been included in our object file output. As a
+    // result we need to remove the object files in the upstream libraries so
+    // the linker doesn't try to include them twice (or whine about duplicate
+    // symbols). We must continue to include the rest of the rlib, however, as
+    // it may contain static native libraries which must be linked in.
+    //
+    // (*) Crates marked with `#![no_builtins]` don't participate in LTO and
+    // their bytecode wasn't included. The object files in those libraries must
+    // still be passed to the linker.
+    //
+    // When making a dynamic library, linkers by default don't include any
+    // object files in an archive if they're not necessary to resolve the link.
+    // We basically want to convert the archive (rlib) to a dylib, though, so we
+    // *do* want everything included in the output, regardless of whether the
+    // linker thinks it's needed or not. As a result we must use the
+    // --whole-archive option (or the platform equivalent). When using this
+    // option the linker will fail if there are non-objects in the archive (such
+    // as our own metadata and/or bytecode). All in all, for rlibs to be
+    // entirely included in dylibs, we need to remove all non-object files.
+    //
+    // Note, however, that if we're not doing LTO or we're not producing a dylib
+    // (aka we're making an executable), we can just pass the rlib blindly to
+    // the linker (fast) because it's fine if it's not actually included as
+    // we're at the end of the dependency chain.
+    fn add_static_crate(cmd: &mut Linker,
+                        sess: &Session,
+                        codegen_results: &CodegenResults,
+                        tmpdir: &Path,
+                        crate_type: config::CrateType,
+                        cnum: CrateNum) {
+        let src = &codegen_results.crate_info.used_crate_source[&cnum];
+        let cratepath = &src.rlib.as_ref().unwrap().0;
+
+        // See the comment above in `link_staticlib` and `link_rlib` for why if
+        // there's a static library that's not relevant we skip all object
+        // files.
+        let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+        let skip_native = native_libs.iter().any(|lib| {
+            lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib)
+        });
+
+        if (!is_full_lto_enabled(sess) ||
+            ignored_for_lto(sess, &codegen_results.crate_info, cnum)) &&
+           crate_type != config::CrateTypeDylib &&
+           !skip_native {
+            cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath));
+            return
+        }
+
+        let dst = tmpdir.join(cratepath.file_name().unwrap());
+        let name = cratepath.file_name().unwrap().to_str().unwrap();
+        let name = &name[3..name.len() - 5]; // chop off lib/.rlib
+
+        time(sess, &format!("altering {}.rlib", name), || {
+            let cfg = archive_config(sess, &dst, Some(cratepath));
+            let mut archive = ArchiveBuilder::new(cfg);
+            archive.update_symbols();
+
+            let mut any_objects = false;
+            for f in archive.src_files() {
+                if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME {
+                    archive.remove_file(&f);
+                    continue
+                }
+
+                let canonical = f.replace("-", "_");
+                let canonical_name = name.replace("-", "_");
+
+                // Look for `.rcgu.o` at the end of the filename to conclude
+                // that this is a Rust-related object file.
+                fn looks_like_rust(s: &str) -> bool {
+                    let path = Path::new(s);
+                    let ext = path.extension().and_then(|s| s.to_str());
+                    if ext != Some(OutputType::Object.extension()) {
+                        return false
+                    }
+                    let ext2 = path.file_stem()
+                        .and_then(|s| Path::new(s).extension())
+                        .and_then(|s| s.to_str());
+                    ext2 == Some(RUST_CGU_EXT)
+                }
+
+                let is_rust_object =
+                    canonical.starts_with(&canonical_name) &&
+                    looks_like_rust(&f);
+
+                // If we've been requested to skip all native object files
+                // (those not generated by the rust compiler) then we can skip
+                // this file. See above for why we may want to do this.
+                let skip_because_cfg_say_so = skip_native && !is_rust_object;
+
+                // If we're performing LTO and this is a rust-generated object
+                // file, then we don't need the object file as it's part of the
+                // LTO module. Note that `#![no_builtins]` is excluded from LTO,
+                // though, so we let that object file slide.
+                let skip_because_lto = is_full_lto_enabled(sess) &&
+                    is_rust_object &&
+                    (sess.target.target.options.no_builtins ||
+                     !codegen_results.crate_info.is_no_builtins.contains(&cnum));
+
+                if skip_because_cfg_say_so || skip_because_lto {
+                    archive.remove_file(&f);
+                } else {
+                    any_objects = true;
+                }
+            }
+
+            if !any_objects {
+                return
+            }
+            archive.build();
+
+            // If we're creating a dylib, then we need to include the
+            // whole of each object in our archive into that artifact. This is
+            // because a `dylib` can be reused as an intermediate artifact.
+            //
+            // Note, though, that we don't want to include the whole of a
+            // compiler-builtins crate (e.g. compiler-rt) because it'll get
+            // repeatedly linked anyway.
+            if crate_type == config::CrateTypeDylib &&
+                codegen_results.crate_info.compiler_builtins != Some(cnum) {
+                cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst));
+            } else {
+                cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst));
+            }
+        });
+    }
+
+    // Same thing as above, but for dynamic crates instead of static crates.
+    // Add a dylib crate to the link line. Only the search path and library
+    // name are needed; the object code itself stays inside the dylib.
+    fn add_dynamic_crate(cmd: &mut Linker, sess: &Session, cratepath: &Path) {
+        // If we're performing LTO, then it should have been previously required
+        // that all upstream rust dependencies were available in an rlib format.
+        assert!(!is_full_lto_enabled(sess));
+
+        // Just need to tell the linker about where the library lives and
+        // what its name is
+        let parent = cratepath.parent();
+        if let Some(dir) = parent {
+            cmd.include_path(&fix_windows_verbatim_for_gcc(dir));
+        }
+        // `unlib` (defined elsewhere in this file — see its definition)
+        // normalizes the file stem into the bare name the linker expects.
+        let filestem = cratepath.file_stem().unwrap().to_str().unwrap();
+        cmd.link_rust_dylib(&unlib(&sess.target, filestem),
+                            parent.unwrap_or(Path::new("")));
+    }
+}
+
+// Link in all of our upstream crates' native dependencies. Remember that
+// all of these upstream native dependencies are all non-static
+// dependencies. We've got two cases then:
+//
+// 1. The upstream crate is an rlib. In this case we *must* link in the
+// native dependency because the rlib is just an archive.
+//
+// 2. The upstream crate is a dylib. In order to use the dylib, we have to
+// have the dependency present on the system somewhere. Thus, we don't
+// gain a whole lot from not linking in the dynamic dependency to this
+// crate as well.
+//
+// The use case for this is a little subtle. In theory the native
+// dependencies of a crate are purely an implementation detail of the crate
+// itself, but the problem arises with generic and inlined functions. If a
+// generic function calls a native function, then the generic function must
+// be instantiated in the target crate, meaning that the native symbol must
+// also be resolved in the target crate.
+fn add_upstream_native_libraries(cmd: &mut Linker,
+                                 sess: &Session,
+                                 codegen_results: &CodegenResults,
+                                 crate_type: config::CrateType) {
+    // Be sure to use a topological sorting of crates because there may be
+    // interdependencies between native libraries. When passing -nodefaultlibs,
+    // for example, almost all native libraries depend on libc, so we have to
+    // make sure that's all the way at the right (liblibc is near the base of
+    // the dependency chain).
+    //
+    // This passes RequireStatic, but the actual requirement doesn't matter,
+    // we're just getting an ordering of crate numbers, we're not worried about
+    // the paths.
+    let formats = sess.dependency_formats.borrow();
+    let data = formats.get(&crate_type).unwrap();
+
+    let crates = &codegen_results.crate_info.used_crates_static;
+    for &(cnum, _) in crates {
+        for lib in codegen_results.crate_info.native_libraries[&cnum].iter() {
+            // Skip libraries whose #[cfg] predicate doesn't match this session.
+            if !relevant_lib(sess, &lib) {
+                continue
+            }
+            match lib.kind {
+                NativeLibraryKind::NativeUnknown => cmd.link_dylib(&lib.name.as_str()),
+                NativeLibraryKind::NativeFramework => cmd.link_framework(&lib.name.as_str()),
+                NativeLibraryKind::NativeStaticNobundle => {
+                    // Link "static-nobundle" native libs only if the crate they originate from
+                    // is being linked statically to the current crate.  If it's linked dynamically
+                    // or is an rlib already included via some other dylib crate, the symbols from
+                    // native libs will have already been included in that dylib.
+                    if data[cnum.as_usize() - 1] == Linkage::Static {
+                        cmd.link_staticlib(&lib.name.as_str())
+                    }
+                },
+                // ignore statically included native libraries here as we've
+                // already included them when we included the rust library
+                // previously
+                NativeLibraryKind::NativeStatic => {}
+            }
+        }
+    }
+}
+
+/// Whether `lib` should be linked: a native library carrying a `cfg(..)`
+/// condition is relevant only when that cfg matches the current session;
+/// libraries without a cfg are always relevant.
+fn relevant_lib(sess: &Session, lib: &NativeLibrary) -> bool {
+    match lib.cfg {
+        Some(ref cfg) => attr::cfg_matches(cfg, &sess.parse_sess, None),
+        None => true,
+    }
+}
+
+/// Returns true when a whole-program ("full") LTO mode is active.
+/// `Lto::No` and `Lto::ThinLocal` (per its name, thin LTO restricted to the
+/// local crate) do not count as full LTO here.
+fn is_full_lto_enabled(sess: &Session) -> bool {
+    match sess.lto() {
+        Lto::Yes |
+        Lto::Thin |
+        Lto::Fat => true,
+        Lto::No |
+        Lto::ThinLocal => false,
+    }
+}
diff --git a/src/librustc_codegen_llvm/back/linker.rs b/src/librustc_codegen_llvm/back/linker.rs
new file mode 100644
index 00000000000..dd1983bdc17
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/linker.rs
@@ -0,0 +1,1037 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::HashMap;
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io::{self, BufWriter};
+use std::path::{Path, PathBuf};
+
+use back::archive;
+use back::command::Command;
+use back::symbol_export;
+use rustc::hir::def_id::{LOCAL_CRATE, CrateNum};
+use rustc::middle::dependency_format::Linkage;
+use rustc::session::Session;
+use rustc::session::config::{self, CrateType, OptLevel, DebugInfoLevel,
+                             CrossLangLto};
+use rustc::ty::TyCtxt;
+use rustc_target::spec::{LinkerFlavor, LldFlavor};
+use serialize::{json, Encoder};
+
+/// For all the linkers we support, and information they might
+/// need out of the shared crate context before we get rid of it.
+pub struct LinkerInfo {
+    // Exported-symbol names, precomputed per crate type being emitted
+    // (see `LinkerInfo::new`).
+    exports: HashMap<CrateType, Vec<String>>,
+}
+
+impl LinkerInfo {
+    /// Snapshot the exported-symbol list for every crate type this session
+    /// will emit, so linking can proceed without the `TyCtxt`.
+    pub fn new(tcx: TyCtxt) -> LinkerInfo {
+        LinkerInfo {
+            exports: tcx.sess.crate_types.borrow().iter().map(|&c| {
+                (c, exported_symbols(tcx, c))
+            }).collect(),
+        }
+    }
+
+    /// Wrap `cmd` in the `Linker` implementation selected by the session's
+    /// linker flavor: MSVC link.exe style, Emscripten, gcc driver, raw
+    /// ld-style, or wasm-ld.
+    pub fn to_linker<'a>(&'a self,
+                         cmd: Command,
+                         sess: &'a Session) -> Box<Linker+'a> {
+        match sess.linker_flavor() {
+            LinkerFlavor::Lld(LldFlavor::Link) |
+            LinkerFlavor::Msvc => {
+                Box::new(MsvcLinker {
+                    cmd,
+                    sess,
+                    info: self
+                }) as Box<Linker>
+            }
+            LinkerFlavor::Em =>  {
+                Box::new(EmLinker {
+                    cmd,
+                    sess,
+                    info: self
+                }) as Box<Linker>
+            }
+            LinkerFlavor::Gcc =>  {
+                Box::new(GccLinker {
+                    cmd,
+                    sess,
+                    info: self,
+                    hinted_static: false,
+                    is_ld: false,
+                }) as Box<Linker>
+            }
+
+            // Raw ld-style invocation: same GccLinker, but arguments are
+            // passed directly rather than via `-Wl,`.
+            LinkerFlavor::Lld(LldFlavor::Ld) |
+            LinkerFlavor::Lld(LldFlavor::Ld64) |
+            LinkerFlavor::Ld => {
+                Box::new(GccLinker {
+                    cmd,
+                    sess,
+                    info: self,
+                    hinted_static: false,
+                    is_ld: true,
+                }) as Box<Linker>
+            }
+
+            LinkerFlavor::Lld(LldFlavor::Wasm) => {
+                Box::new(WasmLd {
+                    cmd,
+                }) as Box<Linker>
+            }
+        }
+    }
+}
+
+/// Linker abstraction used by back::link to build up the command to invoke a
+/// linker.
+///
+/// This trait is the total list of requirements needed by `back::link` and
+/// represents the meaning of each option being passed down. This trait is then
+/// used to dispatch on whether a GNU-like linker (generally `ld.exe`) or an
+/// MSVC linker (e.g. `link.exe`) is being used.
+pub trait Linker {
+    // Inputs: native libraries, frameworks, rlibs and raw object files.
+    fn link_dylib(&mut self, lib: &str);
+    fn link_rust_dylib(&mut self, lib: &str, path: &Path);
+    fn link_framework(&mut self, framework: &str);
+    fn link_staticlib(&mut self, lib: &str);
+    fn link_rlib(&mut self, lib: &Path);
+    fn link_whole_rlib(&mut self, lib: &Path);
+    fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]);
+    // Search paths and output location.
+    fn include_path(&mut self, path: &Path);
+    fn framework_path(&mut self, path: &Path);
+    fn output_filename(&mut self, path: &Path);
+    fn add_object(&mut self, path: &Path);
+    // Hardening / optimization / debuginfo knobs.
+    fn gc_sections(&mut self, keep_metadata: bool);
+    fn position_independent_executable(&mut self);
+    fn no_position_independent_executable(&mut self);
+    fn full_relro(&mut self);
+    fn partial_relro(&mut self);
+    fn no_relro(&mut self);
+    fn optimize(&mut self);
+    fn pgo_gen(&mut self);
+    fn debuginfo(&mut self);
+    fn no_default_libraries(&mut self);
+    // Artifact kind and symbol visibility.
+    fn build_dylib(&mut self, out_filename: &Path);
+    fn build_static_executable(&mut self);
+    fn args(&mut self, args: &[String]);
+    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType);
+    fn subsystem(&mut self, subsystem: &str);
+    // Archive-group markers (e.g. GNU ld's --start-group/--end-group).
+    fn group_start(&mut self);
+    fn group_end(&mut self);
+    fn cross_lang_lto(&mut self);
+    // Should have been finalize(self), but we don't support self-by-value on trait objects (yet?).
+    fn finalize(&mut self) -> Command;
+}
+
+pub struct GccLinker<'a> {
+    // Command line being accumulated.
+    cmd: Command,
+    sess: &'a Session,
+    // Exported-symbol info computed before codegen finished.
+    info: &'a LinkerInfo,
+    hinted_static: bool, // Keeps track of the current hinting mode.
+    // Link as ld
+    is_ld: bool,
+}
+
+impl<'a> GccLinker<'a> {
+    /// Argument that must be passed *directly* to the linker
+    ///
+    /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used
+    fn linker_arg<S>(&mut self, arg: S) -> &mut Self
+        where S: AsRef<OsStr>
+    {
+        if !self.is_ld {
+            let mut os = OsString::from("-Wl,");
+            os.push(arg.as_ref());
+            self.cmd.arg(os);
+        } else {
+            // Invoking ld directly: no driver prefix required.
+            self.cmd.arg(arg);
+        }
+        self
+    }
+
+    // Static/dynamic hints are skipped on macOS-like targets.
+    fn takes_hints(&self) -> bool {
+        !self.sess.target.target.options.is_like_osx
+    }
+
+    // Some platforms take hints about whether a library is static or dynamic.
+    // For those that support this, we ensure we pass the option if the library
+    // was flagged "static" (most defaults are dynamic) to ensure that if
+    // libfoo.a and libfoo.so both exist that the right one is chosen.
+    fn hint_static(&mut self) {
+        if !self.takes_hints() { return }
+        if !self.hinted_static {
+            self.linker_arg("-Bstatic");
+            self.hinted_static = true;
+        }
+    }
+
+    fn hint_dynamic(&mut self) {
+        if !self.takes_hints() { return }
+        if self.hinted_static {
+            self.linker_arg("-Bdynamic");
+            self.hinted_static = false;
+        }
+    }
+}
+
+// GNU-style driver linker. Arguments built with `linker_arg` are prefixed
+// with `-Wl,` unless `is_ld` says we're invoking ld directly.
+impl<'a> Linker for GccLinker<'a> {
+    // One-line forwarders. hint_static/hint_dynamic keep the -Bstatic /
+    // -Bdynamic mode in sync so the right libfoo.{a,so} is chosen.
+    fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg("-l").arg(lib); }
+    fn link_staticlib(&mut self, lib: &str) { self.hint_static(); self.cmd.arg("-l").arg(lib); }
+    fn link_rlib(&mut self, lib: &Path) { self.hint_static(); self.cmd.arg(lib); }
+    fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); }
+    fn framework_path(&mut self, path: &Path) { self.cmd.arg("-F").arg(path); }
+    fn output_filename(&mut self, path: &Path) { self.cmd.arg("-o").arg(path); }
+    fn add_object(&mut self, path: &Path) { self.cmd.arg(path); }
+    fn position_independent_executable(&mut self) { self.cmd.arg("-pie"); }
+    fn no_position_independent_executable(&mut self) { self.cmd.arg("-no-pie"); }
+    fn full_relro(&mut self) { self.linker_arg("-z,relro,-z,now"); }
+    fn partial_relro(&mut self) { self.linker_arg("-z,relro"); }
+    fn no_relro(&mut self) { self.linker_arg("-z,norelro"); }
+    fn build_static_executable(&mut self) { self.cmd.arg("-static"); }
+    fn args(&mut self, args: &[String]) { self.cmd.args(args); }
+
+    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+        self.hint_dynamic();
+        self.cmd.arg("-l").arg(lib);
+    }
+
+    fn link_framework(&mut self, framework: &str) {
+        self.hint_dynamic();
+        self.cmd.arg("-framework").arg(framework);
+    }
+
+    // Here we explicitly ask that the entire archive is included into the
+    // result artifact. For more details see #15460, but the gist is that
+    // the linker will strip away any unused objects in the archive if we
+    // don't otherwise explicitly reference them. This can occur for
+    // libraries which are just providing bindings, libraries with generic
+    // functions, etc.
+    fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]) {
+        self.hint_static();
+        let target = &self.sess.target.target;
+        if !target.options.is_like_osx {
+            self.linker_arg("--whole-archive").cmd.arg("-l").arg(lib);
+            self.linker_arg("--no-whole-archive");
+        } else {
+            // -force_load is the macOS equivalent of --whole-archive, but it
+            // involves passing the full path to the library to link.
+            let mut v = OsString::from("-force_load,");
+            v.push(&archive::find_library(lib, search_path, &self.sess));
+            self.linker_arg(&v);
+        }
+    }
+
+    fn link_whole_rlib(&mut self, lib: &Path) {
+        self.hint_static();
+        if self.sess.target.target.options.is_like_osx {
+            let mut v = OsString::from("-force_load,");
+            v.push(lib);
+            self.linker_arg(&v);
+        } else {
+            self.linker_arg("--whole-archive").cmd.arg(lib);
+            self.linker_arg("--no-whole-archive");
+        }
+    }
+
+    fn gc_sections(&mut self, keep_metadata: bool) {
+        // The dead_strip option to the linker specifies that functions and data
+        // unreachable by the entry point will be removed. This is quite useful
+        // with Rust's compilation model of compiling libraries at a time into
+        // one object file. For example, this brings hello world from 1.7MB to
+        // 458K.
+        //
+        // Note that this is done for both executables and dynamic libraries. We
+        // won't get much benefit from dylibs because LLVM will have already
+        // stripped away as much as it could. This has not been seen to impact
+        // link times negatively.
+        //
+        // -dead_strip can't be part of the pre_link_args because it's also used
+        // for partial linking when using multiple codegen units (-r).  So we
+        // insert it here.
+        if self.sess.target.target.options.is_like_osx {
+            self.linker_arg("-dead_strip");
+        } else if self.sess.target.target.options.is_like_solaris {
+            self.linker_arg("-z");
+            self.linker_arg("ignore");
+
+        // If we're building a dylib, we don't use --gc-sections because LLVM
+        // has already done the best it can do, and we also don't want to
+        // eliminate the metadata. If we're building an executable, however,
+        // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
+        // reduction.
+        } else if !keep_metadata {
+            self.linker_arg("--gc-sections");
+        }
+    }
+
+    fn optimize(&mut self) {
+        if !self.sess.target.target.options.linker_is_gnu { return }
+
+        // GNU-style linkers support optimization with -O. GNU ld doesn't
+        // need a numeric argument, but other linkers do.
+        if self.sess.opts.optimize == config::OptLevel::Default ||
+           self.sess.opts.optimize == config::OptLevel::Aggressive {
+            self.linker_arg("-O1");
+        }
+    }
+
+    fn pgo_gen(&mut self) {
+        if !self.sess.target.target.options.linker_is_gnu { return }
+
+        // If we're doing PGO generation stuff and on a GNU-like linker, use the
+        // "-u" flag to properly pull in the profiler runtime bits.
+        //
+        // This is because LLVM otherwise won't add the needed initialization
+        // for us on Linux (though the extra flag should be harmless if it
+        // does).
+        //
+        // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030.
+        //
+        // Though it may be worth to try to revert those changes upstream, since
+        // the overhead of the initialization should be minor.
+        self.cmd.arg("-u");
+        self.cmd.arg("__llvm_profile_runtime");
+    }
+
+    fn debuginfo(&mut self) {
+        match self.sess.opts.debuginfo {
+            DebugInfoLevel::NoDebugInfo => {
+                // If we are building without debuginfo enabled and we were called with
+                // `-Zstrip-debuginfo-if-disabled=yes`, tell the linker to strip any debuginfo
+                // found when linking to get rid of symbols from libstd.
+                match self.sess.opts.debugging_opts.strip_debuginfo_if_disabled {
+                    Some(true) => { self.linker_arg("-S"); },
+                    _ => {},
+                }
+            },
+            _ => {},
+        };
+    }
+
+    fn no_default_libraries(&mut self) {
+        // -nodefaultlibs is a driver (gcc) flag, so it's skipped when
+        // invoking ld directly.
+        if !self.is_ld {
+            self.cmd.arg("-nodefaultlibs");
+        }
+    }
+
+    fn build_dylib(&mut self, out_filename: &Path) {
+        // On mac we need to tell the linker to let this library be rpathed
+        if self.sess.target.target.options.is_like_osx {
+            self.cmd.arg("-dynamiclib");
+            self.linker_arg("-dylib");
+
+            // Note that the `osx_rpath_install_name` option here is a hack
+            // purely to support rustbuild right now, we should get a more
+            // principled solution at some point to force the compiler to pass
+            // the right `-Wl,-install_name` with an `@rpath` in it.
+            if self.sess.opts.cg.rpath ||
+               self.sess.opts.debugging_opts.osx_rpath_install_name {
+                let mut v = OsString::from("-install_name,@rpath/");
+                v.push(out_filename.file_name().unwrap());
+                self.linker_arg(&v);
+            }
+        } else {
+            self.cmd.arg("-shared");
+        }
+    }
+
+    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) {
+        // If we're compiling a dylib, then we let symbol visibility in object
+        // files to take care of whether they're exported or not.
+        //
+        // If we're compiling a cdylib, however, we manually create a list of
+        // exported symbols to ensure we don't expose any more. The object files
+        // have far more public symbols than we actually want to export, so we
+        // hide them all here.
+        if crate_type == CrateType::CrateTypeDylib ||
+           crate_type == CrateType::CrateTypeProcMacro {
+            return
+        }
+
+        let mut arg = OsString::new();
+        let path = tmpdir.join("list");
+
+        debug!("EXPORTED SYMBOLS:");
+
+        if self.sess.target.target.options.is_like_osx {
+            // Write a plain, newline-separated list of symbols
+            let res = (|| -> io::Result<()> {
+                let mut f = BufWriter::new(File::create(&path)?);
+                for sym in self.info.exports[&crate_type].iter() {
+                    debug!("  _{}", sym);
+                    writeln!(f, "_{}", sym)?;
+                }
+                Ok(())
+            })();
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+            }
+        } else {
+            // Write an LD version script
+            let res = (|| -> io::Result<()> {
+                let mut f = BufWriter::new(File::create(&path)?);
+                writeln!(f, "{{\n  global:")?;
+                for sym in self.info.exports[&crate_type].iter() {
+                    debug!("    {};", sym);
+                    writeln!(f, "    {};", sym)?;
+                }
+                writeln!(f, "\n  local:\n    *;\n}};")?;
+                Ok(())
+            })();
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to write version script: {}", e));
+            }
+        }
+
+        // Choose the per-platform flag that consumes the symbol-list file
+        // written above.
+        if self.sess.target.target.options.is_like_osx {
+            if !self.is_ld {
+                arg.push("-Wl,")
+            }
+            arg.push("-exported_symbols_list,");
+        } else if self.sess.target.target.options.is_like_solaris {
+            if !self.is_ld {
+                arg.push("-Wl,")
+            }
+            arg.push("-M,");
+        } else {
+            if !self.is_ld {
+                arg.push("-Wl,")
+            }
+            arg.push("--version-script=");
+        }
+
+        arg.push(&path);
+        self.cmd.arg(arg);
+    }
+
+    fn subsystem(&mut self, subsystem: &str) {
+        self.linker_arg(&format!("--subsystem,{}", subsystem));
+    }
+
+    fn finalize(&mut self) -> Command {
+        self.hint_dynamic(); // Reset to default before returning the composed command line.
+        let mut cmd = Command::new("");
+        ::std::mem::swap(&mut cmd, &mut self.cmd);
+        cmd
+    }
+
+    fn group_start(&mut self) {
+        if !self.sess.target.target.options.is_like_osx {
+            self.linker_arg("--start-group");
+        }
+    }
+
+    fn group_end(&mut self) {
+        if !self.sess.target.target.options.is_like_osx {
+            self.linker_arg("--end-group");
+        }
+    }
+
+    fn cross_lang_lto(&mut self) {
+        match self.sess.opts.debugging_opts.cross_lang_lto {
+            CrossLangLto::Disabled |
+            CrossLangLto::NoLink => {
+                // Nothing to do
+            }
+            CrossLangLto::LinkerPlugin(ref path) => {
+                // Load the LTO linker plugin and forward the session's opt
+                // level, target CPU, and LTO mode to it.
+                self.linker_arg(&format!("-plugin={}", path.display()));
+
+                let opt_level = match self.sess.opts.optimize {
+                    config::OptLevel::No => "O0",
+                    config::OptLevel::Less => "O1",
+                    config::OptLevel::Default => "O2",
+                    config::OptLevel::Aggressive => "O3",
+                    config::OptLevel::Size => "Os",
+                    config::OptLevel::SizeMin => "Oz",
+                };
+
+                self.linker_arg(&format!("-plugin-opt={}", opt_level));
+                self.linker_arg(&format!("-plugin-opt=mcpu={}", self.sess.target_cpu()));
+
+                match self.sess.opts.cg.lto {
+                    config::Lto::Thin |
+                    config::Lto::ThinLocal => {
+                        self.linker_arg(&format!("-plugin-opt=thin"));
+                    }
+                    config::Lto::Fat |
+                    config::Lto::Yes |
+                    config::Lto::No => {
+                        // default to regular LTO
+                    }
+                }
+            }
+        }
+    }
+}
+
+// State for the MSVC `link.exe`-style linker (also used for lld-link).
+pub struct MsvcLinker<'a> {
+    cmd: Command,
+    sess: &'a Session,
+    info: &'a LinkerInfo
+}
+
+// MSVC linker: flags use the `/FLAG:value` form; several GNU-specific
+// notions (relro, PIE, frameworks, groups) are no-ops or errors here.
+impl<'a> Linker for MsvcLinker<'a> {
+    fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); }
+    fn add_object(&mut self, path: &Path) { self.cmd.arg(path); }
+    fn args(&mut self, args: &[String]) { self.cmd.args(args); }
+
+    fn build_dylib(&mut self, out_filename: &Path) {
+        self.cmd.arg("/DLL");
+        // Request an import library next to the dll ("foo.dll.lib").
+        let mut arg: OsString = "/IMPLIB:".into();
+        arg.push(out_filename.with_extension("dll.lib"));
+        self.cmd.arg(arg);
+    }
+
+    fn build_static_executable(&mut self) {
+        // noop
+    }
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {
+        // MSVC's ICF (Identical COMDAT Folding) link optimization is
+        // slow for Rust and thus we disable it by default when not in
+        // optimization build.
+        if self.sess.opts.optimize != config::OptLevel::No {
+            self.cmd.arg("/OPT:REF,ICF");
+        } else {
+            // It is necessary to specify NOICF here, because /OPT:REF
+            // implies ICF by default.
+            self.cmd.arg("/OPT:REF,NOICF");
+        }
+    }
+
+    fn link_dylib(&mut self, lib: &str) {
+        self.cmd.arg(&format!("{}.lib", lib));
+    }
+
+    fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
+        // When producing a dll, the MSVC linker may not actually emit a
+        // `foo.lib` file if the dll doesn't actually export any symbols, so we
+        // check to see if the file is there and just omit linking to it if it's
+        // not present.
+        let name = format!("{}.dll.lib", lib);
+        if fs::metadata(&path.join(&name)).is_ok() {
+            self.cmd.arg(name);
+        }
+    }
+
+    fn link_staticlib(&mut self, lib: &str) {
+        self.cmd.arg(&format!("{}.lib", lib));
+    }
+
+    fn position_independent_executable(&mut self) {
+        // noop
+    }
+
+    fn no_position_independent_executable(&mut self) {
+        // noop
+    }
+
+    fn full_relro(&mut self) {
+        // noop
+    }
+
+    fn partial_relro(&mut self) {
+        // noop
+    }
+
+    fn no_relro(&mut self) {
+        // noop
+    }
+
+    fn no_default_libraries(&mut self) {
+        // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC
+        // as there's been trouble in the past of linking the C++ standard
+        // library required by LLVM. This likely needs to happen one day, but
+        // in general Windows is also a more controlled environment than
+        // Unix, so it's not necessarily as critical that this be implemented.
+        //
+        // Note that there are also some licensing worries about statically
+        // linking some libraries which require a specific agreement, so it may
+        // not ever be possible for us to pass this flag.
+    }
+
+    fn include_path(&mut self, path: &Path) {
+        let mut arg = OsString::from("/LIBPATH:");
+        arg.push(path);
+        self.cmd.arg(&arg);
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        let mut arg = OsString::from("/OUT:");
+        arg.push(path);
+        self.cmd.arg(&arg);
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        bug!("frameworks are not supported on windows")
+    }
+    fn link_framework(&mut self, _framework: &str) {
+        bug!("frameworks are not supported on windows")
+    }
+
+    fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
+        // not supported?
+        self.link_staticlib(lib);
+    }
+    fn link_whole_rlib(&mut self, path: &Path) {
+        // not supported?
+        self.link_rlib(path);
+    }
+    fn optimize(&mut self) {
+        // Needs more investigation of `/OPT` arguments
+    }
+
+    fn pgo_gen(&mut self) {
+        // Nothing needed here.
+    }
+
+    fn debuginfo(&mut self) {
+        // This will cause the Microsoft linker to generate a PDB file
+        // from the CodeView line tables in the object files.
+        self.cmd.arg("/DEBUG");
+
+        // This will cause the Microsoft linker to embed .natvis info into the PDB file
+        let sysroot = self.sess.sysroot();
+        let natvis_dir_path = sysroot.join("lib\\rustlib\\etc");
+        if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) {
+            // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes
+            // on, the /NATVIS:... flags.  LLVM 6 (or earlier) should at worst ignore
+            // them, eventually mooting this workaround, per this landed patch:
+            // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100
+            if let Some(ref linker_path) = self.sess.opts.cg.linker {
+                if let Some(linker_name) = Path::new(&linker_path).file_stem() {
+                    if linker_name.to_str().unwrap().to_lowercase() == "lld-link" {
+                        self.sess.warn("not embedding natvis: lld-link may not support the flag");
+                        return;
+                    }
+                }
+            }
+            // Pass every *.natvis file found in the sysroot etc directory.
+            for entry in natvis_dir {
+                match entry {
+                    Ok(entry) => {
+                        let path = entry.path();
+                        if path.extension() == Some("natvis".as_ref()) {
+                            let mut arg = OsString::from("/NATVIS:");
+                            arg.push(path);
+                            self.cmd.arg(arg);
+                        }
+                    },
+                    Err(err) => {
+                        self.sess.warn(&format!("error enumerating natvis directory: {}", err));
+                    },
+                }
+            }
+        }
+    }
+
+    // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to
+    // export symbols from a dynamic library. When building a dynamic library,
+    // however, we're going to want some symbols exported, so this function
+    // generates a DEF file which lists all the symbols.
+    //
+    // The linker will read this `*.def` file and export all the symbols from
+    // the dynamic library. Note that this is not as simple as just exporting
+    // all the symbols in the current crate (as specified by `codegen.reachable`)
+    // but rather we also need to possibly export the symbols of upstream
+    // crates. Upstream rlibs may be linked statically to this dynamic library,
+    // in which case they may continue to transitively be used and hence need
+    // their symbols exported.
+    fn export_symbols(&mut self,
+                      tmpdir: &Path,
+                      crate_type: CrateType) {
+        let path = tmpdir.join("lib.def");
+        let res = (|| -> io::Result<()> {
+            let mut f = BufWriter::new(File::create(&path)?);
+
+            // Start off with the standard module name header and then go
+            // straight to exports.
+            writeln!(f, "LIBRARY")?;
+            writeln!(f, "EXPORTS")?;
+            for symbol in self.info.exports[&crate_type].iter() {
+                debug!("  _{}", symbol);
+                writeln!(f, "  {}", symbol)?;
+            }
+            Ok(())
+        })();
+        if let Err(e) = res {
+            self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+        }
+        let mut arg = OsString::from("/DEF:");
+        arg.push(path);
+        self.cmd.arg(&arg);
+    }
+
+    fn subsystem(&mut self, subsystem: &str) {
+        // Note that previous passes of the compiler validated this subsystem,
+        // so we just blindly pass it to the linker.
+        self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
+
+        // Windows has two subsystems we're interested in right now, the console
+        // and windows subsystems. These both implicitly have different entry
+        // points (starting symbols). The console entry point starts with
+        // `mainCRTStartup` and the windows entry point starts with
+        // `WinMainCRTStartup`. These entry points, defined in system libraries,
+        // will then later probe for either `main` or `WinMain`, respectively to
+        // start the application.
+        //
+        // In Rust we just always generate a `main` function so we want control
+        // to always start there, so we force the entry point on the windows
+        // subsystem to be `mainCRTStartup` to get everything booted up
+        // correctly.
+        //
+        // For more information see RFC #1665
+        if subsystem == "windows" {
+            self.cmd.arg("/ENTRY:mainCRTStartup");
+        }
+    }
+
+    fn finalize(&mut self) -> Command {
+        // Hand back the composed command, leaving an empty one behind.
+        let mut cmd = Command::new("");
+        ::std::mem::swap(&mut cmd, &mut self.cmd);
+        cmd
+    }
+
+    // MSVC doesn't need group indicators
+    fn group_start(&mut self) {}
+    fn group_end(&mut self) {}
+
+    fn cross_lang_lto(&mut self) {
+        // Do nothing
+    }
+}
+
+pub struct EmLinker<'a> {
+    cmd: Command,
+    sess: &'a Session,
+    info: &'a LinkerInfo
+}
+
+impl<'a> Linker for EmLinker<'a> {
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
+    }
+
+    fn link_staticlib(&mut self, lib: &str) {
+        self.cmd.arg("-l").arg(lib);
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        self.cmd.arg("-o").arg(path);
+    }
+
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg(path);
+    }
+
+    fn link_dylib(&mut self, lib: &str) {
+        // Emscripten always links statically
+        self.link_staticlib(lib);
+    }
+
+    fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
+        // not supported?
+        self.link_staticlib(lib);
+    }
+
+    fn link_whole_rlib(&mut self, lib: &Path) {
+        // not supported?
+        self.link_rlib(lib);
+    }
+
+    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+        self.link_dylib(lib);
+    }
+
+    fn link_rlib(&mut self, lib: &Path) {
+        self.add_object(lib);
+    }
+
+    fn position_independent_executable(&mut self) {
+        // noop
+    }
+
+    fn no_position_independent_executable(&mut self) {
+        // noop
+    }
+
+    fn full_relro(&mut self) {
+        // noop
+    }
+
+    fn partial_relro(&mut self) {
+        // noop
+    }
+
+    fn no_relro(&mut self) {
+        // noop
+    }
+
+    fn args(&mut self, args: &[String]) {
+        self.cmd.args(args);
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        bug!("frameworks are not supported on Emscripten")
+    }
+
+    fn link_framework(&mut self, _framework: &str) {
+        bug!("frameworks are not supported on Emscripten")
+    }
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {
+        // noop
+    }
+
+    fn optimize(&mut self) {
+        // Emscripten performs own optimizations
+        self.cmd.arg(match self.sess.opts.optimize {
+            OptLevel::No => "-O0",
+            OptLevel::Less => "-O1",
+            OptLevel::Default => "-O2",
+            OptLevel::Aggressive => "-O3",
+            OptLevel::Size => "-Os",
+            OptLevel::SizeMin => "-Oz"
+        });
+        // Unusable until https://github.com/rust-lang/rust/issues/38454 is resolved
+        self.cmd.args(&["--memory-init-file", "0"]);
+    }
+
+    fn pgo_gen(&mut self) {
+        // noop, but maybe we need something like the gnu linker?
+    }
+
+    fn debuginfo(&mut self) {
+        // Preserve names or generate source maps depending on debug info
+        self.cmd.arg(match self.sess.opts.debuginfo {
+            DebugInfoLevel::NoDebugInfo => "-g0",
+            DebugInfoLevel::LimitedDebugInfo => "-g3",
+            DebugInfoLevel::FullDebugInfo => "-g4"
+        });
+    }
+
+    fn no_default_libraries(&mut self) {
+        self.cmd.args(&["-s", "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[]"]);
+    }
+
+    fn build_dylib(&mut self, _out_filename: &Path) {
+        bug!("building dynamic library is unsupported on Emscripten")
+    }
+
+    fn build_static_executable(&mut self) {
+        // noop
+    }
+
+    fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
+        let symbols = &self.info.exports[&crate_type];
+
+        debug!("EXPORTED SYMBOLS:");
+
+        self.cmd.arg("-s");
+
+        let mut arg = OsString::from("EXPORTED_FUNCTIONS=");
+        let mut encoded = String::new();
+
+        {
+            let mut encoder = json::Encoder::new(&mut encoded);
+            let res = encoder.emit_seq(symbols.len(), |encoder| {
+                for (i, sym) in symbols.iter().enumerate() {
+                    encoder.emit_seq_elt(i, |encoder| {
+                        encoder.emit_str(&("_".to_string() + sym))
+                    })?;
+                }
+                Ok(())
+            });
+            if let Err(e) = res {
+                self.sess.fatal(&format!("failed to encode exported symbols: {}", e));
+            }
+        }
+        debug!("{}", encoded);
+        arg.push(encoded);
+
+        self.cmd.arg(arg);
+    }
+
+    fn subsystem(&mut self, _subsystem: &str) {
+        // noop
+    }
+
+    fn finalize(&mut self) -> Command {
+        let mut cmd = Command::new("");
+        ::std::mem::swap(&mut cmd, &mut self.cmd);
+        cmd
+    }
+
+    // Appears not necessary on Emscripten
+    fn group_start(&mut self) {}
+    fn group_end(&mut self) {}
+
+    fn cross_lang_lto(&mut self) {
+        // Do nothing
+    }
+}
+
+fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec<String> {
+    let mut symbols = Vec::new();
+
+    let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
+    for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() {
+        if level.is_below_threshold(export_threshold) {
+            symbols.push(symbol.symbol_name(tcx).to_string());
+        }
+    }
+
+    let formats = tcx.sess.dependency_formats.borrow();
+    let deps = formats[&crate_type].iter();
+
+    for (index, dep_format) in deps.enumerate() {
+        let cnum = CrateNum::new(index + 1);
+        // For each dependency that we are linking to statically ...
+        if *dep_format == Linkage::Static {
+            // ... we add its symbol list to our export list.
+            for &(symbol, level) in tcx.exported_symbols(cnum).iter() {
+                if level.is_below_threshold(export_threshold) {
+                    symbols.push(symbol.symbol_name(tcx).to_string());
+                }
+            }
+        }
+    }
+
+    symbols
+}
+
+pub struct WasmLd {
+    cmd: Command,
+}
+
+impl Linker for WasmLd {
+    fn link_dylib(&mut self, lib: &str) {
+        self.cmd.arg("-l").arg(lib);
+    }
+
+    fn link_staticlib(&mut self, lib: &str) {
+        self.cmd.arg("-l").arg(lib);
+    }
+
+    fn link_rlib(&mut self, lib: &Path) {
+        self.cmd.arg(lib);
+    }
+
+    fn include_path(&mut self, path: &Path) {
+        self.cmd.arg("-L").arg(path);
+    }
+
+    fn framework_path(&mut self, _path: &Path) {
+        panic!("frameworks not supported")
+    }
+
+    fn output_filename(&mut self, path: &Path) {
+        self.cmd.arg("-o").arg(path);
+    }
+
+    fn add_object(&mut self, path: &Path) {
+        self.cmd.arg(path);
+    }
+
+    fn position_independent_executable(&mut self) {
+    }
+
+    fn full_relro(&mut self) {
+    }
+
+    fn partial_relro(&mut self) {
+    }
+
+    fn no_relro(&mut self) {
+    }
+
+    fn build_static_executable(&mut self) {
+    }
+
+    fn args(&mut self, args: &[String]) {
+        self.cmd.args(args);
+    }
+
+    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+        self.cmd.arg("-l").arg(lib);
+    }
+
+    fn link_framework(&mut self, _framework: &str) {
+        panic!("frameworks not supported")
+    }
+
+    fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
+        self.cmd.arg("-l").arg(lib);
+    }
+
+    fn link_whole_rlib(&mut self, lib: &Path) {
+        self.cmd.arg(lib);
+    }
+
+    fn gc_sections(&mut self, _keep_metadata: bool) {
+    }
+
+    fn optimize(&mut self) {
+    }
+
+    fn pgo_gen(&mut self) {
+    }
+
+    fn debuginfo(&mut self) {
+    }
+
+    fn no_default_libraries(&mut self) {
+    }
+
+    fn build_dylib(&mut self, _out_filename: &Path) {
+    }
+
+    fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType) {
+    }
+
+    fn subsystem(&mut self, _subsystem: &str) {
+    }
+
+    fn no_position_independent_executable(&mut self) {
+    }
+
+    fn finalize(&mut self) -> Command {
+        // There have been reports in the wild (rustwasm/wasm-bindgen#119) of
+        // using threads causing weird hangs and bugs. Disable it entirely as
+        // this isn't yet the bottleneck of compilation at all anyway.
+        self.cmd.arg("--no-threads");
+
+        self.cmd.arg("-z").arg("stack-size=1048576");
+
+        // FIXME we probably shouldn't pass this but instead pass an explicit
+        // whitelist of symbols we'll allow to be undefined. Unfortunately
+        // though we can't handle symbols like `log10` that LLVM injects at a
+        // super late date without actually parsing object files. For now let's
+        // stick to this and hopefully fix it before stabilization happens.
+        self.cmd.arg("--allow-undefined");
+
+        // For now we just never have an entry symbol
+        self.cmd.arg("--no-entry");
+
+        let mut cmd = Command::new("");
+        ::std::mem::swap(&mut cmd, &mut self.cmd);
+        cmd
+    }
+
+    // Not needed for now with LLD
+    fn group_start(&mut self) {}
+    fn group_end(&mut self) {}
+
+    fn cross_lang_lto(&mut self) {
+        // Do nothing for now
+    }
+}
diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs
new file mode 100644
index 00000000000..96eda50d788
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/lto.rs
@@ -0,0 +1,773 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION};
+use back::symbol_export;
+use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext};
+use back::write;
+use errors::{FatalError, Handler};
+use llvm::archive_ro::ArchiveRO;
+use llvm::{ModuleRef, TargetMachineRef, True, False};
+use llvm;
+use rustc::hir::def_id::LOCAL_CRATE;
+use rustc::middle::exported_symbols::SymbolExportLevel;
+use rustc::session::config::{self, Lto};
+use rustc::util::common::time_ext;
+use time_graph::Timeline;
+use {ModuleCodegen, ModuleLlvm, ModuleKind, ModuleSource};
+
+use libc;
+
+use std::ffi::CString;
+use std::ptr;
+use std::slice;
+use std::sync::Arc;
+
+pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool {
+    match crate_type {
+        config::CrateTypeExecutable |
+        config::CrateTypeStaticlib  |
+        config::CrateTypeCdylib     => true,
+
+        config::CrateTypeDylib     |
+        config::CrateTypeRlib      |
+        config::CrateTypeProcMacro => false,
+    }
+}
+
+pub(crate) enum LtoModuleCodegen {
+    Fat {
+        module: Option<ModuleCodegen>,
+        _serialized_bitcode: Vec<SerializedModule>,
+    },
+
+    Thin(ThinModule),
+}
+
+impl LtoModuleCodegen {
+    pub fn name(&self) -> &str {
+        match *self {
+            LtoModuleCodegen::Fat { .. } => "everything",
+            LtoModuleCodegen::Thin(ref m) => m.name(),
+        }
+    }
+
+    /// Optimize this module within the given codegen context.
+    ///
+    /// This function is unsafe as it'll return a `ModuleCodegen` still
+    /// points to LLVM data structures owned by this `LtoModuleCodegen`.
+    /// It's intended that the module returned is immediately code generated and
+    /// dropped, and then this LTO module is dropped.
+    pub(crate) unsafe fn optimize(&mut self,
+                                  cgcx: &CodegenContext,
+                                  timeline: &mut Timeline)
+        -> Result<ModuleCodegen, FatalError>
+    {
+        match *self {
+            LtoModuleCodegen::Fat { ref mut module, .. } => {
+                let module = module.take().unwrap();
+                let config = cgcx.config(module.kind);
+                let llmod = module.llvm().unwrap().llmod;
+                let tm = module.llvm().unwrap().tm;
+                run_pass_manager(cgcx, tm, llmod, config, false);
+                timeline.record("fat-done");
+                Ok(module)
+            }
+            LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline),
+        }
+    }
+
+    /// A "gauge" of how costly it is to optimize this module, used to sort
+    /// biggest modules first.
+    pub fn cost(&self) -> u64 {
+        match *self {
+            // Only one module with fat LTO, so the cost doesn't matter.
+            LtoModuleCodegen::Fat { .. } => 0,
+            LtoModuleCodegen::Thin(ref m) => m.cost(),
+        }
+    }
+}
+
+pub(crate) fn run(cgcx: &CodegenContext,
+                  modules: Vec<ModuleCodegen>,
+                  timeline: &mut Timeline)
+    -> Result<Vec<LtoModuleCodegen>, FatalError>
+{
+    let diag_handler = cgcx.create_diag_handler();
+    let export_threshold = match cgcx.lto {
+        // We're just doing LTO for our one crate
+        Lto::ThinLocal => SymbolExportLevel::Rust,
+
+        // We're doing LTO for the entire crate graph
+        Lto::Yes | Lto::Fat | Lto::Thin => {
+            symbol_export::crates_export_threshold(&cgcx.crate_types)
+        }
+
+        Lto::No => panic!("didn't request LTO but we're doing LTO"),
+    };
+
+    let symbol_filter = &|&(ref name, level): &(String, SymbolExportLevel)| {
+        if level.is_below_threshold(export_threshold) {
+            let mut bytes = Vec::with_capacity(name.len() + 1);
+            bytes.extend(name.bytes());
+            Some(CString::new(bytes).unwrap())
+        } else {
+            None
+        }
+    };
+    let exported_symbols = cgcx.exported_symbols
+        .as_ref().expect("needs exported symbols for LTO");
+    let mut symbol_white_list = exported_symbols[&LOCAL_CRATE]
+        .iter()
+        .filter_map(symbol_filter)
+        .collect::<Vec<CString>>();
+    timeline.record("whitelist");
+    info!("{} symbols to preserve in this crate", symbol_white_list.len());
+
+    // If we're performing LTO for the entire crate graph, then for each of our
+    // upstream dependencies, find the corresponding rlib and load the bitcode
+    // from the archive.
+    //
+    // We save off all the bytecode and LLVM module ids for later processing
+    // with either fat or thin LTO
+    let mut upstream_modules = Vec::new();
+    if cgcx.lto != Lto::ThinLocal {
+        if cgcx.opts.cg.prefer_dynamic {
+            diag_handler.struct_err("cannot prefer dynamic linking when performing LTO")
+                        .note("only 'staticlib', 'bin', and 'cdylib' outputs are \
+                               supported with LTO")
+                        .emit();
+            return Err(FatalError)
+        }
+
+        // Make sure we actually can run LTO
+        for crate_type in cgcx.crate_types.iter() {
+            if !crate_type_allows_lto(*crate_type) {
+                let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \
+                                            static library outputs");
+                return Err(e)
+            }
+        }
+
+        for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+            let exported_symbols = cgcx.exported_symbols
+                .as_ref().expect("needs exported symbols for LTO");
+            symbol_white_list.extend(
+                exported_symbols[&cnum]
+                    .iter()
+                    .filter_map(symbol_filter));
+
+            let archive = ArchiveRO::open(&path).expect("wanted an rlib");
+            let bytecodes = archive.iter().filter_map(|child| {
+                child.ok().and_then(|c| c.name().map(|name| (name, c)))
+            }).filter(|&(name, _)| name.ends_with(RLIB_BYTECODE_EXTENSION));
+            for (name, data) in bytecodes {
+                info!("adding bytecode {}", name);
+                let bc_encoded = data.data();
+
+                let (bc, id) = time_ext(cgcx.time_passes, None, &format!("decode {}", name), || {
+                    match DecodedBytecode::new(bc_encoded) {
+                        Ok(b) => Ok((b.bytecode(), b.identifier().to_string())),
+                        Err(e) => Err(diag_handler.fatal(&e)),
+                    }
+                })?;
+                let bc = SerializedModule::FromRlib(bc);
+                upstream_modules.push((bc, CString::new(id).unwrap()));
+            }
+            timeline.record(&format!("load: {}", path.display()));
+        }
+    }
+
+    let arr = symbol_white_list.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+    match cgcx.lto {
+        Lto::Yes | // `-C lto` == fat LTO by default
+        Lto::Fat => {
+            fat_lto(cgcx, &diag_handler, modules, upstream_modules, &arr, timeline)
+        }
+        Lto::Thin |
+        Lto::ThinLocal => {
+            thin_lto(&diag_handler, modules, upstream_modules, &arr, timeline)
+        }
+        Lto::No => unreachable!(),
+    }
+}
+
+fn fat_lto(cgcx: &CodegenContext,
+           diag_handler: &Handler,
+           mut modules: Vec<ModuleCodegen>,
+           mut serialized_modules: Vec<(SerializedModule, CString)>,
+           symbol_white_list: &[*const libc::c_char],
+           timeline: &mut Timeline)
+    -> Result<Vec<LtoModuleCodegen>, FatalError>
+{
+    info!("going for a fat lto");
+
+    // Find the "costliest" module and merge everything into that codegen unit.
+    // All the other modules will be serialized and reparsed into the new
+    // context, so this hopefully avoids serializing and parsing the largest
+    // codegen unit.
+    //
+    // Additionally use a regular module as the base here to ensure that various
+    // file copy operations in the backend work correctly. The only other kind
+    // of module here should be an allocator one, and if your crate is smaller
+    // than the allocator module then the size doesn't really matter anyway.
+    let (_, costliest_module) = modules.iter()
+        .enumerate()
+        .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+        .map(|(i, module)| {
+            let cost = unsafe {
+                llvm::LLVMRustModuleCost(module.llvm().unwrap().llmod)
+            };
+            (cost, i)
+        })
+        .max()
+        .expect("must be codegen'ing at least one module");
+    let module = modules.remove(costliest_module);
+    let llmod = module.llvm().expect("can't lto pre-codegened modules").llmod;
+    info!("using {:?} as a base module", module.llmod_id);
+
+    // For all other modules we codegened we'll need to link them into our own
+    // bitcode. All modules were codegened in their own LLVM context, however,
+    // and we want to move everything to the same LLVM context. Currently the
+    // way we know of to do that is to serialize them to a string and them parse
+    // them later. Not great but hey, that's why it's "fat" LTO, right?
+    for module in modules {
+        let llvm = module.llvm().expect("can't lto pre-codegened modules");
+        let buffer = ModuleBuffer::new(llvm.llmod);
+        let llmod_id = CString::new(&module.llmod_id[..]).unwrap();
+        serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+    }
+
+    // For all serialized bitcode files we parse them and link them in as we did
+    // above, this is all mostly handled in C++. Like above, though, we don't
+    // know much about the memory management here so we err on the side of being
+    // save and persist everything with the original module.
+    let mut serialized_bitcode = Vec::new();
+    let mut linker = Linker::new(llmod);
+    for (bc_decoded, name) in serialized_modules {
+        info!("linking {:?}", name);
+        time_ext(cgcx.time_passes, None, &format!("ll link {:?}", name), || {
+            let data = bc_decoded.data();
+            linker.add(&data).map_err(|()| {
+                let msg = format!("failed to load bc of {:?}", name);
+                write::llvm_err(&diag_handler, msg)
+            })
+        })?;
+        timeline.record(&format!("link {:?}", name));
+        serialized_bitcode.push(bc_decoded);
+    }
+    drop(linker);
+    cgcx.save_temp_bitcode(&module, "lto.input");
+
+    // Internalize everything that *isn't* in our whitelist to help strip out
+    // more modules and such
+    unsafe {
+        let ptr = symbol_white_list.as_ptr();
+        llvm::LLVMRustRunRestrictionPass(llmod,
+                                         ptr as *const *const libc::c_char,
+                                         symbol_white_list.len() as libc::size_t);
+        cgcx.save_temp_bitcode(&module, "lto.after-restriction");
+    }
+
+    if cgcx.no_landing_pads {
+        unsafe {
+            llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
+        }
+        cgcx.save_temp_bitcode(&module, "lto.after-nounwind");
+    }
+    timeline.record("passes");
+
+    Ok(vec![LtoModuleCodegen::Fat {
+        module: Some(module),
+        _serialized_bitcode: serialized_bitcode,
+    }])
+}
+
+struct Linker(llvm::LinkerRef);
+
+impl Linker {
+    fn new(llmod: ModuleRef) -> Linker {
+        unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
+    }
+
+    fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
+        unsafe {
+            if llvm::LLVMRustLinkerAdd(self.0,
+                                       bytecode.as_ptr() as *const libc::c_char,
+                                       bytecode.len()) {
+                Ok(())
+            } else {
+                Err(())
+            }
+        }
+    }
+}
+
+impl Drop for Linker {
+    fn drop(&mut self) {
+        unsafe { llvm::LLVMRustLinkerFree(self.0); }
+    }
+}
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoid this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+///     1. Prepare a "summary" of each LLVM module in question which describes
+///        the values inside, cost of the values, etc.
+///     2. Merge the summaries of all modules in question into one "index"
+///     3. Perform some global analysis on this index
+///     4. For each module, use the index and analysis calculated previously to
+///        perform local transformations on the module, for example inlining
+///        small functions from other modules.
+///     5. Run thin-specific optimization passes over each module, and then code
+///        generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively quite cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, the function here is designed at specifically just
+/// calculating the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
+fn thin_lto(diag_handler: &Handler,
+            modules: Vec<ModuleCodegen>,
+            serialized_modules: Vec<(SerializedModule, CString)>,
+            symbol_white_list: &[*const libc::c_char],
+            timeline: &mut Timeline)
+    -> Result<Vec<LtoModuleCodegen>, FatalError>
+{
+    unsafe {
+        info!("going for that thin, thin LTO");
+
+        let mut thin_buffers = Vec::new();
+        let mut module_names = Vec::new();
+        let mut thin_modules = Vec::new();
+
+        // FIXME: right now, like with fat LTO, we serialize all in-memory
+        //        modules before working with them and ThinLTO. We really
+        //        shouldn't do this, however, and instead figure out how to
+        //        extract a summary from an in-memory module and then merge that
+        //        into the global index. It turns out that this loop is by far
+        //        the most expensive portion of this small bit of global
+        //        analysis!
+        for (i, module) in modules.iter().enumerate() {
+            info!("local module: {} - {}", i, module.llmod_id);
+            let llvm = module.llvm().expect("can't lto precodegened module");
+            let name = CString::new(module.llmod_id.clone()).unwrap();
+            let buffer = ThinBuffer::new(llvm.llmod);
+            thin_modules.push(llvm::ThinLTOModule {
+                identifier: name.as_ptr(),
+                data: buffer.data().as_ptr(),
+                len: buffer.data().len(),
+            });
+            thin_buffers.push(buffer);
+            module_names.push(name);
+            timeline.record(&module.llmod_id);
+        }
+
+        // FIXME: All upstream crates are deserialized internally in the
+        //        function below to extract their summary and modules. Note that
+        //        unlike the loop above we *must* decode and/or read something
+        //        here as these are all just serialized files on disk. An
+        //        improvement, however, to make here would be to store the
+        //        module summary separately from the actual module itself. Right
+        //        now this is store in one large bitcode file, and the entire
+        //        file is deflate-compressed. We could try to bypass some of the
+        //        decompression by storing the index uncompressed and only
+        //        lazily decompressing the bytecode if necessary.
+        //
+        //        Note that truly taking advantage of this optimization will
+        //        likely be further down the road. We'd have to implement
+        //        incremental ThinLTO first where we could actually avoid
+        //        looking at upstream modules entirely sometimes (the contents,
+        //        we must always unconditionally look at the index).
+        let mut serialized = Vec::new();
+        for (module, name) in serialized_modules {
+            info!("foreign module {:?}", name);
+            thin_modules.push(llvm::ThinLTOModule {
+                identifier: name.as_ptr(),
+                data: module.data().as_ptr(),
+                len: module.data().len(),
+            });
+            serialized.push(module);
+            module_names.push(name);
+        }
+
+        // Delegate to the C++ bindings to create some data here. Once this is a
+        // tried-and-true interface we may wish to try to upstream some of this
+        // to LLVM itself, right now we reimplement a lot of what they do
+        // upstream...
+        let data = llvm::LLVMRustCreateThinLTOData(
+            thin_modules.as_ptr(),
+            thin_modules.len() as u32,
+            symbol_white_list.as_ptr(),
+            symbol_white_list.len() as u32,
+        );
+        if data.is_null() {
+            let msg = format!("failed to prepare thin LTO context");
+            return Err(write::llvm_err(&diag_handler, msg))
+        }
+        let data = ThinData(data);
+        info!("thin LTO data created");
+        timeline.record("data");
+
+        // Throw our data in an `Arc` as we'll be sharing it across threads. We
+        // also put all memory referenced by the C++ data (buffers, ids, etc)
+        // into the arc as well. After this we'll create a thin module
+        // codegen per module in this data.
+        let shared = Arc::new(ThinShared {
+            data,
+            thin_buffers,
+            serialized_modules: serialized,
+            module_names,
+        });
+        Ok((0..shared.module_names.len()).map(|i| {
+            LtoModuleCodegen::Thin(ThinModule {
+                shared: shared.clone(),
+                idx: i,
+            })
+        }).collect())
+    }
+}
+
+fn run_pass_manager(cgcx: &CodegenContext,
+                    tm: TargetMachineRef,
+                    llmod: ModuleRef,
+                    config: &ModuleConfig,
+                    thin: bool) {
+    // Now we have one massive module inside of llmod. Time to run the
+    // LTO-specific optimization passes that LLVM provides.
+    //
+    // This code is based off the code found in llvm's LTO code generator:
+    //      tools/lto/LTOCodeGenerator.cpp
+    debug!("running the pass manager");
+    unsafe {
+        let pm = llvm::LLVMCreatePassManager();
+        llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod);
+        let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _);
+        assert!(!pass.is_null());
+        llvm::LLVMRustAddPass(pm, pass);
+
+        // When optimizing for LTO we don't actually pass in `-O0`, but we force
+        // it to always happen at least with `-O1`.
+        //
+        // With ThinLTO we mess around a lot with symbol visibility in a way
+        // that will actually cause linking failures if we optimize at O0 which
+        // notable is lacking in dead code elimination. To ensure we at least
+        // get some optimizations and correctly link we forcibly switch to `-O1`
+        // to get dead code elimination.
+        //
+        // Note that in general this shouldn't matter too much as you typically
+        // only turn on ThinLTO when you're compiling with optimizations
+        // otherwise.
+        let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None);
+        let opt_level = match opt_level {
+            llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less,
+            level => level,
+        };
+        with_llvm_pmb(llmod, config, opt_level, false, &mut |b| {
+            if thin {
+                if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) {
+                    panic!("this version of LLVM does not support ThinLTO");
+                }
+            } else {
+                llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm,
+                    /* Internalize = */ False,
+                    /* RunInliner = */ True);
+            }
+        });
+
+        let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _);
+        assert!(!pass.is_null());
+        llvm::LLVMRustAddPass(pm, pass);
+
+        time_ext(cgcx.time_passes, None, "LTO passes", ||
+             llvm::LLVMRunPassManager(pm, llmod));
+
+        llvm::LLVMDisposePassManager(pm);
+    }
+    debug!("lto done");
+}
+
+pub enum SerializedModule {
+    Local(ModuleBuffer),
+    FromRlib(Vec<u8>),
+}
+
+impl SerializedModule {
+    fn data(&self) -> &[u8] {
+        match *self {
+            SerializedModule::Local(ref m) => m.data(),
+            SerializedModule::FromRlib(ref m) => m,
+        }
+    }
+}
+
/// Owning handle for an LLVM-allocated buffer containing a module's
/// serialized bitcode; the buffer is released through the FFI on drop.
pub struct ModuleBuffer(*mut llvm::ModuleBuffer);

// NOTE(review): the buffer behind the raw pointer is plain bytes owned by
// this handle and (judging by the API here) only read after creation, so
// moving/sharing it across threads is presumably sound — the invariant
// lives on the C++ side.
unsafe impl Send for ModuleBuffer {}
unsafe impl Sync for ModuleBuffer {}

impl ModuleBuffer {
    /// Serializes `m` into a fresh LLVM-owned buffer.
    pub fn new(m: ModuleRef) -> ModuleBuffer {
        ModuleBuffer(unsafe {
            llvm::LLVMRustModuleBufferCreate(m)
        })
    }

    /// Borrows the serialized bytes; the slice is valid for as long as
    /// `self` is alive.
    pub fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
            let len = llvm::LLVMRustModuleBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ModuleBuffer {
    fn drop(&mut self) {
        // Hand the buffer back to LLVM for deallocation.
        unsafe { llvm::LLVMRustModuleBufferFree(self.0); }
    }
}
+
/// One unit of ThinLTO work: an index into the shared state identifying
/// which module this worker should optimize.
pub struct ThinModule {
    shared: Arc<ThinShared>,
    idx: usize,
}

/// State shared by all ThinLTO workers: the combined analysis data plus
/// every module's serialized bitcode and name. Buffers for locally
/// codegened modules come first, followed by modules deserialized from
/// rlibs — `ThinModule::data` relies on that ordering.
struct ThinShared {
    data: ThinData,
    thin_buffers: Vec<ThinBuffer>,
    serialized_modules: Vec<SerializedModule>,
    module_names: Vec<CString>,
}

/// Owning handle for the LLVM-side ThinLTO analysis data; freed through
/// the FFI on drop.
struct ThinData(*mut llvm::ThinLTOData);

// NOTE(review): the analysis data appears to be read-only once built
// (workers only pass it to `LLVMRustPrepareThinLTO*`), which is what makes
// sharing the raw pointer across threads sound.
unsafe impl Send for ThinData {}
unsafe impl Sync for ThinData {}

impl Drop for ThinData {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustFreeThinLTOData(self.0);
        }
    }
}
+
/// Owning handle for an LLVM-allocated ThinLTO bitcode buffer (bitcode
/// plus the ThinLTO summary); released through the FFI on drop.
pub struct ThinBuffer(*mut llvm::ThinLTOBuffer);

// As with `ModuleBuffer`: the handle owns plain bytes that are only read,
// so cross-thread use is safe.
unsafe impl Send for ThinBuffer {}
unsafe impl Sync for ThinBuffer {}

impl ThinBuffer {
    /// Serializes `m` into a fresh ThinLTO buffer.
    pub fn new(m: ModuleRef) -> ThinBuffer {
        unsafe {
            let buffer = llvm::LLVMRustThinLTOBufferCreate(m);
            ThinBuffer(buffer)
        }
    }

    /// Borrows the serialized bytes; valid while `self` is alive.
    pub fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
            let len = llvm::LLVMRustThinLTOBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ThinBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustThinLTOBufferFree(self.0);
        }
    }
}
+
impl ThinModule {
    /// This module's name, as recorded in the shared ThinLTO state.
    fn name(&self) -> &str {
        self.shared.module_names[self.idx].to_str().unwrap()
    }

    /// Relative scheduling weight for this unit of work.
    fn cost(&self) -> u64 {
        // Yes, that's correct, we're using the size of the bytecode as an
        // indicator for how costly this codegen unit is.
        self.data().len() as u64
    }

    /// This module's serialized bitcode. Locally codegened modules live in
    /// `thin_buffers`; upstream modules follow in `serialized_modules`,
    /// indexed past the end of the buffers.
    fn data(&self) -> &[u8] {
        let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data());
        a.unwrap_or_else(|| {
            let len = self.shared.thin_buffers.len();
            self.shared.serialized_modules[self.idx - len].data()
        })
    }

    /// Runs the per-module half of ThinLTO: parses the bitcode into a
    /// fresh LLVM context, applies the rename/resolve/internalize/import
    /// steps driven by the shared analysis data, and then runs the
    /// thin-specific optimization pipeline. Returns the optimized module,
    /// ready for codegen.
    unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline)
        -> Result<ModuleCodegen, FatalError>
    {
        let diag_handler = cgcx.create_diag_handler();
        let tm = (cgcx.tm_factory)().map_err(|e| {
            write::llvm_err(&diag_handler, e)
        })?;

        // Right now the implementation we've got only works over serialized
        // modules, so we create a fresh new LLVM context and parse the module
        // into that context. One day, however, we may do this for upstream
        // crates but for locally codegened modules we may be able to reuse
        // that LLVM Context and Module.
        let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
        let llmod = llvm::LLVMRustParseBitcodeForThinLTO(
            llcx,
            self.data().as_ptr(),
            self.data().len(),
            self.shared.module_names[self.idx].as_ptr(),
        );
        if llmod.is_null() {
            let msg = format!("failed to parse bitcode for thin LTO module");
            return Err(write::llvm_err(&diag_handler, msg));
        }
        let module = ModuleCodegen {
            source: ModuleSource::Codegened(ModuleLlvm {
                llmod,
                llcx,
                tm,
            }),
            llmod_id: self.name().to_string(),
            name: self.name().to_string(),
            kind: ModuleKind::Regular,
        };
        cgcx.save_temp_bitcode(&module, "thin-lto-input");

        // Before we do much else find the "main" `DICompileUnit` that we'll be
        // using below. If we find more than one though then rustc has changed
        // in a way we're not ready for, so generate an ICE by returning
        // an error.
        let mut cu1 = ptr::null_mut();
        let mut cu2 = ptr::null_mut();
        llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
        if !cu2.is_null() {
            let msg = format!("multiple source DICompileUnits found");
            return Err(write::llvm_err(&diag_handler, msg))
        }

        // Like with "fat" LTO, get some better optimizations if landing pads
        // are disabled by removing all landing pads.
        if cgcx.no_landing_pads {
            llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
            cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind");
            timeline.record("nounwind");
        }

        // Up next comes the per-module local analyses that we do for Thin LTO.
        // Each of these functions is basically copied from the LLVM
        // implementation and then tailored to suit this implementation. Ideally
        // each of these would be supported by upstream LLVM but that's perhaps
        // a patch for another day!
        //
        // You can find some more comments about these functions in the LLVM
        // bindings we've got (currently `PassWrapper.cpp`)
        if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) {
            let msg = format!("failed to prepare thin LTO module");
            return Err(write::llvm_err(&diag_handler, msg))
        }
        cgcx.save_temp_bitcode(&module, "thin-lto-after-rename");
        timeline.record("rename");
        if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) {
            let msg = format!("failed to prepare thin LTO module");
            return Err(write::llvm_err(&diag_handler, msg))
        }
        cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve");
        timeline.record("resolve");
        if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) {
            let msg = format!("failed to prepare thin LTO module");
            return Err(write::llvm_err(&diag_handler, msg))
        }
        cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize");
        timeline.record("internalize");
        if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) {
            let msg = format!("failed to prepare thin LTO module");
            return Err(write::llvm_err(&diag_handler, msg))
        }
        cgcx.save_temp_bitcode(&module, "thin-lto-after-import");
        timeline.record("import");

        // Ok now this is a bit unfortunate. This is also something you won't
        // find upstream in LLVM's ThinLTO passes! This is a hack for now to
        // work around bugs in LLVM.
        //
        // First discovered in #45511 it was found that as part of ThinLTO
        // importing passes LLVM will import `DICompileUnit` metadata
        // information across modules. This means that we'll be working with one
        // LLVM module that has multiple `DICompileUnit` instances in it (a
        // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of
        // bugs in LLVM's backend which generates invalid DWARF in a situation
        // like this:
        //
        //  https://bugs.llvm.org/show_bug.cgi?id=35212
        //  https://bugs.llvm.org/show_bug.cgi?id=35562
        //
        // While the first bug there is fixed the second ended up causing #46346
        // which was basically a resurgence of #45511 after LLVM's bug 35212 was
        // fixed.
        //
        // This function below is a huge hack around this problem. The function
        // below is defined in `PassWrapper.cpp` and will basically "merge"
        // all `DICompileUnit` instances in a module. Basically it'll take all
        // the objects, rewrite all pointers of `DISubprogram` to point to the
        // first `DICompileUnit`, and then delete all the other units.
        //
        // This is probably mangling to the debug info slightly (but hopefully
        // not too much) but for now at least gets LLVM to emit valid DWARF (or
        // so it appears). Hopefully we can remove this once upstream bugs are
        // fixed in LLVM.
        llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
        cgcx.save_temp_bitcode(&module, "thin-lto-after-patch");
        timeline.record("patch");

        // Alright now that we've done everything related to the ThinLTO
        // analysis it's time to run some optimizations! Here we use the same
        // `run_pass_manager` as the "fat" LTO above except that we tell it to
        // populate a thin-specific pass manager, which presumably LLVM treats a
        // little differently.
        info!("running thin lto passes over {}", module.name);
        let config = cgcx.config(module.kind);
        run_pass_manager(cgcx, tm, llmod, config, true);
        cgcx.save_temp_bitcode(&module, "thin-lto-after-pm");
        timeline.record("thin-done");

        // FIXME: this is a hack around a bug in LLVM right now. Discovered in
        // #46910 it was found out that on 32-bit MSVC LLVM will hit a codegen
        // error if there's an available_externally function in the LLVM module.
        // Typically we don't actually use these functions but ThinLTO makes
        // heavy use of them when inlining across modules.
        //
        // Tracked upstream at https://bugs.llvm.org/show_bug.cgi?id=35736 this
        // function call (and its definition on the C++ side of things)
        // shouldn't be necessary eventually and we can safetly delete these few
        // lines.
        llvm::LLVMRustThinLTORemoveAvailableExternally(llmod);
        cgcx.save_temp_bitcode(&module, "thin-lto-after-rm-ae");
        timeline.record("no-ae");

        Ok(module)
    }
}
diff --git a/src/librustc_codegen_llvm/back/rpath.rs b/src/librustc_codegen_llvm/back/rpath.rs
new file mode 100644
index 00000000000..8e5e7d37648
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/rpath.rs
@@ -0,0 +1,282 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::HashSet;
+use std::env;
+use std::path::{Path, PathBuf};
+use std::fs;
+
+use rustc::hir::def_id::CrateNum;
+use rustc::middle::cstore::LibSource;
+
/// Everything needed to compute the rpath flags for one output binary.
pub struct RPathConfig<'a> {
    // Crates linked into the output, with where each one was found.
    pub used_crates: &'a [(CrateNum, LibSource)],
    // Path of the binary/library being emitted.
    pub out_filename: PathBuf,
    // Whether the target follows OSX conventions (`@loader_path` origin).
    pub is_like_osx: bool,
    // Whether the target supports rpaths at all (false on e.g. Windows).
    pub has_rpath: bool,
    // GNU linkers understand `--enable-new-dtags` (emits DT_RUNPATH).
    pub linker_is_gnu: bool,
    // Lazily computes the installation's library directory for the
    // fallback rpath.
    pub get_install_prefix_lib_path: &'a mut FnMut() -> PathBuf,
}
+
+pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec<String> {
+    // No rpath on windows
+    if !config.has_rpath {
+        return Vec::new();
+    }
+
+    let mut flags = Vec::new();
+
+    debug!("preparing the RPATH!");
+
+    let libs = config.used_crates.clone();
+    let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::<Vec<_>>();
+    let rpaths = get_rpaths(config, &libs);
+    flags.extend_from_slice(&rpaths_to_flags(&rpaths));
+
+    // Use DT_RUNPATH instead of DT_RPATH if available
+    if config.linker_is_gnu {
+        flags.push("-Wl,--enable-new-dtags".to_string());
+    }
+
+    flags
+}
+
/// Converts raw rpath entries into linker flags. Paths containing a comma
/// cannot ride inside a single `-Wl,...` option (the linker driver splits
/// on commas), so those are passed via a separate `-Xlinker` argument.
fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
    let mut flags = Vec::new();
    for rpath in rpaths {
        if rpath.contains(',') {
            flags.push("-Wl,-rpath".into());
            flags.push("-Xlinker".into());
            flags.push(rpath.clone());
        } else {
            flags.push(format!("-Wl,-rpath,{}", rpath));
        }
    }
    flags
}
+
+fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec<String> {
+    debug!("output: {:?}", config.out_filename.display());
+    debug!("libs:");
+    for libpath in libs {
+        debug!("    {:?}", libpath.display());
+    }
+
+    // Use relative paths to the libraries. Binaries can be moved
+    // as long as they maintain the relative relationship to the
+    // crates they depend on.
+    let rel_rpaths = get_rpaths_relative_to_output(config, libs);
+
+    // And a final backup rpath to the global library location.
+    let fallback_rpaths = vec![get_install_prefix_rpath(config)];
+
+    fn log_rpaths(desc: &str, rpaths: &[String]) {
+        debug!("{} rpaths:", desc);
+        for rpath in rpaths {
+            debug!("    {}", *rpath);
+        }
+    }
+
+    log_rpaths("relative", &rel_rpaths);
+    log_rpaths("fallback", &fallback_rpaths);
+
+    let mut rpaths = rel_rpaths;
+    rpaths.extend_from_slice(&fallback_rpaths);
+
+    // Remove duplicates
+    let rpaths = minimize_rpaths(&rpaths);
+    return rpaths;
+}
+
+fn get_rpaths_relative_to_output(config: &mut RPathConfig,
+                                 libs: &[PathBuf]) -> Vec<String> {
+    libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
+}
+
+fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String {
+    // Mac doesn't appear to support $ORIGIN
+    let prefix = if config.is_like_osx {
+        "@loader_path"
+    } else {
+        "$ORIGIN"
+    };
+
+    let cwd = env::current_dir().unwrap();
+    let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or(cwd.join(lib));
+    lib.pop();
+    let mut output = cwd.join(&config.out_filename);
+    output.pop();
+    let output = fs::canonicalize(&output).unwrap_or(output);
+    let relative = path_relative_from(&lib, &output)
+        .expect(&format!("couldn't create relative path from {:?} to {:?}", output, lib));
+    // FIXME (#9639): This needs to handle non-utf8 paths
+    format!("{}/{}", prefix,
+            relative.to_str().expect("non-utf8 component in path"))
+}
+
// This routine is adapted from the *old* Path's `path_relative_from`
// function, which works differently from the new `relative_from` function.
// In particular, this handles the case on unix where both paths are
// absolute but with only the root as the common directory.
//
// Returns the relative path from `base` to `path`, or `None` when no such
// path exists (relative `path` against absolute `base`, or a `..` in
// `base` we cannot see past).
fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
    use std::path::Component;

    // Mixed absolute/relative inputs: an absolute `path` stands on its
    // own; a relative one cannot be expressed against an absolute base.
    if path.is_absolute() != base.is_absolute() {
        return if path.is_absolute() {
            Some(PathBuf::from(path))
        } else {
            None
        };
    }

    let mut path_iter = path.components();
    let mut base_iter = base.components();
    let mut result: Vec<Component> = Vec::new();
    loop {
        match (path_iter.next(), base_iter.next()) {
            // Both exhausted: everything matched.
            (None, None) => break,
            // Base exhausted: the rest of `path` is the answer.
            (Some(head), None) => {
                result.push(head);
                result.extend(path_iter.by_ref());
                break;
            }
            // Path exhausted: step up once per remaining base component.
            (None, _) => result.push(Component::ParentDir),
            // Shared prefix: skip while nothing has diverged yet.
            (Some(a), Some(b)) if result.is_empty() && a == b => {}
            // `.` in base contributes nothing.
            (Some(a), Some(b)) if b == Component::CurDir => result.push(a),
            // `..` in base is unresolvable without knowing the real tree.
            (Some(_), Some(b)) if b == Component::ParentDir => return None,
            // Divergence: back out of all of base, then descend into path.
            (Some(head), Some(_)) => {
                result.push(Component::ParentDir);
                result.extend(base_iter.by_ref().map(|_| Component::ParentDir));
                result.push(head);
                result.extend(path_iter.by_ref());
                break;
            }
        }
    }
    Some(result.iter().map(|c| c.as_os_str()).collect())
}
+
+
+fn get_install_prefix_rpath(config: &mut RPathConfig) -> String {
+    let path = (config.get_install_prefix_lib_path)();
+    let path = env::current_dir().unwrap().join(&path);
+    // FIXME (#9639): This needs to handle non-utf8 paths
+    path.to_str().expect("non-utf8 component in rpath").to_string()
+}
+
/// Deduplicates the rpath list while keeping first-seen order.
fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
    let mut seen = HashSet::new();
    rpaths.iter()
          .filter(|rpath| seen.insert(*rpath))
          .cloned()
          .collect()
}
+
#[cfg(all(unix, test))]
mod tests {
    use super::{RPathConfig};
    use super::{minimize_rpaths, rpaths_to_flags, get_rpath_relative_to_output};
    use std::path::{Path, PathBuf};

    #[test]
    fn test_rpaths_to_flags() {
        let flags = rpaths_to_flags(&[
            "path1".to_string(),
            "path2".to_string()
        ]);
        assert_eq!(flags,
                   ["-Wl,-rpath,path1",
                    "-Wl,-rpath,path2"]);
    }

    #[test]
    fn test_minimize1() {
        let res = minimize_rpaths(&[
            "rpath1".to_string(),
            "rpath2".to_string(),
            "rpath1".to_string()
        ]);
        assert!(res == [
            "rpath1",
            "rpath2",
        ]);
    }

    #[test]
    fn test_minimize2() {
        let res = minimize_rpaths(&[
            "1a".to_string(),
            "2".to_string(),
            "2".to_string(),
            "1a".to_string(),
            "4a".to_string(),
            "1a".to_string(),
            "2".to_string(),
            "3".to_string(),
            "4a".to_string(),
            "3".to_string()
        ]);
        assert!(res == [
            "1a",
            "2",
            "4a",
            "3",
        ]);
    }

    #[test]
    fn test_rpath_relative() {
        if cfg!(target_os = "macos") {
            let config = &mut RPathConfig {
                // `used_crates` is declared as `&'a [(CrateNum, LibSource)]`;
                // the previous `Vec::new()` initializer does not coerce to a
                // slice reference and would not compile.
                used_crates: &[],
                has_rpath: true,
                is_like_osx: true,
                linker_is_gnu: false,
                out_filename: PathBuf::from("bin/rustc"),
                get_install_prefix_lib_path: &mut || panic!(),
            };
            let res = get_rpath_relative_to_output(config,
                                                   Path::new("lib/libstd.so"));
            assert_eq!(res, "@loader_path/../lib");
        } else {
            let config = &mut RPathConfig {
                // See above: must be a slice, not a `Vec`.
                used_crates: &[],
                out_filename: PathBuf::from("bin/rustc"),
                get_install_prefix_lib_path: &mut || panic!(),
                has_rpath: true,
                is_like_osx: false,
                linker_is_gnu: true,
            };
            let res = get_rpath_relative_to_output(config,
                                                   Path::new("lib/libstd.so"));
            assert_eq!(res, "$ORIGIN/../lib");
        }
    }

    #[test]
    fn test_xlinker() {
        let args = rpaths_to_flags(&[
            "a/normal/path".to_string(),
            "a,comma,path".to_string()
        ]);

        assert_eq!(args, vec![
            "-Wl,-rpath,a/normal/path".to_string(),
            "-Wl,-rpath".to_string(),
            "-Xlinker".to_string(),
            "a,comma,path".to_string()
        ]);
    }
}
diff --git a/src/librustc_codegen_llvm/back/symbol_export.rs b/src/librustc_codegen_llvm/back/symbol_export.rs
new file mode 100644
index 00000000000..81ac684aee2
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/symbol_export.rs
@@ -0,0 +1,396 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc_data_structures::sync::Lrc;
+use std::sync::Arc;
+
+use monomorphize::Instance;
+use rustc::hir;
+use rustc::hir::CodegenFnAttrFlags;
+use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
+use rustc::ich::Fingerprint;
+use rustc::middle::exported_symbols::{SymbolExportLevel, ExportedSymbol, metadata_symbol_name};
+use rustc::session::config;
+use rustc::ty::{TyCtxt, SymbolName};
+use rustc::ty::maps::Providers;
+use rustc::ty::subst::Substs;
+use rustc::util::nodemap::{FxHashMap, DefIdMap};
+use rustc_allocator::ALLOCATOR_METHODS;
+use rustc_data_structures::indexed_vec::IndexVec;
+use std::collections::hash_map::Entry::*;
+
/// Map from each crate to the list of symbols it exports, paired with the
/// level (Rust vs C) at which each symbol is exported.
pub type ExportedSymbols = FxHashMap<
    CrateNum,
    Arc<Vec<(String, SymbolExportLevel)>>,
>;
+
/// The export threshold for the current session, derived from all the
/// crate types being emitted.
pub fn threshold(tcx: TyCtxt) -> SymbolExportLevel {
    crates_export_threshold(&tcx.sess.crate_types.borrow())
}
+
+fn crate_export_threshold(crate_type: config::CrateType) -> SymbolExportLevel {
+    match crate_type {
+        config::CrateTypeExecutable |
+        config::CrateTypeStaticlib  |
+        config::CrateTypeProcMacro  |
+        config::CrateTypeCdylib     => SymbolExportLevel::C,
+        config::CrateTypeRlib       |
+        config::CrateTypeDylib      => SymbolExportLevel::Rust,
+    }
+}
+
+pub fn crates_export_threshold(crate_types: &[config::CrateType])
+                                      -> SymbolExportLevel {
+    if crate_types.iter().any(|&crate_type| {
+        crate_export_threshold(crate_type) == SymbolExportLevel::Rust
+    }) {
+        SymbolExportLevel::Rust
+    } else {
+        SymbolExportLevel::C
+    }
+}
+
/// Provider computing, for the local crate, the set of reachable
/// non-generic items together with the export level each one should get.
fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                             cnum: CrateNum)
                                             -> Lrc<DefIdMap<SymbolExportLevel>>
{
    assert_eq!(cnum, LOCAL_CRATE);

    // Nothing is exported when no codegen output was requested.
    if !tcx.sess.opts.output_types.should_codegen() {
        return Lrc::new(DefIdMap())
    }

    // Check to see if this crate is a "special runtime crate". These
    // crates, implementation details of the standard library, typically
    // have a bunch of `pub extern` and `#[no_mangle]` functions as the
    // ABI between them. We don't want their symbols to have a `C`
    // export level, however, as they're just implementation details.
    // Down below we'll hardwire all of the symbols to the `Rust` export
    // level instead.
    let special_runtime_crate = tcx.is_panic_runtime(LOCAL_CRATE) ||
        tcx.is_compiler_builtins(LOCAL_CRATE);

    let mut reachable_non_generics: DefIdMap<_> = tcx.reachable_set(LOCAL_CRATE).0
        .iter()
        .filter_map(|&node_id| {
            // We want to ignore some FFI functions that are not exposed from
            // this crate. Reachable FFI functions can be lumped into two
            // categories:
            //
            // 1. Those that are included statically via a static library
            // 2. Those included otherwise (e.g. dynamically or via a framework)
            //
            // Although our LLVM module is not literally emitting code for the
            // statically included symbols, it's an export of our library which
            // needs to be passed on to the linker and encoded in the metadata.
            //
            // As a result, if this id is an FFI item (foreign item) then we only
            // let it through if it's included statically.
            match tcx.hir.get(node_id) {
                hir::map::NodeForeignItem(..) => {
                    let def_id = tcx.hir.local_def_id(node_id);
                    if tcx.is_statically_included_foreign_item(def_id) {
                        Some(def_id)
                    } else {
                        None
                    }
                }

                // Only consider nodes that actually have exported symbols.
                hir::map::NodeItem(&hir::Item {
                    node: hir::ItemStatic(..),
                    ..
                }) |
                hir::map::NodeItem(&hir::Item {
                    node: hir::ItemFn(..), ..
                }) |
                hir::map::NodeImplItem(&hir::ImplItem {
                    node: hir::ImplItemKind::Method(..),
                    ..
                }) => {
                    let def_id = tcx.hir.local_def_id(node_id);
                    let generics = tcx.generics_of(def_id);
                    if !generics.requires_monomorphization(tcx) &&
                        // Functions marked with #[inline] are only ever codegened
                        // with "internal" linkage and are never exported.
                        !Instance::mono(tcx, def_id).def.requires_local(tcx) {
                        Some(def_id)
                    } else {
                        None
                    }
                }

                _ => None
            }
        })
        .map(|def_id| {
            let export_level = if special_runtime_crate {
                let name = tcx.symbol_name(Instance::mono(tcx, def_id)).as_str();
                // We can probably do better here by just ensuring that
                // it has hidden visibility rather than public
                // visibility, as this is primarily here to ensure it's
                // not stripped during LTO.
                //
                // In general though we won't link right if these
                // symbols are stripped, and LTO currently strips them.
                if &*name == "rust_eh_personality" ||
                   &*name == "rust_eh_register_frames" ||
                   &*name == "rust_eh_unregister_frames" {
                    SymbolExportLevel::C
                } else {
                    SymbolExportLevel::Rust
                }
            } else {
                symbol_export_level(tcx, def_id)
            };
            debug!("EXPORTED SYMBOL (local): {} ({:?})",
                   tcx.symbol_name(Instance::mono(tcx, def_id)),
                   export_level);
            (def_id, export_level)
        })
        .collect();

    // Registrar entry points are always exported at the C level.
    if let Some(id) = *tcx.sess.derive_registrar_fn.get() {
        let def_id = tcx.hir.local_def_id(id);
        reachable_non_generics.insert(def_id, SymbolExportLevel::C);
    }

    if let Some(id) = *tcx.sess.plugin_registrar_fn.get() {
        let def_id = tcx.hir.local_def_id(id);
        reachable_non_generics.insert(def_id, SymbolExportLevel::C);
    }

    Lrc::new(reachable_non_generics)
}
+
+fn is_reachable_non_generic_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                                                     def_id: DefId)
+                                                     -> bool {
+    let export_threshold = threshold(tcx);
+
+    if let Some(&level) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
+        level.is_below_threshold(export_threshold)
+    } else {
+        false
+    }
+}
+
/// Upstream-crate provider: a non-generic item counts as reachable if that
/// crate recorded any export level for it at all.
fn is_reachable_non_generic_provider_extern<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                                      def_id: DefId)
                                                      -> bool {
    tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
}
+
/// Provider computing every symbol the local crate exports: reachable
/// non-generics, plus special symbols (`main`, allocator shims, profiler
/// weak symbols, the metadata symbol) and — with shared generics — the
/// external-linkage monomorphizations from the local codegen units.
fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                             cnum: CrateNum)
                                             -> Arc<Vec<(ExportedSymbol<'tcx>,
                                                         SymbolExportLevel)>>
{
    assert_eq!(cnum, LOCAL_CRATE);

    // Nothing is exported when no codegen output was requested.
    if !tcx.sess.opts.output_types.should_codegen() {
        return Arc::new(vec![])
    }

    let mut symbols: Vec<_> = tcx.reachable_non_generics(LOCAL_CRATE)
                                 .iter()
                                 .map(|(&def_id, &level)| {
                                    (ExportedSymbol::NonGeneric(def_id), level)
                                 })
                                 .collect();

    if let Some(_) = *tcx.sess.entry_fn.borrow() {
        let symbol_name = "main".to_string();
        let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));

        symbols.push((exported_symbol, SymbolExportLevel::C));
    }

    if tcx.sess.allocator_kind.get().is_some() {
        // Export the `__rust_*` allocator shims for every allocator method.
        for method in ALLOCATOR_METHODS {
            let symbol_name = format!("__rust_{}", method.name);
            let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));

            symbols.push((exported_symbol, SymbolExportLevel::Rust));
        }
    }

    if tcx.sess.opts.debugging_opts.pgo_gen.is_some() {
        // These are weak symbols that point to the profile version and the
        // profile name, which need to be treated as exported so LTO doesn't nix
        // them.
        const PROFILER_WEAK_SYMBOLS: [&'static str; 2] = [
            "__llvm_profile_raw_version",
            "__llvm_profile_filename",
        ];
        for sym in &PROFILER_WEAK_SYMBOLS {
            let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym));
            symbols.push((exported_symbol, SymbolExportLevel::C));
        }
    }

    if tcx.sess.crate_types.borrow().contains(&config::CrateTypeDylib) {
        // Rust dylibs carry the crate metadata as an exported symbol.
        let symbol_name = metadata_symbol_name(tcx);
        let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));

        symbols.push((exported_symbol, SymbolExportLevel::Rust));
    }

    if tcx.share_generics() && tcx.local_crate_exports_generics() {
        use rustc::mir::mono::{Linkage, Visibility, MonoItem};
        use rustc::ty::InstanceDef;

        // Normally, we require that shared monomorphizations are not hidden,
        // because if we want to re-use a monomorphization from a Rust dylib, it
        // needs to be exported.
        // However, on platforms that don't allow for Rust dylibs, having
        // external linkage is enough for monomorphization to be linked to.
        let need_visibility = tcx.sess.target.target.options.dynamic_linking &&
                              !tcx.sess.target.target.options.only_cdylib;

        let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);

        for (mono_item, &(linkage, visibility)) in cgus.iter()
                                                       .flat_map(|cgu| cgu.items().iter()) {
            if linkage != Linkage::External {
                // We can only re-use things with external linkage, otherwise
                // we'll get a linker error
                continue
            }

            if need_visibility && visibility == Visibility::Hidden {
                // If we potentially share things from Rust dylibs, they must
                // not be hidden
                continue
            }

            if let &MonoItem::Fn(Instance {
                def: InstanceDef::Item(def_id),
                substs,
            }) = mono_item {
                if substs.types().next().is_some() {
                    symbols.push((ExportedSymbol::Generic(def_id, substs),
                                  SymbolExportLevel::Rust));
                }
            }
        }
    }

    // Sort so we get a stable incr. comp. hash.
    symbols.sort_unstable_by(|&(ref symbol1, ..), &(ref symbol2, ..)| {
        symbol1.compare_stable(tcx, symbol2)
    });

    Arc::new(symbols)
}
+
/// Provider building, for the local crate, a map of every generic
/// instantiation exported by some upstream crate: `DefId` of the generic
/// -> (substs -> crate chosen to supply that instantiation). When several
/// crates export the same instantiation, one is selected deterministically
/// by comparing the crates' root def-path-hash fingerprints.
fn upstream_monomorphizations_provider<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    cnum: CrateNum)
    -> Lrc<DefIdMap<Lrc<FxHashMap<&'tcx Substs<'tcx>, CrateNum>>>>
{
    debug_assert!(cnum == LOCAL_CRATE);

    let cnums = tcx.all_crate_nums(LOCAL_CRATE);

    let mut instances = DefIdMap();

    // Stable fingerprint per crate, used as the deterministic tie-breaker
    // below.
    let cnum_stable_ids: IndexVec<CrateNum, Fingerprint> = {
        let mut cnum_stable_ids = IndexVec::from_elem_n(Fingerprint::ZERO,
                                                        cnums.len() + 1);

        for &cnum in cnums.iter() {
            cnum_stable_ids[cnum] = tcx.def_path_hash(DefId {
                krate: cnum,
                index: CRATE_DEF_INDEX,
            }).0;
        }

        cnum_stable_ids
    };

    for &cnum in cnums.iter() {
        for &(ref exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
            if let &ExportedSymbol::Generic(def_id, substs) = exported_symbol {
                let substs_map = instances.entry(def_id)
                                          .or_insert_with(|| FxHashMap());

                match substs_map.entry(substs) {
                    Occupied(mut e) => {
                        // If there are multiple monomorphizations available,
                        // we select one deterministically.
                        let other_cnum = *e.get();
                        if cnum_stable_ids[other_cnum] > cnum_stable_ids[cnum] {
                            e.insert(cnum);
                        }
                    }
                    Vacant(e) => {
                        e.insert(cnum);
                    }
                }
            }
        }
    }

    Lrc::new(instances.into_iter()
                      .map(|(key, value)| (key, Lrc::new(value)))
                      .collect())
}
+
/// Provider looking up, for a single upstream `DefId`, the map from
/// monomorphization substs to the crate selected to supply each instance —
/// a per-item view of `upstream_monomorphizations`.
fn upstream_monomorphizations_for_provider<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId)
    -> Option<Lrc<FxHashMap<&'tcx Substs<'tcx>, CrateNum>>>
{
    debug_assert!(!def_id.is_local());
    tcx.upstream_monomorphizations(LOCAL_CRATE)
       .get(&def_id)
       .cloned()
}
+
+fn is_unreachable_local_definition_provider(tcx: TyCtxt, def_id: DefId) -> bool {
+    if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
+        !tcx.reachable_set(LOCAL_CRATE).0.contains(&node_id)
+    } else {
+        bug!("is_unreachable_local_definition called with non-local DefId: {:?}",
+              def_id)
+    }
+}
+
/// Installs the local-crate query providers defined in this module.
pub fn provide(providers: &mut Providers) {
    providers.reachable_non_generics = reachable_non_generics_provider;
    providers.is_reachable_non_generic = is_reachable_non_generic_provider_local;
    providers.exported_symbols = exported_symbols_provider_local;
    providers.upstream_monomorphizations = upstream_monomorphizations_provider;
    providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
}
+
/// Installs the upstream-crate query providers defined in this module.
pub fn provide_extern(providers: &mut Providers) {
    providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
    providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
}
+
+fn symbol_export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel {
+    // We export anything that's not mangled at the "C" layer as it probably has
+    // to do with ABI concerns. We do not, however, apply such treatment to
+    // special symbols in the standard library for various plumbing between
+    // core/std/allocators/etc. For example symbols used to hook up allocation
+    // are not considered for export
+    let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
+    let is_extern = codegen_fn_attrs.contains_extern_indicator();
+    let std_internal =
+        codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
+
+    if is_extern && !std_internal {
+        SymbolExportLevel::C
+    } else {
+        SymbolExportLevel::Rust
+    }
+}
diff --git a/src/librustc_codegen_llvm/back/wasm.rs b/src/librustc_codegen_llvm/back/wasm.rs
new file mode 100644
index 00000000000..d6d386c9fbe
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/wasm.rs
@@ -0,0 +1,261 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::BTreeMap;
+use std::fs;
+use std::path::Path;
+use std::str;
+
+use rustc_data_structures::fx::FxHashMap;
+use serialize::leb128;
+
+// https://webassembly.github.io/spec/core/binary/modules.html#binary-importsec
+const WASM_IMPORT_SECTION_ID: u8 = 2;
+
+// "External kind" discriminants for import entries, per
+// https://webassembly.github.io/spec/core/binary/modules.html#import-section
+const WASM_EXTERNAL_KIND_FUNCTION: u8 = 0;
+const WASM_EXTERNAL_KIND_TABLE: u8 = 1;
+const WASM_EXTERNAL_KIND_MEMORY: u8 = 2;
+const WASM_EXTERNAL_KIND_GLOBAL: u8 = 3;
+
+/// Append all the custom sections listed in `sections` to the wasm binary
+/// specified at `path`.
+///
+/// LLVM 6 which we're using right now doesn't have the ability to create custom
+/// sections in wasm files nor does LLD have the ability to merge these sections
+/// into one larger section when linking. It's expected that this will
+/// eventually get implemented, however!
+///
+/// Until that time though this is a custom implementation in rustc to append
+/// all sections to a wasm file to the finished product that LLD produces.
+///
+/// Support for this is landing in LLVM in https://reviews.llvm.org/D43097,
+/// although after that support will need to be in LLD as well.
+pub fn add_custom_sections(path: &Path, sections: &BTreeMap<String, Vec<u8>>) {
+    // Nothing to append — avoid rewriting the file on disk.
+    if sections.len() == 0 {
+        return
+    }
+
+    let wasm = fs::read(path).expect("failed to read wasm output");
+
+    // Reuse the whole existing binary as the encoder's buffer and append each
+    // custom section at the end, which the wasm format permits.
+    // see https://webassembly.github.io/spec/core/binary/modules.html#custom-section
+    let mut wasm = WasmEncoder { data: wasm };
+    for (section, bytes) in sections {
+        // write the `id` identifier, 0 for a custom section
+        wasm.byte(0);
+
+        // figure out how long our name descriptor will be
+        let mut name = WasmEncoder::new();
+        name.str(section);
+
+        // write the length of the payload followed by all its contents
+        // (the section length covers both the name descriptor and the bytes)
+        wasm.u32((bytes.len() + name.data.len()) as u32);
+        wasm.data.extend_from_slice(&name.data);
+        wasm.data.extend_from_slice(bytes);
+    }
+
+    fs::write(path, &wasm.data).expect("failed to write wasm output");
+}
+
+/// Rewrite the modules that imports are listed from in a wasm module, given
+/// the field-name to module-name mapping in `import_map`.
+///
+/// LLVM 6 which we're using right now doesn't have the ability to configure the
+/// module a wasm symbol is imported from. Rather all imported symbols come from
+/// the bland `"env"` module unconditionally. Furthermore we'd *also* need
+/// support in LLD for preserving these import modules, which it unfortunately
+/// currently does not.
+///
+/// This function is intended as a hack for now where we manually rewrite the
+/// wasm output by LLVM to have the correct import modules listed. The
+/// `#[wasm_import_module]` attribute in Rust translates to the module that each
+/// symbol is imported from, so here we manually go through the wasm file,
+/// decode it, rewrite imports, and then rewrite the wasm module.
+///
+/// Support for this was added to LLVM in
+/// https://github.com/llvm-mirror/llvm/commit/0f32e1365, although support still
+/// needs to be added (AFAIK at the time of this writing) to LLD
+pub fn rewrite_imports(path: &Path, import_map: &FxHashMap<String, String>) {
+    // No renames requested — leave the binary untouched.
+    if import_map.len() == 0 {
+        return
+    }
+
+    let wasm = fs::read(path).expect("failed to read wasm output");
+    let mut ret = WasmEncoder::new();
+    // copy the 8 byte wasm magic/version header verbatim...
+    ret.data.extend(&wasm[..8]);
+
+    // ...then decode the sections that follow it, rewriting only the import
+    // section and carrying every other section forward unchanged
+    for (id, raw) in WasmSections(WasmDecoder::new(&wasm[8..])) {
+        ret.byte(id);
+        if id == WASM_IMPORT_SECTION_ID {
+            info!("rewriting import section");
+            let data = rewrite_import_section(
+                &mut WasmDecoder::new(raw),
+                import_map,
+            );
+            ret.bytes(&data);
+        } else {
+            info!("carry forward section {}, {} bytes long", id, raw.len());
+            ret.bytes(raw);
+        }
+    }
+
+    fs::write(path, &ret.data).expect("failed to write wasm output");
+
+    // Decode an import section (count-prefixed vector of entries) and re-encode
+    // it with rewritten module names.
+    fn rewrite_import_section(
+        wasm: &mut WasmDecoder,
+        import_map: &FxHashMap<String, String>,
+    )
+        -> Vec<u8>
+    {
+        let mut dst = WasmEncoder::new();
+        let n = wasm.u32();
+        dst.u32(n);
+        info!("rewriting {} imports", n);
+        for _ in 0..n {
+            rewrite_import_entry(wasm, &mut dst, import_map);
+        }
+        return dst.data
+    }
+
+    // Copy a single import entry from `wasm` to `dst`, substituting the module
+    // name when the entry comes from `"env"` and its field has a mapping.
+    fn rewrite_import_entry(wasm: &mut WasmDecoder,
+                            dst: &mut WasmEncoder,
+                            import_map: &FxHashMap<String, String>) {
+        // More info about the binary format here is available at:
+        // https://webassembly.github.io/spec/core/binary/modules.html#import-section
+        //
+        // Note that you can also find the whole point of existence of this
+        // function here, where we map the `module` name to a different one if
+        // we've got one listed.
+        let module = wasm.str();
+        let field = wasm.str();
+        let new_module = if module == "env" {
+            import_map.get(field).map(|s| &**s).unwrap_or(module)
+        } else {
+            module
+        };
+        info!("import rewrite ({} => {}) / {}", module, new_module, field);
+        dst.str(new_module);
+        dst.str(field);
+        // The remainder of the entry is kind-specific and copied through
+        // structurally so the decoder stays in sync with the byte stream.
+        let kind = wasm.byte();
+        dst.byte(kind);
+        match kind {
+            WASM_EXTERNAL_KIND_FUNCTION => dst.u32(wasm.u32()),
+            WASM_EXTERNAL_KIND_TABLE => {
+                dst.byte(wasm.byte()); // element_type
+                dst.limits(wasm.limits());
+            }
+            WASM_EXTERNAL_KIND_MEMORY => dst.limits(wasm.limits()),
+            WASM_EXTERNAL_KIND_GLOBAL => {
+                dst.byte(wasm.byte()); // content_type
+                dst.bool(wasm.bool()); // mutable
+            }
+            b => panic!("unknown kind: {}", b),
+        }
+    }
+}
+
+/// Iterator over the top-level sections of a wasm binary (after the 8-byte
+/// header), yielding `(section_id, raw_section_bytes)` pairs.
+struct WasmSections<'a>(WasmDecoder<'a>);
+
+impl<'a> Iterator for WasmSections<'a> {
+    type Item = (u8, &'a [u8]);
+
+    fn next(&mut self) -> Option<(u8, &'a [u8])> {
+        // End of input means no more sections.
+        if self.0.data.len() == 0 {
+            return None
+        }
+
+        // see https://webassembly.github.io/spec/core/binary/modules.html#sections
+        // each section is: one id byte, a LEB128 length, then the payload
+        let id = self.0.byte();
+        let section_len = self.0.u32();
+        info!("new section {} / {} bytes", id, section_len);
+        let section = self.0.skip(section_len as usize);
+        Some((id, section))
+    }
+}
+
+/// Minimal cursor over a wasm byte stream; each read method consumes from the
+/// front of `data`. Panics (via slice indexing / `unwrap`) on truncated or
+/// malformed input.
+struct WasmDecoder<'a> {
+    data: &'a [u8],
+}
+
+impl<'a> WasmDecoder<'a> {
+    fn new(data: &'a [u8]) -> WasmDecoder<'a> {
+        WasmDecoder { data }
+    }
+
+    // Consume and return a single byte.
+    fn byte(&mut self) -> u8 {
+        self.skip(1)[0]
+    }
+
+    // Consume a ULEB128-encoded u32.
+    fn u32(&mut self) -> u32 {
+        let (n, l1) = leb128::read_u32_leb128(self.data);
+        self.data = &self.data[l1..];
+        return n
+    }
+
+    // Consume `amt` bytes and return them as a slice borrowed from the input.
+    fn skip(&mut self, amt: usize) -> &'a [u8] {
+        let (data, rest) = self.data.split_at(amt);
+        self.data = rest;
+        data
+    }
+
+    // Consume a length-prefixed UTF-8 string.
+    fn str(&mut self) -> &'a str {
+        let len = self.u32();
+        str::from_utf8(self.skip(len as usize)).unwrap()
+    }
+
+    fn bool(&mut self) -> bool {
+        self.byte() == 1
+    }
+
+    // Consume a limits record: a has-max flag byte, the minimum, then the
+    // maximum only if the flag was set.
+    fn limits(&mut self) -> (u32, Option<u32>) {
+        let has_max = self.bool();
+        (self.u32(), if has_max { Some(self.u32()) } else { None })
+    }
+}
+
+/// Append-only builder for wasm byte streams; the write methods mirror the
+/// corresponding `WasmDecoder` read methods.
+struct WasmEncoder {
+    data: Vec<u8>,
+}
+
+impl WasmEncoder {
+    fn new() -> WasmEncoder {
+        WasmEncoder { data: Vec::new() }
+    }
+
+    // Append a ULEB128-encoded u32.
+    fn u32(&mut self, val: u32) {
+        let at = self.data.len();
+        leb128::write_u32_leb128(&mut self.data, at, val);
+    }
+
+    fn byte(&mut self, val: u8) {
+        self.data.push(val);
+    }
+
+    // Append a length-prefixed byte string.
+    fn bytes(&mut self, val: &[u8]) {
+        self.u32(val.len() as u32);
+        self.data.extend_from_slice(val);
+    }
+
+    fn str(&mut self, val: &str) {
+        self.bytes(val.as_bytes())
+    }
+
+    fn bool(&mut self, b: bool) {
+        self.byte(b as u8);
+    }
+
+    // Append a limits record: has-max flag, minimum, then the optional maximum.
+    fn limits(&mut self, limits: (u32, Option<u32>)) {
+        self.bool(limits.1.is_some());
+        self.u32(limits.0);
+        if let Some(c) = limits.1 {
+            self.u32(c);
+        }
+    }
+}
diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs
new file mode 100644
index 00000000000..1151e013131
--- /dev/null
+++ b/src/librustc_codegen_llvm/back/write.rs
@@ -0,0 +1,2390 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use attributes;
+use back::bytecode::{self, RLIB_BYTECODE_EXTENSION};
+use back::lto::{self, ModuleBuffer, ThinBuffer};
+use back::link::{self, get_linker, remove};
+use back::command::Command;
+use back::linker::LinkerInfo;
+use back::symbol_export::ExportedSymbols;
+use base;
+use consts;
+use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir};
+use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind};
+use rustc::middle::cstore::{LinkMeta, EncodedMetadata};
+use rustc::session::config::{self, OutputFilenames, OutputType, Passes, SomePasses,
+                             AllPasses, Sanitizer, Lto};
+use rustc::session::Session;
+use rustc::util::nodemap::FxHashMap;
+use time_graph::{self, TimeGraph, Timeline};
+use llvm;
+use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef};
+use llvm::{SMDiagnosticRef, ContextRef};
+use {CodegenResults, ModuleSource, ModuleCodegen, CompiledModule, ModuleKind};
+use CrateInfo;
+use rustc::hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc::ty::TyCtxt;
+use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry};
+use rustc::util::common::path2cstr;
+use rustc::util::fs::{link_or_copy};
+use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId};
+use errors::emitter::{Emitter};
+use syntax::attr;
+use syntax::ext::hygiene::Mark;
+use syntax_pos::MultiSpan;
+use syntax_pos::symbol::Symbol;
+use type_::Type;
+use context::{is_pie_binary, get_reloc_model};
+use common::{C_bytes_in_context, val_ty};
+use jobserver::{Client, Acquired};
+use rustc_demangle;
+
+use std::any::Any;
+use std::ffi::{CString, CStr};
+use std::fs;
+use std::io::{self, Write};
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::Arc;
+use std::sync::mpsc::{channel, Sender, Receiver};
+use std::slice;
+use std::time::Instant;
+use std::thread;
+use libc::{c_uint, c_void, c_char, size_t};
+
+/// Maps `-C relocation-model` option strings to LLVM relocation modes.
+pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] = [
+    ("pic", llvm::RelocMode::PIC),
+    ("static", llvm::RelocMode::Static),
+    ("default", llvm::RelocMode::Default),
+    ("dynamic-no-pic", llvm::RelocMode::DynamicNoPic),
+    ("ropi", llvm::RelocMode::ROPI),
+    ("rwpi", llvm::RelocMode::RWPI),
+    ("ropi-rwpi", llvm::RelocMode::ROPI_RWPI),
+];
+
+/// Maps `-C code-model` option strings to LLVM code models.
+pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[
+    ("small", llvm::CodeModel::Small),
+    ("kernel", llvm::CodeModel::Kernel),
+    ("medium", llvm::CodeModel::Medium),
+    ("large", llvm::CodeModel::Large),
+];
+
+/// Maps TLS-model option strings to LLVM thread-local storage modes.
+pub const TLS_MODEL_ARGS : [(&'static str, llvm::ThreadLocalMode); 4] = [
+    ("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic),
+    ("local-dynamic", llvm::ThreadLocalMode::LocalDynamic),
+    ("initial-exec", llvm::ThreadLocalMode::InitialExec),
+    ("local-exec", llvm::ThreadLocalMode::LocalExec),
+];
+
+/// Report a fatal codegen error, appending LLVM's last recorded error message
+/// (if any) to `msg`. Always diverges via `handler.fatal`.
+pub fn llvm_err(handler: &errors::Handler, msg: String) -> FatalError {
+    match llvm::last_error() {
+        Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
+        None => handler.fatal(&msg),
+    }
+}
+
+/// Have LLVM emit module `m` to `output` as the given `file_type` (object or
+/// assembly), using pass manager `pm` and target machine `target`.
+///
+/// Returns a `FatalError` (already reported through `handler`) if LLVM fails
+/// to write the file.
+pub fn write_output_file(
+        handler: &errors::Handler,
+        target: llvm::TargetMachineRef,
+        pm: llvm::PassManagerRef,
+        m: ModuleRef,
+        output: &Path,
+        file_type: llvm::FileType) -> Result<(), FatalError> {
+    unsafe {
+        let output_c = path2cstr(output);
+        let result = llvm::LLVMRustWriteOutputFile(
+                target, pm, m, output_c.as_ptr(), file_type);
+        if result.into_result().is_err() {
+            let msg = format!("could not write output to {}", output.display());
+            Err(llvm_err(handler, msg))
+        } else {
+            Ok(())
+        }
+    }
+}
+
+/// Translate rustc's `-C opt-level` setting into LLVM's codegen optimization
+/// level; size-oriented levels fall through to `Default`.
+fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel {
+    match optimize {
+      config::OptLevel::No => llvm::CodeGenOptLevel::None,
+      config::OptLevel::Less => llvm::CodeGenOptLevel::Less,
+      config::OptLevel::Default => llvm::CodeGenOptLevel::Default,
+      config::OptLevel::Aggressive => llvm::CodeGenOptLevel::Aggressive,
+      _ => llvm::CodeGenOptLevel::Default,
+    }
+}
+
+/// Translate rustc's `-C opt-level` setting into LLVM's size-optimization
+/// level; speed-oriented levels map to `None`.
+fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize {
+    match optimize {
+      config::OptLevel::Size => llvm::CodeGenOptSizeDefault,
+      config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive,
+      _ => llvm::CodeGenOptSizeNone,
+    }
+}
+
+/// Create a single LLVM target machine for this session, aborting compilation
+/// (via a fatal diagnostic) if creation fails. Convenience wrapper over
+/// `target_machine_factory`.
+pub fn create_target_machine(sess: &Session, find_features: bool) -> TargetMachineRef {
+    target_machine_factory(sess, find_features)().unwrap_or_else(|err| {
+        llvm_err(sess.diagnostic(), err).raise()
+    })
+}
+
+/// Build a thread-safe factory closure that creates LLVM target machines
+/// configured from the session's target and codegen options. All session
+/// state is captured eagerly so the returned closure is `Send + Sync` and
+/// can be invoked from codegen worker threads.
+//
+// If find_features is true this won't access `sess.crate_types` by assuming
+// that `is_pie_binary` is false. When we discover LLVM target features
+// `sess.crate_types` is uninitialized so we cannot access it.
+pub fn target_machine_factory(sess: &Session, find_features: bool)
+    -> Arc<Fn() -> Result<TargetMachineRef, String> + Send + Sync>
+{
+    let reloc_model = get_reloc_model(sess);
+
+    let opt_level = get_llvm_opt_level(sess.opts.optimize);
+    let use_softfp = sess.opts.cg.soft_float;
+
+    let ffunction_sections = sess.target.target.options.function_sections;
+    let fdata_sections = ffunction_sections;
+
+    // `-C code-model` takes precedence over the target's default code model.
+    let code_model_arg = sess.opts.cg.code_model.as_ref().or(
+        sess.target.target.options.code_model.as_ref(),
+    );
+
+    let code_model = match code_model_arg {
+        Some(s) => {
+            match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
+                Some(x) => x.1,
+                _ => {
+                    sess.err(&format!("{:?} is not a valid code model",
+                                      code_model_arg));
+                    sess.abort_if_errors();
+                    bug!();
+                }
+            }
+        }
+        None => llvm::CodeModel::None,
+    };
+
+    let singlethread = sess.target.target.options.singlethread;
+
+    let triple = &sess.target.target.llvm_target;
+
+    // Everything passed across the FFI boundary is pre-converted to CStrings
+    // here so the closure below owns NUL-terminated copies.
+    let triple = CString::new(triple.as_bytes()).unwrap();
+    let cpu = sess.target_cpu();
+    let cpu = CString::new(cpu.as_bytes()).unwrap();
+    let features = attributes::llvm_target_features(sess)
+        .collect::<Vec<_>>()
+        .join(",");
+    let features = CString::new(features).unwrap();
+    let is_pie_binary = !find_features && is_pie_binary(sess);
+    let trap_unreachable = sess.target.target.options.trap_unreachable;
+
+    Arc::new(move || {
+        let tm = unsafe {
+            llvm::LLVMRustCreateTargetMachine(
+                triple.as_ptr(), cpu.as_ptr(), features.as_ptr(),
+                code_model,
+                reloc_model,
+                opt_level,
+                use_softfp,
+                is_pie_binary,
+                ffunction_sections,
+                fdata_sections,
+                trap_unreachable,
+                singlethread,
+            )
+        };
+
+        // A null return from LLVM means machine creation failed (e.g. unknown
+        // triple); surface it as an error string for the caller to report.
+        if tm.is_null() {
+            Err(format!("Could not create LLVM TargetMachine for triple: {}",
+                        triple.to_str().unwrap()))
+        } else {
+            Ok(tm)
+        }
+    })
+}
+
+/// Module-specific configuration for `optimize_and_codegen`.
+pub struct ModuleConfig {
+    /// Names of additional optimization passes to run.
+    passes: Vec<String>,
+    /// Some(level) to optimize at a certain level, or None to run
+    /// absolutely no optimizations (used for the metadata module).
+    pub opt_level: Option<llvm::CodeGenOptLevel>,
+
+    /// Some(level) to optimize binary size, or None to not affect program size.
+    opt_size: Option<llvm::CodeGenOptSize>,
+
+    // Profile-guided optimization settings; presumably the instrumentation
+    // output path and the profile-data input path — not set in this view,
+    // confirm against the session options that populate them.
+    pgo_gen: Option<String>,
+    pgo_use: String,
+
+    // Flags indicating which outputs to produce.
+    emit_no_opt_bc: bool,
+    emit_bc: bool,
+    emit_bc_compressed: bool,
+    emit_lto_bc: bool,
+    emit_ir: bool,
+    emit_asm: bool,
+    emit_obj: bool,
+    // Miscellaneous flags.  These are mostly copied from command-line
+    // options.
+    no_verify: bool,
+    no_prepopulate_passes: bool,
+    no_builtins: bool,
+    time_passes: bool,
+    vectorize_loop: bool,
+    vectorize_slp: bool,
+    merge_functions: bool,
+    inline_threshold: Option<usize>,
+    // Instead of creating an object file by doing LLVM codegen, just
+    // make the object file bitcode. Provides easy compatibility with
+    // emscripten's ecc compiler, when used as the linker.
+    obj_is_bitcode: bool,
+    no_integrated_as: bool,
+    // Embed the (marker for) LLVM bitcode in the object file; at most one of
+    // these two is enabled, chosen in `set_flags` based on the opt level.
+    embed_bitcode: bool,
+    embed_bitcode_marker: bool,
+}
+
+impl ModuleConfig {
+    /// Create a config with the given extra passes and every flag disabled;
+    /// callers enable what they need afterwards (see `set_flags`).
+    fn new(passes: Vec<String>) -> ModuleConfig {
+        ModuleConfig {
+            passes,
+            opt_level: None,
+            opt_size: None,
+
+            pgo_gen: None,
+            pgo_use: String::new(),
+
+            emit_no_opt_bc: false,
+            emit_bc: false,
+            emit_bc_compressed: false,
+            emit_lto_bc: false,
+            emit_ir: false,
+            emit_asm: false,
+            emit_obj: false,
+            obj_is_bitcode: false,
+            embed_bitcode: false,
+            embed_bitcode_marker: false,
+            no_integrated_as: false,
+
+            no_verify: false,
+            no_prepopulate_passes: false,
+            no_builtins: false,
+            time_passes: false,
+            vectorize_loop: false,
+            vectorize_slp: false,
+            merge_functions: false,
+            inline_threshold: None
+        }
+    }
+
+    /// Populate the flags that derive from session/target options rather than
+    /// from the specific module being compiled.
+    fn set_flags(&mut self, sess: &Session, no_builtins: bool) {
+        self.no_verify = sess.no_verify();
+        self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes;
+        self.no_builtins = no_builtins || sess.target.target.options.no_builtins;
+        self.time_passes = sess.time_passes();
+        self.inline_threshold = sess.opts.cg.inline_threshold;
+        self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode;
+        let embed_bitcode = sess.target.target.options.embed_bitcode ||
+                            sess.opts.debugging_opts.embed_bitcode ||
+                            sess.opts.debugging_opts.cross_lang_lto.embed_bitcode();
+        if embed_bitcode {
+            // At low opt levels only a marker is embedded, not full bitcode.
+            match sess.opts.optimize {
+                config::OptLevel::No |
+                config::OptLevel::Less => {
+                    self.embed_bitcode_marker = embed_bitcode;
+                }
+                _ => self.embed_bitcode = embed_bitcode,
+            }
+        }
+
+        // Copy what clang does by turning on loop vectorization at O2 and
+        // slp vectorization at O3. Otherwise configure other optimization aspects
+        // of this pass manager builder.
+        // Turn off vectorization for emscripten, as it's not very well supported.
+        self.vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
+                             (sess.opts.optimize == config::OptLevel::Default ||
+                              sess.opts.optimize == config::OptLevel::Aggressive) &&
+                             !sess.target.target.options.is_like_emscripten;
+
+        self.vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
+                            sess.opts.optimize == config::OptLevel::Aggressive &&
+                            !sess.target.target.options.is_like_emscripten;
+
+        self.merge_functions = sess.opts.optimize == config::OptLevel::Default ||
+                               sess.opts.optimize == config::OptLevel::Aggressive;
+    }
+}
+
+/// Assembler name and command used by codegen when no_integrated_as is enabled
+struct AssemblerCommand {
+    // Path to the external assembler binary.
+    name: PathBuf,
+    // Pre-built command line to invoke it with.
+    cmd: Command,
+}
+
+/// Additional resources used by optimize_and_codegen (not module specific).
+/// Cloned once per codegen worker thread, hence `Clone` and the heavy use of
+/// `Arc` for the shared pieces.
+#[derive(Clone)]
+pub struct CodegenContext {
+    // Resources needed when running LTO
+    pub time_passes: bool,
+    pub lto: Lto,
+    pub no_landing_pads: bool,
+    pub save_temps: bool,
+    pub fewer_names: bool,
+    pub exported_symbols: Option<Arc<ExportedSymbols>>,
+    pub opts: Arc<config::Options>,
+    pub crate_types: Vec<config::CrateType>,
+    pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
+    output_filenames: Arc<OutputFilenames>,
+    // Per-kind module configs; resolved by `config()` below.
+    regular_module_config: Arc<ModuleConfig>,
+    metadata_module_config: Arc<ModuleConfig>,
+    allocator_module_config: Arc<ModuleConfig>,
+    pub tm_factory: Arc<Fn() -> Result<TargetMachineRef, String> + Send + Sync>,
+    pub msvc_imps_needed: bool,
+    pub target_pointer_width: String,
+    debuginfo: config::DebugInfoLevel,
+
+    // Number of cgus excluding the allocator/metadata modules
+    pub total_cgus: usize,
+    // Handler to use for diagnostics produced during codegen.
+    pub diag_emitter: SharedEmitter,
+    // LLVM passes added by plugins.
+    pub plugin_passes: Vec<String>,
+    // LLVM optimizations for which we want to print remarks.
+    pub remark: Passes,
+    // Worker thread number
+    pub worker: usize,
+    // The incremental compilation session directory, or None if we are not
+    // compiling incrementally
+    pub incr_comp_session_dir: Option<PathBuf>,
+    // Channel back to the main control thread to send messages to
+    coordinator_send: Sender<Box<Any + Send>>,
+    // A reference to the TimeGraph so we can register timings. None means that
+    // measuring is disabled.
+    time_graph: Option<TimeGraph>,
+    // The assembler command if no_integrated_as option is enabled, None otherwise
+    assembler_cmd: Option<Arc<AssemblerCommand>>,
+}
+
+impl CodegenContext {
+    /// Create a diagnostics `Handler` that forwards everything to the shared
+    /// emitter (and from there back to the coordinator thread).
+    pub fn create_diag_handler(&self) -> Handler {
+        Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone()))
+    }
+
+    /// Select the module config matching the module kind.
+    pub(crate) fn config(&self, kind: ModuleKind) -> &ModuleConfig {
+        match kind {
+            ModuleKind::Regular => &self.regular_module_config,
+            ModuleKind::Metadata => &self.metadata_module_config,
+            ModuleKind::Allocator => &self.allocator_module_config,
+        }
+    }
+
+    /// If `-C save-temps` is enabled, dump the module's bitcode to a temp file
+    /// tagged with `name` (e.g. "<cgu>.<name>.bc"); otherwise do nothing.
+    pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) {
+        if !self.save_temps {
+            return
+        }
+        unsafe {
+            let ext = format!("{}.bc", name);
+            let cgu = Some(&module.name[..]);
+            let path = self.output_filenames.temp_path_ext(&ext, cgu);
+            let cstr = path2cstr(&path);
+            let llmod = module.llvm().unwrap().llmod;
+            llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+        }
+    }
+}
+
+/// RAII registration of LLVM diagnostic and inline-asm handlers on `llcx`.
+/// Owns the boxed `(CodegenContext, Handler)` pair whose heap address is
+/// handed to LLVM as the opaque user-data pointer, so the pair must outlive
+/// the registration (guaranteed by this struct's lifetime).
+struct DiagnosticHandlers<'a> {
+    inner: Box<(&'a CodegenContext, &'a Handler)>,
+    llcx: ContextRef,
+}
+
+impl<'a> DiagnosticHandlers<'a> {
+    /// Register `diagnostic_handler` and `inline_asm_handler` with `llcx`,
+    /// passing the boxed `(cgcx, handler)` pair as the callbacks' user data.
+    fn new(cgcx: &'a CodegenContext,
+           handler: &'a Handler,
+           llcx: ContextRef) -> DiagnosticHandlers<'a> {
+        // Box the pair so its address stays stable while LLVM holds a raw
+        // pointer to it.
+        let data = Box::new((cgcx, handler));
+        unsafe {
+            let arg = &*data as &(_, _) as *const _ as *mut _;
+            llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, arg);
+            llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, arg);
+        }
+        DiagnosticHandlers {
+            inner: data,
+            llcx: llcx,
+        }
+    }
+}
+
+impl<'a> Drop for DiagnosticHandlers<'a> {
+    // Deregister the callbacks (by installing null user data) before the boxed
+    // pair they point at is freed.
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, 0 as *mut _);
+            llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, 0 as *mut _);
+        }
+    }
+}
+
+// Forward an inline-asm error from LLVM to the shared emitter; `cookie`
+// identifies the originating asm statement for span reconstruction.
+unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext,
+                                               msg: &'b str,
+                                               cookie: c_uint) {
+    cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string());
+}
+
+// C callback LLVM invokes for inline-asm diagnostics. `user` is the opaque
+// pointer registered in `DiagnosticHandlers::new` — a `(CodegenContext,
+// Handler)` pair — and may be null after deregistration.
+unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef,
+                                        user: *const c_void,
+                                        cookie: c_uint) {
+    if user.is_null() {
+        return
+    }
+    let (cgcx, _) = *(user as *const (&CodegenContext, &Handler));
+
+    let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
+        .expect("non-UTF8 SMDiagnostic");
+
+    report_inline_asm(cgcx, &msg, cookie);
+}
+
+// C callback LLVM invokes for general diagnostics (optimization remarks, PGO
+// warnings, inline asm). `user` is the registered `(CodegenContext, Handler)`
+// pair; null after deregistration.
+unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_void) {
+    if user.is_null() {
+        return
+    }
+    let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler));
+
+    match llvm::diagnostic::Diagnostic::unpack(info) {
+        llvm::diagnostic::InlineAsm(inline) => {
+            report_inline_asm(cgcx,
+                              &llvm::twine_to_string(inline.message),
+                              inline.cookie);
+        }
+
+        llvm::diagnostic::Optimization(opt) => {
+            // Only surface remarks the user asked for via `-C remark`.
+            let enabled = match cgcx.remark {
+                AllPasses => true,
+                SomePasses(ref v) => v.iter().any(|s| *s == opt.pass_name),
+            };
+
+            if enabled {
+                diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
+                                                opt.kind.describe(),
+                                                opt.pass_name,
+                                                opt.filename,
+                                                opt.line,
+                                                opt.column,
+                                                opt.message));
+            }
+        }
+        llvm::diagnostic::PGO(diagnostic_ref) => {
+            let msg = llvm::build_string(|s| {
+                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+            }).expect("non-UTF8 PGO diagnostic");
+            diag_handler.warn(&msg);
+        }
+        llvm::diagnostic::UnknownDiagnostic(..) => {},
+    }
+}
+
+/// Run LLVM's optimization pipeline over an already-codegened module: an
+/// optional no-opt bitcode dump, then the function and module pass managers
+/// populated from `config` plus any user/plugin-requested passes.
+// Unsafe due to LLVM calls.
+unsafe fn optimize(cgcx: &CodegenContext,
+                   diag_handler: &Handler,
+                   module: &ModuleCodegen,
+                   config: &ModuleConfig,
+                   timeline: &mut Timeline)
+    -> Result<(), FatalError>
+{
+    let (llmod, llcx, tm) = match module.source {
+        ModuleSource::Codegened(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm),
+        ModuleSource::Preexisting(_) => {
+            bug!("optimize_and_codegen: called with ModuleSource::Preexisting")
+        }
+    };
+
+    // Route LLVM diagnostics for this context through our handler for the
+    // duration of this function (deregistered on drop).
+    let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+    let module_name = module.name.clone();
+    let module_name = Some(&module_name[..]);
+
+    if config.emit_no_opt_bc {
+        let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
+        let out = path2cstr(&out);
+        llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+    }
+
+    if config.opt_level.is_some() {
+        // Create the two optimizing pass managers. These mirror what clang
+        // does, and are populated by LLVM's default PassManagerBuilder.
+        // Each manager has a different set of passes, but they also share
+        // some common passes.
+        let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+        let mpm = llvm::LLVMCreatePassManager();
+
+        // If we're verifying or linting, add them to the function pass
+        // manager.
+        // Returns false if LLVM doesn't know a pass by that name.
+        let addpass = |pass_name: &str| {
+            let pass_name = CString::new(pass_name).unwrap();
+            let pass = llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr());
+            if pass.is_null() {
+                return false;
+            }
+            let pass_manager = match llvm::LLVMRustPassKind(pass) {
+                llvm::PassKind::Function => fpm,
+                llvm::PassKind::Module => mpm,
+                llvm::PassKind::Other => {
+                    diag_handler.err("Encountered LLVM pass kind we can't handle");
+                    return true
+                },
+            };
+            llvm::LLVMRustAddPass(pass_manager, pass);
+            true
+        };
+
+        if !config.no_verify { assert!(addpass("verify")); }
+        if !config.no_prepopulate_passes {
+            llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
+            llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
+            let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None);
+            // ThinLTO needs modules prepared (but not fully optimized) here.
+            let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal;
+            with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
+                llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+                llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
+            })
+        }
+
+        // User-requested passes (`-C passes`) are best-effort: warn and skip.
+        for pass in &config.passes {
+            if !addpass(pass) {
+                diag_handler.warn(&format!("unknown pass `{}`, ignoring",
+                                           pass));
+            }
+        }
+
+        // Plugin-requested passes are mandatory: error if unknown.
+        for pass in &cgcx.plugin_passes {
+            if !addpass(pass) {
+                diag_handler.err(&format!("a plugin asked for LLVM pass \
+                                           `{}` but LLVM does not \
+                                           recognize it", pass));
+            }
+        }
+
+        diag_handler.abort_if_errors();
+
+        // Finally, run the actual optimization passes
+        time_ext(config.time_passes,
+                 None,
+                 &format!("llvm function passes [{}]", module_name.unwrap()),
+                 || {
+            llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
+        });
+        timeline.record("fpm");
+        time_ext(config.time_passes,
+                 None,
+                 &format!("llvm module passes [{}]", module_name.unwrap()),
+                 || {
+            llvm::LLVMRunPassManager(mpm, llmod)
+        });
+
+        // Deallocate managers that we're now done with
+        llvm::LLVMDisposePassManager(fpm);
+        llvm::LLVMDisposePassManager(mpm);
+    }
+    Ok(())
+}
+
+/// Run the LTO step over the given codegened modules and convert each LTO'd
+/// module back into a `WorkItem` with its estimated cost, for re-queueing on
+/// the codegen worker pool. Raises a fatal error if LTO itself fails.
+fn generate_lto_work(cgcx: &CodegenContext,
+                     modules: Vec<ModuleCodegen>)
+    -> Vec<(WorkItem, u64)>
+{
+    // Record this phase on the time graph when profiling is enabled; use a
+    // no-op timeline otherwise.
+    let mut timeline = cgcx.time_graph.as_ref().map(|tg| {
+        tg.start(CODEGEN_WORKER_TIMELINE,
+                 CODEGEN_WORK_PACKAGE_KIND,
+                 "generate lto")
+    }).unwrap_or(Timeline::noop());
+    let lto_modules = lto::run(cgcx, modules, &mut timeline)
+        .unwrap_or_else(|e| e.raise());
+
+    lto_modules.into_iter().map(|module| {
+        let cost = module.cost();
+        (WorkItem::LTO(module), cost)
+    }).collect()
+}
+
+/// Run the codegen phase for a single optimized LLVM module, writing out
+/// whatever artifacts `config` requests: object file, assembly, LLVM IR,
+/// bitcode, compressed bitcode, and/or bitcode embedded in the module.
+///
+/// `unsafe` because it drives raw LLVM FFI handles (`llmod`, `llcx`, `tm`)
+/// extracted from `module.source`. Returns the `CompiledModule` describing
+/// which artifacts were produced, or `FatalError` if a write step failed.
+unsafe fn codegen(cgcx: &CodegenContext,
+                  diag_handler: &Handler,
+                  module: ModuleCodegen,
+                  config: &ModuleConfig,
+                  timeline: &mut Timeline)
+    -> Result<CompiledModule, FatalError>
+{
+    timeline.record("codegen");
+    // Only freshly codegened modules reach this function; modules reused
+    // from the incremental cache are handled in `execute_work_item`.
+    let (llmod, llcx, tm) = match module.source {
+        ModuleSource::Codegened(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm),
+        ModuleSource::Preexisting(_) => {
+            bug!("codegen: called with ModuleSource::Preexisting")
+        }
+    };
+    let module_name = module.name.clone();
+    let module_name = Some(&module_name[..]);
+    // Install LLVM diagnostic/inline-asm handlers for the duration of this
+    // function; they are uninstalled when `handlers` is dropped below.
+    let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+    if cgcx.msvc_imps_needed {
+        create_msvc_imps(cgcx, llcx, llmod);
+    }
+
+    // A codegen-specific pass manager is used to generate object
+    // files for an LLVM module.
+    //
+    // Apparently each of these pass managers is a one-shot kind of
+    // thing, so we create a new one for each type of output. The
+    // pass manager passed to the closure should be ensured to not
+    // escape the closure itself, and the manager should only be
+    // used once.
+    unsafe fn with_codegen<F, R>(tm: TargetMachineRef,
+                                 llmod: ModuleRef,
+                                 no_builtins: bool,
+                                 f: F) -> R
+        where F: FnOnce(PassManagerRef) -> R,
+    {
+        let cpm = llvm::LLVMCreatePassManager();
+        llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
+        llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+        f(cpm)
+    }
+
+    // If we don't have the integrated assembler, then we need to emit asm
+    // from LLVM and use `gcc` to create the object file.
+    let asm_to_obj = config.emit_obj && config.no_integrated_as;
+
+    // Change what we write and cleanup based on whether obj files are
+    // just llvm bitcode. In that case write bitcode, and possibly
+    // delete the bitcode if it wasn't requested. Don't generate the
+    // machine code, instead copy the .o file from the .bc
+    let write_bc = config.emit_bc || config.obj_is_bitcode;
+    let rm_bc = !config.emit_bc && config.obj_is_bitcode;
+    let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
+    let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
+
+    let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+    let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+
+    if write_bc || config.emit_bc_compressed || config.embed_bitcode {
+        // `thin`/`old` exist only to keep the chosen buffer alive while we
+        // borrow its bytes through `data`; exactly one of them is set.
+        let thin;
+        let old;
+        let data = if llvm::LLVMRustThinLTOAvailable() {
+            thin = ThinBuffer::new(llmod);
+            thin.data()
+        } else {
+            old = ModuleBuffer::new(llmod);
+            old.data()
+        };
+        timeline.record("make-bc");
+
+        if write_bc {
+            if let Err(e) = fs::write(&bc_out, data) {
+                diag_handler.err(&format!("failed to write bytecode: {}", e));
+            }
+            timeline.record("write-bc");
+        }
+
+        if config.embed_bitcode {
+            embed_bitcode(cgcx, llcx, llmod, Some(data));
+            timeline.record("embed-bc");
+        }
+
+        if config.emit_bc_compressed {
+            // Compressed bytecode goes into rlibs so downstream crates can
+            // LTO against this one (see `need_crate_bitcode_for_rlib`).
+            let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
+            let data = bytecode::encode(&module.llmod_id, data);
+            if let Err(e) = fs::write(&dst, data) {
+                diag_handler.err(&format!("failed to write bytecode: {}", e));
+            }
+            timeline.record("compress-bc");
+        }
+    } else if config.embed_bitcode_marker {
+        embed_bitcode(cgcx, llcx, llmod, None);
+    }
+
+    time_ext(config.time_passes, None, &format!("codegen passes [{}]", module_name.unwrap()),
+         || -> Result<(), FatalError> {
+        if config.emit_ir {
+            let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+            let out = path2cstr(&out);
+
+            // Callback handed to LLVM so the printed IR shows demangled Rust
+            // symbol names. Returns the number of bytes written to
+            // `output_ptr`, or 0 to make LLVM fall back to the mangled name
+            // (non-UTF-8 input, not a Rust symbol, or buffer too small).
+            extern "C" fn demangle_callback(input_ptr: *const c_char,
+                                            input_len: size_t,
+                                            output_ptr: *mut c_char,
+                                            output_len: size_t) -> size_t {
+                let input = unsafe {
+                    slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
+                };
+
+                let input = match str::from_utf8(input) {
+                    Ok(s) => s,
+                    Err(_) => return 0,
+                };
+
+                let output = unsafe {
+                    slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
+                };
+                let mut cursor = io::Cursor::new(output);
+
+                let demangled = match rustc_demangle::try_demangle(input) {
+                    Ok(d) => d,
+                    Err(_) => return 0,
+                };
+
+                if let Err(_) = write!(cursor, "{:#}", demangled) {
+                    // Possible only if provided buffer is not big enough
+                    return 0;
+                }
+
+                cursor.position() as size_t
+            }
+
+            with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                llvm::LLVMRustPrintModule(cpm, llmod, out.as_ptr(), demangle_callback);
+                llvm::LLVMDisposePassManager(cpm);
+            });
+            timeline.record("ir");
+        }
+
+        if config.emit_asm || asm_to_obj {
+            let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+
+            // We can't use the same module for asm and binary output, because that triggers
+            // various errors like invalid IR or broken binaries, so we might have to clone the
+            // module to produce the asm output
+            let llmod = if config.emit_obj {
+                llvm::LLVMCloneModule(llmod)
+            } else {
+                llmod
+            };
+            with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                write_output_file(diag_handler, tm, cpm, llmod, &path,
+                                  llvm::FileType::AssemblyFile)
+            })?;
+            // If we cloned above, dispose of the clone now that the asm file
+            // has been written.
+            if config.emit_obj {
+                llvm::LLVMDisposeModule(llmod);
+            }
+            timeline.record("asm");
+        }
+
+        if write_obj {
+            with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                write_output_file(diag_handler, tm, cpm, llmod, &obj_out,
+                                  llvm::FileType::ObjectFile)
+            })?;
+            timeline.record("obj");
+        } else if asm_to_obj {
+            // No integrated assembler: run the external assembler over the
+            // .s file we just emitted to produce the object file.
+            let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+            run_assembler(cgcx, diag_handler, &assembly, &obj_out);
+            timeline.record("asm_to_obj");
+
+            // The .s file was only an intermediate unless the user asked
+            // for it (or for temporaries) explicitly.
+            if !config.emit_asm && !cgcx.save_temps {
+                drop(fs::remove_file(&assembly));
+            }
+        }
+
+        Ok(())
+    })?;
+
+    if copy_bc_to_obj {
+        debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+        if let Err(e) = link_or_copy(&bc_out, &obj_out) {
+            diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+        }
+    }
+
+    if rm_bc {
+        debug!("removing_bitcode {:?}", bc_out);
+        if let Err(e) = fs::remove_file(&bc_out) {
+            diag_handler.err(&format!("failed to remove bitcode: {}", e));
+        }
+    }
+
+    // Uninstall the LLVM diagnostic handlers before handing back results.
+    drop(handlers);
+    Ok(module.into_compiled_module(config.emit_obj,
+                                   config.emit_bc,
+                                   config.emit_bc_compressed,
+                                   &cgcx.output_filenames))
+}
+
+/// Embed the bitcode of an LLVM module in the LLVM module itself.
+///
+/// This is done primarily for iOS where it appears to be standard to compile C
+/// code at least with `-fembed-bitcode` which creates two sections in the
+/// executable:
+///
+/// * __LLVM,__bitcode
+/// * __LLVM,__cmdline
+///
+/// It appears *both* of these sections are necessary to get the linker to
+/// recognize what's going on. For us though we just always throw in an empty
+/// cmdline section.
+///
+/// Furthermore debug/O1 builds don't actually embed bitcode but rather just
+/// embed an empty section.
+///
+/// Basically all of this is us attempting to follow in the footsteps of clang
+/// on iOS. See #35968 for lots more info.
+unsafe fn embed_bitcode(cgcx: &CodegenContext,
+                        llcx: ContextRef,
+                        llmod: ModuleRef,
+                        bitcode: Option<&[u8]>) {
+    // `bitcode == None` is the "marker" case from the doc comment above:
+    // the sections are created but left empty.
+    let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
+    let llglobal = llvm::LLVMAddGlobal(
+        llmod,
+        val_ty(llconst).to_ref(),
+        "rustc.embedded.module\0".as_ptr() as *const _,
+    );
+    llvm::LLVMSetInitializer(llglobal, llconst);
+
+    let is_apple = cgcx.opts.target_triple.triple().contains("-ios") ||
+                   cgcx.opts.target_triple.triple().contains("-darwin");
+
+    // Mach-O uses "segment,section" names; ELF-style targets get ".llvmbc".
+    let section = if is_apple {
+        "__LLVM,__bitcode\0"
+    } else {
+        ".llvmbc\0"
+    };
+    llvm::LLVMSetSection(llglobal, section.as_ptr() as *const _);
+    llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+    llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+    // Companion cmdline section; always empty for us (see doc comment).
+    let llconst = C_bytes_in_context(llcx, &[]);
+    let llglobal = llvm::LLVMAddGlobal(
+        llmod,
+        val_ty(llconst).to_ref(),
+        "rustc.embedded.cmdline\0".as_ptr() as *const _,
+    );
+    llvm::LLVMSetInitializer(llglobal, llconst);
+    let section = if  is_apple {
+        "__LLVM,__cmdline\0"
+    } else {
+        ".llvmcmd\0"
+    };
+    llvm::LLVMSetSection(llglobal, section.as_ptr() as *const _);
+    llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+}
+
+/// The full set of compiled artifacts handed back when codegen finishes:
+/// one `CompiledModule` per codegen unit, plus the metadata module and, if
+/// one was generated, the allocator shim module.
+pub(crate) struct CompiledModules {
+    pub modules: Vec<CompiledModule>,
+    pub metadata_module: CompiledModule,
+    pub allocator_module: Option<CompiledModule>,
+}
+
+/// Whether compressed crate bitcode must be emitted: true when this
+/// compilation produces an rlib (so the bitcode can be inserted into the
+/// archive for downstream LTO) while also producing an executable output.
+fn need_crate_bitcode_for_rlib(sess: &Session) -> bool {
+    sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
+    sess.opts.output_types.contains_key(&OutputType::Exe)
+}
+
+/// Set up the asynchronous codegen machinery: derive the per-module-kind
+/// `ModuleConfig`s (regular / metadata / allocator) from the session
+/// options, spawn the coordinator thread via `start_executing_work`, and
+/// return the `OngoingCodegen` handle through which the caller submits
+/// codegened modules and later collects the compiled results.
+pub fn start_async_codegen(tcx: TyCtxt,
+                               time_graph: Option<TimeGraph>,
+                               link: LinkMeta,
+                               metadata: EncodedMetadata,
+                               coordinator_receive: Receiver<Box<Any + Send>>,
+                               total_cgus: usize)
+                               -> OngoingCodegen {
+    let sess = tcx.sess;
+    let crate_name = tcx.crate_name(LOCAL_CRATE);
+    let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins");
+    let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs,
+                                                       "windows_subsystem");
+    // Validate `#![windows_subsystem]` eagerly so a bad value is a fatal
+    // error before any codegen work is started.
+    let windows_subsystem = subsystem.map(|subsystem| {
+        if subsystem != "windows" && subsystem != "console" {
+            tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \
+                                     `windows` and `console` are allowed",
+                                    subsystem));
+        }
+        subsystem.to_string()
+    });
+
+    let linker_info = LinkerInfo::new(tcx);
+    let crate_info = CrateInfo::new(tcx);
+
+    // Figure out what we actually need to build.
+    let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone());
+    let mut metadata_config = ModuleConfig::new(vec![]);
+    let mut allocator_config = ModuleConfig::new(vec![]);
+
+    // Sanitizers are implemented as extra LLVM passes, added only to the
+    // regular-module config (not metadata/allocator).
+    if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer {
+        match *sanitizer {
+            Sanitizer::Address => {
+                modules_config.passes.push("asan".to_owned());
+                modules_config.passes.push("asan-module".to_owned());
+            }
+            Sanitizer::Memory => {
+                modules_config.passes.push("msan".to_owned())
+            }
+            Sanitizer::Thread => {
+                modules_config.passes.push("tsan".to_owned())
+            }
+            _ => {}
+        }
+    }
+
+    // `-Z profile` enables gcov-style coverage instrumentation.
+    if sess.opts.debugging_opts.profile {
+        modules_config.passes.push("insert-gcov-profiling".to_owned())
+    }
+
+    modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone();
+    modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone();
+
+    modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize));
+    modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize));
+
+    // Save all versions of the bytecode if we're saving our temporaries.
+    if sess.opts.cg.save_temps {
+        modules_config.emit_no_opt_bc = true;
+        modules_config.emit_bc = true;
+        modules_config.emit_lto_bc = true;
+        metadata_config.emit_bc = true;
+        allocator_config.emit_bc = true;
+    }
+
+    // Emit compressed bitcode files for the crate if we're emitting an rlib.
+    // Whenever an rlib is created, the bitcode is inserted into the archive in
+    // order to allow LTO against it.
+    if need_crate_bitcode_for_rlib(sess) {
+        modules_config.emit_bc_compressed = true;
+        allocator_config.emit_bc_compressed = true;
+    }
+
+    modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as ||
+        tcx.sess.target.target.options.no_integrated_as;
+
+    // Translate the user's `--emit` requests into per-config emit flags.
+    for output_type in sess.opts.output_types.keys() {
+        match *output_type {
+            OutputType::Bitcode => { modules_config.emit_bc = true; }
+            OutputType::LlvmAssembly => { modules_config.emit_ir = true; }
+            OutputType::Assembly => {
+                modules_config.emit_asm = true;
+                // If we're not using the LLVM assembler, this function
+                // could be invoked specially with output_type_assembly, so
+                // in this case we still want the metadata object file.
+                if !sess.opts.output_types.contains_key(&OutputType::Assembly) {
+                    metadata_config.emit_obj = true;
+                    allocator_config.emit_obj = true;
+                }
+            }
+            OutputType::Object => { modules_config.emit_obj = true; }
+            OutputType::Metadata => { metadata_config.emit_obj = true; }
+            OutputType::Exe => {
+                modules_config.emit_obj = true;
+                metadata_config.emit_obj = true;
+                allocator_config.emit_obj = true;
+            },
+            OutputType::Mir => {}
+            OutputType::DepInfo => {}
+        }
+    }
+
+    modules_config.set_flags(sess, no_builtins);
+    metadata_config.set_flags(sess, no_builtins);
+    allocator_config.set_flags(sess, no_builtins);
+
+    // Exclude metadata and allocator modules from time_passes output, since
+    // they throw off the "LLVM passes" measurement.
+    metadata_config.time_passes = false;
+    allocator_config.time_passes = false;
+
+    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
+    let (codegen_worker_send, codegen_worker_receive) = channel();
+
+    // Spawn the coordinator thread that owns the work queue and workers.
+    let coordinator_thread = start_executing_work(tcx,
+                                                  &crate_info,
+                                                  shared_emitter,
+                                                  codegen_worker_send,
+                                                  coordinator_receive,
+                                                  total_cgus,
+                                                  sess.jobserver.clone(),
+                                                  time_graph.clone(),
+                                                  Arc::new(modules_config),
+                                                  Arc::new(metadata_config),
+                                                  Arc::new(allocator_config));
+
+    OngoingCodegen {
+        crate_name,
+        link,
+        metadata,
+        windows_subsystem,
+        linker_info,
+        crate_info,
+
+        time_graph,
+        coordinator_send: tcx.tx_to_llvm_workers.lock().clone(),
+        codegen_worker_receive,
+        shared_emitter_main,
+        future: coordinator_thread,
+        output_filenames: tcx.output_filenames(LOCAL_CRATE),
+    }
+}
+
+/// Copy each compiled module's artifacts (object file, bytecode and
+/// compressed bytecode, whichever exist) into the incremental compilation
+/// cache directory, returning the `WorkProductId -> WorkProduct` map for
+/// the modules that were successfully cached. Returns an empty map when
+/// incremental compilation is disabled.
+fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
+    sess: &Session,
+    compiled_modules: &CompiledModules
+) -> FxHashMap<WorkProductId, WorkProduct> {
+    let mut work_products = FxHashMap::default();
+
+    // Nothing to cache unless `-C incremental` is in use.
+    if sess.opts.incremental.is_none() {
+        return work_products;
+    }
+
+    for module in compiled_modules.modules.iter() {
+        // Gather whichever artifact files this module actually produced.
+        let mut files = vec![];
+
+        if let Some(ref path) = module.object {
+            files.push((WorkProductFileKind::Object, path.clone()));
+        }
+        if let Some(ref path) = module.bytecode {
+            files.push((WorkProductFileKind::Bytecode, path.clone()));
+        }
+        if let Some(ref path) = module.bytecode_compressed {
+            files.push((WorkProductFileKind::BytecodeCompressed, path.clone()));
+        }
+
+        // `None` here means the copy failed; the helper is expected to have
+        // reported it, so we simply skip recording the work product.
+        if let Some((id, product)) =
+                copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) {
+            work_products.insert(id, product);
+        }
+    }
+
+    work_products
+}
+
+/// Copy the per-codegen-unit temporary output files into the final
+/// user-visible output paths requested on the command line, then clean up
+/// temporaries that are no longer needed (unless `-C save-temps`).
+fn produce_final_output_artifacts(sess: &Session,
+                                  compiled_modules: &CompiledModules,
+                                  crate_output: &OutputFilenames) {
+    // Set while walking the requested output types below; they drive the
+    // temp-file cleanup decisions at the end of the function.
+    let mut user_wants_bitcode = false;
+    let mut user_wants_objects = false;
+
+    // Produce final compile outputs.
+    let copy_gracefully = |from: &Path, to: &Path| {
+        if let Err(e) = fs::copy(from, to) {
+            sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
+        }
+    };
+
+    let copy_if_one_unit = |output_type: OutputType,
+                            keep_numbered: bool| {
+        if compiled_modules.modules.len() == 1 {
+            // 1) Only one codegen unit.  In this case it's no difficulty
+            //    to copy `foo.0.x` to `foo.x`.
+            let module_name = Some(&compiled_modules.modules[0].name[..]);
+            let path = crate_output.temp_path(output_type, module_name);
+            copy_gracefully(&path,
+                            &crate_output.path(output_type));
+            if !sess.opts.cg.save_temps && !keep_numbered {
+                // The user just wants `foo.x`, not `foo.#module-name#.x`.
+                remove(sess, &path);
+            }
+        } else {
+            let ext = crate_output.temp_path(output_type, None)
+                                  .extension()
+                                  .unwrap()
+                                  .to_str()
+                                  .unwrap()
+                                  .to_owned();
+
+            if crate_output.outputs.contains_key(&output_type) {
+                // 2) Multiple codegen units, with `--emit foo=some_name`.  We have
+                //    no good solution for this case, so warn the user.
+                sess.warn(&format!("ignoring emit path because multiple .{} files \
+                                    were produced", ext));
+            } else if crate_output.single_output_file.is_some() {
+                // 3) Multiple codegen units, with `-o some_name`.  We have
+                //    no good solution for this case, so warn the user.
+                sess.warn(&format!("ignoring -o because multiple .{} files \
+                                    were produced", ext));
+            } else {
+                // 4) Multiple codegen units, but no explicit name.  We
+                //    just leave the `foo.0.x` files in place.
+                // (We don't have to do any work in this case.)
+            }
+        }
+    };
+
+    // Flag to indicate whether the user explicitly requested bitcode.
+    // Otherwise, we produced it only as a temporary output, and will need
+    // to get rid of it.
+    for output_type in crate_output.outputs.keys() {
+        match *output_type {
+            OutputType::Bitcode => {
+                user_wants_bitcode = true;
+                // Copy to .bc, but always keep the .0.bc.  There is a later
+                // check to figure out if we should delete .0.bc files, or keep
+                // them for making an rlib.
+                copy_if_one_unit(OutputType::Bitcode, true);
+            }
+            OutputType::LlvmAssembly => {
+                copy_if_one_unit(OutputType::LlvmAssembly, false);
+            }
+            OutputType::Assembly => {
+                copy_if_one_unit(OutputType::Assembly, false);
+            }
+            OutputType::Object => {
+                user_wants_objects = true;
+                copy_if_one_unit(OutputType::Object, true);
+            }
+            OutputType::Mir |
+            OutputType::Metadata |
+            OutputType::Exe |
+            OutputType::DepInfo => {}
+        }
+    }
+
+    // Clean up unwanted temporary files.
+
+    // We create the following files by default:
+    //  - #crate#.#module-name#.bc
+    //  - #crate#.#module-name#.o
+    //  - #crate#.crate.metadata.bc
+    //  - #crate#.crate.metadata.o
+    //  - #crate#.o (linked from crate.##.o)
+    //  - #crate#.bc (copied from crate.##.bc)
+    // We may create additional files if requested by the user (through
+    // `-C save-temps` or `--emit=` flags).
+
+    if !sess.opts.cg.save_temps {
+        // Remove the temporary .#module-name#.o objects.  If the user didn't
+        // explicitly request bitcode (with --emit=bc), and the bitcode is not
+        // needed for building an rlib, then we must remove .#module-name#.bc as
+        // well.
+
+        // Specific rules for keeping .#module-name#.bc:
+        //  - If the user requested bitcode (`user_wants_bitcode`), and
+        //    codegen_units > 1, then keep it.
+        //  - If the user requested bitcode but codegen_units == 1, then we
+        //    can toss .#module-name#.bc because we copied it to .bc earlier.
+        //  - If we're not building an rlib and the user didn't request
+        //    bitcode, then delete .#module-name#.bc.
+        // If you change how this works, also update back::link::link_rlib,
+        // where .#module-name#.bc files are (maybe) deleted after making an
+        // rlib.
+        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);
+
+        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;
+
+        let keep_numbered_objects = needs_crate_object ||
+                (user_wants_objects && sess.codegen_units() > 1);
+
+        for module in compiled_modules.modules.iter() {
+            if let Some(ref path) = module.object {
+                if !keep_numbered_objects {
+                    remove(sess, path);
+                }
+            }
+
+            if let Some(ref path) = module.bytecode {
+                if !keep_numbered_bitcode {
+                    remove(sess, path);
+                }
+            }
+        }
+
+        if !user_wants_bitcode {
+            if let Some(ref path) = compiled_modules.metadata_module.bytecode {
+                remove(sess, &path);
+            }
+
+            if let Some(ref allocator_module) = compiled_modules.allocator_module {
+                if let Some(ref path) = allocator_module.bytecode {
+                    remove(sess, path);
+                }
+            }
+        }
+    }
+
+    // We leave the following files around by default:
+    //  - #crate#.o
+    //  - #crate#.crate.metadata.o
+    //  - #crate#.bc
+    // These are used in linking steps and will be cleaned up afterward.
+}
+
+/// Print how many modules were reused from the incremental compilation
+/// cache versus recompiled (a module counts as reused when its
+/// `pre_existing` flag is set).
+pub(crate) fn dump_incremental_data(codegen_results: &CodegenResults) {
+    println!("[incremental] Re-using {} out of {} modules",
+              codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
+              codegen_results.modules.len());
+}
+
+/// A unit of work for a codegen worker: either run the initial LLVM
+/// optimization passes on a freshly codegened module, or finish codegen of
+/// a module produced by the LTO phase.
+enum WorkItem {
+    Optimize(ModuleCodegen),
+    LTO(lto::LtoModuleCodegen),
+}
+
+impl WorkItem {
+    /// Module kind used to select the matching `ModuleConfig`; LTO items
+    /// are always treated as regular modules.
+    fn kind(&self) -> ModuleKind {
+        match *self {
+            WorkItem::Optimize(ref m) => m.kind,
+            WorkItem::LTO(_) => ModuleKind::Regular,
+        }
+    }
+
+    /// Human-readable label for this work item (used for diagnostics and
+    /// profiling output).
+    fn name(&self) -> String {
+        match *self {
+            WorkItem::Optimize(ref m) => format!("optimize: {}", m.name),
+            WorkItem::LTO(ref m) => format!("lto: {}", m.name()),
+        }
+    }
+}
+
+/// Outcome of `execute_work_item`: either a fully compiled module, or an
+/// optimized module that still needs to go through the LTO phase.
+enum WorkItemResult {
+    Compiled(CompiledModule),
+    NeedsLTO(ModuleCodegen),
+}
+
+/// Execute one `WorkItem` on a worker thread.
+///
+/// LTO items are optimized and codegened immediately. `Optimize` items
+/// either (a) reuse a pre-existing incremental work product by copying its
+/// saved files into place, or (b) run the LLVM optimization passes and then
+/// decide — based on the session's LTO mode and the module kind — whether
+/// to codegen now or hand the module back to the coordinator for LTO.
+fn execute_work_item(cgcx: &CodegenContext,
+                     work_item: WorkItem,
+                     timeline: &mut Timeline)
+    -> Result<WorkItemResult, FatalError>
+{
+    let diag_handler = cgcx.create_diag_handler();
+    let config = cgcx.config(work_item.kind());
+    let module = match work_item {
+        WorkItem::Optimize(module) => module,
+        WorkItem::LTO(mut lto) => {
+            // LTO modules skip straight to codegen; the early return means
+            // the rest of this function only deals with `Optimize` items.
+            unsafe {
+                let module = lto.optimize(cgcx, timeline)?;
+                let module = codegen(cgcx, &diag_handler, module, config, timeline)?;
+                return Ok(WorkItemResult::Compiled(module))
+            }
+        }
+    };
+    let module_name = module.name.clone();
+
+    let pre_existing = match module.source {
+        ModuleSource::Codegened(_) => None,
+        ModuleSource::Preexisting(ref wp) => Some(wp.clone()),
+    };
+
+    if let Some(wp) = pre_existing {
+        // Incremental reuse: copy (or hard-link) each saved file of the
+        // cached work product to the path the rest of the pipeline expects
+        // for this module, recording which artifact kinds we restored.
+        let incr_comp_session_dir = cgcx.incr_comp_session_dir
+                                        .as_ref()
+                                        .unwrap();
+        let name = &module.name;
+        let mut object = None;
+        let mut bytecode = None;
+        let mut bytecode_compressed = None;
+        for (kind, saved_file) in wp.saved_files {
+            let obj_out = match kind {
+                WorkProductFileKind::Object => {
+                    let path = cgcx.output_filenames.temp_path(OutputType::Object, Some(name));
+                    object = Some(path.clone());
+                    path
+                }
+                WorkProductFileKind::Bytecode => {
+                    let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, Some(name));
+                    bytecode = Some(path.clone());
+                    path
+                }
+                WorkProductFileKind::BytecodeCompressed => {
+                    let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, Some(name))
+                        .with_extension(RLIB_BYTECODE_EXTENSION);
+                    bytecode_compressed = Some(path.clone());
+                    path
+                }
+            };
+            let source_file = in_incr_comp_dir(&incr_comp_session_dir,
+                                               &saved_file);
+            debug!("copying pre-existing module `{}` from {:?} to {}",
+                   module.name,
+                   source_file,
+                   obj_out.display());
+            match link_or_copy(&source_file, &obj_out) {
+                Ok(_) => { }
+                Err(err) => {
+                    diag_handler.err(&format!("unable to copy {} to {}: {}",
+                                              source_file.display(),
+                                              obj_out.display(),
+                                              err));
+                }
+            }
+        }
+        // The cached artifacts must exactly match what the current config
+        // asks us to emit; a mismatch means the cache entry should never
+        // have been considered reusable.
+        assert_eq!(object.is_some(), config.emit_obj);
+        assert_eq!(bytecode.is_some(), config.emit_bc);
+        assert_eq!(bytecode_compressed.is_some(), config.emit_bc_compressed);
+
+        Ok(WorkItemResult::Compiled(CompiledModule {
+            llmod_id: module.llmod_id.clone(),
+            name: module_name,
+            kind: ModuleKind::Regular,
+            pre_existing: true,
+            object,
+            bytecode,
+            bytecode_compressed,
+        }))
+    } else {
+        debug!("llvm-optimizing {:?}", module_name);
+
+        unsafe {
+            optimize(cgcx, &diag_handler, &module, config, timeline)?;
+
+            // After we've done the initial round of optimizations we need to
+            // decide whether to synchronously codegen this module or ship it
+            // back to the coordinator thread for further LTO processing (which
+            // has to wait for all the initial modules to be optimized).
+            //
+            // Here we dispatch based on the `cgcx.lto` and kind of module we're
+            // codegenning...
+            let needs_lto = match cgcx.lto {
+                Lto::No => false,
+
+                // Here we've got a full crate graph LTO requested. We ignore
+                // this, however, if the crate type is only an rlib as there's
+                // no full crate graph to process, that'll happen later.
+                //
+                // This use case currently comes up primarily for targets that
+                // require LTO so the request for LTO is always unconditionally
+                // passed down to the backend, but we don't actually want to do
+                // anything about it yet until we've got a final product.
+                Lto::Yes | Lto::Fat | Lto::Thin => {
+                    cgcx.crate_types.len() != 1 ||
+                        cgcx.crate_types[0] != config::CrateTypeRlib
+                }
+
+                // When we're automatically doing ThinLTO for multi-codegen-unit
+                // builds we don't actually want to LTO the allocator modules if
+                // it shows up. This is due to various linker shenanigans that
+                // we'll encounter later.
+                //
+                // Additionally here's where we also factor in the current LLVM
+                // version. If it doesn't support ThinLTO we skip this.
+                Lto::ThinLocal => {
+                    module.kind != ModuleKind::Allocator &&
+                        llvm::LLVMRustThinLTOAvailable()
+                }
+            };
+
+            // Metadata modules never participate in LTO regardless of the lto
+            // settings.
+            let needs_lto = needs_lto && module.kind != ModuleKind::Metadata;
+
+            // Don't run LTO passes when cross-lang LTO is enabled. The linker
+            // will do that for us in this case.
+            let needs_lto = needs_lto &&
+                !cgcx.opts.debugging_opts.cross_lang_lto.embed_bitcode();
+
+            if needs_lto {
+                Ok(WorkItemResult::NeedsLTO(module))
+            } else {
+                let module = codegen(cgcx, &diag_handler, module, config, timeline)?;
+                Ok(WorkItemResult::Compiled(module))
+            }
+        }
+    }
+}
+
+/// Messages sent to the codegen coordinator thread.
+enum Message {
+    /// A jobserver token was acquired (or acquisition failed).
+    Token(io::Result<Acquired>),
+    /// A worker finished optimizing a module that must now wait for LTO.
+    NeedsLTO {
+        result: ModuleCodegen,
+        worker_id: usize,
+    },
+    /// A worker finished (or failed) compiling a module to completion.
+    Done {
+        result: Result<CompiledModule, ()>,
+        worker_id: usize,
+    },
+    /// The main thread codegened a module; queue it with the given cost.
+    CodegenDone {
+        llvm_work_item: WorkItem,
+        cost: u64,
+    },
+    /// No more modules will be codegened.
+    CodegenComplete,
+    /// Request that the main thread codegen the next item.
+    CodegenItem,
+}
+
+/// A buffered compiler diagnostic: message text, optional error code and
+/// severity level. NOTE(review): appears to exist so diagnostics raised on
+/// worker threads can be forwarded through the `SharedEmitter` — confirm at
+/// its use sites.
+struct Diagnostic {
+    msg: String,
+    code: Option<DiagnosticId>,
+    lvl: Level,
+}
+
+/// What the coordinator's main thread is currently busy with: nothing,
+/// codegenning a module, or running LLVM work. (Exact scheduling use lives
+/// in `start_executing_work`, below.)
+#[derive(PartialEq, Clone, Copy, Debug)]
+enum MainThreadWorkerState {
+    Idle,
+    Codegenning,
+    LLVMing,
+}
+
+fn start_executing_work(tcx: TyCtxt,
+                        crate_info: &CrateInfo,
+                        shared_emitter: SharedEmitter,
+                        codegen_worker_send: Sender<Message>,
+                        coordinator_receive: Receiver<Box<Any + Send>>,
+                        total_cgus: usize,
+                        jobserver: Client,
+                        time_graph: Option<TimeGraph>,
+                        modules_config: Arc<ModuleConfig>,
+                        metadata_config: Arc<ModuleConfig>,
+                        allocator_config: Arc<ModuleConfig>)
+                        -> thread::JoinHandle<Result<CompiledModules, ()>> {
+    let coordinator_send = tcx.tx_to_llvm_workers.lock().clone();
+    let sess = tcx.sess;
+
+    // Compute the set of symbols we need to retain when doing LTO (if we need to)
+    let exported_symbols = {
+        let mut exported_symbols = FxHashMap();
+
+        let copy_symbols = |cnum| {
+            let symbols = tcx.exported_symbols(cnum)
+                             .iter()
+                             .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl))
+                             .collect();
+            Arc::new(symbols)
+        };
+
+        match sess.lto() {
+            Lto::No => None,
+            Lto::ThinLocal => {
+                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+                Some(Arc::new(exported_symbols))
+            }
+            Lto::Yes | Lto::Fat | Lto::Thin => {
+                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+                for &cnum in tcx.crates().iter() {
+                    exported_symbols.insert(cnum, copy_symbols(cnum));
+                }
+                Some(Arc::new(exported_symbols))
+            }
+        }
+    };
+
+    // First up, convert our jobserver into a helper thread so we can use normal
+    // mpsc channels to manage our messages and such.
+    // After we've requested tokens then we'll, when we can,
+    // get tokens on `coordinator_receive` which will
+    // get managed in the main loop below.
+    let coordinator_send2 = coordinator_send.clone();
+    let helper = jobserver.into_helper_thread(move |token| {
+        drop(coordinator_send2.send(Box::new(Message::Token(token))));
+    }).expect("failed to spawn helper thread");
+
+    let mut each_linked_rlib_for_lto = Vec::new();
+    drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| {
+        if link::ignored_for_lto(sess, crate_info, cnum) {
+            return
+        }
+        each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
+    }));
+
+    let assembler_cmd = if modules_config.no_integrated_as {
+        // HACK: currently we use linker (gcc) as our assembler
+        let (name, mut cmd) = get_linker(sess);
+        cmd.args(&sess.target.target.options.asm_args);
+        Some(Arc::new(AssemblerCommand {
+            name,
+            cmd,
+        }))
+    } else {
+        None
+    };
+
+    let cgcx = CodegenContext {
+        crate_types: sess.crate_types.borrow().clone(),
+        each_linked_rlib_for_lto,
+        lto: sess.lto(),
+        no_landing_pads: sess.no_landing_pads(),
+        fewer_names: sess.fewer_names(),
+        save_temps: sess.opts.cg.save_temps,
+        opts: Arc::new(sess.opts.clone()),
+        time_passes: sess.time_passes(),
+        exported_symbols,
+        plugin_passes: sess.plugin_llvm_passes.borrow().clone(),
+        remark: sess.opts.cg.remark.clone(),
+        worker: 0,
+        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
+        coordinator_send,
+        diag_emitter: shared_emitter.clone(),
+        time_graph,
+        output_filenames: tcx.output_filenames(LOCAL_CRATE),
+        regular_module_config: modules_config,
+        metadata_module_config: metadata_config,
+        allocator_module_config: allocator_config,
+        tm_factory: target_machine_factory(tcx.sess, false),
+        total_cgus,
+        msvc_imps_needed: msvc_imps_needed(tcx),
+        target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
+        debuginfo: tcx.sess.opts.debuginfo,
+        assembler_cmd,
+    };
+
+    // This is the "main loop" of parallel work happening for parallel codegen.
+    // It's here that we manage parallelism, schedule work, and work with
+    // messages coming from clients.
+    //
+    // There are a few environmental pre-conditions that shape how the system
+    // is set up:
+    //
+    // - Error reporting only can happen on the main thread because that's the
+    //   only place where we have access to the compiler `Session`.
+    // - LLVM work can be done on any thread.
+    // - Codegen can only happen on the main thread.
+    // - Each thread doing substantial work must be in possession of a `Token`
+    //   from the `Jobserver`.
+    // - The compiler process always holds one `Token`. Any additional `Tokens`
+    //   have to be requested from the `Jobserver`.
+    //
+    // Error Reporting
+    // ===============
+    // The error reporting restriction is handled separately from the rest: We
+    // set up a `SharedEmitter` that holds an open channel to the main thread.
+    // When an error occurs on any thread, the shared emitter will send the
+    // error message to the receiver main thread (`SharedEmitterMain`). The
+    // main thread will periodically query this error message queue and emit
+    // any error messages it has received. It might even abort compilation if it
+    // has received a fatal error. In this case we rely on all other threads
+    // being torn down automatically with the main thread.
+    // Since the main thread will often be busy doing codegen work, error
+    // reporting will be somewhat delayed, since the message queue can only be
+    // checked in between two work packages.
+    //
+    // Work Processing Infrastructure
+    // ==============================
+    // The work processing infrastructure knows three major actors:
+    //
+    // - the coordinator thread,
+    // - the main thread, and
+    // - LLVM worker threads
+    //
+    // The coordinator thread is running a message loop. It instructs the main
+    // thread about what work to do when, and it will spawn off LLVM worker
+    // threads as open LLVM WorkItems become available.
+    //
+    // The job of the main thread is to codegen CGUs into LLVM work packages
+    // (since the main thread is the only thread that can do this). The main
+    // thread will block until it receives a message from the coordinator, upon
+    // which it will codegen one CGU, send it to the coordinator and block
+    // again. This way the coordinator can control what the main thread is
+    // doing.
+    //
+    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
+    // available, it will spawn off a new LLVM worker thread and let it process
+    // that WorkItem. When an LLVM worker thread is done with its WorkItem,
+    // it will just shut down, which also frees all resources associated with
+    // the given LLVM module, and sends a message to the coordinator that the
+    // WorkItem has been completed.
+    //
+    // Work Scheduling
+    // ===============
+    // The scheduler's goal is to minimize the time it takes to complete all
+    // work there is, however, we also want to keep memory consumption low
+    // if possible. These two goals are at odds with each other: If memory
+    // consumption were not an issue, we could just let the main thread produce
+    // LLVM WorkItems at full speed, assuring maximal utilization of
+    // Tokens/LLVM worker threads. However, since codegen usually is faster
+    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
+    // WorkItem potentially holds on to a substantial amount of memory.
+    //
+    // So the actual goal is to always produce just enough LLVM WorkItems as
+    // not to starve our LLVM worker threads. That means, once we have enough
+    // WorkItems in our queue, we can block the main thread, so it does not
+    // produce more until we need them.
+    //
+    // Doing LLVM Work on the Main Thread
+    // ----------------------------------
+    // Since the main thread owns the compiler process's implicit `Token`, it is
+    // wasteful to keep it blocked without doing any work. Therefore, what we do
+    // in this case is: We spawn off an additional LLVM worker thread that helps
+    // reduce the queue. The work it is doing corresponds to the implicit
+    // `Token`. The coordinator will mark the main thread as being busy with
+    // LLVM work. (The actual work happens on another OS thread but we just care
+    // about `Tokens`, not actual threads).
+    //
+    // When any LLVM worker thread finishes while the main thread is marked as
+    // "busy with LLVM work", we can do a little switcheroo: We give the Token
+    // of the just finished thread to the LLVM worker thread that is working on
+    // behalf of the main thread's implicit Token, thus freeing up the main
+    // thread again. The coordinator can then again decide what the main thread
+    // should do. This allows the coordinator to make decisions at more points
+    // in time.
+    //
+    // Striking a Balance between Throughput and Memory Consumption
+    // ------------------------------------------------------------
+    // Since our two goals, (1) use as many Tokens as possible and (2) keep
+    // memory consumption as low as possible, are in conflict with each other,
+    // we have to find a trade off between them. Right now, the goal is to keep
+    // all workers busy, which means that no worker should find the queue empty
+    // when it is ready to start.
+    // How do we achieve this? Good question :) We actually never know how
+    // many `Tokens` are potentially available so it's hard to say how much to
+    // fill up the queue before switching the main thread to LLVM work. Also we
+    // currently don't have a means to estimate how long a running LLVM worker
+    // will still be busy with its current WorkItem. However, we know the
+    // maximal count of available Tokens that makes sense (=the number of CPU
+    // cores), so we can take a conservative guess. The heuristic we use here
+    // is implemented in the `queue_full_enough()` function.
+    //
+    // Some Background on Jobservers
+    // -----------------------------
+    // It's worth also touching on the management of parallelism here. We don't
+    // want to just spawn a thread per work item because while that's optimal
+    // parallelism it may overload a system with too many threads or violate our
+    // configuration for the maximum amount of cpu to use for this process. To
+    // manage this we use the `jobserver` crate.
+    //
+    // Job servers are an artifact of GNU make and are used to manage
+    // parallelism between processes. A jobserver is a glorified IPC semaphore
+    // basically. Whenever we want to run some work we acquire the semaphore,
+    // and whenever we're done with that work we release the semaphore. In this
+    // manner we can ensure that the maximum number of parallel workers is
+    // capped at any one point in time.
+    //
+    // LTO and the coordinator thread
+    // ------------------------------
+    //
+    // The final job the coordinator thread is responsible for is managing LTO
+    // and how that works. When LTO is requested what we'll do is collect all
+    // optimized LLVM modules into a local vector on the coordinator. Once all
+    // modules have been codegened and optimized we hand this to the `lto`
+    // module for further optimization. The `lto` module will return back a list
+    // of more modules to work on, which the coordinator will continue to spawn
+    // work for.
+    //
+    // Each LLVM module is automatically sent back to the coordinator for LTO if
+    // necessary. There's already optimizations in place to avoid sending work
+    // back to the coordinator if LTO isn't requested.
+    return thread::spawn(move || {
+        // We pretend to be within the top-level LLVM time-passes task here:
+        set_time_depth(1);
+
+        let max_workers = ::num_cpus::get();
+        let mut worker_id_counter = 0;
+        let mut free_worker_ids = Vec::new();
+        let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
+            if let Some(id) = free_worker_ids.pop() {
+                id
+            } else {
+                let id = worker_id_counter;
+                worker_id_counter += 1;
+                id
+            }
+        };
+
+        // This is where we collect codegen units that have gone all the way
+        // through codegen and LLVM.
+        let mut compiled_modules = vec![];
+        let mut compiled_metadata_module = None;
+        let mut compiled_allocator_module = None;
+        let mut needs_lto = Vec::new();
+        let mut started_lto = false;
+
+        // This flag tracks whether all items have gone through codegen
+        let mut codegen_done = false;
+
+        // This is the queue of LLVM work items that still need processing.
+        let mut work_items = Vec::<(WorkItem, u64)>::new();
+
+        // These are the Jobserver Tokens we currently hold. Does not include
+        // the implicit Token the compiler process owns no matter what.
+        let mut tokens = Vec::new();
+
+        let mut main_thread_worker_state = MainThreadWorkerState::Idle;
+        let mut running = 0;
+
+        let mut llvm_start_time = None;
+
+        // Run the message loop while there's still anything that needs message
+        // processing:
+        while !codegen_done ||
+              work_items.len() > 0 ||
+              running > 0 ||
+              needs_lto.len() > 0 ||
+              main_thread_worker_state != MainThreadWorkerState::Idle {
+
+            // While there are still CGUs to be codegened, the coordinator has
+            // to decide how to utilize the compiler process's implicit Token:
+            // For codegenning more CGUs or for running them through LLVM.
+            if !codegen_done {
+                if main_thread_worker_state == MainThreadWorkerState::Idle {
+                    if !queue_full_enough(work_items.len(), running, max_workers) {
+                        // The queue is not full enough, codegen more items:
+                        if let Err(_) = codegen_worker_send.send(Message::CodegenItem) {
+                            panic!("Could not send Message::CodegenItem to main thread")
+                        }
+                        main_thread_worker_state = MainThreadWorkerState::Codegenning;
+                    } else {
+                        // The queue is full enough to not let the worker
+                        // threads starve. Use the implicit Token to do some
+                        // LLVM work too.
+                        let (item, _) = work_items.pop()
+                            .expect("queue empty - queue_full_enough() broken?");
+                        let cgcx = CodegenContext {
+                            worker: get_worker_id(&mut free_worker_ids),
+                            .. cgcx.clone()
+                        };
+                        maybe_start_llvm_timer(cgcx.config(item.kind()),
+                                               &mut llvm_start_time);
+                        main_thread_worker_state = MainThreadWorkerState::LLVMing;
+                        spawn_work(cgcx, item);
+                    }
+                }
+            } else {
+                // If we've finished everything related to normal codegen
+                // then it must be the case that we've got some LTO work to do.
+                // Perform the serial work here of figuring out what we're
+                // going to LTO and then push a bunch of work items onto our
+                // queue to do LTO
+                if work_items.len() == 0 &&
+                   running == 0 &&
+                   main_thread_worker_state == MainThreadWorkerState::Idle {
+                    assert!(!started_lto);
+                    assert!(needs_lto.len() > 0);
+                    started_lto = true;
+                    let modules = mem::replace(&mut needs_lto, Vec::new());
+                    for (work, cost) in generate_lto_work(&cgcx, modules) {
+                        let insertion_index = work_items
+                            .binary_search_by_key(&cost, |&(_, cost)| cost)
+                            .unwrap_or_else(|e| e);
+                        work_items.insert(insertion_index, (work, cost));
+                        helper.request_token();
+                    }
+                }
+
+                // In this branch, we know that everything has been codegened,
+                // so it's just a matter of determining whether the implicit
+                // Token is free to use for LLVM work.
+                match main_thread_worker_state {
+                    MainThreadWorkerState::Idle => {
+                        if let Some((item, _)) = work_items.pop() {
+                            let cgcx = CodegenContext {
+                                worker: get_worker_id(&mut free_worker_ids),
+                                .. cgcx.clone()
+                            };
+                            maybe_start_llvm_timer(cgcx.config(item.kind()),
+                                                   &mut llvm_start_time);
+                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
+                            spawn_work(cgcx, item);
+                        } else {
+                            // There is no unstarted work, so let the main thread
+                            // take over for a running worker. Otherwise the
+                            // implicit token would just go to waste.
+                            // We reduce the `running` counter by one. The
+                            // `tokens.truncate()` below will take care of
+                            // giving the Token back.
+                            debug_assert!(running > 0);
+                            running -= 1;
+                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
+                        }
+                    }
+                    MainThreadWorkerState::Codegenning => {
+                        bug!("codegen worker should not be codegenning after \
+                              codegen was already completed")
+                    }
+                    MainThreadWorkerState::LLVMing => {
+                        // Already making good use of that token
+                    }
+                }
+            }
+
+            // Spin up what work we can, only doing this while we've got available
+            // parallelism slots and work left to spawn.
+            while work_items.len() > 0 && running < tokens.len() {
+                let (item, _) = work_items.pop().unwrap();
+
+                maybe_start_llvm_timer(cgcx.config(item.kind()),
+                                       &mut llvm_start_time);
+
+                let cgcx = CodegenContext {
+                    worker: get_worker_id(&mut free_worker_ids),
+                    .. cgcx.clone()
+                };
+
+                spawn_work(cgcx, item);
+                running += 1;
+            }
+
+            // Relinquish accidentally acquired extra tokens
+            tokens.truncate(running);
+
+            let msg = coordinator_receive.recv().unwrap();
+            match *msg.downcast::<Message>().ok().unwrap() {
+                // Save the token locally and the next turn of the loop will use
+                // this to spawn a new unit of work, or it may get dropped
+                // immediately if we have no more work to spawn.
+                Message::Token(token) => {
+                    match token {
+                        Ok(token) => {
+                            tokens.push(token);
+
+                            if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+                                // If the main thread token is used for LLVM work
+                                // at the moment, we turn that thread into a regular
+                                // LLVM worker thread, so the main thread is free
+                                // to react to codegen demand.
+                                main_thread_worker_state = MainThreadWorkerState::Idle;
+                                running += 1;
+                            }
+                        }
+                        Err(e) => {
+                            let msg = &format!("failed to acquire jobserver token: {}", e);
+                            shared_emitter.fatal(msg);
+                            // Exit the coordinator thread
+                            panic!("{}", msg)
+                        }
+                    }
+                }
+
+                Message::CodegenDone { llvm_work_item, cost } => {
+                    // We keep the queue sorted by estimated processing cost,
+                    // so that more expensive items are processed earlier. This
+                    // is good for throughput as it gives the main thread more
+                    // time to fill up the queue and it avoids scheduling
+                    // expensive items to the end.
+                    // Note, however, that this is not ideal for memory
+                    // consumption, as LLVM module sizes are not evenly
+                    // distributed.
+                    let insertion_index =
+                        work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
+                    let insertion_index = match insertion_index {
+                        Ok(idx) | Err(idx) => idx
+                    };
+                    work_items.insert(insertion_index, (llvm_work_item, cost));
+
+                    helper.request_token();
+                    assert_eq!(main_thread_worker_state,
+                               MainThreadWorkerState::Codegenning);
+                    main_thread_worker_state = MainThreadWorkerState::Idle;
+                }
+
+                Message::CodegenComplete => {
+                    codegen_done = true;
+                    assert_eq!(main_thread_worker_state,
+                               MainThreadWorkerState::Codegenning);
+                    main_thread_worker_state = MainThreadWorkerState::Idle;
+                }
+
+                // If a thread exits successfully then we drop a token associated
+                // with that worker and update our `running` count. We may later
+                // re-acquire a token to continue running more work. We may also not
+                // actually drop a token here if the worker was running with an
+                // "ephemeral token"
+                //
+                // Note that if the thread failed that means it panicked, so we
+                // abort immediately.
+                Message::Done { result: Ok(compiled_module), worker_id } => {
+                    if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+                        main_thread_worker_state = MainThreadWorkerState::Idle;
+                    } else {
+                        running -= 1;
+                    }
+
+                    free_worker_ids.push(worker_id);
+
+                    match compiled_module.kind {
+                        ModuleKind::Regular => {
+                            compiled_modules.push(compiled_module);
+                        }
+                        ModuleKind::Metadata => {
+                            assert!(compiled_metadata_module.is_none());
+                            compiled_metadata_module = Some(compiled_module);
+                        }
+                        ModuleKind::Allocator => {
+                            assert!(compiled_allocator_module.is_none());
+                            compiled_allocator_module = Some(compiled_module);
+                        }
+                    }
+                }
+                Message::NeedsLTO { result, worker_id } => {
+                    assert!(!started_lto);
+                    if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+                        main_thread_worker_state = MainThreadWorkerState::Idle;
+                    } else {
+                        running -= 1;
+                    }
+
+                    free_worker_ids.push(worker_id);
+                    needs_lto.push(result);
+                }
+                Message::Done { result: Err(()), worker_id: _ } => {
+                    shared_emitter.fatal("aborting due to worker thread failure");
+                    // Exit the coordinator thread
+                    return Err(())
+                }
+                Message::CodegenItem => {
+                    bug!("the coordinator should not receive codegen requests")
+                }
+            }
+        }
+
+        if let Some(llvm_start_time) = llvm_start_time {
+            let total_llvm_time = Instant::now().duration_since(llvm_start_time);
+            // This is the top-level timing for all of LLVM, set the time-depth
+            // to zero.
+            set_time_depth(0);
+            print_time_passes_entry(cgcx.time_passes,
+                                    "LLVM passes",
+                                    total_llvm_time);
+        }
+
+        // Regardless of what order these modules completed in, report them to
+        // the backend in the same order every time to ensure that we're handing
+        // out deterministic results.
+        compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
+
+        let compiled_metadata_module = compiled_metadata_module
+            .expect("Metadata module not compiled?");
+
+        Ok(CompiledModules {
+            modules: compiled_modules,
+            metadata_module: compiled_metadata_module,
+            allocator_module: compiled_allocator_module,
+        })
+    });
+
+    // A heuristic that determines if we have enough LLVM WorkItems in the
+    // queue so that the main thread can do LLVM work instead of codegen
+    fn queue_full_enough(items_in_queue: usize,
+                         workers_running: usize,
+                         max_workers: usize) -> bool {
+        // Tune me, plz.
+        items_in_queue > 0 &&
+        items_in_queue >= max_workers.saturating_sub(workers_running / 2)
+    }
+
+    fn maybe_start_llvm_timer(config: &ModuleConfig,
+                              llvm_start_time: &mut Option<Instant>) {
+        // We keep track of the -Ztime-passes output manually,
+        // since the closure-based interface does not fit well here.
+        if config.time_passes {
+            if llvm_start_time.is_none() {
+                *llvm_start_time = Some(Instant::now());
+            }
+        }
+    }
+}
+
+pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;
+pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId =
+    time_graph::TimelineId(CODEGEN_WORKER_ID);
+pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
+    time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]);
+const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
+    time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]);
+
+fn spawn_work(cgcx: CodegenContext, work: WorkItem) {
+    let depth = time_depth();
+
+    thread::spawn(move || {
+        set_time_depth(depth);
+
+        // Set up a destructor which will fire off a message that we're done as
+        // we exit.
+        struct Bomb {
+            coordinator_send: Sender<Box<Any + Send>>,
+            result: Option<WorkItemResult>,
+            worker_id: usize,
+        }
+        impl Drop for Bomb {
+            fn drop(&mut self) {
+                let worker_id = self.worker_id;
+                let msg = match self.result.take() {
+                    Some(WorkItemResult::Compiled(m)) => {
+                        Message::Done { result: Ok(m), worker_id }
+                    }
+                    Some(WorkItemResult::NeedsLTO(m)) => {
+                        Message::NeedsLTO { result: m, worker_id }
+                    }
+                    None => Message::Done { result: Err(()), worker_id }
+                };
+                drop(self.coordinator_send.send(Box::new(msg)));
+            }
+        }
+
+        let mut bomb = Bomb {
+            coordinator_send: cgcx.coordinator_send.clone(),
+            result: None,
+            worker_id: cgcx.worker,
+        };
+
+        // Execute the work itself, and if it finishes successfully then flag
+        // ourselves as a success as well.
+        //
+        // Note that we ignore any `FatalError` coming out of `execute_work_item`,
+        // as a diagnostic was already sent off to the main thread - just
+        // surface that there was an error in this worker.
+        bomb.result = {
+            let timeline = cgcx.time_graph.as_ref().map(|tg| {
+                tg.start(time_graph::TimelineId(cgcx.worker),
+                         LLVM_WORK_PACKAGE_KIND,
+                         &work.name())
+            });
+            let mut timeline = timeline.unwrap_or(Timeline::noop());
+            execute_work_item(&cgcx, work, &mut timeline).ok()
+        };
+    });
+}
+
+pub fn run_assembler(cgcx: &CodegenContext, handler: &Handler, assembly: &Path, object: &Path) {
+    let assembler = cgcx.assembler_cmd
+        .as_ref()
+        .expect("cgcx.assembler_cmd is missing?");
+
+    let pname = &assembler.name;
+    let mut cmd = assembler.cmd.clone();
+    cmd.arg("-c").arg("-o").arg(object).arg(assembly);
+    debug!("{:?}", cmd);
+
+    match cmd.output() {
+        Ok(prog) => {
+            if !prog.status.success() {
+                let mut note = prog.stderr.clone();
+                note.extend_from_slice(&prog.stdout);
+
+                handler.struct_err(&format!("linking with `{}` failed: {}",
+                                            pname.display(),
+                                            prog.status))
+                    .note(&format!("{:?}", &cmd))
+                    .note(str::from_utf8(&note[..]).unwrap())
+                    .emit();
+                handler.abort_if_errors();
+            }
+        },
+        Err(e) => {
+            handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e));
+            handler.abort_if_errors();
+        }
+    }
+}
+
+/// Creates an LLVM `PassManagerBuilder`, configures it from `config` and the
+/// requested `opt_level`, hands it to `f` so the caller can populate one or
+/// more pass managers, and disposes of the builder before returning.
+///
+/// # Safety
+///
+/// Calls directly into the LLVM C API; `llmod` must be a valid module
+/// reference for the duration of the call.
+pub unsafe fn with_llvm_pmb(llmod: ModuleRef,
+                            config: &ModuleConfig,
+                            opt_level: llvm::CodeGenOptLevel,
+                            prepare_for_thin_lto: bool,
+                            f: &mut FnMut(llvm::PassManagerBuilderRef)) {
+    use std::ptr;
+
+    // Create the PassManagerBuilder for LLVM. We configure it with
+    // reasonable defaults and prepare it to actually populate the pass
+    // manager.
+    let builder = llvm::LLVMPassManagerBuilderCreate();
+    let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone);
+    let inline_threshold = config.inline_threshold;
+
+    // An empty PGO-gen path means "let LLVM pick its default profraw name".
+    let pgo_gen_path = config.pgo_gen.as_ref().map(|s| {
+        let s = if s.is_empty() { "default_%m.profraw" } else { s };
+        CString::new(s.as_bytes()).unwrap()
+    });
+
+    let pgo_use_path = if config.pgo_use.is_empty() {
+        None
+    } else {
+        Some(CString::new(config.pgo_use.as_bytes()).unwrap())
+    };
+
+    llvm::LLVMRustConfigurePassManagerBuilder(
+        builder,
+        opt_level,
+        config.merge_functions,
+        config.vectorize_slp,
+        config.vectorize_loop,
+        prepare_for_thin_lto,
+        pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+        pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+    );
+
+    llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
+
+    // Loop unrolling grows code size, so disable it whenever we are
+    // optimizing for size at all.
+    if opt_size != llvm::CodeGenOptSizeNone {
+        llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
+    }
+
+    llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
+
+    // Here we match what clang does (kinda). For O0 we only inline
+    // always-inline functions (but don't add lifetime intrinsics), at O1 we
+    // inline with lifetime intrinsics, and O2+ we add an inliner with
+    // thresholds copied from clang. An explicitly requested inline threshold
+    // (the `Some(t)` arm) always takes precedence.
+    match (opt_level, opt_size, inline_threshold) {
+        (.., Some(t)) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
+        }
+        (llvm::CodeGenOptLevel::Aggressive, ..) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
+        }
+        (_, llvm::CodeGenOptSizeDefault, _) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
+        }
+        (_, llvm::CodeGenOptSizeAggressive, _) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
+        }
+        (llvm::CodeGenOptLevel::None, ..) => {
+            llvm::LLVMRustAddAlwaysInlinePass(builder, false);
+        }
+        (llvm::CodeGenOptLevel::Less, ..) => {
+            llvm::LLVMRustAddAlwaysInlinePass(builder, true);
+        }
+        (llvm::CodeGenOptLevel::Default, ..) => {
+            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
+        }
+        (llvm::CodeGenOptLevel::Other, ..) => {
+            bug!("CodeGenOptLevel::Other selected")
+        }
+    }
+
+    f(builder);
+    llvm::LLVMPassManagerBuilderDispose(builder);
+}
+
+
+/// Messages sent from codegen worker threads to the main thread, which owns
+/// the real diagnostic handler.
+enum SharedEmitterMessage {
+    /// A diagnostic (or flattened sub-diagnostic) to replay on the handler.
+    Diagnostic(Diagnostic),
+    /// An inline-asm error, keyed by the asm expansion's cookie.
+    InlineAsmError(u32, String),
+    /// Ask the main thread to call `abort_if_errors` on the session.
+    AbortIfErrors,
+    /// A fatal error message; the main thread raises it via `Session::fatal`.
+    Fatal(String),
+}
+
+/// Sending half of the shared diagnostic channel. `Clone` so that every
+/// codegen worker thread can hold its own handle.
+#[derive(Clone)]
+pub struct SharedEmitter {
+    sender: Sender<SharedEmitterMessage>,
+}
+
+/// Receiving half of the shared diagnostic channel, polled on the main
+/// thread (see `check`) to replay worker diagnostics on the real handler.
+pub struct SharedEmitterMain {
+    receiver: Receiver<SharedEmitterMessage>,
+}
+
+impl SharedEmitter {
+    /// Builds a connected pair: the cloneable sending half handed out to
+    /// codegen worker threads, and the receiving half kept on the main thread.
+    pub fn new() -> (SharedEmitter, SharedEmitterMain) {
+        let (tx, rx) = channel();
+        let emitter = SharedEmitter { sender: tx };
+        let main = SharedEmitterMain { receiver: rx };
+        (emitter, main)
+    }
+
+    /// Forwards an inline-asm error; send failures (main thread gone) are
+    /// deliberately ignored.
+    fn inline_asm_error(&self, cookie: u32, msg: String) {
+        let _ = self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg));
+    }
+
+    /// Forwards a fatal error message; send failures are deliberately ignored.
+    fn fatal(&self, msg: &str) {
+        let _ = self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()));
+    }
+}
+
+impl Emitter for SharedEmitter {
+    /// Flattens the diagnostic and each of its sub-diagnostics into plain
+    /// `Diagnostic` values and ships them over the channel, followed by an
+    /// abort-if-errors marker so the main thread stops promptly on errors.
+    fn emit(&mut self, db: &DiagnosticBuilder) {
+        let main_diag = Diagnostic {
+            msg: db.message(),
+            code: db.code.clone(),
+            lvl: db.level,
+        };
+        drop(self.sender.send(SharedEmitterMessage::Diagnostic(main_diag)));
+
+        // Sub-diagnostics lose their code: only the parent carries one.
+        for sub in db.children.iter() {
+            let child_diag = Diagnostic {
+                msg: sub.message(),
+                code: None,
+                lvl: sub.level,
+            };
+            drop(self.sender.send(SharedEmitterMessage::Diagnostic(child_diag)));
+        }
+
+        drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
+    }
+}
+
+impl SharedEmitterMain {
+    /// Drains diagnostics queued by codegen worker threads and replays them
+    /// on `sess`. When `blocking` is set, waits until the channel
+    /// disconnects; otherwise returns as soon as the queue is empty.
+    pub fn check(&self, sess: &Session, blocking: bool) {
+        loop {
+            let received = if blocking {
+                self.receiver.recv().map_err(|_| ())
+            } else {
+                self.receiver.try_recv().map_err(|_| ())
+            };
+
+            let message = match received {
+                Ok(message) => message,
+                // Disconnected (or, when non-blocking, currently empty)
+                // channel: nothing more to replay.
+                Err(()) => break,
+            };
+
+            match message {
+                SharedEmitterMessage::Diagnostic(diag) => {
+                    let handler = sess.diagnostic();
+                    if let Some(ref code) = diag.code {
+                        handler.emit_with_code(&MultiSpan::new(),
+                                               &diag.msg,
+                                               code.clone(),
+                                               diag.lvl);
+                    } else {
+                        handler.emit(&MultiSpan::new(),
+                                     &diag.msg,
+                                     diag.lvl);
+                    }
+                }
+                SharedEmitterMessage::InlineAsmError(cookie, msg) => {
+                    // Map the asm cookie back to the expansion's call site
+                    // when possible so the error points at user code.
+                    match Mark::from_u32(cookie).expn_info() {
+                        Some(ei) => sess.span_err(ei.call_site, &msg),
+                        None     => sess.err(&msg),
+                    }
+                }
+                SharedEmitterMessage::AbortIfErrors => {
+                    sess.abort_if_errors();
+                }
+                SharedEmitterMessage::Fatal(msg) => {
+                    sess.fatal(&msg);
+                }
+            }
+        }
+    }
+}
+
+/// Handle to an in-flight codegen session: the LLVM worker coordinator runs
+/// on a background thread while the main thread keeps codegenning items,
+/// then `join` collects the final results.
+pub struct OngoingCodegen {
+    crate_name: Symbol,
+    link: LinkMeta,
+    metadata: EncodedMetadata,
+    windows_subsystem: Option<String>,
+    linker_info: LinkerInfo,
+    crate_info: CrateInfo,
+    // Present only when `-Z trans-time-graph`-style timing is enabled;
+    // dumped on `join`.
+    time_graph: Option<TimeGraph>,
+    // Channel to the coordinator thread (work items, completion notice).
+    coordinator_send: Sender<Box<Any + Send>>,
+    // The coordinator signals on this channel when another item may be
+    // codegenned (simple backpressure).
+    codegen_worker_receive: Receiver<Message>,
+    // Diagnostics forwarded from worker threads, replayed on the main
+    // thread's handler.
+    shared_emitter_main: SharedEmitterMain,
+    // The coordinator thread itself; yields the compiled modules.
+    future: thread::JoinHandle<Result<CompiledModules, ()>>,
+    output_filenames: Arc<OutputFilenames>,
+}
+
+impl OngoingCodegen {
+    /// Waits for the coordinator thread to finish, replays any pending
+    /// diagnostics, copies codegen-unit work products into the incremental
+    /// cache, and produces the final output artifacts.
+    ///
+    /// Aborts the session (or panics on a coordinator crash) instead of
+    /// returning when errors occurred.
+    pub(crate) fn join(
+        self,
+        sess: &Session
+    ) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+        // Blocking check: drain every queued worker diagnostic before
+        // joining so errors are reported ahead of the abort below.
+        self.shared_emitter_main.check(sess, true);
+        let compiled_modules = match self.future.join() {
+            Ok(Ok(compiled_modules)) => compiled_modules,
+            Ok(Err(())) => {
+                // Workers reported errors through the shared emitter, so
+                // this abort is expected to fire.
+                sess.abort_if_errors();
+                panic!("expected abort due to worker thread errors")
+            },
+            Err(_) => {
+                // The coordinator thread panicked.
+                sess.fatal("Error during codegen/LLVM phase.");
+            }
+        };
+
+        sess.abort_if_errors();
+
+        if let Some(time_graph) = self.time_graph {
+            time_graph.dump(&format!("{}-timings", self.crate_name));
+        }
+
+        let work_products = copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess,
+                                                                             &compiled_modules);
+
+        produce_final_output_artifacts(sess,
+                                       &compiled_modules,
+                                       &self.output_filenames);
+
+        // FIXME: time_llvm_passes support - does this use a global context or
+        // something?
+        if sess.codegen_units() == 1 && sess.time_llvm_passes() {
+            unsafe { llvm::LLVMRustPrintPassTimings(); }
+        }
+
+        (CodegenResults {
+            crate_name: self.crate_name,
+            link: self.link,
+            metadata: self.metadata,
+            windows_subsystem: self.windows_subsystem,
+            linker_info: self.linker_info,
+            crate_info: self.crate_info,
+
+            modules: compiled_modules.modules,
+            allocator_module: compiled_modules.allocator_module,
+            metadata_module: compiled_modules.metadata_module,
+        }, work_products)
+    }
+
+    /// Submits an already-codegenned module (e.g. the metadata or allocator
+    /// module) to the LLVM coordinator, respecting backpressure first.
+    pub(crate) fn submit_pre_codegened_module_to_llvm(&self,
+                                                       tcx: TyCtxt,
+                                                       module: ModuleCodegen) {
+        self.wait_for_signal_to_codegen_item();
+        self.check_for_errors(tcx.sess);
+
+        // These are generally cheap and won't throw off scheduling.
+        let cost = 0;
+        submit_codegened_module_to_llvm(tcx, module, cost);
+    }
+
+    /// Tells the coordinator that no further modules will be submitted.
+    pub fn codegen_finished(&self, tcx: TyCtxt) {
+        self.wait_for_signal_to_codegen_item();
+        self.check_for_errors(tcx.sess);
+        drop(self.coordinator_send.send(Box::new(Message::CodegenComplete)));
+    }
+
+    /// Non-blocking drain of any diagnostics queued by worker threads.
+    pub fn check_for_errors(&self, sess: &Session) {
+        self.shared_emitter_main.check(sess, false);
+    }
+
+    /// Blocks until the coordinator signals that another item may be
+    /// codegenned (backpressure against the LLVM worker pool).
+    pub fn wait_for_signal_to_codegen_item(&self) {
+        match self.codegen_worker_receive.recv() {
+            Ok(Message::CodegenItem) => {
+                // Nothing to do
+            }
+            Ok(_) => panic!("unexpected message"),
+            Err(_) => {
+                // One of the LLVM threads must have panicked, fall through so
+                // error handling can be reached.
+            }
+        }
+    }
+}
+
+/// Queues `module` for optimization on the LLVM worker coordinator. `cost`
+/// is a rough scheduling weight; send failures are ignored since a dead
+/// coordinator surfaces through the normal error path.
+pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt,
+                                               module: ModuleCodegen,
+                                               cost: u64) {
+    let message = Message::CodegenDone {
+        llvm_work_item: WorkItem::Optimize(module),
+        cost,
+    };
+    let _ = tcx.tx_to_llvm_workers.lock().send(Box::new(message));
+}
+
+/// Whether `__imp_` aliases must be synthesized: only for MSVC-like targets
+/// that are producing at least one rlib.
+fn msvc_imps_needed(tcx: TyCtxt) -> bool {
+    if !tcx.sess.target.target.options.is_like_msvc {
+        return false;
+    }
+    tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib)
+}
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using MSVC linker.  We do this only for data, as linker can fix up
+// code references on its own.
+// See #26591, #27438
+fn create_msvc_imps(cgcx: &CodegenContext, llcx: ContextRef, llmod: ModuleRef) {
+    // Skip entirely unless this target/crate-type combination needs the
+    // aliases (see `msvc_imps_needed`).
+    if !cgcx.msvc_imps_needed {
+        return
+    }
+    // The x86 ABI seems to require that leading underscores are added to symbol
+    // names, so we need an extra underscore on 32-bit. There's also a leading
+    // '\x01' here which disables LLVM's symbol mangling (e.g. no extra
+    // underscores added in front).
+    let prefix = if cgcx.target_pointer_width == "32" {
+        "\x01__imp__"
+    } else {
+        "\x01__imp_"
+    };
+    unsafe {
+        let i8p_ty = Type::i8p_llcx(llcx);
+        // Collect (alias name, value) pairs for every externally-linked,
+        // defined global first, so we don't mutate the module while
+        // iterating over its globals.
+        let globals = base::iter_globals(llmod)
+            .filter(|&val| {
+                llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage &&
+                    llvm::LLVMIsDeclaration(val) == 0
+            })
+            .map(move |val| {
+                let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
+                let mut imp_name = prefix.as_bytes().to_vec();
+                imp_name.extend(name.to_bytes());
+                let imp_name = CString::new(imp_name).unwrap();
+                (imp_name, val)
+            })
+            .collect::<Vec<_>>();
+        // Emit each alias as an `i8*` global initialized with a pointer to
+        // the original symbol.
+        for (imp_name, val) in globals {
+            let imp = llvm::LLVMAddGlobal(llmod,
+                                          i8p_ty.to_ref(),
+                                          imp_name.as_ptr() as *const _);
+            llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+            llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+        }
+    }
+}