Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/allocator.rs           |  45
 compiler/rustc_codegen_llvm/src/asm.rs                 |  28
 compiler/rustc_codegen_llvm/src/attributes.rs          |  17
 compiler/rustc_codegen_llvm/src/back/lto.rs            | 261
 compiler/rustc_codegen_llvm/src/back/write.rs          |   7
 compiler/rustc_codegen_llvm/src/base.rs                |   2
 compiler/rustc_codegen_llvm/src/builder.rs             |   8
 compiler/rustc_codegen_llvm/src/common.rs              |   2
 compiler/rustc_codegen_llvm/src/consts.rs              |  14
 compiler/rustc_codegen_llvm/src/context.rs             |  11
 compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs |   1
 compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs  |   1
 compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs |  13
 compiler/rustc_codegen_llvm/src/declare.rs             |  43
 compiler/rustc_codegen_llvm/src/intrinsic.rs           | 537
 compiler/rustc_codegen_llvm/src/lib.rs                 |   6
 compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs     |   2
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs            |   8
 compiler/rustc_codegen_llvm/src/llvm/mod.rs            |   6
 compiler/rustc_codegen_llvm/src/llvm_util.rs           | 151
 compiler/rustc_codegen_llvm/src/type_.rs               |   4
 compiler/rustc_codegen_llvm/src/type_of.rs             |  19
 compiler/rustc_codegen_llvm/src/va_arg.rs              |   1
 23 files changed, 261 insertions(+), 926 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index bc1d9e1818c..e028b2c2dc7 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -3,11 +3,17 @@ use libc::c_uint;
 use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
 
 use crate::llvm::{self, False, True};
 use crate::ModuleLlvm;
 
-pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut ModuleLlvm, kind: AllocatorKind) {
+pub(crate) unsafe fn codegen(
+    tcx: TyCtxt<'_>,
+    mods: &mut ModuleLlvm,
+    kind: AllocatorKind,
+    has_alloc_error_handler: bool,
+) {
     let llcx = &*mods.llcx;
     let llmod = mods.llmod();
     let usize = match &tcx.sess.target.target.target_pointer_width[..] {
@@ -82,4 +88,41 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut ModuleLlvm, kind: Alloc
         }
         llvm::LLVMDisposeBuilder(llbuilder);
     }
+
+    // rust alloc error handler
+    let args = [usize, usize]; // size, align
+
+    let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
+    let name = "__rust_alloc_error_handler".to_string();
+    let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+    // -> ! DIFlagNoReturn
+    llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+
+    if tcx.sess.target.target.options.default_hidden_visibility {
+        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+    }
+    if tcx.sess.must_emit_unwind_tables() {
+        attributes::emit_uwtable(llfn, true);
+    }
+
+    let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
+    let callee = kind.fn_name(sym::oom);
+    let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+    // -> ! DIFlagNoReturn
+    llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, callee);
+    llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+    let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+    let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+    llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+    let args = args
+        .iter()
+        .enumerate()
+        .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+        .collect::<Vec<_>>();
+    let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
+    llvm::LLVMSetTailCall(ret, True);
+    llvm::LLVMBuildRetVoid(llbuilder);
+    llvm::LLVMDisposeBuilder(llbuilder);
 }
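
For reference, a Rust-level sketch of what this hunk emits (the real shim is
built directly through the LLVM C API above; `__rg_oom` is assumed here as the
name `AllocatorKind::Global.fn_name(sym::oom)` resolves to, with `__rdl_oom`
as the default-handler counterpart):

    // Sketch only: the Rust equivalent of the generated IR.
    extern "Rust" {
        fn __rg_oom(size: usize, align: usize) -> !;
    }

    #[no_mangle]
    unsafe extern "C" fn __rust_alloc_error_handler(size: usize, align: usize) -> ! {
        // Forwarded as a tail call, matching the LLVMSetTailCall above.
        __rg_oom(size, align)
    }
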
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index a468d09c2d9..1eb852e6b01 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -259,6 +259,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
                 InlineAsmArch::Nvptx64 => {}
                 InlineAsmArch::Hexagon => {}
+                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
             }
         }
         if !options.contains(InlineAsmOptions::NOMEM) {
@@ -505,6 +506,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>)
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
             | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
             InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
             InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
             InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
@@ -551,6 +554,7 @@ fn modifier_to_llvm(
             }
         }
         InlineAsmRegClass::Hexagon(_) => None,
+        InlineAsmRegClass::Mips(_) => None,
         InlineAsmRegClass::Nvptx(_) => None,
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
         | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
@@ -603,6 +607,8 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
             cx.type_vector(cx.type_i64(), 2)
         }
         InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
         InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
         InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
@@ -700,6 +706,13 @@ fn llvm_fixup_input(
                 value
             }
         }
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
+            // MIPS only supports register-length arithmetic.
+            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+            _ => value,
+        },
         _ => value,
     }
 }
@@ -768,6 +781,14 @@ fn llvm_fixup_output(
                 value
             }
         }
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
+            // MIPS only supports register-length arithmetic.
+            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+            _ => value,
+        },
         _ => value,
     }
 }
@@ -831,6 +852,13 @@ fn llvm_fixup_output_type(
                 layout.llvm_type(cx)
             }
         }
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
+            // MIPS only supports register-length arithmetic.
+            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+            Primitive::F32 => cx.type_i32(),
+            Primitive::F64 => cx.type_i64(),
+            _ => layout.llvm_type(cx),
+        },
         _ => layout.llvm_type(cx),
     }
 }
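
Taken together, these asm.rs changes make the MIPS `reg` ("r") and `freg`
("f") classes usable from `asm!`. A usage sketch in the nightly `asm!` syntax
of this era (instruction and values are illustrative):

    #![feature(asm)]

    fn add_one(x: u32) -> u32 {
        let y: u32;
        // in(reg)/out(reg) lower to the "r" constraint added above; i8/i16
        // operands would be zero-extended to i32 by llvm_fixup_input.
        unsafe { asm!("addiu {0}, {1}, 1", out(reg) y, in(reg) x) };
        y
    }
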
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 434a42607d6..8d131fa294b 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -18,7 +18,7 @@ use crate::attributes;
 use crate::llvm::AttributePlace::Function;
 use crate::llvm::{self, Attribute};
 use crate::llvm_util;
-pub use rustc_attr::{InlineAttr, OptimizeAttr};
+pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
 
 use crate::context::CodegenCx;
 use crate::value::Value;
@@ -306,6 +306,9 @@ pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty::
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
         Attribute::NoAlias.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
     }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
+        llvm::AddFunctionAttrString(llfn, Function, const_cstr!("cmse_nonsecure_entry"));
+    }
     sanitize(cx, codegen_fn_attrs.no_sanitize, llfn);
 
     // Always annotate functions with the target-cpu they are compiled for.
@@ -322,6 +325,10 @@ pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty::
             let feature = &f.as_str();
             format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature))
         }))
+        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
+            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
+        }))
         .collect::<Vec<String>>()
         .join(",");
 
@@ -361,17 +368,15 @@ pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty::
 }
 
 pub fn provide(providers: &mut Providers) {
+    use rustc_codegen_ssa::target_features::{all_known_features, supported_target_features};
     providers.supported_target_features = |tcx, cnum| {
         assert_eq!(cnum, LOCAL_CRATE);
         if tcx.sess.opts.actually_rustdoc {
             // rustdoc needs to be able to document functions that use all the features, so
             // provide them all.
-            llvm_util::all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
+            all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
         } else {
-            llvm_util::supported_target_features(tcx.sess)
-                .iter()
-                .map(|&(a, b)| (a.to_string(), b))
-                .collect()
+            supported_target_features(tcx.sess).iter().map(|&(a, b)| (a.to_string(), b)).collect()
         }
     };
 
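
The `-thumb-mode`/`+thumb-mode` strings map the `#[instruction_set]` attribute
onto LLVM target features. A usage sketch (nightly `isa_attribute` feature;
only meaningful on Arm targets that support both encodings):

    #![feature(isa_attribute)]

    #[instruction_set(arm::a32)] // compiled with "-thumb-mode"
    fn in_arm_encoding() {}

    #[instruction_set(arm::t32)] // compiled with "+thumb-mode"
    fn in_thumb_encoding() {}
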
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 4b2d5907a02..ff312bade25 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -2,14 +2,14 @@ use crate::back::write::{
     self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
 };
 use crate::llvm::archive_ro::ArchiveRO;
-use crate::llvm::{self, False, True};
+use crate::llvm::{self, build_string, False, True};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
 use rustc_codegen_ssa::back::symbol_export;
 use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{FatalError, Handler};
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
@@ -22,16 +22,14 @@ use tracing::{debug, info};
 use std::ffi::{CStr, CString};
 use std::fs::File;
 use std::io;
-use std::mem;
 use std::path::Path;
 use std::ptr;
 use std::slice;
 use std::sync::Arc;
 
-/// We keep track of past LTO imports that were used to produce the current set
-/// of compiled object files that we might choose to reuse during this
-/// compilation session.
-pub const THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-imports.bin";
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
@@ -485,31 +483,31 @@ fn thin_lto(
         )
         .ok_or_else(|| write::llvm_err(&diag_handler, "failed to prepare thin LTO context"))?;
 
-        info!("thin LTO data created");
+        let data = ThinData(data);
 
-        let (import_map_path, prev_import_map, curr_import_map) =
-            if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
-                let path = incr_comp_session_dir.join(THIN_LTO_IMPORTS_INCR_COMP_FILE_NAME);
-                // If previous imports have been deleted, or we get an IO error
-                // reading the file storing them, then we'll just use `None` as the
-                // prev_import_map, which will force the code to be recompiled.
-                let prev = if path.exists() {
-                    ThinLTOImportMaps::load_from_file(&path).ok()
-                } else {
-                    None
-                };
-                let curr = ThinLTOImportMaps::from_thin_lto_data(data);
-                (Some(path), prev, curr)
-            } else {
-                // If we don't compile incrementally, we don't need to load the
-                // import data from LLVM.
-                assert!(green_modules.is_empty());
-                let curr = ThinLTOImportMaps::default();
-                (None, None, curr)
-            };
-        info!("thin LTO import map loaded");
+        info!("thin LTO data created");
 
-        let data = ThinData(data);
+        let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+            cgcx.incr_comp_session_dir
+        {
+            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+            // If the previous file was deleted, or we get an IO error
+            // reading the file, then we'll just use `None` as the
+            // prev_key_map, which will force the code to be recompiled.
+            let prev =
+                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+            (Some(path), prev, curr)
+        } else {
+            // If we don't compile incrementally, we don't need to load the
+            // import data from LLVM.
+            assert!(green_modules.is_empty());
+            let curr = ThinLTOKeysMap::default();
+            (None, None, curr)
+        };
+        info!("thin LTO cache key map loaded");
+        info!("prev_key_map: {:#?}", prev_key_map);
+        info!("curr_key_map: {:#?}", curr_key_map);
 
         // Throw our data in an `Arc` as we'll be sharing it across threads. We
         // also put all memory referenced by the C++ data (buffers, ids, etc)
@@ -528,60 +526,14 @@ fn thin_lto(
         info!("checking which modules can be-reused and which have to be re-optimized.");
         for (module_index, module_name) in shared.module_names.iter().enumerate() {
             let module_name = module_name_to_str(module_name);
-
-            // If (1.) the module hasn't changed, and (2.) none of the modules
-            // it imports from have changed, *and* (3.) the import and export
-            // sets themselves have not changed from the previous compile when
-            // it was last ThinLTO'ed, then we can re-use the post-ThinLTO
-            // version of the module. Otherwise, freshly perform LTO
-            // optimization.
-            //
-            // (Note that globally, the export set is just the inverse of the
-            // import set.)
-            //
-            // For further justification of why the above is necessary and sufficient,
-            // see the LLVM blog post on ThinLTO:
-            //
-            // http://blog.llvm.org/2016/06/thinlto-scalable-and-incremental-lto.html
-            //
-            // which states the following:
-            //
-            // ```quote
-            // any particular ThinLTO backend must be redone iff:
-            //
-            // 1. The corresponding (primary) module’s bitcode changed
-            // 2. The list of imports into or exports from the module changed
-            // 3. The bitcode for any module being imported from has changed
-            // 4. Any global analysis result affecting either the primary module
-            //    or anything it imports has changed.
-            // ```
-            //
-            // This strategy means we can always save the computed imports as
-            // canon: when we reuse the post-ThinLTO version, condition (3.)
-            // ensures that the current import set is the same as the previous
-            // one. (And of course, when we don't reuse the post-ThinLTO
-            // version, the current import set *is* the correct one, since we
-            // are doing the ThinLTO in this current compilation cycle.)
-            //
-            // For more discussion, see rust-lang/rust#59535 (where the import
-            // issue was discovered) and rust-lang/rust#69798 (where the
-            // analogous export issue was discovered).
-            if let (Some(prev_import_map), true) =
-                (prev_import_map.as_ref(), green_modules.contains_key(module_name))
+            if let (Some(prev_key_map), true) =
+                (prev_key_map.as_ref(), green_modules.contains_key(module_name))
             {
                 assert!(cgcx.incr_comp_session_dir.is_some());
 
-                let prev_imports = prev_import_map.imports_of(module_name);
-                let curr_imports = curr_import_map.imports_of(module_name);
-                let prev_exports = prev_import_map.exports_of(module_name);
-                let curr_exports = curr_import_map.exports_of(module_name);
-                let imports_all_green = curr_imports
-                    .iter()
-                    .all(|imported_module| green_modules.contains_key(imported_module));
-                if imports_all_green
-                    && equivalent_as_sets(prev_imports, curr_imports)
-                    && equivalent_as_sets(prev_exports, curr_exports)
-                {
+                // If a module exists in both the current and the previous session,
+                // and has the same LTO cache key in both sessions, then we can re-use it
+                if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
                     let work_product = green_modules[module_name].clone();
                     copy_jobs.push(work_product);
                     info!(" - {}: re-used", module_name);
@@ -599,10 +551,10 @@ fn thin_lto(
         }
 
         // Save the current ThinLTO import information for the next compilation
-        // session, overwriting the previous serialized imports (if any).
-        if let Some(path) = import_map_path {
-            if let Err(err) = curr_import_map.save_to_file(&path) {
-                let msg = format!("Error while writing ThinLTO import data: {}", err);
+        // session, overwriting the previous serialized data (if any).
+        if let Some(path) = key_map_path {
+            if let Err(err) = curr_key_map.save_to_file(&path) {
+                let msg = format!("Error while writing ThinLTO key data: {}", err);
                 return Err(write::llvm_err(&diag_handler, &msg));
             }
         }
@@ -611,24 +563,6 @@ fn thin_lto(
     }
 }
 
-/// Given two slices, each with no repeat elements. returns true if and only if
-/// the two slices have the same contents when considered as sets (i.e. when
-/// element order is disregarded).
-fn equivalent_as_sets(a: &[String], b: &[String]) -> bool {
-    // cheap path: unequal lengths means cannot possibly be set equivalent.
-    if a.len() != b.len() {
-        return false;
-    }
-    // fast path: before building new things, check if inputs are equivalent as is.
-    if a == b {
-        return true;
-    }
-    // slow path: general set comparison.
-    let a: FxHashSet<&str> = a.iter().map(|s| s.as_str()).collect();
-    let b: FxHashSet<&str> = b.iter().map(|s| s.as_str()).collect();
-    a == b
-}
-
 pub(crate) fn run_pass_manager(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     module: &ModuleCodegen<ModuleLlvm>,
@@ -942,113 +876,56 @@ pub unsafe fn optimize_thin_module(
     Ok(module)
 }
 
-/// Summarizes module import/export relationships used by LLVM's ThinLTO pass.
-///
-/// Note that we tend to have two such instances of `ThinLTOImportMaps` in use:
-/// one loaded from a file that represents the relationships used during the
-/// compilation associated with the incremetnal build artifacts we are
-/// attempting to reuse, and another constructed via `from_thin_lto_data`, which
-/// captures the relationships of ThinLTO in the current compilation.
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
 #[derive(Debug, Default)]
-pub struct ThinLTOImportMaps {
-    // key = llvm name of importing module, value = list of modules it imports from
-    imports: FxHashMap<String, Vec<String>>,
-    // key = llvm name of exporting module, value = list of modules it exports to
-    exports: FxHashMap<String, Vec<String>>,
+pub struct ThinLTOKeysMap {
+    // key = llvm name of importing module, value = LLVM cache key
+    keys: FxHashMap<String, String>,
 }
 
-impl ThinLTOImportMaps {
-    /// Returns modules imported by `llvm_module_name` during some ThinLTO pass.
-    fn imports_of(&self, llvm_module_name: &str) -> &[String] {
-        self.imports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
-    }
-
-    /// Returns modules exported by `llvm_module_name` during some ThinLTO pass.
-    fn exports_of(&self, llvm_module_name: &str) -> &[String] {
-        self.exports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[])
-    }
-
+impl ThinLTOKeysMap {
     fn save_to_file(&self, path: &Path) -> io::Result<()> {
         use std::io::Write;
         let file = File::create(path)?;
         let mut writer = io::BufWriter::new(file);
-        for (importing_module_name, imported_modules) in &self.imports {
-            writeln!(writer, "{}", importing_module_name)?;
-            for imported_module in imported_modules {
-                writeln!(writer, " {}", imported_module)?;
-            }
-            writeln!(writer)?;
+        for (module, key) in &self.keys {
+            writeln!(writer, "{} {}", module, key)?;
         }
         Ok(())
     }
 
-    fn load_from_file(path: &Path) -> io::Result<ThinLTOImportMaps> {
+    fn load_from_file(path: &Path) -> io::Result<Self> {
         use std::io::BufRead;
-        let mut imports = FxHashMap::default();
-        let mut exports: FxHashMap<_, Vec<_>> = FxHashMap::default();
-        let mut current_module: Option<String> = None;
-        let mut current_imports: Vec<String> = vec![];
+        let mut keys = FxHashMap::default();
         let file = File::open(path)?;
         for line in io::BufReader::new(file).lines() {
             let line = line?;
-            if line.is_empty() {
-                let importing_module = current_module.take().expect("Importing module not set");
-                for imported in &current_imports {
-                    exports.entry(imported.clone()).or_default().push(importing_module.clone());
-                }
-                imports.insert(importing_module, mem::replace(&mut current_imports, vec![]));
-            } else if line.starts_with(' ') {
-                // Space marks an imported module
-                assert_ne!(current_module, None);
-                current_imports.push(line.trim().to_string());
-            } else {
-                // Otherwise, beginning of a new module (must be start or follow empty line)
-                assert_eq!(current_module, None);
-                current_module = Some(line.trim().to_string());
-            }
+            let mut split = line.split(" ");
+            let module = split.next().unwrap();
+            let key = split.next().unwrap();
+            assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+            keys.insert(module.to_string(), key.to_string());
         }
-        Ok(ThinLTOImportMaps { imports, exports })
+        Ok(Self { keys })
     }
 
-    /// Loads the ThinLTO import map from ThinLTOData.
-    unsafe fn from_thin_lto_data(data: *const llvm::ThinLTOData) -> ThinLTOImportMaps {
-        unsafe extern "C" fn imported_module_callback(
-            payload: *mut libc::c_void,
-            importing_module_name: *const libc::c_char,
-            imported_module_name: *const libc::c_char,
-        ) {
-            let map = &mut *(payload as *mut ThinLTOImportMaps);
-            let importing_module_name = CStr::from_ptr(importing_module_name);
-            let importing_module_name = module_name_to_str(&importing_module_name);
-            let imported_module_name = CStr::from_ptr(imported_module_name);
-            let imported_module_name = module_name_to_str(&imported_module_name);
-
-            if !map.imports.contains_key(importing_module_name) {
-                map.imports.insert(importing_module_name.to_owned(), vec![]);
-            }
-
-            map.imports
-                .get_mut(importing_module_name)
-                .unwrap()
-                .push(imported_module_name.to_owned());
-
-            if !map.exports.contains_key(imported_module_name) {
-                map.exports.insert(imported_module_name.to_owned(), vec![]);
-            }
-
-            map.exports
-                .get_mut(imported_module_name)
-                .unwrap()
-                .push(importing_module_name.to_owned());
-        }
-
-        let mut map = ThinLTOImportMaps::default();
-        llvm::LLVMRustGetThinLTOModuleImports(
-            data,
-            imported_module_callback,
-            &mut map as *mut _ as *mut libc::c_void,
-        );
-        map
+    fn from_thin_lto_modules(
+        data: &ThinData,
+        modules: &[llvm::ThinLTOModule],
+        names: &[CString],
+    ) -> Self {
+        let keys = modules
+            .iter()
+            .zip(names.iter())
+            .map(|(module, name)| {
+                let key = build_string(|rust_str| unsafe {
+                    llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+                })
+                .expect("Invalid ThinLTO module key");
+                (name.clone().into_string().unwrap(), key)
+            })
+            .collect();
+        Self { keys }
     }
 }
 
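
The key map that `save_to_file` writes is a plain text file with one
`<module> <key>` pair per line, for example (module names and cache keys
invented for illustration):

    regex.7rcbfp3g-cgu.0 1d5f8a03e6c94b72
    regex.7rcbfp3g-cgu.1 9b02c4de57a1f380

`load_from_file` splits each line on a single space, so the format relies on
module names containing no spaces, which rustc's CGU names satisfy.
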
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 937821e9d4f..f35c1016f86 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -344,6 +344,13 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
             .expect("non-UTF8 diagnostic");
             diag_handler.warn(&msg);
         }
+        llvm::diagnostic::Unsupported(diagnostic_ref) => {
+            let msg = llvm::build_string(|s| {
+                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+            })
+            .expect("non-UTF8 diagnostic");
+            diag_handler.err(&msg);
+        }
         llvm::diagnostic::UnknownDiagnostic(..) => {}
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 6a1b373ef07..f35708b1d09 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -108,7 +108,7 @@ pub fn compile_codegen_unit(
 
     // We assume that the cost to run LLVM on a CGU is proportional to
     // the time we needed for codegenning it.
-    let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
+    let cost = time_to_codegen.as_nanos() as u64;
 
     fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
         let cgu = tcx.codegen_unit(cgu_name);
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 23a3be1a2f2..f496f3283da 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -16,12 +16,11 @@ use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::sym;
+use rustc_span::{sym, Span};
 use rustc_target::abi::{self, Align, Size};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
 use std::ffi::CStr;
-use std::iter::TrustedLen;
 use std::ops::{Deref, Range};
 use std::ptr;
 use tracing::debug;
@@ -140,6 +139,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
     }
 
+    fn set_span(&self, _span: Span) {}
+
     fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
         unsafe {
             llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
@@ -179,7 +180,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         &mut self,
         v: &'ll Value,
         else_llbb: &'ll BasicBlock,
-        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
+        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
     ) {
         let switch =
             unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
@@ -931,7 +932,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
     }
 
-    #[allow(dead_code)]
     fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
     }
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 0b1cf03fa7e..0992410a728 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -1,5 +1,3 @@
-#![allow(non_camel_case_types, non_snake_case)]
-
 //! Code that is useful in various codegen modules.
 
 use crate::consts::{self, const_alloc_to_llvm};
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 2b2bcd97999..6d3582d3027 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -7,12 +7,13 @@ use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 use libc::c_uint;
 use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::const_cstr;
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_hir::Node;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::interpret::{
-    read_target_uint, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer,
+    read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer,
 };
 use rustc_middle::mir::mono::MonoItem;
 use rustc_middle::ty::{self, Instance, Ty};
@@ -22,8 +23,6 @@ use rustc_span::Span;
 use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
 use tracing::debug;
 
-use std::ffi::CStr;
-
 pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
     let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
     let dl = cx.data_layout();
@@ -85,10 +84,7 @@ pub fn codegen_static_initializer(
     cx: &CodegenCx<'ll, 'tcx>,
     def_id: DefId,
 ) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
-    let alloc = match cx.tcx.const_eval_poly(def_id)? {
-        ConstValue::ByRef { alloc, offset } if offset.bytes() == 0 => alloc,
-        val => bug!("static const eval returned {:#?}", val),
-    };
+    let alloc = cx.tcx.eval_static_initializer(def_id)?;
     Ok((const_alloc_to_llvm(cx, alloc), alloc))
 }
 
@@ -457,9 +453,9 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
                             .all(|&byte| byte == 0);
 
                     let sect_name = if all_bytes_are_zero {
-                        CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
+                        const_cstr!("__DATA,__thread_bss")
                     } else {
-                        CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
+                        const_cstr!("__DATA,__thread_data")
                     };
                     llvm::LLVMSetSection(g, sect_name.as_ptr());
                 }
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 6e6b2cacc03..d3c71ff501b 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -434,6 +434,17 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             llvm::LLVMSetSection(g, section.as_ptr());
         }
     }
+
+    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+        if self.get_declared_value("main").is_none() {
+            Some(self.declare_cfn("main", fn_type))
+        } else {
+            // If the symbol already exists, it is an error: for example, the user wrote
+            // #[no_mangle] extern "C" fn main(..) {..}
+            // instead of #[start]
+            None
+        }
+    }
 }
 
 impl CodegenCx<'b, 'tcx> {
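
The `None` branch corresponds to user code like the following sketch, where
the C entry point is defined directly instead of via `#[start]`:

    // With this definition, get_declared_value("main") already succeeds, so
    // declare_c_main returns None and the caller can report a proper error
    // instead of emitting a duplicate `main` symbol.
    #[no_mangle]
    pub extern "C" fn main(_argc: isize, _argv: *const *const u8) -> isize {
        0
    }
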
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index ec6c177614d..0098555a373 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -126,6 +126,7 @@ impl CoverageMapGenerator {
                 let (filenames_index, _) = self.filenames.insert_full(c_filename);
                 virtual_file_mapping.push(filenames_index as u32);
             }
+            debug!("Adding counter {:?} to map for {:?}", counter, region,);
             mapping_regions.push(CounterMappingRegion::code_region(
                 counter,
                 current_file_id,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 868eb74cf09..987149cb4c2 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -1845,7 +1845,6 @@ impl<'tcx> VariantInfo<'_, 'tcx> {
         None
     }
 
-    #[allow(dead_code)]
     fn is_artificial(&self) -> bool {
         match self {
             VariantInfo::Generator { .. } => true,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
index d1a55335c44..9945d4f4282 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -27,11 +27,18 @@ pub fn item_namespace(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
         .parent
         .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
 
+    let crate_name_as_str;
+    let name_to_string;
     let namespace_name = match def_key.disambiguated_data.data {
-        DefPathData::CrateRoot => cx.tcx.crate_name(def_id.krate),
-        data => data.as_symbol(),
+        DefPathData::CrateRoot => {
+            crate_name_as_str = cx.tcx.crate_name(def_id.krate).as_str();
+            &*crate_name_as_str
+        }
+        data => {
+            name_to_string = data.to_string();
+            &*name_to_string
+        }
     };
-    let namespace_name = namespace_name.as_str();
 
     let scope = unsafe {
         llvm::LLVMRustDIBuilderCreateNameSpace(
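
The two deferred bindings introduced above are the usual idiom for keeping
owned data alive past a `match` so that every arm can yield a plain `&str`.
In miniature (a self-contained sketch):

    fn label_len(id: Option<u32>) -> usize {
        let owned;
        let label: &str = match id {
            None => "root",
            Some(n) => {
                owned = n.to_string(); // initialized here, outlives the match
                &owned
            }
        };
        label.len()
    }
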
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index ec42bd4a039..a3d6882940a 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -51,17 +51,32 @@ fn declare_raw_fn(
     llfn
 }
 
-impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
-    fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
+impl CodegenCx<'ll, 'tcx> {
+    /// Declare a global value.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// return its Value instead.
+    pub fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
         debug!("declare_global(name={:?})", name);
         unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
     }
 
-    fn declare_cfn(&self, name: &str, fn_type: &'ll Type) -> &'ll Value {
+    /// Declare a C ABI function.
+    ///
+    /// Only use this for foreign function ABIs and glue. For Rust functions use
+    /// `declare_fn` instead.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing Value instead.
+    pub fn declare_cfn(&self, name: &str, fn_type: &'ll Type) -> &'ll Value {
         declare_raw_fn(self, name, llvm::CCallConv, fn_type)
     }
 
-    fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
+    /// Declare a Rust function.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing Value instead.
+    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
         debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
 
         let llfn = declare_raw_fn(self, name, fn_abi.llvm_cconv(), fn_abi.llvm_type(self));
@@ -69,7 +84,13 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         llfn
     }
 
-    fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
+    /// Declare a global with an intention to define it.
+    ///
+    /// Use this function when you intend to define a global. This function will
+    /// return `None` if the name already has a definition associated with it. In that
+    /// case an error should be reported to the user, because it usually indicates a
+    /// mistake in the user's code (e.g., misuse of `#[no_mangle]` or `#[export_name]`).
+    pub fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
         if self.get_defined_value(name).is_some() {
             None
         } else {
@@ -77,16 +98,22 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         }
     }
 
-    fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
+    /// Declare a private global.
+    ///
+    /// Use this function when you intend to define a global without a name.
+    pub fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
     }
 
-    fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
+    /// Gets a declared value by name.
+    pub fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
         debug!("get_declared_value(name={:?})", name);
         unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
     }
 
-    fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
+    /// Gets a defined or externally defined (AvailableExternally linkage) value by
+    /// name.
+    pub fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
         self.get_declared_value(name).and_then(|val| {
             let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
             if !declaration { Some(val) } else { None }
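
With the `DeclareMethods` trait impl gone, these are now inherent methods on
`CodegenCx`. A typical call site, sketched (names are illustrative; a real
caller would report a proper diagnostic rather than `bug!`):

    // A pre-existing definition usually means the user duplicated a symbol
    // via #[no_mangle] or #[export_name].
    let g = cx
        .define_global("my_static", cx.type_i32())
        .unwrap_or_else(|| bug!("symbol `my_static` is already defined"));
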
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 2208ceca00c..e76e86f5651 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -7,15 +7,12 @@ use crate::type_of::LayoutLlvmExt;
 use crate::va_arg::emit_va_arg;
 use crate::value::Value;
 
-use rustc_ast as ast;
 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
 use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
-use rustc_codegen_ssa::glue;
-use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
-use rustc_codegen_ssa::MemFlags;
 use rustc_hir as hir;
 use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
 use rustc_middle::ty::{self, Ty};
@@ -71,8 +68,6 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Va
         sym::nearbyintf64 => "llvm.nearbyint.f64",
         sym::roundf32 => "llvm.round.f32",
         sym::roundf64 => "llvm.round.f64",
-        sym::assume => "llvm.assume",
-        sym::abort => "llvm.trap",
         _ => return None,
     };
     Some(cx.get_intrinsic(&llvm_name))
@@ -112,9 +107,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                 None,
             ),
-            sym::unreachable => {
-                return;
-            }
             sym::likely => {
                 let expect = self.get_intrinsic(&("llvm.expect.i1"));
                 self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
@@ -137,8 +129,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                 self.call(llfn, &[], None)
             }
-            sym::va_start => self.va_start(args[0].immediate()),
-            sym::va_end => self.va_end(args[0].immediate()),
             sym::va_copy => {
                 let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                 self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
@@ -169,123 +159,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                 }
             }
-            sym::size_of_val => {
-                let tp_ty = substs.type_at(0);
-                if let OperandValue::Pair(_, meta) = args[0].val {
-                    let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
-                    llsize
-                } else {
-                    self.const_usize(self.size_of(tp_ty).bytes())
-                }
-            }
-            sym::min_align_of_val => {
-                let tp_ty = substs.type_at(0);
-                if let OperandValue::Pair(_, meta) = args[0].val {
-                    let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
-                    llalign
-                } else {
-                    self.const_usize(self.align_of(tp_ty).bytes())
-                }
-            }
-            sym::size_of
-            | sym::pref_align_of
-            | sym::min_align_of
-            | sym::needs_drop
-            | sym::type_id
-            | sym::type_name
-            | sym::variant_count => {
-                let value = self
-                    .tcx
-                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
-                    .unwrap();
-                OperandRef::from_const(self, value, ret_ty).immediate_or_packed_pair(self)
-            }
-            // Effectively no-op
-            sym::forget => {
-                return;
-            }
-            sym::offset => {
-                let ptr = args[0].immediate();
-                let offset = args[1].immediate();
-                self.inbounds_gep(ptr, &[offset])
-            }
-            sym::arith_offset => {
-                let ptr = args[0].immediate();
-                let offset = args[1].immediate();
-                self.gep(ptr, &[offset])
-            }
-
-            sym::copy_nonoverlapping => {
-                copy_intrinsic(
-                    self,
-                    false,
-                    false,
-                    substs.type_at(0),
-                    args[1].immediate(),
-                    args[0].immediate(),
-                    args[2].immediate(),
-                );
-                return;
-            }
-            sym::copy => {
-                copy_intrinsic(
-                    self,
-                    true,
-                    false,
-                    substs.type_at(0),
-                    args[1].immediate(),
-                    args[0].immediate(),
-                    args[2].immediate(),
-                );
-                return;
-            }
-            sym::write_bytes => {
-                memset_intrinsic(
-                    self,
-                    false,
-                    substs.type_at(0),
-                    args[0].immediate(),
-                    args[1].immediate(),
-                    args[2].immediate(),
-                );
-                return;
-            }
 
-            sym::volatile_copy_nonoverlapping_memory => {
-                copy_intrinsic(
-                    self,
-                    false,
-                    true,
-                    substs.type_at(0),
-                    args[0].immediate(),
-                    args[1].immediate(),
-                    args[2].immediate(),
-                );
-                return;
-            }
-            sym::volatile_copy_memory => {
-                copy_intrinsic(
-                    self,
-                    true,
-                    true,
-                    substs.type_at(0),
-                    args[0].immediate(),
-                    args[1].immediate(),
-                    args[2].immediate(),
-                );
-                return;
-            }
-            sym::volatile_set_memory => {
-                memset_intrinsic(
-                    self,
-                    true,
-                    substs.type_at(0),
-                    args[0].immediate(),
-                    args[1].immediate(),
-                    args[2].immediate(),
-                );
-                return;
-            }
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = substs.type_at(0);
                 let mut ptr = args[0].immediate();
@@ -343,20 +217,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             | sym::ctpop
             | sym::bswap
             | sym::bitreverse
-            | sym::add_with_overflow
-            | sym::sub_with_overflow
-            | sym::mul_with_overflow
-            | sym::wrapping_add
-            | sym::wrapping_sub
-            | sym::wrapping_mul
-            | sym::unchecked_div
-            | sym::unchecked_rem
-            | sym::unchecked_shl
-            | sym::unchecked_shr
-            | sym::unchecked_add
-            | sym::unchecked_sub
-            | sym::unchecked_mul
-            | sym::exact_div
             | sym::rotate_left
             | sym::rotate_right
             | sym::saturating_add
@@ -396,84 +256,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             &[args[0].immediate()],
                             None,
                         ),
-                        sym::add_with_overflow
-                        | sym::sub_with_overflow
-                        | sym::mul_with_overflow => {
-                            let intrinsic = format!(
-                                "llvm.{}{}.with.overflow.i{}",
-                                if signed { 's' } else { 'u' },
-                                &name_str[..3],
-                                width
-                            );
-                            let llfn = self.get_intrinsic(&intrinsic);
-
-                            // Convert `i1` to a `bool`, and write it to the out parameter
-                            let pair =
-                                self.call(llfn, &[args[0].immediate(), args[1].immediate()], None);
-                            let val = self.extract_value(pair, 0);
-                            let overflow = self.extract_value(pair, 1);
-                            let overflow = self.zext(overflow, self.type_bool());
-
-                            let dest = result.project_field(self, 0);
-                            self.store(val, dest.llval, dest.align);
-                            let dest = result.project_field(self, 1);
-                            self.store(overflow, dest.llval, dest.align);
-
-                            return;
-                        }
-                        sym::wrapping_add => self.add(args[0].immediate(), args[1].immediate()),
-                        sym::wrapping_sub => self.sub(args[0].immediate(), args[1].immediate()),
-                        sym::wrapping_mul => self.mul(args[0].immediate(), args[1].immediate()),
-                        sym::exact_div => {
-                            if signed {
-                                self.exactsdiv(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.exactudiv(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_div => {
-                            if signed {
-                                self.sdiv(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.udiv(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_rem => {
-                            if signed {
-                                self.srem(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.urem(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_shl => self.shl(args[0].immediate(), args[1].immediate()),
-                        sym::unchecked_shr => {
-                            if signed {
-                                self.ashr(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.lshr(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_add => {
-                            if signed {
-                                self.unchecked_sadd(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.unchecked_uadd(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_sub => {
-                            if signed {
-                                self.unchecked_ssub(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.unchecked_usub(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_mul => {
-                            if signed {
-                                self.unchecked_smul(args[0].immediate(), args[1].immediate())
-                            } else {
-                                self.unchecked_umul(args[0].immediate(), args[1].immediate())
-                            }
-                        }
                         sym::rotate_left | sym::rotate_right => {
                             let is_left = name == sym::rotate_left;
                             let val = args[0].immediate();
@@ -513,75 +295,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     }
                 }
             }
-            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
-                match float_type_width(arg_tys[0]) {
-                    Some(_width) => match name {
-                        sym::fadd_fast => self.fadd_fast(args[0].immediate(), args[1].immediate()),
-                        sym::fsub_fast => self.fsub_fast(args[0].immediate(), args[1].immediate()),
-                        sym::fmul_fast => self.fmul_fast(args[0].immediate(), args[1].immediate()),
-                        sym::fdiv_fast => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
-                        sym::frem_fast => self.frem_fast(args[0].immediate(), args[1].immediate()),
-                        _ => bug!(),
-                    },
-                    None => {
-                        span_invalid_monomorphization_error(
-                            tcx.sess,
-                            span,
-                            &format!(
-                                "invalid monomorphization of `{}` intrinsic: \
-                                      expected basic float type, found `{}`",
-                                name, arg_tys[0]
-                            ),
-                        );
-                        return;
-                    }
-                }
-            }
-
-            sym::float_to_int_unchecked => {
-                if float_type_width(arg_tys[0]).is_none() {
-                    span_invalid_monomorphization_error(
-                        tcx.sess,
-                        span,
-                        &format!(
-                            "invalid monomorphization of `float_to_int_unchecked` \
-                                  intrinsic: expected basic float type, \
-                                  found `{}`",
-                            arg_tys[0]
-                        ),
-                    );
-                    return;
-                }
-                let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
-                    Some(pair) => pair,
-                    None => {
-                        span_invalid_monomorphization_error(
-                            tcx.sess,
-                            span,
-                            &format!(
-                                "invalid monomorphization of `float_to_int_unchecked` \
-                                      intrinsic:  expected basic integer type, \
-                                      found `{}`",
-                                ret_ty
-                            ),
-                        );
-                        return;
-                    }
-                };
-                if signed {
-                    self.fptosi(args[0].immediate(), self.cx.type_ix(width))
-                } else {
-                    self.fptoui(args[0].immediate(), self.cx.type_ix(width))
-                }
-            }
-
-            sym::discriminant_value => {
-                if ret_ty.is_integral() {
-                    args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
-                } else {
-                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
-                }
-            }
 
             _ if name_str.starts_with("simd_") => {
                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
@@ -589,174 +302,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     Err(()) => return,
                 }
             }
-            // This requires that atomic intrinsics follow a specific naming pattern:
-            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
-            name if name_str.starts_with("atomic_") => {
-                use rustc_codegen_ssa::common::AtomicOrdering::*;
-                use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};
-
-                let split: Vec<&str> = name_str.split('_').collect();
-
-                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
-                let (order, failorder) = match split.len() {
-                    2 => (SequentiallyConsistent, SequentiallyConsistent),
-                    3 => match split[2] {
-                        "unordered" => (Unordered, Unordered),
-                        "relaxed" => (Monotonic, Monotonic),
-                        "acq" => (Acquire, Acquire),
-                        "rel" => (Release, Monotonic),
-                        "acqrel" => (AcquireRelease, Acquire),
-                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
-                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
-                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
-                    },
-                    4 => match (split[2], split[3]) {
-                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
-                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
-                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
-                    },
-                    _ => self.sess().fatal("atomic intrinsic not in the correct format"),
-                };
-
-                let invalid_monomorphization = |ty| {
-                    span_invalid_monomorphization_error(
-                        tcx.sess,
-                        span,
-                        &format!(
-                            "invalid monomorphization of `{}` intrinsic: \
-                                  expected basic integer type, found `{}`",
-                            name, ty
-                        ),
-                    );
-                };
-
-                match split[1] {
-                    "cxchg" | "cxchgweak" => {
-                        let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self).is_some() {
-                            let weak = split[1] == "cxchgweak";
-                            let pair = self.atomic_cmpxchg(
-                                args[0].immediate(),
-                                args[1].immediate(),
-                                args[2].immediate(),
-                                order,
-                                failorder,
-                                weak,
-                            );
-                            let val = self.extract_value(pair, 0);
-                            let success = self.extract_value(pair, 1);
-                            let success = self.zext(success, self.type_bool());
-
-                            let dest = result.project_field(self, 0);
-                            self.store(val, dest.llval, dest.align);
-                            let dest = result.project_field(self, 1);
-                            self.store(success, dest.llval, dest.align);
-                            return;
-                        } else {
-                            return invalid_monomorphization(ty);
-                        }
-                    }
-
-                    "load" => {
-                        let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self).is_some() {
-                            let size = self.size_of(ty);
-                            self.atomic_load(args[0].immediate(), order, size)
-                        } else {
-                            return invalid_monomorphization(ty);
-                        }
-                    }
-
-                    "store" => {
-                        let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self).is_some() {
-                            let size = self.size_of(ty);
-                            self.atomic_store(
-                                args[1].immediate(),
-                                args[0].immediate(),
-                                order,
-                                size,
-                            );
-                            return;
-                        } else {
-                            return invalid_monomorphization(ty);
-                        }
-                    }
-
-                    "fence" => {
-                        self.atomic_fence(order, SynchronizationScope::CrossThread);
-                        return;
-                    }
-
-                    "singlethreadfence" => {
-                        self.atomic_fence(order, SynchronizationScope::SingleThread);
-                        return;
-                    }
-
-                    // These are all AtomicRMW ops
-                    op => {
-                        let atom_op = match op {
-                            "xchg" => AtomicRmwBinOp::AtomicXchg,
-                            "xadd" => AtomicRmwBinOp::AtomicAdd,
-                            "xsub" => AtomicRmwBinOp::AtomicSub,
-                            "and" => AtomicRmwBinOp::AtomicAnd,
-                            "nand" => AtomicRmwBinOp::AtomicNand,
-                            "or" => AtomicRmwBinOp::AtomicOr,
-                            "xor" => AtomicRmwBinOp::AtomicXor,
-                            "max" => AtomicRmwBinOp::AtomicMax,
-                            "min" => AtomicRmwBinOp::AtomicMin,
-                            "umax" => AtomicRmwBinOp::AtomicUMax,
-                            "umin" => AtomicRmwBinOp::AtomicUMin,
-                            _ => self.sess().fatal("unknown atomic operation"),
-                        };
-
-                        let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self).is_some() {
-                            self.atomic_rmw(
-                                atom_op,
-                                args[0].immediate(),
-                                args[1].immediate(),
-                                order,
-                            )
-                        } else {
-                            return invalid_monomorphization(ty);
-                        }
-                    }
-                }
-            }
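The name-splitting scheme handled above can be summarized with a standalone sketch. This is deliberately simplified: the real table maps some orderings asymmetrically, e.g. `rel` becomes the pair `(Release, Monotonic)`, and the `fail*` suffixes are only legal for `cxchg`/`cxchgweak`:

    // Simplified model of "atomic_<operation>[_<ordering>[_<failure ordering>]]".
    fn parse_atomic(name: &str) -> Option<(&str, &str, &str)> {
        let parts: Vec<&str> = name.split('_').collect();
        match parts.as_slice() {
            ["atomic", op] => Some((op, "seqcst", "seqcst")),
            ["atomic", op, ord] => Some((op, ord, ord)),
            ["atomic", op, ord, fail] if fail.starts_with("fail") => Some((op, ord, fail)),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(
            parse_atomic("atomic_cxchg_acq_failrelaxed"),
            Some(("cxchg", "acq", "failrelaxed"))
        );
    }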
-
-            sym::nontemporal_store => {
-                let dst = args[0].deref(self.cx());
-                args[1].val.nontemporal_store(self, dst);
-                return;
-            }
-
-            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
-                let a = args[0].immediate();
-                let b = args[1].immediate();
-                if name == sym::ptr_guaranteed_eq {
-                    self.icmp(IntPredicate::IntEQ, a, b)
-                } else {
-                    self.icmp(IntPredicate::IntNE, a, b)
-                }
-            }
-
-            sym::ptr_offset_from => {
-                let ty = substs.type_at(0);
-                let pointee_size = self.size_of(ty);
-
-                // This is the same sequence that Clang emits for pointer subtraction.
-                // It can be neither `nsw` nor `nuw` because the input is treated as
-                // unsigned but then the output is treated as signed, so neither works.
-                let a = args[0].immediate();
-                let b = args[1].immediate();
-                let a = self.ptrtoint(a, self.type_isize());
-                let b = self.ptrtoint(b, self.type_isize());
-                let d = self.sub(a, b);
-                let pointee_size = self.const_usize(pointee_size.bytes());
-                // this is where the signed magic happens (notice the `s` in `exactsdiv`)
-                self.exactsdiv(d, pointee_size)
-            }
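In plain Rust terms the emitted sequence is equivalent to the sketch below; the real intrinsic backs `<*const T>::offset_from`, and the division must be exact, which is what `exactsdiv` asserts at the LLVM level:

    use std::mem::size_of;

    // ptrtoint + sub + exactsdiv, spelled out in ordinary arithmetic.
    fn offset_from_model<T>(a: *const T, b: *const T) -> isize {
        let d = a as isize - b as isize;
        d / size_of::<T>() as isize
    }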
 
             _ => bug!("unknown intrinsic '{}'", name),
         };
@@ -807,39 +352,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     }
 }
 
-fn copy_intrinsic(
-    bx: &mut Builder<'a, 'll, 'tcx>,
-    allow_overlap: bool,
-    volatile: bool,
-    ty: Ty<'tcx>,
-    dst: &'ll Value,
-    src: &'ll Value,
-    count: &'ll Value,
-) {
-    let (size, align) = bx.size_and_align_of(ty);
-    let size = bx.mul(bx.const_usize(size.bytes()), count);
-    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
-    if allow_overlap {
-        bx.memmove(dst, align, src, align, size, flags);
-    } else {
-        bx.memcpy(dst, align, src, align, size, flags);
-    }
-}
-
-fn memset_intrinsic(
-    bx: &mut Builder<'a, 'll, 'tcx>,
-    volatile: bool,
-    ty: Ty<'tcx>,
-    dst: &'ll Value,
-    val: &'ll Value,
-    count: &'ll Value,
-) {
-    let (size, align) = bx.size_and_align_of(ty);
-    let size = bx.mul(bx.const_usize(size.bytes()), count);
-    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
-    bx.memset(dst, val, size, align, flags);
-}
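Both deleted helpers scale the element count by the element size and defer to the memory builder. Their observable behaviour matches the stable `std::ptr` API, as in this sketch:

    use std::ptr;

    // `allow_overlap` selects memmove; otherwise memcpy.
    unsafe fn copy_like<T>(dst: *mut T, src: *const T, count: usize, allow_overlap: bool) {
        if allow_overlap {
            ptr::copy(src, dst, count); // memmove
        } else {
            ptr::copy_nonoverlapping(src, dst, count); // memcpy
        }
    }

    // The memset analogue: fill `count` elements with a repeated byte.
    unsafe fn memset_like<T>(dst: *mut T, val: u8, count: usize) {
        ptr::write_bytes(dst, val, count); // memset
    }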
-
 fn try_intrinsic(
     bx: &mut Builder<'a, 'll, 'tcx>,
     try_func: &'ll Value,
@@ -1281,14 +793,18 @@ fn generic_simd_intrinsic(
         require_simd!(arg_tys[1], "argument");
         let v_len = arg_tys[1].simd_size(tcx);
         require!(
-            m_len == v_len,
+            // Allow masks for vectors with fewer than 8 elements to be
+            // represented with a u8 or i8.
+            m_len == v_len || (m_len == 8 && v_len < 8),
             "mismatched lengths: mask length `{}` != other vector length `{}`",
             m_len,
             v_len
         );
         let i1 = bx.type_i1();
-        let i1xn = bx.type_vector(i1, m_len);
-        let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
+        let im = bx.type_ix(v_len);
+        let i1xn = bx.type_vector(i1, v_len);
+        let m_im = bx.trunc(args[0].immediate(), im);
+        let m_i1s = bx.bitcast(m_im, i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
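With the relaxed length check, a `u8` bitmask can now drive a select over fewer than eight lanes: the mask is truncated to `v_len` bits before the `i1`-vector bitcast, so the high bits are ignored. A hypothetical nightly example of the newly accepted shape:

    #![feature(repr_simd, platform_intrinsics)]

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct f32x4(f32, f32, f32, f32);

    extern "platform-intrinsic" {
        fn simd_select_bitmask<M, V>(mask: M, if_true: V, if_false: V) -> V;
    }

    fn pick(a: f32x4, b: f32x4) -> f32x4 {
        // Only the low 4 bits participate: trunc i8 -> i4, bitcast to <4 x i1>.
        unsafe { simd_select_bitmask(0b0101u8, a, b) }
    }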
 
@@ -2205,37 +1721,12 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
 // stuffs.
 fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
     match ty.kind() {
-        ty::Int(t) => Some((
-            match t {
-                ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
-                ast::IntTy::I8 => 8,
-                ast::IntTy::I16 => 16,
-                ast::IntTy::I32 => 32,
-                ast::IntTy::I64 => 64,
-                ast::IntTy::I128 => 128,
-            },
-            true,
-        )),
-        ty::Uint(t) => Some((
-            match t {
-                ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
-                ast::UintTy::U8 => 8,
-                ast::UintTy::U16 => 16,
-                ast::UintTy::U32 => 32,
-                ast::UintTy::U64 => 64,
-                ast::UintTy::U128 => 128,
-            },
-            false,
-        )),
-        _ => None,
-    }
-}
-
-// Returns the width of a float Ty
-// Returns None if the type is not a float
-fn float_type_width(ty: Ty<'_>) -> Option<u64> {
-    match ty.kind() {
-        ty::Float(t) => Some(t.bit_width()),
+        ty::Int(t) => {
+            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), true))
+        }
+        ty::Uint(t) => {
+            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.ptr_width)), false))
+        }
         _ => None,
     }
 }
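The rewrite collapses the long per-type match arms into `bit_width()`, which returns `None` exactly for `isize`/`usize`; those fall back to the target pointer width. A tiny standalone analogue of the fallback (hypothetical, with a fixed 64-bit target):

    fn width_or_ptr(bit_width: Option<u64>) -> u64 {
        const PTR_WIDTH: u64 = 64; // stand-in for cx.tcx.sess.target.ptr_width
        bit_width.unwrap_or(PTR_WIDTH)
    }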
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 28e49e823c0..b5f83118d0c 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -4,7 +4,7 @@
 //!
 //! This API is completely unstable and subject to change.
 
-#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(bool_to_option)]
 #![feature(const_cstr_unchecked)]
 #![feature(crate_visibility_modifier)]
@@ -12,7 +12,6 @@
 #![feature(in_band_lifetimes)]
 #![feature(nll)]
 #![feature(or_patterns)]
-#![feature(trusted_len)]
 #![recursion_limit = "256"]
 
 use back::write::{create_informational_target_machine, create_target_machine};
@@ -96,8 +95,9 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
         tcx: TyCtxt<'tcx>,
         mods: &mut ModuleLlvm,
         kind: AllocatorKind,
+        has_alloc_error_handler: bool,
     ) {
-        unsafe { allocator::codegen(tcx, mods, kind) }
+        unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) }
     }
     fn compile_codegen_unit(
         &self,
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
index 47f5c94e70c..ccd3e42e458 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -118,6 +118,7 @@ pub enum Diagnostic<'ll> {
     InlineAsm(InlineAsmDiagnostic<'ll>),
     PGO(&'ll DiagnosticInfo),
     Linker(&'ll DiagnosticInfo),
+    Unsupported(&'ll DiagnosticInfo),
 
     /// LLVM has other types that we do not wrap here.
     UnknownDiagnostic(&'ll DiagnosticInfo),
@@ -159,6 +160,7 @@ impl Diagnostic<'ll> {
 
             Dk::PGOProfile => PGO(di),
             Dk::Linker => Linker(di),
+            Dk::Unsupported => Unsupported(di),
 
             _ => UnknownDiagnostic(di),
         }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 4942c997682..4c1fee0106a 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -483,6 +483,7 @@ pub enum DiagnosticKind {
     OptimizationFailure,
     PGOProfile,
     Linker,
+    Unsupported,
 }
 
 /// LLVMRustDiagnosticLevel
@@ -948,7 +949,6 @@ extern "C" {
 
     // Operations on other types
     pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
-    pub fn LLVMX86MMXTypeInContext(C: &Context) -> &Type;
     pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type;
 
     // Operations on all values
@@ -2362,4 +2362,10 @@ extern "C" {
         bytecode_len: usize,
     ) -> bool;
     pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>);
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustComputeLTOCacheKey(
+        key_out: &RustString,
+        mod_id: *const c_char,
+        data: &ThinLTOData,
+    );
 }
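The new entry point folds a ThinLTO module's import/export state into a cache key on the C++ side. A hedged fragment of how a caller might drive it through this module's `build_string` helper; `module_id` (a C string) and `thin_data` (a `&ThinLTOData`) are assumed bindings, not part of this diff:

    // Illustrative fragment, not self-contained: `build_string` collects the
    // bytes the C++ side writes into the RustString and returns a String.
    let key = llvm::build_string(|rs| unsafe {
        llvm::LLVMRustComputeLTOCacheKey(rs, module_id.as_ptr(), thin_data)
    })
    .expect("LLVM failed to compute an LTO cache key");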
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index c09e3659f80..ed9b99188bb 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -37,6 +37,12 @@ pub fn AddFunctionAttrStringValue(llfn: &'a Value, idx: AttributePlace, attr: &C
     }
 }
 
+pub fn AddFunctionAttrString(llfn: &'a Value, idx: AttributePlace, attr: &CStr) {
+    unsafe {
+        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), std::ptr::null())
+    }
+}
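The helper reuses `LLVMRustAddFunctionAttrStringValue` with a null value pointer, attaching a key-only string attribute rather than a key=value pair. A hypothetical call site (`llfn` assumed):

    use std::ffi::CStr;

    // Attach a value-less attribute such as "no-builtins" to a function.
    let attr = CStr::from_bytes_with_nul(b"no-builtins\0").unwrap();
    llvm::AddFunctionAttrString(llfn, llvm::AttributePlace::Function, attr);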
+
 #[derive(Copy, Clone)]
 pub enum AttributePlace {
     ReturnValue,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 7fe30fc8ac0..a4605f46309 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,12 +1,12 @@
 use crate::back::write::create_informational_target_machine;
 use crate::llvm;
 use libc::c_int;
+use rustc_codegen_ssa::target_features::supported_target_features;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_feature::UnstableFeatures;
 use rustc_middle::bug;
 use rustc_session::config::PrintRequest;
 use rustc_session::Session;
-use rustc_span::symbol::sym;
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::{MergeFunctions, PanicStrategy};
 use std::ffi::CString;
@@ -139,141 +139,6 @@ pub fn time_trace_profiler_finish(file_name: &str) {
 // WARNING: the features after applying `to_llvm_feature` must be known
 // to LLVM or the feature detection code will walk past the end of the feature
 // array, leading to crashes.
-
-const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("aclass", Some(sym::arm_target_feature)),
-    ("mclass", Some(sym::arm_target_feature)),
-    ("rclass", Some(sym::arm_target_feature)),
-    ("dsp", Some(sym::arm_target_feature)),
-    ("neon", Some(sym::arm_target_feature)),
-    ("crc", Some(sym::arm_target_feature)),
-    ("crypto", Some(sym::arm_target_feature)),
-    ("v5te", Some(sym::arm_target_feature)),
-    ("v6", Some(sym::arm_target_feature)),
-    ("v6k", Some(sym::arm_target_feature)),
-    ("v6t2", Some(sym::arm_target_feature)),
-    ("v7", Some(sym::arm_target_feature)),
-    ("v8", Some(sym::arm_target_feature)),
-    ("vfp2", Some(sym::arm_target_feature)),
-    ("vfp3", Some(sym::arm_target_feature)),
-    ("vfp4", Some(sym::arm_target_feature)),
-    // This is needed for inline assembly, but shouldn't be stabilized as-is
-    // since it should be enabled per-function using #[instruction_set], not
-    // #[target_feature].
-    ("thumb-mode", Some(sym::arm_target_feature)),
-];
-
-const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("fp", Some(sym::aarch64_target_feature)),
-    ("neon", Some(sym::aarch64_target_feature)),
-    ("sve", Some(sym::aarch64_target_feature)),
-    ("crc", Some(sym::aarch64_target_feature)),
-    ("crypto", Some(sym::aarch64_target_feature)),
-    ("ras", Some(sym::aarch64_target_feature)),
-    ("lse", Some(sym::aarch64_target_feature)),
-    ("rdm", Some(sym::aarch64_target_feature)),
-    ("fp16", Some(sym::aarch64_target_feature)),
-    ("rcpc", Some(sym::aarch64_target_feature)),
-    ("dotprod", Some(sym::aarch64_target_feature)),
-    ("tme", Some(sym::aarch64_target_feature)),
-    ("v8.1a", Some(sym::aarch64_target_feature)),
-    ("v8.2a", Some(sym::aarch64_target_feature)),
-    ("v8.3a", Some(sym::aarch64_target_feature)),
-];
-
-const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("adx", Some(sym::adx_target_feature)),
-    ("aes", None),
-    ("avx", None),
-    ("avx2", None),
-    ("avx512bw", Some(sym::avx512_target_feature)),
-    ("avx512cd", Some(sym::avx512_target_feature)),
-    ("avx512dq", Some(sym::avx512_target_feature)),
-    ("avx512er", Some(sym::avx512_target_feature)),
-    ("avx512f", Some(sym::avx512_target_feature)),
-    ("avx512ifma", Some(sym::avx512_target_feature)),
-    ("avx512pf", Some(sym::avx512_target_feature)),
-    ("avx512vbmi", Some(sym::avx512_target_feature)),
-    ("avx512vl", Some(sym::avx512_target_feature)),
-    ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
-    ("bmi1", None),
-    ("bmi2", None),
-    ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
-    ("f16c", Some(sym::f16c_target_feature)),
-    ("fma", None),
-    ("fxsr", None),
-    ("lzcnt", None),
-    ("mmx", Some(sym::mmx_target_feature)),
-    ("movbe", Some(sym::movbe_target_feature)),
-    ("pclmulqdq", None),
-    ("popcnt", None),
-    ("rdrand", None),
-    ("rdseed", None),
-    ("rtm", Some(sym::rtm_target_feature)),
-    ("sha", None),
-    ("sse", None),
-    ("sse2", None),
-    ("sse3", None),
-    ("sse4.1", None),
-    ("sse4.2", None),
-    ("sse4a", Some(sym::sse4a_target_feature)),
-    ("ssse3", None),
-    ("tbm", Some(sym::tbm_target_feature)),
-    ("xsave", None),
-    ("xsavec", None),
-    ("xsaveopt", None),
-    ("xsaves", None),
-];
-
-const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("hvx", Some(sym::hexagon_target_feature)),
-    ("hvx-length128b", Some(sym::hexagon_target_feature)),
-];
-
-const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("altivec", Some(sym::powerpc_target_feature)),
-    ("power8-altivec", Some(sym::powerpc_target_feature)),
-    ("power9-altivec", Some(sym::powerpc_target_feature)),
-    ("power8-vector", Some(sym::powerpc_target_feature)),
-    ("power9-vector", Some(sym::powerpc_target_feature)),
-    ("vsx", Some(sym::powerpc_target_feature)),
-];
-
-const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
-    &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
-
-const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("m", Some(sym::riscv_target_feature)),
-    ("a", Some(sym::riscv_target_feature)),
-    ("c", Some(sym::riscv_target_feature)),
-    ("f", Some(sym::riscv_target_feature)),
-    ("d", Some(sym::riscv_target_feature)),
-    ("e", Some(sym::riscv_target_feature)),
-];
-
-const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
-    ("simd128", Some(sym::wasm_target_feature)),
-    ("atomics", Some(sym::wasm_target_feature)),
-    ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
-];
-
-/// When rustdoc is running, provide a list of all known features so that all their respective
-/// primitives may be documented.
-///
-/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
-pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
-    std::iter::empty()
-        .chain(ARM_ALLOWED_FEATURES.iter())
-        .chain(AARCH64_ALLOWED_FEATURES.iter())
-        .chain(X86_ALLOWED_FEATURES.iter())
-        .chain(HEXAGON_ALLOWED_FEATURES.iter())
-        .chain(POWERPC_ALLOWED_FEATURES.iter())
-        .chain(MIPS_ALLOWED_FEATURES.iter())
-        .chain(RISCV_ALLOWED_FEATURES.iter())
-        .chain(WASM_ALLOWED_FEATURES.iter())
-        .cloned()
-}
-
 pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str {
     let arch = if sess.target.target.arch == "x86_64" { "x86" } else { &*sess.target.target.arch };
     match (arch, s) {
@@ -307,20 +172,6 @@ pub fn target_features(sess: &Session) -> Vec<Symbol> {
         .collect()
 }
 
-pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
-    match &*sess.target.target.arch {
-        "arm" => ARM_ALLOWED_FEATURES,
-        "aarch64" => AARCH64_ALLOWED_FEATURES,
-        "x86" | "x86_64" => X86_ALLOWED_FEATURES,
-        "hexagon" => HEXAGON_ALLOWED_FEATURES,
-        "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
-        "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
-        "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
-        "wasm32" => WASM_ALLOWED_FEATURES,
-        _ => &[],
-    }
-}
-
 pub fn print_version() {
     // Can be called without initializing LLVM
     unsafe {
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 3b53b4fe77b..a43724fd495 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -62,10 +62,6 @@ impl CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) }
     }
 
-    crate fn type_x86_mmx(&self) -> &'ll Type {
-        unsafe { llvm::LLVMX86MMXTypeInContext(self.llcx) }
-    }
-
     crate fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
         unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
     }
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 12901de6048..e0754d21df1 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -21,23 +21,8 @@ fn uncached_llvm_type<'a, 'tcx>(
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
         Abi::Vector { ref element, count } => {
-            // LLVM has a separate type for 64-bit SIMD vectors on X86 called
-            // `x86_mmx` which is needed for some SIMD operations. As a bit of a
-            // hack (all SIMD definitions are super unstable anyway) we
-            // recognize any one-element SIMD vector as "this should be an
-            // x86_mmx" type. In general there shouldn't be a need for other
-            // one-element SIMD vectors, so it's assumed this won't clash with
-            // much else.
-            let use_x86_mmx = count == 1
-                && layout.size.bits() == 64
-                && (cx.sess().target.target.arch == "x86"
-                    || cx.sess().target.target.arch == "x86_64");
-            if use_x86_mmx {
-                return cx.type_x86_mmx();
-            } else {
-                let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
-                return cx.type_vector(element, count);
-            }
+            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+            return cx.type_vector(element, count);
         }
         Abi::ScalarPair(..) => {
             return cx.type_struct(
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 54efa05aee8..22ed4dd7576 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -11,7 +11,6 @@ use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::Ty;
 use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size};
 
-#[allow(dead_code)]
 fn round_pointer_up_to_alignment(
     bx: &mut Builder<'a, 'll, 'tcx>,
     addr: &'ll Value,