Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 12
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 14
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 17
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 70
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 186
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 73
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 108
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 65
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 18
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 50
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 73
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 62
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 85
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 3
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 157
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 582
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 28
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 38
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 29
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 15
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 37
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 84
31 files changed, 1050 insertions(+), 801 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index a6fd2a7de6b..28be6d033f8 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -34,13 +34,6 @@ pub trait ArgAttributesExt {
     );
 }
 
-fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
-    // LLVM prior to version 12 had known miscompiles in the presence of
-    // noalias attributes (see #54878), but we don't support earlier
-    // versions at all anymore. We now enable mutable noalias by default.
-    cx.tcx.sess.opts.unstable_opts.mutable_noalias.unwrap_or(true)
-}
-
 const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
     [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
 
@@ -88,9 +81,6 @@ fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'
                 attrs.push(llattr.create_attr(cx.llcx));
             }
         }
-        if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
-            attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
-        }
     } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
         // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
         // memory sanitizer's behavior.
@@ -231,7 +221,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 bx.store(val, cast_dst, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
-                // adaptation code has cast it into some scalar type.  The
+                // adaptation code has cast it into some scalar type. The
                 // code that follows is the only reliable way I have
                 // found to do a transform like i64 -> {i32,i32}.
                 // Basically we dump the data onto the stack then memcpy it.
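The abi.rs hunk above deletes the `should_use_mutable_noalias` escape hatch: with LLVM 12 the oldest supported version, the `-Zmutable-noalias` override defaults to enabled, so the per-argument `NoAliasMutRef` check is no longer needed and `noalias` is presumably set directly by the ABI layer (that change is outside this diff). A minimal sketch of the observable effect in an optimized build; the function and the exact attribute list are illustrative, not taken from this diff:

    // Rust source
    #[no_mangle]
    pub fn bump(x: &mut i32) {
        *x += 1;
    }

    // With mutable noalias enabled by default, the lowered declaration is
    // expected to look roughly like:
    //   define void @bump(ptr noalias noundef align 4 dereferenceable(4) %x)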
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index fed56cdd438..668d9292705 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -88,7 +88,8 @@ pub(crate) unsafe fn codegen(
             callee,
             args.as_ptr(),
             args.len() as c_uint,
-            None,
+            [].as_ptr(),
+            0 as c_uint,
         );
         llvm::LLVMSetTailCall(ret, True);
         if output.is_some() {
@@ -132,8 +133,15 @@ pub(crate) unsafe fn codegen(
         .enumerate()
         .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
         .collect::<Vec<_>>();
-    let ret =
-        llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None);
+    let ret = llvm::LLVMRustBuildCall(
+        llbuilder,
+        ty,
+        callee,
+        args.as_ptr(),
+        args.len() as c_uint,
+        [].as_ptr(),
+        0 as c_uint,
+    );
     llvm::LLVMSetTailCall(ret, True);
     llvm::LLVMBuildRetVoid(llbuilder);
     llvm::LLVMDisposeBuilder(llbuilder);
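Both allocator shims now pass an explicit (pointer, length) pair of operand bundles to `LLVMRustBuildCall` instead of a single `Option`; the empty-slice form above is the "no bundles" case. The binding itself lives in llvm/ffi.rs, whose hunk is not shown in this section, so the following declaration is only a presumed sketch reconstructed from the call sites here and in builder.rs (parameter names are made up; the types are the crate's own FFI types):

    extern "C" {
        // Presumed updated binding; `Option<&OperandBundleDef>` matches the
        // `Vec<Option<&OperandBundleDef>>` that builder.rs passes below.
        pub fn LLVMRustBuildCall<'a>(
            B: &Builder<'a>,
            Ty: &'a Type,
            Fn: &'a Value,
            Args: *const &'a Value,
            NumArgs: c_uint,
            OpBundles: *const Option<&OperandBundleDef<'a>>,
            NumOpBundles: c_uint,
        ) -> &'a Value;
    }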
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 219a4f8fa89..d9f8170a3cf 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -144,7 +144,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     // We prefer the latter because it matches the behavior of
                     // Clang.
                     if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
-                        constraints.push(format!("{}", reg_to_llvm(reg, Some(&in_value.layout))));
+                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)).to_string());
                     } else {
                         constraints.push(format!("{}", op_idx[&idx]));
                     }
@@ -445,7 +445,7 @@ pub(crate) fn inline_asm_call<'ll>(
             };
 
             // Store mark in a metadata node so we can map LLVM errors
-            // back to source locations.  See #17552.
+            // back to source locations. See #17552.
             let key = "srcloc";
             let kind = llvm::LLVMGetMDKindIDInContext(
                 bx.llcx,
@@ -849,6 +849,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
 fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
+    let dl = &cx.tcx.data_layout;
     match scalar.primitive() {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -856,7 +857,8 @@ fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Ty
         Primitive::Int(Integer::I64, _) => cx.type_i64(),
         Primitive::F32 => cx.type_f32(),
         Primitive::F64 => cx.type_f64(),
-        Primitive::Pointer => cx.type_isize(),
+        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+        Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
         _ => unreachable!(),
     }
 }
@@ -868,6 +870,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
+    let dl = &bx.tcx.data_layout;
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
@@ -881,8 +884,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
-            if let Primitive::Pointer = s.primitive() {
-                value = bx.ptrtoint(value, bx.cx.type_isize());
+            // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+            if let Primitive::Pointer(_) = s.primitive() {
+                let t = bx.type_from_integer(dl.ptr_sized_integer());
+                value = bx.ptrtoint(value, t);
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
@@ -958,7 +963,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
             value = bx.extract_element(value, bx.const_i32(0));
-            if let Primitive::Pointer = s.primitive() {
+            if let Primitive::Pointer(_) = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
             }
             value
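The asm.rs hunks track the new `Primitive::Pointer(AddressSpace)` shape and stop assuming pointers are `isize`-sized: the integer type used for `ptrtoint`/`inttoptr` fixups is now derived from the target data layout. Restated as a tiny helper (the function name is hypothetical; the two calls are the ones used above):

    // Hypothetical helper: the integer type whose width equals the target's
    // default pointer width, e.g. i32 on 32-bit and i64 on 64-bit targets.
    fn ptr_sized_int_ty<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        cx.type_from_integer(cx.tcx.data_layout.ptr_sized_integer())
    }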
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index a8b47633519..651d644ebb6 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -62,7 +62,7 @@ pub fn sanitize_attrs<'ll>(
 ) -> SmallVec<[&'ll Attribute; 4]> {
     let mut attrs = SmallVec::new();
     let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
-    if enabled.contains(SanitizerSet::ADDRESS) {
+    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
         attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
     }
     if enabled.contains(SanitizerSet::MEMORY) {
@@ -102,10 +102,10 @@ pub fn uwtable_attr(llcx: &llvm::Context) -> &Attribute {
 
 pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
     let mut fp = cx.sess().target.frame_pointer;
+    let opts = &cx.sess().opts;
     // "mcount" function relies on stack pointer.
     // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
-    if cx.sess().instrument_mcount() || matches!(cx.sess().opts.cg.force_frame_pointers, Some(true))
-    {
+    if opts.unstable_opts.instrument_mcount || matches!(opts.cg.force_frame_pointers, Some(true)) {
         fp = FramePointer::Always;
     }
     let attr_value = match fp {
@@ -118,8 +118,9 @@ pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attr
 
 /// Tell LLVM what instrument function to insert.
 #[inline]
-fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
-    if cx.sess().instrument_mcount() {
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> {
+    let mut attrs = SmallVec::new();
+    if cx.sess().opts.unstable_opts.instrument_mcount {
         // Similar to `clang -pg` behavior. Handled by the
         // `post-inline-ee-instrument` LLVM pass.
 
@@ -127,14 +128,49 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribu
         // See test/CodeGen/mcount.c in clang.
         let mcount_name = cx.sess().target.mcount.as_ref();
 
-        Some(llvm::CreateAttrStringValue(
+        attrs.push(llvm::CreateAttrStringValue(
             cx.llcx,
             "instrument-function-entry-inlined",
             &mcount_name,
-        ))
-    } else {
-        None
+        ));
+    }
+    if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray {
+        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
+        // Function prologue and epilogue are instrumented with NOP sleds,
+        // a runtime library later replaces them with detours into tracing code.
+        if options.always {
+            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
+        }
+        if options.never {
+            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
+        }
+        if options.ignore_loops {
+            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
+        }
+        // LLVM will not choose the default for us, but rather requires specific
+        // threshold in absence of "xray-always". Use the same default as Clang.
+        let threshold = options.instruction_threshold.unwrap_or(200);
+        attrs.push(llvm::CreateAttrStringValue(
+            cx.llcx,
+            "xray-instruction-threshold",
+            &threshold.to_string(),
+        ));
+        if options.skip_entry {
+            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
+        }
+        if options.skip_exit {
+            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
+        }
     }
+    attrs
+}
+
+fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    if !cx.sess().opts.unstable_opts.no_jump_tables {
+        return None;
+    }
+
+    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
 }
 
 fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
@@ -258,13 +294,12 @@ pub fn from_fn_attrs<'ll, 'tcx>(
         OptimizeAttr::Speed => {}
     }
 
-    let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
-        InlineAttr::Never
-    } else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
-        InlineAttr::Hint
-    } else {
-        codegen_fn_attrs.inline
-    };
+    let inline =
+        if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
+            InlineAttr::Hint
+        } else {
+            codegen_fn_attrs.inline
+        };
     to_add.extend(inline_attr(cx, inline));
 
     // The `uwtable` attribute according to LLVM is:
@@ -294,6 +329,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     // FIXME: none of these three functions interact with source level attributes.
     to_add.extend(frame_pointer_type_attr(cx));
     to_add.extend(instrument_function_attr(cx));
+    to_add.extend(nojumptables_attr(cx));
     to_add.extend(probestack_attr(cx));
     to_add.extend(stackprotector_attr(cx));
 
@@ -433,7 +469,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
         // the WebAssembly specification, which has this feature. This won't be
         // needed when LLVM enables this `multivalue` feature by default.
         if !cx.tcx.is_closure(instance.def_id()) {
-            let abi = cx.tcx.fn_sig(instance.def_id()).abi();
+            let abi = cx.tcx.fn_sig(instance.def_id()).skip_binder().abi();
             if abi == Abi::Wasm {
                 function_features.push("+multivalue".to_string());
             }
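Beyond the sanitizer and mcount tweaks, the attributes.rs hunks wire the new XRay and jump-table options through to LLVM function attributes. A hedged summary of the expected mapping (the `-Z` spellings are assumed from the option fields read above; the attribute strings are exactly the ones created in the hunk):

    // Assumed flag -> attribute mapping (illustrative):
    //   -Z instrument-xray=always          -> "function-instrument"="xray-always"
    //   -Z instrument-xray=never           -> "function-instrument"="xray-never"
    //   -Z instrument-xray=ignore-loops    -> "xray-ignore-loops"
    //   (no instruction threshold given)   -> "xray-instruction-threshold"="200"
    //   -Z no-jump-tables                  -> "no-jump-tables"="true"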
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 5c68abeb08b..a570f2af0f0 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -1,31 +1,30 @@
 //! A helper class for dealing with static archives
 
 use std::env;
-use std::ffi::{CStr, CString, OsString};
-use std::fs;
-use std::io::{self, Write};
+use std::ffi::{c_char, c_void, CStr, CString, OsString};
+use std::io;
 use std::mem;
 use std::path::{Path, PathBuf};
 use std::ptr;
 use std::str;
 
-use object::read::macho::FatArch;
-
 use crate::common;
 use crate::errors::{
-    ArchiveBuildFailure, DlltoolFailImportLibrary, ErrorCallingDllTool, ErrorCreatingImportLibrary,
-    ErrorWritingDEFFile, UnknownArchiveKind,
+    DlltoolFailImportLibrary, ErrorCallingDllTool, ErrorCreatingImportLibrary, ErrorWritingDEFFile,
 };
 use crate::llvm::archive_ro::{ArchiveRO, Child};
 use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
-use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
-use rustc_data_structures::memmap::Mmap;
+use rustc_codegen_ssa::back::archive::{
+    get_native_object_symbols, try_extract_macho_fat_archive, ArArchiveBuilder,
+    ArchiveBuildFailure, ArchiveBuilder, ArchiveBuilderBuilder, UnknownArchiveKind,
+};
+
 use rustc_session::cstore::DllImport;
 use rustc_session::Session;
 
 /// Helper for adding many files to an archive.
 #[must_use = "must call build() to finish building the archive"]
-pub struct LlvmArchiveBuilder<'a> {
+pub(crate) struct LlvmArchiveBuilder<'a> {
     sess: &'a Session,
     additions: Vec<Addition>,
 }
@@ -61,57 +60,6 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
     }
 }
 
-fn try_filter_fat_archs(
-    archs: object::read::Result<&[impl FatArch]>,
-    target_arch: object::Architecture,
-    archive_path: &Path,
-    archive_map_data: &[u8],
-) -> io::Result<Option<PathBuf>> {
-    let archs = archs.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
-
-    let desired = match archs.iter().filter(|a| a.architecture() == target_arch).next() {
-        Some(a) => a,
-        None => return Ok(None),
-    };
-
-    let (mut new_f, extracted_path) = tempfile::Builder::new()
-        .suffix(archive_path.file_name().unwrap())
-        .tempfile()?
-        .keep()
-        .unwrap();
-
-    new_f.write_all(
-        desired.data(archive_map_data).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?,
-    )?;
-
-    Ok(Some(extracted_path))
-}
-
-fn try_extract_macho_fat_archive(
-    sess: &Session,
-    archive_path: &Path,
-) -> io::Result<Option<PathBuf>> {
-    let archive_map = unsafe { Mmap::map(fs::File::open(&archive_path)?)? };
-    let target_arch = match sess.target.arch.as_ref() {
-        "aarch64" => object::Architecture::Aarch64,
-        "x86_64" => object::Architecture::X86_64,
-        _ => return Ok(None),
-    };
-
-    match object::macho::FatHeader::parse(&*archive_map) {
-        Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC => {
-            let archs = object::macho::FatHeader::parse_arch32(&*archive_map);
-            try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map)
-        }
-        Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC_64 => {
-            let archs = object::macho::FatHeader::parse_arch64(&*archive_map);
-            try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map)
-        }
-        // Not a FatHeader at all, just return None.
-        _ => Ok(None),
-    }
-}
-
 impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
     fn add_archive(
         &mut self,
@@ -160,7 +108,13 @@ pub struct LlvmArchiveBuilderBuilder;
 
 impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
     fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
-        Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() })
+        // FIXME use ArArchiveBuilder on most targets again once reading thin archives is
+        // implemented
+        if true || sess.target.arch == "wasm32" || sess.target.arch == "wasm64" {
+            Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() })
+        } else {
+            Box::new(ArArchiveBuilder::new(sess, get_llvm_object_symbols))
+        }
     }
 
     fn create_dll_import_lib(
@@ -199,7 +153,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
             // The binutils linker used on -windows-gnu targets cannot read the import
             // libraries generated by LLVM: in our attempts, the linker produced an .EXE
             // that loaded but crashed with an AV upon calling one of the imported
-            // functions.  Therefore, use binutils to create the import library instead,
+            // functions. Therefore, use binutils to create the import library instead,
             // by writing a .DEF file to the temp dir and calling binutils's dlltool.
             let def_file_path =
                 tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
@@ -229,6 +183,21 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
             // able to control the *exact* spelling of each of the symbols that are being imported:
             // hence we don't want `dlltool` adding leading underscores automatically.
             let dlltool = find_binutils_dlltool(sess);
+            let temp_prefix = {
+                let mut path = PathBuf::from(&output_path);
+                path.pop();
+                path.push(lib_name);
+                path
+            };
+            // dlltool target architecture args from:
+            // https://github.com/llvm/llvm-project-release-prs/blob/llvmorg-15.0.6/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp#L69
+            let (dlltool_target_arch, dlltool_target_bitness) = match sess.target.arch.as_ref() {
+                "x86_64" => ("i386:x86-64", "--64"),
+                "x86" => ("i386", "--32"),
+                "aarch64" => ("arm64", "--64"),
+                "arm" => ("arm", "--32"),
+                _ => panic!("unsupported arch {}", sess.target.arch),
+            };
             let result = std::process::Command::new(dlltool)
                 .args([
                     "-d",
@@ -237,7 +206,13 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
                     lib_name,
                     "-l",
                     output_path.to_str().unwrap(),
+                    "-m",
+                    dlltool_target_arch,
+                    "-f",
+                    dlltool_target_bitness,
                     "--no-leading-underscore",
+                    "--temp-prefix",
+                    temp_prefix.to_str().unwrap(),
                 ])
                 .output();
 
@@ -273,7 +248,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
 
             // All import names are Rust identifiers and therefore cannot contain \0 characters.
             // FIXME: when support for #[link_name] is implemented, ensure that the import names
-            // still don't contain any \0 characters.  Also need to check that the names don't
+            // still don't contain any \0 characters. Also need to check that the names don't
             // contain substrings like " @" or "NONAME" that are keywords or otherwise reserved
             // in definition files.
             let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> =
@@ -309,6 +284,61 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
     }
 }
 
+// The object crate doesn't know how to get symbols for LLVM bitcode and COFF bigobj files.
+// As such we need to use LLVM for them.
+#[deny(unsafe_op_in_unsafe_fn)]
+fn get_llvm_object_symbols(
+    buf: &[u8],
+    f: &mut dyn FnMut(&[u8]) -> io::Result<()>,
+) -> io::Result<bool> {
+    let is_bitcode = unsafe { llvm::LLVMRustIsBitcode(buf.as_ptr(), buf.len()) };
+
+    // COFF bigobj file, msvc LTO file or import library. See
+    // https://github.com/llvm/llvm-project/blob/453f27bc9/llvm/lib/BinaryFormat/Magic.cpp#L38-L51
+    let is_unsupported_windows_obj_file = buf.get(0..4) == Some(b"\0\0\xFF\xFF");
+
+    if is_bitcode || is_unsupported_windows_obj_file {
+        let mut state = Box::new(f);
+
+        let err = unsafe {
+            llvm::LLVMRustGetSymbols(
+                buf.as_ptr(),
+                buf.len(),
+                &mut *state as *mut &mut _ as *mut c_void,
+                callback,
+                error_callback,
+            )
+        };
+
+        if err.is_null() {
+            return Ok(true);
+        } else {
+            return Err(unsafe { *Box::from_raw(err as *mut io::Error) });
+        }
+
+        unsafe extern "C" fn callback(
+            state: *mut c_void,
+            symbol_name: *const c_char,
+        ) -> *mut c_void {
+            let f = unsafe { &mut *(state as *mut &mut dyn FnMut(&[u8]) -> io::Result<()>) };
+            match f(unsafe { CStr::from_ptr(symbol_name) }.to_bytes()) {
+                Ok(()) => std::ptr::null_mut(),
+                Err(err) => Box::into_raw(Box::new(err)) as *mut c_void,
+            }
+        }
+
+        unsafe extern "C" fn error_callback(error: *const c_char) -> *mut c_void {
+            let error = unsafe { CStr::from_ptr(error) };
+            Box::into_raw(Box::new(io::Error::new(
+                io::ErrorKind::Other,
+                format!("LLVM error: {}", error.to_string_lossy()),
+            ))) as *mut c_void
+        }
+    } else {
+        get_native_object_symbols(buf, f)
+    }
+}
+
 impl<'a> LlvmArchiveBuilder<'a> {
     fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> {
         let kind = &*self.sess.target.archive_format;
@@ -405,24 +435,22 @@ fn find_binutils_dlltool(sess: &Session) -> OsString {
         return dlltool_path.clone().into_os_string();
     }
 
-    let mut tool_name: OsString = if sess.host.arch != sess.target.arch {
-        // We are cross-compiling, so we need the tool with the prefix matching our target
-        if sess.target.arch == "x86" {
-            "i686-w64-mingw32-dlltool"
-        } else {
-            "x86_64-w64-mingw32-dlltool"
-        }
+    let tool_name: OsString = if sess.host.options.is_like_windows {
+        // If we're compiling on Windows, always use "dlltool.exe".
+        "dlltool.exe"
     } else {
-        // We are not cross-compiling, so we just want `dlltool`
-        "dlltool"
+        // On other platforms, use the architecture-specific name.
+        match sess.target.arch.as_ref() {
+            "x86_64" => "x86_64-w64-mingw32-dlltool",
+            "x86" => "i686-w64-mingw32-dlltool",
+            "aarch64" => "aarch64-w64-mingw32-dlltool",
+
+            // For non-standard architectures (e.g., aarch32) fallback to "dlltool".
+            _ => "dlltool",
+        }
     }
     .into();
 
-    if sess.host.options.is_like_windows {
-        // If we're compiling on Windows, add the .exe suffix
-        tool_name.push(".exe");
-    }
-
     // NOTE: it's not clear how useful it is to explicitly search PATH.
     for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
         let full_path = dir.join(&tool_name);
@@ -432,7 +460,7 @@ fn find_binutils_dlltool(sess: &Session) -> OsString {
     }
 
     // The user didn't specify the location of the dlltool binary, and we weren't able
-    // to find the appropriate one on the PATH.  Just return the name of the tool
+    // to find the appropriate one on the PATH. Just return the name of the tool
     // and let the invocation fail with a hopefully useful error message.
     tool_name
 }
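`get_llvm_object_symbols` above routes a Rust closure through LLVM's C callback interface: the closure travels as a type-erased `*mut c_void`, success is signalled by a null pointer, and an error comes back as a leaked `Box<io::Error>` that the caller reclaims with `Box::from_raw`. The same pattern, restated as a self-contained sketch with illustrative names:

    use std::ffi::c_void;
    use std::io;

    // Trampoline with the same shape as `callback` in the hunk above.
    unsafe extern "C" fn trampoline(state: *mut c_void) -> *mut c_void {
        // SAFETY: `state` must point at the `&mut dyn FnMut` the caller boxed.
        let f = &mut *(state as *mut &mut dyn FnMut() -> io::Result<()>);
        match f() {
            Ok(()) => std::ptr::null_mut(),
            Err(err) => Box::into_raw(Box::new(err)) as *mut c_void,
        }
    }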
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 3fa21355b7f..d2e01708a37 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,5 +1,7 @@
 use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
-use crate::errors::DynamicLinkingWithLTO;
+use crate::errors::{
+    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+};
 use crate::llvm::{self, build_string};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
 use object::read::archive::ArchiveFile;
@@ -77,15 +79,12 @@ fn prepare_lto(
         // Make sure we actually can run LTO
         for crate_type in cgcx.crate_types.iter() {
             if !crate_type_allows_lto(*crate_type) {
-                let e = diag_handler.fatal(
-                    "lto can only be run for executables, cdylibs and \
-                                            static library outputs",
-                );
-                return Err(e);
+                diag_handler.emit_err(LtoDisallowed);
+                return Err(FatalError);
             } else if *crate_type == CrateType::Dylib {
                 if !cgcx.opts.unstable_opts.dylib_lto {
-                    return Err(diag_handler
-                        .fatal("lto cannot be used for `dylib` crate type without `-Zdylib-lto`"));
+                    diag_handler.emit_err(LtoDylib);
+                    return Err(FatalError);
                 }
             }
         }
@@ -127,16 +126,23 @@ fn prepare_lto(
                         let module = SerializedModule::FromRlib(data.to_vec());
                         upstream_modules.push((module, CString::new(name).unwrap()));
                     }
-                    Err(msg) => return Err(diag_handler.fatal(&msg)),
+                    Err(e) => {
+                        diag_handler.emit_err(e);
+                        return Err(FatalError);
+                    }
                 }
             }
         }
     }
 
+    // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
+    // __llvm_profile_runtime, therefore we won't know until link time if this symbol
+    // should have default visibility.
+    symbols_below_threshold.push(CString::new("__llvm_profile_counter_bias").unwrap());
     Ok((symbols_below_threshold, upstream_modules))
 }
 
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
+fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
     let mut len = 0;
     let data =
         unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
@@ -151,8 +157,9 @@ fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
         Ok(bc)
     } else {
         assert!(len == 0);
-        let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
-        Err(format!("failed to get bitcode from object file for LTO ({})", msg))
+        Err(LtoBitcodeFromRlib {
+            llvm_err: llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string()),
+        })
     }
 }
 
@@ -206,7 +213,7 @@ pub(crate) fn run_thin(
 }
 
 pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
-    let name = module.name.clone();
+    let name = module.name;
     let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
     (name, buffer)
 }
@@ -324,10 +331,9 @@ fn fat_lto(
                 });
             info!("linking {:?}", name);
             let data = bc_decoded.data();
-            linker.add(data).map_err(|()| {
-                let msg = format!("failed to load bitcode of module {:?}", name);
-                write::llvm_err(diag_handler, &msg)
-            })?;
+            linker
+                .add(data)
+                .map_err(|()| write::llvm_err(diag_handler, LlvmError::LoadBitcode { name }))?;
             serialized_bitcode.push(bc_decoded);
         }
         drop(linker);
@@ -421,7 +427,7 @@ fn thin_lto(
         info!("going for that thin, thin LTO");
 
         let green_modules: FxHashMap<_, _> =
-            cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+            cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();
 
         let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
         let mut thin_buffers = Vec::with_capacity(modules.len());
@@ -485,7 +491,7 @@ fn thin_lto(
             symbols_below_threshold.as_ptr(),
             symbols_below_threshold.len() as u32,
         )
-        .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?;
+        .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::PrepareThinLtoContext))?;
 
         let data = ThinData(data);
 
@@ -558,8 +564,7 @@ fn thin_lto(
         // session, overwriting the previous serialized data (if any).
         if let Some(path) = key_map_path {
             if let Err(err) = curr_key_map.save_to_file(&path) {
-                let msg = format!("Error while writing ThinLTO key data: {}", err);
-                return Err(write::llvm_err(diag_handler, &msg));
+                return Err(write::llvm_err(diag_handler, LlvmError::WriteThinLtoKey { err }));
             }
         }
 
@@ -685,8 +690,7 @@ pub unsafe fn optimize_thin_module(
 
     let module_name = &thin_module.shared.module_names[thin_module.idx];
     let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
-    let tm =
-        (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;
+    let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, e))?;
 
     // Right now the implementation we've got only works over serialized
     // modules, so we create a fresh new LLVM context and parse the module
@@ -713,8 +717,7 @@ pub unsafe fn optimize_thin_module(
         let mut cu2 = ptr::null_mut();
         llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
         if !cu2.is_null() {
-            let msg = "multiple source DICompileUnits found";
-            return Err(write::llvm_err(&diag_handler, msg));
+            return Err(write::llvm_err(&diag_handler, LlvmError::MultipleSourceDiCompileUnit));
         }
 
         // Up next comes the per-module local analyses that we do for Thin LTO.
@@ -729,8 +732,7 @@ pub unsafe fn optimize_thin_module(
             let _timer =
                 cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
             if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
-                let msg = "failed to prepare thin LTO module";
-                return Err(write::llvm_err(&diag_handler, msg));
+                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
             }
             save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
         }
@@ -740,8 +742,7 @@ pub unsafe fn optimize_thin_module(
                 .prof
                 .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
             if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
-                let msg = "failed to prepare thin LTO module";
-                return Err(write::llvm_err(&diag_handler, msg));
+                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
             }
             save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
         }
@@ -751,8 +752,7 @@ pub unsafe fn optimize_thin_module(
                 .prof
                 .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
             if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
-                let msg = "failed to prepare thin LTO module";
-                return Err(write::llvm_err(&diag_handler, msg));
+                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
             }
             save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
         }
@@ -761,8 +761,7 @@ pub unsafe fn optimize_thin_module(
             let _timer =
                 cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
             if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
-                let msg = "failed to prepare thin LTO module";
-                return Err(write::llvm_err(&diag_handler, msg));
+                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
             }
             save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
         }
@@ -882,11 +881,7 @@ pub fn parse_module<'a>(
     diag_handler: &Handler,
 ) -> Result<&'a llvm::Module, FatalError> {
     unsafe {
-        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
-            || {
-                let msg = "failed to parse bitcode for LTO module";
-                write::llvm_err(diag_handler, msg)
-            },
-        )
+        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr())
+            .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::ParseBitcode))
     }
 }
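The lto.rs changes replace free-form `diag_handler.fatal(...)` strings with typed diagnostics from errors.rs, whose hunk is not shown in this section. A presumed sketch of what the simplest of those types could look like; the derive path, slug names, and field are assumptions based on rustc's usual diagnostic-struct conventions, not copied from this diff:

    // Presumed shape only -- the real definitions live in errors.rs.
    use rustc_macros::Diagnostic;

    #[derive(Diagnostic)]
    #[diag(codegen_llvm_lto_disallowed)]
    pub(crate) struct LtoDisallowed;

    #[derive(Diagnostic)]
    #[diag(codegen_llvm_lto_bitcode_from_rlib)]
    pub(crate) struct LtoBitcodeFromRlib {
        pub llvm_err: String,
    }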
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 97d0de47b3a..a4ae1b01e86 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -5,6 +5,9 @@ use crate::back::profiling::{
 use crate::base;
 use crate::common;
 use crate::consts;
+use crate::errors::{
+    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+};
 use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::llvm_util;
 use crate::type_::Type;
@@ -37,10 +40,10 @@ use std::slice;
 use std::str;
 use std::sync::Arc;
 
-pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
+pub fn llvm_err<'a>(handler: &rustc_errors::Handler, err: LlvmError<'a>) -> FatalError {
     match llvm::last_error() {
-        Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
-        None => handler.fatal(msg),
+        Some(llvm_err) => handler.emit_almost_fatal(WithLlvmError(err, llvm_err)),
+        None => handler.emit_almost_fatal(err),
     }
 }
 
@@ -85,10 +88,9 @@ pub fn write_output_file<'ll>(
             }
         }
 
-        result.into_result().map_err(|()| {
-            let msg = format!("could not write output to {}", output.display());
-            llvm_err(handler, &msg)
-        })
+        result
+            .into_result()
+            .map_err(|()| llvm_err(handler, LlvmError::WriteOutput { path: output }))
     }
 }
 
@@ -98,7 +100,7 @@ pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm:
     // system/tcx is set up.
     let features = llvm_util::global_llvm_features(sess, false);
     target_machine_factory(sess, config::OptLevel::No, &features)(config)
-        .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
+        .unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
 }
 
 pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
@@ -117,7 +119,7 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll
         tcx.backend_optimization_level(()),
         tcx.global_backend_features(()),
     )(config)
-    .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
+    .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), err).raise())
 }
 
 pub fn to_llvm_opt_settings(
@@ -203,7 +205,7 @@ pub fn target_machine_factory(
         sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
     let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
 
-    let asm_comments = sess.asm_comments();
+    let asm_comments = sess.opts.unstable_opts.asm_comments;
     let relax_elf_relocations =
         sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
 
@@ -240,9 +242,7 @@ pub fn target_machine_factory(
             )
         };
 
-        tm.ok_or_else(|| {
-            format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
-        })
+        tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
     })
 }
 
@@ -355,25 +355,28 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
             };
 
             if enabled {
-                diag_handler.note_without_error(&format!(
-                    "{}:{}:{}: {}: {}",
-                    opt.filename, opt.line, opt.column, opt.pass_name, opt.message,
-                ));
+                diag_handler.emit_note(FromLlvmOptimizationDiag {
+                    filename: &opt.filename,
+                    line: opt.line,
+                    column: opt.column,
+                    pass_name: &opt.pass_name,
+                    message: &opt.message,
+                });
             }
         }
         llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
-            let msg = llvm::build_string(|s| {
+            let message = llvm::build_string(|s| {
                 llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
             })
             .expect("non-UTF8 diagnostic");
-            diag_handler.warn(&msg);
+            diag_handler.emit_warning(FromLlvmDiag { message });
         }
         llvm::diagnostic::Unsupported(diagnostic_ref) => {
-            let msg = llvm::build_string(|s| {
+            let message = llvm::build_string(|s| {
                 llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
             })
             .expect("non-UTF8 diagnostic");
-            diag_handler.err(&msg);
+            diag_handler.emit_err(FromLlvmDiag { message });
         }
         llvm::diagnostic::UnknownDiagnostic(..) => {}
     }
@@ -409,11 +412,7 @@ fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
 }
 
 fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
-    if config.instrument_coverage {
-        Some(CString::new("default_%m_%p.profraw").unwrap())
-    } else {
-        None
-    }
+    config.instrument_coverage.then(|| CString::new("default_%m_%p.profraw").unwrap())
 }
 
 pub(crate) unsafe fn llvm_optimize(
@@ -443,16 +442,19 @@ pub(crate) unsafe fn llvm_optimize(
             sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
             sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
             sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
+            sanitize_kernel_address: config.sanitizer.contains(SanitizerSet::KERNELADDRESS),
+            sanitize_kernel_address_recover: config
+                .sanitizer_recover
+                .contains(SanitizerSet::KERNELADDRESS),
         })
     } else {
         None
     };
 
-    let mut llvm_profiler = if cgcx.prof.llvm_recording_enabled() {
-        Some(LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()))
-    } else {
-        None
-    };
+    let mut llvm_profiler = cgcx
+        .prof
+        .llvm_recording_enabled()
+        .then(|| LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()));
 
     let llvm_selfprofiler =
         llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());
@@ -494,7 +496,7 @@ pub(crate) unsafe fn llvm_optimize(
         llvm_plugins.as_ptr().cast(),
         llvm_plugins.len(),
     );
-    result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
+    result.into_result().map_err(|()| llvm_err(diag_handler, LlvmError::RunLlvmPasses))
 }
 
 // Unsafe due to LLVM calls.
@@ -547,8 +549,7 @@ pub(crate) fn link(
         let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
         let buffer = ModuleBuffer::new(module.module_llvm.llmod());
         linker.add(buffer.data()).map_err(|()| {
-            let msg = format!("failed to serialize module {:?}", module.name);
-            llvm_err(diag_handler, &msg)
+            llvm_err(diag_handler, LlvmError::SerializeModule { name: &module.name })
         })?;
     }
     drop(linker);
@@ -626,9 +627,8 @@ pub(crate) unsafe fn codegen(
                 let _timer = cgcx
                     .prof
                     .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
-                if let Err(e) = fs::write(&bc_out, data) {
-                    let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
-                    diag_handler.err(&msg);
+                if let Err(err) = fs::write(&bc_out, data) {
+                    diag_handler.emit_err(WriteBytecode { path: &bc_out, err });
                 }
             }
 
@@ -678,10 +678,9 @@ pub(crate) unsafe fn codegen(
                 record_artifact_size(&cgcx.prof, "llvm_ir", &out);
             }
 
-            result.into_result().map_err(|()| {
-                let msg = format!("failed to write LLVM IR to {}", out.display());
-                llvm_err(diag_handler, &msg)
-            })?;
+            result
+                .into_result()
+                .map_err(|()| llvm_err(diag_handler, LlvmError::WriteIr { path: &out }))?;
         }
 
         if config.emit_asm {
@@ -749,8 +748,8 @@ pub(crate) unsafe fn codegen(
 
             EmitObj::Bitcode => {
                 debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
-                if let Err(e) = link_or_copy(&bc_out, &obj_out) {
-                    diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+                if let Err(err) = link_or_copy(&bc_out, &obj_out) {
+                    diag_handler.emit_err(CopyBitcode { err });
                 }
 
                 if !config.emit_bc {
@@ -762,6 +761,7 @@ pub(crate) unsafe fn codegen(
             EmitObj::None => {}
         }
 
+        record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
         drop(handlers);
     }
 
@@ -909,7 +909,7 @@ unsafe fn embed_bitcode(
 
 // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
 // This is required to satisfy `dllimport` references to static data in .rlibs
-// when using MSVC linker.  We do this only for data, as linker can fix up
+// when using MSVC linker. We do this only for data, as linker can fix up
 // code references on its own.
 // See #26591, #27438
 fn create_msvc_imps(
@@ -975,3 +975,23 @@ fn record_artifact_size(
         self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
     }
 }
+
+fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, llmod: &llvm::Module) {
+    if !prof.enabled() {
+        return;
+    }
+
+    let raw_stats =
+        llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(&llmod, s) })
+            .expect("cannot get module instruction stats");
+
+    #[derive(serde::Deserialize)]
+    struct InstructionsStats {
+        module: String,
+        total: u64,
+    }
+
+    let InstructionsStats { module, total } =
+        serde_json::from_str(&raw_stats).expect("cannot parse llvm cgu instructions stats");
+    prof.artifact_size("cgu_instructions", module, total);
+}
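The new `record_llvm_cgu_instructions_stats` parses a small JSON blob produced by `LLVMRustModuleInstructionStats`. A standalone illustration of that parse, with the JSON shape inferred from the struct fields and made-up sample values:

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct InstructionsStats {
        module: String,
        total: u64,
    }

    fn main() {
        // Sample payload only; real values come from LLVMRustModuleInstructionStats.
        let raw = r#"{"module":"example_cgu","total":12345}"#;
        let stats: InstructionsStats = serde_json::from_str(raw).expect("valid stats JSON");
        assert_eq!(stats.module, "example_cgu");
        assert_eq!(stats.total, 12345);
    }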
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 77dd15ef4d8..580451ba265 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -20,6 +20,7 @@ use rustc_middle::ty::layout::{
 };
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
+use rustc_symbol_mangling::typeid::kcfi_typeid_for_fnabi;
 use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
@@ -225,9 +226,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         debug!("invoke {:?} with args ({:?})", llfn, args);
 
         let args = self.check_call("invoke", llty, llfn, args);
-        let bundle = funclet.map(|funclet| funclet.bundle());
-        let bundle = bundle.as_ref().map(|b| &*b.raw);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles = vec![funclet_bundle];
+
+        // Set KCFI operand bundle
+        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
+        let kcfi_bundle =
+            if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
+                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
+                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+            } else {
+                None
+            };
+        if kcfi_bundle.is_some() {
+            let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+            bundles.push(kcfi_bundle);
+        }
 
+        bundles.retain(|bundle| bundle.is_some());
         let invoke = unsafe {
             llvm::LLVMRustBuildInvoke(
                 self.llbuilder,
@@ -237,7 +254,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 args.len() as c_uint,
                 then,
                 catch,
-                bundle,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
                 UNNAMED,
             )
         };
@@ -483,7 +501,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             layout: TyAndLayout<'tcx>,
             offset: Size,
         ) {
-            if !scalar.is_always_valid(bx) {
+            if !scalar.is_uninit_valid() {
                 bx.noundef_metadata(load);
             }
 
@@ -493,7 +511,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                         bx.range_metadata(load, scalar.valid_range(bx));
                     }
                 }
-                abi::Pointer => {
+                abi::Pointer(_) => {
                     if !scalar.valid_range(bx).contains(0) {
                         bx.nonnull_metadata(load);
                     }
@@ -961,15 +979,20 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn cleanup_landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value) -> &'ll Value {
+    fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
         let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
         unsafe {
             llvm::LLVMSetCleanup(landing_pad, llvm::True);
         }
-        landing_pad
+        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
     }
 
-    fn resume(&mut self, exn: &'ll Value) {
+    fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
+        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let mut exn = self.const_poison(ty);
+        exn = self.insert_value(exn, exn0, 0);
+        exn = self.insert_value(exn, exn1, 1);
         unsafe {
             llvm::LLVMBuildResume(self.llbuilder, exn);
         }
@@ -1143,7 +1166,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 llfn,
                 args.as_ptr() as *const &llvm::Value,
                 args.len() as c_uint,
-                None,
+                [].as_ptr(),
+                0 as c_uint,
             );
         }
     }
@@ -1159,9 +1183,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         debug!("call {:?} with args ({:?})", llfn, args);
 
         let args = self.check_call("call", llty, llfn, args);
-        let bundle = funclet.map(|funclet| funclet.bundle());
-        let bundle = bundle.as_ref().map(|b| &*b.raw);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles = vec![funclet_bundle];
+
+        // Set KCFI operand bundle
+        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
+        let kcfi_bundle =
+            if self.tcx.sess.is_sanitizer_kcfi_enabled() && fn_abi.is_some() && is_indirect_call {
+                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap());
+                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+            } else {
+                None
+            };
+        if kcfi_bundle.is_some() {
+            let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+            bundles.push(kcfi_bundle);
+        }
 
+        bundles.retain(|bundle| bundle.is_some());
         let call = unsafe {
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
@@ -1169,7 +1209,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 llfn,
                 args.as_ptr() as *const &llvm::Value,
                 args.len() as c_uint,
-                bundle,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
             )
         };
         if let Some(fn_abi) = fn_abi {
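The builder now collects operand bundles into a `Vec` so a KCFI bundle can accompany the funclet bundle on indirect calls when `-Zsanitizer=kcfi` is enabled, while direct calls (where `LLVMIsAFunction` finds the callee) are left untouched. The intended effect in the emitted IR, sketched with a made-up type id:

    // An indirect call is expected to carry a kcfi operand bundle, roughly:
    //   call void %fn_ptr(i64 %x) [ "kcfi"(i32 1234567890) ]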
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 70ff5c9617b..6ee2a05ffd7 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -13,7 +13,7 @@ use crate::value::Value;
 use rustc_codegen_ssa::traits::*;
 
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
 
 /// Codegens a reference to a fn/method item, monomorphizing and
 /// inlining as it goes.
@@ -49,8 +49,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
         let llptrty = fn_abi.ptr_to_llvm_type(cx);
 
         // This is subtle and surprising, but sometimes we have to bitcast
-        // the resulting fn pointer.  The reason has to do with external
-        // functions.  If you have two crates that both bind the same C
+        // the resulting fn pointer. The reason has to do with external
+        // functions. If you have two crates that both bind the same C
         // library, they may not use precisely the same types: for
         // example, they will probably each declare their own structs,
         // which are distinct types from LLVM's point of view (nominal
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index acee9134fb9..efa0c13226e 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -10,6 +10,7 @@ use crate::value::Value;
 use rustc_ast::Mutability;
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_hir::def_id::DefId;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
@@ -129,6 +130,10 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMGetUndef(t) }
     }
 
+    fn const_poison(&self, t: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMGetPoison(t) }
+    }
+
     fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
         unsafe { llvm::LLVMConstInt(t, i as u64, True) }
     }
@@ -236,7 +241,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             Scalar::Int(int) => {
                 let data = int.assert_bits(layout.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
-                if layout.primitive() == Pointer {
+                if matches!(layout.primitive(), Pointer(_)) {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
@@ -252,8 +257,13 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                             Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
                             _ => self.static_addr_of(init, alloc.align, None),
                         };
-                        if !self.sess().fewer_names() {
-                            llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
+                        if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() {
+                            let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+                                let mut hasher = StableHasher::new();
+                                alloc.hash_stable(&mut hcx, &mut hasher);
+                                hasher.finish::<u128>()
+                            });
+                            llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes());
                         }
                         (value, AddressSpace::DATA)
                     }
@@ -284,7 +294,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         1,
                     )
                 };
-                if layout.primitive() != Pointer {
+                if !matches!(layout.primitive(), Pointer(_)) {
                     unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 3c324359565..9116e71beac 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -1,9 +1,8 @@
 use crate::base;
 use crate::common::{self, CodegenCx};
 use crate::debuginfo;
-use crate::errors::{InvalidMinimumAlignment, LinkageConstOrMutType, SymbolAlreadyDefined};
+use crate::errors::{InvalidMinimumAlignment, SymbolAlreadyDefined};
 use crate::llvm::{self, True};
-use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -13,7 +12,7 @@ use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::interpret::{
-    read_target_uint, Allocation, ConstAllocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer,
+    read_target_uint, Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer,
     Scalar as InterpScalar,
 };
 use rustc_middle::mir::mono::MonoItem;
@@ -21,9 +20,7 @@ use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::Lto;
-use rustc_target::abi::{
-    AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
-};
+use rustc_target::abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange};
 use std::ops::Range;
 
 pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
@@ -58,13 +55,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
         // to avoid the cost of generating large complex const expressions.
         // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
         // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
-        let max = if llvm_util::get_version() < (14, 0, 0) {
-            // Generating partially-uninit consts inhibits optimizations in LLVM < 14.
-            // See https://github.com/rust-lang/rust/issues/84565.
-            1
-        } else {
-            cx.sess().opts.unstable_opts.uninit_const_chunk_threshold
-        };
+        let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold;
         let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
 
         if allow_uninit_chunks {
@@ -98,12 +89,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
         .expect("const_alloc_to_llvm: could not read relocation pointer")
             as u64;
 
-        let address_space = match cx.tcx.global_alloc(alloc_id) {
-            GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
-            GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
-                AddressSpace::DATA
-            }
-        };
+        let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);
 
         llvals.push(cx.scalar_to_backend(
             InterpScalar::from_pointer(
@@ -111,7 +97,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                 &cx.tcx,
             ),
             Scalar::Initialized {
-                value: Primitive::Pointer,
+                value: Primitive::Pointer(address_space),
                 valid_range: WrappingRange::full(dl.pointer_size),
             },
             cx.type_i8p_ext(address_space),
@@ -140,7 +126,7 @@ pub fn codegen_static_initializer<'ll, 'tcx>(
 fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
     // The target may require greater alignment for globals than the type does.
     // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
-    // which can force it to be smaller.  Rust doesn't support this yet.
+    // which can force it to be smaller. Rust doesn't support this yet.
     if let Some(min) = cx.sess().target.min_global_align {
         match Align::from_bits(min) {
             Ok(min) => align = align.max(min),
@@ -162,26 +148,16 @@ fn check_and_apply_linkage<'ll, 'tcx>(
     def_id: DefId,
 ) -> &'ll Value {
     let llty = cx.layout_of(ty).llvm_type(cx);
-    if let Some(linkage) = attrs.linkage {
+    if let Some(linkage) = attrs.import_linkage {
         debug!("get_static: sym={} linkage={:?}", sym, linkage);
 
-        // If this is a static with a linkage specified, then we need to handle
-        // it a little specially. The typesystem prevents things like &T and
-        // extern "C" fn() from being non-null, so we can't just declare a
-        // static and call it a day. Some linkages (like weak) will make it such
-        // that the static actually has a null value.
-        let llty2 = if let ty::RawPtr(ref mt) = ty.kind() {
-            cx.layout_of(mt.ty).llvm_type(cx)
-        } else {
-            cx.sess().emit_fatal(LinkageConstOrMutType { span: cx.tcx.def_span(def_id) })
-        };
         unsafe {
             // Declare a symbol `foo` with the desired linkage.
-            let g1 = cx.declare_global(sym, llty2);
+            let g1 = cx.declare_global(sym, cx.type_i8());
             llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
 
             // Declare an internal global `extern_with_linkage_foo` which
-            // is initialized with the address of `foo`.  If `foo` is
+            // is initialized with the address of `foo`. If `foo` is
             // discarded during linking (for example, if `foo` has weak
             // linkage and there are no definitions), then
             // `extern_with_linkage_foo` will instead be initialized to
@@ -195,7 +171,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
                 })
             });
             llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
-            llvm::LLVMSetInitializer(g2, g1);
+            llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty));
             g2
         }
     } else if cx.tcx.sess.target.arch == "x86" &&
@@ -545,7 +521,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
 
                 // The semantics of #[used] in Rust only require the symbol to make it into the
                 // object file. It is explicitly allowed for the linker to strip the symbol if it
-                // is dead, which means we are allowed use `llvm.compiler.used` instead of
+                // is dead, which means we are allowed to use `llvm.compiler.used` instead of
                 // `llvm.used` here.
                 //
                 // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
@@ -556,7 +532,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
                 // That said, we only ever emit these when compiling for ELF targets, unless
                 // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
                 // on other targets, in particular MachO targets have *their* static constructor
-                // lists broken if `llvm.compiler.used` is emitted rather than llvm.used. However,
+                // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
                 // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
                 // so we don't need to take care of it here.
                 self.add_compiler_used_global(g);
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 4dcc7cd5447..f0d729d4779 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -3,7 +3,6 @@ use crate::back::write::to_llvm_code_model;
 use crate::callee::get_fn;
 use crate::coverageinfo;
 use crate::debuginfo;
-use crate::errors::BranchProtectionRequiresAArch64;
 use crate::llvm;
 use crate::llvm_util;
 use crate::type_::Type;
@@ -144,24 +143,15 @@ pub unsafe fn create_module<'ll>(
 
     let mut target_data_layout = sess.target.data_layout.to_string();
     let llvm_version = llvm_util::get_version();
-    if llvm_version < (14, 0, 0) {
-        if sess.target.llvm_target == "i686-pc-windows-msvc"
-            || sess.target.llvm_target == "i586-pc-windows-msvc"
-        {
-            target_data_layout =
-                "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
-                    .to_string();
-        }
-        if sess.target.arch == "wasm32" {
-            target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", "");
-        }
-    }
     if llvm_version < (16, 0, 0) {
         if sess.target.arch == "s390x" {
+            // LLVM 16 data layout changed to always set 64-bit vector alignment,
+            // which is conditional in earlier LLVM versions.
+            // See https://reviews.llvm.org/D131158 for the discussion.
             target_data_layout = target_data_layout.replace("-v128:64", "");
-        }
-
-        if sess.target.arch == "riscv64" {
+        } else if sess.target.arch == "riscv64" {
+            // LLVM 16 introduced this change so as to produce more efficient code.
+            // See https://reviews.llvm.org/D116735 for the discussion.
             target_data_layout = target_data_layout.replace("-n32:64-", "-n64-");
         }
     }
@@ -192,7 +182,7 @@ pub unsafe fn create_module<'ll>(
         //
         // FIXME(#34960)
         let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
-        let custom_llvm_used = cfg_llvm_root.trim() != "";
+        let custom_llvm_used = !cfg_llvm_root.trim().is_empty();
 
         if !custom_llvm_used && target_data_layout != llvm_data_layout {
             bug!(
@@ -250,6 +240,11 @@ pub unsafe fn create_module<'ll>(
         );
     }
 
+    if sess.is_sanitizer_kcfi_enabled() {
+        let kcfi = "kcfi\0".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
+    }
+
     // Control Flow Guard is currently only supported by the MSVC linker on Windows.
     if sess.target.is_like_msvc {
         match sess.opts.cg.control_flow_guard {
@@ -276,34 +271,43 @@ pub unsafe fn create_module<'ll>(
     }
 
     if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
-        if sess.target.arch != "aarch64" {
-            sess.emit_err(BranchProtectionRequiresAArch64);
+        let behavior = if llvm_version >= (15, 0, 0) {
+            llvm::LLVMModFlagBehavior::Min
         } else {
+            llvm::LLVMModFlagBehavior::Error
+        };
+
+        if sess.target.arch == "aarch64" {
             llvm::LLVMRustAddModuleFlag(
                 llmod,
-                llvm::LLVMModFlagBehavior::Error,
+                behavior,
                 "branch-target-enforcement\0".as_ptr().cast(),
                 bti.into(),
             );
             llvm::LLVMRustAddModuleFlag(
                 llmod,
-                llvm::LLVMModFlagBehavior::Error,
+                behavior,
                 "sign-return-address\0".as_ptr().cast(),
                 pac_ret.is_some().into(),
             );
             let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
             llvm::LLVMRustAddModuleFlag(
                 llmod,
-                llvm::LLVMModFlagBehavior::Error,
+                behavior,
                 "sign-return-address-all\0".as_ptr().cast(),
                 pac_opts.leaf.into(),
             );
             llvm::LLVMRustAddModuleFlag(
                 llmod,
-                llvm::LLVMModFlagBehavior::Error,
+                behavior,
                 "sign-return-address-with-bkey\0".as_ptr().cast(),
                 u32::from(pac_opts.key == PAuthKey::B),
             );
+        } else {
+            bug!(
+                "branch-protection used on non-AArch64 target; \
+                  this should be checked in rustc_session."
+            );
         }
     }
 
@@ -403,12 +407,8 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 
         let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
 
-        let coverage_cx = if tcx.sess.instrument_coverage() {
-            let covctx = coverageinfo::CrateCoverageContext::new();
-            Some(covctx)
-        } else {
-            None
-        };
+        let coverage_cx =
+            tcx.sess.instrument_coverage().then(coverageinfo::CrateCoverageContext::new);
 
         let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
             let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
@@ -520,14 +520,9 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let tcx = self.tcx;
         let llfn = match tcx.lang_items().eh_personality() {
             Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
-                ty::Instance::resolve(
-                    tcx,
-                    ty::ParamEnv::reveal_all(),
-                    def_id,
-                    tcx.intern_substs(&[]),
-                )
-                .unwrap()
-                .unwrap(),
+                ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
+                    .unwrap()
+                    .unwrap(),
             ),
             _ => {
                 let name = if wants_msvc_seh(self.sess()) {
@@ -740,9 +735,13 @@ impl<'ll> CodegenCx<'ll, '_> {
 
         ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
         ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+
         ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
 
+        ifn!("llvm.roundeven.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.roundeven.f64", fn(t_f64) -> t_f64);
+
         ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
         ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 86580d05d41..240a9d2f371 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,6 +1,5 @@
 use crate::common::CodegenCx;
 use crate::coverageinfo;
-use crate::errors::InstrumentCoverageRequiresLLVM12;
 use crate::llvm;
 
 use llvm::coverageinfo::CounterMappingRegion;
@@ -8,7 +7,7 @@ use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
 use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir::def::DefKind;
-use rustc_hir::def_id::DefIdSet;
+use rustc_hir::def_id::DefId;
 use rustc_llvm::RustString;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -19,8 +18,8 @@ use std::ffi::CString;
 
 /// Generates and exports the Coverage Map.
 ///
-/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
-/// 5 (LLVM 12, only) and 6 (zero-based encoded as 4 and 5, respectively), as defined at
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format version
+/// 6 (zero-based encoded as 5), as defined at
 /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
 /// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
 /// bundled with Rust's fork of LLVM.
@@ -30,16 +29,13 @@ use std::ffi::CString;
 /// implementing this Rust version, and though the format documentation is very explicit and
 /// detailed, some undocumented details in Clang's implementation (that may or may not be important)
 /// were also replicated for Rust's Coverage Map.
-pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
     let tcx = cx.tcx;
 
-    // Ensure the installed version of LLVM supports at least Coverage Map
-    // Version 5 (encoded as a zero-based value: 4), which was introduced with
-    // LLVM 12.
+    // Ensure the installed version of LLVM supports Coverage Map Version 6
+    // (encoded as a zero-based value: 5), which was introduced with LLVM 13.
     let version = coverageinfo::mapping_version();
-    if version < 4 {
-        tcx.sess.emit_fatal(InstrumentCoverageRequiresLLVM12);
-    }
+    assert_eq!(version, 5, "The `CoverageMappingVersion` exposed by `llvm-wrapper` is out of sync");
 
     debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
 
@@ -61,7 +57,7 @@ pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
         return;
     }
 
-    let mut mapgen = CoverageMapGenerator::new(tcx, version);
+    let mut mapgen = CoverageMapGenerator::new(tcx);
 
     // Encode coverage mappings and generate function records
     let mut function_data = Vec::new();
@@ -124,25 +120,18 @@ struct CoverageMapGenerator {
 }
 
 impl CoverageMapGenerator {
-    fn new(tcx: TyCtxt<'_>, version: u32) -> Self {
+    fn new(tcx: TyCtxt<'_>) -> Self {
         let mut filenames = FxIndexSet::default();
-        if version >= 5 {
-            // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
-            // requires setting the first filename to the compilation directory.
-            // Since rustc generates coverage maps with relative paths, the
-            // compilation directory can be combined with the relative paths
-            // to get absolute paths, if needed.
-            let working_dir = tcx
-                .sess
-                .opts
-                .working_dir
-                .remapped_path_if_available()
-                .to_string_lossy()
-                .to_string();
-            let c_filename =
-                CString::new(working_dir).expect("null error converting filename to C string");
-            filenames.insert(c_filename);
-        }
+        // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
+        // requires setting the first filename to the compilation directory.
+        // Since rustc generates coverage maps with relative paths, the
+        // compilation directory can be combined with the relative paths
+        // to get absolute paths, if needed.
+        let working_dir =
+            tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy().to_string();
+        let c_filename =
+            CString::new(working_dir).expect("null error converting filename to C string");
+        filenames.insert(c_filename);
         Self { filenames }
     }
 
@@ -284,14 +273,14 @@ fn save_function_record(
 /// "code coverage dead code cgu" during the partitioning process. This prevents us from generating
 /// code regions for the same function more than once which can lead to linker errors regarding
 /// duplicate symbols.
-fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
     assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
 
     let tcx = cx.tcx;
 
     let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
 
-    let eligible_def_ids: DefIdSet = tcx
+    let eligible_def_ids: Vec<DefId> = tcx
         .mir_keys(())
         .iter()
         .filter_map(|local_def_id| {
@@ -306,9 +295,8 @@ fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
                 DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator
             ) {
                 return None;
-            } else if ignore_unused_generics
-                && tcx.generics_of(def_id).requires_monomorphization(tcx)
-            {
+            }
+            if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) {
                 return None;
             }
             Some(local_def_id.to_def_id())
@@ -317,7 +305,9 @@ fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
 
     let codegenned_def_ids = tcx.codegened_and_inlined_items(());
 
-    for &non_codegenned_def_id in eligible_def_ids.difference(codegenned_def_ids) {
+    for non_codegenned_def_id in
+        eligible_def_ids.into_iter().filter(|id| !codegenned_def_ids.contains(id))
+    {
         let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
 
         // If a function is marked `#[no_coverage]`, then skip generating a
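
    As the comment above notes, coverage mapping format 6 records the compilation
    directory as the first filename so that the relative paths rustc emits can be
    rebased into absolute ones. A minimal, standalone sketch of that rebasing (the
    directory and file path are made up for illustration):

        use std::path::Path;

        fn main() {
            // filenames[0] in the coverage map: the (possibly remapped) compilation directory.
            let compilation_dir = Path::new("/home/user/project");
            // A relative path of the kind rustc records for covered files.
            let file = Path::new("src/lib.rs");
            assert!(file.is_relative());
            // Consumers such as `llvm-cov` can join the two to recover an absolute path.
            println!("{}", compilation_dir.join(file).display());
        }
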
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index ace15cfb024..3dc0ac03312 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -27,8 +27,6 @@ use rustc_middle::ty::Instance;
 use std::cell::RefCell;
 use std::ffi::CString;
 
-use std::iter;
-
 pub mod mapgen;
 
 const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
@@ -201,7 +199,7 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
         tcx.symbol_name(instance).name,
         cx.fn_abi_of_fn_ptr(
             ty::Binder::dummy(tcx.mk_fn_sig(
-                iter::once(tcx.mk_unit()),
+                [tcx.mk_unit()],
                 tcx.mk_unit(),
                 false,
                 hir::Unsafety::Unsafe,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 80fd9726fc7..ff2b005d757 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -5,12 +5,12 @@ use crate::llvm;
 use crate::builder::Builder;
 use crate::common::CodegenCx;
 use crate::value::Value;
+use rustc_ast::attr;
 use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
 use rustc_session::config::{CrateType, DebugInfo};
-
 use rustc_span::symbol::sym;
 use rustc_span::DebuggerVisualizerType;
 
@@ -87,7 +87,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '
 
 pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
     let omit_gdb_pretty_printer_section =
-        cx.tcx.sess.contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
+        attr::contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
 
     // To ensure the section `__rustc_debug_gdb_scripts_section__` will not create
     // ODR violations at link time, this section will not be emitted for rlibs since
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index d87117dffdc..c1b3f34e5a6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -27,9 +27,7 @@ use rustc_codegen_ssa::traits::*;
 use rustc_fs_util::path_to_c_string;
 use rustc_hir::def::CtorKind;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::bug;
-use rustc_middle::mir::{self, GeneratorLayout};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::subst::GenericArgKind;
 use rustc_middle::ty::{
@@ -113,7 +111,7 @@ macro_rules! return_if_di_node_created_in_meantime {
 
 /// Extract size and alignment from a TyAndLayout.
 #[inline]
-fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) {
+fn size_and_align_of(ty_and_layout: TyAndLayout<'_>) -> (Size, Align) {
     (ty_and_layout.size, ty_and_layout.align.abi)
 }
 
@@ -134,7 +132,7 @@ fn build_fixed_size_array_di_node<'ll, 'tcx>(
 
     let (size, align) = cx.size_and_align_of(array_type);
 
-    let upper_bound = len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
+    let upper_bound = len.eval_target_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
 
     let subrange =
         unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
@@ -784,10 +782,10 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
     codegen_unit_name: &str,
     debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
 ) -> &'ll DIDescriptor {
-    let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
-        Some(ref path) => path.clone(),
-        None => PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()),
-    };
+    let mut name_in_debuginfo = tcx
+        .sess
+        .local_crate_source_file()
+        .unwrap_or_else(|| PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()));
 
     // To avoid breaking split DWARF, we need to ensure that each codegen unit
     // has a unique `DW_AT_name`. This is because there's a remote chance that
@@ -1026,33 +1024,6 @@ fn build_struct_type_di_node<'ll, 'tcx>(
 // Tuples
 //=-----------------------------------------------------------------------------
 
-/// Returns names of captured upvars for closures and generators.
-///
-/// Here are some examples:
-///  - `name__field1__field2` when the upvar is captured by value.
-///  - `_ref__name__field` when the upvar is captured by reference.
-///
-/// For generators this only contains upvars that are shared by all states.
-fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'_>, def_id: DefId) -> SmallVec<String> {
-    let body = tcx.optimized_mir(def_id);
-
-    body.var_debug_info
-        .iter()
-        .filter_map(|var| {
-            let is_ref = match var.value {
-                mir::VarDebugInfoContents::Place(place) if place.local == mir::Local::new(1) => {
-                    // The projection is either `[.., Field, Deref]` or `[.., Field]`. It
-                    // implies whether the variable is captured by value or by reference.
-                    matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref)
-                }
-                _ => return None,
-            };
-            let prefix = if is_ref { "_ref__" } else { "" };
-            Some(prefix.to_owned() + var.name.as_str())
-        })
-        .collect()
-}
-
 /// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or generator.
 /// For a generator, this will handle upvars shared by all states.
 fn build_upvar_field_di_nodes<'ll, 'tcx>(
@@ -1083,7 +1054,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
             .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
     );
 
-    let capture_names = closure_saved_names_of_captured_variables(cx.tcx, def_id);
+    let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
     let layout = cx.layout_of(closure_or_generator_ty);
 
     up_var_tys
@@ -1229,43 +1200,6 @@ fn build_union_type_di_node<'ll, 'tcx>(
     )
 }
 
-// FIXME(eddyb) maybe precompute this? Right now it's computed once
-// per generator monomorphization, but it doesn't depend on substs.
-fn generator_layout_and_saved_local_names<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    def_id: DefId,
-) -> (&'tcx GeneratorLayout<'tcx>, IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>) {
-    let body = tcx.optimized_mir(def_id);
-    let generator_layout = body.generator_layout().unwrap();
-    let mut generator_saved_local_names = IndexVec::from_elem(None, &generator_layout.field_tys);
-
-    let state_arg = mir::Local::new(1);
-    for var in &body.var_debug_info {
-        let mir::VarDebugInfoContents::Place(place) = &var.value else { continue };
-        if place.local != state_arg {
-            continue;
-        }
-        match place.projection[..] {
-            [
-                // Deref of the `Pin<&mut Self>` state argument.
-                mir::ProjectionElem::Field(..),
-                mir::ProjectionElem::Deref,
-                // Field of a variant of the state.
-                mir::ProjectionElem::Downcast(_, variant),
-                mir::ProjectionElem::Field(field, _),
-            ] => {
-                let name = &mut generator_saved_local_names
-                    [generator_layout.variant_fields[variant][field]];
-                if name.is_none() {
-                    name.replace(var.name);
-                }
-            }
-            _ => {}
-        }
-    }
-    (generator_layout, generator_saved_local_names)
-}
-
 /// Computes the type parameters for a type, if any, for the given metadata.
 fn build_generic_type_param_di_nodes<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
@@ -1565,6 +1499,11 @@ pub fn create_vtable_di_node<'ll, 'tcx>(
         return;
     }
 
+    // When full debuginfo is enabled, we want to try and prevent vtables from being
+    // merged. Otherwise debuggers will have a hard time mapping from dyn pointer
+    // to concrete type.
+    llvm::SetUnnamedAddress(vtable, llvm::UnnamedAddr::No);
+
     let vtable_name =
         compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable);
     let vtable_type_di_node = build_vtable_type_di_node(cx, ty, poly_trait_ref);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 53e8a291d1e..69443b9b828 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -22,9 +22,9 @@ use crate::{
     common::CodegenCx,
     debuginfo::{
         metadata::{
-            build_field_di_node, closure_saved_names_of_captured_variables,
+            build_field_di_node,
             enums::{tag_base_type, DiscrResult},
-            file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+            file_metadata, size_and_align_of, type_di_node,
             type_map::{self, Stub, UniqueTypeId},
             unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
             UNKNOWN_LINE_NUMBER,
@@ -677,9 +677,9 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
     };
 
     let (generator_layout, state_specific_upvar_names) =
-        generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+        cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
 
-    let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+    let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
     let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
     let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 564ab351bd4..54e850f2599 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -122,7 +122,8 @@ fn tag_base_type<'ll, 'tcx>(
                 Primitive::Int(t, _) => t,
                 Primitive::F32 => Integer::I32,
                 Primitive::F64 => Integer::I64,
-                Primitive::Pointer => {
+                // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+                Primitive::Pointer(_) => {
                     // If the niche is the NULL value of a reference, then `discr_enum_ty` will be
                     // a RawPtr. CodeView doesn't know what to do with enums whose base type is a
                     // pointer so we fix this up to just be `usize`.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index becbccc434d..978141917c6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -4,9 +4,8 @@ use crate::{
     common::CodegenCx,
     debuginfo::{
         metadata::{
-            closure_saved_names_of_captured_variables,
             enums::tag_base_type,
-            file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+            file_metadata, size_and_align_of, type_di_node,
             type_map::{self, Stub, StubInfo, UniqueTypeId},
             unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS,
             UNKNOWN_LINE_NUMBER,
@@ -157,7 +156,7 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
         ),
         |cx, generator_type_di_node| {
             let (generator_layout, state_specific_upvar_names) =
-                generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+                cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
 
             let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
                 bug!(
@@ -167,7 +166,7 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
             };
 
             let common_upvar_names =
-                closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+                cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
 
             // Build variant struct types
             let variant_struct_type_di_nodes: SmallVec<_> = variants
@@ -439,6 +438,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
 ///         DW_TAG_structure_type            (type of variant 1)
 ///         DW_TAG_structure_type            (type of variant 2)
 ///         DW_TAG_structure_type            (type of variant 3)
+/// ```
 struct VariantMemberInfo<'a, 'll> {
     variant_index: VariantIdx,
     variant_name: Cow<'a, str>,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index ca7a07d8391..5392534cfcb 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -27,7 +27,7 @@ use rustc_index::vec::IndexVec;
 use rustc_middle::mir;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
-use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitable};
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt};
 use rustc_session::config::{self, DebugInfo};
 use rustc_session::Session;
 use rustc_span::symbol::Symbol;
@@ -508,7 +508,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
                         instance.substs,
                         ty::ParamEnv::reveal_all(),
-                        cx.tcx.type_of(impl_def_id),
+                        cx.tcx.type_of(impl_def_id).skip_binder(),
                     );
 
                     // Only "class" methods are generally understood by LLVM,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 5cd0e1cb63a..6bcd3e5bf58 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -5,7 +5,7 @@ use super::CodegenUnitDebugContext;
 
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
-use rustc_middle::ty::{self, DefIdTree, Ty};
+use rustc_middle::ty::{self, Ty};
 use trace;
 
 use crate::common::CodegenCx;
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index dc21a02cec4..6a575095f7e 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -20,7 +20,7 @@ use crate::type_::Type;
 use crate::value::Value;
 use rustc_codegen_ssa::traits::TypeMembershipMethods;
 use rustc_middle::ty::Ty;
-use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi};
 use smallvec::SmallVec;
 
 /// Declare a function.
@@ -136,6 +136,11 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             self.set_type_metadata(llfn, typeid);
         }
 
+        if self.tcx.sess.is_sanitizer_kcfi_enabled() {
+            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
+            self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+        }
+
         llfn
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index 0fafc214f2f..bae88d94293 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -1,10 +1,12 @@
 use std::borrow::Cow;
-
-use rustc_errors::fluent;
-use rustc_errors::DiagnosticBuilder;
-use rustc_errors::ErrorGuaranteed;
-use rustc_errors::Handler;
-use rustc_errors::IntoDiagnostic;
+use std::ffi::CString;
+use std::path::Path;
+
+use crate::fluent_generated as fluent;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{
+    DiagnosticBuilder, EmissionGuarantee, ErrorGuaranteed, Handler, IntoDiagnostic,
+};
 use rustc_macros::{Diagnostic, Subdiagnostic};
 use rustc_span::Span;
 
@@ -26,9 +28,9 @@ pub(crate) struct UnknownCTargetFeature<'a> {
 
 #[derive(Subdiagnostic)]
 pub(crate) enum PossibleFeature<'a> {
-    #[help(possible_feature)]
+    #[help(codegen_llvm_possible_feature)]
     Some { rust_feature: &'a str },
-    #[help(consider_filing_feature_request)]
+    #[help(codegen_llvm_consider_filing_feature_request)]
     None,
 }
 
@@ -40,10 +42,6 @@ pub(crate) struct ErrorCreatingImportLibrary<'a> {
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_llvm_instrument_coverage_requires_llvm_12)]
-pub(crate) struct InstrumentCoverageRequiresLLVM12;
-
-#[derive(Diagnostic)]
 #[diag(codegen_llvm_symbol_already_defined)]
 pub(crate) struct SymbolAlreadyDefined<'a> {
     #[primary_span]
@@ -52,33 +50,16 @@ pub(crate) struct SymbolAlreadyDefined<'a> {
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_llvm_branch_protection_requires_aarch64)]
-pub(crate) struct BranchProtectionRequiresAArch64;
-
-#[derive(Diagnostic)]
 #[diag(codegen_llvm_invalid_minimum_alignment)]
 pub(crate) struct InvalidMinimumAlignment {
     pub err: String,
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_llvm_linkage_const_or_mut_type)]
-pub(crate) struct LinkageConstOrMutType {
-    #[primary_span]
-    pub span: Span,
-}
-
-#[derive(Diagnostic)]
 #[diag(codegen_llvm_sanitizer_memtag_requires_mte)]
 pub(crate) struct SanitizerMemtagRequiresMte;
 
 #[derive(Diagnostic)]
-#[diag(codegen_llvm_archive_build_failure)]
-pub(crate) struct ArchiveBuildFailure {
-    pub error: std::io::Error,
-}
-
-#[derive(Diagnostic)]
 #[diag(codegen_llvm_error_writing_def_file)]
 pub(crate) struct ErrorWritingDEFFile {
     pub error: std::io::Error,
@@ -98,20 +79,22 @@ pub(crate) struct DlltoolFailImportLibrary<'a> {
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_llvm_unknown_archive_kind)]
-pub(crate) struct UnknownArchiveKind<'a> {
-    pub kind: &'a str,
-}
-
-#[derive(Diagnostic)]
 #[diag(codegen_llvm_dynamic_linking_with_lto)]
 #[note]
 pub(crate) struct DynamicLinkingWithLTO;
 
-#[derive(Diagnostic)]
-#[diag(codegen_llvm_fail_parsing_target_machine_config_to_target_machine)]
-pub(crate) struct FailParsingTargetMachineConfigToTargetMachine {
-    pub error: String,
+pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>);
+
+impl<EM: EmissionGuarantee> IntoDiagnostic<'_, EM> for ParseTargetMachineConfig<'_> {
+    fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, EM> {
+        let diag: DiagnosticBuilder<'_, EM> = self.0.into_diagnostic(sess);
+        let (message, _) = diag.styled_message().first().expect("`LlvmError` with no message");
+        let message = sess.eagerly_translate_to_string(message.clone(), diag.args());
+
+        let mut diag = sess.struct_diagnostic(fluent::codegen_llvm_parse_target_machine_config);
+        diag.set_arg("error", message);
+        diag
+    }
 }
 
 pub(crate) struct TargetFeatureDisableOrEnable<'a> {
@@ -137,3 +120,99 @@ impl IntoDiagnostic<'_, ErrorGuaranteed> for TargetFeatureDisableOrEnable<'_> {
         diag
     }
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_disallowed)]
+pub(crate) struct LtoDisallowed;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_dylib)]
+pub(crate) struct LtoDylib;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_bitcode_from_rlib)]
+pub(crate) struct LtoBitcodeFromRlib {
+    pub llvm_err: String,
+}
+
+#[derive(Diagnostic)]
+pub enum LlvmError<'a> {
+    #[diag(codegen_llvm_write_output)]
+    WriteOutput { path: &'a Path },
+    #[diag(codegen_llvm_target_machine)]
+    CreateTargetMachine { triple: SmallCStr },
+    #[diag(codegen_llvm_run_passes)]
+    RunLlvmPasses,
+    #[diag(codegen_llvm_serialize_module)]
+    SerializeModule { name: &'a str },
+    #[diag(codegen_llvm_write_ir)]
+    WriteIr { path: &'a Path },
+    #[diag(codegen_llvm_prepare_thin_lto_context)]
+    PrepareThinLtoContext,
+    #[diag(codegen_llvm_load_bitcode)]
+    LoadBitcode { name: CString },
+    #[diag(codegen_llvm_write_thinlto_key)]
+    WriteThinLtoKey { err: std::io::Error },
+    #[diag(codegen_llvm_multiple_source_dicompileunit)]
+    MultipleSourceDiCompileUnit,
+    #[diag(codegen_llvm_prepare_thin_lto_module)]
+    PrepareThinLtoModule,
+    #[diag(codegen_llvm_parse_bitcode)]
+    ParseBitcode,
+}
+
+pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String);
+
+impl<EM: EmissionGuarantee> IntoDiagnostic<'_, EM> for WithLlvmError<'_> {
+    fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, EM> {
+        use LlvmError::*;
+        let msg_with_llvm_err = match &self.0 {
+            WriteOutput { .. } => fluent::codegen_llvm_write_output_with_llvm_err,
+            CreateTargetMachine { .. } => fluent::codegen_llvm_target_machine_with_llvm_err,
+            RunLlvmPasses => fluent::codegen_llvm_run_passes_with_llvm_err,
+            SerializeModule { .. } => fluent::codegen_llvm_serialize_module_with_llvm_err,
+            WriteIr { .. } => fluent::codegen_llvm_write_ir_with_llvm_err,
+            PrepareThinLtoContext => fluent::codegen_llvm_prepare_thin_lto_context_with_llvm_err,
+            LoadBitcode { .. } => fluent::codegen_llvm_load_bitcode_with_llvm_err,
+            WriteThinLtoKey { .. } => fluent::codegen_llvm_write_thinlto_key_with_llvm_err,
+            MultipleSourceDiCompileUnit => {
+                fluent::codegen_llvm_multiple_source_dicompileunit_with_llvm_err
+            }
+            PrepareThinLtoModule => fluent::codegen_llvm_prepare_thin_lto_module_with_llvm_err,
+            ParseBitcode => fluent::codegen_llvm_parse_bitcode_with_llvm_err,
+        };
+        let mut diag = self.0.into_diagnostic(sess);
+        diag.set_primary_message(msg_with_llvm_err);
+        diag.set_arg("llvm_err", self.1);
+        diag
+    }
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_from_llvm_optimization_diag)]
+pub(crate) struct FromLlvmOptimizationDiag<'a> {
+    pub filename: &'a str,
+    pub line: std::ffi::c_uint,
+    pub column: std::ffi::c_uint,
+    pub pass_name: &'a str,
+    pub message: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_from_llvm_diag)]
+pub(crate) struct FromLlvmDiag {
+    pub message: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_write_bytecode)]
+pub(crate) struct WriteBytecode<'a> {
+    pub path: &'a Path,
+    pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_copy_bitcode)]
+pub(crate) struct CopyBitcode {
+    pub err: std::io::Error,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 2f5dd519b26..012e25884ca 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -8,8 +8,8 @@ use crate::va_arg::emit_va_arg;
 use crate::value::Value;
 
 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
-use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
@@ -22,7 +22,6 @@ use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
 
 use std::cmp::Ordering;
-use std::iter;
 
 fn get_simple_intrinsic<'ll>(
     cx: &CodegenCx<'ll, '_>,
@@ -72,6 +71,8 @@ fn get_simple_intrinsic<'ll>(
         sym::roundf32 => "llvm.round.f32",
         sym::roundf64 => "llvm.round.f64",
         sym::ptr_mask => "llvm.ptrmask",
+        sym::roundevenf32 => "llvm.roundeven.f32",
+        sym::roundevenf64 => "llvm.roundeven.f64",
         _ => return None,
     };
     Some(cx.get_intrinsic(llvm_name))
@@ -149,7 +150,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                                     emit_va_arg(self, args[0], ret_ty)
                                 }
                             }
-                            Primitive::F64 | Primitive::Pointer => {
+                            Primitive::F64 | Primitive::Pointer(_) => {
                                 emit_va_arg(self, args[0], ret_ty)
                             }
                             // `va_arg` should never be used with the return type f32.
@@ -284,15 +285,11 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                         _ => bug!(),
                     },
                     None => {
-                        span_invalid_monomorphization_error(
-                            tcx.sess,
+                        tcx.sess.emit_err(InvalidMonomorphization::BasicIntegerType {
                             span,
-                            &format!(
-                                "invalid monomorphization of `{}` intrinsic: \
-                                      expected basic integer type, found `{}`",
-                                name, ty
-                            ),
-                        );
+                            name,
+                            ty,
+                        });
                         return;
                     }
                 }
@@ -381,7 +378,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
             }
 
-            _ => bug!("unknown intrinsic '{}'", name),
+            _ => bug!("unknown intrinsic '{}' -- should it have been lowered earlier?", name),
         };
 
         if !fn_abi.ret.is_ignore() {
@@ -424,7 +421,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         typeid: &'ll Value,
     ) -> Self::Value {
         let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
-        self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid])
+        let type_checked_load =
+            self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
+        self.extract_value(type_checked_load, 0)
     }
 
     fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
@@ -565,7 +564,7 @@ fn codegen_msvc_try<'ll>(
         // module.
         //
         // When modifying, make sure that the type_name string exactly matches
-        // the one used in src/libpanic_unwind/seh.rs.
+        // the one used in library/panic_unwind/src/seh.rs.
         let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
         let type_name = bx.const_bytes(b"rust_panic\0");
         let type_info =
@@ -656,7 +655,7 @@ fn codegen_gnu_try<'ll>(
         // Type indicator for the exception being thrown.
         //
         // The first value in this tuple is a pointer to the exception object
-        // being thrown.  The second value is a "selector" indicating which of
+        // being thrown. The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
         bx.switch_to_block(catch);
@@ -720,7 +719,7 @@ fn codegen_emcc_try<'ll>(
         // Type indicator for the exception being thrown.
         //
         // The first value in this tuple is a pointer to the exception object
-        // being thrown.  The second value is a "selector" indicating which of
+        // being thrown. The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         bx.switch_to_block(catch);
         let tydesc = bx.eh_catch_typeinfo();
@@ -800,7 +799,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
     let i8p = tcx.mk_mut_ptr(tcx.types.i8);
     // `unsafe fn(*mut i8) -> ()`
     let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
-        iter::once(i8p),
+        [i8p],
         tcx.mk_unit(),
         false,
         hir::Unsafety::Unsafe,
@@ -808,7 +807,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
     )));
     // `unsafe fn(*mut i8, *mut i8) -> ()`
     let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p, i8p].iter().cloned(),
+        [i8p, i8p],
         tcx.mk_unit(),
         false,
         hir::Unsafety::Unsafe,
@@ -816,7 +815,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
     )));
     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
     let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
-        [try_fn_ty, i8p, catch_fn_ty].into_iter(),
+        [try_fn_ty, i8p, catch_fn_ty],
         tcx.types.i32,
         false,
         hir::Unsafety::Unsafe,
@@ -836,40 +835,24 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     llret_ty: &'ll Type,
     span: Span,
 ) -> Result<&'ll Value, ()> {
-    // macros for error handling:
-    #[allow(unused_macro_rules)]
-    macro_rules! emit_error {
-        ($msg: tt) => {
-            emit_error!($msg, )
-        };
-        ($msg: tt, $($fmt: tt)*) => {
-            span_invalid_monomorphization_error(
-                bx.sess(), span,
-                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
-                         name, $($fmt)*));
-        }
-    }
-
     macro_rules! return_error {
-        ($($fmt: tt)*) => {
-            {
-                emit_error!($($fmt)*);
-                return Err(());
-            }
-        }
+        ($diag: expr) => {{
+            bx.sess().emit_err($diag);
+            return Err(());
+        }};
     }
 
     macro_rules! require {
-        ($cond: expr, $($fmt: tt)*) => {
+        ($cond: expr, $diag: expr) => {
             if !$cond {
-                return_error!($($fmt)*);
+                return_error!($diag);
             }
         };
     }
 
     macro_rules! require_simd {
-        ($ty: expr, $position: expr) => {
-            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+        ($ty: expr, $diag: expr) => {
+            require!($ty.is_simd(), $diag)
         };
     }
 
@@ -879,7 +862,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     let arg_tys = sig.inputs();
 
     if name == sym::simd_select_bitmask {
-        require_simd!(arg_tys[1], "argument");
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+        );
+
         let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
 
         let expected_int_bits = (len.max(8) - 1).next_power_of_two();
@@ -891,7 +878,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
             ty::Array(elem, len)
                 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
-                    && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
                         == Some(expected_bytes) =>
             {
                 let place = PlaceRef::alloca(bx, args[0].layout);
@@ -900,12 +887,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
                 bx.load(int_ty, ptr, Align::ONE)
             }
-            _ => return_error!(
-                "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
+            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
+                span,
+                name,
                 mask_ty,
                 expected_int_bits,
                 expected_bytes
-            ),
+            }),
         };
 
         let i1 = bx.type_i1();
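
    For reference, a standalone sketch of the mask-width computation in the hunk
    above: for the power-of-two lane counts SIMD types use in practice,
    `(len.max(8) - 1).next_power_of_two()` yields a bitmask integer exactly as wide
    as the lane count, never narrower than `u8`:

        fn main() {
            // Mirrors `(len.max(8) - 1).next_power_of_two()` from the hunk above.
            for len in [2u64, 4, 8, 16, 32, 64] {
                let expected_int_bits = (len.max(8) - 1).next_power_of_two();
                println!("len = {len:2} lanes -> mask expected as u{expected_int_bits}");
            }
        }
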
@@ -917,7 +905,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     // every intrinsic below takes a SIMD vector as its first argument
-    require_simd!(arg_tys[0], "input");
+    require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
     let in_ty = arg_tys[0];
 
     let comparison = match name {
@@ -932,23 +920,24 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
     if let Some(cmp_op) = comparison {
-        require_simd!(ret_ty, "return");
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
 
         let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+
         require!(
             in_len == out_len,
-            "expected return type with length {} (same as input type `{}`), \
-             found `{}` with length {}",
-            in_len,
-            in_ty,
-            ret_ty,
-            out_len
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
         );
         require!(
             bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
-            "expected return type with integer elements, found `{}` with non-integer `{}`",
-            ret_ty,
-            out_ty
+            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
         );
 
         return Ok(compare_simd_types(
@@ -969,14 +958,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             // version of this intrinsic.
             match args[2].layout.ty.kind() {
                 ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
-                    len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
-                        span_bug!(span, "could not evaluate shuffle index array length")
-                    })
+                    len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+                        || span_bug!(span, "could not evaluate shuffle index array length"),
+                    )
                 }
-                _ => return_error!(
-                    "simd_shuffle index must be an array of `u32`, got `{}`",
-                    args[2].layout.ty
-                ),
+                _ => return_error!(InvalidMonomorphization::SimdShuffle {
+                    span,
+                    name,
+                    ty: args[2].layout.ty
+                }),
             }
         } else {
             stripped.parse().unwrap_or_else(|_| {
@@ -984,23 +974,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             })
         };
 
-        require_simd!(ret_ty, "return");
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
         let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
         require!(
             out_len == n,
-            "expected return type of length {}, found `{}` with length {}",
-            n,
-            ret_ty,
-            out_len
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
         );
         require!(
             in_elem == out_ty,
-            "expected return element type `{}` (element of input `{}`), \
-             found `{}` with element type `{}`",
-            in_elem,
-            in_ty,
-            ret_ty,
-            out_ty
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
         );
 
         let total_len = u128::from(in_len) * 2;
@@ -1013,15 +995,20 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let val = bx.const_get_elt(vector, i as u64);
                 match bx.const_to_opt_u128(val, true) {
                     None => {
-                        emit_error!("shuffle index #{} is not a constant", arg_idx);
+                        bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexNotConstant {
+                            span,
+                            name,
+                            arg_idx,
+                        });
                         None
                     }
                     Some(idx) if idx >= total_len => {
-                        emit_error!(
-                            "shuffle index #{} is out of bounds (limit {})",
+                        bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+                            span,
+                            name,
                             arg_idx,
-                            total_len
-                        );
+                            total_len,
+                        });
                         None
                     }
                     Some(idx) => Some(bx.const_i32(idx as i32)),
@@ -1042,10 +1029,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_insert {
         require!(
             in_elem == arg_tys[2],
-            "expected inserted type `{}` (element of input `{}`), found `{}`",
-            in_elem,
-            in_ty,
-            arg_tys[2]
+            InvalidMonomorphization::InsertedType {
+                span,
+                name,
+                in_elem,
+                in_ty,
+                out_ty: arg_tys[2]
+            }
         );
         return Ok(bx.insert_element(
             args[0].immediate(),
@@ -1056,10 +1046,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_extract {
         require!(
             ret_ty == in_elem,
-            "expected return type `{}` (element of input `{}`), found `{}`",
-            in_elem,
-            in_ty,
-            ret_ty
+            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
         );
         return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
     }
@@ -1067,17 +1054,18 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_select {
         let m_elem_ty = in_elem;
         let m_len = in_len;
-        require_simd!(arg_tys[1], "argument");
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+        );
         let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
         require!(
             m_len == v_len,
-            "mismatched lengths: mask length `{}` != other vector length `{}`",
-            m_len,
-            v_len
+            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
         );
         match m_elem_ty.kind() {
             ty::Int(_) => {}
-            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+            _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
         }
         // truncate the mask to a vector of i1s
         let i1 = bx.type_i1();
@@ -1109,11 +1097,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 args[0].immediate(),
                 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
             ),
-            _ => return_error!(
-                "vector argument `{}`'s element type `{}`, expected integer element type",
+            _ => return_error!(InvalidMonomorphization::VectorArgument {
+                span,
+                name,
                 in_ty,
                 in_elem
-            ),
+            }),
         };
 
         // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
@@ -1135,7 +1124,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
             ty::Array(elem, len)
                 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
-                    && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
                         == Some(expected_bytes) =>
             {
                 // Zero-extend iN to the array length:
@@ -1148,12 +1137,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
                 return Ok(bx.load(array_ty, ptr, Align::ONE));
             }
-            _ => return_error!(
-                "cannot return `{}`, expected `u{}` or `[u8; {}]`",
+            _ => return_error!(InvalidMonomorphization::CannotReturn {
+                span,
+                name,
                 ret_ty,
                 expected_int_bits,
                 expected_bytes
-            ),
+            }),
         }
     }
 
@@ -1166,25 +1156,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         span: Span,
         args: &[OperandRef<'tcx, &'ll Value>],
     ) -> Result<&'ll Value, ()> {
-        #[allow(unused_macro_rules)]
-        macro_rules! emit_error {
-            ($msg: tt) => {
-                emit_error!($msg, )
-            };
-            ($msg: tt, $($fmt: tt)*) => {
-                span_invalid_monomorphization_error(
-                    bx.sess(), span,
-                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
-                             name, $($fmt)*));
-            }
-        }
         macro_rules! return_error {
-            ($($fmt: tt)*) => {
-                {
-                    emit_error!($($fmt)*);
-                    return Err(());
-                }
-            }
+            ($diag: expr) => {{
+                bx.sess().emit_err($diag);
+                return Err(());
+            }};
         }
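        // A hedged sketch (not taken from this patch): the companion `require!` macro,
        // expressed via the `return_error!` arm defined just above. The real definition
        // appears earlier in intrinsic.rs and may differ in detail; this only shows how a
        // structured `InvalidMonomorphization` value flows through these helpers.
        macro_rules! require {
            ($cond: expr, $diag: expr) => {
                if !$cond {
                    return_error!($diag);
                }
            };
        }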
 
         let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
@@ -1192,16 +1168,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             match f.bit_width() {
                 32 => ("f32", elem_ty),
                 64 => ("f64", elem_ty),
-                _ => {
-                    return_error!(
-                        "unsupported element type `{}` of floating-point vector `{}`",
-                        f.name_str(),
-                        in_ty
-                    );
-                }
+                _ => return_error!(InvalidMonomorphization::FloatingPointVector {
+                    span,
+                    name,
+                    f_ty: *f,
+                    in_ty,
+                }),
             }
         } else {
-            return_error!("`{}` is not a floating-point type", in_ty);
+            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
         };
 
         let vec_ty = bx.type_vector(elem_ty, in_len);
@@ -1223,7 +1198,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
             sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
-            _ => return_error!("unrecognized intrinsic `{}`", name),
+            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
         };
         let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
         let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
@@ -1317,37 +1292,48 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // * M: any integer width is supported, will be truncated to i1
 
         // All types must be simd vector types
-        require_simd!(in_ty, "first");
-        require_simd!(arg_tys[1], "second");
-        require_simd!(arg_tys[2], "third");
-        require_simd!(ret_ty, "return");
+        require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+        );
+        require_simd!(
+            arg_tys[2],
+            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+        );
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
 
         // Of the same length:
         let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
         let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
-            "expected {} argument with length {} (same as input type `{}`), \
-             found `{}` with length {}",
-            "second",
-            in_len,
-            in_ty,
-            arg_tys[1],
-            out_len
+            InvalidMonomorphization::SecondArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[1],
+                out_len
+            }
         );
         require!(
             in_len == out_len2,
-            "expected {} argument with length {} (same as input type `{}`), \
-             found `{}` with length {}",
-            "third",
-            in_len,
-            in_ty,
-            arg_tys[2],
-            out_len2
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[2],
+                out_len: out_len2
+            }
         );
 
         // The return type must match the first argument type
-        require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
+        require!(
+            ret_ty == in_ty,
+            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
+        );
 
         // This counts how many pointers
         fn ptr_count(t: Ty<'_>) -> usize {
@@ -1374,15 +1360,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             _ => {
                 require!(
                     false,
-                    "expected element type `{}` of second argument `{}` \
-                        to be a pointer to the element type `{}` of the first \
-                        argument `{}`, found `{}` != `*_ {}`",
-                    element_ty1,
-                    arg_tys[1],
-                    in_elem,
-                    in_ty,
-                    element_ty1,
-                    in_elem
+                    InvalidMonomorphization::ExpectedElementType {
+                        span,
+                        name,
+                        expected_element: element_ty1,
+                        second_arg: arg_tys[1],
+                        in_elem,
+                        in_ty,
+                        mutability: ExpectedPointerMutability::Not,
+                    }
                 );
                 unreachable!();
             }
@@ -1398,10 +1384,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             _ => {
                 require!(
                     false,
-                    "expected element type `{}` of third argument `{}` \
-                                 to be a signed integer type",
-                    element_ty2,
-                    arg_tys[2]
+                    InvalidMonomorphization::ThirdArgElementType {
+                        span,
+                        name,
+                        expected_element: element_ty2,
+                        third_arg: arg_tys[2]
+                    }
                 );
             }
         }
@@ -1450,32 +1438,40 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // * M: any integer width is supported, will be truncated to i1
 
         // All types must be simd vector types
-        require_simd!(in_ty, "first");
-        require_simd!(arg_tys[1], "second");
-        require_simd!(arg_tys[2], "third");
+        require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+        );
+        require_simd!(
+            arg_tys[2],
+            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+        );
 
         // Of the same length:
         let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
         let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
         require!(
             in_len == element_len1,
-            "expected {} argument with length {} (same as input type `{}`), \
-            found `{}` with length {}",
-            "second",
-            in_len,
-            in_ty,
-            arg_tys[1],
-            element_len1
+            InvalidMonomorphization::SecondArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[1],
+                out_len: element_len1
+            }
         );
         require!(
             in_len == element_len2,
-            "expected {} argument with length {} (same as input type `{}`), \
-            found `{}` with length {}",
-            "third",
-            in_len,
-            in_ty,
-            arg_tys[2],
-            element_len2
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[2],
+                out_len: element_len2
+            }
         );
 
         // This counts how many pointers
@@ -1506,15 +1502,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             _ => {
                 require!(
                     false,
-                    "expected element type `{}` of second argument `{}` \
-                        to be a pointer to the element type `{}` of the first \
-                        argument `{}`, found `{}` != `*mut {}`",
-                    element_ty1,
-                    arg_tys[1],
-                    in_elem,
-                    in_ty,
-                    element_ty1,
-                    in_elem
+                    InvalidMonomorphization::ExpectedElementType {
+                        span,
+                        name,
+                        expected_element: element_ty1,
+                        second_arg: arg_tys[1],
+                        in_elem,
+                        in_ty,
+                        mutability: ExpectedPointerMutability::Mut,
+                    }
                 );
                 unreachable!();
             }
@@ -1529,10 +1525,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             _ => {
                 require!(
                     false,
-                    "expected element type `{}` of third argument `{}` \
-                         be a signed integer type",
-                    element_ty2,
-                    arg_tys[2]
+                    InvalidMonomorphization::ThirdArgElementType {
+                        span,
+                        name,
+                        expected_element: element_ty2,
+                        third_arg: arg_tys[2]
+                    }
                 );
             }
         }
@@ -1579,10 +1577,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             if name == sym::$name {
                 require!(
                     ret_ty == in_elem,
-                    "expected return type `{}` (element of input `{}`), found `{}`",
-                    in_elem,
-                    in_ty,
-                    ret_ty
+                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
                 );
                 return match in_elem.kind() {
                     ty::Int(_) | ty::Uint(_) => {
@@ -1605,25 +1600,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                                 32 => bx.const_real(bx.type_f32(), $identity),
                                 64 => bx.const_real(bx.type_f64(), $identity),
                                 v => return_error!(
-                                    r#"
-unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
-                                    sym::$name,
-                                    in_ty,
-                                    in_elem,
-                                    v,
-                                    ret_ty
+                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
+                                        span,
+                                        name,
+                                        symbol: sym::$name,
+                                        in_ty,
+                                        in_elem,
+                                        size: v,
+                                        ret_ty
+                                    }
                                 ),
                             }
                         };
                         Ok(bx.$float_reduce(acc, args[0].immediate()))
                     }
-                    _ => return_error!(
-                        "unsupported {} from `{}` with element `{}` to `{}`",
-                        sym::$name,
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
                         in_ty,
                         in_elem,
                         ret_ty
-                    ),
+                    }),
                 };
             }
         };
@@ -1651,22 +1649,20 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
             if name == sym::$name {
                 require!(
                     ret_ty == in_elem,
-                    "expected return type `{}` (element of input `{}`), found `{}`",
-                    in_elem,
-                    in_ty,
-                    ret_ty
+                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
                 );
                 return match in_elem.kind() {
                     ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                     ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
                     ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
-                    _ => return_error!(
-                        "unsupported {} from `{}` with element `{}` to `{}`",
-                        sym::$name,
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
                         in_ty,
                         in_elem,
                         ret_ty
-                    ),
+                    }),
                 };
             }
         };
@@ -1684,22 +1680,20 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                 let input = if !$boolean {
                     require!(
                         ret_ty == in_elem,
-                        "expected return type `{}` (element of input `{}`), found `{}`",
-                        in_elem,
-                        in_ty,
-                        ret_ty
+                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
                     );
                     args[0].immediate()
                 } else {
                     match in_elem.kind() {
                         ty::Int(_) | ty::Uint(_) => {}
-                        _ => return_error!(
-                            "unsupported {} from `{}` with element `{}` to `{}`",
-                            sym::$name,
+                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                            span,
+                            name,
+                            symbol: sym::$name,
                             in_ty,
                             in_elem,
                             ret_ty
-                        ),
+                        }),
                     }
 
                     // boolean reductions operate on vectors of i1s:
@@ -1712,13 +1706,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                         let r = bx.$red(input);
                         Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
                     }
-                    _ => return_error!(
-                        "unsupported {} from `{}` with element `{}` to `{}`",
-                        sym::$name,
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
                         in_ty,
                         in_elem,
                         ret_ty
-                    ),
+                    }),
                 };
             }
         };
@@ -1731,16 +1726,18 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
     bitwise_red!(simd_reduce_any: vector_reduce_or, true);
 
     if name == sym::simd_cast_ptr {
-        require_simd!(ret_ty, "return");
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
-            "expected return type with length {} (same as input type `{}`), \
-                  found `{}` with length {}",
-            in_len,
-            in_ty,
-            ret_ty,
-            out_len
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
         );
 
         match in_elem.kind() {
@@ -1749,9 +1746,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
                 assert!(!check_sized); // we are in codegen, so we shouldn't see these types
-                require!(metadata.is_unit(), "cannot cast fat pointer `{}`", in_elem)
+                require!(
+                    metadata.is_unit(),
+                    InvalidMonomorphization::CastFatPointer { span, name, ty: in_elem }
+                );
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
             }
-            _ => return_error!("expected pointer, got `{}`", in_elem),
         }
         match out_elem.kind() {
             ty::RawPtr(p) => {
@@ -1759,9 +1761,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
                 assert!(!check_sized); // we are in codegen, so we shouldn't see these types
-                require!(metadata.is_unit(), "cannot cast to fat pointer `{}`", out_elem)
+                require!(
+                    metadata.is_unit(),
+                    InvalidMonomorphization::CastFatPointer { span, name, ty: out_elem }
+                );
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
             }
-            _ => return_error!("expected pointer, got `{}`", out_elem),
         }
 
         if in_elem == out_elem {
@@ -1772,66 +1779,76 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
     }
 
     if name == sym::simd_expose_addr {
-        require_simd!(ret_ty, "return");
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
-            "expected return type with length {} (same as input type `{}`), \
-                  found `{}` with length {}",
-            in_len,
-            in_ty,
-            ret_ty,
-            out_len
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
         );
 
         match in_elem.kind() {
             ty::RawPtr(_) => {}
-            _ => return_error!("expected pointer, got `{}`", in_elem),
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
+            }
         }
         match out_elem.kind() {
             ty::Uint(ty::UintTy::Usize) => {}
-            _ => return_error!("expected `usize`, got `{}`", out_elem),
+            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
         }
 
         return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
     }
 
     if name == sym::simd_from_exposed_addr {
-        require_simd!(ret_ty, "return");
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
-            "expected return type with length {} (same as input type `{}`), \
-                  found `{}` with length {}",
-            in_len,
-            in_ty,
-            ret_ty,
-            out_len
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
         );
 
         match in_elem.kind() {
             ty::Uint(ty::UintTy::Usize) => {}
-            _ => return_error!("expected `usize`, got `{}`", in_elem),
+            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
         }
         match out_elem.kind() {
             ty::RawPtr(_) => {}
-            _ => return_error!("expected pointer, got `{}`", out_elem),
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
+            }
         }
 
         return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
     }
 
     if name == sym::simd_cast || name == sym::simd_as {
-        require_simd!(ret_ty, "return");
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
-            "expected return type with length {} (same as input type `{}`), \
-                  found `{}` with length {}",
-            in_len,
-            in_ty,
-            ret_ty,
-            out_len
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
         );
         // casting cares about nominal type, not just structural type
         if in_elem == out_elem {
@@ -1910,11 +1927,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
         }
         require!(
             false,
-            "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
-            in_ty,
-            in_elem,
-            ret_ty,
-            out_elem
+            InvalidMonomorphization::UnsupportedCast {
+                span,
+                name,
+                in_ty,
+                in_elem,
+                ret_ty,
+                out_elem
+            }
         );
     }
     macro_rules! arith_binary {
@@ -1926,10 +1946,10 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     })*
                     _ => {},
                 }
-                require!(false,
-                         "unsupported operation on `{}` with element `{}`",
-                         in_ty,
-                         in_elem)
+                require!(
+                    false,
+                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+                );
             })*
         }
     }
@@ -1957,10 +1977,10 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     })*
                     _ => {},
                 }
-                require!(false,
-                         "unsupported operation on `{}` with element `{}`",
-                         in_ty,
-                         in_elem)
+                require!(
+                    false,
+                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+                );
             })*
         }
     }
@@ -1998,12 +2018,12 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
             ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
             ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
             _ => {
-                return_error!(
-                    "expected element type `{}` of vector type `{}` \
-                     to be a signed or unsigned integer type",
-                    arg_tys[0].simd_size_and_type(bx.tcx()).1,
-                    arg_tys[0]
-                );
+                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
+                    span,
+                    name,
+                    expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
+                    vector_type: arg_tys[0]
+                });
             }
         };
         let llvm_intrinsic = &format!(
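The hunks above replace every format-string error in the SIMD intrinsic checks with a structured
`InvalidMonomorphization` value handed to `require!`, `require_simd!`, or `return_error!`. As a
hedged sketch only, one variant of such a diagnostic enum might look roughly like the following;
the field names are taken from the call sites above, while the derive attributes and the fluent
slug are illustrative assumptions (the real definitions live in the backend's errors module and
messages.ftl, neither of which is shown here):

use rustc_macros::Diagnostic;
use rustc_middle::ty::Ty;
use rustc_span::{Span, Symbol};

#[derive(Diagnostic)]
pub enum InvalidMonomorphization<'tcx> {
    #[diag(codegen_llvm_invalid_monomorphization_return_type, code = "E0511")] // slug assumed
    ReturnType {
        #[primary_span]
        span: Span,
        name: Symbol,
        in_elem: Ty<'tcx>,
        in_ty: Ty<'tcx>,
        ret_ty: Ty<'tcx>,
    },
    // ...one variant per check above: SimdReturn, ReturnLength, UnsupportedOperation, etc.
}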
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 246e82545c8..e5bae009ed6 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -5,11 +5,12 @@
 //! This API is completely unstable and subject to change.
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(extern_types)]
 #![feature(hash_raw_entry)]
+#![feature(iter_intersperse)]
 #![feature(let_chains)]
-#![feature(extern_types)]
+#![feature(never_type)]
 #![feature(once_cell)]
-#![feature(iter_intersperse)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
@@ -22,7 +23,7 @@ extern crate tracing;
 
 use back::write::{create_informational_target_machine, create_target_machine};
 
-use errors::FailParsingTargetMachineConfigToTargetMachine;
+use errors::ParseTargetMachineConfig;
 pub use llvm_util::target_features;
 use rustc_ast::expand::allocator::AllocatorKind;
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
@@ -33,7 +34,8 @@ use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::ModuleCodegen;
 use rustc_codegen_ssa::{CodegenResults, CompiledModule};
 use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{ErrorGuaranteed, FatalError, Handler};
+use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage};
+use rustc_macros::fluent_messages;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_middle::ty::query::Providers;
@@ -82,6 +84,8 @@ mod type_of;
 mod va_arg;
 mod value;
 
+fluent_messages! { "../messages.ftl" }
+
 #[derive(Clone)]
 pub struct LlvmCodegenBackend(());
 
@@ -169,6 +173,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     type Module = ModuleLlvm;
     type ModuleBuffer = back::lto::ModuleBuffer;
     type TargetMachine = &'static mut llvm::TargetMachine;
+    type TargetMachineError = crate::errors::LlvmError<'static>;
     type ThinData = back::lto::ThinData;
     type ThinBuffer = back::lto::ThinBuffer;
     fn print_pass_timings(&self) {
@@ -244,6 +249,10 @@ impl LlvmCodegenBackend {
 }
 
 impl CodegenBackend for LlvmCodegenBackend {
+    fn locale_resource(&self) -> &'static str {
+        crate::DEFAULT_LOCALE_RESOURCE
+    }
+
     fn init(&self, sess: &Session) {
         llvm_util::init(sess); // Make sure llvm is inited
     }
@@ -352,12 +361,12 @@ impl CodegenBackend for LlvmCodegenBackend {
             .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
             .join(sess);
 
-        sess.time("llvm_dump_timing_file", || {
-            if sess.opts.unstable_opts.llvm_time_trace {
+        if sess.opts.unstable_opts.llvm_time_trace {
+            sess.time("llvm_dump_timing_file", || {
                 let file_name = outputs.with_extension("llvm_timings.json");
                 llvm_util::time_trace_profiler_finish(&file_name);
-            }
-        });
+            });
+        }
 
         Ok((codegen_results, work_products))
     }
@@ -416,8 +425,7 @@ impl ModuleLlvm {
             let tm = match (cgcx.tm_factory)(tm_factory_config) {
                 Ok(m) => m,
                 Err(e) => {
-                    handler.emit_err(FailParsingTargetMachineConfigToTargetMachine { error: e });
-                    return Err(FatalError);
+                    return Err(handler.emit_almost_fatal(ParseTargetMachineConfig(e)));
                 }
             };
 
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index c14e1656291..9e5265188b5 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -79,6 +79,7 @@ pub enum LLVMModFlagBehavior {
     Append = 5,
     AppendUnique = 6,
     Max = 7,
+    Min = 8,
 }
 
 // Consts for the LLVM CallConv type, pre-cast to usize.
@@ -427,6 +428,7 @@ pub enum MetadataType {
     MD_type = 19,
     MD_vcall_visibility = 28,
     MD_noundef = 29,
+    MD_kcfi_type = 36,
 }
 
 /// LLVMRustAsmDialect
@@ -480,6 +482,8 @@ pub struct SanitizerOptions {
     pub sanitize_thread: bool,
     pub sanitize_hwaddress: bool,
     pub sanitize_hwaddress_recover: bool,
+    pub sanitize_kernel_address: bool,
+    pub sanitize_kernel_address_recover: bool,
 }
 
 /// LLVMRelocMode
@@ -637,9 +641,6 @@ pub struct Builder<'a>(InvariantOpaque<'a>);
 #[repr(C)]
 pub struct PassManager<'a>(InvariantOpaque<'a>);
 extern "C" {
-    pub type PassManagerBuilder;
-}
-extern "C" {
     pub type Pass;
 }
 extern "C" {
@@ -983,6 +984,9 @@ pub type SelfProfileBeforePassCallback =
     unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);
 pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void);
 
+pub type GetSymbolsCallback = unsafe extern "C" fn(*mut c_void, *const c_char) -> *mut c_void;
+pub type GetSymbolsErrorCallback = unsafe extern "C" fn(*const c_char) -> *mut c_void;
+
 extern "C" {
     pub fn LLVMRustInstallFatalErrorHandler();
     pub fn LLVMRustDisableSystemDialogsOnCrash();
@@ -1060,10 +1064,12 @@ extern "C" {
     pub fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
     pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
     pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
+    pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>;
 
     // Operations on constants of any type
     pub fn LLVMConstNull(Ty: &Type) -> &Value;
     pub fn LLVMGetUndef(Ty: &Type) -> &Value;
+    pub fn LLVMGetPoison(Ty: &Type) -> &Value;
 
     // Operations on metadata
     pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
@@ -1270,7 +1276,8 @@ extern "C" {
         NumArgs: c_uint,
         Then: &'a BasicBlock,
         Catch: &'a BasicBlock,
-        Bundle: Option<&OperandBundleDef<'a>>,
+        OpBundles: *const Option<&OperandBundleDef<'a>>,
+        NumOpBundles: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
     pub fn LLVMBuildLandingPad<'a>(
@@ -1640,7 +1647,8 @@ extern "C" {
         Fn: &'a Value,
         Args: *const &'a Value,
         NumArgs: c_uint,
-        Bundle: Option<&OperandBundleDef<'a>>,
+        OpBundles: *const Option<&OperandBundleDef<'a>>,
+        NumOpBundles: c_uint,
     ) -> &'a Value;
     pub fn LLVMRustBuildMemCpy<'a>(
         B: &Builder<'a>,
@@ -1804,8 +1812,6 @@ extern "C" {
     /// Creates a legacy pass manager -- only used for final codegen.
     pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
 
-    pub fn LLVMInitializePasses();
-
     pub fn LLVMTimeTraceProfilerInitialize();
 
     pub fn LLVMTimeTraceProfilerFinishThread();
@@ -2382,11 +2388,11 @@ extern "C" {
 
     pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
 
-    pub fn LLVMRustBuildOperandBundleDef<'a>(
+    pub fn LLVMRustBuildOperandBundleDef(
         Name: *const c_char,
-        Inputs: *const &'a Value,
+        Inputs: *const &'_ Value,
         NumInputs: c_uint,
-    ) -> &'a mut OperandBundleDef<'a>;
+    ) -> &mut OperandBundleDef<'_>;
     pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
 
     pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
@@ -2400,6 +2406,8 @@ extern "C" {
     pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
     pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
     pub fn LLVMRustModuleCost(M: &Module) -> u64;
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustModuleInstructionStats(M: &Module, Str: &RustString);
 
     pub fn LLVMRustThinLTOBufferCreate(M: &Module, is_thin: bool) -> &'static mut ThinLTOBuffer;
     pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
@@ -2474,4 +2482,14 @@ extern "C" {
     pub fn LLVMRustGetMangledName(V: &Value, out: &RustString);
 
     pub fn LLVMRustGetElementTypeArgIndex(CallSite: &Value) -> i32;
+
+    pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
+
+    pub fn LLVMRustGetSymbols(
+        buf_ptr: *const u8,
+        buf_len: usize,
+        state: *mut c_void,
+        callback: GetSymbolsCallback,
+        error_callback: GetSymbolsErrorCallback,
+    ) -> *mut c_void;
 }
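The new `LLVMRustGetSymbols` binding above enumerates the symbols of an object or bitcode buffer,
reporting each name through `callback` and failures through `error_callback`, with an opaque
`state` pointer threaded through. A hedged sketch of Rust-side callbacks matching the declared
`GetSymbolsCallback`/`GetSymbolsErrorCallback` signatures; the state type and the null-on-success
convention are assumptions, not something this diff specifies:

use std::ffi::{c_char, c_void, CStr};

// Collect each reported symbol name into the Vec<String> passed through `state`.
unsafe extern "C" fn collect_symbol(state: *mut c_void, name: *const c_char) -> *mut c_void {
    let symbols = &mut *(state as *mut Vec<String>);
    symbols.push(CStr::from_ptr(name).to_string_lossy().into_owned());
    std::ptr::null_mut() // assumed "keep going" value
}

// Box the error message so the caller can recover it from the returned pointer.
unsafe extern "C" fn symbol_error(msg: *const c_char) -> *mut c_void {
    Box::into_raw(Box::new(CStr::from_ptr(msg).to_string_lossy().into_owned())) as *mut c_void
}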
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index c9f5dd0f2c6..46692fd5e8b 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -81,10 +81,10 @@ unsafe fn configure_llvm(sess: &Session) {
         };
         // Set the llvm "program name" to make usage and invalid argument messages more clear.
         add("rustc -Cllvm-args=\"...\" with", true);
-        if sess.time_llvm_passes() {
+        if sess.opts.unstable_opts.time_llvm_passes {
             add("-time-passes", false);
         }
-        if sess.print_llvm_passes() {
+        if sess.opts.unstable_opts.print_llvm_passes {
             add("-debug-pass=Structure", false);
         }
         if sess.target.generate_arange_section
@@ -120,8 +120,6 @@ unsafe fn configure_llvm(sess: &Session) {
         llvm::LLVMTimeTraceProfilerInitialize();
     }
 
-    llvm::LLVMInitializePasses();
-
     rustc_llvm::initialize_available_targets();
 
     llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
@@ -152,13 +150,7 @@ pub fn time_trace_profiler_finish(file_name: &Path) {
 pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
     let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
     match (arch, s) {
-        ("x86", "sse4.2") => {
-            if get_version() >= (14, 0, 0) {
-                smallvec!["sse4.2", "crc32"]
-            } else {
-                smallvec!["sse4.2"]
-            }
-        }
+        ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"],
         ("x86", "pclmulqdq") => smallvec!["pclmul"],
         ("x86", "rdrand") => smallvec!["rdrnd"],
         ("x86", "bmi1") => smallvec!["bmi"],
@@ -217,7 +209,7 @@ pub fn check_tied_features(
 /// Must express features in the way Rust understands them
 pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
     let target_machine = create_informational_target_machine(sess);
-    let mut features: Vec<Symbol> = supported_target_features(sess)
+    supported_target_features(sess)
         .iter()
         .filter_map(|&(feature, gate)| {
             if sess.is_nightly_build() || allow_unstable || gate.is_none() {
@@ -237,16 +229,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
             true
         })
         .map(|feature| Symbol::intern(feature))
-        .collect();
-
-    // LLVM 14 changed the ABI for i128 arguments to __float/__fix builtins on Win64
-    // (see https://reviews.llvm.org/D110413). This unstable target feature is intended for use
-    // by compiler-builtins, to export the builtins with the expected, LLVM-version-dependent ABI.
-    // The target feature can be dropped once we no longer support older LLVM versions.
-    if sess.is_nightly_build() && get_version() >= (14, 0, 0) {
-        features.push(Symbol::intern("llvm14-builtins-abi"));
-    }
-    features
+        .collect()
 }
 
 pub fn print_version() {
@@ -441,7 +424,7 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
         .filter_map(|s| {
             let enable_disable = match s.chars().next() {
                 None => return None,
-                Some(c @ '+' | c @ '-') => c,
+                Some(c @ ('+' | '-')) => c,
                 Some(_) => {
                     if diagnostics {
                         sess.emit_warning(UnknownCTargetFeaturePrefix { feature: s });
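The `-Ctarget-feature` handling above first peels off the mandatory '+'/'-' prefix (now matched
with the tidier `c @ ('+' | '-')` pattern) and then maps the rustc feature name to LLVM's
spelling, e.g. x86 "sse4.2" now always expands to both "sse4.2" and "crc32". A self-contained,
purely illustrative sketch of the prefix split (the helper name is not from rustc):

// Illustrative helper: split one -Ctarget-feature entry into its
// enable/disable prefix and the bare feature name.
fn split_feature(entry: &str) -> Option<(char, &str)> {
    match entry.chars().next() {
        Some(c @ ('+' | '-')) => Some((c, &entry[1..])),
        _ => None, // prefix-less entries are warned about, as in the hunk above
    }
}

fn main() {
    assert_eq!(split_feature("+sse4.2"), Some(('+', "sse4.2")));
    assert_eq!(split_feature("-avx"), Some(('-', "avx")));
    assert_eq!(split_feature("sse2"), None);
}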
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 76f692b2016..d0ae36349df 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -9,7 +9,7 @@ use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 pub use rustc_middle::mir::mono::MonoItem;
 use rustc_middle::mir::mono::{Linkage, Visibility};
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
-use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
 use rustc_session::config::CrateType;
 use rustc_target::spec::RelocModel;
 
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 5772b7e1d81..ff111d96f84 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -316,4 +316,19 @@ impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             )
         }
     }
+
+    fn set_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) {
+        let kcfi_type_metadata = self.const_u32(kcfi_typeid);
+        unsafe {
+            llvm::LLVMGlobalSetMetadata(
+                function,
+                llvm::MD_kcfi_type as c_uint,
+                llvm::LLVMMDNodeInContext2(
+                    self.llcx,
+                    &llvm::LLVMValueAsMetadata(kcfi_type_metadata),
+                    1,
+                ),
+            )
+        }
+    }
 }
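`set_kcfi_type_metadata` above attaches a `kcfi_type` metadata node (kind `MD_kcfi_type`, also
added in this diff) carrying a 32-bit type id to a function; LLVM's KCFI instrumentation compares
that id at indirect call sites. A hedged illustration of a call site, assuming `cx: &CodegenCx`
and a declared `llfn` as elsewhere in this crate; the literal id is a stand-in, since how the real
per-signature id is derived is outside this diff:

// Purely illustrative: tag a declared function with a KCFI type id.
let kcfi_typeid: u32 = 0xdead_beef; // stand-in for the real per-signature hash
cx.set_kcfi_type_metadata(llfn, kcfi_typeid);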
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 182adf81785..e264ce78f0d 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,13 +1,12 @@
 use crate::common::*;
 use crate::context::TypeLowering;
-use crate::llvm_util::get_version;
 use crate::type_::Type;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::{self, Ty, TypeVisitable};
-use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt};
+use rustc_target::abi::{Abi, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F32, F64};
 use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
 use smallvec::{smallvec, SmallVec};
@@ -43,10 +42,8 @@ fn uncached_llvm_type<'a, 'tcx>(
         // in problematically distinct types due to HRTB and subtyping (see #47638).
         // ty::Dynamic(..) |
         ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
-            // For performance reasons we use names only when emitting LLVM IR. Unless we are on
-            // LLVM < 14, where the use of unnamed types resulted in various issues, e.g., #76213,
-            // #79564, and #79246.
-            if get_version() < (14, 0, 0) || !cx.sess().fewer_names() =>
+            // For performance reasons we use names only when emitting LLVM IR.
+            if !cx.sess().fewer_names() =>
         {
             let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
             if let (&ty::Adt(def, _), &Variants::Single { index }) =
@@ -157,7 +154,7 @@ fn struct_llfields<'a, 'tcx>(
     } else {
         debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
     }
-    let field_remapping = if padding_used { Some(field_remapping) } else { None };
+    let field_remapping = padding_used.then_some(field_remapping);
     (result, packed, field_remapping)
 }
 
@@ -312,14 +309,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
-            Pointer => {
+            Pointer(address_space) => {
                 // If we know the alignment, pick something better than i8.
-                let (pointee, address_space) =
-                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                        (cx.type_pointee_for_align(pointee.align), pointee.address_space)
-                    } else {
-                        (cx.type_i8(), AddressSpace::DATA)
-                    };
+                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                    cx.type_pointee_for_align(pointee.align)
+                } else {
+                    cx.type_i8()
+                };
                 cx.type_ptr_to_ext(pointee, address_space)
             }
         }
@@ -333,7 +329,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     ) -> &'a Type {
         // HACK(eddyb) special-case fat pointers until LLVM removes
         // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match self.ty.kind() {
+        match *self.ty.kind() {
             ty::Ref(..) | ty::RawPtr(_) => {
                 return self.field(cx, index).llvm_type(cx);
             }
@@ -343,6 +339,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
                 let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
                 return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
             }
+            // `dyn* Trait` has the same ABI as `*mut dyn Trait`
+            ty::Dynamic(bounds, region, ty::DynStar) => {
+                let ptr_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_dynamic(bounds, region, ty::Dyn));
+                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
+            }
             _ => {}
         }
 
@@ -352,10 +353,10 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         let scalar = [a, b][index];
 
         // Make sure to return the same type `immediate_llvm_type` would when
-        // dealing with an immediate pair.  This means that `(bool, bool)` is
+        // dealing with an immediate pair. This means that `(bool, bool)` is
         // effectively represented as `{i8, i8}` in memory and two `i1`s as an
         // immediate, just like `bool` is typically `i8` in memory and only `i1`
-        // when immediate.  We need to load/store `bool` as `i8` to avoid
+        // when immediate. We need to load/store `bool` as `i8` to avoid
         // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
         if immediate && scalar.is_bool() {
             return cx.type_i1();
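The comment above records a subtle rule: `bool` is `i1` only as an immediate; in memory, including
each half of a pair like `(bool, bool)`, it is widened to `i8`. A brief hedged illustration in
terms of the method being edited, assuming `cx` and a `layout` describing `(bool, bool)` are in
scope:

// Same scalar, two LLVM types, depending on which representation is requested.
let in_memory = layout.scalar_pair_element_llvm_type(cx, 0, /* immediate */ false); // i8
let as_immediate = layout.scalar_pair_element_llvm_type(cx, 0, /* immediate */ true); // i1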
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index ceb3d5a84ab..b19398e68c2 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -175,6 +175,89 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     val
 }
 
+fn emit_s390x_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    // Implementation of the s390x ELF ABI calling convention for va_args; see
+    // https://github.com/IBM/s390x-abi (chapter 1.2.4).
+    let va_list_addr = list.immediate();
+    let va_list_layout = list.deref(bx.cx).layout;
+    let va_list_ty = va_list_layout.llvm_type(bx);
+    let layout = bx.cx.layout_of(target_ty);
+
+    let in_reg = bx.append_sibling_block("va_arg.in_reg");
+    let in_mem = bx.append_sibling_block("va_arg.in_mem");
+    let end = bx.append_sibling_block("va_arg.end");
+
+    // FIXME: vector ABI not yet supported.
+    let target_ty_size = bx.cx.size_of(target_ty).bytes();
+    let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+    let unpadded_size = if indirect { 8 } else { target_ty_size };
+    let padded_size = 8;
+    let padding = padded_size - unpadded_size;
+
+    let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
+    let (max_regs, reg_count_field, reg_save_index, reg_padding) =
+        if gpr_type { (5, 0, 2, padding) } else { (4, 1, 16, 0) };
+
+    // Check whether the value was passed in a register or in memory.
+    let reg_count = bx.struct_gep(
+        va_list_ty,
+        va_list_addr,
+        va_list_layout.llvm_field_index(bx.cx, reg_count_field),
+    );
+    let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
+    let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
+    bx.cond_br(use_regs, in_reg, in_mem);
+
+    // Emit code to load the value if it was passed in a register.
+    bx.switch_to_block(in_reg);
+
+    // Work out the address of the value in the register save area.
+    let reg_ptr =
+        bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
+    let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
+    let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
+    let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
+
+    // Update the register count.
+    let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
+    bx.store(new_reg_count_v, reg_count, Align::from_bytes(8).unwrap());
+    bx.br(end);
+
+    // Emit code to load the value if it was passed in memory.
+    bx.switch_to_block(in_mem);
+
+    // Work out the address of the value in the argument overflow area.
+    let arg_ptr =
+        bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
+    let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let arg_off = bx.const_u64(padding);
+    let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
+
+    // Update the argument overflow area pointer.
+    let arg_size = bx.cx().const_u64(padded_size);
+    let new_arg_ptr_v = bx.inbounds_gep(bx.type_i8(), arg_ptr_v, &[arg_size]);
+    bx.store(new_arg_ptr_v, arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    bx.br(end);
+
+    // Return the appropriate result.
+    bx.switch_to_block(end);
+    let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_type = layout.llvm_type(bx);
+    let val_addr = if indirect {
+        let ptr_type = bx.cx.type_ptr_to(val_type);
+        let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type));
+        bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi)
+    } else {
+        bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type))
+    };
+    bx.load(val_type, val_addr, layout.align.abi)
+}
+
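// A hedged, illustrative sketch of the slot classification performed above, as
// plain arithmetic (sizes in bytes). Values wider than 8 bytes or with a
// non-power-of-two size are passed indirectly (by pointer); everything else is
// right-aligned in an 8-byte slot, which is where `padding` comes from on this
// big-endian target.
fn s390x_va_slot(target_ty_size: u64) -> (bool, u64, u64) {
    let indirect = target_ty_size > 8 || !target_ty_size.is_power_of_two();
    let unpadded_size = if indirect { 8 } else { target_ty_size };
    let padding = 8 - unpadded_size;
    (indirect, unpadded_size, padding)
}
// e.g. s390x_va_slot(4) == (false, 4, 4): an i32 sits in the last 4 bytes of its slot.
// e.g. s390x_va_slot(16) == (true, 8, 0): a 16-byte value is spilled and a pointer is passed.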
 pub(super) fn emit_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     addr: OperandRef<'tcx, &'ll Value>,
@@ -200,6 +283,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
             emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
         }
         "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
+        "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
         // Windows x86_64
         "x86_64" if target.is_like_windows => {
             let target_ty_size = bx.cx.size_of(target_ty).bytes();