Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs                    19
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs              15
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs                    39
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs           98
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs             43
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs                    8
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs                40
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs                  7
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs                 93
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs                87
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs     71
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs          78
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs              38
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs                     7
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs        94
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs                7
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs              13
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs               8
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs                21
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs                  4
20 files changed, 475 insertions(+), 315 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index abf0ea8cc0a..1a0a3a0c340 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -11,11 +11,12 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_middle::bug;
+use rustc_middle::ty::layout::LayoutOf;
 pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
 use rustc_middle::ty::Ty;
 use rustc_target::abi::call::ArgAbi;
 pub use rustc_target::abi::call::*;
-use rustc_target::abi::{self, HasDataLayout, Int, LayoutOf};
+use rustc_target::abi::{self, HasDataLayout, Int};
 pub use rustc_target::spec::abi::Abi;
 
 use libc::c_uint;
@@ -510,7 +511,12 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
-        // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
+        if self.ret.layout.abi.is_uninhabited() {
+            llvm::Attribute::NoReturn.apply_callsite(llvm::AttributePlace::Function, callsite);
+        }
+        if !self.can_unwind {
+            llvm::Attribute::NoUnwind.apply_callsite(llvm::AttributePlace::Function, callsite);
+        }
 
         let mut i = 0;
         let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
@@ -535,16 +541,13 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             }
             _ => {}
         }
-        if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
+        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
             if let Int(..) = scalar.value {
-                if !scalar.is_bool() {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(callsite, range);
-                    }
+                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
+                    bx.range_metadata(callsite, scalar.valid_range);
                 }
             }
         }
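
Note: the `apply_attrs_callsite` hunk above resolves the old FIXME: call sites now get `noreturn` when the callee's return layout is uninhabited and `nounwind` when the ABI is known not to unwind, and range metadata is emitted directly from the scalar's inclusive `valid_range`. As a user-level illustration (standalone example, not rustc code), the never type is the simplest uninhabited return type, so every call to a function like this is a candidate for the `noreturn` attribute:

    // Illustrative only: `!` is uninhabited, so `self.ret.layout.abi.is_uninhabited()`
    // holds for this signature and the LLVM call site can be marked `noreturn`.
    fn always_diverges() -> ! {
        panic!("no call to this function ever returns")
    }
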
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index 2d79b73cf9f..30d91b41a8e 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -3,19 +3,22 @@ use libc::c_uint;
 use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
+use rustc_session::config::DebugInfo;
 use rustc_span::symbol::sym;
 
+use crate::debuginfo;
 use crate::llvm::{self, False, True};
 use crate::ModuleLlvm;
 
 pub(crate) unsafe fn codegen(
     tcx: TyCtxt<'_>,
-    mods: &mut ModuleLlvm,
+    module_llvm: &mut ModuleLlvm,
+    module_name: &str,
     kind: AllocatorKind,
     has_alloc_error_handler: bool,
 ) {
-    let llcx = &*mods.llcx;
-    let llmod = mods.llmod();
+    let llcx = &*module_llvm.llcx;
+    let llmod = module_llvm.llmod();
     let usize = match tcx.sess.target.pointer_width {
         16 => llvm::LLVMInt16TypeInContext(llcx),
         32 => llvm::LLVMInt32TypeInContext(llcx),
@@ -132,4 +135,10 @@ pub(crate) unsafe fn codegen(
     llvm::LLVMSetTailCall(ret, True);
     llvm::LLVMBuildRetVoid(llbuilder);
     llvm::LLVMDisposeBuilder(llbuilder);
+
+    if tcx.sess.opts.debuginfo != DebugInfo::None {
+        let dbg_cx = debuginfo::CrateDebugContext::new(llmod);
+        debuginfo::metadata::compile_unit_metadata(tcx, module_name, &dbg_cx);
+        dbg_cx.finalize(tcx.sess);
+    }
 }
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index ebc3773df57..9690ad8b246 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -302,11 +302,19 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         "~{flags}".to_string(),
                     ]);
                 }
-                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
+                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+                    constraints.extend_from_slice(&[
+                        "~{vtype}".to_string(),
+                        "~{vl}".to_string(),
+                        "~{vxsat}".to_string(),
+                        "~{vxrm}".to_string(),
+                    ]);
+                }
                 InlineAsmArch::Nvptx64 => {}
                 InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                 InlineAsmArch::Hexagon => {}
                 InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+                InlineAsmArch::S390x => {}
                 InlineAsmArch::SpirV => {}
                 InlineAsmArch::Wasm32 => {}
                 InlineAsmArch::Bpf => {}
@@ -425,7 +433,7 @@ impl AsmMethods for CodegenCx<'ll, 'tcx> {
     }
 }
 
-fn inline_asm_call(
+pub(crate) fn inline_asm_call(
     bx: &mut Builder<'a, 'll, 'tcx>,
     asm: &str,
     cons: &str,
@@ -608,6 +616,10 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>)
             InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
             InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+                unreachable!("clobber-only")
+            }
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
@@ -626,6 +638,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>)
             InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
             InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                 bug!("LLVM backend does not support SPIR-V")
             }
@@ -704,6 +718,7 @@ fn modifier_to_llvm(
         }
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
         InlineAsmRegClass::Bpf(_) => None,
+        InlineAsmRegClass::S390x(_) => None,
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
         }
@@ -744,6 +759,10 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
         InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
         InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
@@ -762,6 +781,8 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
         InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
         InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
         }
@@ -771,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
 
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
-fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
+fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
     match scalar.value {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -791,7 +812,7 @@ fn llvm_fixup_input(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
@@ -814,7 +835,7 @@ fn llvm_fixup_input(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count);
+            let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
@@ -869,7 +890,7 @@ fn llvm_fixup_output(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 bx.extract_element(value, bx.const_i32(0))
@@ -889,8 +910,8 @@ fn llvm_fixup_output(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
-            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
+            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
@@ -944,7 +965,7 @@ fn llvm_fixup_output_type(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Type {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 cx.type_vector(cx.type_i8(), 8)
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 6ac7093b7de..4e86946219f 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -9,18 +9,15 @@ use std::str;
 
 use crate::llvm::archive_ro::{ArchiveRO, Child};
 use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
-use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
-use rustc_codegen_ssa::{looks_like_rust_object_file, METADATA_FILENAME};
+use rustc_codegen_ssa::back::archive::ArchiveBuilder;
 use rustc_data_structures::temp_dir::MaybeTempDir;
 use rustc_middle::middle::cstore::{DllCallingConvention, DllImport};
 use rustc_session::Session;
-use rustc_span::symbol::Symbol;
 
 struct ArchiveConfig<'a> {
     pub sess: &'a Session,
     pub dst: PathBuf,
     pub src: Option<PathBuf>,
-    pub lib_search_paths: Vec<PathBuf>,
 }
 
 /// Helper for adding many files to an archive.
@@ -54,13 +51,7 @@ fn is_relevant_child(c: &Child<'_>) -> bool {
 }
 
 fn archive_config<'a>(sess: &'a Session, output: &Path, input: Option<&Path>) -> ArchiveConfig<'a> {
-    use rustc_codegen_ssa::back::link::archive_search_paths;
-    ArchiveConfig {
-        sess,
-        dst: output.to_path_buf(),
-        src: input.map(|p| p.to_path_buf()),
-        lib_search_paths: archive_search_paths(sess),
-    }
+    ArchiveConfig { sess, dst: output.to_path_buf(), src: input.map(|p| p.to_path_buf()) }
 }
 
 /// Map machine type strings to values of LLVM's MachineTypes enum.
@@ -111,57 +102,23 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
             .collect()
     }
 
-    /// Adds all of the contents of a native library to this archive. This will
-    /// search in the relevant locations for a library named `name`.
-    fn add_native_library(&mut self, name: Symbol, verbatim: bool) {
-        let location =
-            find_library(name, verbatim, &self.config.lib_search_paths, self.config.sess);
-        self.add_archive(&location, |_| false).unwrap_or_else(|e| {
-            self.config.sess.fatal(&format!(
-                "failed to add native library {}: {}",
-                location.to_string_lossy(),
-                e
-            ));
+    fn add_archive<F>(&mut self, archive: &Path, skip: F) -> io::Result<()>
+    where
+        F: FnMut(&str) -> bool + 'static,
+    {
+        let archive_ro = match ArchiveRO::open(archive) {
+            Ok(ar) => ar,
+            Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+        };
+        if self.additions.iter().any(|ar| ar.path() == archive) {
+            return Ok(());
+        }
+        self.additions.push(Addition::Archive {
+            path: archive.to_path_buf(),
+            archive: archive_ro,
+            skip: Box::new(skip),
         });
-    }
-
-    /// Adds all of the contents of the rlib at the specified path to this
-    /// archive.
-    ///
-    /// This ignores adding the bytecode from the rlib, and if LTO is enabled
-    /// then the object file also isn't added.
-    fn add_rlib(
-        &mut self,
-        rlib: &Path,
-        name: &str,
-        lto: bool,
-        skip_objects: bool,
-    ) -> io::Result<()> {
-        // Ignoring obj file starting with the crate name
-        // as simple comparison is not enough - there
-        // might be also an extra name suffix
-        let obj_start = name.to_owned();
-
-        self.add_archive(rlib, move |fname: &str| {
-            // Ignore metadata files, no matter the name.
-            if fname == METADATA_FILENAME {
-                return true;
-            }
-
-            // Don't include Rust objects if LTO is enabled
-            if lto && looks_like_rust_object_file(fname) {
-                return true;
-            }
-
-            // Otherwise if this is *not* a rust object and we're skipping
-            // objects then skip this file
-            if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
-                return true;
-            }
-
-            // ok, don't skip this
-            false
-        })
+        Ok(())
     }
 
     /// Adds an arbitrary file to this archive
@@ -270,25 +227,6 @@ impl<'a> LlvmArchiveBuilder<'a> {
         self.src_archive.as_ref().unwrap().as_ref()
     }
 
-    fn add_archive<F>(&mut self, archive: &Path, skip: F) -> io::Result<()>
-    where
-        F: FnMut(&str) -> bool + 'static,
-    {
-        let archive_ro = match ArchiveRO::open(archive) {
-            Ok(ar) => ar,
-            Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
-        };
-        if self.additions.iter().any(|ar| ar.path() == archive) {
-            return Ok(());
-        }
-        self.additions.push(Addition::Archive {
-            path: archive.to_path_buf(),
-            archive: archive_ro,
-            skip: Box::new(skip),
-        });
-        Ok(())
-    }
-
     fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> {
         let kind = &*self.config.sess.target.archive_format;
         kind.parse().map_err(|_| kind)
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 5b4a187a1d5..791604a1827 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -296,39 +296,8 @@ unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void
     }
     let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
 
-    // Recover the post-substitution assembly code from LLVM for better
-    // diagnostics.
-    let mut have_source = false;
-    let mut buffer = String::new();
-    let mut level = llvm::DiagnosticLevel::Error;
-    let mut loc = 0;
-    let mut ranges = [0; 8];
-    let mut num_ranges = ranges.len() / 2;
-    let msg = llvm::build_string(|msg| {
-        buffer = llvm::build_string(|buffer| {
-            have_source = llvm::LLVMRustUnpackSMDiagnostic(
-                diag,
-                msg,
-                buffer,
-                &mut level,
-                &mut loc,
-                ranges.as_mut_ptr(),
-                &mut num_ranges,
-            );
-        })
-        .expect("non-UTF8 inline asm");
-    })
-    .expect("non-UTF8 SMDiagnostic");
-
-    let source = have_source.then(|| {
-        let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)];
-        for i in 0..num_ranges {
-            spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize));
-        }
-        (buffer, spans)
-    });
-
-    report_inline_asm(cgcx, msg, level, cookie, source);
+    let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag);
+    report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source);
 }
 
 unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
@@ -339,13 +308,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
 
     match llvm::diagnostic::Diagnostic::unpack(info) {
         llvm::diagnostic::InlineAsm(inline) => {
-            report_inline_asm(
-                cgcx,
-                llvm::twine_to_string(inline.message),
-                inline.level,
-                inline.cookie,
-                None,
-            );
+            report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source);
         }
 
         llvm::diagnostic::Optimization(opt) => {
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index cc3cbea4def..a6bdbd11899 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -157,16 +157,18 @@ pub fn compile_codegen_unit(
             }
 
             // Finalize code coverage by injecting the coverage map. Note, the coverage map will
-            // also be added to the `llvm.used` variable, created next.
+            // also be added to the `llvm.compiler.used` variable, created next.
             if cx.sess().instrument_coverage() {
                 cx.coverageinfo_finalize();
             }
 
-            // Create the llvm.used variable
-            // This variable has type [N x i8*] and is stored in the llvm.metadata section
+            // Create the llvm.used and llvm.compiler.used variables.
             if !cx.used_statics().borrow().is_empty() {
                 cx.create_used_variable()
             }
+            if !cx.compiler_used_statics().borrow().is_empty() {
+                cx.create_compiler_used_variable()
+            }
 
             // Finalize debuginfo
             if cx.sess().opts.debuginfo != DebugInfo::None {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 2139f9776b7..799f9a57e93 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -15,15 +15,15 @@ use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
-use rustc_target::abi::{self, Align, Size};
+use rustc_target::abi::{self, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
 use std::ffi::CStr;
 use std::iter;
-use std::ops::{Deref, Range};
+use std::ops::Deref;
 use std::ptr;
 use tracing::debug;
 
@@ -88,12 +88,12 @@ impl HasTargetSpec for Builder<'_, '_, 'tcx> {
     }
 }
 
-impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
-    type Ty = Ty<'tcx>;
-    type TyAndLayout = TyAndLayout<'tcx>;
+impl LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
 
-    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
-        self.cx.layout_of(ty)
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        self.cx.handle_layout_err(err, span, ty)
     }
 }
 
@@ -382,7 +382,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             val
         }
     }
-    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
             return self.trunc(val, self.cx().type_i1());
         }
@@ -460,17 +460,15 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         fn scalar_load_metadata<'a, 'll, 'tcx>(
             bx: &mut Builder<'a, 'll, 'tcx>,
             load: &'ll Value,
-            scalar: &abi::Scalar,
+            scalar: abi::Scalar,
         ) {
-            let vr = scalar.valid_range.clone();
             match scalar.value {
                 abi::Int(..) => {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(load, range);
+                    if !scalar.is_always_valid(bx) {
+                        bx.range_metadata(load, scalar.valid_range);
                     }
                 }
-                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
+                abi::Pointer if !scalar.valid_range.contains(0) => {
                     bx.nonnull_metadata(load);
                 }
                 _ => {}
@@ -490,17 +488,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
-                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
             let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);
 
-            let mut load = |i, scalar: &abi::Scalar, align| {
+            let mut load = |i, scalar: abi::Scalar, align| {
                 let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -556,9 +554,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         next_bx
     }
 
-    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
+    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
         if self.sess().target.arch == "amdgpu" {
-            // amdgpu/LLVM does something weird and thinks a i64 value is
+            // amdgpu/LLVM does something weird and thinks an i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
             // tripping an assertion. So, for now, just disable this
             // optimization.
@@ -569,7 +567,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             let llty = self.cx.val_ty(load);
             let v = [
                 self.cx.const_uint_big(llty, range.start),
-                self.cx.const_uint_big(llty, range.end),
+                self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
             ];
 
             llvm::LLVMSetMetadata(
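
Note: `range_metadata` above switches from an exclusive `Range<u128>` to `WrappingRange`, an inclusive `start..=end` range that may wrap around the end of the integer type. That is why the upper bound handed to LLVM's half-open `!range` metadata is `range.end.wrapping_add(1)`, and why the pointer non-null check becomes a plain `!scalar.valid_range.contains(0)`. A minimal sketch of the containment rule under those semantics (the struct below is a hypothetical stand-in, not the rustc_target type):

    // Inclusive range that is allowed to wrap: start > end means the valid
    // values wrap through the type's maximum (e.g. 1..=0 excludes only 0).
    struct InclusiveWrappingRange { start: u128, end: u128 }

    impl InclusiveWrappingRange {
        fn contains(&self, v: u128) -> bool {
            if self.start <= self.end {
                self.start <= v && v <= self.end // ordinary range
            } else {
                v >= self.start || v <= self.end // wrapped range
            }
        }
    }
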
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 5532f53e408..73a8d464431 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -12,9 +12,10 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::{layout::TyAndLayout, ScalarInt};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::ScalarInt;
 use rustc_span::symbol::Symbol;
-use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size};
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
 
 use libc::{c_char, c_uint};
 use tracing::debug;
@@ -227,7 +228,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         })
     }
 
-    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 3ca295f4a7e..ef3a90fdeca 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -11,12 +11,17 @@ use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::interpret::{
-    read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer, Scalar as InterpScalar,
+    read_target_uint, Allocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer,
+    Scalar as InterpScalar,
 };
 use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, span_bug};
-use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
+use rustc_target::abi::{
+    AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
+use std::ops::Range;
 use tracing::debug;
 
 pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
@@ -24,6 +29,57 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;
 
+    // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`,
+    // so `range` must be within the bounds of `alloc` and not contain or overlap a relocation.
+    fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
+        llvals: &mut Vec<&'ll Value>,
+        cx: &'a CodegenCx<'ll, 'b>,
+        alloc: &'a Allocation,
+        range: Range<usize>,
+    ) {
+        let mut chunks = alloc
+            .init_mask()
+            .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end));
+
+        let chunk_to_llval = move |chunk| match chunk {
+            InitChunk::Init(range) => {
+                let range = (range.start.bytes() as usize)..(range.end.bytes() as usize);
+                let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+                cx.const_bytes(bytes)
+            }
+            InitChunk::Uninit(range) => {
+                let len = range.end.bytes() - range.start.bytes();
+                cx.const_undef(cx.type_array(cx.type_i8(), len))
+            }
+        };
+
+        // Generating partially-uninit consts inhibits optimizations, so it is disabled by default.
+        // See https://github.com/rust-lang/rust/issues/84565.
+        let allow_partially_uninit =
+            match cx.sess().opts.debugging_opts.partially_uninit_const_threshold {
+                Some(max) => range.len() <= max,
+                None => false,
+            };
+
+        if allow_partially_uninit {
+            llvals.extend(chunks.map(chunk_to_llval));
+        } else {
+            let llval = match (chunks.next(), chunks.next()) {
+                (Some(chunk), None) => {
+                    // exactly one chunk, either fully init or fully uninit
+                    chunk_to_llval(chunk)
+                }
+                _ => {
+                    // partially uninit, codegen as if it was initialized
+                    // (using some arbitrary value for uninit bytes)
+                    let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+                    cx.const_bytes(bytes)
+                }
+            };
+            llvals.push(llval);
+        }
+    }
+
     let mut next_offset = 0;
     for &(offset, alloc_id) in alloc.relocations().iter() {
         let offset = offset.bytes();
@@ -32,12 +88,8 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
         if offset > next_offset {
             // This `inspect` is okay since we have checked that it is not within a relocation, it
             // is within the bounds of the allocation, and it doesn't affect interpreter execution
-            // (we inspect the result after interpreter execution). Any undef byte is replaced with
-            // some arbitrary byte value.
-            //
-            // FIXME: relay undef bytes to codegen as undef const bytes
-            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
-            llvals.push(cx.const_bytes(bytes));
+            // (we inspect the result after interpreter execution).
+            append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
         }
         let ptr_offset = read_target_uint(
             dl.endian,
@@ -59,7 +111,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
                 Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                 &cx.tcx,
             ),
-            &Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
+            Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
             cx.type_i8p_ext(address_space),
         ));
         next_offset = offset + pointer_size;
@@ -68,12 +120,8 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
         let range = next_offset..alloc.len();
         // This `inspect` is okay since we have check that it is after all relocations, it is
         // within the bounds of the allocation, and it doesn't affect interpreter execution (we
-        // inspect the result after interpreter execution). Any undef byte is replaced with some
-        // arbitrary byte value.
-        //
-        // FIXME: relay undef bytes to codegen as undef const bytes
-        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
-        llvals.push(cx.const_bytes(bytes));
+        // inspect the result after interpreter execution).
+        append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
     }
 
     cx.const_struct(&llvals, true)
@@ -474,7 +522,13 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
             }
 
             if attrs.flags.contains(CodegenFnAttrFlags::USED) {
-                self.add_used_global(g);
+                // The semantics of #[used] in Rust only require the symbol to make it into the
+                // object file. It is explicitly allowed for the linker to strip the symbol if it
+                // is dead. As such, use llvm.compiler.used instead of llvm.used.
+                // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
+                // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs
+                // in some versions of the gold linker.
+                self.add_compiler_used_global(g);
             }
         }
     }
@@ -484,4 +538,11 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
         let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
         self.used_statics.borrow_mut().push(cast);
     }
+
+    /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
+    /// an array of i8*.
+    fn add_compiler_used_global(&self, global: &'ll Value) {
+        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
+        self.compiler_used_statics.borrow_mut().push(cast);
+    }
 }
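
Note: two behavioural changes are visible in this file. First, constants with uninitialized bytes can now be emitted as alternating initialized/undef chunks rather than being fully materialized, gated behind the `partially_uninit_const_threshold` debugging option referenced above. Second, `#[used]` statics are recorded in `llvm.compiler.used` instead of `llvm.used`, which matches the attribute's documented guarantee: the symbol reaches the object file, but a dead symbol may still be stripped at link time. A user-level sketch of what that guarantee covers:

    // The static is kept through codegen even though nothing in the program
    // reads it; with llvm.compiler.used the *linker* is still allowed to drop
    // it if it stays unreferenced.
    #[used]
    static EMBEDDED_TAG: [u8; 8] = *b"rustused";
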
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 5d56c93f835..2d397dc5835 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -14,15 +14,15 @@ use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::small_c_str::SmallCStr;
-use rustc_middle::bug;
 use rustc_middle::mir::mono::CodegenUnit;
-use rustc_middle::ty::layout::{HasParamEnv, LayoutError, TyAndLayout};
+use rustc_middle::ty::layout::{HasParamEnv, LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
 use rustc_session::config::{CFGuard, CrateType, DebugInfo};
 use rustc_session::Session;
-use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_span::source_map::Span;
 use rustc_span::symbol::Symbol;
-use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::abi::{HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
 use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
 use smallvec::SmallVec;
 
@@ -75,6 +75,10 @@ pub struct CodegenCx<'ll, 'tcx> {
     /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
     pub used_statics: RefCell<Vec<&'ll Value>>,
 
+    /// Statics that will be placed in the llvm.compiler.used variable
+    /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
+    pub compiler_used_statics: RefCell<Vec<&'ll Value>>,
+
     /// Mapping of non-scalar types to llvm types and field remapping if needed.
     pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
 
@@ -115,10 +119,6 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
     }
 }
 
-fn strip_powerpc64_vectors(data_layout: String) -> String {
-    data_layout.replace("-v256:256:256-v512:512:512", "")
-}
-
 pub unsafe fn create_module(
     tcx: TyCtxt<'_>,
     llcx: &'ll llvm::Context,
@@ -130,7 +130,18 @@ pub unsafe fn create_module(
 
     let mut target_data_layout = sess.target.data_layout.clone();
     if llvm_util::get_version() < (12, 0, 0) && sess.target.arch == "powerpc64" {
-        target_data_layout = strip_powerpc64_vectors(target_data_layout);
+        target_data_layout = target_data_layout.replace("-v256:256:256-v512:512:512", "");
+    }
+    if llvm_util::get_version() < (13, 0, 0) {
+        if sess.target.arch == "powerpc64" {
+            target_data_layout = target_data_layout.replace("-S128", "");
+        }
+        if sess.target.arch == "wasm32" {
+            target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string();
+        }
+        if sess.target.arch == "wasm64" {
+            target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string();
+        }
     }
 
     // Ensure the data-layout values hardcoded remain the defaults.
@@ -318,6 +329,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             const_globals: Default::default(),
             statics_to_rauw: RefCell::new(Vec::new()),
             used_statics: RefCell::new(Vec::new()),
+            compiler_used_statics: RefCell::new(Vec::new()),
             type_lowering: Default::default(),
             scalar_lltypes: Default::default(),
             pointee_infos: Default::default(),
@@ -340,6 +352,18 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
         self.coverage_cx.as_ref()
     }
+
+    fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
+        let section = cstr!("llvm.metadata");
+        let array = self.const_array(&self.type_ptr_to(self.type_i8()), values);
+
+        unsafe {
+            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
+            llvm::LLVMSetInitializer(g, array);
+            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+            llvm::LLVMSetSection(g, section.as_ptr());
+        }
+    }
 }
 
 impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
@@ -430,6 +454,10 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         &self.used_statics
     }
 
+    fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+        &self.compiler_used_statics
+    }
+
     fn set_frame_pointer_type(&self, llfn: &'ll Value) {
         attributes::set_frame_pointer_type(self, llfn)
     }
@@ -440,17 +468,14 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn create_used_variable(&self) {
-        let name = cstr!("llvm.used");
-        let section = cstr!("llvm.metadata");
-        let array =
-            self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
+        self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());
+    }
 
-        unsafe {
-            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
-            llvm::LLVMSetInitializer(g, array);
-            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
-            llvm::LLVMSetSection(g, section.as_ptr());
-        }
+    fn create_compiler_used_variable(&self) {
+        self.create_used_variable_impl(
+            cstr!("llvm.compiler.used"),
+            &*self.compiler_used_statics.borrow(),
+        );
     }
 
     fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
@@ -810,22 +835,16 @@ impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 }
 
-impl LayoutOf for CodegenCx<'ll, 'tcx> {
-    type Ty = Ty<'tcx>;
-    type TyAndLayout = TyAndLayout<'tcx>;
-
-    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
-        self.spanned_layout_of(ty, DUMMY_SP)
-    }
+impl LayoutOfHelpers<'tcx> for CodegenCx<'ll, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
 
-    fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
-        self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| {
-            if let LayoutError::SizeOverflow(_) = e {
-                self.sess().span_fatal(span, &e.to_string())
-            } else {
-                bug!("failed to get layout for `{}`: {}", ty, e)
-            }
-        })
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        if let LayoutError::SizeOverflow(_) = err {
+            self.sess().span_fatal(span, &err.to_string())
+        } else {
+            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+        }
     }
 }
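
Note: besides the new `compiler_used_statics` list and the factored-out `create_used_variable_impl`, `create_module` above now rewrites the target data-layout string when building against older LLVM releases (pre-12 and pre-13 adjustments for powerpc64, wasm32, and wasm64), so rustc's hardcoded layout stays in sync with what that LLVM reports. A standalone sketch of this kind of version gating, with a hypothetical helper name:

    // Drop the `-S128` stack-alignment component that older LLVM does not
    // report for powerpc64, mirroring the string replacement in create_module.
    fn adjust_data_layout(mut layout: String, arch: &str, llvm_version: (u32, u32, u32)) -> String {
        if llvm_version < (13, 0, 0) && arch == "powerpc64" {
            layout = layout.replace("-S128", "");
        }
        layout
    }
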
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 7e136c1b24c..f913c3e4b70 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -28,15 +28,16 @@ use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::ich::NodeIdHashingMode;
 use rustc_middle::mir::{self, GeneratorLayout};
-use rustc_middle::ty::layout::{self, IntegerExt, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::layout::{self, IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout};
 use rustc_middle::ty::subst::GenericArgKind;
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::{self, AdtKind, GeneratorSubsts, ParamEnv, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::{self, DebugInfo};
 use rustc_span::symbol::{Interner, Symbol};
+use rustc_span::FileNameDisplayPreference;
 use rustc_span::{self, SourceFile, SourceFileHash, Span};
-use rustc_target::abi::{Abi, Align, HasDataLayout, Integer, LayoutOf, TagEncoding};
+use rustc_target::abi::{Abi, Align, HasDataLayout, Integer, TagEncoding};
 use rustc_target::abi::{Int, Pointer, F32, F64};
 use rustc_target::abi::{Primitive, Size, VariantIdx, Variants};
 use tracing::debug;
@@ -771,7 +772,13 @@ pub fn file_metadata(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll
     let hash = Some(&source_file.src_hash);
     let file_name = Some(source_file.name.prefer_remapped().to_string());
     let directory = if source_file.is_real_file() && !source_file.is_imported() {
-        Some(cx.sess().working_dir.to_string_lossy(false).to_string())
+        Some(
+            cx.sess()
+                .opts
+                .working_dir
+                .to_string_lossy(FileNameDisplayPreference::Remapped)
+                .to_string(),
+        )
     } else {
         // If the path comes from an upstream crate we assume it has been made
         // independent of the compiler's working directory one way or another.
@@ -999,7 +1006,7 @@ pub fn compile_unit_metadata(
     let producer = format!("clang LLVM ({})", rustc_producer);
 
     let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
-    let work_dir = tcx.sess.working_dir.to_string_lossy(false);
+    let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
     let flags = "\0";
     let output_filenames = tcx.output_filenames(());
     let out_dir = &output_filenames.out_directory;
@@ -1280,6 +1287,31 @@ fn prepare_struct_metadata(
 // Tuples
 //=-----------------------------------------------------------------------------
 
+/// Returns names of captured upvars for closures and generators.
+///
+/// Here are some examples:
+///  - `name__field1__field2` when the upvar is captured by value.
+///  - `_ref__name__field` when the upvar is captured by reference.
+fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<String> {
+    let body = tcx.optimized_mir(def_id);
+
+    body.var_debug_info
+        .iter()
+        .filter_map(|var| {
+            let is_ref = match var.value {
+                mir::VarDebugInfoContents::Place(place) if place.local == mir::Local::new(1) => {
+                    // The projection is either `[.., Field, Deref]` or `[.., Field]`. It
+                    // implies whether the variable is captured by value or by reference.
+                    matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref)
+                }
+                _ => return None,
+            };
+            let prefix = if is_ref { "_ref__" } else { "" };
+            Some(prefix.to_owned() + &var.name.as_str())
+        })
+        .collect::<Vec<_>>()
+}
+
 /// Creates `MemberDescription`s for the fields of a tuple.
 struct TupleMemberDescriptionFactory<'tcx> {
     ty: Ty<'tcx>,
@@ -1289,14 +1321,25 @@ struct TupleMemberDescriptionFactory<'tcx> {
 
 impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
+        let mut capture_names = match *self.ty.kind() {
+            ty::Generator(def_id, ..) | ty::Closure(def_id, ..) => {
+                Some(closure_saved_names_of_captured_variables(cx.tcx, def_id).into_iter())
+            }
+            _ => None,
+        };
         let layout = cx.layout_of(self.ty);
         self.component_types
             .iter()
             .enumerate()
             .map(|(i, &component_type)| {
                 let (size, align) = cx.size_and_align_of(component_type);
+                let name = if let Some(names) = capture_names.as_mut() {
+                    names.next().unwrap()
+                } else {
+                    format!("__{}", i)
+                };
                 MemberDescription {
-                    name: format!("__{}", i),
+                    name,
                     type_metadata: type_metadata(cx, component_type, self.span),
                     offset: layout.fields.offset(i),
                     size,
@@ -1613,7 +1656,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
             Variants::Multiple {
                 tag_encoding:
                     TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
-                ref tag,
+                tag,
                 ref variants,
                 tag_field,
             } => {
@@ -2039,10 +2082,8 @@ fn prepare_enum_metadata(
 
     let layout = cx.layout_of(enum_type);
 
-    if let (
-        &Abi::Scalar(_),
-        &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
-    ) = (&layout.abi, &layout.variants)
+    if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
+        (layout.abi, &layout.variants)
     {
         return FinalMetadata(discriminant_type_metadata(tag.value));
     }
@@ -2050,8 +2091,8 @@ fn prepare_enum_metadata(
     if use_enum_fallback(cx) {
         let discriminant_type_metadata = match layout.variants {
             Variants::Single { .. } => None,
-            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. }
-            | Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => {
+            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
+            | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
                 Some(discriminant_type_metadata(tag.value))
             }
         };
@@ -2103,9 +2144,7 @@ fn prepare_enum_metadata(
         // A single-variant enum has no discriminant.
         Variants::Single { .. } => None,
 
-        Variants::Multiple {
-            tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
-        } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
             // Find the integer type of the correct size.
             let size = tag.value.size(cx);
             let align = tag.value.align(cx);
@@ -2136,7 +2175,7 @@ fn prepare_enum_metadata(
             }
         }
 
-        Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
             let discr_type = tag.value.to_ty(cx.tcx);
             let (size, align) = cx.size_and_align_of(discr_type);
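
Note: the new `closure_saved_names_of_captured_variables` feeds `TupleMemberDescriptionFactory`, so closure and generator fields in debuginfo are named after the captured variables instead of the positional `__0`, `__1`, and so on. Illustrative user code for the naming scheme described in its doc comment (the names in the comments are the debuginfo field names the scheme would produce, not anything this snippet prints):

    fn capture_naming() {
        let owned = String::from("moved in");
        let counter = 7u32;
        let closure = move |x: u32| {
            // `owned` and `counter` are captured by value here, so their
            // debuginfo fields are named "owned" and "counter"; a capture by
            // reference would instead get a "_ref__" prefix, e.g. "_ref__counter".
            owned.len() as u32 + counter + x
        };
        let _ = closure(1);
    }
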
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 8375d4c7ca5..fbaf8c8bdf6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -25,13 +25,14 @@ use rustc_data_structures::sync::Lrc;
 use rustc_hir::def_id::{DefId, DefIdMap};
 use rustc_index::vec::IndexVec;
 use rustc_middle::mir;
-use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
 use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeFoldable};
 use rustc_session::config::{self, DebugInfo};
+use rustc_session::Session;
 use rustc_span::symbol::Symbol;
 use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, Span};
-use rustc_target::abi::{LayoutOf, Primitive, Size};
+use rustc_target::abi::{Primitive, Size};
 
 use libc::c_uint;
 use smallvec::SmallVec;
@@ -95,45 +96,52 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
             composite_types_completed: Default::default(),
         }
     }
-}
-
-/// Creates any deferred debug metadata nodes
-pub fn finalize(cx: &CodegenCx<'_, '_>) {
-    if cx.dbg_cx.is_none() {
-        return;
-    }
 
-    debug!("finalize");
+    pub fn finalize(&self, sess: &Session) {
+        unsafe {
+            llvm::LLVMRustDIBuilderFinalize(self.builder);
+
+            // Debuginfo generation in LLVM by default uses a higher
+            // version of dwarf than macOS currently understands. We can
+            // instruct LLVM to emit an older version of dwarf, however,
+            // for macOS to understand. For more info see #11352
+            // This can be overridden using --llvm-opts -dwarf-version,N.
+            // Android has the same issue (#22398)
+            if let Some(version) = sess.target.dwarf_version {
+                llvm::LLVMRustAddModuleFlag(self.llmod, "Dwarf Version\0".as_ptr().cast(), version)
+            }
 
-    if gdb::needs_gdb_debug_scripts_section(cx) {
-        // Add a .debug_gdb_scripts section to this compile-unit. This will
-        // cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
-        // which activates the Rust pretty printers for binary this section is
-        // contained in.
-        gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
-    }
+            // Indicate that we want CodeView debug information on MSVC
+            if sess.target.is_like_msvc {
+                llvm::LLVMRustAddModuleFlag(self.llmod, "CodeView\0".as_ptr().cast(), 1)
+            }
 
-    unsafe {
-        llvm::LLVMRustDIBuilderFinalize(DIB(cx));
-        // Debuginfo generation in LLVM by default uses a higher
-        // version of dwarf than macOS currently understands. We can
-        // instruct LLVM to emit an older version of dwarf, however,
-        // for macOS to understand. For more info see #11352
-        // This can be overridden using --llvm-opts -dwarf-version,N.
-        // Android has the same issue (#22398)
-        if let Some(version) = cx.sess().target.dwarf_version {
-            llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), version)
+            // Prevent bitcode readers from deleting the debug info.
+            let ptr = "Debug Info Version\0".as_ptr();
+            llvm::LLVMRustAddModuleFlag(
+                self.llmod,
+                ptr.cast(),
+                llvm::LLVMRustDebugMetadataVersion(),
+            );
         }
+    }
+}
 
-        // Indicate that we want CodeView debug information on MSVC
-        if cx.sess().target.is_like_msvc {
-            llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1)
+/// Creates any deferred debug metadata nodes
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+    if let Some(dbg_cx) = &cx.dbg_cx {
+        debug!("finalize");
+
+        if gdb::needs_gdb_debug_scripts_section(cx) {
+            // Add a .debug_gdb_scripts section to this compile-unit. This will
+            // cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
+            // which activates the Rust pretty printers for binary this section is
+            // contained in.
+            gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
         }
 
-        // Prevent bitcode readers from deleting the debug info.
-        let ptr = "Debug Info Version\0".as_ptr();
-        llvm::LLVMRustAddModuleFlag(cx.llmod, ptr.cast(), llvm::LLVMRustDebugMetadataVersion());
-    };
+        dbg_cx.finalize(cx.sess());
+    }
 }
 
 impl DebugInfoBuilderMethods for Builder<'a, 'll, 'tcx> {
@@ -499,7 +507,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         ty::Adt(def, ..) if !def.is_box() => {
                             // Again, only create type information if full debuginfo is enabled
                             if cx.sess().opts.debuginfo == DebugInfo::Full
-                                && !impl_self_ty.needs_subst()
+                                && !impl_self_ty.definitely_needs_subst(cx.tcx)
                             {
                                 Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
                             } else {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index ed484185865..1060f911a9e 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -7,6 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::va_arg::emit_va_arg;
 use crate::value::Value;
 
+use rustc_ast as ast;
 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
 use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
@@ -14,11 +15,11 @@ use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir as hir;
-use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, symbol::kw, Span, Symbol};
-use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};
+use rustc_target::abi::{self, HasDataLayout, Primitive};
 use rustc_target::spec::PanicStrategy;
 
 use std::cmp::Ordering;
@@ -132,13 +133,13 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(ref scalar) => {
+                    abi::Abi::Scalar(scalar) => {
                         match scalar.value {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
-                                    // `va_arg` should not be called on a integer type
+                                    // `va_arg` should not be called on an integer type
                                     // less than 4 bytes in length. If it is, promote
-                                    // the integer to a `i32` and truncate the result
+                                    // the integer to an `i32` and truncate the result
                                     // back to the smaller type.
                                     let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                     self.trunc(promoted_result, llret_ty)
@@ -327,6 +328,31 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
 
+            sym::black_box => {
+                args[0].val.store(self, result);
+
+                // We need to "use" the argument in some way LLVM can't introspect, and on
+                // targets that support it we can typically leverage inline assembly to do
+                // this. LLVM's interpretation of inline assembly is that it's, well, a black
+                // box. This isn't the greatest implementation since it probably deoptimizes
+                // more than we want, but so far it has been good enough.
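+                //
+                // The empty asm template with the constraint string "r,~{memory}"
+                // passes the address of `result` in a register and declares a memory
+                // clobber, so LLVM must assume the asm may read or write the value
+                // and cannot optimize it away.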
+                crate::asm::inline_asm_call(
+                    self,
+                    "",
+                    "r,~{memory}",
+                    &[result.llval],
+                    self.type_void(),
+                    true,
+                    false,
+                    ast::LlvmAsmDialect::Att,
+                    &[span],
+                )
+                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));
+
+                // We have copied the value to `result` already.
+                return;
+            }
+
             _ if name_str.starts_with("simd_") => {
                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                     Ok(llval) => llval,
@@ -1005,7 +1031,7 @@ fn generic_simd_intrinsic(
         // vector mask and returns an unsigned integer containing the most
         // significant bit (MSB) of each lane.
 
-        // If the vector has less than 8 lanes, an u8 is returned with zeroed
+        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed
         // trailing bits.
         let expected_int_bits = in_len.max(8);
         match ret_ty.kind() {
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index aa4db1622b2..1da14344b1d 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -92,11 +92,12 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
     fn codegen_allocator<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
-        mods: &mut ModuleLlvm,
+        module_llvm: &mut ModuleLlvm,
+        module_name: &str,
         kind: AllocatorKind,
         has_alloc_error_handler: bool,
     ) {
-        unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) }
+        unsafe { allocator::codegen(tcx, module_llvm, module_name, kind, has_alloc_error_handler) }
     }
     fn compile_codegen_unit(
         &self,
@@ -352,8 +353,8 @@ impl ModuleLlvm {
 impl Drop for ModuleLlvm {
     fn drop(&mut self) {
         unsafe {
-            llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
             llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+            llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
index ccd3e42e458..36aa022d746 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -6,7 +6,8 @@ pub use self::OptimizationDiagnosticKind::*;
 use crate::value::Value;
 use libc::c_uint;
 
-use super::{DiagnosticInfo, Twine};
+use super::{DiagnosticInfo, SMDiagnostic};
+use rustc_span::InnerSpan;
 
 #[derive(Copy, Clone)]
 pub enum OptimizationDiagnosticKind {
@@ -86,36 +87,91 @@ impl OptimizationDiagnostic<'ll> {
     }
 }
 
-#[derive(Copy, Clone)]
-pub struct InlineAsmDiagnostic<'ll> {
+pub struct SrcMgrDiagnostic {
+    pub level: super::DiagnosticLevel,
+    pub message: String,
+    pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl SrcMgrDiagnostic {
+    pub unsafe fn unpack(diag: &SMDiagnostic) -> SrcMgrDiagnostic {
+        // Recover the post-substitution assembly code from LLVM for better
+        // diagnostics.
+        let mut have_source = false;
+        let mut buffer = String::new();
+        let mut level = super::DiagnosticLevel::Error;
+        let mut loc = 0;
+        let mut ranges = [0; 8];
+        let mut num_ranges = ranges.len() / 2;
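+        // `ranges` holds up to four (start, end) byte-offset pairs; the FFI call
+        // fills them in and updates `num_ranges` to the number of pairs it
+        // actually wrote, which the `source` construction below reads back.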
+        let message = super::build_string(|message| {
+            buffer = super::build_string(|buffer| {
+                have_source = super::LLVMRustUnpackSMDiagnostic(
+                    diag,
+                    message,
+                    buffer,
+                    &mut level,
+                    &mut loc,
+                    ranges.as_mut_ptr(),
+                    &mut num_ranges,
+                );
+            })
+            .expect("non-UTF8 inline asm");
+        })
+        .expect("non-UTF8 SMDiagnostic");
+
+        SrcMgrDiagnostic {
+            message,
+            level,
+            source: have_source.then(|| {
+                let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)];
+                for i in 0..num_ranges {
+                    spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize));
+                }
+                (buffer, spans)
+            }),
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct InlineAsmDiagnostic {
     pub level: super::DiagnosticLevel,
     pub cookie: c_uint,
-    pub message: &'ll Twine,
-    pub instruction: Option<&'ll Value>,
+    pub message: String,
+    pub source: Option<(String, Vec<InnerSpan>)>,
 }
 
-impl InlineAsmDiagnostic<'ll> {
-    unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
+impl InlineAsmDiagnostic {
+    unsafe fn unpackInlineAsm(di: &'ll DiagnosticInfo) -> Self {
         let mut cookie = 0;
         let mut message = None;
-        let mut instruction = None;
         let mut level = super::DiagnosticLevel::Error;
 
-        super::LLVMRustUnpackInlineAsmDiagnostic(
-            di,
-            &mut level,
-            &mut cookie,
-            &mut message,
-            &mut instruction,
-        );
+        super::LLVMRustUnpackInlineAsmDiagnostic(di, &mut level, &mut cookie, &mut message);
 
-        InlineAsmDiagnostic { level, cookie, message: message.unwrap(), instruction }
+        InlineAsmDiagnostic {
+            level,
+            cookie,
+            message: super::twine_to_string(message.unwrap()),
+            source: None,
+        }
+    }
+
+    unsafe fn unpackSrcMgr(di: &'ll DiagnosticInfo) -> Self {
+        let mut cookie = 0;
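+        // The cookie is the `!srcloc` value attached to the inline asm call, which
+        // lets the caller map this diagnostic back to the originating Rust span.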
+        let smdiag = SrcMgrDiagnostic::unpack(super::LLVMRustGetSMDiagnostic(di, &mut cookie));
+        InlineAsmDiagnostic {
+            level: smdiag.level,
+            cookie,
+            message: smdiag.message,
+            source: smdiag.source,
+        }
     }
 }
 
 pub enum Diagnostic<'ll> {
     Optimization(OptimizationDiagnostic<'ll>),
-    InlineAsm(InlineAsmDiagnostic<'ll>),
+    InlineAsm(InlineAsmDiagnostic),
     PGO(&'ll DiagnosticInfo),
     Linker(&'ll DiagnosticInfo),
     Unsupported(&'ll DiagnosticInfo),
@@ -130,7 +186,7 @@ impl Diagnostic<'ll> {
         let kind = super::LLVMRustGetDiagInfoKind(di);
 
         match kind {
-            Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpack(di)),
+            Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpackInlineAsm(di)),
 
             Dk::OptimizationRemark => {
                 Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di))
@@ -162,6 +218,8 @@ impl Diagnostic<'ll> {
             Dk::Linker => Linker(di),
             Dk::Unsupported => Unsupported(di),
 
+            Dk::SrcMgr => InlineAsm(InlineAsmDiagnostic::unpackSrcMgr(di)),
+
             _ => UnknownDiagnostic(di),
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 57173a49107..3f2ed02d90d 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -490,6 +490,7 @@ pub enum DiagnosticKind {
     PGOProfile,
     Linker,
     Unsupported,
+    SrcMgr,
 }
 
 /// LLVMRustDiagnosticLevel
@@ -2264,13 +2265,17 @@ extern "C" {
         level_out: &mut DiagnosticLevel,
         cookie_out: &mut c_uint,
         message_out: &mut Option<&'a Twine>,
-        instruction_out: &mut Option<&'a Value>,
     );
 
     #[allow(improper_ctypes)]
     pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString);
     pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind;
 
+    pub fn LLVMRustGetSMDiagnostic(
+        DI: &'a DiagnosticInfo,
+        cookie_out: &mut c_uint,
+    ) -> &'a SMDiagnostic;
+
     pub fn LLVMRustSetInlineAsmDiagnosticHandler(
         C: &Context,
         H: InlineAsmDiagHandler,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 0ca51bd2883..3b64ec1a991 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -89,13 +89,14 @@ unsafe fn configure_llvm(sess: &Session) {
             add("-generate-arange-section", false);
         }
 
-        // FIXME(nagisa): disable the machine outliner by default in LLVM versions 11, where it was
-        // introduced and up.
+        // Disable the machine outliner by default in LLVM versions 11 and 12,
+        // where it leads to miscompilation.
         //
-        // This should remain in place until https://reviews.llvm.org/D103167 is fixed. If LLVM
-        // has been upgraded since, consider adjusting the version check below to contain an upper
-        // bound.
-        if llvm_util::get_version() >= (11, 0, 0) {
+        // Ref:
+        // - https://github.com/rust-lang/rust/issues/85351
+        // - https://reviews.llvm.org/D103167
+        let llvm_version = llvm_util::get_version();
+        if llvm_version >= (11, 0, 0) && llvm_version < (13, 0, 0) {
             add("-enable-machine-outliner=never", false);
         }
 
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 93456443aa0..8ba3e870fbb 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -8,10 +8,9 @@ use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 pub use rustc_middle::mir::mono::MonoItem;
 use rustc_middle::mir::mono::{Linkage, Visibility};
-use rustc_middle::ty::layout::FnAbiExt;
+use rustc_middle::ty::layout::{FnAbiExt, LayoutOf};
 use rustc_middle::ty::{self, Instance, TypeFoldable};
 use rustc_session::config::CrateType;
-use rustc_target::abi::LayoutOf;
 use rustc_target::spec::RelocModel;
 use tracing::debug;
 
@@ -135,6 +134,11 @@ impl CodegenCx<'ll, 'tcx> {
             return false;
         }
 
+        // Match clang by only supporting COFF and ELF for now.
+        if self.tcx.sess.target.is_like_osx {
+            return false;
+        }
+
         // Static relocation model should force copy relocations everywhere.
         if self.tcx.sess.relocation_model() == RelocModel::Static {
             return true;
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 85efe3e6483..3e39bf3e995 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -4,12 +4,12 @@ use crate::context::TypeLowering;
 use crate::type_::Type;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
-use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout};
+use rustc_middle::ty::layout::{FnAbiExt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TypeFoldable};
 use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F32, F64};
-use rustc_target::abi::{LayoutOf, PointeeInfo, Scalar, Size, TyAndLayoutMethods, Variants};
+use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
 use smallvec::{smallvec, SmallVec};
 use tracing::debug;
 
@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 ) -> &'a Type {
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { ref element, count } => {
+        Abi::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
             return cx.type_vector(element, count);
         }
@@ -177,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> {
     fn scalar_llvm_type_at<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
-        scalar: &Scalar,
+        scalar: Scalar,
         offset: Size,
     ) -> &'a Type;
     fn scalar_pair_element_llvm_type<'a>(
@@ -218,7 +218,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let Abi::Scalar(scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -286,7 +286,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let Abi::Scalar(scalar) = self.abi {
             if scalar.is_bool() {
                 return cx.type_i1();
             }
@@ -297,7 +297,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn scalar_llvm_type_at<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
-        scalar: &Scalar,
+        scalar: Scalar,
         offset: Size,
     ) -> &'a Type {
         match scalar.value {
@@ -337,7 +337,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         }
 
         let (a, b) = match self.abi {
-            Abi::ScalarPair(ref a, ref b) => (a, b),
+            Abi::ScalarPair(a, b) => (a, b),
             _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
         };
         let scalar = [a, b][index];
@@ -393,12 +393,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         }
     }
 
+    // FIXME(eddyb) because this has the same name as `TyAndLayout::pointee_info_at`
+    // (the inherent method, which lacks this caching logic), the uncached
+    // version can end up being called - not wrong, but potentially inefficient.
     fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
         if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
             return pointee;
         }
 
-        let result = Ty::pointee_info_at(*self, cx, offset);
+        let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
 
         cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
         result
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 2208ec37a42..caafae6c267 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -7,9 +7,9 @@ use rustc_codegen_ssa::{
     common::IntPredicate,
     traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
 };
-use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::Ty;
-use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size};
+use rustc_target::abi::{Align, Endian, HasDataLayout, Size};
 
 fn round_pointer_up_to_alignment(
     bx: &mut Builder<'a, 'll, 'tcx>,