Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 125
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 193
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 38
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 81
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 65
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 124
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs | 103
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 241
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 234
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 63
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 69
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 55
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 156
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs | 279
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs | 294
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 231
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 238
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 53
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 10
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 105
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 24
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 27
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 25
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 138
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs | 63
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 28
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 536
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 94
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 295
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 234
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 9
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 73
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 120
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 37
38 files changed, 2792 insertions(+), 1686 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 28be6d033f8..9e834b83df4 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,14 +211,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        } else if let PassMode::Cast(cast, _) = &self.mode {
+        } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
-                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
-                bx.store(val, cast_dst, self.layout.align.abi);
+                bx.store(val, dst.llval, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type. The
@@ -276,12 +274,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
-            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             }
             PassMode::Direct(_)
-            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
-            | PassMode::Cast(..) => {
+            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+            | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             }
@@ -334,39 +332,86 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         let llreturn_ty = match &self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
-            PassMode::Cast(cast, _) => cast.llvm_type(cx),
+            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
-                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                llargument_tys.push(cx.type_ptr());
                 cx.type_void()
             }
         };
 
         for arg in args {
+            // Note that the exact number of arguments pushed here is carefully synchronized with
+            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
             let llarg_ty = match &arg.mode {
                 PassMode::Ignore => continue,
-                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+                PassMode::Direct(_) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+                    // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
+                    // aggregates...
+                    if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+                        assert!(
+                            arg.layout.is_sized(),
+                            "`PassMode::Direct` for unsized type: {}",
+                            arg.layout.ty
+                        );
+                        // This really shouldn't happen, since `immediate_llvm_type` will use
+                        // `layout.fields` to turn this Rust type into an LLVM type. This means all
+                        // sorts of Rust type details leak into the ABI. However wasm sadly *does*
+                        // currently use this mode so we have to allow it -- but we absolutely
+                        // shouldn't let any more targets do that.
+                        // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+                        assert!(
+                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
+                            "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+                            arg.layout,
+                        );
+                    }
+                    arg.layout.immediate_llvm_type(cx)
+                }
                 PassMode::Pair(..) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+                    // LLVM IR.
+                    assert!(
+                        matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
+                        "PassMode::Pair for type {}",
+                        arg.layout.ty
+                    );
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
-                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
+                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+                    // `Indirect` with metadata is only for unsized types, and doesn't work with
+                    // on-stack passing.
+                    assert!(arg.layout.is_unsized() && !on_stack);
+                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+                    // Any two ABI-compatible unsized types have the same metadata type and
+                    // moreover the same metadata value leads to the same dynamic size and
+                    // alignment, so this respects ABI compatibility.
+                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                     let ptr_layout = cx.layout_of(ptr_ty);
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+                    assert!(arg.layout.is_sized());
+                    cx.type_ptr()
+                }
+                PassMode::Cast { cast, pad_i32 } => {
+                    // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+                    assert!(arg.layout.is_sized());
                     // add padding
                     if *pad_i32 {
                         llargument_tys.push(Reg::i32().llvm_type(cx));
                     }
+                    // Compute the LLVM type we use for this function from the cast type.
+                    // We assume here that ABI-compatible Rust types have the same cast type.
                     cast.llvm_type(cx)
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
-                    cx.type_ptr_to(arg.memory_ty(cx))
-                }
             };
             llargument_tys.push(llarg_ty);
         }
@@ -379,12 +424,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        unsafe {
-            llvm::LLVMPointerType(
-                self.llvm_type(cx),
-                cx.data_layout().instruction_address_space.0 as c_uint,
-            )
-        }
+        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
     }
 
     fn llvm_cconv(&self) -> llvm::CallConv {
@@ -392,13 +432,16 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
-        let mut func_attrs = SmallVec::<[_; 2]>::new();
+        let mut func_attrs = SmallVec::<[_; 3]>::new();
         if self.ret.layout.abi.is_uninhabited() {
             func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
         }
         if !self.can_unwind {
             func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
         }
+        if let Conv::RiscvInterrupt { kind } = self.conv {
+            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
+        }
         attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
 
         let mut i = 0;
@@ -411,13 +454,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(attrs);
                 let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
             _ => {}
@@ -425,25 +468,25 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(attrs);
                     let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                     attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                     assert!(!on_stack);
                     apply(attrs);
-                    apply(extra_attrs);
+                    apply(meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(a);
                     apply(b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(&ArgAttributes::new());
                     }
@@ -473,13 +516,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(bx.cx, attrs);
                 let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                 attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_callsite(
                     llvm::AttributePlace::ReturnValue,
                     &bx.cx,
@@ -501,7 +544,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(bx.cx, attrs);
                     let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                     attributes::apply_to_callsite(
@@ -511,18 +554,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     );
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(bx.cx, attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                     apply(bx.cx, attrs);
-                    apply(bx.cx, extra_attrs);
+                    apply(bx.cx, meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(bx.cx, a);
                     apply(bx.cx, b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(bx.cx, &ArgAttributes::new());
                     }
@@ -574,8 +617,12 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
 impl From<Conv> for llvm::CallConv {
     fn from(conv: Conv) -> Self {
         match conv {
-            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
-            Conv::RustCold => llvm::ColdCallConv,
+            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
+                llvm::CCallConv
+            }
+            Conv::Cold => llvm::ColdCallConv,
+            Conv::PreserveMost => llvm::PreserveMost,
+            Conv::PreserveAll => llvm::PreserveAll,
             Conv::AmdGpuKernel => llvm::AmdGpuKernel,
             Conv::AvrInterrupt => llvm::AvrInterrupt,
             Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
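
The churn above is largely mechanical: the tuple variant PassMode::Cast(..) becomes the struct variant PassMode::Cast { cast, pad_i32 }, extra_attrs is renamed to meta_attrs, and typed pointers (type_ptr_to) give way to opaque type_ptr(). A minimal sketch of what the struct-variant style buys at match sites (placeholder types, not rustc's real definitions):

    struct CastTarget;

    enum PassMode {
        Ignore,
        Cast { cast: CastTarget, pad_i32: bool },
    }

    fn describe(mode: &PassMode) -> &'static str {
        match mode {
            PassMode::Ignore => "ignored",
            // The field name documents what the bool means, and `pad_i32: _`
            // records that it is deliberately unused rather than forgotten.
            PassMode::Cast { cast: _, pad_i32: true } => "cast with i32 padding",
            PassMode::Cast { .. } => "cast",
        }
    }
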
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index 668d9292705..db5c1388ef8 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -1,13 +1,15 @@
 use crate::attributes;
 use libc::c_uint;
-use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_ast::expand::allocator::{
+    alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy,
+    ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE,
+};
 use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{DebugInfo, OomStrategy};
-use rustc_span::symbol::sym;
 
 use crate::debuginfo;
-use crate::llvm::{self, False, True};
+use crate::llvm::{self, Context, False, Module, True, Type};
 use crate::ModuleLlvm;
 
 pub(crate) unsafe fn codegen(
@@ -26,39 +28,107 @@ pub(crate) unsafe fn codegen(
         tws => bug!("Unsupported target word size for int: {}", tws),
     };
     let i8 = llvm::LLVMInt8TypeInContext(llcx);
-    let i8p = llvm::LLVMPointerType(i8, 0);
-    let void = llvm::LLVMVoidTypeInContext(llcx);
-
-    for method in ALLOCATOR_METHODS {
-        let mut args = Vec::with_capacity(method.inputs.len());
-        for ty in method.inputs.iter() {
-            match *ty {
-                AllocatorTy::Layout => {
-                    args.push(usize); // size
-                    args.push(usize); // align
+    let i8p = llvm::LLVMPointerTypeInContext(llcx, 0);
+
+    if kind == AllocatorKind::Default {
+        for method in ALLOCATOR_METHODS {
+            let mut args = Vec::with_capacity(method.inputs.len());
+            for input in method.inputs.iter() {
+                match input.ty {
+                    AllocatorTy::Layout => {
+                        args.push(usize); // size
+                        args.push(usize); // align
+                    }
+                    AllocatorTy::Ptr => args.push(i8p),
+                    AllocatorTy::Usize => args.push(usize),
+
+                    AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
                 }
-                AllocatorTy::Ptr => args.push(i8p),
-                AllocatorTy::Usize => args.push(usize),
-
-                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
             }
+            let output = match method.output {
+                AllocatorTy::ResultPtr => Some(i8p),
+                AllocatorTy::Unit => None,
+
+                AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+                    panic!("invalid allocator output")
+                }
+            };
+
+            let from_name = global_fn_name(method.name);
+            let to_name = default_fn_name(method.name);
+
+            create_wrapper_function(tcx, llcx, llmod, &from_name, &to_name, &args, output, false);
         }
-        let output = match method.output {
-            AllocatorTy::ResultPtr => Some(i8p),
-            AllocatorTy::Unit => None,
+    }
 
-            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
-                panic!("invalid allocator output")
-            }
-        };
+    // rust alloc error handler
+    create_wrapper_function(
+        tcx,
+        llcx,
+        llmod,
+        "__rust_alloc_error_handler",
+        &alloc_error_handler_name(alloc_error_handler_kind),
+        &[usize, usize], // size, align
+        None,
+        true,
+    );
+
+    // __rust_alloc_error_handler_should_panic
+    let name = OomStrategy::SYMBOL;
+    let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+    if tcx.sess.target.default_hidden_visibility {
+        llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+    }
+    let val = tcx.sess.opts.unstable_opts.oom.should_panic();
+    let llval = llvm::LLVMConstInt(i8, val as u64, False);
+    llvm::LLVMSetInitializer(ll_g, llval);
+
+    let name = NO_ALLOC_SHIM_IS_UNSTABLE;
+    let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+    if tcx.sess.target.default_hidden_visibility {
+        llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+    }
+    let llval = llvm::LLVMConstInt(i8, 0, False);
+    llvm::LLVMSetInitializer(ll_g, llval);
+
+    if tcx.sess.opts.debuginfo != DebugInfo::None {
+        let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod);
+        debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx);
+        dbg_cx.finalize(tcx.sess);
+    }
+}
+
+fn create_wrapper_function(
+    tcx: TyCtxt<'_>,
+    llcx: &Context,
+    llmod: &Module,
+    from_name: &str,
+    to_name: &str,
+    args: &[&Type],
+    output: Option<&Type>,
+    no_return: bool,
+) {
+    unsafe {
         let ty = llvm::LLVMFunctionType(
-            output.unwrap_or(void),
+            output.unwrap_or_else(|| llvm::LLVMVoidTypeInContext(llcx)),
             args.as_ptr(),
             args.len() as c_uint,
             False,
         );
-        let name = format!("__rust_{}", method.name);
-        let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+        let llfn = llvm::LLVMRustGetOrInsertFunction(
+            llmod,
+            from_name.as_ptr().cast(),
+            from_name.len(),
+            ty,
+        );
+        let no_return = if no_return {
+            // -> ! DIFlagNoReturn
+            let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
+            Some(no_return)
+        } else {
+            None
+        };
 
         if tcx.sess.target.default_hidden_visibility {
             llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
@@ -68,9 +138,12 @@ pub(crate) unsafe fn codegen(
             attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
         }
 
-        let callee = kind.fn_name(method.name);
         let callee =
-            llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+            llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty);
+        if let Some(no_return) = no_return {
+            // -> ! DIFlagNoReturn
+            attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
+        }
         llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
 
         let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
@@ -99,66 +172,4 @@ pub(crate) unsafe fn codegen(
         }
         llvm::LLVMDisposeBuilder(llbuilder);
     }
-
-    // rust alloc error handler
-    let args = [usize, usize]; // size, align
-
-    let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
-    let name = "__rust_alloc_error_handler";
-    let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
-    // -> ! DIFlagNoReturn
-    let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
-    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
-
-    if tcx.sess.target.default_hidden_visibility {
-        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-    }
-    if tcx.sess.must_emit_unwind_tables() {
-        let uwtable = attributes::uwtable_attr(llcx);
-        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
-    }
-
-    let callee = alloc_error_handler_kind.fn_name(sym::oom);
-    let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
-    // -> ! DIFlagNoReturn
-    attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
-    llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
-
-    let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
-
-    let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
-    llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
-    let args = args
-        .iter()
-        .enumerate()
-        .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
-        .collect::<Vec<_>>();
-    let ret = llvm::LLVMRustBuildCall(
-        llbuilder,
-        ty,
-        callee,
-        args.as_ptr(),
-        args.len() as c_uint,
-        [].as_ptr(),
-        0 as c_uint,
-    );
-    llvm::LLVMSetTailCall(ret, True);
-    llvm::LLVMBuildRetVoid(llbuilder);
-    llvm::LLVMDisposeBuilder(llbuilder);
-
-    // __rust_alloc_error_handler_should_panic
-    let name = OomStrategy::SYMBOL;
-    let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
-    if tcx.sess.target.default_hidden_visibility {
-        llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
-    }
-    let val = tcx.sess.opts.unstable_opts.oom.should_panic();
-    let llval = llvm::LLVMConstInt(i8, val as u64, False);
-    llvm::LLVMSetInitializer(ll_g, llval);
-
-    if tcx.sess.opts.debuginfo != DebugInfo::None {
-        let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod);
-        debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx);
-        dbg_cx.finalize(tcx.sess);
-    }
 }
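
The allocator rewrite pulls the repeated shim-emission logic into create_wrapper_function, which declares an LLVM function from_name whose body tail-calls to_name. Conceptually, each default-allocator shim it emits behaves like the following hand-written Rust (symbol names follow the global_fn_name/default_fn_name convention; this is a sketch of the emitted IR, not code from the compiler):

    extern "Rust" {
        fn __rdl_alloc(size: usize, align: usize) -> *mut u8;
    }

    #[no_mangle]
    unsafe extern "Rust" fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
        // The real shim is emitted as a tail call at the IR level.
        __rdl_alloc(size, align)
    }
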
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 1a3865360a3..1323261ae92 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -44,9 +44,10 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     let is_target_supported = |reg_class: InlineAsmRegClass| {
                         for &(_, feature) in reg_class.supported_types(asm_arch) {
                             if let Some(feature) = feature {
-                                let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
-                                if self.tcx.sess.target_features.contains(&feature)
-                                    || codegen_fn_attrs.target_features.contains(&feature)
+                                if self
+                                    .tcx
+                                    .asm_target_features(instance.def_id())
+                                    .contains(&feature)
                                 {
                                     return true;
                                 }
@@ -236,8 +237,22 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 InlineAsmArch::Nvptx64 => {}
                 InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                 InlineAsmArch::Hexagon => {}
+                InlineAsmArch::LoongArch64 => {
+                    constraints.extend_from_slice(&[
+                        "~{$fcc0}".to_string(),
+                        "~{$fcc1}".to_string(),
+                        "~{$fcc2}".to_string(),
+                        "~{$fcc3}".to_string(),
+                        "~{$fcc4}".to_string(),
+                        "~{$fcc5}".to_string(),
+                        "~{$fcc6}".to_string(),
+                        "~{$fcc7}".to_string(),
+                    ]);
+                }
                 InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
-                InlineAsmArch::S390x => {}
+                InlineAsmArch::S390x => {
+                    constraints.push("~{cc}".to_string());
+                }
                 InlineAsmArch::SpirV => {}
                 InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                 InlineAsmArch::Bpf => {}
@@ -247,6 +262,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 InlineAsmArch::M68k => {
                     constraints.push("~{ccr}".to_string());
                 }
+                InlineAsmArch::CSKY => {}
             }
         }
         if !options.contains(InlineAsmOptions::NOMEM) {
@@ -442,9 +458,9 @@ pub(crate) fn inline_asm_call<'ll>(
             );
 
             let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
-                bx.invoke(fty, None, v, inputs, dest, catch, funclet)
+                bx.invoke(fty, None, None, v, inputs, dest, catch, funclet)
             } else {
-                bx.call(fty, None, v, inputs, None)
+                bx.call(fty, None, None, v, inputs, None)
             };
 
             // Store mark in a metadata node so we can map LLVM errors
@@ -633,6 +649,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
             | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
             InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
@@ -677,6 +695,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
             InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
             InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                 bug!("LLVM backend does not support SPIR-V")
             }
@@ -719,6 +739,7 @@ fn modifier_to_llvm(
             }
         }
         InlineAsmRegClass::Hexagon(_) => None,
+        InlineAsmRegClass::LoongArch(_) => None,
         InlineAsmRegClass::Mips(_) => None,
         InlineAsmRegClass::Nvptx(_) => None,
         InlineAsmRegClass::PowerPC(_) => None,
@@ -775,6 +796,7 @@ fn modifier_to_llvm(
             bug!("LLVM backend does not support SPIR-V")
         }
         InlineAsmRegClass::M68k(_) => None,
+        InlineAsmRegClass::CSKY(_) => None,
         InlineAsmRegClass::Err => unreachable!(),
     }
 }
@@ -803,6 +825,8 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
             cx.type_vector(cx.type_i64(), 2)
         }
         InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
@@ -849,6 +873,8 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
         }
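
The constraint strings added here are implicit clobbers: unless an asm! invocation passes options(preserves_flags), rustc appends clobbers such as ~{cc} on s390x or ~{$fcc0}..~{$fcc7} on LoongArch, so LLVM must assume those condition-flag registers are dead across the asm block. A user-level illustration of the mechanism (written for an AArch64 target purely so the mnemonic is valid; the same rule applies on the architectures added above):

    use std::arch::asm;

    #[cfg(target_arch = "aarch64")]
    fn add(a: u64, b: u64) -> u64 {
        let out: u64;
        unsafe {
            // No `options(preserves_flags)`, so the implicit flag clobbers apply.
            asm!("add {0}, {0}, {1}", inout(reg) a => out, in(reg) b);
        }
        out
    }
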
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 651d644ebb6..b6c01545f30 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -1,7 +1,6 @@
 //! Set and unset common attributes on LLVM values.
 
 use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::small_str::SmallStr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::ty::{self, TyCtxt};
@@ -88,6 +87,9 @@ pub fn sanitize_attrs<'ll>(
 
         attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
     }
+    if enabled.contains(SanitizerSet::SAFESTACK) {
+        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
+    }
     attrs
 }
 
@@ -126,7 +128,10 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attr
 
         // The function name varies on platforms.
         // See test/CodeGen/mcount.c in clang.
-        let mcount_name = cx.sess().target.mcount.as_ref();
+        let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
+            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
+            None => cx.sess().target.mcount.as_ref(),
+        };
 
         attrs.push(llvm::CreateAttrStringValue(
             cx.llcx,
@@ -333,6 +338,10 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     to_add.extend(probestack_attr(cx));
     to_add.extend(stackprotector_attr(cx));
 
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
+        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
+    }
+
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
         to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
     }
@@ -357,50 +366,44 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
         || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
     {
-        if llvm_util::get_version() >= (15, 0, 0) {
-            to_add.push(create_alloc_family_attr(cx.llcx));
-            // apply to argument place instead of function
-            let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
-            to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
-            let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
-            if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
-                flags |= AllocKindFlags::Uninitialized;
-            } else {
-                flags |= AllocKindFlags::Zeroed;
-            }
-            to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        // apply to argument place instead of function
+        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
+        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
+        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+            flags |= AllocKindFlags::Uninitialized;
+        } else {
+            flags |= AllocKindFlags::Zeroed;
         }
+        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
         // apply to return place instead of function (unlike all other attributes applied in this function)
         let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
         attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
-        if llvm_util::get_version() >= (15, 0, 0) {
-            to_add.push(create_alloc_family_attr(cx.llcx));
-            to_add.push(llvm::CreateAllocKindAttr(
-                cx.llcx,
-                AllocKindFlags::Realloc | AllocKindFlags::Aligned,
-            ));
-            // applies to argument place instead of function place
-            let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
-            // apply to argument place instead of function
-            let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
-            to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
-        }
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        to_add.push(llvm::CreateAllocKindAttr(
+            cx.llcx,
+            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
+        ));
+        // applies to argument place instead of function place
+        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+        // apply to argument place instead of function
+        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
+        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
         let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
         attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
-        if llvm_util::get_version() >= (15, 0, 0) {
-            to_add.push(create_alloc_family_attr(cx.llcx));
-            to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
-            // applies to argument place instead of function place
-            let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
-        }
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
+        // applies to argument place instead of function place
+        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
         to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
@@ -444,7 +447,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     let mut function_features = function_features
         .iter()
         .flat_map(|feat| {
-            llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{}", f))
+            llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{f}"))
         })
         .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
             InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
@@ -478,8 +481,8 @@ pub fn from_fn_attrs<'ll, 'tcx>(
 
     let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
     let function_features = function_features.iter().map(|s| s.as_str());
-    let target_features =
-        global_features.chain(function_features).intersperse(",").collect::<SmallStr<1024>>();
+    let target_features: String =
+        global_features.chain(function_features).intersperse(",").collect();
     if !target_features.is_empty() {
         to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
     }
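
The last hunk replaces the SmallStr buffer by collecting an interspersed iterator directly into a String; rustc relies on the unstable Iterator::intersperse for this. A stable-Rust equivalent of assembling the comma-separated "target-features" value (an illustrative helper, not compiler code):

    fn target_features_attr(global: &[&str], per_fn: &[&str]) -> String {
        global
            .iter()
            .chain(per_fn.iter())
            .copied()
            .collect::<Vec<_>>()
            .join(",")
    }

    // target_features_attr(&["+neon"], &["+fp16"]) == "+neon,+fp16"
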
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 12da21dc477..f33075a8879 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -56,7 +56,7 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
         "x86" => LLVMMachineType::I386,
         "aarch64" => LLVMMachineType::ARM64,
         "arm" => LLVMMachineType::ARM,
-        _ => panic!("unsupported cpu type {}", cpu),
+        _ => panic!("unsupported cpu type {cpu}"),
     }
 }
 
@@ -128,7 +128,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
         let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" };
         let output_path = {
             let mut output_path: PathBuf = tmpdir.to_path_buf();
-            output_path.push(format!("{}{}", lib_name, name_suffix));
+            output_path.push(format!("{lib_name}{name_suffix}"));
             output_path.with_extension("lib")
         };
 
@@ -156,7 +156,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
             // functions. Therefore, use binutils to create the import library instead,
             // by writing a .DEF file to the temp dir and calling binutils's dlltool.
             let def_file_path =
-                tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
+                tmpdir.join(format!("{lib_name}{name_suffix}")).with_extension("def");
 
             let def_file_content = format!(
                 "EXPORTS\n{}",
@@ -164,7 +164,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
                     .into_iter()
                     .map(|(name, ordinal)| {
                         match ordinal {
-                            Some(n) => format!("{} @{} NONAME", name, n),
+                            Some(n) => format!("{name} @{n} NONAME"),
                             None => name,
                         }
                     })
@@ -198,30 +198,39 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
                 "arm" => ("arm", "--32"),
                 _ => panic!("unsupported arch {}", sess.target.arch),
             };
-            let result = std::process::Command::new(dlltool)
-                .args([
-                    "-d",
-                    def_file_path.to_str().unwrap(),
-                    "-D",
-                    lib_name,
-                    "-l",
-                    output_path.to_str().unwrap(),
-                    "-m",
-                    dlltool_target_arch,
-                    "-f",
-                    dlltool_target_bitness,
-                    "--no-leading-underscore",
-                    "--temp-prefix",
-                    temp_prefix.to_str().unwrap(),
-                ])
-                .output();
-
-            match result {
+            let mut dlltool_cmd = std::process::Command::new(&dlltool);
+            dlltool_cmd.args([
+                "-d",
+                def_file_path.to_str().unwrap(),
+                "-D",
+                lib_name,
+                "-l",
+                output_path.to_str().unwrap(),
+                "-m",
+                dlltool_target_arch,
+                "-f",
+                dlltool_target_bitness,
+                "--no-leading-underscore",
+                "--temp-prefix",
+                temp_prefix.to_str().unwrap(),
+            ]);
+
+            match dlltool_cmd.output() {
                 Err(e) => {
-                    sess.emit_fatal(ErrorCallingDllTool { error: e });
+                    sess.emit_fatal(ErrorCallingDllTool {
+                        dlltool_path: dlltool.to_string_lossy(),
+                        error: e,
+                    });
                 }
-                Ok(output) if !output.status.success() => {
+                // dlltool returns '0' on failure, so check for error output instead.
+                Ok(output) if !output.stderr.is_empty() => {
                     sess.emit_fatal(DlltoolFailImportLibrary {
+                        dlltool_path: dlltool.to_string_lossy(),
+                        dlltool_args: dlltool_cmd
+                            .get_args()
+                            .map(|arg| arg.to_string_lossy())
+                            .collect::<Vec<_>>()
+                            .join(" "),
                         stdout: String::from_utf8_lossy(&output.stdout),
                         stderr: String::from_utf8_lossy(&output.stderr),
                     })
@@ -358,7 +367,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
                 match addition {
                     Addition::File { path, name_in_archive } => {
                         let path = CString::new(path.to_str().unwrap())?;
-                        let name = CString::new(name_in_archive.clone())?;
+                        let name = CString::new(name_in_archive.as_bytes())?;
                         members.push(llvm::LLVMRustArchiveMemberNew(
                             path.as_ptr(),
                             name.as_ptr(),
@@ -426,12 +435,12 @@ impl<'a> LlvmArchiveBuilder<'a> {
 }
 
 fn string_to_io_error(s: String) -> io::Error {
-    io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+    io::Error::new(io::ErrorKind::Other, format!("bad archive: {s}"))
 }
 
 fn find_binutils_dlltool(sess: &Session) -> OsString {
     assert!(sess.target.options.is_like_windows && !sess.target.options.is_like_msvc);
-    if let Some(dlltool_path) = &sess.opts.unstable_opts.dlltool {
+    if let Some(dlltool_path) = &sess.opts.cg.dlltool {
         return dlltool_path.clone().into_os_string();
     }
 
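
The key behavioral change in archive.rs is the failure check: some dlltool builds exit with status 0 even when they fail, so the code now treats any stderr output as the error signal. A simplified sketch of that pattern (hypothetical helper; the real code emits structured diagnostics carrying the tool path and arguments):

    use std::process::Command;

    fn run_dlltool(dlltool: &str, args: &[&str]) -> Result<(), String> {
        let output = Command::new(dlltool)
            .args(args)
            .output()
            .map_err(|e| format!("error calling {dlltool}: {e}"))?;
        // dlltool can return exit status 0 on failure, so check stderr instead.
        if !output.stderr.is_empty() {
            return Err(String::from_utf8_lossy(&output.stderr).into_owned());
        }
        Ok(())
    }
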
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index d2e01708a37..8655aeec13d 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,13 +1,15 @@
-use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
+use crate::back::write::{
+    self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+};
 use crate::errors::{
-    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
 };
 use crate::llvm::{self, build_string};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
 use object::read::archive::ArchiveFile;
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
 use rustc_codegen_ssa::back::symbol_export;
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
 use rustc_data_structures::fx::FxHashMap;
@@ -17,7 +19,6 @@ use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
 use rustc_middle::dep_graph::WorkProduct;
 use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
-use rustc_session::cgu_reuse_tracker::CguReuse;
 use rustc_session::config::{self, CrateType, Lto};
 
 use std::ffi::{CStr, CString};
@@ -25,7 +26,6 @@ use std::fs::File;
 use std::io;
 use std::iter;
 use std::path::Path;
-use std::ptr;
 use std::slice;
 use std::sync::Arc;
 
@@ -35,8 +35,12 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
-        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
-        CrateType::Rlib | CrateType::ProcMacro => false,
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Staticlib
+        | CrateType::Cdylib
+        | CrateType::ProcMacro => true,
+        CrateType::Rlib => false,
     }
 }
 
@@ -86,6 +90,11 @@ fn prepare_lto(
                     diag_handler.emit_err(LtoDylib);
                     return Err(FatalError);
                 }
+            } else if *crate_type == CrateType::ProcMacro {
+                if !cgcx.opts.unstable_opts.dylib_lto {
+                    diag_handler.emit_err(LtoProcMacro);
+                    return Err(FatalError);
+                }
             }
         }
 
@@ -121,6 +130,7 @@ fn prepare_lto(
                 info!("adding bitcode from {}", name);
                 match get_bitcode_slice_from_object_data(
                     child.data(&*archive_data).expect("corrupt rlib"),
+                    cgcx,
                 ) {
                     Ok(data) => {
                         let module = SerializedModule::FromRlib(data.to_vec());
@@ -142,10 +152,29 @@ fn prepare_lto(
     Ok((symbols_below_threshold, upstream_modules))
 }
 
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
+fn get_bitcode_slice_from_object_data<'a>(
+    obj: &'a [u8],
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<&'a [u8], LtoBitcodeFromRlib> {
+    // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
+    // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
+    // the relevant magic strings here and return.
+    if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
+        return Ok(obj);
+    }
+    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
+    // which in the public API for sections gets treated as part of the section name, but internally
+    // in MachOObjectFile.cpp gets treated separately.
+    let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
     let mut len = 0;
-    let data =
-        unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+    let data = unsafe {
+        llvm::LLVMRustGetSliceFromObjectDataByName(
+            obj.as_ptr(),
+            obj.len(),
+            section_name.as_ptr(),
+            &mut len,
+        )
+    };
     if !data.is_null() {
         assert!(len != 0);
         let bc = unsafe { slice::from_raw_parts(data, len) };
@@ -167,7 +196,7 @@ fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFro
 /// for further optimization.
 pub(crate) fn run_fat(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
-    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
     cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
 ) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
     let diag_handler = cgcx.create_diag_handler();
@@ -221,7 +250,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBu
 fn fat_lto(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     diag_handler: &Handler,
-    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
     cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
     mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
     symbols_below_threshold: &[*const libc::c_char],
@@ -246,8 +275,8 @@ fn fat_lto(
     }));
     for module in modules {
         match module {
-            FatLTOInput::InMemory(m) => in_memory.push(m),
-            FatLTOInput::Serialized { name, buffer } => {
+            FatLtoInput::InMemory(m) => in_memory.push(m),
+            FatLtoInput::Serialized { name, buffer } => {
                 info!("pushing serialized module {:?}", name);
                 let buffer = SerializedModule::Local(buffer);
                 serialized_modules.push((buffer, CString::new(name).unwrap()));
@@ -303,7 +332,13 @@ fn fat_lto(
         // The linking steps below may produce errors and diagnostics within LLVM
         // which we'd like to handle and print, so set up our diagnostic handlers
         // (which get unregistered when they go out of scope below).
-        let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+        let _handler = DiagnosticHandlers::new(
+            cgcx,
+            diag_handler,
+            llcx,
+            &module,
+            CodegenDiagnosticsStage::LTO,
+        );
 
         // For all other modules we codegened we'll need to link them into our own
         // bitcode. All modules were codegened in their own LLVM context, however,
@@ -327,7 +362,7 @@ fn fat_lto(
             let _timer = cgcx
                 .prof
                 .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
-                    recorder.record_arg(format!("{:?}", name))
+                    recorder.record_arg(format!("{name:?}"))
                 });
             info!("linking {:?}", name);
             let data = bc_decoded.data();
@@ -436,7 +471,7 @@ fn thin_lto(
 
         for (i, (name, buffer)) in modules.into_iter().enumerate() {
             info!("local module: {} - {}", i, name);
-            let cname = CString::new(name.clone()).unwrap();
+            let cname = CString::new(name.as_bytes()).unwrap();
             thin_modules.push(llvm::ThinLTOModule {
                 identifier: cname.as_ptr(),
                 data: buffer.data().as_ptr(),
@@ -548,7 +583,6 @@ fn thin_lto(
                     copy_jobs.push(work_product);
                     info!(" - {}: re-used", module_name);
                     assert!(cgcx.incr_comp_session_dir.is_some());
-                    cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
                     continue;
                 }
             }
@@ -578,7 +612,7 @@ pub(crate) fn run_pass_manager(
     module: &mut ModuleCodegen<ModuleLlvm>,
     thin: bool,
 ) -> Result<(), FatalError> {
-    let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
     let config = cgcx.config(module.kind);
 
     // Now we have one massive module inside of llmod. Time to run the
@@ -709,17 +743,6 @@ pub unsafe fn optimize_thin_module(
         let llmod = module.module_llvm.llmod();
         save_temp_bitcode(cgcx, &module, "thin-lto-input");
 
-        // Before we do much else find the "main" `DICompileUnit` that we'll be
-        // using below. If we find more than one though then rustc has changed
-        // in a way we're not ready for, so generate an ICE by returning
-        // an error.
-        let mut cu1 = ptr::null_mut();
-        let mut cu2 = ptr::null_mut();
-        llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
-        if !cu2.is_null() {
-            return Err(write::llvm_err(&diag_handler, LlvmError::MultipleSourceDiCompileUnit));
-        }
-
         // Up next comes the per-module local analyses that we do for Thin LTO.
         // Each of these functions is basically copied from the LLVM
         // implementation and then tailored to suit this implementation. Ideally
@@ -766,43 +789,6 @@ pub unsafe fn optimize_thin_module(
             save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
         }
 
-        // Ok now this is a bit unfortunate. This is also something you won't
-        // find upstream in LLVM's ThinLTO passes! This is a hack for now to
-        // work around bugs in LLVM.
-        //
-        // First discovered in #45511 it was found that as part of ThinLTO
-        // importing passes LLVM will import `DICompileUnit` metadata
-        // information across modules. This means that we'll be working with one
-        // LLVM module that has multiple `DICompileUnit` instances in it (a
-        // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of
-        // bugs in LLVM's backend which generates invalid DWARF in a situation
-        // like this:
-        //
-        //  https://bugs.llvm.org/show_bug.cgi?id=35212
-        //  https://bugs.llvm.org/show_bug.cgi?id=35562
-        //
-        // While the first bug there is fixed the second ended up causing #46346
-        // which was basically a resurgence of #45511 after LLVM's bug 35212 was
-        // fixed.
-        //
-        // This function below is a huge hack around this problem. The function
-        // below is defined in `PassWrapper.cpp` and will basically "merge"
-        // all `DICompileUnit` instances in a module. Basically it'll take all
-        // the objects, rewrite all pointers of `DISubprogram` to point to the
-        // first `DICompileUnit`, and then delete all the other units.
-        //
-        // This is probably mangling to the debug info slightly (but hopefully
-        // not too much) but for now at least gets LLVM to emit valid DWARF (or
-        // so it appears). Hopefully we can remove this once upstream bugs are
-        // fixed in LLVM.
-        {
-            let _timer = cgcx
-                .prof
-                .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name());
-            llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
-            save_temp_bitcode(cgcx, &module, "thin-lto-after-patch");
-        }
-
         // Alright now that we've done everything related to the ThinLTO
         // analysis it's time to run some optimizations! Here we use the same
         // `run_pass_manager` as the "fat" LTO above except that we tell it to
@@ -830,7 +816,7 @@ impl ThinLTOKeysMap {
         let file = File::create(path)?;
         let mut writer = io::BufWriter::new(file);
         for (module, key) in &self.keys {
-            writeln!(writer, "{} {}", module, key)?;
+            writeln!(writer, "{module} {key}")?;
         }
         Ok(())
     }
@@ -844,7 +830,7 @@ impl ThinLTOKeysMap {
             let mut split = line.split(' ');
             let module = split.next().unwrap();
             let key = split.next().unwrap();
-            assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+            assert_eq!(split.next(), None, "Expected two space-separated values, found {line:?}");
             keys.insert(module.to_string(), key.to_string());
         }
         Ok(Self { keys })
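The parser above expects exactly two space-separated values per line. A standalone sketch of the round-trip, with made-up module names and keys:

use std::collections::BTreeMap;

fn main() {
    // The on-disk format written by `write_to_file`: one "module key" pair per line.
    let text = "cgu.0 abc123\ncgu.1 def456\n";
    let mut keys = BTreeMap::new();
    for line in text.lines() {
        let mut split = line.split(' ');
        let module = split.next().unwrap();
        let key = split.next().unwrap();
        assert_eq!(split.next(), None, "Expected two space-separated values, found {line:?}");
        keys.insert(module.to_string(), key.to_string());
    }
    assert_eq!(keys["cgu.1"], "def456");
}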
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
new file mode 100644
index 00000000000..36484c3c3fc
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -0,0 +1,103 @@
+use std::{
+    ffi::{c_char, CStr},
+    marker::PhantomData,
+    ops::Deref,
+    ptr::NonNull,
+};
+
+use rustc_data_structures::small_c_str::SmallCStr;
+
+use crate::{errors::LlvmError, llvm};
+
+/// Responsible for safely creating and disposing of an llvm::TargetMachine via FFI functions.
+/// Not cloneable, as there is no clone function for llvm::TargetMachine.
+#[repr(transparent)]
+pub struct OwnedTargetMachine {
+    tm_unique: NonNull<llvm::TargetMachine>,
+    phantom: PhantomData<llvm::TargetMachine>,
+}
+
+impl OwnedTargetMachine {
+    pub fn new(
+        triple: &CStr,
+        cpu: &CStr,
+        features: &CStr,
+        abi: &CStr,
+        model: llvm::CodeModel,
+        reloc: llvm::RelocModel,
+        level: llvm::CodeGenOptLevel,
+        use_soft_fp: bool,
+        function_sections: bool,
+        data_sections: bool,
+        unique_section_names: bool,
+        trap_unreachable: bool,
+        singlethread: bool,
+        asm_comments: bool,
+        emit_stack_size_section: bool,
+        relax_elf_relocations: bool,
+        use_init_array: bool,
+        split_dwarf_file: &CStr,
+        output_obj_file: &CStr,
+        debug_info_compression: &CStr,
+        force_emulated_tls: bool,
+        args_cstr_buff: &[u8],
+    ) -> Result<Self, LlvmError<'static>> {
+        assert!(!args_cstr_buff.is_empty());
+        assert!(
+            *args_cstr_buff.last().unwrap() == 0,
+            "The last byte must be a null terminator."
+        );
+
+        // SAFETY: llvm::LLVMRustCreateTargetMachine copies the pointed-to data.
+        let tm_ptr = unsafe {
+            llvm::LLVMRustCreateTargetMachine(
+                triple.as_ptr(),
+                cpu.as_ptr(),
+                features.as_ptr(),
+                abi.as_ptr(),
+                model,
+                reloc,
+                level,
+                use_soft_fp,
+                function_sections,
+                data_sections,
+                unique_section_names,
+                trap_unreachable,
+                singlethread,
+                asm_comments,
+                emit_stack_size_section,
+                relax_elf_relocations,
+                use_init_array,
+                split_dwarf_file.as_ptr(),
+                output_obj_file.as_ptr(),
+                debug_info_compression.as_ptr(),
+                force_emulated_tls,
+                args_cstr_buff.as_ptr() as *const c_char,
+                args_cstr_buff.len(),
+            )
+        };
+
+        NonNull::new(tm_ptr)
+            .map(|tm_unique| Self { tm_unique, phantom: PhantomData })
+            .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) })
+    }
+}
+
+impl Deref for OwnedTargetMachine {
+    type Target = llvm::TargetMachine;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: the constructor ensures we hold a valid pointer created by llvm::LLVMRustCreateTargetMachine.
+        unsafe { self.tm_unique.as_ref() }
+    }
+}
+
+impl Drop for OwnedTargetMachine {
+    fn drop(&mut self) {
+        // SAFETY: the constructor ensures we hold a valid pointer created by
+        // llvm::LLVMRustCreateTargetMachine, and OwnedTargetMachine is not copyable,
+        // so there is no double free or use-after-free.
+        unsafe {
+            llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
+        }
+    }
+}
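The new file above is an instance of the usual RAII-over-FFI pattern. A minimal self-contained sketch, with a hypothetical `ffi_create`/`ffi_dispose` pair (simulated here with `Box` so the example runs) standing in for the LLVM entry points:

use std::marker::PhantomData;
use std::ptr::NonNull;

struct Handle(#[allow(dead_code)] u32); // stand-in for an opaque FFI type

// Hypothetical FFI surface, simulated with Box for a runnable example.
unsafe fn ffi_create() -> *mut Handle {
    Box::into_raw(Box::new(Handle(0)))
}
unsafe fn ffi_dispose(h: *mut Handle) {
    drop(Box::from_raw(h));
}

struct Owned {
    inner: NonNull<Handle>,
    _owns: PhantomData<Handle>, // we logically own the pointee
}

impl Owned {
    fn new() -> Option<Self> {
        // NonNull::new maps a possibly-null raw pointer to Option<NonNull<_>>,
        // which is what lets the real code surface target-machine creation failures.
        NonNull::new(unsafe { ffi_create() }).map(|inner| Owned { inner, _owns: PhantomData })
    }
}

impl Drop for Owned {
    fn drop(&mut self) {
        // No Copy/Clone impls, so this runs exactly once per handle.
        unsafe { ffi_dispose(self.inner.as_ptr()) }
    }
}

fn main() {
    let _h = Owned::new().expect("creation failed");
} // dropped (and disposed) here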
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 7136f750f39..c778a6e017f 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,18 +1,22 @@
 use crate::back::lto::ThinBuffer;
+use crate::back::owned_target_machine::OwnedTargetMachine;
 use crate::back::profiling::{
     selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
 };
 use crate::base;
 use crate::common;
-use crate::consts;
 use crate::errors::{
-    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+    WithLlvmError, WriteBytecode,
 };
 use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::llvm_util;
 use crate::type_::Type;
 use crate::LlvmCodegenBackend;
 use crate::ModuleLlvm;
+use llvm::{
+    LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
 use rustc_codegen_ssa::back::link::ensure_removed;
 use rustc_codegen_ssa::back::write::{
     BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
@@ -31,6 +35,7 @@ use rustc_span::symbol::sym;
 use rustc_span::InnerSpan;
 use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
 
+use crate::llvm::diagnostic::OptimizationDiagnosticKind;
 use libc::{c_char, c_int, c_uint, c_void, size_t};
 use std::ffi::CString;
 use std::fs;
@@ -94,8 +99,8 @@ pub fn write_output_file<'ll>(
     }
 }
 
-pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
-    let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine {
+    let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
     // Can't use query system here quite yet because this function is invoked before the query
     // system/tcx is set up.
     let features = llvm_util::global_llvm_features(sess, false);
@@ -103,7 +108,7 @@ pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm:
         .unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
 }
 
-pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
     let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
         tcx.output_filenames(()).split_dwarf_path(
             tcx.sess.split_debuginfo(),
@@ -113,7 +118,11 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll
     } else {
         None
     };
-    let config = TargetMachineFactoryConfig { split_dwarf_file };
+
+    let output_obj_file =
+        Some(tcx.output_filenames(()).temp_path(OutputType::Object, Some(mod_name)));
+    let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };
+
     target_machine_factory(
         &tcx.sess,
         tcx.backend_optimization_level(()),
@@ -216,36 +225,73 @@ pub fn target_machine_factory(
 
     let force_emulated_tls = sess.target.force_emulated_tls;
 
+    // Copy the exe path, followed by all the command-line arguments, into one
+    // buffer, null-terminating each so they can be used as C strings.
+    let args_cstr_buff = {
+        let mut args_cstr_buff: Vec<u8> = Vec::new();
+        let exe_path = std::env::current_exe().unwrap_or_default();
+        let exe_path_str = exe_path.into_os_string().into_string().unwrap_or_default();
+
+        args_cstr_buff.extend_from_slice(exe_path_str.as_bytes());
+        args_cstr_buff.push(0);
+
+        for arg in sess.expanded_args.iter() {
+            args_cstr_buff.extend_from_slice(arg.as_bytes());
+            args_cstr_buff.push(0);
+        }
+
+        args_cstr_buff
+    };
+
+    let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+    match sess.opts.debuginfo_compression {
+        rustc_session::config::DebugInfoCompression::Zlib => {
+            if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zlib" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::Zstd => {
+            if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zstd" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::None => {}
+    };
+    let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
     Arc::new(move |config: TargetMachineFactoryConfig| {
-        let split_dwarf_file =
-            path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
-        let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
-
-        let tm = unsafe {
-            llvm::LLVMRustCreateTargetMachine(
-                triple.as_ptr(),
-                cpu.as_ptr(),
-                features.as_ptr(),
-                abi.as_ptr(),
-                code_model,
-                reloc_model,
-                opt_level,
-                use_softfp,
-                ffunction_sections,
-                fdata_sections,
-                funique_section_names,
-                trap_unreachable,
-                singlethread,
-                asm_comments,
-                emit_stack_size_section,
-                relax_elf_relocations,
-                use_init_array,
-                split_dwarf_file.as_ptr(),
-                force_emulated_tls,
-            )
+        let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
+            let path = path_mapping.map_prefix(path.unwrap_or_default()).0;
+            CString::new(path.to_str().unwrap()).unwrap()
         };
 
-        tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
+        let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
+        let output_obj_file = path_to_cstring_helper(config.output_obj_file);
+
+        OwnedTargetMachine::new(
+            &triple,
+            &cpu,
+            &features,
+            &abi,
+            code_model,
+            reloc_model,
+            opt_level,
+            use_softfp,
+            ffunction_sections,
+            fdata_sections,
+            funique_section_names,
+            trap_unreachable,
+            singlethread,
+            asm_comments,
+            emit_stack_size_section,
+            relax_elf_relocations,
+            use_init_array,
+            &split_dwarf_file,
+            &output_obj_file,
+            &debuginfo_compression,
+            force_emulated_tls,
+            &args_cstr_buff,
+        )
     })
 }
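A sketch of the `args_cstr_buff` layout built above: each string is appended and followed by a NUL, so the C++ side can walk the buffer as consecutive C strings. Decoding it back (with made-up arguments):

fn unpack(buff: &[u8]) -> Vec<&str> {
    assert_eq!(buff.last(), Some(&0), "buffer must end with a null terminator");
    buff.split(|&b| b == 0)
        .filter(|s| !s.is_empty()) // drop the empty tail after the final NUL
        .map(|s| std::str::from_utf8(s).unwrap())
        .collect()
}

fn main() {
    let buff = b"rustc\0--edition\02021\0";
    assert_eq!(unpack(buff), ["rustc", "--edition", "2021"]);
}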
 
@@ -258,7 +304,7 @@ pub(crate) fn save_temp_bitcode(
         return;
     }
     unsafe {
-        let ext = format!("{}.bc", name);
+        let ext = format!("{name}.bc");
         let cgu = Some(&module.name[..]);
         let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
         let cstr = path_to_c_string(&path);
@@ -267,6 +313,16 @@ pub(crate) fn save_temp_bitcode(
     }
 }
 
+/// In what context is a diagnostic handler being attached to a codegen unit?
+pub enum CodegenDiagnosticsStage {
+    /// Prelink optimization stage.
+    Opt,
+    /// LTO/ThinLTO postlink optimization stage.
+    LTO,
+    /// Code generation.
+    Codegen,
+}
+
 pub struct DiagnosticHandlers<'a> {
     data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
     llcx: &'a llvm::Context,
@@ -278,6 +334,8 @@ impl<'a> DiagnosticHandlers<'a> {
         cgcx: &'a CodegenContext<LlvmCodegenBackend>,
         handler: &'a Handler,
         llcx: &'a llvm::Context,
+        module: &ModuleCodegen<ModuleLlvm>,
+        stage: CodegenDiagnosticsStage,
     ) -> Self {
         let remark_passes_all: bool;
         let remark_passes: Vec<CString>;
@@ -294,6 +352,21 @@ impl<'a> DiagnosticHandlers<'a> {
         };
         let remark_passes: Vec<*const c_char> =
             remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+        let remark_file = cgcx
+            .remark_dir
+            .as_ref()
+            // Use the .opt.yaml file suffix, which is supported by LLVM's opt-viewer.
+            .map(|dir| {
+                let stage_suffix = match stage {
+                    CodegenDiagnosticsStage::Codegen => "codegen",
+                    CodegenDiagnosticsStage::Opt => "opt",
+                    CodegenDiagnosticsStage::LTO => "lto",
+                };
+                dir.join(format!("{}.{stage_suffix}.opt.yaml", module.name))
+            })
+            .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
+
+        let pgo_available = cgcx.opts.cg.profile_use.is_some();
         let data = Box::into_raw(Box::new((cgcx, handler)));
         unsafe {
             let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
@@ -304,6 +377,10 @@ impl<'a> DiagnosticHandlers<'a> {
                 remark_passes_all,
                 remark_passes.as_ptr(),
                 remark_passes.len(),
+                // The `as_ref()` is important here, otherwise the `CString` will be dropped
+                // too soon!
+                remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
+                pgo_available,
             );
             DiagnosticHandlers { data, llcx, old_handler }
         }
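For reference, the per-stage remark file name computed above, sketched with a hypothetical remark dir and module name:

use std::path::PathBuf;

fn remark_path(remark_dir: &str, module_name: &str, stage_suffix: &str) -> PathBuf {
    // Same shape as the `dir.join(...)` above: <module>.<stage>.opt.yaml,
    // the suffix understood by LLVM's opt-viewer.
    PathBuf::from(remark_dir).join(format!("{module_name}.{stage_suffix}.opt.yaml"))
}

fn main() {
    assert_eq!(
        remark_path("/tmp/remarks", "mycrate.0", "lto"),
        PathBuf::from("/tmp/remarks/mycrate.0.lto.opt.yaml")
    );
}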
@@ -352,20 +429,22 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
         }
 
         llvm::diagnostic::Optimization(opt) => {
-            let enabled = match cgcx.remark {
-                Passes::All => true,
-                Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
-            };
-
-            if enabled {
-                diag_handler.emit_note(FromLlvmOptimizationDiag {
-                    filename: &opt.filename,
-                    line: opt.line,
-                    column: opt.column,
-                    pass_name: &opt.pass_name,
-                    message: &opt.message,
-                });
-            }
+            diag_handler.emit_note(FromLlvmOptimizationDiag {
+                filename: &opt.filename,
+                line: opt.line,
+                column: opt.column,
+                pass_name: &opt.pass_name,
+                kind: match opt.kind {
+                    OptimizationDiagnosticKind::OptimizationRemark => "success",
+                    OptimizationDiagnosticKind::OptimizationMissed
+                    | OptimizationDiagnosticKind::OptimizationFailure => "missed",
+                    OptimizationDiagnosticKind::OptimizationAnalysis
+                    | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
+                    | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
+                    OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
+                },
+                message: &opt.message,
+            });
         }
         llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
             let message = llvm::build_string(|s| {
@@ -439,6 +518,8 @@ pub(crate) unsafe fn llvm_optimize(
         Some(llvm::SanitizerOptions {
             sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
             sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+            sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI),
+            sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI),
             sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
             sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
             sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
@@ -474,6 +555,7 @@ pub(crate) unsafe fn llvm_optimize(
         &*module.module_llvm.tm,
         to_pass_builder_opt_level(opt_level),
         opt_stage,
+        cgcx.opts.cg.linker_plugin_lto.enabled(),
         config.no_prepopulate_passes,
         config.verify_llvm_ir,
         using_thin_buffers,
@@ -513,7 +595,8 @@ pub(crate) unsafe fn optimize(
 
     let llmod = module.module_llvm.llmod();
     let llcx = &*module.module_llvm.llcx;
-    let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+    let _handlers =
+        DiagnosticHandlers::new(cgcx, diag_handler, llcx, module, CodegenDiagnosticsStage::Opt);
 
     let module_name = module.name.clone();
     let module_name = Some(&module_name[..]);
@@ -572,7 +655,13 @@ pub(crate) unsafe fn codegen(
         let tm = &*module.module_llvm.tm;
         let module_name = module.name.clone();
         let module_name = Some(&module_name[..]);
-        let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+        let _handlers = DiagnosticHandlers::new(
+            cgcx,
+            diag_handler,
+            llcx,
+            &module,
+            CodegenDiagnosticsStage::Codegen,
+        );
 
         if cgcx.msvc_imps_needed {
             create_msvc_imps(cgcx, llcx, llmod);
@@ -667,7 +756,7 @@ pub(crate) unsafe fn codegen(
 
                 let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
 
-                if write!(cursor, "{:#}", demangled).is_err() {
+                if write!(cursor, "{demangled:#}").is_err() {
                     // Possible only if provided buffer is not big enough
                     return 0;
                 }
@@ -765,7 +854,6 @@ pub(crate) unsafe fn codegen(
         }
 
         record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
-        drop(handlers);
     }
 
     // `.dwo` files are only emitted if:
@@ -789,7 +877,7 @@ pub(crate) unsafe fn codegen(
 }
 
 fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
-    let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
+    let mut asm = format!(".section {section_name},\"{section_flags}\"\n").into_bytes();
     asm.extend_from_slice(b".ascii \"");
     asm.reserve(data.len());
     for &byte in data {
@@ -811,6 +899,27 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
     asm
 }
 
+fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-ios")
+        || cgcx.opts.target_triple.triple().contains("-darwin")
+        || cgcx.opts.target_triple.triple().contains("-tvos")
+        || cgcx.opts.target_triple.triple().contains("-watchos")
+}
+
+fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-aix")
+}
+
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+    if target_is_apple(cgcx) {
+        "__LLVM,__bitcode\0"
+    } else if target_is_aix(cgcx) {
+        ".ipa\0"
+    } else {
+        ".llvmbc\0"
+    }
+}
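The same selection logic as a standalone sketch, keyed off substrings of the target triple; the names are NUL-terminated because LLVMSetSection takes a C string:

fn bitcode_section_name_for(triple: &str) -> &'static str {
    let is_apple =
        ["-ios", "-darwin", "-tvos", "-watchos"].iter().any(|t| triple.contains(t));
    if is_apple {
        "__LLVM,__bitcode\0" // Mach-O: segment,section
    } else if triple.contains("-aix") {
        ".ipa\0" // XCOFF
    } else {
        ".llvmbc\0" // ELF/COFF
    }
}

fn main() {
    assert_eq!(bitcode_section_name_for("aarch64-apple-darwin"), "__LLVM,__bitcode\0");
    assert_eq!(bitcode_section_name_for("powerpc64-ibm-aix"), ".ipa\0");
    assert_eq!(bitcode_section_name_for("x86_64-unknown-linux-gnu"), ".llvmbc\0");
}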
+
 /// Embed the bitcode of an LLVM module in the LLVM module itself.
 ///
 /// This is done primarily for iOS where it appears to be standard to compile C
@@ -865,14 +974,16 @@ unsafe fn embed_bitcode(
     //   passed though then these sections will show up in the final output.
     //   Additionally the flag that we need to set here is `SHF_EXCLUDE`.
     //
+    // * XCOFF - The AIX linker ignores the content of .ipa and .info if no
+    //   auxiliary symbol is associated with these sections.
+    //
     // Unfortunately, LLVM provides no way to set custom section flags. For ELF
     // and COFF we emit the sections using module level inline assembly for that
     // reason (see issue #90326 for historical background).
-    let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
-        || cgcx.opts.target_triple.triple().contains("-darwin")
-        || cgcx.opts.target_triple.triple().contains("-tvos")
-        || cgcx.opts.target_triple.triple().contains("-watchos");
+    let is_aix = target_is_aix(cgcx);
+    let is_apple = target_is_apple(cgcx);
     if is_apple
+        || is_aix
         || cgcx.opts.target_triple.triple().starts_with("wasm")
         || cgcx.opts.target_triple.triple().starts_with("asmjs")
     {
@@ -885,7 +996,7 @@ unsafe fn embed_bitcode(
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
 
-        let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+        let section = bitcode_section_name(cgcx);
         llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
         llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
         llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
@@ -897,7 +1008,13 @@ unsafe fn embed_bitcode(
             "rustc.embedded.cmdline\0".as_ptr().cast(),
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
-        let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+        let section = if is_apple {
+            "__LLVM,__cmdline\0"
+        } else if is_aix {
+            ".info\0"
+        } else {
+            ".llvmcmd\0"
+        };
         llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
         llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
     } else {
@@ -930,7 +1047,7 @@ fn create_msvc_imps(
     let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
 
     unsafe {
-        let i8p_ty = Type::i8p_llcx(llcx);
+        let ptr_ty = Type::ptr_llcx(llcx);
         let globals = base::iter_globals(llmod)
             .filter(|&val| {
                 llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
@@ -950,8 +1067,8 @@ fn create_msvc_imps(
             .collect::<Vec<_>>();
 
         for (imp_name, val) in globals {
-            let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
-            llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+            let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast());
+            llvm::LLVMSetInitializer(imp, val);
             llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 5b2bbdb4bde..b659fd02eec 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -86,8 +86,8 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
         {
             let cx = CodegenCx::new(tcx, cgu, &llvm_module);
             let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
-            for &(mono_item, (linkage, visibility)) in &mono_items {
-                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+            for &(mono_item, data) in &mono_items {
+                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, data.linkage, data.visibility);
             }
 
             // ... and now that we have everything pre-defined, fill out those definitions.
@@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
             // happen after the llvm.used variables are created.
             for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
                 unsafe {
-                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
-                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                    llvm::LLVMReplaceAllUsesWith(old_g, new_g);
                     llvm::LLVMDeleteGlobal(old_g);
                 }
             }
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 6819a2af09d..7b259055d40 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -3,6 +3,7 @@ use crate::attributes;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
+use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -15,14 +16,16 @@ use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
 };
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
-use rustc_symbol_mangling::typeid::kcfi_typeid_for_fnabi;
+use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions};
 use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
-use rustc_target::spec::{HasTargetSpec, Target};
+use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
+use smallvec::SmallVec;
 use std::borrow::Cow;
 use std::ffi::CStr;
 use std::iter;
@@ -216,6 +219,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn invoke(
         &mut self,
         llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
         args: &[&'ll Value],
@@ -228,23 +232,21 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("invoke", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
         let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
-        let mut bundles = vec![funclet_bundle];
-
-        // Set KCFI operand bundle
-        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
-        let kcfi_bundle =
-            if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
-                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
-                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
-            } else {
-                None
-            };
-        if kcfi_bundle.is_some() {
-            let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
+
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        if let Some(kcfi_bundle) = kcfi_bundle {
             bundles.push(kcfi_bundle);
         }
 
-        bundles.retain(|bundle| bundle.is_some());
         let invoke = unsafe {
             llvm::LLVMRustBuildInvoke(
                 self.llbuilder,
@@ -490,7 +492,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
-            return OperandRef::new_zst(self, place.layout);
+            return OperandRef::zero_sized(place.layout);
         }
 
         #[instrument(level = "trace", skip(bx))]
@@ -581,8 +583,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(self, zero).llval;
-        let end = dest.project_index(self, count).llval;
 
         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -591,24 +591,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         self.br(header_bb);
 
         let mut header_bx = Self::build(self.cx, header_bb);
-        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+        let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);
 
-        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+        let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
         header_bx.cond_br(keep_going, body_bb, next_bb);
 
         let mut body_bx = Self::build(self.cx, body_bb);
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
-        cg_elem
-            .val
-            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
-
-        let next = body_bx.inbounds_gep(
-            self.backend_type(cg_elem.layout),
-            current,
-            &[self.const_usize(1)],
-        );
+        let dest_elem = dest.project_index(&mut body_bx, i);
+        cg_elem.val.store(&mut body_bx, dest_elem);
+
+        let next = body_bx.unchecked_uadd(i, self.const_usize(1));
         body_bx.br(header_bb);
-        header_bx.add_incoming_to_phi(current, next, body_bb);
+        header_bx.add_incoming_to_phi(i, next, body_bb);
 
         *self = Self::build(self.cx, next_bb);
     }
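The rewritten loop above switches from pointer-bumping to a plain index; the control flow it emits corresponds roughly to this Rust sketch:

fn write_repeat<T: Copy>(dest: &mut [T], elem: T, count: usize) {
    let mut i = 0; // the phi node, seeded with zero
    while i < count {
        // header: icmp ult i, count
        dest[i] = elem; // body: project_index + store
        i += 1; // unchecked_uadd(i, 1)
    }
}

fn main() {
    let mut buf = [0u8; 4];
    write_repeat(&mut buf, 7, 4);
    assert_eq!(buf, [7; 4]);
}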
@@ -659,7 +653,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) -> &'ll Value {
         debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
-        let ptr = self.check_store(val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             let align =
@@ -689,7 +683,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: Size,
     ) {
         debug!("Store {:?} -> {:?}", val, ptr);
-        let ptr = self.check_store(val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMRustBuildAtomicStore(
                 self.llbuilder,
@@ -880,8 +874,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.type_i8p());
-        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemCpy(
                 self.llbuilder,
@@ -907,8 +899,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.type_i8p());
-        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemMove(
                 self.llbuilder,
@@ -931,7 +921,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) {
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let ptr = self.pointercast(ptr, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemSet(
                 self.llbuilder,
@@ -988,16 +977,23 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
-        let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
+        let landing_pad = self.landing_pad(ty, pers_fn, 0);
         unsafe {
             llvm::LLVMSetCleanup(landing_pad, llvm::True);
         }
         (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
     }
 
+    fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
+        let landing_pad = self.landing_pad(ty, pers_fn, 1);
+        self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[]));
+        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
+    }
+
     fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let mut exn = self.const_poison(ty);
         exn = self.insert_value(exn, exn0, 0);
         exn = self.insert_value(exn, exn1, 1);
@@ -1161,7 +1157,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
         let llty = self.cx.type_func(
-            &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
             self.cx.type_void(),
         );
         let args = &[fn_name, hash, num_counters, index];
@@ -1183,6 +1179,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn call(
         &mut self,
         llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
         args: &[&'ll Value],
@@ -1193,23 +1190,21 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("call", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
         let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
-        let mut bundles = vec![funclet_bundle];
-
-        // Set KCFI operand bundle
-        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
-        let kcfi_bundle =
-            if let Some(fn_abi) = fn_abi && self.tcx.sess.is_sanitizer_kcfi_enabled() && is_indirect_call {
-                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
-                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
-            } else {
-                None
-            };
-        if kcfi_bundle.is_some() {
-            let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
+
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        if let Some(kcfi_bundle) = kcfi_bundle {
             bundles.push(kcfi_bundle);
         }
 
-        bundles.retain(|bundle| bundle.is_some());
         let call = unsafe {
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
@@ -1231,9 +1226,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
-    fn do_not_inline(&mut self, llret: &'ll Value) {
-        let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
-        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+    fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+        if llvm_util::get_version() < (17, 0, 2) {
+            // Work around https://github.com/llvm/llvm-project/issues/66984.
+            let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+        } else {
+            // Cleanup is always the cold path.
+            let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
+        }
     }
 }
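The version gate above, reduced to its decision table; the tuple comparison is lexicographic, so 17.0.1 still gets the workaround:

fn cleanup_callsite_attr(llvm_version: (u32, u32, u32)) -> &'static str {
    if llvm_version < (17, 0, 2) {
        "noinline" // work around llvm/llvm-project#66984
    } else {
        "cold" // cleanup paths are cold, so hint the inliner accordingly
    }
}

fn main() {
    assert_eq!(cleanup_callsite_attr((17, 0, 1)), "noinline");
    assert_eq!(cleanup_callsite_attr((17, 0, 2)), "cold");
}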
 
@@ -1388,25 +1390,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         ret.expect("LLVM does not have support for catchret")
     }
 
-    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
-        let dest_ptr_ty = self.cx.val_ty(ptr);
-        let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
-        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
-        if dest_ptr_ty == stored_ptr_ty {
-            ptr
-        } else {
-            debug!(
-                "type mismatch in store. \
-                    Expected {:?}, got {:?}; inserting bitcast",
-                dest_ptr_ty, stored_ptr_ty
-            );
-            self.bitcast(ptr, stored_ptr_ty)
-        }
-    }
-
     fn check_call<'b>(
         &mut self,
         typ: &str,
@@ -1416,9 +1399,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
     ) -> Cow<'b, [&'ll Value]> {
         assert!(
             self.cx.type_kind(fn_ty) == TypeKind::Function,
-            "builder::{} not passed a function, but {:?}",
-            typ,
-            fn_ty
+            "builder::{typ} not passed a function, but {fn_ty:?}"
         );
 
         let param_tys = self.cx.func_params_types(fn_ty);
@@ -1456,7 +1437,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
     pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
         let (ty, f) = self.cx.get_intrinsic(intrinsic);
-        self.call(ty, None, f, args, None)
+        self.call(ty, None, None, f, args, None)
     }
 
     fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
@@ -1469,7 +1450,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             return;
         }
 
-        let ptr = self.pointercast(ptr, self.cx.type_i8p());
         self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
     }
 
@@ -1510,15 +1490,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
         let instr = if signed { "fptosi" } else { "fptoui" };
         let name = if let Some(vector_length) = vector_length {
-            format!(
-                "llvm.{}.sat.v{}i{}.v{}f{}",
-                instr, vector_length, int_width, vector_length, float_width
-            )
+            format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}")
         } else {
-            format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+            format!("llvm.{instr}.sat.i{int_width}.f{float_width}")
         };
         let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
-        self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None)
+        self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None)
     }
 
     pub(crate) fn landing_pad(
@@ -1535,4 +1512,81 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
         }
     }
+
+    // Emits CFI pointer type membership tests.
+    fn cfi_type_test(
+        &mut self,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+    ) {
+        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+        if self.tcx.sess.is_sanitizer_cfi_enabled()
+            && let Some(fn_abi) = fn_abi
+            && is_indirect_call
+        {
+            if let Some(fn_attrs) = fn_attrs
+                && fn_attrs.no_sanitize.contains(SanitizerSet::CFI)
+            {
+                return;
+            }
+
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            let typeid = typeid_for_fnabi(self.tcx, fn_abi, options);
+            let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
+
+            // Test whether the function pointer is associated with the type identifier.
+            let cond = self.type_test(llfn, typeid_metadata);
+            let bb_pass = self.append_sibling_block("type_test.pass");
+            let bb_fail = self.append_sibling_block("type_test.fail");
+            self.cond_br(cond, bb_pass, bb_fail);
+
+            self.switch_to_block(bb_fail);
+            self.abort();
+            self.unreachable();
+
+            self.switch_to_block(bb_pass);
+        }
+    }
+
+    // Emits KCFI operand bundles.
+    fn kcfi_operand_bundle(
+        &mut self,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+    ) -> Option<llvm::OperandBundleDef<'ll>> {
+        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+        let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
+            && let Some(fn_abi) = fn_abi
+            && is_indirect_call
+        {
+            if let Some(fn_attrs) = fn_attrs
+                && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI)
+            {
+                return None;
+            }
+
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+        } else {
+            None
+        };
+        kcfi_bundle
+    }
 }
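The branch structure `cfi_type_test` emits, sketched as ordinary Rust; the membership test itself is LLVM's `llvm.type.test` intrinsic, so a plain boolean stands in for it here:

fn guarded_indirect_call(f: fn(i32) -> i32, type_test_passed: bool, x: i32) -> i32 {
    if !type_test_passed {
        // type_test.fail block: abort, then unreachable
        std::process::abort();
    }
    // type_test.pass block: the indirect call proceeds
    f(x)
}

fn main() {
    fn add_one(x: i32) -> i32 {
        x + 1
    }
    assert_eq!(guarded_indirect_call(add_one, true, 41), 42);
}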
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 6ee2a05ffd7..d5778757caa 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -4,13 +4,11 @@
 //! and methods are represented as just a fn ptr and not a full
 //! closure.
 
-use crate::abi::FnAbiLlvmExt;
 use crate::attributes;
 use crate::common;
 use crate::context::CodegenCx;
 use crate::llvm;
 use crate::value::Value;
-use rustc_codegen_ssa::traits::*;
 
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
 use rustc_middle::ty::{self, Instance, TypeVisitableExt};
@@ -27,8 +25,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
 
     debug!("get_fn(instance={:?})", instance);
 
-    assert!(!instance.substs.needs_infer());
-    assert!(!instance.substs.has_escaping_bound_vars());
+    assert!(!instance.args.has_infer());
+    assert!(!instance.args.has_escaping_bound_vars());
 
     if let Some(&llfn) = cx.instances.borrow().get(&instance) {
         return llfn;
@@ -45,43 +43,11 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
 
     let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
-        // Create a fn pointer with the new signature.
-        let llptrty = fn_abi.ptr_to_llvm_type(cx);
-
-        // This is subtle and surprising, but sometimes we have to bitcast
-        // the resulting fn pointer. The reason has to do with external
-        // functions. If you have two crates that both bind the same C
-        // library, they may not use precisely the same types: for
-        // example, they will probably each declare their own structs,
-        // which are distinct types from LLVM's point of view (nominal
-        // types).
-        //
-        // Now, if those two crates are linked into an application, and
-        // they contain inlined code, you can wind up with a situation
-        // where both of those functions wind up being loaded into this
-        // application simultaneously. In that case, the same function
-        // (from LLVM's point of view) requires two types. But of course
-        // LLVM won't allow one function to have two types.
-        //
-        // What we currently do, therefore, is declare the function with
-        // one of the two types (whichever happens to come first) and then
-        // bitcast as needed when the function is referenced to make sure
-        // it has the type we expect.
-        //
-        // This can occur on either a crate-local or crate-external
-        // reference. It also occurs when testing libcore and in some
-        // other weird situations. Annoying.
-        if cx.val_ty(llfn) != llptrty {
-            debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
-            cx.const_ptrcast(llfn, llptrty)
-        } else {
-            debug!("get_fn: not casting pointer!");
-            llfn
-        }
+        llfn
     } else {
         let instance_def_id = instance.def_id();
-        let llfn = if tcx.sess.target.arch == "x86" &&
-            let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
+        let llfn = if tcx.sess.target.arch == "x86"
+            && let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
         {
             // Fix for https://github.com/rust-lang/rust/issues/104453
             // On x86 Windows, LLVM uses 'L' as the prefix for any private
@@ -94,11 +60,21 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
             // LLVM will prefix the name with `__imp_`. Ideally, we'd like the
             // existing logic below to set the Storage Class, but it has an
             // exemption for MinGW for backwards compatibility.
-            let llfn = cx.declare_fn(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&tcx.sess.target), true), fn_abi);
-            unsafe { llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); }
+            let llfn = cx.declare_fn(
+                &common::i686_decorated_name(
+                    &dllimport,
+                    common::is_mingw_gnu_toolchain(&tcx.sess.target),
+                    true,
+                ),
+                fn_abi,
+                Some(instance),
+            );
+            unsafe {
+                llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+            }
             llfn
         } else {
-            cx.declare_fn(sym, fn_abi)
+            cx.declare_fn(sym, fn_abi, Some(instance))
         };
         debug!("get_fn: not casting pointer!");
 
@@ -129,7 +105,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
         unsafe {
             llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
 
-            let is_generic = instance.substs.non_erasable_generics().next().is_some();
+            let is_generic =
+                instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
 
             if is_generic {
                 // This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 9127fba388b..0b0816c27b6 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -1,23 +1,20 @@
 //! Code that is useful in various codegen modules.
 
-use crate::consts::{self, const_alloc_to_llvm};
+use crate::consts::const_alloc_to_llvm;
 pub use crate::context::CodegenCx;
 use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
 use crate::type_::Type;
-use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 
 use rustc_ast::Mutability;
-use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
 use rustc_hir::def_id::DefId;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::TyCtxt;
 use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
-use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
 use rustc_target::spec::Target;
 
 use libc::{c_char, c_uint};
@@ -169,6 +166,10 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         self.const_uint(self.type_i64(), i)
     }
 
+    fn const_u128(&self, i: u128) -> &'ll Value {
+        self.const_uint_big(self.type_i128(), i)
+    }
+
     fn const_usize(&self, i: u64) -> &'ll Value {
         let bit_size = self.data_layout().pointer_size.bits();
         if bit_size < 64 {
@@ -208,11 +209,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             })
             .1;
         let len = s.len();
-        let cs = consts::ptrcast(
-            str_global,
-            self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
-        );
-        (cs, self.const_usize(len as u64))
+        (str_global, self.const_usize(len as u64))
     }
 
     fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
@@ -287,9 +284,9 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     }
                 };
                 let llval = unsafe {
-                    llvm::LLVMRustConstInBoundsGEP2(
+                    llvm::LLVMConstInBoundsGEP2(
                         self.type_i8(),
-                        self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+                        self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)),
                         &self.const_usize(offset.bytes()),
                         1,
                     )
@@ -307,37 +304,19 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         const_alloc_to_llvm(self, alloc)
     }
 
-    fn from_const_alloc(
-        &self,
-        layout: TyAndLayout<'tcx>,
-        alloc: ConstAllocation<'tcx>,
-        offset: Size,
-    ) -> PlaceRef<'tcx, &'ll Value> {
-        let alloc_align = alloc.inner().align;
-        assert_eq!(alloc_align, layout.align.abi);
-        let llty = self.type_ptr_to(layout.llvm_type(self));
-        let llval = if layout.size == Size::ZERO {
-            let llval = self.const_usize(alloc_align.bytes());
-            unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
-        } else {
-            let init = const_alloc_to_llvm(self, alloc);
-            let base_addr = self.static_addr_of(init, alloc_align, None);
-
-            let llval = unsafe {
-                llvm::LLVMRustConstInBoundsGEP2(
-                    self.type_i8(),
-                    self.const_bitcast(base_addr, self.type_i8p()),
-                    &self.const_usize(offset.bytes()),
-                    1,
-                )
-            };
-            self.const_bitcast(llval, llty)
-        };
-        PlaceRef::new_sized(llval, layout)
-    }
-
-    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-        consts::ptrcast(val, ty)
+    fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        self.const_bitcast(val, ty)
+    }
+
+    fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
+        unsafe {
+            llvm::LLVMConstInBoundsGEP2(
+                self.type_i8(),
+                base_addr,
+                &self.const_usize(offset.bytes()),
+                1,
+            )
+        }
     }
 }
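`const_ptr_byte_offset` above is a constant GEP over `i8`, i.e. a plain byte offset from the base address; the runtime analogue in Rust:

fn main() {
    let data = [10u8, 20, 30, 40];
    let base = data.as_ptr();
    // Equivalent of `getelementptr inbounds i8, ptr %base, i64 2`.
    let two_past = unsafe { base.add(2) };
    assert_eq!(unsafe { *two_past }, 30);
}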
 
@@ -431,10 +410,10 @@ pub(crate) fn i686_decorated_name(
             DllCallingConvention::C => {}
             DllCallingConvention::Stdcall(arg_list_size)
             | DllCallingConvention::Fastcall(arg_list_size) => {
-                write!(&mut decorated_name, "@{}", arg_list_size).unwrap();
+                write!(&mut decorated_name, "@{arg_list_size}").unwrap();
             }
             DllCallingConvention::Vectorcall(arg_list_size) => {
-                write!(&mut decorated_name, "@@{}", arg_list_size).unwrap();
+                write!(&mut decorated_name, "@@{arg_list_size}").unwrap();
             }
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 940358acde9..73821b1685d 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -1,7 +1,9 @@
 use crate::base;
 use crate::common::{self, CodegenCx};
 use crate::debuginfo;
-use crate::errors::{InvalidMinimumAlignment, SymbolAlreadyDefined};
+use crate::errors::{
+    InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined,
+};
 use crate::llvm::{self, True};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
@@ -19,7 +21,9 @@ use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::Lto;
-use rustc_target::abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange};
+use rustc_target::abi::{
+    Align, AlignFromBytesError, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
 use std::ops::Range;
 
 pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
@@ -99,7 +103,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                 value: Primitive::Pointer(address_space),
                 valid_range: WrappingRange::full(dl.pointer_size),
             },
-            cx.type_i8p_ext(address_space),
+            cx.type_ptr_ext(address_space),
         ));
         next_offset = offset + pointer_size;
     }
@@ -129,9 +133,14 @@ fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align:
     if let Some(min) = cx.sess().target.min_global_align {
         match Align::from_bits(min) {
             Ok(min) => align = align.max(min),
-            Err(err) => {
-                cx.sess().emit_err(InvalidMinimumAlignment { err });
-            }
+            Err(err) => match err {
+                AlignFromBytesError::NotPowerOfTwo(align) => {
+                    cx.sess().emit_err(InvalidMinimumAlignmentNotPowerOfTwo { align });
+                }
+                AlignFromBytesError::TooLarge(align) => {
+                    cx.sess().emit_err(InvalidMinimumAlignmentTooLarge { align });
+                }
+            },
         }
     }
     unsafe {
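A minimal stand-in for the two failure modes now distinguished above (the power-of-two check and the size cap; the 2^29-byte limit mirrors `Align::MAX` in `rustc_target` and is an assumption here, as is this whole hypothetical helper):

// Illustrative re-implementation of the validation; the real logic
// is `Align::from_bits` / `Align::from_bytes`.
fn check_min_align(bytes: u64) -> Result<u64, String> {
    const MAX_BYTES: u64 = 1 << 29; // assumed cap, see Align::MAX
    if !bytes.is_power_of_two() {
        Err(format!("alignment {bytes} is not a power of two"))
    } else if bytes > MAX_BYTES {
        Err(format!("alignment {bytes} is too large"))
    } else {
        Ok(bytes)
    }
}

fn main() {
    assert!(check_min_align(16).is_ok());
    assert!(check_min_align(24).is_err()); // NotPowerOfTwo
    assert!(check_min_align(1 << 30).is_err()); // TooLarge
}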
@@ -170,13 +179,20 @@ fn check_and_apply_linkage<'ll, 'tcx>(
                 })
             });
             llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
-            llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty));
+            llvm::LLVMSetInitializer(g2, g1);
             g2
         }
-    } else if cx.tcx.sess.target.arch == "x86" &&
-        let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
+    } else if cx.tcx.sess.target.arch == "x86"
+        && let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
     {
-        cx.declare_global(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&cx.tcx.sess.target), true), llty)
+        cx.declare_global(
+            &common::i686_decorated_name(
+                &dllimport,
+                common::is_mingw_gnu_toolchain(&cx.tcx.sess.target),
+                true,
+            ),
+            llty,
+        )
     } else {
         // Generate an external declaration.
         // FIXME(nagisa): investigate whether it can be changed into define_global
@@ -184,10 +200,6 @@ fn check_and_apply_linkage<'ll, 'tcx>(
     }
 }
 
-pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-    unsafe { llvm::LLVMConstPointerCast(val, ty) }
-}
-
 impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMConstBitCast(val, ty) }
@@ -229,8 +241,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         assert!(
             !defined_in_current_codegen_unit,
             "consts::get_static() should always hit the cache for \
-                 statics defined in the same CGU, but did not for `{:?}`",
-            def_id
+                 statics defined in the same CGU, but did not for `{def_id:?}`"
         );
 
         let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@@ -242,7 +253,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
             let llty = self.layout_of(ty).llvm_type(self);
             if let Some(g) = self.get_declared_value(sym) {
-                if self.val_ty(g) != self.type_ptr_to(llty) {
+                if self.val_ty(g) != self.type_ptr() {
                     span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
                 }
             }
@@ -543,16 +554,14 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
         }
     }
 
-    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr.
     fn add_used_global(&self, global: &'ll Value) {
-        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
-        self.used_statics.borrow_mut().push(cast);
+        self.used_statics.borrow_mut().push(global);
     }
 
     /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
-    /// an array of i8*.
+    /// an array of ptr.
     fn add_compiler_used_global(&self, global: &'ll Value) {
-        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
-        self.compiler_used_statics.borrow_mut().push(cast);
+        self.compiler_used_statics.borrow_mut().push(global);
     }
 }
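With opaque pointers, both lists are plain arrays of `ptr`, so the globals can be pushed without a cast. At the source level, the usual way entries end up in these lists is the `#[used]` attribute; a small example (which of the two lists it lands in depends on the target and rustc version):

// `#[used]` asks codegen to keep this static alive through LLVM's
// optimizer by registering it in `llvm.used`/`llvm.compiler.used`.
#[used]
static KEEP_ME: [u8; 4] = *b"live";

fn main() {}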
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index f0d729d4779..b4b2ab1e1f8 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -9,7 +9,8 @@ use crate::type_::Type;
 use crate::value::Value;
 
 use cstr::cstr;
-use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
@@ -33,6 +34,7 @@ use rustc_target::abi::{
 use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
 use smallvec::SmallVec;
 
+use libc::c_uint;
 use std::cell::{Cell, RefCell};
 use std::ffi::CStr;
 use std::str;
@@ -58,17 +60,6 @@ pub struct CodegenCx<'ll, 'tcx> {
     /// Cache of constant strings,
     pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,
 
-    /// Reverse-direction for const ptrs cast from globals.
-    ///
-    /// Key is a Value holding a `*T`,
-    /// Val is a Value holding a `*[T]`.
-    ///
-    /// Needed because LLVM loses pointer->pointee association
-    /// when we ptrcast, and we have to ptrcast during codegen
-    /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
-    /// a pointer to an LLVM array type. Similar for trait objects.
-    pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
-
     /// Cache of emitted const globals (value -> global)
     pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
 
@@ -155,12 +146,23 @@ pub unsafe fn create_module<'ll>(
             target_data_layout = target_data_layout.replace("-n32:64-", "-n64-");
         }
     }
+    if llvm_version < (17, 0, 0) {
+        if sess.target.arch.starts_with("powerpc") {
+            // LLVM 17 specifies function pointer alignment for ppc:
+            // https://reviews.llvm.org/D147016
+            target_data_layout = target_data_layout
+                .replace("-Fn32", "")
+                .replace("-Fi32", "")
+                .replace("-Fn64", "")
+                .replace("-Fi64", "");
+        }
+    }
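As a quick illustration of the string surgery above (the layout fragment in the assertion is made up, not a real ppc data layout):

// Strip the function-pointer alignment components that only LLVM 17+
// understands; `-Fn32`/`-Fi32`/`-Fn64`/`-Fi64` are the forms rustc's
// target specs may contain.
fn strip_fn_align(layout: &str) -> String {
    ["-Fn32", "-Fi32", "-Fn64", "-Fi64"]
        .into_iter()
        .fold(layout.to_string(), |s, pat| s.replace(pat, ""))
}

fn main() {
    assert_eq!(strip_fn_align("e-m:e-Fn32-i64:64"), "e-m:e-i64:64");
}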
 
     // Ensure the data-layout values hardcoded remain the defaults.
     if sess.target.is_builtin {
+        // `tm` is disposed by its `Drop` impl; no manual dispose call is needed
         let tm = crate::back::write::create_informational_target_machine(tcx.sess);
-        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
-        llvm::LLVMRustDisposeTargetMachine(tm);
+        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm);
 
         let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
         let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
@@ -208,7 +210,7 @@ pub unsafe fn create_module<'ll>(
         // PIE is potentially more effective than PIC, but can only be used in executables.
         // If all our outputs are executables, then we can relax PIC to PIE.
         if reloc_model == RelocModel::Pie
-            || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
+            || tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable)
         {
             llvm::LLVMRustSetModulePIELevel(llmod);
         }
@@ -228,18 +230,29 @@ pub unsafe fn create_module<'ll>(
         llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
     }
 
-    if sess.is_sanitizer_cfi_enabled() {
-        // FIXME(rcvalle): Add support for non canonical jump tables.
+    // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
+    if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
         let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
-        // FIXME(rcvalle): Add it with Override behavior flag.
         llvm::LLVMRustAddModuleFlag(
             llmod,
-            llvm::LLVMModFlagBehavior::Warning,
+            llvm::LLVMModFlagBehavior::Override,
             canonical_jump_tables,
             1,
         );
     }
 
+    // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
+    if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
+        let enable_split_lto_unit = "EnableSplitLTOUnit\0".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Override,
+            enable_split_lto_unit,
+            1,
+        );
+    }
+
+    // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
     if sess.is_sanitizer_kcfi_enabled() {
         let kcfi = "kcfi\0".as_ptr().cast();
         llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
@@ -338,6 +351,23 @@ pub unsafe fn create_module<'ll>(
         );
     }
 
+    // Insert `llvm.ident` metadata.
+    //
+    // On the wasm targets it will get hooked up to the "producer" section's
+    // `processed-by` information.
+    let rustc_producer =
+        format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"));
+    let name_metadata = llvm::LLVMMDStringInContext(
+        llcx,
+        rustc_producer.as_ptr().cast(),
+        rustc_producer.as_bytes().len() as c_uint,
+    );
+    llvm::LLVMAddNamedMetadataOperand(
+        llmod,
+        cstr!("llvm.ident").as_ptr(),
+        llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1),
+    );
+
     llmod
 }
 
@@ -435,7 +465,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             instances: Default::default(),
             vtables: Default::default(),
             const_str_cache: Default::default(),
-            const_unsized: Default::default(),
             const_globals: Default::default(),
             statics_to_rauw: RefCell::new(Vec::new()),
             used_statics: RefCell::new(Vec::new()),
@@ -466,7 +495,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 
     pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
         let section = cstr!("llvm.metadata");
-        let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
+        let array = self.const_array(self.type_ptr(), values);
 
         unsafe {
             let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
@@ -517,19 +546,28 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         if let Some(llpersonality) = self.eh_personality.get() {
             return llpersonality;
         }
+
+        let name = if wants_msvc_seh(self.sess()) {
+            Some("__CxxFrameHandler3")
+        } else if wants_wasm_eh(self.sess()) {
+            // LLVM specifically tests for the name of the personality function.
+            // There is no need for this function to exist anywhere; it will
+            // not be called. However, its name has to be "__gxx_wasm_personality_v0"
+            // for native wasm exceptions.
+            Some("__gxx_wasm_personality_v0")
+        } else {
+            None
+        };
+
         let tcx = self.tcx;
         let llfn = match tcx.lang_items().eh_personality() {
-            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+            Some(def_id) if name.is_none() => self.get_fn_addr(
                 ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
                     .unwrap()
                     .unwrap(),
             ),
             _ => {
-                let name = if wants_msvc_seh(self.sess()) {
-                    "__CxxFrameHandler3"
-                } else {
-                    "rust_eh_personality"
-                };
+                let name = name.unwrap_or("rust_eh_personality");
                 if let Some(llfn) = self.get_declared_value(name) {
                     llfn
                 } else {
@@ -635,7 +673,7 @@ impl<'ll> CodegenCx<'ll, '_> {
             ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
         }
 
-        let i8p = self.type_i8p();
+        let ptr = self.type_ptr();
         let void = self.type_void();
         let i1 = self.type_i1();
         let t_i8 = self.type_i8();
@@ -647,6 +685,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let t_f32 = self.type_f32();
         let t_f64 = self.type_f64();
         let t_metadata = self.type_metadata();
+        let t_token = self.type_token();
+
+        ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
+        ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
 
         ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
         ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
@@ -681,7 +723,7 @@ impl<'ll> CodegenCx<'ll, '_> {
 
         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);
-        ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+        ifn!("llvm.frameaddress", fn(t_i32) -> ptr);
 
         ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
         ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
@@ -848,43 +890,44 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
         ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
 
-        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
-        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
+        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);
 
         ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
-        ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+        ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
         ifn!("llvm.localescape", fn(...) -> void);
-        ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
-        ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+        ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
+        ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);
 
         ifn!("llvm.assume", fn(i1) -> void);
-        ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+        ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);
 
         // This isn't an "LLVM intrinsic", but LLVM's optimization passes
-        // recognize it like one and we assume it exists in `core::slice::cmp`
+        // recognize it like one (including turning it into `bcmp` sometimes)
+        // and we use it to implement intrinsics like `raw_eq` and `compare_bytes`.
         match self.sess().target.arch.as_ref() {
-            "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
-            _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
+            "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
+            _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
         }
 
         // variadic intrinsics
-        ifn!("llvm.va_start", fn(i8p) -> void);
-        ifn!("llvm.va_end", fn(i8p) -> void);
-        ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+        ifn!("llvm.va_start", fn(ptr) -> void);
+        ifn!("llvm.va_end", fn(ptr) -> void);
+        ifn!("llvm.va_copy", fn(ptr, ptr) -> void);
 
         if self.sess().instrument_coverage() {
-            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+            ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
         }
 
-        ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
-        ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
+        ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
+        ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});
 
         if self.sess().opts.debuginfo != DebugInfo::None {
             ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
             ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
         }
 
-        ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);
+        ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
 
         None
     }
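A user-visible instance of the `memcmp` special-casing mentioned in the comment above: equality comparisons of byte arrays typically compile down to a single `memcmp`/`bcmp` call rather than a byte-by-byte loop (an optimization, not a guarantee):

#[inline(never)]
fn bytes_equal(a: &[u8; 32], b: &[u8; 32]) -> bool {
    // LLVM usually lowers this to one bcmp/memcmp call.
    a == b
}

fn main() {
    assert!(bytes_equal(&[7; 32], &[7; 32]));
}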
@@ -898,12 +941,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
             Some(def_id) => self.get_static(def_id),
             _ => {
-                let ty = self
-                    .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+                let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false);
                 self.declare_global("rust_eh_catch_typeinfo", ty)
             }
         };
-        let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
         self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
         eh_catch_typeinfo
     }
@@ -957,10 +998,10 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
 
     #[inline]
     fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
-        if let LayoutError::SizeOverflow(_) = err {
-            self.sess().emit_fatal(Spanned { span, node: err })
+        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
+            self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
         } else {
-            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+            self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
         }
     }
 }
@@ -980,21 +1021,12 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
         } else {
             match fn_abi_request {
                 FnAbiRequest::OfFnPtr { sig, extra_args } => {
-                    span_bug!(
-                        span,
-                        "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
-                        sig,
-                        extra_args,
-                        err
-                    );
+                    span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}");
                 }
                 FnAbiRequest::OfInstance { instance, extra_args } => {
                     span_bug!(
                         span,
-                        "`fn_abi_of_instance({}, {:?})` failed: {}",
-                        instance,
-                        extra_args,
-                        err
+                        "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}",
                     );
                 }
             }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
new file mode 100644
index 00000000000..763186a58bf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -0,0 +1,279 @@
+use rustc_middle::mir::coverage::{CounterId, ExpressionId, Operand};
+
+/// Must match the layout of `LLVMRustCounterKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum CounterKind {
+    Zero = 0,
+    CounterValueReference = 1,
+    Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+///   * For `CounterKind::Zero`, `id` is assumed to be `0`
+///   * For `CounterKind::CounterValueReference`, `id` matches the `counter_id` of the injected
+///     instrumentation counter (the `index` argument to the LLVM intrinsic
+///     `instrprof.increment()`)
+///   * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+///     counter expressions.
+///
+/// Corresponds to struct `llvm::coverage::Counter`.
+///
+/// Must match the layout of `LLVMRustCounter`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+    // Important: The layout (order and types of fields) must match its C++ counterpart.
+    pub kind: CounterKind,
+    id: u32,
+}
+
+impl Counter {
+    /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
+    pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
+
+    /// Constructs a new `Counter` of kind `CounterValueReference`.
+    pub fn counter_value_reference(counter_id: CounterId) -> Self {
+        Self { kind: CounterKind::CounterValueReference, id: counter_id.as_u32() }
+    }
+
+    /// Constructs a new `Counter` of kind `Expression`.
+    pub(crate) fn expression(expression_id: ExpressionId) -> Self {
+        Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
+    }
+
+    pub(crate) fn from_operand(operand: Operand) -> Self {
+        match operand {
+            Operand::Zero => Self::ZERO,
+            Operand::Counter(id) => Self::counter_value_reference(id),
+            Operand::Expression(id) => Self::expression(id),
+        }
+    }
+}
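Because these `#[repr(C)]` types must stay layout-compatible with their C++ counterparts, a quick sanity check of the expected sizes can be useful (the types are re-declared here for illustration; the 4-byte C enum size is an assumption that holds on common targets):

#[repr(C)]
#[allow(dead_code)]
enum CounterKind { Zero = 0, CounterValueReference = 1, Expression = 2 }

#[repr(C)]
#[allow(dead_code)]
struct Counter { kind: CounterKind, id: u32 }

fn main() {
    // kind occupies 4 bytes (C enum), so id sits at offset 4.
    assert_eq!(std::mem::size_of::<Counter>(), 8);
    assert_eq!(std::mem::align_of::<Counter>(), 4);
}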
+
+/// Corresponds to enum `llvm::coverage::CounterExpression::ExprKind`.
+///
+/// Must match the layout of `LLVMRustCounterExprKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+    Subtract = 0,
+    Add = 1,
+}
+
+/// Corresponds to struct `llvm::coverage::CounterExpression`.
+///
+/// Must match the layout of `LLVMRustCounterExpression`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+    pub kind: ExprKind,
+    pub lhs: Counter,
+    pub rhs: Counter,
+}
+
+impl CounterExpression {
+    /// The dummy expression `(0 - 0)` has a representation of all zeroes,
+    /// making it marginally more efficient to initialize than `(0 + 0)`.
+    pub(crate) const DUMMY: Self =
+        Self { lhs: Counter::ZERO, kind: ExprKind::Subtract, rhs: Counter::ZERO };
+
+    pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
+        Self { kind, lhs, rhs }
+    }
+}
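Why the `(0 - 0)` dummy is all zero bytes: both `Counter::ZERO` fields are zero and `Subtract` has discriminant 0, while `(0 + 0)` would put a 1 in the `kind` slot. A tiny check of the discriminants (re-declared for illustration):

#[repr(C)]
enum ExprKind { Subtract = 0, Add = 1 }

fn main() {
    assert_eq!(ExprKind::Subtract as u32, 0);
    assert_eq!(ExprKind::Add as u32, 1);
}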
+
+/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegionKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum RegionKind {
+    /// A CodeRegion associates some code with a counter.
+    CodeRegion = 0,
+
+    /// An ExpansionRegion represents a file expansion region that associates
+    /// a source range with the expansion of a virtual source file, such as
+    /// for a macro instantiation or #include file.
+    ExpansionRegion = 1,
+
+    /// A SkippedRegion represents a source range with code that was skipped
+    /// by a preprocessor or similar means.
+    SkippedRegion = 2,
+
+    /// A GapRegion is like a CodeRegion, but its count is only set as the
+    /// line execution count when it's the only region on the line.
+    GapRegion = 3,
+
+    /// A BranchRegion represents leaf-level boolean expressions and is
+    /// associated with two counters, each representing the number of times the
+    /// expression evaluates to true or false.
+    BranchRegion = 4,
+}
+
+/// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
+/// coverage map, in accordance with the
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// The struct composes fields representing the `Counter` type and value(s) (injected counter
+/// ID, or expression type and operands), the source file (an indirect index into a "filenames
+/// array", encoded separately), and source location (start and end positions of the represented
+/// code region).
+///
+/// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegion`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterMappingRegion {
+    /// The counter type and type-dependent counter data, if any.
+    counter: Counter,
+
+    /// If the `RegionKind` is a `BranchRegion`, this represents the counter
+    /// for the false branch of the region.
+    false_counter: Counter,
+
+    /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+    /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+    /// that, in turn, are used to look up the filename for this region.
+    file_id: u32,
+
+    /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+    /// the mapping regions created as a result of macro expansion, by checking if their file id
+    /// matches the expanded file id.
+    expanded_file_id: u32,
+
+    /// 1-based starting line of the mapping region.
+    start_line: u32,
+
+    /// 1-based starting column of the mapping region.
+    start_col: u32,
+
+    /// 1-based ending line of the mapping region.
+    end_line: u32,
+
+    /// 1-based ending column of the mapping region. If the high bit is set, the current
+    /// mapping region is a gap area.
+    end_col: u32,
+
+    kind: RegionKind,
+}
+
+impl CounterMappingRegion {
+    pub(crate) fn code_region(
+        counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::CodeRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn branch_region(
+        counter: Counter,
+        false_counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::BranchRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn expansion_region(
+        file_id: u32,
+        expanded_file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::ExpansionRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn skipped_region(
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::SkippedRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn gap_region(
+        counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col: (1_u32 << 31) | end_col,
+            kind: RegionKind::GapRegion,
+        }
+    }
+}
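The gap-area flag in `gap_region` lives in the high bit of `end_col`; a small round-trip of that encoding:

const GAP_AREA_BIT: u32 = 1 << 31;

fn encode_gap_end_col(end_col: u32) -> u32 { GAP_AREA_BIT | end_col }
fn is_gap_area(end_col: u32) -> bool { end_col & GAP_AREA_BIT != 0 }
fn raw_end_col(end_col: u32) -> u32 { end_col & !GAP_AREA_BIT }

fn main() {
    let encoded = encode_gap_end_col(17);
    assert!(is_gap_area(encoded));
    assert_eq!(raw_end_col(encoded), 17);
}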
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
new file mode 100644
index 00000000000..55f43aa5341
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -0,0 +1,294 @@
+use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
+
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, ExpressionId, Op, Operand};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Expression {
+    lhs: Operand,
+    op: Op,
+    rhs: Operand,
+    code_regions: Vec<CodeRegion>,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (addition or subtraction), and (c) unreachable regions (always counted as zero),
+/// for a given Function. This struct also stores the `function_source_hash`,
+/// computed during instrumentation, and forwarded with counters.
+///
+/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
+/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
+/// or expression), but the line or lines in the gap region are not executable (such as lines with
+/// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
+/// for a gap area is only used as the line execution count if there are no other regions on a
+/// line."
+#[derive(Debug)]
+pub struct FunctionCoverage<'tcx> {
+    instance: Instance<'tcx>,
+    source_hash: u64,
+    is_used: bool,
+    counters: IndexVec<CounterId, Option<Vec<CodeRegion>>>,
+    expressions: IndexVec<ExpressionId, Option<Expression>>,
+    unreachable_regions: Vec<CodeRegion>,
+}
+
+impl<'tcx> FunctionCoverage<'tcx> {
+    /// Creates a new set of coverage data for a used (called) function.
+    pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        Self::create(tcx, instance, true)
+    }
+
+    /// Creates a new set of coverage data for an unused (never called) function.
+    pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        Self::create(tcx, instance, false)
+    }
+
+    fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
+        let coverageinfo = tcx.coverageinfo(instance.def);
+        debug!(
+            "FunctionCoverage::create(instance={:?}) has coverageinfo={:?}. is_used={}",
+            instance, coverageinfo, is_used
+        );
+        Self {
+            instance,
+            source_hash: 0, // will be set with the first `add_counter()`
+            is_used,
+            counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
+            expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
+            unreachable_regions: Vec::new(),
+        }
+    }
+
+    /// Returns true for a used (called) function, and false for an unused function.
+    pub fn is_used(&self) -> bool {
+        self.is_used
+    }
+
+    /// Sets the function source hash value. If called multiple times for the same function, all
+    /// calls should have the same hash value.
+    pub fn set_function_source_hash(&mut self, source_hash: u64) {
+        if self.source_hash == 0 {
+            self.source_hash = source_hash;
+        } else {
+            debug_assert_eq!(source_hash, self.source_hash);
+        }
+    }
+
+    /// Adds code regions to be counted by an injected counter intrinsic.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_counter(&mut self, id: CounterId, code_regions: &[CodeRegion]) {
+        if code_regions.is_empty() {
+            return;
+        }
+
+        let slot = &mut self.counters[id];
+        match slot {
+            None => *slot = Some(code_regions.to_owned()),
+            // If this counter ID slot has already been filled, it should
+            // contain identical information.
+            Some(ref previous_regions) => assert_eq!(
+                previous_regions, code_regions,
+                "add_counter: code regions for id changed"
+            ),
+        }
+    }
+
+    /// Adds information about a coverage expression, along with zero or more
+    /// code regions mapped to that expression.
+    ///
+    /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+    /// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity
+    /// between operands that are counter IDs and operands that are expression IDs.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_counter_expression(
+        &mut self,
+        expression_id: ExpressionId,
+        lhs: Operand,
+        op: Op,
+        rhs: Operand,
+        code_regions: &[CodeRegion],
+    ) {
+        debug_assert!(
+            expression_id.as_usize() < self.expressions.len(),
+            "expression_id {} is out of range for expressions.len() = {}
+            for {:?}",
+            expression_id.as_usize(),
+            self.expressions.len(),
+            self,
+        );
+
+        let expression = Expression { lhs, op, rhs, code_regions: code_regions.to_owned() };
+        let slot = &mut self.expressions[expression_id];
+        match slot {
+            None => *slot = Some(expression),
+            // If this expression ID slot has already been filled, it should
+            // contain identical information.
+            Some(ref previous_expression) => assert_eq!(
+                previous_expression, &expression,
+                "add_counter_expression: expression for id changed"
+            ),
+        }
+    }
+
+    /// Adds regions that will be marked as "unreachable", with a constant "zero counter".
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_unreachable_regions(&mut self, code_regions: &[CodeRegion]) {
+        assert!(!code_regions.is_empty(), "unreachable regions always have code regions");
+        self.unreachable_regions.extend_from_slice(code_regions);
+    }
+
+    /// Perform some simplifications to make the final coverage mappings
+    /// slightly smaller.
+    ///
+    /// This method mainly exists to preserve the simplifications that were
+    /// already being performed by the Rust-side expression renumbering, so that
+    /// the resulting coverage mappings don't get worse.
+    pub(crate) fn simplify_expressions(&mut self) {
+        // The set of expressions that either were optimized out entirely, or
+        // have zero as both of their operands, and will therefore always have
+        // a value of zero. Other expressions that refer to these as operands
+        // can have those operands replaced with `Operand::Zero`.
+        let mut zero_expressions = FxIndexSet::default();
+
+        // For each expression, perform simplifications based on lower-numbered
+        // expressions, and then update the set of always-zero expressions if
+        // necessary.
+        // (By construction, expressions can only refer to other expressions
+        // that have lower IDs, so one simplification pass is sufficient.)
+        for (id, maybe_expression) in self.expressions.iter_enumerated_mut() {
+            let Some(expression) = maybe_expression else {
+                // If an expression is missing, it must have been optimized away,
+                // so any operand that refers to it can be replaced with zero.
+                zero_expressions.insert(id);
+                continue;
+            };
+
+            // If an operand refers to an expression that is always zero, then
+            // that operand can be replaced with `Operand::Zero`.
+            let maybe_set_operand_to_zero = |operand: &mut Operand| match &*operand {
+                Operand::Expression(id) if zero_expressions.contains(id) => {
+                    *operand = Operand::Zero;
+                }
+                _ => (),
+            };
+            maybe_set_operand_to_zero(&mut expression.lhs);
+            maybe_set_operand_to_zero(&mut expression.rhs);
+
+            // Coverage counter values cannot be negative, so if an expression
+            // involves subtraction from zero, assume that its RHS must also be zero.
+            // (Do this after simplifications that could set the LHS to zero.)
+            if let Expression { lhs: Operand::Zero, op: Op::Subtract, .. } = expression {
+                expression.rhs = Operand::Zero;
+            }
+
+            // After the above simplifications, if both operands are zero, then
+            // we know that this expression is always zero too.
+            if let Expression { lhs: Operand::Zero, rhs: Operand::Zero, .. } = expression {
+                zero_expressions.insert(id);
+            }
+        }
+    }
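A self-contained model of the pass above, with `Operand`/`Op` mocked (the real types live in `rustc_middle::mir::coverage`); because expressions only refer to lower-numbered expressions, one forward pass is enough:

#[derive(Clone, Copy)]
enum Operand { Zero, Counter(u32), Expression(usize) }
#[derive(Clone, Copy)]
#[allow(dead_code)]
enum Op { Add, Subtract }
struct Expression { lhs: Operand, op: Op, rhs: Operand }

fn simplify(exprs: &mut [Option<Expression>]) -> Vec<bool> {
    let mut always_zero = vec![false; exprs.len()];
    for id in 0..exprs.len() {
        let Some(expr) = exprs[id].as_mut() else {
            always_zero[id] = true; // optimized out => treated as zero
            continue;
        };
        // Replace operands that refer to always-zero expressions.
        for operand in [&mut expr.lhs, &mut expr.rhs] {
            if let Operand::Expression(i) = *operand {
                if always_zero[i] {
                    *operand = Operand::Zero;
                }
            }
        }
        // Counters can't be negative, so `0 - rhs` forces rhs to zero.
        if let (Operand::Zero, Op::Subtract) = (expr.lhs, expr.op) {
            expr.rhs = Operand::Zero;
        }
        if let (Operand::Zero, Operand::Zero) = (expr.lhs, expr.rhs) {
            always_zero[id] = true;
        }
    }
    always_zero
}

fn main() {
    // expr0 was optimized out; expr1 = expr0 - counter5 simplifies to zero.
    let mut exprs = vec![
        None,
        Some(Expression { lhs: Operand::Expression(0), op: Op::Subtract, rhs: Operand::Counter(5) }),
    ];
    let zero = simplify(&mut exprs);
    assert!(zero[0] && zero[1]);
}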
+
+    /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+    /// or not the source code structure changed between different compilations.
+    pub fn source_hash(&self) -> u64 {
+        self.source_hash
+    }
+
+    /// Generate an array of CounterExpressions, and an iterator over all `Counter`s and their
+    /// associated `Regions` (from which the LLVM-specific mapgen code will create
+    /// `CounterMappingRegion`s).
+    pub fn get_expressions_and_counter_regions(
+        &self,
+    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+        assert!(
+            self.source_hash != 0 || !self.is_used,
+            "No counters provided the source_hash for used function: {:?}",
+            self.instance
+        );
+
+        let counter_expressions = self.counter_expressions();
+        // Expression IDs are indices into `self.expressions`, and on the LLVM
+        // side they will be treated as indices into `counter_expressions`, so
+        // the two vectors should correspond 1:1.
+        assert_eq!(self.expressions.len(), counter_expressions.len());
+
+        let counter_regions = self.counter_regions();
+        let expression_regions = self.expression_regions();
+        let unreachable_regions = self.unreachable_regions();
+
+        let counter_regions =
+            counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
+        (counter_expressions, counter_regions)
+    }
+
+    fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+        self.counters
+            .iter_enumerated()
+            // Filter out counter IDs that we never saw during MIR traversal.
+            // This can happen if a counter was optimized out by MIR transforms
+            // (and replaced with `CoverageKind::Unreachable` instead).
+            .filter_map(|(id, maybe_code_regions)| Some((id, maybe_code_regions.as_ref()?)))
+            .flat_map(|(id, code_regions)| {
+                let counter = Counter::counter_value_reference(id);
+                code_regions.iter().map(move |region| (counter, region))
+            })
+    }
+
+    /// Convert this function's coverage expression data into a form that can be
+    /// passed through FFI to LLVM.
+    fn counter_expressions(&self) -> Vec<CounterExpression> {
+        // We know that LLVM will optimize out any unused expressions before
+        // producing the final coverage map, so there's no need to do the same
+        // thing on the Rust side unless we're confident we can do much better.
+        // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
+
+        self.expressions
+            .iter()
+            .map(|expression| match expression {
+                None => {
+                    // This expression ID was allocated, but we never saw the
+                    // actual expression, so it must have been optimized out.
+                    // Replace it with a dummy expression, and let LLVM take
+                    // care of omitting it from the expression list.
+                    CounterExpression::DUMMY
+                }
+                &Some(Expression { lhs, op, rhs, .. }) => {
+                    // Convert the operands and operator as normal.
+                    CounterExpression::new(
+                        Counter::from_operand(lhs),
+                        match op {
+                            Op::Add => ExprKind::Add,
+                            Op::Subtract => ExprKind::Subtract,
+                        },
+                        Counter::from_operand(rhs),
+                    )
+                }
+            })
+            .collect::<Vec<_>>()
+    }
+
+    fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
+        // Find all of the expression IDs that weren't optimized out AND have
+        // one or more attached code regions, and return the corresponding
+        // mappings as counter/region pairs.
+        self.expressions
+            .iter_enumerated()
+            .filter_map(|(id, maybe_expression)| {
+                let code_regions = &maybe_expression.as_ref()?.code_regions;
+                Some((id, code_regions))
+            })
+            .flat_map(|(id, code_regions)| {
+                let counter = Counter::expression(id);
+                code_regions.iter().map(move |code_region| (counter, code_region))
+            })
+            .collect::<Vec<_>>()
+    }
+
+    fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+        self.unreachable_regions.iter().map(|region| (Counter::ZERO, region))
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 240a9d2f371..d4e77525698 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,20 +1,19 @@
 use crate::common::CodegenCx;
 use crate::coverageinfo;
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::FunctionCoverage;
 use crate::llvm;
 
-use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
-use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
+use rustc_codegen_ssa::traits::ConstMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
-use rustc_llvm::RustString;
+use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::CodeRegion;
 use rustc_middle::ty::TyCtxt;
-
-use std::ffi::CString;
+use rustc_span::Symbol;
 
 /// Generates and exports the Coverage Map.
 ///
@@ -57,21 +56,21 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
         return;
     }
 
-    let mut mapgen = CoverageMapGenerator::new(tcx);
+    let mut global_file_table = GlobalFileTable::new(tcx);
 
     // Encode coverage mappings and generate function records
     let mut function_data = Vec::new();
-    for (instance, function_coverage) in function_coverage_map {
+    for (instance, mut function_coverage) in function_coverage_map {
         debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
-        let mangled_function_name = tcx.symbol_name(instance).to_string();
+        function_coverage.simplify_expressions();
+        let function_coverage = function_coverage;
+
+        let mangled_function_name = tcx.symbol_name(instance).name;
         let source_hash = function_coverage.source_hash();
         let is_used = function_coverage.is_used();
-        let (expressions, counter_regions) =
-            function_coverage.get_expressions_and_counter_regions();
 
-        let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
-            mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
-        });
+        let coverage_mapping_buffer =
+            encode_mappings_for_function(&mut global_file_table, &function_coverage);
 
         if coverage_mapping_buffer.is_empty() {
             if function_coverage.is_used() {
@@ -89,20 +88,20 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
     }
 
     // Encode all filenames referenced by counters/expressions in this module
-    let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
-        coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
-    });
+    let filenames_buffer = global_file_table.into_filenames_buffer();
 
     let filenames_size = filenames_buffer.len();
     let filenames_val = cx.const_bytes(&filenames_buffer);
-    let filenames_ref = coverageinfo::hash_bytes(filenames_buffer);
+    let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
 
     // Generate the LLVM IR representation of the coverage map and store it in a well-known global
-    let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
 
+    let covfun_section_name = coverageinfo::covfun_section_name(cx);
     for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
         save_function_record(
             cx,
+            &covfun_section_name,
             mangled_function_name,
             source_hash,
             filenames_ref,
@@ -115,112 +114,129 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
     coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
 }
 
-struct CoverageMapGenerator {
-    filenames: FxIndexSet<CString>,
+struct GlobalFileTable {
+    global_file_table: FxIndexSet<Symbol>,
 }
 
-impl CoverageMapGenerator {
+impl GlobalFileTable {
     fn new(tcx: TyCtxt<'_>) -> Self {
-        let mut filenames = FxIndexSet::default();
+        let mut global_file_table = FxIndexSet::default();
         // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
         // requires setting the first filename to the compilation directory.
         // Since rustc generates coverage maps with relative paths, the
         // compilation directory can be combined with the relative paths
         // to get absolute paths, if needed.
-        let working_dir =
-            tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy().to_string();
-        let c_filename =
-            CString::new(working_dir).expect("null error converting filename to C string");
-        filenames.insert(c_filename);
-        Self { filenames }
+        let working_dir = Symbol::intern(
+            &tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
+        );
+        global_file_table.insert(working_dir);
+        Self { global_file_table }
     }
 
-    /// Using the `expressions` and `counter_regions` collected for the current function, generate
-    /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
-    /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
-    /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
-    fn write_coverage_mapping<'a>(
-        &mut self,
-        expressions: Vec<CounterExpression>,
-        counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
-        coverage_mapping_buffer: &RustString,
-    ) {
-        let mut counter_regions = counter_regions.collect::<Vec<_>>();
-        if counter_regions.is_empty() {
-            return;
-        }
+    fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
+        let (global_file_id, _) = self.global_file_table.insert_full(file_name);
+        global_file_id as u32
+    }
 
-        let mut virtual_file_mapping = Vec::new();
-        let mut mapping_regions = Vec::new();
-        let mut current_file_name = None;
-        let mut current_file_id = 0;
-
-        // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
-        // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
-        // `file_id` (indexing files referenced by the current function), and construct the
-        // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
-        // `filenames` array.
-        counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
-        for (counter, region) in counter_regions {
-            let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
-            let same_file = current_file_name.map_or(false, |p| p == file_name);
-            if !same_file {
-                if current_file_name.is_some() {
-                    current_file_id += 1;
-                }
-                current_file_name = Some(file_name);
-                let c_filename = CString::new(file_name.to_string())
-                    .expect("null error converting filename to C string");
-                debug!("  file_id: {} = '{:?}'", current_file_id, c_filename);
-                let (filenames_index, _) = self.filenames.insert_full(c_filename);
-                virtual_file_mapping.push(filenames_index as u32);
-            }
-            debug!("Adding counter {:?} to map for {:?}", counter, region);
+    fn into_filenames_buffer(self) -> Vec<u8> {
+        // This method takes `self` so that the caller can't accidentally
+        // modify the original file table after encoding it into a buffer.
+
+        llvm::build_byte_buffer(|buffer| {
+            coverageinfo::write_filenames_section_to_buffer(
+                self.global_file_table.iter().map(Symbol::as_str),
+                buffer,
+            );
+        })
+    }
+}
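The table above is an insert-or-lookup over an `FxIndexSet`; a std-only sketch of the same ID assignment (slot 0 reserved for the working directory, as in `GlobalFileTable::new`):

fn global_file_id(table: &mut Vec<String>, file_name: &str) -> u32 {
    match table.iter().position(|f| f.as_str() == file_name) {
        Some(i) => i as u32,
        None => {
            table.push(file_name.to_string());
            (table.len() - 1) as u32
        }
    }
}

fn main() {
    let mut table = vec!["/work/dir".to_string()]; // slot 0: working dir
    assert_eq!(global_file_id(&mut table, "src/lib.rs"), 1);
    assert_eq!(global_file_id(&mut table, "src/lib.rs"), 1); // stable on re-lookup
}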
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+    global_file_table: &mut GlobalFileTable,
+    function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+    let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
+
+    let mut counter_regions = counter_regions.collect::<Vec<_>>();
+    if counter_regions.is_empty() {
+        return Vec::new();
+    }
+
+    let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+    let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+    // Sort the list of (counter, region) mapping pairs by region, so that they
+    // can be grouped by filename. Prepare file IDs for each filename, and
+    // prepare the mapping data so that we can pass it through FFI to LLVM.
+    counter_regions.sort_by_key(|(_counter, region)| *region);
+    for counter_regions_for_file in
+        counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+    {
+        // Look up (or allocate) the global file ID for this filename.
+        let file_name = counter_regions_for_file[0].1.file_name;
+        let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+        // Associate that global file ID with a local file ID for this function.
+        let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
+        debug!("  file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
+
+        // For each counter/region pair in this function+file, convert it to a
+        // form suitable for FFI.
+        for &(counter, region) in counter_regions_for_file {
+            let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
+
+            debug!("Adding counter {counter:?} to map for {region:?}");
             mapping_regions.push(CounterMappingRegion::code_region(
                 counter,
-                current_file_id,
+                local_file_id,
                 start_line,
                 start_col,
                 end_line,
                 end_col,
             ));
         }
+    }
 
-        // Encode and append the current function's coverage mapping data
+    // Encode the function's coverage mappings into a buffer.
+    llvm::build_byte_buffer(|buffer| {
         coverageinfo::write_mapping_to_buffer(
-            virtual_file_mapping,
+            virtual_file_mapping.raw,
             expressions,
             mapping_regions,
-            coverage_mapping_buffer,
+            buffer,
         );
-    }
+    })
+}
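The `group_by` call above is the nightly slice API (later stabilized as `chunk_by`); on stable Rust the same per-file grouping of the sorted pairs can be written by hand:

// Split sorted (counter, file_name) pairs into runs with equal file names.
fn group_runs<'a>(pairs: &'a [(u32, &'a str)]) -> Vec<&'a [(u32, &'a str)]> {
    let mut groups = Vec::new();
    let mut start = 0;
    for i in 1..=pairs.len() {
        if i == pairs.len() || pairs[i].1 != pairs[start].1 {
            groups.push(&pairs[start..i]);
            start = i;
        }
    }
    groups
}

fn main() {
    let pairs = [(1, "a.rs"), (2, "a.rs"), (3, "b.rs")];
    let groups = group_runs(&pairs);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups[0].len(), 2);
    assert_eq!(groups[1][0].1, "b.rs");
}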
 
-    /// Construct coverage map header and the array of function records, and combine them into the
-    /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
-    /// specific, well-known section and name.
-    fn generate_coverage_map<'ll>(
-        self,
-        cx: &CodegenCx<'ll, '_>,
-        version: u32,
-        filenames_size: usize,
-        filenames_val: &'ll llvm::Value,
-    ) -> &'ll llvm::Value {
-        debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
-
-        // Create the coverage data header (Note, fields 0 and 2 are now always zero,
-        // as of `llvm::coverage::CovMapVersion::Version4`.)
-        let zero_was_n_records_val = cx.const_u32(0);
-        let filenames_size_val = cx.const_u32(filenames_size as u32);
-        let zero_was_coverage_size_val = cx.const_u32(0);
-        let version_val = cx.const_u32(version);
-        let cov_data_header_val = cx.const_struct(
-            &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
-            /*packed=*/ false,
-        );
+/// Construct coverage map header and the array of function records, and combine them into the
+/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    version: u32,
+    filenames_size: usize,
+    filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+    debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+    // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+    // as of `llvm::coverage::CovMapVersion::Version4`.)
+    let zero_was_n_records_val = cx.const_u32(0);
+    let filenames_size_val = cx.const_u32(filenames_size as u32);
+    let zero_was_coverage_size_val = cx.const_u32(0);
+    let version_val = cx.const_u32(version);
+    let cov_data_header_val = cx.const_struct(
+        &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+        /*packed=*/ false,
+    );
 
-        // Create the complete LLVM coverage data value to add to the LLVM IR
-        cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
-    }
+    // Create the complete LLVM coverage data value to add to the LLVM IR
+    cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
 }
 
 /// Construct a function record and combine it with the function's coverage mapping data.
@@ -228,7 +244,8 @@ impl CoverageMapGenerator {
 /// specific, well-known section and name.
 fn save_function_record(
     cx: &CodegenCx<'_, '_>,
-    mangled_function_name: String,
+    covfun_section_name: &str,
+    mangled_function_name: &str,
     source_hash: u64,
     filenames_ref: u64,
     coverage_mapping_buffer: Vec<u8>,
@@ -238,7 +255,7 @@ fn save_function_record(
     let coverage_mapping_size = coverage_mapping_buffer.len();
     let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
 
-    let func_name_hash = coverageinfo::hash_str(&mangled_function_name);
+    let func_name_hash = coverageinfo::hash_bytes(mangled_function_name.as_bytes());
     let func_name_hash_val = cx.const_u64(func_name_hash);
     let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
     let source_hash_val = cx.const_u64(source_hash);
@@ -254,7 +271,13 @@ fn save_function_record(
         /*packed=*/ true,
     );
 
-    coverageinfo::save_func_record_to_mod(cx, func_name_hash, func_record_val, is_used);
+    coverageinfo::save_func_record_to_mod(
+        cx,
+        covfun_section_name,
+        func_name_hash,
+        func_record_val,
+        is_used,
+    );
 }
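
For orientation, a hypothetical mirror of the packed record built above; the
coverage mapping bytes follow these header fields (field names assumed, and
`#[repr(C, packed)]` stands in for the /*packed=*/ true constant struct):

    #[repr(C, packed)]
    struct FuncRecordHeader {
        func_name_hash: u64,        // hash of the mangled function name
        coverage_mapping_size: u32, // length of the mapping data that follows
        source_hash: u64,
        filenames_ref: u64,         // ties the record to the covmap filenames
    }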
 
 /// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
@@ -310,10 +333,10 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
     {
         let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
 
-        // If a function is marked `#[no_coverage]`, then skip generating a
+        // If a function is marked `#[coverage(off)]`, then skip generating a
         // dead code stub for it.
         if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
-            debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+            debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
             continue;
         }
 
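From the user's side, the renamed attribute is spelled as below (on nightly
toolchains of this era it was still feature-gated); a minimal sketch:

    #[coverage(off)]
    fn helper() {
        // no counters are injected here, and per the hunk above,
        // no dead code stub is generated for it either
    }
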
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 3dc0ac03312..dd2ce9b525b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -3,33 +3,34 @@ use crate::llvm;
 use crate::abi::Abi;
 use crate::builder::Builder;
 use crate::common::CodegenCx;
+use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::map_data::FunctionCoverage;
 
 use libc::c_uint;
-use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
 use rustc_codegen_ssa::traits::{
-    BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, CoverageInfoMethods,
-    MiscMethods, StaticMethods,
+    BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods,
+    StaticMethods,
 };
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_llvm::RustString;
 use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
-    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
-};
+use rustc_middle::mir::coverage::{CounterId, CoverageKind};
+use rustc_middle::mir::Coverage;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::FnAbiOf;
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::GenericArgs;
 use rustc_middle::ty::Instance;
+use rustc_middle::ty::Ty;
 
 use std::cell::RefCell;
-use std::ffi::CString;
 
+pub(crate) mod ffi;
+pub(crate) mod map_data;
 pub mod mapgen;
 
-const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
+const UNUSED_FUNCTION_COUNTER_ID: CounterId = CounterId::START;
 
 const VAR_ALIGN_BYTES: usize = 8;
 
@@ -53,11 +54,17 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
     }
 }
 
-impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
-    fn coverageinfo_finalize(&self) {
+// These methods used to be part of trait `CoverageInfoMethods`, which no longer
+// exists after most coverage code was moved out of SSA.
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+    pub(crate) fn coverageinfo_finalize(&self) {
         mapgen::finalize(self)
     }
 
+    /// For LLVM codegen, returns a function-specific `Value` for a global
+    /// string, to hold the function name passed to LLVM intrinsic
+    /// `instrprof.increment()`. The `Value` is only created once per instance.
+    /// Multiple invocations with the same instance return the same `Value`.
     fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
         if let Some(coverage_context) = self.coverage_context() {
             debug!("getting pgo_func_name_var for instance={:?}", instance);
@@ -82,9 +89,7 @@ impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     /// `function_coverage_map` (keyed by function `Instance`) during codegen.
     /// But in this case, since the unused function was _not_ previously
     /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
-    /// them. The first `CodeRegion` is used to add a single counter, with the
-    /// same counter ID used in the injected `instrprof.increment` intrinsic
-    /// call. Since the function is never called, all other `CodeRegion`s can be
+    /// them. Since the function is never called, all of its `CodeRegion`s can be
     /// added as `unreachable_region`s.
     fn define_unused_fn(&self, def_id: DefId) {
         let instance = declare_unused_fn(self, def_id);
@@ -94,89 +99,46 @@ impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 }
 
 impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
-    fn set_function_source_hash(
-        &mut self,
-        instance: Instance<'tcx>,
-        function_source_hash: u64,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "ensuring function source hash is set for instance={:?}; function_source_hash={}",
-                instance, function_source_hash,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .set_function_source_hash(function_source_hash);
-            true
-        } else {
-            false
-        }
-    }
-
-    fn add_coverage_counter(
-        &mut self,
-        instance: Instance<'tcx>,
-        id: CounterValueReference,
-        region: CodeRegion,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
-                instance, id, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_counter(id, region);
-            true
-        } else {
-            false
-        }
-    }
-
-    fn add_coverage_counter_expression(
-        &mut self,
-        instance: Instance<'tcx>,
-        id: InjectedExpressionId,
-        lhs: ExpressionOperandId,
-        op: Op,
-        rhs: ExpressionOperandId,
-        region: Option<CodeRegion>,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
-                region: {:?}",
-                instance, id, lhs, op, rhs, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_counter_expression(id, lhs, op, rhs, region);
-            true
-        } else {
-            false
-        }
-    }
-
-    fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding unreachable code to coverage_map: instance={:?}, at {:?}",
-                instance, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_unreachable_region(region);
-            true
-        } else {
-            false
+    fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
+        let bx = self;
+
+        let Some(coverage_context) = bx.coverage_context() else { return };
+        let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+        let func_coverage = coverage_map
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
+
+        let Coverage { kind, code_regions } = coverage;
+        match *kind {
+            CoverageKind::Counter { function_source_hash, id } => {
+                debug!(
+                    "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+                    instance, function_source_hash,
+                );
+                func_coverage.set_function_source_hash(function_source_hash);
+                func_coverage.add_counter(id, code_regions);
+                // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
+                // as that needs an exclusive borrow.
+                drop(coverage_map);
+
+                let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+                let fn_name = bx.get_pgo_func_name_var(instance);
+                let hash = bx.const_u64(function_source_hash);
+                let num_counters = bx.const_u32(coverageinfo.num_counters);
+                let index = bx.const_u32(id.as_u32());
+                debug!(
+                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                    fn_name, hash, num_counters, index,
+                );
+                bx.instrprof_increment(fn_name, hash, num_counters, index);
+            }
+            CoverageKind::Expression { id, lhs, op, rhs } => {
+                func_coverage.add_counter_expression(id, lhs, op, rhs, code_regions);
+            }
+            CoverageKind::Unreachable => {
+                func_coverage.add_unreachable_regions(code_regions);
+            }
         }
     }
 }
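
The explicit `drop(coverage_map)` above is load-bearing: `instrprof_increment`
re-borrows the builder, so the `RefCell` borrow must end first. The same
discipline in a standalone sketch (illustrative types only):

    use std::cell::RefCell;

    fn record_then_increment(map: &RefCell<Vec<u32>>) {
        let mut guard = map.borrow_mut();
        guard.push(1);
        drop(guard); // end the dynamic borrow first...
        bump(map);   // ...so this re-borrow does not panic at runtime
    }

    fn bump(map: &RefCell<Vec<u32>>) {
        map.borrow_mut().push(2);
    }
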
@@ -186,7 +148,7 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
 
     let instance = Instance::new(
         def_id,
-        InternalSubsts::for_item(tcx, def_id, |param, _| {
+        GenericArgs::for_item(tcx, def_id, |param, _| {
             if let ty::GenericParamDefKind::Lifetime = param.kind {
                 tcx.lifetimes.re_erased.into()
             } else {
@@ -199,14 +161,15 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
         tcx.symbol_name(instance).name,
         cx.fn_abi_of_fn_ptr(
             ty::Binder::dummy(tcx.mk_fn_sig(
-                [tcx.mk_unit()],
-                tcx.mk_unit(),
+                [Ty::new_unit(tcx)],
+                Ty::new_unit(tcx),
                 false,
                 hir::Unsafety::Unsafe,
                 Abi::Rust,
             )),
             ty::List::empty(),
         ),
+        None,
     );
 
     llvm::set_linkage(llfn, llvm::Linkage::PrivateLinkage);
@@ -242,14 +205,9 @@ fn add_unused_function_coverage<'tcx>(
     let tcx = cx.tcx;
 
     let mut function_coverage = FunctionCoverage::unused(tcx, instance);
-    for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
-        if index == 0 {
-            // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
-            // definitions.
-            function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
-        } else {
-            function_coverage.add_unreachable_region(code_region.clone());
-        }
+    for &code_region in tcx.covered_code_regions(def_id) {
+        let code_region = std::slice::from_ref(code_region);
+        function_coverage.add_unreachable_regions(code_region);
     }
 
     if let Some(coverage_context) = cx.coverage_context() {
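
The `std::slice::from_ref` adapter bridges the old one-region-at-a-time API to
the new slice-taking one without cloning; a tiny sketch with a stand-in type:

    let region = String::from("lib.rs:1:1"); // stand-in for a CodeRegion
    let regions: &[String] = std::slice::from_ref(&region);
    assert_eq!(regions.len(), 1);
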
@@ -269,21 +227,32 @@ fn create_pgo_func_name_var<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
 ) -> &'ll llvm::Value {
-    let mangled_fn_name = CString::new(cx.tcx.symbol_name(instance).name)
-        .expect("error converting function name to C string");
+    let mangled_fn_name: &str = cx.tcx.symbol_name(instance).name;
     let llfn = cx.get_fn(instance);
-    unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
+    unsafe {
+        llvm::LLVMRustCoverageCreatePGOFuncNameVar(
+            llfn,
+            mangled_fn_name.as_ptr().cast(),
+            mangled_fn_name.len(),
+        )
+    }
 }
 
 pub(crate) fn write_filenames_section_to_buffer<'a>(
-    filenames: impl IntoIterator<Item = &'a CString>,
+    filenames: impl IntoIterator<Item = &'a str>,
     buffer: &RustString,
 ) {
-    let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
+    let (pointers, lengths) = filenames
+        .into_iter()
+        .map(|s: &str| (s.as_ptr().cast(), s.len()))
+        .unzip::<_, _, Vec<_>, Vec<_>>();
+
     unsafe {
         llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
-            c_str_vec.as_ptr(),
-            c_str_vec.len(),
+            pointers.as_ptr(),
+            pointers.len(),
+            lengths.as_ptr(),
+            lengths.len(),
             buffer,
         );
     }
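
The unzip above splits each `&str` into a raw pointer and a length so the FFI
call receives two parallel arrays; the same split on hypothetical input:

    fn main() {
        let filenames = ["lib.rs", "main.rs"];
        let (pointers, lengths): (Vec<*const u8>, Vec<usize>) =
            filenames.iter().map(|s| (s.as_ptr(), s.len())).unzip();
        assert_eq!(lengths, vec![6, 7]);
        assert_eq!(pointers.len(), 2);
    }
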
@@ -308,12 +277,7 @@ pub(crate) fn write_mapping_to_buffer(
     }
 }
 
-pub(crate) fn hash_str(strval: &str) -> u64 {
-    let strval = CString::new(strval).expect("null error converting hashable str to C string");
-    unsafe { llvm::LLVMRustCoverageHashCString(strval.as_ptr()) }
-}
-
-pub(crate) fn hash_bytes(bytes: Vec<u8>) -> u64 {
+pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
     unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
 }
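
Callers of the removed `hash_str` now hash the raw bytes directly, skipping
the `CString` round-trip (and its panic on interior NULs), as seen earlier in
this diff:

    let func_name_hash = coverageinfo::hash_bytes(mangled_function_name.as_bytes());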
 
@@ -348,6 +312,7 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
 
 pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
+    covfun_section_name: &str,
     func_name_hash: u64,
     func_record_val: &'ll llvm::Value,
     is_used: bool,
@@ -363,20 +328,33 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
     let func_record_var_name =
         format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
     debug!("function record var name: {:?}", func_record_var_name);
-
-    let func_record_section_name = llvm::build_string(|s| unsafe {
-        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
-    })
-    .expect("Rust Coverage function record section name failed UTF-8 conversion");
-    debug!("function record section name: {:?}", func_record_section_name);
+    debug!("function record section name: {:?}", covfun_section_name);
 
     let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
     llvm::set_initializer(llglobal, func_record_val);
     llvm::set_global_constant(llglobal, true);
     llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
     llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
-    llvm::set_section(llglobal, &func_record_section_name);
+    llvm::set_section(llglobal, covfun_section_name);
     llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
     llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
     cx.add_used_global(llglobal);
 }
+
+/// Returns the section name string to pass through to the linker when embedding
+/// per-function coverage information in the object file, according to the target
+/// platform's object file format.
+///
+/// LLVM's coverage tools read coverage mapping details from this section when
+/// producing coverage reports.
+///
+/// Typical values are:
+/// - `__llvm_covfun` on Linux
+/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
+/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
+pub(crate) fn covfun_section_name(cx: &CodegenCx<'_, '_>) -> String {
+    llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
+    })
+    .expect("Rust Coverage function record section name failed UTF-8 conversion")
+}
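
Restating the doc comment's typical values as an illustrative lookup (the real
name always comes from the LLVM helper above, never from a table like this):

    fn typical_covfun_section(object_format: &str) -> &'static str {
        match object_format {
            "elf" => "__llvm_covfun",
            "macho" => "__LLVM_COV,__llvm_covfun",
            "coff" => ".lcovfun$M",
            _ => "__llvm_covfun",
        }
    }
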
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 99e4ded62f1..6a63eda4b99 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -12,7 +12,7 @@ use rustc_middle::ty::{self, Instance};
 use rustc_session::config::DebugInfo;
 
 use rustc_index::bit_set::BitSet;
-use rustc_index::vec::Idx;
+use rustc_index::Idx;
 
 /// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
 // FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`.
@@ -20,7 +20,7 @@ pub fn compute_mir_scopes<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
 ) {
     // Find all scopes with variables defined in them.
     let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
@@ -51,7 +51,7 @@ fn make_mir_scope<'ll, 'tcx>(
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
     variables: &Option<BitSet<SourceScope>>,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
     instantiated: &mut BitSet<SourceScope>,
     scope: SourceScope,
 ) {
@@ -65,17 +65,20 @@ fn make_mir_scope<'ll, 'tcx>(
         debug_context.scopes[parent]
     } else {
         // The root is the function itself.
-        let loc = cx.lookup_debug_loc(mir.span.lo());
+        let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
         debug_context.scopes[scope] = DebugScope {
-            file_start_pos: loc.file.start_pos,
-            file_end_pos: loc.file.end_pos,
+            file_start_pos: file.start_pos,
+            file_end_pos: file.end_position(),
             ..debug_context.scopes[scope]
         };
         instantiated.insert(scope);
         return;
     };
 
-    if let Some(vars) = variables && !vars.contains(scope) && scope_data.inlined.is_none() {
+    if let Some(vars) = variables
+        && !vars.contains(scope)
+        && scope_data.inlined.is_none()
+    {
         // Do not create a DIScope if there are no variables defined in this
         // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
         debug_context.scopes[scope] = parent_scope;
@@ -86,27 +89,31 @@ fn make_mir_scope<'ll, 'tcx>(
     let loc = cx.lookup_debug_loc(scope_data.span.lo());
     let file_metadata = file_metadata(cx, &loc.file);
 
-    let dbg_scope = match scope_data.inlined {
+    let parent_dbg_scope = match scope_data.inlined {
         Some((callee, _)) => {
             // FIXME(eddyb) this would be `self.monomorphize(&callee)`
             // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
-            let callee = cx.tcx.subst_and_normalize_erasing_regions(
-                instance.substs,
+            let callee = cx.tcx.instantiate_and_normalize_erasing_regions(
+                instance.args,
                 ty::ParamEnv::reveal_all(),
-                callee,
+                ty::EarlyBinder::bind(callee),
             );
-            let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
-            cx.dbg_scope_fn(callee, callee_fn_abi, None)
+            debug_context.inlined_function_scopes.entry(callee).or_insert_with(|| {
+                let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+                cx.dbg_scope_fn(callee, callee_fn_abi, None)
+            })
         }
-        None => unsafe {
-            llvm::LLVMRustDIBuilderCreateLexicalBlock(
-                DIB(cx),
-                parent_scope.dbg_scope,
-                file_metadata,
-                loc.line,
-                loc.col,
-            )
-        },
+        None => parent_scope.dbg_scope,
+    };
+
+    let dbg_scope = unsafe {
+        llvm::LLVMRustDIBuilderCreateLexicalBlock(
+            DIB(cx),
+            parent_dbg_scope,
+            file_metadata,
+            loc.line,
+            loc.col,
+        )
     };
 
     let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
@@ -120,7 +127,7 @@ fn make_mir_scope<'ll, 'tcx>(
         dbg_scope,
         inlined_at: inlined_at.or(parent_scope.inlined_at),
         file_start_pos: loc.file.start_pos,
-        file_end_pos: loc.file.end_pos,
+        file_end_pos: loc.file.end_position(),
     };
     instantiated.insert(scope);
 }
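
Caching through `inlined_function_scopes` means each inlined callee gets a
single `DISubprogram` no matter how many times it is inlined; the pattern in
miniature (assumed key and value types):

    use std::collections::HashMap;

    fn scope_for(
        cache: &mut HashMap<String, u32>,
        callee: &str,
        create: impl FnOnce() -> u32,
    ) -> u32 {
        *cache.entry(callee.to_owned()).or_insert_with(create)
    }
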
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index aaf5dbd9930..425e935bc9f 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -9,17 +9,15 @@ use rustc_ast::attr;
 use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::LOCAL_CRATE;
-use rustc_middle::bug;
+use rustc_middle::{bug, middle::debugger_visualizer::DebuggerVisualizerType};
 use rustc_session::config::{CrateType, DebugInfo};
 use rustc_span::symbol::sym;
-use rustc_span::DebuggerVisualizerType;
 
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
     if needs_gdb_debug_scripts_section(bx) {
-        let gdb_debug_scripts_section =
-            bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
         let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
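
The one-byte volatile load is the usual keep-alive trick; a std-only sketch of
the same idea (a stand-in for the builder calls above):

    static GDB_SCRIPTS: [u8; 4] = *b"\x04gdb";

    fn reference_scripts_section() {
        // a volatile read cannot be optimized out, so the global
        // (and the section containing it) stays referenced
        unsafe { std::ptr::read_volatile(GDB_SCRIPTS.as_ptr()) };
    }
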
@@ -55,7 +53,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '
             // The initial byte `4` instructs GDB that the following pretty printer
             // is defined inline as opposed to in a standalone file.
             section_contents.extend_from_slice(b"\x04");
-            let vis_name = format!("pretty-printer-{}-{}\n", crate_name, index);
+            let vis_name = format!("pretty-printer-{crate_name}-{index}\n");
             section_contents.extend_from_slice(vis_name.as_bytes());
             section_contents.extend_from_slice(&visualizer.src);
 
@@ -94,7 +92,7 @@ pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
     // each rlib could produce a different set of visualizers that would be embedded
     // in the `.debug_gdb_scripts` section. For that reason, we make sure that the
     // section is only emitted for leaf crates.
-    let embed_visualizers = cx.sess().crate_types().iter().any(|&crate_type| match crate_type {
+    let embed_visualizers = cx.tcx.crate_types().iter().any(|&crate_type| match crate_type {
         CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
             // These are crate types for which we will embed pretty printers since they
             // are treated as leaf crates.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 21a0a60b012..11874898a5a 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -29,7 +29,6 @@ use rustc_hir::def::CtorKind;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
-use rustc_middle::ty::subst::GenericArgKind;
 use rustc_middle::ty::{
     self, AdtKind, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt, Visibility,
 };
@@ -169,7 +168,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
     debug_assert_eq!(
         cx.size_and_align_of(ptr_type),
-        cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
+        cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
     );
 
     let pointee_type_di_node = type_di_node(cx, pointee_type);
@@ -185,9 +184,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
             debug_assert_eq!(
                 (data_layout.pointer_size, data_layout.pointer_align.abi),
                 cx.size_and_align_of(ptr_type),
-                "ptr_type={}, pointee_type={}",
-                ptr_type,
-                pointee_type,
+                "ptr_type={ptr_type}, pointee_type={pointee_type}",
             );
 
             let di_node = unsafe {
@@ -224,8 +221,11 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                     //        at all and instead emit regular struct debuginfo for it. We just
                     //        need to make sure that we don't break existing debuginfo consumers
                     //        by doing that (at least not without a warning period).
-                    let layout_type =
-                        if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
+                    let layout_type = if ptr_type.is_box() {
+                        Ty::new_mut_ptr(cx.tcx, pointee_type)
+                    } else {
+                        ptr_type
+                    };
 
                     let layout = cx.layout_of(layout_type);
                     let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
@@ -335,12 +335,20 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
 
     // This is actually a function pointer, so wrap it in pointer DI.
     let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
+    let (size, align) = match fn_ty.kind() {
+        ty::FnDef(..) => (0, 1),
+        ty::FnPtr(..) => (
+            cx.tcx.data_layout.pointer_size.bits(),
+            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+        ),
+        _ => unreachable!(),
+    };
     let di_node = unsafe {
         llvm::LLVMRustDIBuilderCreatePointerType(
             DIB(cx),
             fn_di_node,
-            cx.tcx.data_layout.pointer_size.bits(),
-            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+            size,
+            align,
             0, // Ignore DWARF address space.
             name.as_ptr().cast(),
             name.len(),
@@ -431,7 +439,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
         return existing_di_node;
     }
 
-    debug!("type_di_node: {:?}", t);
+    debug!("type_di_node: {:?} kind: {:?}", t, t.kind());
 
     let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
         ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
@@ -445,9 +453,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
         ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
             build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
         }
-        // Box<T, A> may have a non-ZST allocator A. In that case, we
+        // Box<T, A> may have a non-1-ZST allocator A. In that case, we
         // cannot treat Box<T, A> as just an owned alias of `*mut T`.
-        ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+        ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => {
             build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
         }
         ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
@@ -519,7 +527,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
 fn hex_encode(data: &[u8]) -> String {
     let mut hex_string = String::with_capacity(data.len() * 2);
     for byte in data.iter() {
-        write!(&mut hex_string, "{:02x}", byte).unwrap();
+        write!(&mut hex_string, "{byte:02x}").unwrap();
     }
     hex_string
 }
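
Several hunks in this file are pure migrations to inline format args; the two
spellings are equivalent:

    let byte = 0xabu8;
    assert_eq!(format!("{:02x}", byte), format!("{byte:02x}")); // both "ab"
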
@@ -737,7 +745,10 @@ fn build_foreign_type_di_node<'ll, 'tcx>(
     debug!("build_foreign_type_di_node: {:?}", t);
 
     let &ty::Foreign(def_id) = unique_type_id.expect_ty().kind() else {
-        bug!("build_foreign_type_di_node() called with unexpected type: {:?}", unique_type_id.expect_ty());
+        bug!(
+            "build_foreign_type_di_node() called with unexpected type: {:?}",
+            unique_type_id.expect_ty()
+        );
     };
 
     build_type_with_children(
@@ -761,7 +772,7 @@ fn build_param_type_di_node<'ll, 'tcx>(
     t: Ty<'tcx>,
 ) -> DINodeCreationResult<'ll> {
     debug!("build_param_type_di_node: {:?}", t);
-    let name = format!("{:?}", t);
+    let name = format!("{t:?}");
     DINodeCreationResult {
         di_node: unsafe {
             llvm::LLVMRustDIBuilderCreateBasicType(
@@ -807,10 +818,9 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
     name_in_debuginfo.push(codegen_unit_name);
 
     debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
-    let rustc_producer =
-        format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"),);
+    let rustc_producer = format!("rustc version {}", tcx.sess.cfg_version);
     // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
-    let producer = format!("clang LLVM ({})", rustc_producer);
+    let producer = format!("clang LLVM ({rustc_producer})");
 
     let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
     let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
@@ -884,21 +894,6 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
             llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, llvm_gcov_ident.as_ptr(), val);
         }
 
-        // Insert `llvm.ident` metadata on the wasm targets since that will
-        // get hooked up to the "producer" sections `processed-by` information.
-        if tcx.sess.target.is_like_wasm {
-            let name_metadata = llvm::LLVMMDStringInContext(
-                debug_context.llcontext,
-                rustc_producer.as_ptr().cast(),
-                rustc_producer.as_bytes().len() as c_uint,
-            );
-            llvm::LLVMAddNamedMetadataOperand(
-                debug_context.llmod,
-                cstr!("llvm.ident").as_ptr(),
-                llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
-            );
-        }
-
         return unit_metadata;
     };
 
@@ -1003,14 +998,8 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
     closure_or_generator_di_node: &'ll DIType,
 ) -> SmallVec<&'ll DIType> {
     let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
-        ty::Generator(def_id, substs, _) => {
-            let upvar_tys: SmallVec<_> = substs.as_generator().prefix_tys().collect();
-            (def_id, upvar_tys)
-        }
-        ty::Closure(def_id, substs) => {
-            let upvar_tys: SmallVec<_> = substs.as_closure().upvar_tys().collect();
-            (def_id, upvar_tys)
-        }
+        ty::Generator(def_id, args, _) => (def_id, args.as_generator().prefix_tys()),
+        ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()),
         _ => {
             bug!(
                 "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
@@ -1020,9 +1009,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
     };
 
     debug_assert!(
-        up_var_tys
-            .iter()
-            .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+        up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
     );
 
     let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
@@ -1036,7 +1023,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
             build_field_di_node(
                 cx,
                 closure_or_generator_di_node,
-                capture_name,
+                capture_name.as_str(),
                 cx.size_and_align_of(up_var_ty),
                 layout.fields.offset(index),
                 DIFlags::FlagZero,
@@ -1098,7 +1085,7 @@ fn build_closure_env_di_node<'ll, 'tcx>(
     unique_type_id: UniqueTypeId<'tcx>,
 ) -> DINodeCreationResult<'ll> {
     let closure_env_type = unique_type_id.expect_ty();
-    let &ty::Closure(def_id, _substs) = closure_env_type.kind() else {
+    let &ty::Closure(def_id, _args) = closure_env_type.kind() else {
         bug!("build_closure_env_di_node() called with non-closure-type: {:?}", closure_env_type)
     };
     let containing_scope = get_namespace_for_item(cx, def_id);
@@ -1176,18 +1163,18 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     ty: Ty<'tcx>,
 ) -> SmallVec<&'ll DIType> {
-    if let ty::Adt(def, substs) = *ty.kind() {
-        if substs.types().next().is_some() {
+    if let ty::Adt(def, args) = *ty.kind() {
+        if args.types().next().is_some() {
             let generics = cx.tcx.generics_of(def.did());
             let names = get_parameter_names(cx, generics);
-            let template_params: SmallVec<_> = iter::zip(substs, names)
+            let template_params: SmallVec<_> = iter::zip(args, names)
                 .filter_map(|(kind, name)| {
-                    if let GenericArgKind::Type(ty) = kind.unpack() {
+                    kind.as_type().map(|ty| {
                         let actual_type =
                             cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
                         let actual_type_di_node = type_di_node(cx, actual_type);
                         let name = name.as_str();
-                        Some(unsafe {
+                        unsafe {
                             llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
                                 DIB(cx),
                                 None,
@@ -1195,10 +1182,8 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>(
                                 name.len(),
                                 actual_type_di_node,
                             )
-                        })
-                    } else {
-                        None
-                    }
+                        }
+                    })
                 })
                 .collect();
 
@@ -1302,7 +1287,7 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
 
     // All function pointers are described as opaque pointers. This could be improved in the future
     // by describing them as actual function pointers.
-    let void_pointer_ty = tcx.mk_imm_ptr(tcx.types.unit);
+    let void_pointer_ty = Ty::new_imm_ptr(tcx, tcx.types.unit);
     let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
     let usize_di_node = type_di_node(cx, tcx.types.usize);
     let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
@@ -1344,10 +1329,10 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
                             // Note: This code does not try to give a proper name to each method
                            //       because there might be multiple methods with the same name
                             //       (coming from different traits).
-                            (format!("__method{}", index), void_pointer_type_di_node)
+                            (format!("__method{index}"), void_pointer_type_di_node)
                         }
                         ty::VtblEntry::TraitVPtr(_) => {
-                            (format!("__super_trait_ptr{}", index), void_pointer_type_di_node)
+                            (format!("__super_trait_ptr{index}"), void_pointer_type_di_node)
                         }
                         ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
                         ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
@@ -1392,7 +1377,7 @@ fn vcall_visibility_metadata<'ll, 'tcx>(
     let trait_def_id = trait_ref_self.def_id();
     let trait_vis = cx.tcx.visibility(trait_def_id);
 
-    let cgus = cx.sess().codegen_units();
+    let cgus = cx.sess().codegen_units().as_usize();
     let single_cgu = cgus == 1;
 
     let lto = cx.sess().lto();
@@ -1517,5 +1502,5 @@ pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
     TUPLE_FIELD_NAMES
         .get(field_index)
         .map(|s| Cow::from(*s))
-        .unwrap_or_else(|| Cow::from(format!("__{}", field_index)))
+        .unwrap_or_else(|| Cow::from(format!("__{field_index}")))
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 38ad42370d3..88040557a9b 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -6,13 +6,13 @@ use rustc_codegen_ssa::{
     traits::ConstMethods,
 };
 
-use rustc_index::vec::IndexVec;
+use rustc_index::IndexVec;
 use rustc_middle::{
     bug,
     ty::{
         self,
         layout::{LayoutOf, TyAndLayout},
-        AdtDef, GeneratorSubsts, Ty,
+        AdtDef, GeneratorArgs, Ty,
     },
 };
 use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
@@ -199,7 +199,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
     let enum_type = unique_type_id.expect_ty();
     let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
         bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
-        };
+    };
 
     let enum_type_and_layout = cx.layout_of(enum_type);
     let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
@@ -667,20 +667,21 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
     generator_type_and_layout: TyAndLayout<'tcx>,
     generator_type_di_node: &'ll DIType,
 ) -> SmallVec<&'ll DIType> {
-    let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } = generator_type_and_layout.variants else {
+    let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } =
+        generator_type_and_layout.variants
+    else {
         bug!("This function only supports layouts with directly encoded tags.")
     };
 
-    let (generator_def_id, generator_substs) = match generator_type_and_layout.ty.kind() {
-        &ty::Generator(def_id, substs, _) => (def_id, substs.as_generator()),
+    let (generator_def_id, generator_args) = match generator_type_and_layout.ty.kind() {
+        &ty::Generator(def_id, args, _) => (def_id, args.as_generator()),
         _ => unreachable!(),
     };
 
-    let (generator_layout, state_specific_upvar_names) =
-        cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
+    let generator_layout = cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
 
     let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
-    let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
+    let variant_range = generator_args.variant_range(generator_def_id, cx.tcx);
     let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
 
     let tag_base_type = tag_base_type(cx, generator_type_and_layout);
@@ -690,11 +691,11 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
         generator_type_di_node,
         variant_range
             .clone()
-            .map(|variant_index| (variant_index, GeneratorSubsts::variant_name(variant_index))),
+            .map(|variant_index| (variant_index, GeneratorArgs::variant_name(variant_index))),
     );
 
     let discriminants: IndexVec<VariantIdx, DiscrResult> = {
-        let discriminants_iter = generator_substs.discriminants(generator_def_id, cx.tcx);
+        let discriminants_iter = generator_args.discriminants(generator_def_id, cx.tcx);
         let mut discriminants: IndexVec<VariantIdx, DiscrResult> =
             IndexVec::with_capacity(variant_count);
         for (variant_index, discr) in discriminants_iter {
@@ -714,7 +715,6 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
                 generator_type_and_layout,
                 generator_type_di_node,
                 generator_layout,
-                &state_specific_upvar_names,
                 &common_upvar_names,
             );
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 55a217f59f9..d3239d5c358 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -3,14 +3,14 @@ use rustc_codegen_ssa::debuginfo::{
     wants_c_like_enum_debuginfo,
 };
 use rustc_hir::def::CtorKind;
-use rustc_index::vec::IndexSlice;
+use rustc_index::IndexSlice;
 use rustc_middle::{
     bug,
-    mir::{GeneratorLayout, GeneratorSavedLocal},
+    mir::GeneratorLayout,
     ty::{
         self,
         layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
-        AdtDef, GeneratorSubsts, Ty, VariantDef,
+        AdtDef, GeneratorArgs, Ty, VariantDef,
     },
 };
 use rustc_span::Symbol;
@@ -51,7 +51,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
     let enum_type = unique_type_id.expect_ty();
     let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
         bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
-        };
+    };
 
     let enum_type_and_layout = cx.layout_of(enum_type);
 
@@ -323,10 +323,9 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
     generator_type_and_layout: TyAndLayout<'tcx>,
     generator_type_di_node: &'ll DIType,
     generator_layout: &GeneratorLayout<'tcx>,
-    state_specific_upvar_names: &IndexSlice<GeneratorSavedLocal, Option<Symbol>>,
-    common_upvar_names: &[String],
+    common_upvar_names: &IndexSlice<FieldIdx, Symbol>,
 ) -> &'ll DIType {
-    let variant_name = GeneratorSubsts::variant_name(variant_index);
+    let variant_name = GeneratorArgs::variant_name(variant_index);
     let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
         cx.tcx,
         generator_type_and_layout.ty,
@@ -335,8 +334,8 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
 
     let variant_layout = generator_type_and_layout.for_variant(cx, variant_index);
 
-    let generator_substs = match generator_type_and_layout.ty.kind() {
-        ty::Generator(_, substs, _) => substs.as_generator(),
+    let generator_args = match generator_type_and_layout.ty.kind() {
+        ty::Generator(_, args, _) => args.as_generator(),
         _ => unreachable!(),
     };
 
@@ -357,7 +356,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
                 .map(|field_index| {
                     let generator_saved_local = generator_layout.variant_fields[variant_index]
                         [FieldIdx::from_usize(field_index)];
-                    let field_name_maybe = state_specific_upvar_names[generator_saved_local];
+                    let field_name_maybe = generator_layout.field_names[generator_saved_local];
                     let field_name = field_name_maybe
                         .as_ref()
                         .map(|s| Cow::from(s.as_str()))
@@ -378,14 +377,16 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
                 .collect();
 
             // Fields that are common to all states
-            let common_fields: SmallVec<_> = generator_substs
+            let common_fields: SmallVec<_> = generator_args
                 .prefix_tys()
+                .iter()
+                .zip(common_upvar_names)
                 .enumerate()
-                .map(|(index, upvar_ty)| {
+                .map(|(index, (upvar_ty, upvar_name))| {
                     build_field_di_node(
                         cx,
                         variant_struct_type_di_node,
-                        &common_upvar_names[index],
+                        upvar_name.as_str(),
                         cx.size_and_align_of(upvar_ty),
                         generator_type_and_layout.fields.offset(index),
                         DIFlags::FlagZero,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 978141917c6..feac40d8c30 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -57,7 +57,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
     let enum_type = unique_type_id.expect_ty();
     let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
         bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
-        };
+    };
 
     let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
     let enum_type_and_layout = cx.layout_of(enum_type);
@@ -132,9 +132,9 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
     unique_type_id: UniqueTypeId<'tcx>,
 ) -> DINodeCreationResult<'ll> {
     let generator_type = unique_type_id.expect_ty();
-    let &ty::Generator(generator_def_id, _, _ ) = generator_type.kind() else {
+    let &ty::Generator(generator_def_id, _, _) = generator_type.kind() else {
         bug!("build_generator_di_node() called with non-generator type: `{:?}`", generator_type)
-        };
+    };
 
     let containing_scope = get_namespace_for_item(cx, generator_def_id);
     let generator_type_and_layout = cx.layout_of(generator_type);
@@ -155,10 +155,12 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
             DIFlags::FlagZero,
         ),
         |cx, generator_type_di_node| {
-            let (generator_layout, state_specific_upvar_names) =
-                cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
+            let generator_layout =
+                cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
 
-            let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
+            let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } =
+                generator_type_and_layout.variants
+            else {
                 bug!(
                     "Encountered generator with non-direct-tag layout: {:?}",
                     generator_type_and_layout
@@ -173,7 +175,7 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
                 .indices()
                 .map(|variant_index| {
                     // FIXME: This is problematic because just a number is not a valid identifier.
-                    //        GeneratorSubsts::variant_name(variant_index), would be consistent
+                    //        GeneratorArgs::variant_name(variant_index) would be consistent
                     //        with enums?
                     let variant_name = format!("{}", variant_index.as_usize()).into();
 
@@ -195,7 +197,6 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
                                 generator_type_and_layout,
                                 generator_type_di_node,
                                 generator_layout,
-                                &state_specific_upvar_names,
                                 &common_upvar_names,
                             ),
                         source_info,
@@ -412,13 +413,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
             enum_type_and_layout.size.bits(),
             enum_type_and_layout.align.abi.bits() as u32,
             Size::ZERO.bits(),
-            discr_value.opt_single_val().map(|value| {
-                // NOTE(eddyb) do *NOT* remove this assert, until
-                // we pass the full 128-bit value to LLVM, otherwise
-                // truncation will be silent and remain undetected.
-                assert_eq!(value as u64 as u128, value);
-                cx.const_u64(value as u64)
-            }),
+            discr_value.opt_single_val().map(|value| cx.const_u128(value)),
             DIFlags::FlagZero,
             variant_member_info.variant_struct_type_di_node,
         )
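
Passing the full value via `const_u128` retires the old truncation guard; the
failure mode that assert protected against, in two lines:

    let value: u128 = (1u128 << 64) | 5;
    assert_ne!(value as u64 as u128, value); // the high bits are silently lost
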
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 56844c7951f..d874b3ab99d 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -24,10 +24,10 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::stable_hasher::Hash128;
 use rustc_data_structures::sync::Lrc;
 use rustc_hir::def_id::{DefId, DefIdMap};
-use rustc_index::vec::IndexVec;
+use rustc_index::IndexVec;
 use rustc_middle::mir;
 use rustc_middle::ty::layout::LayoutOf;
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt};
 use rustc_session::config::{self, DebugInfo};
 use rustc_session::Session;
@@ -263,11 +263,11 @@ impl CodegenCx<'_, '_> {
     pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
         let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
             Ok(SourceFileAndLine { sf: file, line }) => {
-                let line_pos = file.line_begin_pos(pos);
+                let line_pos = file.lines()[line];
 
                 // Use 1-based indexing.
                 let line = (line + 1) as u32;
-                let col = (pos - line_pos).to_u32() + 1;
+                let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
 
                 (file, line, col)
             }
@@ -292,7 +292,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: &'ll Value,
         mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+    ) -> Option<FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>> {
         if self.sess().opts.debuginfo == DebugInfo::None {
             return None;
         }
@@ -304,8 +304,10 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             file_start_pos: BytePos(0),
             file_end_pos: BytePos(0),
         };
-        let mut fn_debug_context =
-            FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes),
+            inlined_function_scopes: Default::default(),
+        };
 
         // Fill in all the scopes, with the information from the MIR body.
         compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
@@ -322,7 +324,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let tcx = self.tcx;
 
         let def_id = instance.def_id();
-        let containing_scope = get_containing_scope(self, instance);
+        let (containing_scope, is_method) = get_containing_scope(self, instance);
         let span = tcx.def_span(def_id);
         let loc = self.lookup_debug_loc(span.lo());
         let file_metadata = file_metadata(self, &loc.file);
@@ -332,25 +334,26 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
         };
 
-        let mut name = String::new();
+        let mut name = String::with_capacity(64);
         type_names::push_item_name(tcx, def_id, false, &mut name);
 
         // Find the enclosing function, in case this is a closure.
         let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
 
-        // We look up the generics of the enclosing function and truncate the substs
+        // We look up the generics of the enclosing function and truncate the args
         // to their length in order to cut off extra stuff that might be in there for
         // closures or generators.
         let generics = tcx.generics_of(enclosing_fn_def_id);
-        let substs = instance.substs.truncate_to(tcx, generics);
+        let args = instance.args.truncate_to(tcx, generics);
 
         type_names::push_generic_params(
             tcx,
-            tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs),
+            tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+            enclosing_fn_def_id,
             &mut name,
         );
 
-        let template_parameters = get_template_parameters(self, generics, substs);
+        let template_parameters = get_template_parameters(self, generics, args);
 
         let linkage_name = &mangled_name_of_instance(self, instance).name;
         // Omit the linkage_name if it is the same as subprogram name.
@@ -378,8 +381,29 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             }
         }
 
-        unsafe {
-            return llvm::LLVMRustDIBuilderCreateFunction(
+        // When we're adding a method to a type DIE, we only want a DW_AT_declaration there, because
+        // LLVM LTO can't unify type definitions when a child DIE is a full subprogram definition.
+        // When we use this `decl` below, the subprogram definition gets created at the CU level
+        // with a DW_AT_specification pointing back to the type's declaration.
+        let decl = is_method.then(|| unsafe {
+            llvm::LLVMRustDIBuilderCreateMethod(
+                DIB(self),
+                containing_scope,
+                name.as_ptr().cast(),
+                name.len(),
+                linkage_name.as_ptr().cast(),
+                linkage_name.len(),
+                file_metadata,
+                loc.line,
+                function_type_metadata,
+                flags,
+                spflags & !DISPFlags::SPFlagDefinition,
+                template_parameters,
+            )
+        });
+
+        return unsafe {
+            llvm::LLVMRustDIBuilderCreateFunction(
                 DIB(self),
                 containing_scope,
                 name.as_ptr().cast(),
@@ -394,9 +418,9 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 spflags,
                 maybe_definition_llfn,
                 template_parameters,
-                None,
-            );
-        }
+                decl,
+            )
+        };
 
         fn get_function_signature<'ll, 'tcx>(
             cx: &CodegenCx<'ll, 'tcx>,
@@ -433,7 +457,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         ty::Array(ct, _)
                             if (*ct == cx.tcx.types.u8) || cx.layout_of(*ct).is_zst() =>
                         {
-                            cx.tcx.mk_imm_ptr(*ct)
+                            Ty::new_imm_ptr(cx.tcx, *ct)
                         }
                         _ => t,
                     };
@@ -450,23 +474,23 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         fn get_template_parameters<'ll, 'tcx>(
             cx: &CodegenCx<'ll, 'tcx>,
             generics: &ty::Generics,
-            substs: SubstsRef<'tcx>,
+            args: GenericArgsRef<'tcx>,
         ) -> &'ll DIArray {
-            if substs.types().next().is_none() {
+            if args.types().next().is_none() {
                 return create_DIArray(DIB(cx), &[]);
             }
 
             // Again, only create type information if full debuginfo is enabled
             let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
                 let names = get_parameter_names(cx, generics);
-                iter::zip(substs, names)
+                iter::zip(args, names)
                     .filter_map(|(kind, name)| {
-                        if let GenericArgKind::Type(ty) = kind.unpack() {
+                        kind.as_type().map(|ty| {
                             let actual_type =
                                 cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
                             let actual_type_metadata = type_di_node(cx, actual_type);
                             let name = name.as_str();
-                            Some(unsafe {
+                            unsafe {
                                 Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
                                     DIB(cx),
                                     None,
@@ -474,10 +498,8 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                                     name.len(),
                                     actual_type_metadata,
                                 ))
-                            })
-                        } else {
-                            None
-                        }
+                            }
+                        })
                     })
                     .collect()
             } else {
@@ -495,57 +517,55 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             names
         }
 
+        /// Returns the containing scope, plus `true` if that scope is a type DIE
+        /// (used for "class" methods), or `false` if it is a plain namespace scope.
         fn get_containing_scope<'ll, 'tcx>(
             cx: &CodegenCx<'ll, 'tcx>,
             instance: Instance<'tcx>,
-        ) -> &'ll DIScope {
+        ) -> (&'ll DIScope, bool) {
             // First, let's see if this is a method within an inherent impl. Because
             // if yes, we want to make the result subroutine DIE a child of the
             // subroutine's self-type.
-            let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
+            if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) {
                 // If the method does *not* belong to a trait, proceed
                 if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
-                    let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
-                        instance.substs,
+                    let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions(
+                        instance.args,
                         ty::ParamEnv::reveal_all(),
-                        cx.tcx.type_of(impl_def_id).skip_binder(),
+                        cx.tcx.type_of(impl_def_id),
                     );
 
                     // Only "class" methods are generally understood by LLVM,
                     // so avoid methods on other types (e.g., `<*mut T>::null`).
-                    match impl_self_ty.kind() {
-                        ty::Adt(def, ..) if !def.is_box() => {
-                            // Again, only create type information if full debuginfo is enabled
-                            if cx.sess().opts.debuginfo == DebugInfo::Full
-                                && !impl_self_ty.needs_subst()
-                            {
-                                Some(type_di_node(cx, impl_self_ty))
-                            } else {
-                                Some(namespace::item_namespace(cx, def.did()))
-                            }
+                    if let ty::Adt(def, ..) = impl_self_ty.kind()
+                        && !def.is_box()
+                    {
+                        // Again, only create type information if full debuginfo is enabled
+                        if cx.sess().opts.debuginfo == DebugInfo::Full && !impl_self_ty.has_param()
+                        {
+                            return (type_di_node(cx, impl_self_ty), true);
+                        } else {
+                            return (namespace::item_namespace(cx, def.did()), false);
                         }
-                        _ => None,
                     }
                 } else {
                     // For trait method impls we still use the "parallel namespace"
                     // strategy
-                    None
                 }
-            });
+            }
 
-            self_type.unwrap_or_else(|| {
-                namespace::item_namespace(
-                    cx,
-                    DefId {
-                        krate: instance.def_id().krate,
-                        index: cx
-                            .tcx
-                            .def_key(instance.def_id())
-                            .parent
-                            .expect("get_containing_scope: missing parent?"),
-                    },
-                )
-            })
+            let scope = namespace::item_namespace(
+                cx,
+                DefId {
+                    krate: instance.def_id().krate,
+                    index: cx
+                        .tcx
+                        .def_key(instance.def_id())
+                        .parent
+                        .expect("get_containing_scope: missing parent?"),
+                },
+            );
+            (scope, false)
         }
     }
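
The declaration/definition split above yields DWARF shaped roughly as follows (a paraphrased sketch, not literal llvm-dwarfdump output):

    DW_TAG_structure_type "Foo"
        DW_TAG_subprogram "method"        DW_AT_declaration (true)
    ...
    DW_TAG_subprogram "method"            (emitted at the compile-unit level)
        DW_AT_specification -> the declaration DIE above

Keeping only a declaration under the type DIE is what lets LTO merge duplicate copies of the type: the full subprogram definitions hang off the compile units, which are never unified.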
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
index d5ea48c311b..fa61c7dde18 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -28,7 +28,7 @@ pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DISco
         .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
 
     let namespace_name_string = {
-        let mut output = String::new();
+        let mut output = String::with_capacity(64);
         type_names::push_item_name(cx.tcx, def_id, false, &mut output);
         output
     };
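
The `with_capacity(64)` change above is a small allocation tweak: namespace paths are built by appending several short fragments through `push_item_name`, so pre-sizing the buffer avoids a few reallocations. A self-contained sketch of the pattern (64 is a heuristic starting size, not a measured bound):

    // Sketch: pre-sizing a String that receives several short appends.
    fn build_path(parts: &[&str]) -> String {
        let mut output = String::with_capacity(64);
        for (i, part) in parts.iter().enumerate() {
            if i > 0 {
                output.push_str("::");
            }
            output.push_str(part);
        }
        output
    }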
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 6bcd3e5bf58..c758010c581 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -82,8 +82,8 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
         ty::Foreign(_) => {
             // Assert that pointers to foreign types really are thin:
             debug_assert_eq!(
-                cx.size_of(cx.tcx.mk_imm_ptr(pointee_tail_ty)),
-                cx.size_of(cx.tcx.mk_imm_ptr(cx.tcx.types.u8))
+                cx.size_of(Ty::new_imm_ptr(cx.tcx, pointee_tail_ty)),
+                cx.size_of(Ty::new_imm_ptr(cx.tcx, cx.tcx.types.u8))
             );
             None
         }
@@ -91,8 +91,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
             // For all other pointee types we should already have returned None
             // at the beginning of the function.
             panic!(
-                "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {:?}",
-                pointee_tail_ty
+                "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {pointee_tail_ty:?}"
             )
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index 6a575095f7e..164b12cf8d4 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -19,8 +19,11 @@ use crate::llvm::AttributePlace::Function;
 use crate::type_::Type;
 use crate::value::Value;
 use rustc_codegen_ssa::traits::TypeMembershipMethods;
-use rustc_middle::ty::Ty;
-use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi};
+use rustc_middle::ty::{Instance, Ty};
+use rustc_symbol_mangling::typeid::{
+    kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance,
+    TypeIdOptions,
+};
 use smallvec::SmallVec;
 
 /// Declare a function.
@@ -116,7 +119,12 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     ///
     /// If there’s a value with the same name already declared, the function will
     /// update the declaration and return existing Value instead.
-    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
+    pub fn declare_fn(
+        &self,
+        name: &str,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        instance: Option<Instance<'tcx>>,
+    ) -> &'ll Value {
         debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
 
         // Function addresses in Rust are never significant, allowing functions to
@@ -132,13 +140,54 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         fn_abi.apply_attrs_llfn(self, llfn);
 
         if self.tcx.sess.is_sanitizer_cfi_enabled() {
-            let typeid = typeid_for_fnabi(self.tcx, fn_abi);
-            self.set_type_metadata(llfn, typeid);
+            if let Some(instance) = instance {
+                let typeid = typeid_for_instance(self.tcx, &instance, TypeIdOptions::empty());
+                self.set_type_metadata(llfn, typeid);
+                let typeid =
+                    typeid_for_instance(self.tcx, &instance, TypeIdOptions::GENERALIZE_POINTERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid =
+                    typeid_for_instance(self.tcx, &instance, TypeIdOptions::NORMALIZE_INTEGERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid = typeid_for_instance(
+                    self.tcx,
+                    &instance,
+                    TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS,
+                );
+                self.add_type_metadata(llfn, typeid);
+            } else {
+                let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::empty());
+                self.set_type_metadata(llfn, typeid);
+                let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::GENERALIZE_POINTERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::NORMALIZE_INTEGERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid = typeid_for_fnabi(
+                    self.tcx,
+                    fn_abi,
+                    TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS,
+                );
+                self.add_type_metadata(llfn, typeid);
+            }
         }
 
         if self.tcx.sess.is_sanitizer_kcfi_enabled() {
-            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
-            self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+            // LLVM KCFI does not support multiple !kcfi_type attachments
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            if let Some(instance) = instance {
+                let kcfi_typeid = kcfi_typeid_for_instance(self.tcx, &instance, options);
+                self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+            } else {
+                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+                self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+            }
         }
 
         llfn
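
For CFI, `declare_fn` now attaches one type id per option set: the first is installed as the primary type metadata (`set_type_metadata`) and the other three are added alongside it (`add_type_metadata`). A minimal sketch of the enumeration, with a stand-in `Options` type (the real `TypeIdOptions` is a flags type from `rustc_symbol_mangling`):

    // Sketch: the four option sets emitted above, in the same order.
    #[derive(Clone, Copy, Debug)]
    struct Options {
        generalize_pointers: bool,
        normalize_integers: bool,
    }

    fn cfi_option_sets() -> [Options; 4] {
        [
            Options { generalize_pointers: false, normalize_integers: false },
            Options { generalize_pointers: true, normalize_integers: false },
            Options { generalize_pointers: false, normalize_integers: true },
            Options { generalize_pointers: true, normalize_integers: true },
        ]
    }

KCFI, by contrast, derives a single option set from the session flags, because LLVM accepts only one !kcfi_type attachment per function.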
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index bae88d94293..665d195790c 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -50,9 +50,15 @@ pub(crate) struct SymbolAlreadyDefined<'a> {
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_llvm_invalid_minimum_alignment)]
-pub(crate) struct InvalidMinimumAlignment {
-    pub err: String,
+#[diag(codegen_llvm_invalid_minimum_alignment_not_power_of_two)]
+pub(crate) struct InvalidMinimumAlignmentNotPowerOfTwo {
+    pub align: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_invalid_minimum_alignment_too_large)]
+pub(crate) struct InvalidMinimumAlignmentTooLarge {
+    pub align: u64,
 }
 
 #[derive(Diagnostic)]
@@ -67,13 +73,16 @@ pub(crate) struct ErrorWritingDEFFile {
 
 #[derive(Diagnostic)]
 #[diag(codegen_llvm_error_calling_dlltool)]
-pub(crate) struct ErrorCallingDllTool {
+pub(crate) struct ErrorCallingDllTool<'a> {
+    pub dlltool_path: Cow<'a, str>,
     pub error: std::io::Error,
 }
 
 #[derive(Diagnostic)]
 #[diag(codegen_llvm_dlltool_fail_import_library)]
 pub(crate) struct DlltoolFailImportLibrary<'a> {
+    pub dlltool_path: Cow<'a, str>,
+    pub dlltool_args: String,
     pub stdout: Cow<'a, str>,
     pub stderr: Cow<'a, str>,
 }
@@ -130,6 +139,10 @@ pub(crate) struct LtoDisallowed;
 pub(crate) struct LtoDylib;
 
 #[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
 #[diag(codegen_llvm_lto_bitcode_from_rlib)]
 pub(crate) struct LtoBitcodeFromRlib {
     pub llvm_err: String,
@@ -195,6 +208,7 @@ pub(crate) struct FromLlvmOptimizationDiag<'a> {
     pub line: std::ffi::c_uint,
     pub column: std::ffi::c_uint,
     pub pass_name: &'a str,
+    pub kind: &'a str,
     pub message: &'a str,
 }
 
@@ -216,3 +230,9 @@ pub(crate) struct WriteBytecode<'a> {
 pub(crate) struct CopyBitcode {
     pub err: std::io::Error,
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+    pub algorithm: &'static str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 012e25884ca..a97b803fc64 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::va_arg::emit_va_arg;
 use crate::value::Value;
 
-use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
 use rustc_codegen_ssa::mir::operand::OperandRef;
@@ -15,7 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir as hir;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, symbol::kw, Span, Symbol};
 use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
@@ -90,7 +90,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         let tcx = self.tcx;
         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
 
-        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
             bug!("expected fn item type, found {}", callee_ty);
         };
 
@@ -110,6 +110,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call(
                     simple_ty,
                     None,
+                    None,
                     simple_fn,
                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                     None,
@@ -162,11 +163,10 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             sym::volatile_load | sym::unaligned_volatile_load => {
-                let tp_ty = substs.type_at(0);
+                let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
                     let llty = ty.llvm_type(self);
-                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
                     self.volatile_load(llty, ptr)
                 } else {
                     self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
@@ -229,22 +229,22 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                         sym::ctlz | sym::cttz => {
                             let y = self.const_bool(false);
                             self.call_intrinsic(
-                                &format!("llvm.{}.i{}", name, width),
+                                &format!("llvm.{name}.i{width}"),
                                 &[args[0].immediate(), y],
                             )
                         }
                         sym::ctlz_nonzero => {
                             let y = self.const_bool(true);
-                            let llvm_name = &format!("llvm.ctlz.i{}", width);
+                            let llvm_name = &format!("llvm.ctlz.i{width}");
                             self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
                         }
                         sym::cttz_nonzero => {
                             let y = self.const_bool(true);
-                            let llvm_name = &format!("llvm.cttz.i{}", width);
+                            let llvm_name = &format!("llvm.cttz.i{width}");
                             self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
                         }
                         sym::ctpop => self.call_intrinsic(
-                            &format!("llvm.ctpop.i{}", width),
+                            &format!("llvm.ctpop.i{width}"),
                             &[args[0].immediate()],
                         ),
                         sym::bswap => {
@@ -252,13 +252,13 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                                 args[0].immediate() // byte swap a u8/i8 is just a no-op
                             } else {
                                 self.call_intrinsic(
-                                    &format!("llvm.bswap.i{}", width),
+                                    &format!("llvm.bswap.i{width}"),
                                     &[args[0].immediate()],
                                 )
                             }
                         }
                         sym::bitreverse => self.call_intrinsic(
-                            &format!("llvm.bitreverse.i{}", width),
+                            &format!("llvm.bitreverse.i{width}"),
                             &[args[0].immediate()],
                         ),
                         sym::rotate_left | sym::rotate_right => {
@@ -297,7 +297,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
             sym::raw_eq => {
                 use abi::Abi::*;
-                let tp_ty = substs.type_at(0);
+                let tp_ty = fn_args.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
                 let use_integer_compare = match layout.abi() {
                     Scalar(_) | ScalarPair(_, _) => true,
@@ -316,18 +316,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     self.const_bool(true)
                 } else if use_integer_compare {
                     let integer_ty = self.type_ix(layout.size().bits());
-                    let ptr_ty = self.type_ptr_to(integer_ty);
-                    let a_ptr = self.bitcast(a, ptr_ty);
-                    let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
-                    let b_ptr = self.bitcast(b, ptr_ty);
-                    let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
+                    let a_val = self.load(integer_ty, a, layout.align().abi);
+                    let b_val = self.load(integer_ty, b, layout.align().abi);
                     self.icmp(IntPredicate::IntEQ, a_val, b_val)
                 } else {
-                    let i8p_ty = self.type_i8p();
-                    let a_ptr = self.bitcast(a, i8p_ty);
-                    let b_ptr = self.bitcast(b, i8p_ty);
                     let n = self.const_usize(layout.size().bytes());
-                    let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
+                    let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
                     match self.cx.sess().target.arch.as_ref() {
                         "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
                         _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
@@ -335,6 +329,16 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
             }
 
+            sym::compare_bytes => {
+                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                let cmp = self.call_intrinsic(
+                    "memcmp",
+                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
+                );
+                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
+                self.sext(cmp, self.type_ix(32))
+            }
+
             sym::black_box => {
                 args[0].val.store(self, result);
                 let result_val_span = [result.llval];
@@ -372,7 +376,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             _ if name.as_str().starts_with("simd_") => {
-                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                match generic_simd_intrinsic(
+                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+                ) {
                     Ok(llval) => llval,
                     Err(()) => return,
                 }
@@ -382,10 +388,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
-                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
-                let ptr = self.pointercast(result.llval, ptr_llty);
-                self.store(llval, ptr, result.align);
+            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
+                self.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -409,9 +413,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
         // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
         // optimization pass replaces calls to this intrinsic with code to test type membership.
-        let i8p_ty = self.type_i8p();
-        let bitcast = self.bitcast(pointer, i8p_ty);
-        self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
     }
 
     fn type_checked_load(
@@ -443,14 +445,16 @@ fn try_intrinsic<'ll>(
     dest: &'ll Value,
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
-        bx.call(try_func_ty, None, try_func, &[data], None);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.call(try_func_ty, None, None, try_func, &[data], None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
         let ret_align = bx.tcx().data_layout.i32_align.abi;
         bx.store(bx.const_i32(0), dest, ret_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, try_func, data, catch_func, dest);
+    } else if wants_wasm_eh(bx.sess()) {
+        codegen_wasm_try(bx, try_func, data, catch_func, dest);
     } else if bx.sess().target.os == "emscripten" {
         codegen_emcc_try(bx, try_func, data, catch_func, dest);
     } else {
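
Most of the mechanical churn in this file is the opaque-pointer migration: `type_i8p()` and `type_ptr_to(..)` collapse into a single `type_ptr()`, and `bitcast`/`pointercast` calls between pointer types disappear because, with one opaque pointer type per address space, they would be identity operations. Schematically:

    // Before (typed pointers):
    //   let p = bx.type_i8p();            // i8*
    //   let q = bx.type_ptr_to(llty);     // T*
    //   let v = bx.pointercast(val, q);   // cast between pointer types
    //
    // After (opaque pointers):
    //   let p = bx.type_ptr();            // ptr
    //   let v = val;                      // no cast needed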
@@ -541,9 +545,9 @@ fn codegen_msvc_try<'ll>(
         //
         // More information can be found in libstd's seh.rs implementation.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let slot = bx.alloca(bx.type_i8p(), ptr_align);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
-        bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None);
+        let slot = bx.alloca(bx.type_ptr(), ptr_align);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
 
         bx.switch_to_block(normal);
         bx.ret(bx.const_i32(0));
@@ -565,10 +569,10 @@ fn codegen_msvc_try<'ll>(
         //
         // When modifying, make sure that the type_name string exactly matches
         // the one used in library/panic_unwind/src/seh.rs.
-        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
         let type_name = bx.const_bytes(b"rust_panic\0");
         let type_info =
-            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
         let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
         unsafe {
             llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
@@ -585,17 +589,91 @@ fn codegen_msvc_try<'ll>(
         bx.switch_to_block(catchpad_rust);
         let flags = bx.const_i32(8);
         let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
-        bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet));
+        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
         bx.catch_ret(&funclet, caught);
 
         // The flag value of 64 indicates a "catch-all".
         bx.switch_to_block(catchpad_foreign);
         let flags = bx.const_i32(64);
-        let null = bx.const_null(bx.type_i8p());
+        let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null, flags, null]);
-        bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
+        bx.catch_ret(&funclet, caught);
+
+        bx.switch_to_block(caught);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// WASM's definition of the `rust_try` function.
+fn codegen_wasm_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
+
+        let normal = bx.append_sibling_block("normal");
+        let catchswitch = bx.append_sibling_block("catchswitch");
+        let catchpad = bx.append_sibling_block("catchpad");
+        let caught = bx.append_sibling_block("caught");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
+        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad] unwind to caller
+        //
+        //   catchpad:
+        //      %tok = catchpad within %cs [null]
+        //      %ptr = call @llvm.wasm.get.exception(token %tok)
+        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
+        //      call %catch_func(%data, %ptr)
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+
+        bx.switch_to_block(normal);
+        bx.ret(bx.const_i32(0));
+
+        bx.switch_to_block(catchswitch);
+        let cs = bx.catch_switch(None, None, &[catchpad]);
+
+        bx.switch_to_block(catchpad);
+        let null = bx.const_null(bx.type_ptr());
+        let funclet = bx.catch_pad(cs, &[null]);
+
+        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
+        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
+
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
         bx.catch_ret(&funclet, caught);
 
         bx.switch_to_block(caught);
@@ -604,7 +682,7 @@ fn codegen_msvc_try<'ll>(
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
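
`codegen_wasm_try` implements the same `rust_try` contract as the MSVC and GNU paths, using Wasm exception-handling instructions: run `try_func(data)` and return 0 on normal exit, or deliver the in-flight exception to `catch_func(data, exception_ptr)` and return 1. At the library level this is the machinery behind `std::panic::catch_unwind`; a minimal illustration of the surface behavior:

    use std::panic;

    fn demo() {
        let result = panic::catch_unwind(|| {
            panic!("boom");
        });
        // The panic is caught and surfaced as an Err instead of aborting.
        assert!(result.is_err());
    }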
@@ -646,8 +724,8 @@ fn codegen_gnu_try<'ll>(
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
-        bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -659,19 +737,19 @@ fn codegen_gnu_try<'ll>(
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
         bx.switch_to_block(catch);
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
-        let tydesc = bx.const_null(bx.type_i8p());
+        let tydesc = bx.const_null(bx.type_ptr());
         bx.add_clause(vals, tydesc);
         let ptr = bx.extract_value(vals, 0);
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
-        bx.call(catch_ty, None, catch_func, &[data, ptr], None);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -710,8 +788,8 @@ fn codegen_emcc_try<'ll>(
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
-        bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -723,10 +801,10 @@ fn codegen_emcc_try<'ll>(
         // the landing pad clauses the exception's type had been matched to.
         bx.switch_to_block(catch);
         let tydesc = bx.eh_catch_typeinfo();
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
         bx.add_clause(vals, tydesc);
-        bx.add_clause(vals, bx.const_null(bx.type_i8p()));
+        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
         let ptr = bx.extract_value(vals, 0);
         let selector = bx.extract_value(vals, 1);
 
@@ -739,7 +817,7 @@ fn codegen_emcc_try<'ll>(
         // create an alloca and pass a pointer to that.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+        let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
         let catch_data = bx.alloca(catch_data_type, ptr_align);
         let catch_data_0 =
             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
@@ -747,16 +825,15 @@ fn codegen_emcc_try<'ll>(
         let catch_data_1 =
             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
         bx.store(is_rust_panic, catch_data_1, i8_align);
-        let catch_data = bx.bitcast(catch_data, bx.type_i8p());
 
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
-        bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -771,7 +848,7 @@ fn gen_fn<'ll, 'tcx>(
 ) -> (&'ll Type, &'ll Value) {
     let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
     let llty = fn_abi.llvm_type(cx);
-    let llfn = cx.declare_fn(name, fn_abi);
+    let llfn = cx.declare_fn(name, fn_abi, None);
     cx.set_frame_pointer_type(llfn);
     cx.apply_target_cpu_attr(llfn);
     // FIXME(eddyb) find a nicer way to do this.
@@ -796,23 +873,29 @@ fn get_rust_try_fn<'ll, 'tcx>(
 
     // Define the type up front for the signature of the rust_try function.
     let tcx = cx.tcx;
-    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
     // `unsafe fn(*mut i8) -> ()`
-    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p],
-        tcx.mk_unit(),
-        false,
-        hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let try_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p],
+            Ty::new_unit(tcx),
+            false,
+            hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(*mut i8, *mut i8) -> ()`
-    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p, i8p],
-        tcx.mk_unit(),
-        false,
-        hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let catch_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p, i8p],
+            Ty::new_unit(tcx),
+            false,
+            hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
     let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
         [try_fn_ty, i8p, catch_fn_ty],
@@ -830,6 +913,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     name: Symbol,
     callee_ty: Ty<'tcx>,
+    fn_args: GenericArgsRef<'tcx>,
     args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
     llret_ty: &'ll Type,
@@ -884,8 +968,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
                 let int_ty = bx.type_ix(expected_bytes * 8);
-                let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
-                bx.load(int_ty, ptr, Align::ONE)
+                bx.load(int_ty, place.llval, Align::ONE)
             }
             _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                 span,
@@ -950,28 +1033,70 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         ));
     }
 
-    if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
-        // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
-        // If there is no suffix, use the index array length.
-        let n: u64 = if stripped.is_empty() {
-            // Make sure this is actually an array, since typeck only checks the length-suffixed
-            // version of this intrinsic.
-            match args[2].layout.ty.kind() {
-                ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
-                    len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
-                        || span_bug!(span, "could not evaluate shuffle index array length"),
-                    )
+    if name == sym::simd_shuffle_generic {
+        let idx = fn_args[2]
+            .expect_const()
+            .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+            .unwrap()
+            .unwrap_branch();
+        let n = idx.len() as u64;
+
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let total_len = in_len * 2;
+
+        let indices: Option<Vec<_>> = idx
+            .iter()
+            .enumerate()
+            .map(|(arg_idx, val)| {
+                let idx = val.unwrap_leaf().try_to_i32().unwrap();
+                if idx >= i32::try_from(total_len).unwrap() {
+                    bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+                        span,
+                        name,
+                        arg_idx: arg_idx as u64,
+                        total_len: total_len.into(),
+                    });
+                    None
+                } else {
+                    Some(bx.const_i32(idx))
                 }
-                _ => return_error!(InvalidMonomorphization::SimdShuffle {
-                    span,
-                    name,
-                    ty: args[2].layout.ty
-                }),
-            }
-        } else {
-            stripped.parse().unwrap_or_else(|_| {
-                span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
             })
+            .collect();
+        let Some(indices) = indices else {
+            return Ok(bx.const_null(llret_ty));
+        };
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            bx.const_vector(&indices),
+        ));
+    }
+
+    if name == sym::simd_shuffle {
+        // Make sure this is actually an array, since typeck only checks the length-suffixed
+        // version of this intrinsic.
+        let n: u64 = match args[2].layout.ty.kind() {
+            ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+                len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+                    || span_bug!(span, "could not evaluate shuffle index array length"),
+                )
+            }
+            _ => return_error!(InvalidMonomorphization::SimdShuffle {
+                span,
+                name,
+                ty: args[2].layout.ty
+            }),
         };
 
         require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
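
`simd_shuffle_generic` receives its lane indices as a const generic rather than as a value argument, which is why the arm above can evaluate them and reject out-of-bounds lanes during monomorphization. On nightly, portable-simd's `simd_swizzle!` macro is the usual way such shuffles are written (a sketch; requires the `portable_simd` feature, and no claim is made about which shuffle intrinsic the macro lowers to):

    #![feature(portable_simd)]
    use std::simd::{simd_swizzle, u32x4};

    fn demo() {
        let a = u32x4::from_array([0, 1, 2, 3]);
        let b = u32x4::from_array([4, 5, 6, 7]);
        // Indices 0..4 pick lanes from `a`, 4..8 from `b`.
        let c = simd_swizzle!(a, b, [0, 4, 1, 5]);
        assert_eq!(c.to_array(), [0, 4, 1, 5]);
    }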
@@ -1134,7 +1259,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
                 bx.store(ze, ptr, Align::ONE);
                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
-                let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
                 return Ok(bx.load(array_ty, ptr, Align::ONE));
             }
             _ => return_error!(InvalidMonomorphization::CannotReturn {
@@ -1200,11 +1324,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
             _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
         };
-        let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+        let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
         let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
         let c = bx.call(
             fn_ty,
             None,
+            None,
             f,
             &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
             None,
@@ -1237,50 +1362,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     // FIXME: use:
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
-    fn llvm_vector_str(
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        no_pointers: usize,
-        bx: &Builder<'_, '_, '_>,
-    ) -> String {
-        let p0s: String = "p0".repeat(no_pointers);
+    fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
         match *elem_ty.kind() {
             ty::Int(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: IntTy::Isize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
             ty::Uint(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: UIntTy::Usize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
-            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+            ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
+            ty::RawPtr(_) => format!("v{}p0", vec_len),
             _ => unreachable!(),
         }
     }
 
-    fn llvm_vector_ty<'ll>(
-        cx: &CodegenCx<'ll, '_>,
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        mut no_pointers: usize,
-    ) -> &'ll Type {
-        // FIXME: use cx.layout_of(ty).llvm_type() ?
-        let mut elem_ty = match *elem_ty.kind() {
+    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
+        let elem_ty = match *elem_ty.kind() {
             ty::Int(v) => cx.type_int_from_ty(v),
             ty::Uint(v) => cx.type_uint_from_ty(v),
             ty::Float(v) => cx.type_float_from_ty(v),
+            ty::RawPtr(_) => cx.type_ptr(),
             _ => unreachable!(),
         };
-        while no_pointers > 0 {
-            elem_ty = cx.type_ptr_to(elem_ty);
-            no_pointers -= 1;
-        }
         cx.type_vector(elem_ty, vec_len)
     }
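
With opaque pointers the pointee type no longer appears in intrinsic name mangling: a vector of pointers is just `v<N>p0`, which is why `llvm_vector_str` loses its `no_pointers` parameter. A tiny reproduction of the resulting suffixes (illustrative only; the real code matches on `Ty` kinds):

    // Suffixes in the style produced by llvm_vector_str after this change.
    fn suffix_for(elem: &str, lanes: u64) -> String {
        match elem {
            "ptr" => format!("v{lanes}p0"),
            scalar => format!("v{lanes}{scalar}"), // e.g. "i32", "f64"
        }
    }

    // suffix_for("f32", 4) == "v4f32" and suffix_for("ptr", 4) == "v4p0",
    // so a 4-lane f32 gather is named "llvm.masked.gather.v4f32.v4p0".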
 
@@ -1335,47 +1444,26 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
         );
 
-        // This counts how many pointers
-        fn ptr_count(t: Ty<'_>) -> usize {
-            match t.kind() {
-                ty::RawPtr(p) => 1 + ptr_count(p.ty),
-                _ => 0,
-            }
-        }
-
-        // Non-ptr type
-        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-            match t.kind() {
-                ty::RawPtr(p) => non_ptr(p.ty),
-                _ => t,
-            }
-        }
-
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (pointer_count, underlying_ty) = match element_ty1.kind() {
-            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
-            _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ExpectedElementType {
-                        span,
-                        name,
-                        expected_element: element_ty1,
-                        second_arg: arg_tys[1],
-                        in_elem,
-                        in_ty,
-                        mutability: ExpectedPointerMutability::Not,
-                    }
-                );
-                unreachable!();
+
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Not,
             }
-        };
-        assert!(pointer_count > 0);
-        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-        assert_eq!(underlying_ty, non_ptr(element_ty0));
+        );
 
         // The element type of the third argument must be a signed integer type of any width:
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
@@ -1406,15 +1494,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         };
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
 
         let llvm_intrinsic =
-            format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+            format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
         let fn_ty = bx.type_func(
             &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
             llvm_elem_vec_ty,
@@ -1423,6 +1511,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let v = bx.call(
             fn_ty,
             None,
+            None,
             f,
             &[args[1].immediate(), alignment, mask, args[0].immediate()],
             None,
@@ -1474,50 +1563,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         );
 
-        // This counts how many pointers
-        fn ptr_count(t: Ty<'_>) -> usize {
-            match t.kind() {
-                ty::RawPtr(p) => 1 + ptr_count(p.ty),
-                _ => 0,
-            }
-        }
-
-        // Non-ptr type
-        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-            match t.kind() {
-                ty::RawPtr(p) => non_ptr(p.ty),
-                _ => t,
-            }
-        }
-
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-        let (pointer_count, underlying_ty) = match element_ty1.kind() {
-            ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => {
-                (ptr_count(element_ty1), non_ptr(element_ty1))
-            }
-            _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ExpectedElementType {
-                        span,
-                        name,
-                        expected_element: element_ty1,
-                        second_arg: arg_tys[1],
-                        in_elem,
-                        in_ty,
-                        mutability: ExpectedPointerMutability::Mut,
-                    }
-                );
-                unreachable!();
+
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p)
+                    if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Mut,
             }
-        };
-        assert!(pointer_count > 0);
-        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-        assert_eq!(underlying_ty, non_ptr(element_ty0));
+        );
 
         // The element type of the third argument must be a signed integer type of any width:
         match element_ty2.kind() {
@@ -1549,21 +1616,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ret_t = bx.type_void();
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
 
         let llvm_intrinsic =
-            format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+            format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
         let fn_ty =
             bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
         let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
         let v = bx.call(
             fn_ty,
             None,
+            None,
             f,
             &[args[0].immediate(), args[1].immediate(), alignment, mask],
             None,
@@ -1771,11 +1839,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         }
 
-        if in_elem == out_elem {
-            return Ok(args[0].immediate());
-        } else {
-            return Ok(bx.pointercast(args[0].immediate(), llret_ty));
-        }
+        return Ok(args[0].immediate());
     }
 
     if name == sym::simd_expose_addr {
@@ -1988,6 +2052,52 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         simd_neg: Int => neg, Float => fneg;
     }
 
+    // Unary integer intrinsics
+    if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_cttz) {
+        let vec_ty = bx.cx.type_vector(
+            match *in_elem.kind() {
+                ty::Int(i) => bx.cx.type_int_from_ty(i),
+                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
+                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
+                    span,
+                    name,
+                    in_ty,
+                    in_elem
+                }),
+            },
+            in_len as u64,
+        );
+        let intrinsic_name = match name {
+            sym::simd_bswap => "bswap",
+            sym::simd_bitreverse => "bitreverse",
+            sym::simd_ctlz => "ctlz",
+            sym::simd_cttz => "cttz",
+            _ => unreachable!(),
+        };
+        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
+        let llvm_intrinsic = &format!("llvm.{intrinsic_name}.v{in_len}i{int_size}");
+
+        return if name == sym::simd_bswap && int_size == 8 {
+            // byte swap is a no-op for i8/u8
+            Ok(args[0].immediate())
+        } else if matches!(name, sym::simd_ctlz | sym::simd_cttz) {
+            let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
+            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+            Ok(bx.call(
+                fn_ty,
+                None,
+                None,
+                f,
+                &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)],
+                None,
+            ))
+        } else {
+            let fn_ty = bx.type_func(&[vec_ty], vec_ty);
+            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None))
+        };
+    }
+
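
The unary arm mangles the element width into the LLVM intrinsic name the same way as the vector suffixes sketched earlier. For an 8-lane 16-bit element vector the names come out as:

    // simd_bswap      -> llvm.bswap.v8i16
    // simd_bitreverse -> llvm.bitreverse.v8i16
    // simd_ctlz       -> llvm.ctlz.v8i16  (plus an i1 false: zero input is defined)
    // simd_cttz       -> llvm.cttz.v8i16  (plus an i1 false)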
     if name == sym::simd_arith_offset {
         // This also checks that the first operand is a ptr type.
         let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
@@ -2037,7 +2147,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
         let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-        let v = bx.call(fn_ty, None, f, &[lhs, rhs], None);
+        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None);
         return Ok(v);
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 8305a0a4c28..59d1ea05d8a 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -10,6 +10,8 @@
 #![feature(iter_intersperse)]
 #![feature(let_chains)]
 #![feature(never_type)]
+#![feature(slice_group_by)]
+#![feature(impl_trait_in_assoc_type)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
@@ -20,6 +22,7 @@ extern crate rustc_macros;
 #[macro_use]
 extern crate tracing;
 
+use back::owned_target_machine::OwnedTargetMachine;
 use back::write::{create_informational_target_machine, create_target_machine};
 
 use errors::ParseTargetMachineConfig;
@@ -27,28 +30,30 @@ pub use llvm_util::target_features;
 use rustc_ast::expand::allocator::AllocatorKind;
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
 use rustc_codegen_ssa::back::write::{
-    CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
 };
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::ModuleCodegen;
 use rustc_codegen_ssa::{CodegenResults, CompiledModule};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage};
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::TyCtxt;
-use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
+use rustc_middle::util::Providers;
+use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
 use rustc_session::Session;
 use rustc_span::symbol::Symbol;
 
 use std::any::Any;
 use std::ffi::CStr;
+use std::io::Write;
 
 mod back {
     pub mod archive;
     pub mod lto;
+    pub mod owned_target_machine;
     mod profiling;
     pub mod write;
 }
@@ -139,18 +144,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
         back::write::target_machine_factory(sess, optlvl, target_features)
     }
 
-    fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
-    where
-        F: FnOnce() -> T,
-        F: Send + 'static,
-        T: Send + 'static,
-    {
-        std::thread::spawn(move || {
-            let _profiler = TimeTraceProfiler::new(time_trace);
-            f()
-        })
-    }
-
     fn spawn_named_thread<F, T>(
         time_trace: bool,
         name: String,
@@ -171,13 +164,34 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
 impl WriteBackendMethods for LlvmCodegenBackend {
     type Module = ModuleLlvm;
     type ModuleBuffer = back::lto::ModuleBuffer;
-    type TargetMachine = &'static mut llvm::TargetMachine;
+    type TargetMachine = OwnedTargetMachine;
     type TargetMachineError = crate::errors::LlvmError<'static>;
     type ThinData = back::lto::ThinData;
     type ThinBuffer = back::lto::ThinBuffer;
     fn print_pass_timings(&self) {
         unsafe {
-            llvm::LLVMRustPrintPassTimings();
+            let mut size = 0;
+            let cstr = llvm::LLVMRustPrintPassTimings(&mut size as *mut usize);
+            if cstr.is_null() {
+                println!("failed to get pass timings");
+            } else {
+                let timings = std::slice::from_raw_parts(cstr as *const u8, size);
+                std::io::stdout().write_all(timings).unwrap();
+                libc::free(cstr as *mut _);
+            }
+        }
+    }
+    fn print_statistics(&self) {
+        unsafe {
+            let mut size = 0;
+            let cstr = llvm::LLVMRustPrintStatistics(&mut size as *mut usize);
+            if cstr.is_null() {
+                println!("failed to get pass stats");
+            } else {
+                let stats = std::slice::from_raw_parts(cstr as *const u8, size);
+                std::io::stdout().write_all(stats).unwrap();
+                libc::free(cstr as *mut _);
+            }
         }
     }
     fn run_link(
@@ -189,7 +203,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     }
     fn run_fat_lto(
         cgcx: &CodegenContext<Self>,
-        modules: Vec<FatLTOInput<Self>>,
+        modules: Vec<FatLtoInput<Self>>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
     ) -> Result<LtoModuleCodegen<Self>, FatalError> {
         back::lto::run_fat(cgcx, modules, cached_modules)
@@ -261,10 +275,10 @@ impl CodegenBackend for LlvmCodegenBackend {
             |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
     }
 
-    fn print(&self, req: PrintRequest, sess: &Session) {
-        match req {
-            PrintRequest::RelocationModels => {
-                println!("Available relocation models:");
+    fn print(&self, req: &PrintRequest, out: &mut dyn PrintBackendInfo, sess: &Session) {
+        match req.kind {
+            PrintKind::RelocationModels => {
+                writeln!(out, "Available relocation models:");
                 for name in &[
                     "static",
                     "pic",
@@ -275,26 +289,27 @@ impl CodegenBackend for LlvmCodegenBackend {
                     "ropi-rwpi",
                     "default",
                 ] {
-                    println!("    {}", name);
+                    writeln!(out, "    {name}");
                 }
-                println!();
+                writeln!(out);
             }
-            PrintRequest::CodeModels => {
-                println!("Available code models:");
+            PrintKind::CodeModels => {
+                writeln!(out, "Available code models:");
                 for name in &["tiny", "small", "kernel", "medium", "large"] {
-                    println!("    {}", name);
+                    writeln!(out, "    {name}");
                 }
-                println!();
+                writeln!(out);
             }
-            PrintRequest::TlsModels => {
-                println!("Available TLS models:");
+            PrintKind::TlsModels => {
+                writeln!(out, "Available TLS models:");
                 for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
-                    println!("    {}", name);
+                    writeln!(out, "    {name}");
                 }
-                println!();
+                writeln!(out);
             }
-            PrintRequest::StackProtectorStrategies => {
-                println!(
+            PrintKind::StackProtectorStrategies => {
+                writeln!(
+                    out,
                     r#"Available stack protector strategies:
     all
         Generate stack canaries in all functions.
@@ -318,7 +333,7 @@ impl CodegenBackend for LlvmCodegenBackend {
 "#
                 );
             }
-            req => llvm_util::print(req, sess),
+            _other => llvm_util::print(req, out, sess),
         }
     }
 
@@ -354,7 +369,7 @@ impl CodegenBackend for LlvmCodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let (codegen_results, work_products) = ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
             .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
@@ -388,7 +403,9 @@ impl CodegenBackend for LlvmCodegenBackend {
 pub struct ModuleLlvm {
     llcx: &'static mut llvm::Context,
     llmod_raw: *const llvm::Module,
-    tm: &'static mut llvm::TargetMachine,
+
+    // independent of llcx and llmod_raw; resources are disposed of by the Drop impl
+    tm: OwnedTargetMachine,
 }
 
 unsafe impl Send for ModuleLlvm {}
@@ -440,7 +457,6 @@ impl ModuleLlvm {
 impl Drop for ModuleLlvm {
     fn drop(&mut self) {
         unsafe {
-            llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
             llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
         }
     }
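Context for the `tm` field and `Drop` changes above: `OwnedTargetMachine` (added in back/owned_target_machine.rs elsewhere in this commit) owns the raw target machine, so the explicit dispose call moves out of `ModuleLlvm::drop`. A rough sketch of the assumed wrapper shape:

    // Assumed shape (the real type lives in back/owned_target_machine.rs):
    // an RAII owner that disposes the raw target machine on drop, using the
    // dispose function still declared in llvm/ffi.rs below.
    pub struct OwnedTargetMachine {
        tm: std::ptr::NonNull<llvm::TargetMachine>,
    }

    impl Drop for OwnedTargetMachine {
        fn drop(&mut self) {
            // SAFETY: `tm` came from LLVMRustCreateTargetMachine and is
            // uniquely owned by this wrapper.
            unsafe { llvm::LLVMRustDisposeTargetMachine(self.tm.as_ptr()) }
        }
    }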
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
index 45de284d22a..06e846a2b45 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -9,7 +9,7 @@ use libc::c_uint;
 use super::{DiagnosticInfo, SMDiagnostic};
 use rustc_span::InnerSpan;
 
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub enum OptimizationDiagnosticKind {
     OptimizationRemark,
     OptimizationMissed,
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index c95148013eb..a038b3af03d 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,8 +1,6 @@
 #![allow(non_camel_case_types)]
 #![allow(non_upper_case_globals)]
 
-use rustc_codegen_ssa::coverageinfo::map as coverage_map;
-
 use super::debuginfo::{
     DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
     DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
@@ -85,12 +83,17 @@ pub enum LLVMModFlagBehavior {
 // Consts for the LLVM CallConv type, pre-cast to usize.
 
 /// LLVM CallingConv::ID. Should we wrap this?
+///
+/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h>
 #[derive(Copy, Clone, PartialEq, Debug)]
 #[repr(C)]
 pub enum CallConv {
     CCallConv = 0,
     FastCallConv = 8,
     ColdCallConv = 9,
+    PreserveMost = 14,
+    PreserveAll = 15,
+    Tail = 18,
     X86StdcallCallConv = 64,
     X86FastcallCallConv = 65,
     ArmAapcsCallConv = 67,
@@ -196,6 +199,7 @@ pub enum AttributeKind {
     AllocSize = 37,
     AllocatedPointer = 38,
     AllocAlign = 39,
+    SanitizeSafeStack = 40,
 }
 
 /// LLVMIntPredicate
@@ -476,6 +480,8 @@ pub enum OptStage {
 pub struct SanitizerOptions {
     pub sanitize_address: bool,
     pub sanitize_address_recover: bool,
+    pub sanitize_cfi: bool,
+    pub sanitize_kcfi: bool,
     pub sanitize_memory: bool,
     pub sanitize_memory_recover: bool,
     pub sanitize_memory_track_origins: c_int,
@@ -584,6 +590,16 @@ pub enum ThreadLocalMode {
     LocalExec,
 }
 
+/// LLVMRustTailCallKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum TailCallKind {
+    None,
+    Tail,
+    MustTail,
+    NoTail,
+}
+
 /// LLVMRustChecksumKind
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -677,200 +693,6 @@ extern "C" {
 pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
 pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
 
-pub mod coverageinfo {
-    use super::coverage_map;
-
-    /// Aligns with [llvm::coverage::CounterMappingRegion::RegionKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L209-L230)
-    #[derive(Copy, Clone, Debug)]
-    #[repr(C)]
-    pub enum RegionKind {
-        /// A CodeRegion associates some code with a counter
-        CodeRegion = 0,
-
-        /// An ExpansionRegion represents a file expansion region that associates
-        /// a source range with the expansion of a virtual source file, such as
-        /// for a macro instantiation or #include file.
-        ExpansionRegion = 1,
-
-        /// A SkippedRegion represents a source range with code that was skipped
-        /// by a preprocessor or similar means.
-        SkippedRegion = 2,
-
-        /// A GapRegion is like a CodeRegion, but its count is only set as the
-        /// line execution count when its the only region in the line.
-        GapRegion = 3,
-
-        /// A BranchRegion represents leaf-level boolean expressions and is
-        /// associated with two counters, each representing the number of times the
-        /// expression evaluates to true or false.
-        BranchRegion = 4,
-    }
-
-    /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
-    /// coverage map, in accordance with the
-    /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
-    /// The struct composes fields representing the `Counter` type and value(s) (injected counter
-    /// ID, or expression type and operands), the source file (an indirect index into a "filenames
-    /// array", encoded separately), and source location (start and end positions of the represented
-    /// code region).
-    ///
-    /// Matches LLVMRustCounterMappingRegion.
-    #[derive(Copy, Clone, Debug)]
-    #[repr(C)]
-    pub struct CounterMappingRegion {
-        /// The counter type and type-dependent counter data, if any.
-        counter: coverage_map::Counter,
-
-        /// If the `RegionKind` is a `BranchRegion`, this represents the counter
-        /// for the false branch of the region.
-        false_counter: coverage_map::Counter,
-
-        /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
-        /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
-        /// that, in turn, are used to look up the filename for this region.
-        file_id: u32,
-
-        /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
-        /// the mapping regions created as a result of macro expansion, by checking if their file id
-        /// matches the expanded file id.
-        expanded_file_id: u32,
-
-        /// 1-based starting line of the mapping region.
-        start_line: u32,
-
-        /// 1-based starting column of the mapping region.
-        start_col: u32,
-
-        /// 1-based ending line of the mapping region.
-        end_line: u32,
-
-        /// 1-based ending column of the mapping region. If the high bit is set, the current
-        /// mapping region is a gap area.
-        end_col: u32,
-
-        kind: RegionKind,
-    }
-
-    impl CounterMappingRegion {
-        pub(crate) fn code_region(
-            counter: coverage_map::Counter,
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter,
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::CodeRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn branch_region(
-            counter: coverage_map::Counter,
-            false_counter: coverage_map::Counter,
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter,
-                false_counter,
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::BranchRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn expansion_region(
-            file_id: u32,
-            expanded_file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter: coverage_map::Counter::zero(),
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::ExpansionRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn skipped_region(
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter: coverage_map::Counter::zero(),
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::SkippedRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn gap_region(
-            counter: coverage_map::Counter,
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter,
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col: (1_u32 << 31) | end_col,
-                kind: RegionKind::GapRegion,
-            }
-        }
-    }
-}
-
 pub mod debuginfo {
     use super::{InvariantOpaque, Metadata};
     use bitflags::bitflags;
@@ -1058,7 +880,7 @@ extern "C" {
 
     // Operations on array, pointer, and vector types (sequence types)
     pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
-    pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+    pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type;
     pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
 
     pub fn LLVMGetElementType(Ty: &Type) -> &Type;
@@ -1066,6 +888,7 @@ extern "C" {
 
     // Operations on other types
     pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMTokenTypeInContext(C: &Context) -> &Type;
     pub fn LLVMMetadataTypeInContext(C: &Context) -> &Type;
 
     // Operations on all values
@@ -1078,6 +901,7 @@ extern "C" {
     pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
     pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
     pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>;
+    pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
 
     // Operations on constants of any type
     pub fn LLVMConstNull(Ty: &Type) -> &Value;
@@ -1139,7 +963,7 @@ extern "C" {
     pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
 
     // Constant expressions
-    pub fn LLVMRustConstInBoundsGEP2<'a>(
+    pub fn LLVMConstInBoundsGEP2<'a>(
         ty: &'a Type,
         ConstantVal: &'a Value,
         ConstantIndices: *const &'a Value,
@@ -1190,6 +1014,7 @@ extern "C" {
         NameLen: size_t,
     ) -> Option<&Value>;
     pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+    pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind);
 
     // Operations on attributes
     pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
@@ -1296,7 +1121,7 @@ extern "C" {
         NumArgs: c_uint,
         Then: &'a BasicBlock,
         Catch: &'a BasicBlock,
-        OpBundles: *const Option<&OperandBundleDef<'a>>,
+        OpBundles: *const &OperandBundleDef<'a>,
         NumOpBundles: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
@@ -1668,7 +1493,7 @@ extern "C" {
         Fn: &'a Value,
         Args: *const &'a Value,
         NumArgs: c_uint,
-        OpBundles: *const Option<&OperandBundleDef<'a>>,
+        OpBundles: *const &OperandBundleDef<'a>,
         NumOpBundles: c_uint,
     ) -> &'a Value;
     pub fn LLVMRustBuildMemCpy<'a>(
@@ -1851,7 +1676,10 @@ extern "C" {
     pub fn LLVMRustGetLastError() -> *const c_char;
 
     /// Print the pass timings since static dtors aren't picking them up.
-    pub fn LLVMRustPrintPassTimings();
+    pub fn LLVMRustPrintPassTimings(size: *const size_t) -> *const c_char;
+
+    /// Print the statistics since static dtors aren't picking them up.
+    pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char;
 
     pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
 
@@ -1884,6 +1712,8 @@ extern "C" {
     pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
         Filenames: *const *const c_char,
         FilenamesLen: size_t,
+        Lengths: *const size_t,
+        LengthsLen: size_t,
         BufferOut: &RustString,
     );
 
@@ -1891,15 +1721,18 @@ extern "C" {
     pub fn LLVMRustCoverageWriteMappingToBuffer(
         VirtualFileMappingIDs: *const c_uint,
         NumVirtualFileMappingIDs: c_uint,
-        Expressions: *const coverage_map::CounterExpression,
+        Expressions: *const crate::coverageinfo::ffi::CounterExpression,
         NumExpressions: c_uint,
-        MappingRegions: *const coverageinfo::CounterMappingRegion,
+        MappingRegions: *const crate::coverageinfo::ffi::CounterMappingRegion,
         NumMappingRegions: c_uint,
         BufferOut: &RustString,
     );
 
-    pub fn LLVMRustCoverageCreatePGOFuncNameVar(F: &Value, FuncName: *const c_char) -> &Value;
-    pub fn LLVMRustCoverageHashCString(StrVal: *const c_char) -> u64;
+    pub fn LLVMRustCoverageCreatePGOFuncNameVar(
+        F: &Value,
+        FuncName: *const c_char,
+        FuncNameLen: size_t,
+    ) -> &Value;
     pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
 
     #[allow(improper_ctypes)]
@@ -1987,6 +1820,21 @@ extern "C" {
         Decl: Option<&'a DIDescriptor>,
     ) -> &'a DISubprogram;
 
+    pub fn LLVMRustDIBuilderCreateMethod<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        LinkageName: *const c_char,
+        LinkageNameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        Flags: DIFlags,
+        SPFlags: DISPFlags,
+        TParam: &'a DIArray,
+    ) -> &'a DISubprogram;
+
     pub fn LLVMRustDIBuilderCreateBasicType<'a>(
         Builder: &DIBuilder<'a>,
         Name: *const c_char,
@@ -2249,7 +2097,12 @@ extern "C" {
 
     pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
 
-    pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
+    pub fn LLVMRustPrintTargetCPUs(
+        T: &TargetMachine,
+        cpu: *const c_char,
+        print: unsafe extern "C" fn(out: *mut c_void, string: *const c_char, len: usize),
+        out: *mut c_void,
+    );
     pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
     pub fn LLVMRustGetTargetFeature(
         T: &TargetMachine,
@@ -2259,6 +2112,8 @@ extern "C" {
     );
 
     pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+
+    // This function makes copies of the pointed-to data, so the data's lifetime may end after this function returns.
     pub fn LLVMRustCreateTargetMachine(
         Triple: *const c_char,
         CPU: *const c_char,
@@ -2278,9 +2133,14 @@ extern "C" {
         RelaxELFRelocations: bool,
         UseInitArray: bool,
         SplitDwarfFile: *const c_char,
+        OutputObjFile: *const c_char,
+        DebugInfoCompression: *const c_char,
         ForceEmulatedTls: bool,
-    ) -> Option<&'static mut TargetMachine>;
-    pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+        ArgsCstrBuff: *const c_char,
+        ArgsCstrBuffLen: usize,
+    ) -> *mut TargetMachine;
+
+    pub fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine);
     pub fn LLVMRustAddLibraryInfo<'a>(
         PM: &PassManager<'a>,
         M: &'a Module,
@@ -2299,6 +2159,7 @@ extern "C" {
         TM: &'a TargetMachine,
         OptLevel: PassBuilderOptLevel,
         OptStage: OptStage,
+        IsLinkerPluginLTO: bool,
         NoPrepopulatePasses: bool,
         VerifyIR: bool,
         UseThinLTOBuffers: bool,
@@ -2465,12 +2326,12 @@ extern "C" {
         len: usize,
         out_len: &mut usize,
     ) -> *const u8;
-    pub fn LLVMRustThinLTOGetDICompileUnit(
-        M: &Module,
-        CU1: &mut *mut c_void,
-        CU2: &mut *mut c_void,
-    );
-    pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);
+    pub fn LLVMRustGetSliceFromObjectDataByName(
+        data: *const u8,
+        len: usize,
+        name: *const u8,
+        out_len: &mut usize,
+    ) -> *const u8;
 
     pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
     pub fn LLVMRustLinkerAdd(
@@ -2498,6 +2359,8 @@ extern "C" {
         remark_all_passes: bool,
         remark_passes: *const *const c_char,
         remark_passes_len: usize,
+        remark_file: *const c_char,
+        pgo_available: bool,
     );
 
     #[allow(improper_ctypes)]
@@ -2507,6 +2370,10 @@ extern "C" {
 
     pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
 
+    pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+    pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
     pub fn LLVMRustGetSymbols(
         buf_ptr: *const u8,
         buf_len: usize,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 46692fd5e8b..7c8ef67ffd1 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -8,17 +8,17 @@ use libc::c_int;
 use rustc_codegen_ssa::target_features::{
     supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
 };
+use rustc_codegen_ssa::traits::PrintBackendInfo;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_fs_util::path_to_c_string;
 use rustc_middle::bug;
-use rustc_session::config::PrintRequest;
+use rustc_session::config::{PrintKind, PrintRequest};
 use rustc_session::Session;
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::{MergeFunctions, PanicStrategy};
-use smallvec::{smallvec, SmallVec};
-use std::ffi::{CStr, CString};
 
+use std::ffi::{c_char, c_void, CStr, CString};
 use std::path::Path;
 use std::ptr;
 use std::slice;
@@ -111,6 +111,10 @@ unsafe fn configure_llvm(sess: &Session) {
         // Use non-zero `import-instr-limit` multiplier for cold callsites.
         add("-import-cold-multiplier=0.1", false);
 
+        if sess.print_llvm_stats() {
+            add("-stats", false);
+        }
+
         for arg in sess_args {
             add(&(*arg), true);
         }
@@ -132,6 +136,60 @@ pub fn time_trace_profiler_finish(file_name: &Path) {
     }
 }
 
+pub enum TargetFeatureFoldStrength<'a> {
+    // The feature is only tied when enabling it; disabling this feature
+    // does not disable the tied feature.
+    EnableOnly(&'a str),
+    // The feature is tied for both enabling and disabling this feature.
+    Both(&'a str),
+}
+
+impl<'a> TargetFeatureFoldStrength<'a> {
+    fn as_str(&self) -> &'a str {
+        match self {
+            TargetFeatureFoldStrength::EnableOnly(feat) => feat,
+            TargetFeatureFoldStrength::Both(feat) => feat,
+        }
+    }
+}
+
+pub struct LLVMFeature<'a> {
+    pub llvm_feature_name: &'a str,
+    pub dependency: Option<TargetFeatureFoldStrength<'a>>,
+}
+
+impl<'a> LLVMFeature<'a> {
+    pub fn new(llvm_feature_name: &'a str) -> Self {
+        Self { llvm_feature_name, dependency: None }
+    }
+
+    pub fn with_dependency(
+        llvm_feature_name: &'a str,
+        dependency: TargetFeatureFoldStrength<'a>,
+    ) -> Self {
+        Self { llvm_feature_name, dependency: Some(dependency) }
+    }
+
+    pub fn contains(&self, feat: &str) -> bool {
+        self.iter().any(|dep| dep == feat)
+    }
+
+    pub fn iter(&'a self) -> impl Iterator<Item = &'a str> {
+        let dependencies = self.dependency.iter().map(|feat| feat.as_str());
+        std::iter::once(self.llvm_feature_name).chain(dependencies)
+    }
+}
+
+impl<'a> IntoIterator for LLVMFeature<'a> {
+    type Item = &'a str;
+    type IntoIter = impl Iterator<Item = &'a str>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        let dependencies = self.dependency.into_iter().map(|feat| feat.as_str());
+        std::iter::once(self.llvm_feature_name).chain(dependencies)
+    }
+}
+
 // WARNING: the features after applying `to_llvm_features` must be known
 // to LLVM or the feature detection code will walk past the end of the feature
 // array, leading to crashes.
@@ -147,42 +205,65 @@ pub fn time_trace_profiler_finish(file_name: &Path) {
 // Though note that Rust can also be build with an external precompiled version of LLVM
 // which might lead to failures if the oldest tested / supported LLVM version
 // doesn't yet support the relevant intrinsics
-pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
+pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> {
     let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
     match (arch, s) {
-        ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"],
-        ("x86", "pclmulqdq") => smallvec!["pclmul"],
-        ("x86", "rdrand") => smallvec!["rdrnd"],
-        ("x86", "bmi1") => smallvec!["bmi"],
-        ("x86", "cmpxchg16b") => smallvec!["cx16"],
-        // FIXME: These aliases are misleading, and should be removed before avx512_target_feature is
-        // stabilized. They must remain until std::arch switches off them.
-        // rust#100752
-        ("x86", "avx512vaes") => smallvec!["vaes"],
-        ("x86", "avx512gfni") => smallvec!["gfni"],
-        ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
-        ("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
-        ("aarch64", "dpb") => smallvec!["ccpp"],
-        ("aarch64", "dpb2") => smallvec!["ccdp"],
-        ("aarch64", "frintts") => smallvec!["fptoint"],
-        ("aarch64", "fcma") => smallvec!["complxnum"],
-        ("aarch64", "pmuv3") => smallvec!["perfmon"],
-        ("aarch64", "paca") => smallvec!["pauth"],
-        ("aarch64", "pacg") => smallvec!["pauth"],
-        // Rust ties fp and neon together. In LLVM neon implicitly enables fp,
-        // but we manually enable neon when a feature only implicitly enables fp
-        ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
-        ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
-        ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
-        ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
-        ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
-        ("aarch64", "sve") => smallvec!["sve", "neon"],
-        ("aarch64", "sve2") => smallvec!["sve2", "neon"],
-        ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
-        ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
-        ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
-        ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
-        (_, s) => smallvec![s],
+        ("x86", "sse4.2") => {
+            LLVMFeature::with_dependency("sse4.2", TargetFeatureFoldStrength::EnableOnly("crc32"))
+        }
+        ("x86", "pclmulqdq") => LLVMFeature::new("pclmul"),
+        ("x86", "rdrand") => LLVMFeature::new("rdrnd"),
+        ("x86", "bmi1") => LLVMFeature::new("bmi"),
+        ("x86", "cmpxchg16b") => LLVMFeature::new("cx16"),
+        ("aarch64", "rcpc2") => LLVMFeature::new("rcpc-immo"),
+        ("aarch64", "dpb") => LLVMFeature::new("ccpp"),
+        ("aarch64", "dpb2") => LLVMFeature::new("ccdp"),
+        ("aarch64", "frintts") => LLVMFeature::new("fptoint"),
+        ("aarch64", "fcma") => LLVMFeature::new("complxnum"),
+        ("aarch64", "pmuv3") => LLVMFeature::new("perfmon"),
+        ("aarch64", "paca") => LLVMFeature::new("pauth"),
+        ("aarch64", "pacg") => LLVMFeature::new("pauth"),
+        // Rust ties fp and neon together.
+        ("aarch64", "neon") => {
+            LLVMFeature::with_dependency("neon", TargetFeatureFoldStrength::Both("fp-armv8"))
+        }
+        // In LLVM, neon implicitly enables fp, but we manually enable
+        // neon when a feature only implicitly enables fp.
+        ("aarch64", "f32mm") => {
+            LLVMFeature::with_dependency("f32mm", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "f64mm") => {
+            LLVMFeature::with_dependency("f64mm", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "fhm") => {
+            LLVMFeature::with_dependency("fp16fml", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "fp16") => {
+            LLVMFeature::with_dependency("fullfp16", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "jsconv") => {
+            LLVMFeature::with_dependency("jsconv", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve") => {
+            LLVMFeature::with_dependency("sve", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2") => {
+            LLVMFeature::with_dependency("sve2", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-aes") => {
+            LLVMFeature::with_dependency("sve2-aes", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-sm4") => {
+            LLVMFeature::with_dependency("sve2-sm4", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-sha3") => {
+            LLVMFeature::with_dependency("sve2-sha3", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-bitperm") => LLVMFeature::with_dependency(
+            "sve2-bitperm",
+            TargetFeatureFoldStrength::EnableOnly("neon"),
+        ),
+        (_, s) => LLVMFeature::new(s),
     }
 }
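Usage sketch for the new types (grounded in the API added above): a folded feature yields its LLVM name first, then its dependency.

    // aarch64 "fp16" maps to LLVM's "fullfp16" and additionally enables
    // "neon", but only in the enable direction (EnableOnly).
    let feat = LLVMFeature::with_dependency(
        "fullfp16",
        TargetFeatureFoldStrength::EnableOnly("neon"),
    );
    assert!(feat.contains("neon"));
    assert_eq!(feat.into_iter().collect::<Vec<_>>(), ["fullfp16", "neon"]);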
 
@@ -222,7 +303,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
             // check that all features in a given smallvec are enabled
             for llvm_feature in to_llvm_features(sess, feature) {
                 let cstr = SmallCStr::new(llvm_feature);
-                if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+                if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } {
                     return false;
                 }
             }
@@ -234,7 +315,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
 
 pub fn print_version() {
     let (major, minor, patch) = get_version();
-    println!("LLVM version: {}.{}.{}", major, minor, patch);
+    println!("LLVM version: {major}.{minor}.{patch}");
 }
 
 pub fn get_version() -> (u32, u32, u32) {
@@ -274,24 +355,23 @@ fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
     ret
 }
 
-fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
+fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &llvm::TargetMachine) {
     let mut llvm_target_features = llvm_target_features(tm);
     let mut known_llvm_target_features = FxHashSet::<&'static str>::default();
     let mut rustc_target_features = supported_target_features(sess)
         .iter()
         .map(|(feature, _gate)| {
-            let desc = if let Some(llvm_feature) = to_llvm_features(sess, *feature).first() {
-                // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+            // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+            let llvm_feature = to_llvm_features(sess, *feature).llvm_feature_name;
+            let desc =
                 match llvm_target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok() {
                     Some(index) => {
                         known_llvm_target_features.insert(llvm_feature);
                         llvm_target_features[index].1
                     }
                     None => "",
-                }
-            } else {
-                ""
-            };
+                };
+
             (*feature, desc)
         })
         .collect::<Vec<_>>();
@@ -308,29 +388,48 @@ fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
         .max()
         .unwrap_or(0);
 
-    println!("Features supported by rustc for this target:");
+    writeln!(out, "Features supported by rustc for this target:");
     for (feature, desc) in &rustc_target_features {
-        println!("    {1:0$} - {2}.", max_feature_len, feature, desc);
+        writeln!(out, "    {feature:max_feature_len$} - {desc}.");
     }
-    println!("\nCode-generation features supported by LLVM for this target:");
+    writeln!(out, "\nCode-generation features supported by LLVM for this target:");
     for (feature, desc) in &llvm_target_features {
-        println!("    {1:0$} - {2}.", max_feature_len, feature, desc);
+        writeln!(out, "    {feature:max_feature_len$} - {desc}.");
     }
     if llvm_target_features.is_empty() {
-        println!("    Target features listing is not supported by this LLVM version.");
+        writeln!(out, "    Target features listing is not supported by this LLVM version.");
     }
-    println!("\nUse +feature to enable a feature, or -feature to disable it.");
-    println!("For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
-    println!("Code-generation features cannot be used in cfg or #[target_feature],");
-    println!("and may be renamed or removed in a future version of LLVM or rustc.\n");
+    writeln!(out, "\nUse +feature to enable a feature, or -feature to disable it.");
+    writeln!(out, "For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
+    writeln!(out, "Code-generation features cannot be used in cfg or #[target_feature],");
+    writeln!(out, "and may be renamed or removed in a future version of LLVM or rustc.\n");
 }
 
-pub(crate) fn print(req: PrintRequest, sess: &Session) {
+pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess: &Session) {
     require_inited();
     let tm = create_informational_target_machine(sess);
-    match req {
-        PrintRequest::TargetCPUs => unsafe { llvm::LLVMRustPrintTargetCPUs(tm) },
-        PrintRequest::TargetFeatures => print_target_features(sess, tm),
+    match req.kind {
+        PrintKind::TargetCPUs => {
+            // SAFETY: generate a C-compatible string from a byte slice to
+            // pass the target CPU name into LLVM; the reference passed to
+            // the C function outlives the call.
+            let cpu_cstring = CString::new(handle_native(sess.target.cpu.as_ref()))
+                .unwrap_or_else(|e| bug!("failed to convert to cstring: {}", e));
+            unsafe extern "C" fn callback(out: *mut c_void, string: *const c_char, len: usize) {
+                let out = &mut *(out as *mut &mut dyn PrintBackendInfo);
+                let bytes = slice::from_raw_parts(string as *const u8, len);
+                write!(out, "{}", String::from_utf8_lossy(bytes));
+            }
+            unsafe {
+                llvm::LLVMRustPrintTargetCPUs(
+                    &tm,
+                    cpu_cstring.as_ptr(),
+                    callback,
+                    &mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
+                );
+            }
+        }
+        PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
         _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
     }
 }
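The `TargetCPUs` arm above uses a type-erasure trampoline: a `&mut dyn PrintBackendInfo` is smuggled through C as `*mut c_void` and reconstituted inside the callback. A self-contained sketch of the same pattern against `std::io::Write` (the names here are illustrative, not from the diff):

    use std::ffi::c_void;
    use std::io::Write;

    unsafe extern "C" fn callback(out: *mut c_void, bytes: *const u8, len: usize) {
        // SAFETY: `out` was produced from `&mut &mut dyn Write` by the caller.
        let out = &mut *(out as *mut &mut dyn Write);
        out.write_all(std::slice::from_raw_parts(bytes, len)).unwrap();
    }

    fn main() {
        let mut sink: &mut dyn Write = &mut std::io::stdout();
        let data = b"hello from the C side\n";
        // Stand-in for the foreign call that would invoke the callback.
        unsafe {
            callback(&mut sink as *mut &mut dyn Write as *mut c_void, data.as_ptr(), data.len())
        };
    }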
@@ -408,8 +507,6 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
             .features
             .split(',')
             .filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
-            // Drop +atomics-32 feature introduced in LLVM 15.
-            .filter(|v| *v != "+atomics-32" || get_version() >= (15, 0, 0))
             .map(String::from),
     );
 
@@ -468,10 +565,19 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
             // passing requests down to LLVM. This means that all in-language
             // features also work on the command line instead of having two
             // different names when the LLVM name and the Rust name differ.
+            let llvm_feature = to_llvm_features(sess, feature);
+
             Some(
-                to_llvm_features(sess, feature)
-                    .into_iter()
-                    .map(move |f| format!("{}{}", enable_disable, f)),
+                std::iter::once(format!("{}{}", enable_disable, llvm_feature.llvm_feature_name))
+                    .chain(llvm_feature.dependency.into_iter().filter_map(move |feat| {
+                        match (enable_disable, feat) {
+                            ('-' | '+', TargetFeatureFoldStrength::Both(f))
+                            | ('+', TargetFeatureFoldStrength::EnableOnly(f)) => {
+                                Some(format!("{enable_disable}{f}"))
+                            }
+                            _ => None,
+                        }
+                    })),
             )
         })
         .flatten();
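Worked example of the fold above (derived from the match arms, not new behavior): enabling aarch64 `fp16` expands to `+fullfp16,+neon` because its `neon` dependency is `EnableOnly`, while disabling it yields only `-fullfp16`; a `Both` dependency such as neon's `fp-armv8` is emitted for both `+` and `-`.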
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index d0ae36349df..38e8220569a 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -48,10 +48,10 @@ impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
         visibility: Visibility,
         symbol_name: &str,
     ) {
-        assert!(!instance.substs.needs_infer());
+        assert!(!instance.args.has_infer());
 
         let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
-        let lldecl = self.declare_fn(symbol_name, fn_abi);
+        let lldecl = self.declare_fn(symbol_name, fn_abi, Some(instance));
         unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
         let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
         base::set_link_section(lldecl, attrs);
@@ -111,7 +111,7 @@ impl CodegenCx<'_, '_> {
         }
 
         // Symbols from executables can't really be imported any further.
-        let all_exe = self.tcx.sess.crate_types().iter().all(|ty| *ty == CrateType::Executable);
+        let all_exe = self.tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable);
         let is_declaration_for_linker =
             is_declaration || linkage == llvm::Linkage::AvailableExternallyLinkage;
         if all_exe && !is_declaration_for_linker {
@@ -125,8 +125,7 @@ impl CodegenCx<'_, '_> {
 
         // Thread-local variables generally don't support copy relocations.
         let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval)
-            .map(|v| llvm::LLVMIsThreadLocal(v) == llvm::True)
-            .unwrap_or(false);
+            .is_some_and(|v| llvm::LLVMIsThreadLocal(v) == llvm::True);
         if is_thread_local_var {
             return false;
         }
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index bef4647f207..06b7703672f 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -52,6 +52,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
     }
 
+    pub(crate) fn type_token(&self) -> &'ll Type {
+        unsafe { llvm::LLVMTokenTypeInContext(self.llcx) }
+    }
+
     pub(crate) fn type_metadata(&self) -> &'ll Type {
         unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) }
     }
@@ -108,13 +112,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
     }
 
-    pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
-        // FIXME(eddyb) We could find a better approximation if ity.align < align.
-        let ity = Integer::approximate_align(self, align);
-        self.type_from_integer(ity)
-    }
-
-    /// Return a LLVM type that has at most the required alignment,
+    /// Return an LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
     pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
         let unit = Integer::approximate_align(self, align);
@@ -185,17 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
     }
 
-    fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
-        assert_ne!(
-            self.type_kind(ty),
-            TypeKind::Function,
-            "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
-        );
-        ty.ptr_to(AddressSpace::DATA)
+    fn type_ptr(&self) -> &'ll Type {
+        self.type_ptr_ext(AddressSpace::DATA)
     }
 
-    fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-        ty.ptr_to(address_space)
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
+        unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
     }
 
     fn element_type(&self, ty: &'ll Type) -> &'ll Type {
@@ -243,12 +236,8 @@ impl Type {
         unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
     }
 
-    pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
-        Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
-    }
-
-    fn ptr_to(&self, address_space: AddressSpace) -> &Type {
-        unsafe { llvm::LLVMPointerType(self, address_space.0) }
+    pub fn ptr_llcx(llcx: &llvm::Context) -> &Type {
+        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
     }
 }
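These pointer-type changes reflect LLVM's move to opaque pointers: a pointer type no longer records its pointee, so `LLVMPointerTypeInContext` needs only a context and an address space. In textual IR, what used to be `i32*` or `%struct.Foo*` is now just `ptr` (or `ptr addrspace(N)`), which is why the `type_ptr_to(ty)` shape can be dropped in favor of `type_ptr`/`type_ptr_ext`.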
 
@@ -288,11 +277,30 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
         ty.llvm_type(self)
     }
+    fn scalar_copy_backend_type(&self, layout: TyAndLayout<'tcx>) -> Option<Self::Type> {
+        layout.scalar_copy_llvm_type(self)
+    }
 }
 
 impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn add_type_metadata(&self, function: &'ll Value, typeid: String) {
+        let typeid_metadata = self.typeid_metadata(typeid).unwrap();
+        let v = [self.const_usize(0), typeid_metadata];
+        unsafe {
+            llvm::LLVMRustGlobalAddMetadata(
+                function,
+                llvm::MD_type as c_uint,
+                llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+                    self.llcx,
+                    v.as_ptr(),
+                    v.len() as c_uint,
+                )),
+            )
+        }
+    }
+
     fn set_type_metadata(&self, function: &'ll Value, typeid: String) {
-        let typeid_metadata = self.typeid_metadata(typeid);
+        let typeid_metadata = self.typeid_metadata(typeid).unwrap();
         let v = [self.const_usize(0), typeid_metadata];
         unsafe {
             llvm::LLVMGlobalSetMetadata(
@@ -307,13 +315,28 @@ impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         }
     }
 
-    fn typeid_metadata(&self, typeid: String) -> &'ll Value {
-        unsafe {
+    fn typeid_metadata(&self, typeid: String) -> Option<&'ll Value> {
+        Some(unsafe {
             llvm::LLVMMDStringInContext(
                 self.llcx,
                 typeid.as_ptr() as *const c_char,
                 typeid.len() as c_uint,
             )
+        })
+    }
+
+    fn add_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) {
+        let kcfi_type_metadata = self.const_u32(kcfi_typeid);
+        unsafe {
+            llvm::LLVMRustGlobalAddMetadata(
+                function,
+                llvm::MD_kcfi_type as c_uint,
+                llvm::LLVMMDNodeInContext2(
+                    self.llcx,
+                    &llvm::LLVMValueAsMetadata(kcfi_type_metadata),
+                    1,
+                ),
+            )
         }
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index e264ce78f0d..fd4c9572af2 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -3,9 +3,10 @@ use crate::context::TypeLowering;
 use crate::type_::Type;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
+use rustc_target::abi::HasDataLayout;
 use rustc_target::abi::{Abi, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F32, F64};
 use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
@@ -22,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
         Abi::Vector { element, count } => {
-            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+            let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
         Abi::ScalarPair(..) => {
@@ -56,13 +57,10 @@ fn uncached_llvm_type<'a, 'tcx>(
             if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
                 (layout.ty.kind(), &layout.variants)
             {
-                write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+                write!(&mut name, "::{}", ty::GeneratorArgs::variant_name(index)).unwrap();
             }
             Some(name)
         }
-        // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition
-        // might be recursive. Other cases are non-recursive and we can use literal structure types.
-        ty::Adt(..) => Some(String::new()),
         _ => None,
     };
 
@@ -178,12 +176,7 @@ pub trait LayoutLlvmExt<'tcx> {
     fn is_llvm_scalar_pair(&self) -> bool;
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
-    fn scalar_llvm_type_at<'a>(
-        &self,
-        cx: &CodegenCx<'a, 'tcx>,
-        scalar: Scalar,
-        offset: Size,
-    ) -> &'a Type;
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
     fn scalar_pair_element_llvm_type<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
@@ -192,14 +185,14 @@ pub trait LayoutLlvmExt<'tcx> {
     ) -> &'a Type;
     fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
     fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
 }
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
         match self.abi {
             Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) => false,
-            Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
         }
     }
 
@@ -222,24 +215,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         if let Abi::Scalar(scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                 return llty;
             }
-            let llty = match *self.ty.kind() {
-                ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
-                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
-                }
-                ty::Adt(def, _) if def.is_box() => {
-                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
-                }
-                ty::FnPtr(sig) => {
-                    cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
-                }
-                _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
-            };
+            let llty = self.scalar_llvm_type_at(cx, scalar);
             cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
             return llty;
         }
@@ -299,25 +284,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         self.llvm_type(cx)
     }
 
-    fn scalar_llvm_type_at<'a>(
-        &self,
-        cx: &CodegenCx<'a, 'tcx>,
-        scalar: Scalar,
-        offset: Size,
-    ) -> &'a Type {
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
         match scalar.primitive() {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
-            Pointer(address_space) => {
-                // If we know the alignment, pick something better than i8.
-                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                    cx.type_pointee_for_align(pointee.align)
-                } else {
-                    cx.type_i8()
-                };
-                cx.type_ptr_to_ext(pointee, address_space)
-            }
+            Pointer(address_space) => cx.type_ptr_ext(address_space),
         }
     }
 
@@ -327,26 +299,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         index: usize,
         immediate: bool,
     ) -> &'a Type {
-        // HACK(eddyb) special-case fat pointers until LLVM removes
-        // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match *self.ty.kind() {
-            ty::Ref(..) | ty::RawPtr(_) => {
-                return self.field(cx, index).llvm_type(cx);
-            }
-            // only wide pointer boxes are handled as pointers
-            // thin pointer boxes with scalar allocators are handled by the general logic below
-            ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
-                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
-                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
-            }
-            // `dyn* Trait` has the same ABI as `*mut dyn Trait`
-            ty::Dynamic(bounds, region, ty::DynStar) => {
-                let ptr_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_dynamic(bounds, region, ty::Dyn));
-                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
-            }
-            _ => {}
-        }
-
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         let Abi::ScalarPair(a, b) = self.abi else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
@@ -362,8 +317,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return cx.type_i1();
         }
 
-        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
-        self.scalar_llvm_type_at(cx, scalar, offset)
+        self.scalar_llvm_type_at(cx, scalar)
     }
 
     fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
@@ -415,4 +369,44 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
         result
     }
+
+    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
+        debug_assert!(self.is_sized());
+
+        // FIXME: this is a fairly arbitrary choice, but 128 bits on WASM
+        // (matching the 128-bit SIMD types proposal) and 256 bits on x64
+        // (like AVX2 registers) seem at least like a tolerable starting point.
+        let threshold = cx.data_layout().pointer_size * 4;
+        if self.layout.size() > threshold {
+            return None;
+        }
+
+        // Vectors, even for non-power-of-two sizes, have the same layout as
+        // arrays but don't count as aggregate types.
+        // While LLVM theoretically supports non-power-of-two sizes, and they
+        // often work fine, sometimes x86-isel deals with them horribly
+        // (see #115212), so for now only use power-of-two ones.
+        if let FieldsShape::Array { count, .. } = self.layout.fields()
+            && count.is_power_of_two()
+            && let element = self.field(cx, 0)
+            && element.ty.is_integral()
+        {
+            // `cx.type_ix(bits)` is tempting here, but while that works great
+            // for things that *stay* as memory-to-memory copies, it also ends
+            // up suppressing vectorization as it introduces shifts when it
+            // extracts all the individual values.
+
+            let ety = element.llvm_type(cx);
+            if *count == 1 {
+                // Emitting `<1 x T>` would be silly; just use the scalar.
+                return Some(ety);
+            } else {
+                return Some(cx.type_vector(ety, *count));
+            }
+        }
+
+        // FIXME: The above only handles integer arrays; surely more things
+        // would also be possible. Be careful about provenance, though!
+        None
+    }
 }
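A standalone sketch of the choice `scalar_copy_llvm_type` makes for integer arrays, with plain strings standing in for LLVM types and a 64-bit target assumed (so the threshold is 32 bytes):

    // Mirrors the selection above: small power-of-two-length integer arrays
    // are copied as vectors, single-element arrays as plain scalars, and
    // everything else falls back to None (ordinary memcpy-style lowering).
    fn copy_type_for_int_array(elem_bytes: u64, count: u64) -> Option<String> {
        let threshold = 8 * 4; // pointer_size * 4 on a 64-bit target
        if elem_bytes * count > threshold || !count.is_power_of_two() {
            return None; // too large, or non-power-of-two (see #115212)
        }
        let bits = elem_bytes * 8;
        if count == 1 {
            Some(format!("i{bits}")) // `<1 x T>` would be silly
        } else {
            Some(format!("<{count} x i{bits}>"))
        }
    }

    fn main() {
        assert_eq!(copy_type_for_int_array(1, 16).as_deref(), Some("<16 x i8>"));
        assert_eq!(copy_type_for_int_array(4, 1).as_deref(), Some("i32"));
        assert_eq!(copy_type_for_int_array(2, 3), None); // length 3: skipped
    }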
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index b19398e68c2..172c66a7af1 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -5,7 +5,7 @@ use crate::value::Value;
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::{
     common::IntPredicate,
-    traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+    traits::{BaseTypeMethods, BuilderMethods, ConstMethods},
 };
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::Ty;
@@ -26,24 +26,18 @@ fn round_pointer_up_to_alignment<'ll>(
 fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
-    llty: &'ll Type,
     size: Size,
     align: Align,
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ty = bx.type_i8p();
-    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
-    let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
-        bx.bitcast(list.immediate(), va_list_ptr_ty)
-    } else {
-        list.immediate()
-    };
+    let va_list_ty = bx.type_ptr();
+    let va_list_addr = list.immediate();
 
     let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
-        (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
     } else {
         (ptr, slot_size)
     };
@@ -56,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
         let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
-        (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+        (adjusted, addr_align)
     } else {
-        (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+        (addr, addr_align)
     }
 }
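The two address fixups in `emit_direct_ptr_va_arg` are simple integer arithmetic: round the cursor up when a stricter alignment is permitted (what `round_pointer_up_to_alignment` emits as IR), and on big-endian targets skew values narrower than the slot towards its high bytes. A plain-integer sketch, assuming `align` is a power of two:

    // u64 addresses stand in for the IR pointer values used above.
    fn adjust_addr(ptr: u64, size: u64, align: u64, slot_size: u64,
                   allow_higher_align: bool, big_endian: bool) -> u64 {
        debug_assert!(align.is_power_of_two());
        let addr = if allow_higher_align && align > slot_size {
            (ptr + align - 1) & !(align - 1) // round up to `align`
        } else {
            ptr
        };
        if big_endian && size < slot_size {
            addr + (slot_size - size) // value sits in the slot's high bytes
        } else {
            addr
        }
    }

    fn main() {
        assert_eq!(adjust_addr(13, 8, 16, 8, true, false), 16); // rounded up
        assert_eq!(adjust_addr(16, 4, 4, 8, true, true), 20);   // big-endian skew
    }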
 
@@ -73,7 +67,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     let layout = bx.cx.layout_of(target_ty);
     let (llty, size, align) = if indirect {
         (
-            bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+            bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
             bx.cx.data_layout().pointer_size,
             bx.cx.data_layout().pointer_align,
         )
@@ -81,7 +75,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
         (layout.llvm_type(bx.cx), layout.size, layout.align)
     };
     let (addr, addr_align) =
-        emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
+        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
     if indirect {
         let tmp_ret = bx.load(llty, addr, addr_align);
         bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
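When `indirect` is set, the va_list slot holds a pointer to the argument rather than the argument itself, hence the two loads above. A value-level analogy (the helper is hypothetical, not rustc code):

    // For indirect arguments the slot stores a pointer, so reading the
    // value takes one extra dereference; the direct case reads in place.
    unsafe fn read_slot<T: Copy>(slot: *const u8, indirect: bool) -> T {
        if indirect {
            let ptr = *(slot as *const *const T); // first load: the pointer
            *ptr // second load: the value itself
        } else {
            *(slot as *const T)
        }
    }

    fn main() {
        let value: u64 = 42;
        let ptr: *const u64 = &value;
        unsafe {
            assert_eq!(read_slot::<u64>(&value as *const u64 as *const u8, false), 42);
            assert_eq!(read_slot::<u64>(&ptr as *const *const u64 as *const u8, true), 42);
        }
    }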
@@ -146,7 +140,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     bx.cond_br(use_stack, on_stack, in_reg);
 
     bx.switch_to_block(in_reg);
-    let top_type = bx.type_i8p();
+    let top_type = bx.type_ptr();
     let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
     let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
@@ -158,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
         reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
     }
     let reg_type = layout.llvm_type(bx);
-    let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
     let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
     bx.br(end);
 
@@ -218,7 +211,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Work out the address of the value in the register save area.
     let reg_ptr =
         bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
-    let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
     let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
     let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
     let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
@@ -234,7 +227,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Work out the address of the value in the argument overflow area.
     let arg_ptr =
         bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
-    let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
     let arg_off = bx.const_u64(padding);
     let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
 
@@ -246,14 +239,12 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
 
     // Return the appropriate result.
     bx.switch_to_block(end);
-    let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
     let val_addr = if indirect {
-        let ptr_type = bx.cx.type_ptr_to(val_type);
-        let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type));
-        bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi)
+        bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi)
     } else {
-        bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type))
+        val_addr
     };
     bx.load(val_type, val_addr, layout.align.abi)
 }