Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/abi.rs                           | 123
 compiler/rustc_codegen_llvm/src/allocator.rs                     | 188
 compiler/rustc_codegen_llvm/src/asm.rs                           |  13
 compiler/rustc_codegen_llvm/src/attributes.rs                    |  69
 compiler/rustc_codegen_llvm/src/back/archive.rs                  |  12
 compiler/rustc_codegen_llvm/src/back/lto.rs                      |  65
 compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs     | 103
 compiler/rustc_codegen_llvm/src/back/write.rs                    | 203
 compiler/rustc_codegen_llvm/src/base.rs                          |   3
 compiler/rustc_codegen_llvm/src/builder.rs                       | 107
 compiler/rustc_codegen_llvm/src/callee.rs                        |  39
 compiler/rustc_codegen_llvm/src/common.rs                        |  22
 compiler/rustc_codegen_llvm/src/consts.rs                        |  23
 compiler/rustc_codegen_llvm/src/context.rs                       |  79
 compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs              | 236
 compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs         | 392
 compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs           | 221
 compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs              | 240
 compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs    |  38
 compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs                 |   7
 compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs            |  34
 compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs  |   1
 compiler/rustc_codegen_llvm/src/debuginfo/mod.rs                 |  15
 compiler/rustc_codegen_llvm/src/debuginfo/utils.rs               |   3
 compiler/rustc_codegen_llvm/src/errors.rs                        |  10
 compiler/rustc_codegen_llvm/src/intrinsic.rs                     | 392
 compiler/rustc_codegen_llvm/src/lib.rs                           |  34
 compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs               |   2
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs                      | 245
 compiler/rustc_codegen_llvm/src/llvm_util.rs                     |  16
 compiler/rustc_codegen_llvm/src/mono_item.rs                     |   2
 compiler/rustc_codegen_llvm/src/type_.rs                         |  29
 compiler/rustc_codegen_llvm/src/type_of.rs                       |  78
 compiler/rustc_codegen_llvm/src/va_arg.rs                        |  35
 34 files changed, 1553 insertions(+), 1526 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index d221bad28ef..9e834b83df4 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,14 +211,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        } else if let PassMode::Cast(cast, _) = &self.mode {
+        } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
-                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
-                bx.store(val, cast_dst, self.layout.align.abi);
+                bx.store(val, dst.llval, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type. The
@@ -276,12 +274,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
-            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             }
             PassMode::Direct(_)
-            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
-            | PassMode::Cast(..) => {
+            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+            | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             }
@@ -334,39 +332,86 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         let llreturn_ty = match &self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
-            PassMode::Cast(cast, _) => cast.llvm_type(cx),
+            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
-                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                llargument_tys.push(cx.type_ptr());
                 cx.type_void()
             }
         };
 
         for arg in args {
+            // Note that the exact number of arguments pushed here is carefully synchronized with
+            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
             let llarg_ty = match &arg.mode {
                 PassMode::Ignore => continue,
-                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+                PassMode::Direct(_) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+                    // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
+                    // aggregates...
+                    if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+                        assert!(
+                            arg.layout.is_sized(),
+                            "`PassMode::Direct` for unsized type: {}",
+                            arg.layout.ty
+                        );
+                        // This really shouldn't happen, since `immediate_llvm_type` will use
+                        // `layout.fields` to turn this Rust type into an LLVM type. This means all
+                        // sorts of Rust type details leak into the ABI. However wasm sadly *does*
+                        // currently use this mode so we have to allow it -- but we absolutely
+                        // shouldn't let any more targets do that.
+                        // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+                        assert!(
+                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
+                            "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+                            arg.layout,
+                        );
+                    }
+                    arg.layout.immediate_llvm_type(cx)
+                }
                 PassMode::Pair(..) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+                    // LLVM IR.
+                    assert!(
+                        matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
+                        "PassMode::Pair for type {}",
+                        arg.layout.ty
+                    );
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+                    // `Indirect` with metadata is only for unsized types, and doesn't work with
+                    // on-stack passing.
+                    assert!(arg.layout.is_unsized() && !on_stack);
+                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+                    // Any two ABI-compatible unsized types have the same metadata type and
+                    // moreover the same metadata value leads to the same dynamic size and
+                    // alignment, so this respects ABI compatibility.
                     let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                     let ptr_layout = cx.layout_of(ptr_ty);
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+                    assert!(arg.layout.is_sized());
+                    cx.type_ptr()
+                }
+                PassMode::Cast { cast, pad_i32 } => {
+                    // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+                    assert!(arg.layout.is_sized());
                     // add padding
                     if *pad_i32 {
                         llargument_tys.push(Reg::i32().llvm_type(cx));
                     }
+                    // Compute the LLVM type we use for this function from the cast type.
+                    // We assume here that ABI-compatible Rust types have the same cast type.
                     cast.llvm_type(cx)
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
-                    cx.type_ptr_to(arg.memory_ty(cx))
-                }
             };
             llargument_tys.push(llarg_ty);
         }
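
The synchronization invariant described in the comment at the top of this loop boils down to: the number of LLVM arguments a Rust argument expands to is a pure function of its pass mode (0 for Ignore, 2 for Pair and for Indirect-with-metadata, 1 otherwise, plus an optional padding i32 for Cast). A simplified sketch of that mapping, using a hypothetical stand-in enum rather than the compiler's real `PassMode`:

    // A simplified sketch of the invariant documented above: the number of
    // LLVM arguments produced for a Rust argument is a pure function of its
    // pass mode. `PassModeSketch` is a stand-in, not the compiler's type.
    enum PassModeSketch {
        Ignore,
        Direct,
        Pair,
        IndirectWithMeta, // unsized argument: data pointer plus metadata
        Indirect,
        Cast { pad_i32: bool },
    }

    fn llvm_arg_count(mode: &PassModeSketch) -> usize {
        match mode {
            PassModeSketch::Ignore => 0,
            PassModeSketch::Direct | PassModeSketch::Indirect => 1,
            PassModeSketch::Pair | PassModeSketch::IndirectWithMeta => 2,
            // an optional leading i32 of padding precedes the cast value
            PassModeSketch::Cast { pad_i32 } => 1 + usize::from(*pad_i32),
        }
    }
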
@@ -379,12 +424,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        unsafe {
-            llvm::LLVMPointerType(
-                self.llvm_type(cx),
-                cx.data_layout().instruction_address_space.0 as c_uint,
-            )
-        }
+        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
     }
 
     fn llvm_cconv(&self) -> llvm::CallConv {
@@ -392,13 +432,16 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
-        let mut func_attrs = SmallVec::<[_; 2]>::new();
+        let mut func_attrs = SmallVec::<[_; 3]>::new();
         if self.ret.layout.abi.is_uninhabited() {
             func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
         }
         if !self.can_unwind {
             func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
         }
+        if let Conv::RiscvInterrupt { kind } = self.conv {
+            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
+        }
         attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
 
         let mut i = 0;
@@ -411,13 +454,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(attrs);
                 let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
             _ => {}
@@ -425,25 +468,25 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(attrs);
                     let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                     attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                     assert!(!on_stack);
                     apply(attrs);
-                    apply(extra_attrs);
+                    apply(meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(a);
                     apply(b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(&ArgAttributes::new());
                     }
@@ -473,13 +516,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(bx.cx, attrs);
                 let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                 attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_callsite(
                     llvm::AttributePlace::ReturnValue,
                     &bx.cx,
@@ -501,7 +544,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(bx.cx, attrs);
                     let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                     attributes::apply_to_callsite(
@@ -511,18 +554,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     );
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(bx.cx, attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                     apply(bx.cx, attrs);
-                    apply(bx.cx, extra_attrs);
+                    apply(bx.cx, meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(bx.cx, a);
                     apply(bx.cx, b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(bx.cx, &ArgAttributes::new());
                     }
@@ -574,8 +617,12 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
 impl From<Conv> for llvm::CallConv {
     fn from(conv: Conv) -> Self {
         match conv {
-            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
-            Conv::RustCold => llvm::ColdCallConv,
+            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
+                llvm::CCallConv
+            }
+            Conv::Cold => llvm::ColdCallConv,
+            Conv::PreserveMost => llvm::PreserveMost,
+            Conv::PreserveAll => llvm::PreserveAll,
             Conv::AmdGpuKernel => llvm::AmdGpuKernel,
             Conv::AvrInterrupt => llvm::AvrInterrupt,
             Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index a57508815d6..db5c1388ef8 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -9,7 +9,7 @@ use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{DebugInfo, OomStrategy};
 
 use crate::debuginfo;
-use crate::llvm::{self, False, True};
+use crate::llvm::{self, Context, False, Module, True, Type};
 use crate::ModuleLlvm;
 
 pub(crate) unsafe fn codegen(
@@ -28,14 +28,13 @@ pub(crate) unsafe fn codegen(
         tws => bug!("Unsupported target word size for int: {}", tws),
     };
     let i8 = llvm::LLVMInt8TypeInContext(llcx);
-    let i8p = llvm::LLVMPointerType(i8, 0);
-    let void = llvm::LLVMVoidTypeInContext(llcx);
+    let i8p = llvm::LLVMPointerTypeInContext(llcx, 0);
 
     if kind == AllocatorKind::Default {
         for method in ALLOCATOR_METHODS {
             let mut args = Vec::with_capacity(method.inputs.len());
-            for ty in method.inputs.iter() {
-                match *ty {
+            for input in method.inputs.iter() {
+                match input.ty {
                     AllocatorTy::Layout => {
                         args.push(usize); // size
                         args.push(usize); // align
@@ -54,102 +53,25 @@ pub(crate) unsafe fn codegen(
                     panic!("invalid allocator output")
                 }
             };
-            let ty = llvm::LLVMFunctionType(
-                output.unwrap_or(void),
-                args.as_ptr(),
-                args.len() as c_uint,
-                False,
-            );
-            let name = global_fn_name(method.name);
-            let llfn =
-                llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
-
-            if tcx.sess.target.default_hidden_visibility {
-                llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-            }
-            if tcx.sess.must_emit_unwind_tables() {
-                let uwtable = attributes::uwtable_attr(llcx);
-                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
-            }
 
-            let callee = default_fn_name(method.name);
-            let callee =
-                llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
-            llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
-
-            let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
-
-            let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
-            llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
-            let args = args
-                .iter()
-                .enumerate()
-                .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
-                .collect::<Vec<_>>();
-            let ret = llvm::LLVMRustBuildCall(
-                llbuilder,
-                ty,
-                callee,
-                args.as_ptr(),
-                args.len() as c_uint,
-                [].as_ptr(),
-                0 as c_uint,
-            );
-            llvm::LLVMSetTailCall(ret, True);
-            if output.is_some() {
-                llvm::LLVMBuildRet(llbuilder, ret);
-            } else {
-                llvm::LLVMBuildRetVoid(llbuilder);
-            }
-            llvm::LLVMDisposeBuilder(llbuilder);
+            let from_name = global_fn_name(method.name);
+            let to_name = default_fn_name(method.name);
+
+            create_wrapper_function(tcx, llcx, llmod, &from_name, &to_name, &args, output, false);
         }
     }
 
     // rust alloc error handler
-    let args = [usize, usize]; // size, align
-
-    let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
-    let name = "__rust_alloc_error_handler";
-    let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
-    // -> ! DIFlagNoReturn
-    let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
-    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
-
-    if tcx.sess.target.default_hidden_visibility {
-        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-    }
-    if tcx.sess.must_emit_unwind_tables() {
-        let uwtable = attributes::uwtable_attr(llcx);
-        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
-    }
-
-    let callee = alloc_error_handler_name(alloc_error_handler_kind);
-    let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
-    // -> ! DIFlagNoReturn
-    attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
-    llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
-
-    let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
-
-    let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
-    llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
-    let args = args
-        .iter()
-        .enumerate()
-        .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
-        .collect::<Vec<_>>();
-    let ret = llvm::LLVMRustBuildCall(
-        llbuilder,
-        ty,
-        callee,
-        args.as_ptr(),
-        args.len() as c_uint,
-        [].as_ptr(),
-        0 as c_uint,
+    create_wrapper_function(
+        tcx,
+        llcx,
+        llmod,
+        "__rust_alloc_error_handler",
+        &alloc_error_handler_name(alloc_error_handler_kind),
+        &[usize, usize], // size, align
+        None,
+        true,
     );
-    llvm::LLVMSetTailCall(ret, True);
-    llvm::LLVMBuildRetVoid(llbuilder);
-    llvm::LLVMDisposeBuilder(llbuilder);
 
     // __rust_alloc_error_handler_should_panic
     let name = OomStrategy::SYMBOL;
@@ -175,3 +97,79 @@ pub(crate) unsafe fn codegen(
         dbg_cx.finalize(tcx.sess);
     }
 }
+
+fn create_wrapper_function(
+    tcx: TyCtxt<'_>,
+    llcx: &Context,
+    llmod: &Module,
+    from_name: &str,
+    to_name: &str,
+    args: &[&Type],
+    output: Option<&Type>,
+    no_return: bool,
+) {
+    unsafe {
+        let ty = llvm::LLVMFunctionType(
+            output.unwrap_or_else(|| llvm::LLVMVoidTypeInContext(llcx)),
+            args.as_ptr(),
+            args.len() as c_uint,
+            False,
+        );
+        let llfn = llvm::LLVMRustGetOrInsertFunction(
+            llmod,
+            from_name.as_ptr().cast(),
+            from_name.len(),
+            ty,
+        );
+        let no_return = if no_return {
+            // -> ! DIFlagNoReturn
+            let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
+            Some(no_return)
+        } else {
+            None
+        };
+
+        if tcx.sess.target.default_hidden_visibility {
+            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+        }
+        if tcx.sess.must_emit_unwind_tables() {
+            let uwtable = attributes::uwtable_attr(llcx);
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+        }
+
+        let callee =
+            llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty);
+        if let Some(no_return) = no_return {
+            // -> ! DIFlagNoReturn
+            attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
+        }
+        llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+        let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+        let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+        llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+        let args = args
+            .iter()
+            .enumerate()
+            .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+            .collect::<Vec<_>>();
+        let ret = llvm::LLVMRustBuildCall(
+            llbuilder,
+            ty,
+            callee,
+            args.as_ptr(),
+            args.len() as c_uint,
+            [].as_ptr(),
+            0 as c_uint,
+        );
+        llvm::LLVMSetTailCall(ret, True);
+        if output.is_some() {
+            llvm::LLVMBuildRet(llbuilder, ret);
+        } else {
+            llvm::LLVMBuildRetVoid(llbuilder);
+        }
+        llvm::LLVMDisposeBuilder(llbuilder);
+    }
+}
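
At the Rust level, each wrapper emitted by the new `create_wrapper_function` is morally a hidden-visibility forwarder whose body is a single tail call. A sketch with made-up names (`demo_rust_alloc` and `demo_default_alloc` are hypothetical; the real from/to pairs come from `global_fn_name`/`default_fn_name`):

    // Sketch only: the generated LLVM wrapper corresponds to a forwarder
    // like this, with the real symbol names substituted.
    use std::alloc::{alloc, Layout};

    unsafe extern "C" fn demo_default_alloc(size: usize, align: usize) -> *mut u8 {
        unsafe { alloc(Layout::from_size_align_unchecked(size, align)) }
    }

    #[no_mangle]
    pub unsafe extern "C" fn demo_rust_alloc(size: usize, align: usize) -> *mut u8 {
        // the generated IR marks this forwarding call as a tail call
        unsafe { demo_default_alloc(size, align) }
    }
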
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 2a6ad1be763..1323261ae92 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -44,9 +44,10 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     let is_target_supported = |reg_class: InlineAsmRegClass| {
                         for &(_, feature) in reg_class.supported_types(asm_arch) {
                             if let Some(feature) = feature {
-                                let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
-                                if self.tcx.sess.target_features.contains(&feature)
-                                    || codegen_fn_attrs.target_features.contains(&feature)
+                                if self
+                                    .tcx
+                                    .asm_target_features(instance.def_id())
+                                    .contains(&feature)
                                 {
                                     return true;
                                 }
@@ -261,6 +262,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 InlineAsmArch::M68k => {
                     constraints.push("~{ccr}".to_string());
                 }
+                InlineAsmArch::CSKY => {}
             }
         }
         if !options.contains(InlineAsmOptions::NOMEM) {
@@ -693,6 +695,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
             InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
             InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                 bug!("LLVM backend does not support SPIR-V")
             }
@@ -792,6 +796,7 @@ fn modifier_to_llvm(
             bug!("LLVM backend does not support SPIR-V")
         }
         InlineAsmRegClass::M68k(_) => None,
+        InlineAsmRegClass::CSKY(_) => None,
         InlineAsmRegClass::Err => unreachable!(),
     }
 }
@@ -868,6 +873,8 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
         }
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 4c69b9503a2..b6c01545f30 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -128,7 +128,10 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attr
 
         // The function name varies on platforms.
         // See test/CodeGen/mcount.c in clang.
-        let mcount_name = cx.sess().target.mcount.as_ref();
+        let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
+            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
+            None => cx.sess().target.mcount.as_ref(),
+        };
 
         attrs.push(llvm::CreateAttrStringValue(
             cx.llcx,
@@ -363,50 +366,44 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
         || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
     {
-        if llvm_util::get_version() >= (15, 0, 0) {
-            to_add.push(create_alloc_family_attr(cx.llcx));
-            // apply to argument place instead of function
-            let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
-            to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
-            let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
-            if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
-                flags |= AllocKindFlags::Uninitialized;
-            } else {
-                flags |= AllocKindFlags::Zeroed;
-            }
-            to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        // apply to argument place instead of function
+        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
+        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
+        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+            flags |= AllocKindFlags::Uninitialized;
+        } else {
+            flags |= AllocKindFlags::Zeroed;
         }
+        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
         // apply to return place instead of function (unlike all other attributes applied in this function)
         let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
         attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
-        if llvm_util::get_version() >= (15, 0, 0) {
-            to_add.push(create_alloc_family_attr(cx.llcx));
-            to_add.push(llvm::CreateAllocKindAttr(
-                cx.llcx,
-                AllocKindFlags::Realloc | AllocKindFlags::Aligned,
-            ));
-            // applies to argument place instead of function place
-            let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
-            // apply to argument place instead of function
-            let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
-            to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
-        }
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        to_add.push(llvm::CreateAllocKindAttr(
+            cx.llcx,
+            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
+        ));
+        // applies to argument place instead of function place
+        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+        // apply to argument place instead of function
+        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
+        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
         let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
         attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
-        if llvm_util::get_version() >= (15, 0, 0) {
-            to_add.push(create_alloc_family_attr(cx.llcx));
-            to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
-            // applies to argument place instead of function place
-            let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
-            attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
-        }
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
+        // applies to argument place instead of function place
+        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
         to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
@@ -450,7 +447,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     let mut function_features = function_features
         .iter()
         .flat_map(|feat| {
-            llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{}", f))
+            llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{f}"))
         })
         .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
             InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 10bf954d242..f33075a8879 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -56,7 +56,7 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
         "x86" => LLVMMachineType::I386,
         "aarch64" => LLVMMachineType::ARM64,
         "arm" => LLVMMachineType::ARM,
-        _ => panic!("unsupported cpu type {}", cpu),
+        _ => panic!("unsupported cpu type {cpu}"),
     }
 }
 
@@ -128,7 +128,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
         let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" };
         let output_path = {
             let mut output_path: PathBuf = tmpdir.to_path_buf();
-            output_path.push(format!("{}{}", lib_name, name_suffix));
+            output_path.push(format!("{lib_name}{name_suffix}"));
             output_path.with_extension("lib")
         };
 
@@ -156,7 +156,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
             // functions. Therefore, use binutils to create the import library instead,
             // by writing a .DEF file to the temp dir and calling binutils's dlltool.
             let def_file_path =
-                tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
+                tmpdir.join(format!("{lib_name}{name_suffix}")).with_extension("def");
 
             let def_file_content = format!(
                 "EXPORTS\n{}",
@@ -164,7 +164,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
                     .into_iter()
                     .map(|(name, ordinal)| {
                         match ordinal {
-                            Some(n) => format!("{} @{} NONAME", name, n),
+                            Some(n) => format!("{name} @{n} NONAME"),
                             None => name,
                         }
                     })
@@ -367,7 +367,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
                 match addition {
                     Addition::File { path, name_in_archive } => {
                         let path = CString::new(path.to_str().unwrap())?;
-                        let name = CString::new(name_in_archive.clone())?;
+                        let name = CString::new(name_in_archive.as_bytes())?;
                         members.push(llvm::LLVMRustArchiveMemberNew(
                             path.as_ptr(),
                             name.as_ptr(),
@@ -435,7 +435,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
 }
 
 fn string_to_io_error(s: String) -> io::Error {
-    io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+    io::Error::new(io::ErrorKind::Other, format!("bad archive: {s}"))
 }
 
 fn find_binutils_dlltool(sess: &Session) -> OsString {
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index d7dd98d7938..cb5acf79135 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,13 +1,15 @@
-use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
+use crate::back::write::{
+    self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+};
 use crate::errors::{
-    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
 };
 use crate::llvm::{self, build_string};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
 use object::read::archive::ArchiveFile;
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
 use rustc_codegen_ssa::back::symbol_export;
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
 use rustc_data_structures::fx::FxHashMap;
@@ -34,8 +36,12 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
-        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
-        CrateType::Rlib | CrateType::ProcMacro => false,
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Staticlib
+        | CrateType::Cdylib
+        | CrateType::ProcMacro => true,
+        CrateType::Rlib => false,
     }
 }
 
@@ -85,6 +91,11 @@ fn prepare_lto(
                     diag_handler.emit_err(LtoDylib);
                     return Err(FatalError);
                 }
+            } else if *crate_type == CrateType::ProcMacro {
+                if !cgcx.opts.unstable_opts.dylib_lto {
+                    diag_handler.emit_err(LtoProcMacro);
+                    return Err(FatalError);
+                }
             }
         }
 
@@ -120,6 +131,7 @@ fn prepare_lto(
                 info!("adding bitcode from {}", name);
                 match get_bitcode_slice_from_object_data(
                     child.data(&*archive_data).expect("corrupt rlib"),
+                    cgcx,
                 ) {
                     Ok(data) => {
                         let module = SerializedModule::FromRlib(data.to_vec());
@@ -141,10 +153,29 @@ fn prepare_lto(
     Ok((symbols_below_threshold, upstream_modules))
 }
 
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
+fn get_bitcode_slice_from_object_data<'a>(
+    obj: &'a [u8],
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<&'a [u8], LtoBitcodeFromRlib> {
+    // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
+    // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
+    // the relevant magic strings here and return.
+    if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
+        return Ok(obj);
+    }
+    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
+    // which in the public API for sections gets treated as part of the section name, but internally
+    // in MachOObjectFile.cpp gets treated separately.
+    let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
     let mut len = 0;
-    let data =
-        unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+    let data = unsafe {
+        llvm::LLVMRustGetSliceFromObjectDataByName(
+            obj.as_ptr(),
+            obj.len(),
+            section_name.as_ptr(),
+            &mut len,
+        )
+    };
     if !data.is_null() {
         assert!(len != 0);
         let bc = unsafe { slice::from_raw_parts(data, len) };
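
The two magic strings sniffed above are the standard LLVM bitcode signatures: raw bitcode begins with `BC\xC0\xDE`, and the bitcode wrapper header begins with the little-endian encoding of the 0x0B17C0DE magic. As a standalone sketch:

    // Minimal sketch of the sniff above: LLVM bitcode is either wrapped
    // (0x0B17C0DE magic, stored little-endian) or raw ("BC\xC0\xDE").
    fn is_llvm_bitcode(data: &[u8]) -> bool {
        data.starts_with(b"\xDE\xC0\x17\x0B") || data.starts_with(b"BC\xC0\xDE")
    }
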
@@ -166,7 +197,7 @@ fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFro
 /// for further optimization.
 pub(crate) fn run_fat(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
-    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
     cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
 ) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
     let diag_handler = cgcx.create_diag_handler();
@@ -220,7 +251,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBu
 fn fat_lto(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     diag_handler: &Handler,
-    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
     cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
     mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
     symbols_below_threshold: &[*const libc::c_char],
@@ -245,8 +276,8 @@ fn fat_lto(
     }));
     for module in modules {
         match module {
-            FatLTOInput::InMemory(m) => in_memory.push(m),
-            FatLTOInput::Serialized { name, buffer } => {
+            FatLtoInput::InMemory(m) => in_memory.push(m),
+            FatLtoInput::Serialized { name, buffer } => {
                 info!("pushing serialized module {:?}", name);
                 let buffer = SerializedModule::Local(buffer);
                 serialized_modules.push((buffer, CString::new(name).unwrap()));
@@ -332,7 +363,7 @@ fn fat_lto(
             let _timer = cgcx
                 .prof
                 .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
-                    recorder.record_arg(format!("{:?}", name))
+                    recorder.record_arg(format!("{name:?}"))
                 });
             info!("linking {:?}", name);
             let data = bc_decoded.data();
@@ -441,7 +472,7 @@ fn thin_lto(
 
         for (i, (name, buffer)) in modules.into_iter().enumerate() {
             info!("local module: {} - {}", i, name);
-            let cname = CString::new(name.clone()).unwrap();
+            let cname = CString::new(name.as_bytes()).unwrap();
             thin_modules.push(llvm::ThinLTOModule {
                 identifier: cname.as_ptr(),
                 data: buffer.data().as_ptr(),
@@ -583,7 +614,7 @@ pub(crate) fn run_pass_manager(
     module: &mut ModuleCodegen<ModuleLlvm>,
     thin: bool,
 ) -> Result<(), FatalError> {
-    let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
     let config = cgcx.config(module.kind);
 
     // Now we have one massive module inside of llmod. Time to run the
@@ -787,7 +818,7 @@ impl ThinLTOKeysMap {
         let file = File::create(path)?;
         let mut writer = io::BufWriter::new(file);
         for (module, key) in &self.keys {
-            writeln!(writer, "{} {}", module, key)?;
+            writeln!(writer, "{module} {key}")?;
         }
         Ok(())
     }
@@ -801,7 +832,7 @@ impl ThinLTOKeysMap {
             let mut split = line.split(' ');
             let module = split.next().unwrap();
             let key = split.next().unwrap();
-            assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+            assert_eq!(split.next(), None, "Expected two space-separated values, found {line:?}");
             keys.insert(module.to_string(), key.to_string());
         }
         Ok(Self { keys })
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
new file mode 100644
index 00000000000..36484c3c3fc
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -0,0 +1,103 @@
+use std::{
+    ffi::{c_char, CStr},
+    marker::PhantomData,
+    ops::Deref,
+    ptr::NonNull,
+};
+
+use rustc_data_structures::small_c_str::SmallCStr;
+
+use crate::{errors::LlvmError, llvm};
+
+/// Responsible for safely creating and disposing llvm::TargetMachine via ffi functions.
+/// Not cloneable as there is no clone function for llvm::TargetMachine.
+#[repr(transparent)]
+pub struct OwnedTargetMachine {
+    tm_unique: NonNull<llvm::TargetMachine>,
+    phantom: PhantomData<llvm::TargetMachine>,
+}
+
+impl OwnedTargetMachine {
+    pub fn new(
+        triple: &CStr,
+        cpu: &CStr,
+        features: &CStr,
+        abi: &CStr,
+        model: llvm::CodeModel,
+        reloc: llvm::RelocModel,
+        level: llvm::CodeGenOptLevel,
+        use_soft_fp: bool,
+        function_sections: bool,
+        data_sections: bool,
+        unique_section_names: bool,
+        trap_unreachable: bool,
+        singletree: bool,
+        asm_comments: bool,
+        emit_stack_size_section: bool,
+        relax_elf_relocations: bool,
+        use_init_array: bool,
+        split_dwarf_file: &CStr,
+        output_obj_file: &CStr,
+        debug_info_compression: &CStr,
+        force_emulated_tls: bool,
+        args_cstr_buff: &[u8],
+    ) -> Result<Self, LlvmError<'static>> {
+        assert!(args_cstr_buff.len() > 0);
+        assert!(
+            *args_cstr_buff.last().unwrap() == 0,
+            "The last character must be a null terminator."
+        );
+
+        // SAFETY: llvm::LLVMRustCreateTargetMachine copies the pointed-to data
+        let tm_ptr = unsafe {
+            llvm::LLVMRustCreateTargetMachine(
+                triple.as_ptr(),
+                cpu.as_ptr(),
+                features.as_ptr(),
+                abi.as_ptr(),
+                model,
+                reloc,
+                level,
+                use_soft_fp,
+                function_sections,
+                data_sections,
+                unique_section_names,
+                trap_unreachable,
+                singletree,
+                asm_comments,
+                emit_stack_size_section,
+                relax_elf_relocations,
+                use_init_array,
+                split_dwarf_file.as_ptr(),
+                output_obj_file.as_ptr(),
+                debug_info_compression.as_ptr(),
+                force_emulated_tls,
+                args_cstr_buff.as_ptr() as *const c_char,
+                args_cstr_buff.len(),
+            )
+        };
+
+        NonNull::new(tm_ptr)
+            .map(|tm_unique| Self { tm_unique, phantom: PhantomData })
+            .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) })
+    }
+}
+
+impl Deref for OwnedTargetMachine {
+    type Target = llvm::TargetMachine;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        unsafe { self.tm_unique.as_ref() }
+    }
+}
+
+impl Drop for OwnedTargetMachine {
+    fn drop(&mut self) {
+        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        // OwnedTargetMachine is not copyable so there is no double free or use after free
+        unsafe {
+            llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
+        }
+    }
+}
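
This new file is a textbook RAII-over-FFI wrapper: `NonNull` encodes that the handle is never null, `PhantomData` marks logical ownership of the pointee, `Deref` hands out shared references, and `Drop` is the single place the dispose function runs. The same pattern in miniature, with a hypothetical `ffi_create`/`ffi_dispose` pair backed by a `Box` so the sketch is runnable:

    use std::{marker::PhantomData, ops::Deref, ptr::NonNull};

    // Hypothetical FFI surface, standing in for LLVMRustCreateTargetMachine
    // and LLVMRustDisposeTargetMachine.
    struct Handle(u32);
    unsafe fn ffi_create(v: u32) -> *mut Handle {
        Box::into_raw(Box::new(Handle(v)))
    }
    unsafe fn ffi_dispose(p: *mut Handle) {
        unsafe { drop(Box::from_raw(p)) }
    }

    struct Owned {
        inner: NonNull<Handle>,
        _owns: PhantomData<Handle>, // we logically own the pointee
    }

    impl Owned {
        fn new(v: u32) -> Option<Self> {
            // a null return from the FFI constructor becomes None
            NonNull::new(unsafe { ffi_create(v) })
                .map(|inner| Owned { inner, _owns: PhantomData })
        }
    }

    impl Deref for Owned {
        type Target = Handle;
        fn deref(&self) -> &Handle {
            // valid for as long as `self` lives, by construction
            unsafe { self.inner.as_ref() }
        }
    }

    impl Drop for Owned {
        fn drop(&mut self) {
            // sole owner, not Clone: disposed exactly once
            unsafe { ffi_dispose(self.inner.as_ptr()) }
        }
    }
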
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 0f5e975445f..c778a6e017f 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,18 +1,22 @@
 use crate::back::lto::ThinBuffer;
+use crate::back::owned_target_machine::OwnedTargetMachine;
 use crate::back::profiling::{
     selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
 };
 use crate::base;
 use crate::common;
-use crate::consts;
 use crate::errors::{
-    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+    WithLlvmError, WriteBytecode,
 };
 use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::llvm_util;
 use crate::type_::Type;
 use crate::LlvmCodegenBackend;
 use crate::ModuleLlvm;
+use llvm::{
+    LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
 use rustc_codegen_ssa::back::link::ensure_removed;
 use rustc_codegen_ssa::back::write::{
     BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
@@ -95,8 +99,8 @@ pub fn write_output_file<'ll>(
     }
 }
 
-pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
-    let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine {
+    let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
     // Can't use query system here quite yet because this function is invoked before the query
     // system/tcx is set up.
     let features = llvm_util::global_llvm_features(sess, false);
@@ -104,7 +108,7 @@ pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm:
         .unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
 }
 
-pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
     let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
         tcx.output_filenames(()).split_dwarf_path(
             tcx.sess.split_debuginfo(),
@@ -114,7 +118,11 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll
     } else {
         None
     };
-    let config = TargetMachineFactoryConfig { split_dwarf_file };
+
+    let output_obj_file =
+        Some(tcx.output_filenames(()).temp_path(OutputType::Object, Some(mod_name)));
+    let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };
+
     target_machine_factory(
         &tcx.sess,
         tcx.backend_optimization_level(()),
@@ -217,36 +225,73 @@ pub fn target_machine_factory(
 
     let force_emulated_tls = sess.target.force_emulated_tls;
 
+    // Copy the exe path, followed by all the command-line args, into one buffer,
+    // NUL-terminating each so they can be used as C strings.
+    let args_cstr_buff = {
+        let mut args_cstr_buff: Vec<u8> = Vec::new();
+        let exe_path = std::env::current_exe().unwrap_or_default();
+        let exe_path_str = exe_path.into_os_string().into_string().unwrap_or_default();
+
+        args_cstr_buff.extend_from_slice(exe_path_str.as_bytes());
+        args_cstr_buff.push(0);
+
+        for arg in sess.expanded_args.iter() {
+            args_cstr_buff.extend_from_slice(arg.as_bytes());
+            args_cstr_buff.push(0);
+        }
+
+        args_cstr_buff
+    };
+
+    let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+    match sess.opts.debuginfo_compression {
+        rustc_session::config::DebugInfoCompression::Zlib => {
+            if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zlib" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::Zstd => {
+            if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zstd" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::None => {}
+    };
+    let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
     Arc::new(move |config: TargetMachineFactoryConfig| {
-        let split_dwarf_file =
-            path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
-        let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
-
-        let tm = unsafe {
-            llvm::LLVMRustCreateTargetMachine(
-                triple.as_ptr(),
-                cpu.as_ptr(),
-                features.as_ptr(),
-                abi.as_ptr(),
-                code_model,
-                reloc_model,
-                opt_level,
-                use_softfp,
-                ffunction_sections,
-                fdata_sections,
-                funique_section_names,
-                trap_unreachable,
-                singlethread,
-                asm_comments,
-                emit_stack_size_section,
-                relax_elf_relocations,
-                use_init_array,
-                split_dwarf_file.as_ptr(),
-                force_emulated_tls,
-            )
+        let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
+            let path = path_mapping.map_prefix(path.unwrap_or_default()).0;
+            CString::new(path.to_str().unwrap()).unwrap()
         };
 
-        tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
+        let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
+        let output_obj_file = path_to_cstring_helper(config.output_obj_file);
+
+        OwnedTargetMachine::new(
+            &triple,
+            &cpu,
+            &features,
+            &abi,
+            code_model,
+            reloc_model,
+            opt_level,
+            use_softfp,
+            ffunction_sections,
+            fdata_sections,
+            funique_section_names,
+            trap_unreachable,
+            singlethread,
+            asm_comments,
+            emit_stack_size_section,
+            relax_elf_relocations,
+            use_init_array,
+            &split_dwarf_file,
+            &output_obj_file,
+            &debuginfo_compression,
+            force_emulated_tls,
+            &args_cstr_buff,
+        )
     })
 }
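
The `args_cstr_buff` built above is simply a sequence of NUL-terminated strings packed back-to-back (hence the assertions in `OwnedTargetMachine::new` that the buffer is non-empty and ends in a NUL byte). A minimal round-trip sketch of that layout:

    // Pack strings as consecutive NUL-terminated runs, mirroring how
    // `args_cstr_buff` is built above.
    fn pack(args: &[&str]) -> Vec<u8> {
        let mut buf = Vec::new();
        for arg in args {
            buf.extend_from_slice(arg.as_bytes());
            buf.push(0); // terminator for this entry
        }
        buf
    }

    // Recover the entries, as a consumer on the far side of the FFI would.
    fn unpack(buf: &[u8]) -> Vec<&str> {
        buf.split(|&b| b == 0)
            .filter(|chunk| !chunk.is_empty()) // the trailing NUL yields one empty chunk
            .map(|chunk| std::str::from_utf8(chunk).unwrap())
            .collect()
    }
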
 
@@ -259,7 +304,7 @@ pub(crate) fn save_temp_bitcode(
         return;
     }
     unsafe {
-        let ext = format!("{}.bc", name);
+        let ext = format!("{name}.bc");
         let cgu = Some(&module.name[..]);
         let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
         let cstr = path_to_c_string(&path);
@@ -321,6 +366,7 @@ impl<'a> DiagnosticHandlers<'a> {
             })
             .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
 
+        let pgo_available = cgcx.opts.cg.profile_use.is_some();
         let data = Box::into_raw(Box::new((cgcx, handler)));
         unsafe {
             let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
@@ -334,6 +380,7 @@ impl<'a> DiagnosticHandlers<'a> {
                 // The `as_ref()` is important here, otherwise the `CString` will be dropped
                 // too soon!
                 remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
+                pgo_available,
             );
             DiagnosticHandlers { data, llcx, old_handler }
         }
@@ -382,29 +429,22 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
         }
 
         llvm::diagnostic::Optimization(opt) => {
-            let enabled = match cgcx.remark {
-                Passes::All => true,
-                Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
-            };
-
-            if enabled {
-                diag_handler.emit_note(FromLlvmOptimizationDiag {
-                    filename: &opt.filename,
-                    line: opt.line,
-                    column: opt.column,
-                    pass_name: &opt.pass_name,
-                    kind: match opt.kind {
-                        OptimizationDiagnosticKind::OptimizationRemark => "success",
-                        OptimizationDiagnosticKind::OptimizationMissed
-                        | OptimizationDiagnosticKind::OptimizationFailure => "missed",
-                        OptimizationDiagnosticKind::OptimizationAnalysis
-                        | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
-                        | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
-                        OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
-                    },
-                    message: &opt.message,
-                });
-            }
+            diag_handler.emit_note(FromLlvmOptimizationDiag {
+                filename: &opt.filename,
+                line: opt.line,
+                column: opt.column,
+                pass_name: &opt.pass_name,
+                kind: match opt.kind {
+                    OptimizationDiagnosticKind::OptimizationRemark => "success",
+                    OptimizationDiagnosticKind::OptimizationMissed
+                    | OptimizationDiagnosticKind::OptimizationFailure => "missed",
+                    OptimizationDiagnosticKind::OptimizationAnalysis
+                    | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
+                    | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
+                    OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
+                },
+                message: &opt.message,
+            });
         }
         llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
             let message = llvm::build_string(|s| {
@@ -478,6 +518,8 @@ pub(crate) unsafe fn llvm_optimize(
         Some(llvm::SanitizerOptions {
             sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
             sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+            sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI),
+            sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI),
             sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
             sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
             sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
@@ -513,6 +555,7 @@ pub(crate) unsafe fn llvm_optimize(
         &*module.module_llvm.tm,
         to_pass_builder_opt_level(opt_level),
         opt_stage,
+        cgcx.opts.cg.linker_plugin_lto.enabled(),
         config.no_prepopulate_passes,
         config.verify_llvm_ir,
         using_thin_buffers,
@@ -713,7 +756,7 @@ pub(crate) unsafe fn codegen(
 
                 let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
 
-                if write!(cursor, "{:#}", demangled).is_err() {
+                if write!(cursor, "{demangled:#}").is_err() {
                     // Possible only if provided buffer is not big enough
                     return 0;
                 }
@@ -834,7 +877,7 @@ pub(crate) unsafe fn codegen(
 }
 
 fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
-    let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
+    let mut asm = format!(".section {section_name},\"{section_flags}\"\n").into_bytes();
     asm.extend_from_slice(b".ascii \"");
     asm.reserve(data.len());
     for &byte in data {
@@ -856,6 +899,27 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
     asm
 }
 
+fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-ios")
+        || cgcx.opts.target_triple.triple().contains("-darwin")
+        || cgcx.opts.target_triple.triple().contains("-tvos")
+        || cgcx.opts.target_triple.triple().contains("-watchos")
+}
+
+fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-aix")
+}
+
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+    if target_is_apple(cgcx) {
+        "__LLVM,__bitcode\0"
+    } else if target_is_aix(cgcx) {
+        ".ipa\0"
+    } else {
+        ".llvmbc\0"
+    }
+}
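
These helpers centralize the triple sniffing that was previously inlined in `embed_bitcode` (see the hunk below). The returned literals are NUL-terminated so they can be handed to `LLVMSetSection` without allocating an intermediate `CString`; a standalone sketch of the same trick, with illustrative triples:

    // Sketch: choosing a NUL-terminated section name by target triple. The
    // trailing `\0` lets the &str be passed to a C API via .as_ptr().cast().
    fn bitcode_section_name(triple: &str) -> &'static str {
        if triple.contains("-darwin") || triple.contains("-ios") {
            "__LLVM,__bitcode\0" // Mach-O: segment,section
        } else if triple.contains("-aix") {
            ".ipa\0" // XCOFF
        } else {
            ".llvmbc\0" // ELF/COFF
        }
    }

    fn main() {
        let name = bitcode_section_name("x86_64-unknown-linux-gnu");
        assert_eq!(name.as_bytes().last(), Some(&0)); // safe to hand to C as-is
    }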
+
 /// Embed the bitcode of an LLVM module in the LLVM module itself.
 ///
 /// This is done primarily for iOS where it appears to be standard to compile C
@@ -916,11 +980,8 @@ unsafe fn embed_bitcode(
     // Unfortunately, LLVM provides no way to set custom section flags. For ELF
     // and COFF we emit the sections using module level inline assembly for that
     // reason (see issue #90326 for historical background).
-    let is_aix = cgcx.opts.target_triple.triple().contains("-aix");
-    let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
-        || cgcx.opts.target_triple.triple().contains("-darwin")
-        || cgcx.opts.target_triple.triple().contains("-tvos")
-        || cgcx.opts.target_triple.triple().contains("-watchos");
+    let is_aix = target_is_aix(cgcx);
+    let is_apple = target_is_apple(cgcx);
     if is_apple
         || is_aix
         || cgcx.opts.target_triple.triple().starts_with("wasm")
@@ -935,13 +996,7 @@ unsafe fn embed_bitcode(
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
 
-        let section = if is_apple {
-            "__LLVM,__bitcode\0"
-        } else if is_aix {
-            ".ipa\0"
-        } else {
-            ".llvmbc\0"
-        };
+        let section = bitcode_section_name(cgcx);
         llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
         llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
         llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
@@ -992,7 +1047,7 @@ fn create_msvc_imps(
     let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
 
     unsafe {
-        let i8p_ty = Type::i8p_llcx(llcx);
+        let ptr_ty = Type::ptr_llcx(llcx);
         let globals = base::iter_globals(llmod)
             .filter(|&val| {
                 llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
@@ -1012,8 +1067,8 @@ fn create_msvc_imps(
             .collect::<Vec<_>>();
 
         for (imp_name, val) in globals {
-            let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
-            llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+            let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast());
+            llvm::LLVMSetInitializer(imp, val);
             llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 5b5f81c0329..b659fd02eec 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
             // happen after the llvm.used variables are created.
             for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
                 unsafe {
-                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
-                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                    llvm::LLVMReplaceAllUsesWith(old_g, new_g);
                     llvm::LLVMDeleteGlobal(old_g);
                 }
             }
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index d55992bf092..4f9b86ec20a 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -3,6 +3,7 @@ use crate::attributes;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
+use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -652,7 +653,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) -> &'ll Value {
         debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
-        let ptr = self.check_store(val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             let align =
@@ -682,7 +683,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: Size,
     ) {
         debug!("Store {:?} -> {:?}", val, ptr);
-        let ptr = self.check_store(val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMRustBuildAtomicStore(
                 self.llbuilder,
@@ -873,8 +874,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.type_i8p());
-        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemCpy(
                 self.llbuilder,
@@ -900,8 +899,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.type_i8p());
-        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemMove(
                 self.llbuilder,
@@ -924,7 +921,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) {
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let ptr = self.pointercast(ptr, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemSet(
                 self.llbuilder,
@@ -981,7 +977,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let landing_pad = self.landing_pad(ty, pers_fn, 0);
         unsafe {
             llvm::LLVMSetCleanup(landing_pad, llvm::True);
@@ -990,14 +986,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let landing_pad = self.landing_pad(ty, pers_fn, 1);
-        self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[]));
+        self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[]));
         (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
     }
 
     fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let mut exn = self.const_poison(ty);
         exn = self.insert_value(exn, exn0, 0);
         exn = self.insert_value(exn, exn1, 1);
@@ -1161,7 +1157,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
         let llty = self.cx.type_func(
-            &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
             self.cx.type_void(),
         );
         let args = &[fn_name, hash, num_counters, index];
@@ -1230,9 +1226,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
-    fn do_not_inline(&mut self, llret: &'ll Value) {
-        let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
-        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+    fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+        if llvm_util::get_version() < (17, 0, 2) {
+            // Work around https://github.com/llvm/llvm-project/issues/66984.
+            let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+        } else {
+            // Cleanup is always the cold path.
+            let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
+        }
     }
 }
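
The replacement keys the callsite attribute off the linked LLVM version: below 17.0.2 it keeps the old `noinline` workaround for llvm-project issue 66984, and otherwise marks the cleanup call `cold`. A reduced sketch of that gating, with the attribute machinery elided:

    // Sketch: gating a workaround on a minimum library version.
    // Tuples compare lexicographically, so (16, 0, 5) < (17, 0, 2) holds.
    fn cleanup_callsite_attr(llvm_version: (u32, u32, u32)) -> &'static str {
        if llvm_version < (17, 0, 2) {
            "noinline" // work around llvm-project#66984 on older LLVM
        } else {
            "cold" // cleanup paths are rarely taken, but may still be inlined
        }
    }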
 
@@ -1387,25 +1390,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         ret.expect("LLVM does not have support for catchret")
     }
 
-    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
-        let dest_ptr_ty = self.cx.val_ty(ptr);
-        let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
-        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
-        if dest_ptr_ty == stored_ptr_ty {
-            ptr
-        } else {
-            debug!(
-                "type mismatch in store. \
-                    Expected {:?}, got {:?}; inserting bitcast",
-                dest_ptr_ty, stored_ptr_ty
-            );
-            self.bitcast(ptr, stored_ptr_ty)
-        }
-    }
-
     fn check_call<'b>(
         &mut self,
         typ: &str,
@@ -1415,9 +1399,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
     ) -> Cow<'b, [&'ll Value]> {
         assert!(
             self.cx.type_kind(fn_ty) == TypeKind::Function,
-            "builder::{} not passed a function, but {:?}",
-            typ,
-            fn_ty
+            "builder::{typ} not passed a function, but {fn_ty:?}"
         );
 
         let param_tys = self.cx.func_params_types(fn_ty);
@@ -1468,7 +1450,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             return;
         }
 
-        let ptr = self.pointercast(ptr, self.cx.type_i8p());
         self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
     }
 
@@ -1509,12 +1490,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
         let instr = if signed { "fptosi" } else { "fptoui" };
         let name = if let Some(vector_length) = vector_length {
-            format!(
-                "llvm.{}.sat.v{}i{}.v{}f{}",
-                instr, vector_length, int_width, vector_length, float_width
-            )
+            format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}")
         } else {
-            format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+            format!("llvm.{instr}.sat.i{int_width}.f{float_width}")
         };
         let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
         self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None)
@@ -1542,9 +1520,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
     ) {
-        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
-        if is_indirect_call && fn_abi.is_some() && self.tcx.sess.is_sanitizer_cfi_enabled() {
-            if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::CFI) {
+        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+        if self.tcx.sess.is_sanitizer_cfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
+            if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::CFI) {
                 return;
             }
 
@@ -1556,7 +1534,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
                 options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
             }
 
-            let typeid = typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
+            let typeid = typeid_for_fnabi(self.tcx, fn_abi, options);
             let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
 
             // Test whether the function pointer is associated with the type identifier.
@@ -1580,25 +1558,26 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
     ) -> Option<llvm::OperandBundleDef<'ll>> {
-        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
-        let kcfi_bundle = if is_indirect_call && self.tcx.sess.is_sanitizer_kcfi_enabled() {
-            if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::KCFI) {
-                return None;
-            }
+        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+        let kcfi_bundle =
+            if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
+                if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI) {
+                    return None;
+                }
 
-            let mut options = TypeIdOptions::empty();
-            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
-                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
-            }
-            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
-                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
-            }
+                let mut options = TypeIdOptions::empty();
+                if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                    options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+                }
+                if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                    options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+                }
 
-            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
-            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
-        } else {
-            None
-        };
+                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+            } else {
+                None
+            };
         kcfi_bundle
     }
 }
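
Both CFI guards are rewritten from `opt.is_some()` followed by `opt.unwrap()` into let-chains that bind the payload once. A toy sketch of the same refactor shape, with a hypothetical `Attrs` type (let-chains are an unstable feature that the compiler itself enables here):

    // Sketch: bind Option payloads once in the condition instead of
    // re-unwrapping them in the body.
    struct Attrs {
        no_sanitize: bool,
    }

    fn should_emit_check(enabled: bool, fn_abi: Option<&str>, fn_attrs: Option<&Attrs>) -> bool {
        if enabled && let Some(_fn_abi) = fn_abi {
            if let Some(attrs) = fn_attrs && attrs.no_sanitize {
                return false; // this call opted out of the sanitizer check
            }
            return true;
        }
        false
    }
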
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 5d3e245f45f..5254c3f9c9a 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -4,13 +4,11 @@
 //! and methods are represented as just a fn ptr and not a full
 //! closure.
 
-use crate::abi::FnAbiLlvmExt;
 use crate::attributes;
 use crate::common;
 use crate::context::CodegenCx;
 use crate::llvm;
 use crate::value::Value;
-use rustc_codegen_ssa::traits::*;
 
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
 use rustc_middle::ty::{self, Instance, TypeVisitableExt};
@@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
 
     let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
-        // Create a fn pointer with the new signature.
-        let llptrty = fn_abi.ptr_to_llvm_type(cx);
-
-        // This is subtle and surprising, but sometimes we have to bitcast
-        // the resulting fn pointer. The reason has to do with external
-        // functions. If you have two crates that both bind the same C
-        // library, they may not use precisely the same types: for
-        // example, they will probably each declare their own structs,
-        // which are distinct types from LLVM's point of view (nominal
-        // types).
-        //
-        // Now, if those two crates are linked into an application, and
-        // they contain inlined code, you can wind up with a situation
-        // where both of those functions wind up being loaded into this
-        // application simultaneously. In that case, the same function
-        // (from LLVM's point of view) requires two types. But of course
-        // LLVM won't allow one function to have two types.
-        //
-        // What we currently do, therefore, is declare the function with
-        // one of the two types (whichever happens to come first) and then
-        // bitcast as needed when the function is referenced to make sure
-        // it has the type we expect.
-        //
-        // This can occur on either a crate-local or crate-external
-        // reference. It also occurs when testing libcore and in some
-        // other weird situations. Annoying.
-        if cx.val_ty(llfn) != llptrty {
-            debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
-            cx.const_ptrcast(llfn, llptrty)
-        } else {
-            debug!("get_fn: not casting pointer!");
-            llfn
-        }
+        llfn
     } else {
         let instance_def_id = instance.def_id();
         let llfn = if tcx.sess.target.arch == "x86" &&
@@ -129,7 +95,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
         unsafe {
             llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
 
-            let is_generic = instance.args.non_erasable_generics().next().is_some();
+            let is_generic =
+                instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
 
             if is_generic {
                 // This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 4d879fbdcb7..0b0816c27b6 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -1,10 +1,9 @@
 //! Code that is useful in various codegen modules.
 
-use crate::consts::{self, const_alloc_to_llvm};
+use crate::consts::const_alloc_to_llvm;
 pub use crate::context::CodegenCx;
 use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
 use crate::type_::Type;
-use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 
 use rustc_ast::Mutability;
@@ -13,7 +12,6 @@ use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
 use rustc_hir::def_id::DefId;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
 use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
@@ -211,11 +209,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             })
             .1;
         let len = s.len();
-        let cs = consts::ptrcast(
-            str_global,
-            self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
-        );
-        (cs, self.const_usize(len as u64))
+        (str_global, self.const_usize(len as u64))
     }
 
     fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
@@ -292,7 +286,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 let llval = unsafe {
                     llvm::LLVMConstInBoundsGEP2(
                         self.type_i8(),
-                        self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+                        self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)),
                         &self.const_usize(offset.bytes()),
                         1,
                     )
@@ -310,10 +304,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         const_alloc_to_llvm(self, alloc)
     }
 
-    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-        consts::ptrcast(val, ty)
-    }
-
     fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
         self.const_bitcast(val, ty)
     }
@@ -322,7 +312,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe {
             llvm::LLVMConstInBoundsGEP2(
                 self.type_i8(),
-                self.const_bitcast(base_addr, self.type_i8p()),
+                base_addr,
                 &self.const_usize(offset.bytes()),
                 1,
             )
@@ -420,10 +410,10 @@ pub(crate) fn i686_decorated_name(
             DllCallingConvention::C => {}
             DllCallingConvention::Stdcall(arg_list_size)
             | DllCallingConvention::Fastcall(arg_list_size) => {
-                write!(&mut decorated_name, "@{}", arg_list_size).unwrap();
+                write!(&mut decorated_name, "@{arg_list_size}").unwrap();
             }
             DllCallingConvention::Vectorcall(arg_list_size) => {
-                write!(&mut decorated_name, "@@{}", arg_list_size).unwrap();
+                write!(&mut decorated_name, "@@{arg_list_size}").unwrap();
             }
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index df52f50f86f..95af2f8ef4a 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -103,7 +103,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                 value: Primitive::Pointer(address_space),
                 valid_range: WrappingRange::full(dl.pointer_size),
             },
-            cx.type_i8p_ext(address_space),
+            cx.type_ptr_ext(address_space),
         ));
         next_offset = offset + pointer_size;
     }
@@ -179,7 +179,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
                 })
             });
             llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
-            llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty));
+            llvm::LLVMSetInitializer(g2, g1);
             g2
         }
     } else if cx.tcx.sess.target.arch == "x86" &&
@@ -193,10 +193,6 @@ fn check_and_apply_linkage<'ll, 'tcx>(
     }
 }
 
-pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-    unsafe { llvm::LLVMConstPointerCast(val, ty) }
-}
-
 impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMConstBitCast(val, ty) }
@@ -238,8 +234,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         assert!(
             !defined_in_current_codegen_unit,
             "consts::get_static() should always hit the cache for \
-                 statics defined in the same CGU, but did not for `{:?}`",
-            def_id
+                 statics defined in the same CGU, but did not for `{def_id:?}`"
         );
 
         let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@@ -251,7 +246,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
             let llty = self.layout_of(ty).llvm_type(self);
             if let Some(g) = self.get_declared_value(sym) {
-                if self.val_ty(g) != self.type_ptr_to(llty) {
+                if self.val_ty(g) != self.type_ptr() {
                     span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
                 }
             }
@@ -552,16 +547,14 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
         }
     }
 
-    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr.
     fn add_used_global(&self, global: &'ll Value) {
-        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
-        self.used_statics.borrow_mut().push(cast);
+        self.used_statics.borrow_mut().push(global);
     }
 
     /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
-    /// an array of i8*.
+    /// an array of ptr.
     fn add_compiler_used_global(&self, global: &'ll Value) {
-        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
-        self.compiler_used_statics.borrow_mut().push(cast);
+        self.compiler_used_statics.borrow_mut().push(global);
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 5bf641055c5..b4b2ab1e1f8 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -10,6 +10,7 @@ use crate::value::Value;
 
 use cstr::cstr;
 use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
@@ -59,17 +60,6 @@ pub struct CodegenCx<'ll, 'tcx> {
     /// Cache of constant strings,
     pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,
 
-    /// Reverse-direction for const ptrs cast from globals.
-    ///
-    /// Key is a Value holding a `*T`,
-    /// Val is a Value holding a `*[T]`.
-    ///
-    /// Needed because LLVM loses pointer->pointee association
-    /// when we ptrcast, and we have to ptrcast during codegen
-    /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
-    /// a pointer to an LLVM array type. Similar for trait objects.
-    pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
-
     /// Cache of emitted const globals (value -> global)
     pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
 
@@ -156,12 +146,23 @@ pub unsafe fn create_module<'ll>(
             target_data_layout = target_data_layout.replace("-n32:64-", "-n64-");
         }
     }
+    if llvm_version < (17, 0, 0) {
+        if sess.target.arch.starts_with("powerpc") {
+            // LLVM 17 specifies function pointer alignment for ppc:
+            // https://reviews.llvm.org/D147016
+            target_data_layout = target_data_layout
+                .replace("-Fn32", "")
+                .replace("-Fi32", "")
+                .replace("-Fn64", "")
+                .replace("-Fi64", "");
+        }
+    }
 
     // Ensure the data-layout values hardcoded remain the defaults.
     if sess.target.is_builtin {
+        // `tm` is disposed of by its `Drop` impl.
         let tm = crate::back::write::create_informational_target_machine(tcx.sess);
-        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
-        llvm::LLVMRustDisposeTargetMachine(tm);
+        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm);
 
         let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
         let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
@@ -209,7 +210,7 @@ pub unsafe fn create_module<'ll>(
         // PIE is potentially more effective than PIC, but can only be used in executables.
         // If all our outputs are executables, then we can relax PIC to PIE.
         if reloc_model == RelocModel::Pie
-            || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
+            || tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable)
         {
             llvm::LLVMRustSetModulePIELevel(llmod);
         }
@@ -464,7 +465,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             instances: Default::default(),
             vtables: Default::default(),
             const_str_cache: Default::default(),
-            const_unsized: Default::default(),
             const_globals: Default::default(),
             statics_to_rauw: RefCell::new(Vec::new()),
             used_statics: RefCell::new(Vec::new()),
@@ -495,7 +495,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 
     pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
         let section = cstr!("llvm.metadata");
-        let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
+        let array = self.const_array(self.type_ptr(), values);
 
         unsafe {
             let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
@@ -673,7 +673,7 @@ impl<'ll> CodegenCx<'ll, '_> {
             ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
         }
 
-        let i8p = self.type_i8p();
+        let ptr = self.type_ptr();
         let void = self.type_void();
         let i1 = self.type_i1();
         let t_i8 = self.type_i8();
@@ -687,7 +687,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let t_metadata = self.type_metadata();
         let t_token = self.type_token();
 
-        ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
+        ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
         ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
 
         ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
@@ -723,7 +723,7 @@ impl<'ll> CodegenCx<'ll, '_> {
 
         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);
-        ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+        ifn!("llvm.frameaddress", fn(t_i32) -> ptr);
 
         ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
         ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
@@ -890,43 +890,44 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
         ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
 
-        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
-        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
+        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);
 
         ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
-        ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+        ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
         ifn!("llvm.localescape", fn(...) -> void);
-        ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
-        ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+        ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
+        ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);
 
         ifn!("llvm.assume", fn(i1) -> void);
-        ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+        ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);
 
         // This isn't an "LLVM intrinsic", but LLVM's optimization passes
-        // recognize it like one and we assume it exists in `core::slice::cmp`
+        // recognize it like one (including turning it into `bcmp` sometimes)
+        // and we use it to implement intrinsics like `raw_eq` and `compare_bytes`
         match self.sess().target.arch.as_ref() {
-            "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
-            _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
+            "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
+            _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
         }
 
         // variadic intrinsics
-        ifn!("llvm.va_start", fn(i8p) -> void);
-        ifn!("llvm.va_end", fn(i8p) -> void);
-        ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+        ifn!("llvm.va_start", fn(ptr) -> void);
+        ifn!("llvm.va_end", fn(ptr) -> void);
+        ifn!("llvm.va_copy", fn(ptr, ptr) -> void);
 
         if self.sess().instrument_coverage() {
-            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+            ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
         }
 
-        ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
-        ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
+        ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
+        ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});
 
         if self.sess().opts.debuginfo != DebugInfo::None {
             ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
             ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
         }
 
-        ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);
+        ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
 
         None
     }
@@ -940,12 +941,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
             Some(def_id) => self.get_static(def_id),
             _ => {
-                let ty = self
-                    .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+                let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false);
                 self.declare_global("rust_eh_catch_typeinfo", ty)
             }
         };
-        let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
         self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
         eh_catch_typeinfo
     }
@@ -999,10 +998,10 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
 
     #[inline]
     fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
-        if let LayoutError::SizeOverflow(_) = err {
+        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
             self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
         } else {
-            span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
+            self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 1791ce4b315..763186a58bf 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+use rustc_middle::mir::coverage::{CounterId, ExpressionId, Operand};
 
 /// Must match the layout of `LLVMRustCounterKind`.
 #[derive(Copy, Clone, Debug)]
@@ -30,34 +30,25 @@ pub struct Counter {
 }
 
 impl Counter {
-    /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
-    /// `id` is not used.
-    pub fn zero() -> Self {
-        Self { kind: CounterKind::Zero, id: 0 }
-    }
+    /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
+    pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
 
-    /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
-    /// the given 1-based counter_id to the required 0-based equivalent for
-    /// the `Counter` encoding.
-    pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
-        Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
+    /// Constructs a new `Counter` of kind `CounterValueReference`.
+    pub fn counter_value_reference(counter_id: CounterId) -> Self {
+        Self { kind: CounterKind::CounterValueReference, id: counter_id.as_u32() }
     }
 
     /// Constructs a new `Counter` of kind `Expression`.
-    pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
-        Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
-    }
-
-    /// Returns true if the `Counter` kind is `Zero`.
-    pub fn is_zero(&self) -> bool {
-        matches!(self.kind, CounterKind::Zero)
+    pub(crate) fn expression(expression_id: ExpressionId) -> Self {
+        Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
     }
 
-    /// An explicitly-named function to get the ID value, making it more obvious
-    /// that the stored value is now 0-based.
-    pub fn zero_based_id(&self) -> u32 {
-        debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
-        self.id
+    pub(crate) fn from_operand(operand: Operand) -> Self {
+        match operand {
+            Operand::Zero => Self::ZERO,
+            Operand::Counter(id) => Self::counter_value_reference(id),
+            Operand::Expression(id) => Self::expression(id),
+        }
     }
 }
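
The old scheme distinguished counters from expressions purely by ID range (expression IDs counted down from `u32::MAX`); `Operand` makes that distinction an explicit enum, so `from_operand` becomes a mechanical match. A sketch with stand-in types:

    // Sketch with stand-in types: the Operand enum makes the counter/expression
    // distinction explicit, so no ID-range arithmetic is needed at the FFI boundary.
    enum Operand {
        Zero,
        Counter(u32),
        Expression(u32),
    }

    #[derive(Debug, PartialEq)]
    enum Kind {
        Zero,
        CounterValueReference,
        Expression,
    }

    fn from_operand(operand: Operand) -> (Kind, u32) {
        match operand {
            Operand::Zero => (Kind::Zero, 0),
            Operand::Counter(id) => (Kind::CounterValueReference, id),
            Operand::Expression(id) => (Kind::Expression, id),
        }
    }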
 
@@ -83,7 +74,206 @@ pub struct CounterExpression {
 }
 
 impl CounterExpression {
+    /// The dummy expression `(0 - 0)` has a representation of all zeroes,
+    /// making it marginally more efficient to initialize than `(0 + 0)`.
+    pub(crate) const DUMMY: Self =
+        Self { lhs: Counter::ZERO, kind: ExprKind::Subtract, rhs: Counter::ZERO };
+
     pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
         Self { kind, lhs, rhs }
     }
 }
+
+/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegionKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum RegionKind {
+    /// A CodeRegion associates some code with a counter
+    CodeRegion = 0,
+
+    /// An ExpansionRegion represents a file expansion region that associates
+    /// a source range with the expansion of a virtual source file, such as
+    /// for a macro instantiation or #include file.
+    ExpansionRegion = 1,
+
+    /// A SkippedRegion represents a source range with code that was skipped
+    /// by a preprocessor or similar means.
+    SkippedRegion = 2,
+
+    /// A GapRegion is like a CodeRegion, but its count is only set as the
+    /// line execution count when it's the only region in the line.
+    GapRegion = 3,
+
+    /// A BranchRegion represents leaf-level boolean expressions and is
+    /// associated with two counters, each representing the number of times the
+    /// expression evaluates to true or false.
+    BranchRegion = 4,
+}
+
+/// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
+/// coverage map, in accordance with the
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// The struct composes fields representing the `Counter` type and value(s) (injected counter
+/// ID, or expression type and operands), the source file (an indirect index into a "filenames
+/// array", encoded separately), and source location (start and end positions of the represented
+/// code region).
+///
+/// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegion`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterMappingRegion {
+    /// The counter type and type-dependent counter data, if any.
+    counter: Counter,
+
+    /// If the `RegionKind` is a `BranchRegion`, this represents the counter
+    /// for the false branch of the region.
+    false_counter: Counter,
+
+    /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+    /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+    /// that, in turn, are used to look up the filename for this region.
+    file_id: u32,
+
+    /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+    /// the mapping regions created as a result of macro expansion, by checking if their file id
+    /// matches the expanded file id.
+    expanded_file_id: u32,
+
+    /// 1-based starting line of the mapping region.
+    start_line: u32,
+
+    /// 1-based starting column of the mapping region.
+    start_col: u32,
+
+    /// 1-based ending line of the mapping region.
+    end_line: u32,
+
+    /// 1-based ending column of the mapping region. If the high bit is set, the current
+    /// mapping region is a gap area.
+    end_col: u32,
+
+    kind: RegionKind,
+}
+
+impl CounterMappingRegion {
+    pub(crate) fn code_region(
+        counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::CodeRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn branch_region(
+        counter: Counter,
+        false_counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::BranchRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn expansion_region(
+        file_id: u32,
+        expanded_file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::ExpansionRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn skipped_region(
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::SkippedRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn gap_region(
+        counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col: (1_u32 << 31) | end_col,
+            kind: RegionKind::GapRegion,
+        }
+    }
+}
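
One detail worth calling out in `gap_region`: per the coverage mapping format, a gap area is flagged by setting the high bit of the ending column, as the doc comment on `end_col` notes. A tiny sketch of encoding and decoding that bit:

    // Sketch: the gap-area flag lives in the top bit of `end_col`.
    fn encode_gap(end_col: u32) -> u32 {
        (1_u32 << 31) | end_col
    }
    fn is_gap(end_col: u32) -> bool {
        end_col & (1 << 31) != 0
    }
    fn decode_col(end_col: u32) -> u32 {
        end_col & !(1 << 31)
    }

    fn main() {
        let raw = encode_gap(42);
        assert!(is_gap(raw));
        assert_eq!(decode_col(raw), 42);
    }
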
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index 06844afd6b8..55f43aa5341 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,26 +1,22 @@
-pub use super::ffi::*;
+use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
 
-use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
-    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
-    InjectedExpressionIndex, MappedExpressionIndex, Op,
-};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, ExpressionId, Op, Operand};
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::TyCtxt;
 
 #[derive(Clone, Debug, PartialEq)]
 pub struct Expression {
-    lhs: ExpressionOperandId,
+    lhs: Operand,
     op: Op,
-    rhs: ExpressionOperandId,
-    region: Option<CodeRegion>,
+    rhs: Operand,
+    code_regions: Vec<CodeRegion>,
 }
 
 /// Collects all of the coverage regions associated with (a) injected counters, (b) counter
 /// expressions (additions or subtraction), and (c) unreachable regions (always counted as zero),
-/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
-/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// for a given Function. This struct also stores the `function_source_hash`,
 /// computed during instrumentation, and forwarded with counters.
 ///
 /// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
@@ -34,8 +30,8 @@ pub struct FunctionCoverage<'tcx> {
     instance: Instance<'tcx>,
     source_hash: u64,
     is_used: bool,
-    counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
-    expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
+    counters: IndexVec<CounterId, Option<Vec<CodeRegion>>>,
+    expressions: IndexVec<ExpressionId, Option<Expression>>,
     unreachable_regions: Vec<CodeRegion>,
 }
 
@@ -81,65 +77,119 @@ impl<'tcx> FunctionCoverage<'tcx> {
         }
     }
 
-    /// Adds a code region to be counted by an injected counter intrinsic.
-    pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
-        if let Some(previous_region) = self.counters[id].replace(region.clone()) {
-            assert_eq!(previous_region, region, "add_counter: code region for id changed");
+    /// Adds code regions to be counted by an injected counter intrinsic.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_counter(&mut self, id: CounterId, code_regions: &[CodeRegion]) {
+        if code_regions.is_empty() {
+            return;
+        }
+
+        let slot = &mut self.counters[id];
+        match slot {
+            None => *slot = Some(code_regions.to_owned()),
+            // If this counter ID slot has already been filled, it should
+            // contain identical information.
+            Some(ref previous_regions) => assert_eq!(
+                previous_regions, code_regions,
+                "add_counter: code regions for id changed"
+            ),
         }
     }
 
-    /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
-    /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
-    /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
-    /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
-    /// knowing what the last counter ID will be.
+    /// Adds information about a coverage expression, along with zero or more
+    /// code regions mapped to that expression.
     ///
-    /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
-    /// struct, its vector index is computed, from the given expression ID, by subtracting from
-    /// `u32::MAX`.
-    ///
-    /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
-    /// expressions, an operand that references an expression also uses its original ID, descending
-    /// from `u32::MAX`. Theses operands are translated only during code generation, after all
-    /// counters and expressions have been added.
-    pub fn add_counter_expression(
+    /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+    /// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity
+    /// between operands that are counter IDs and operands that are expression IDs.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_counter_expression(
         &mut self,
-        expression_id: InjectedExpressionId,
-        lhs: ExpressionOperandId,
+        expression_id: ExpressionId,
+        lhs: Operand,
         op: Op,
-        rhs: ExpressionOperandId,
-        region: Option<CodeRegion>,
+        rhs: Operand,
+        code_regions: &[CodeRegion],
     ) {
-        debug!(
-            "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
-            expression_id, lhs, op, rhs, region
-        );
-        let expression_index = self.expression_index(u32::from(expression_id));
         debug_assert!(
-            expression_index.as_usize() < self.expressions.len(),
-            "expression_index {} is out of range for expressions.len() = {}
+            expression_id.as_usize() < self.expressions.len(),
+            "expression_id {} is out of range for expressions.len() = {}
             for {:?}",
-            expression_index.as_usize(),
+            expression_id.as_usize(),
             self.expressions.len(),
             self,
         );
-        if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
-            lhs,
-            op,
-            rhs,
-            region: region.clone(),
-        }) {
-            assert_eq!(
-                previous_expression,
-                Expression { lhs, op, rhs, region },
+
+        let expression = Expression { lhs, op, rhs, code_regions: code_regions.to_owned() };
+        let slot = &mut self.expressions[expression_id];
+        match slot {
+            None => *slot = Some(expression),
+            // If this expression ID slot has already been filled, it should
+            // contain identical information.
+            Some(ref previous_expression) => assert_eq!(
+                previous_expression, &expression,
                 "add_counter_expression: expression for id changed"
-            );
+            ),
         }
     }
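
`add_counter` and `add_counter_expression` now share the same fill-once discipline: an empty slot is populated, and any refill must carry identical data, since a mismatch would mean the instrumentation produced conflicting information for one ID. A minimal sketch of the idiom:

    // Sketch: fill-once slots that assert on conflicting refills, as used for
    // per-ID counter and expression data.
    fn fill_once<T: PartialEq + std::fmt::Debug>(slot: &mut Option<T>, value: T) {
        match slot {
            None => *slot = Some(value),
            Some(previous) => {
                assert_eq!(previous, &value, "slot refilled with different data")
            }
        }
    }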
 
-    /// Add a region that will be marked as "unreachable", with a constant "zero counter".
-    pub fn add_unreachable_region(&mut self, region: CodeRegion) {
-        self.unreachable_regions.push(region)
+    /// Adds regions that will be marked as "unreachable", with a constant "zero counter".
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_unreachable_regions(&mut self, code_regions: &[CodeRegion]) {
+        assert!(!code_regions.is_empty(), "unreachable regions always have code regions");
+        self.unreachable_regions.extend_from_slice(code_regions);
+    }
+
+    /// Perform some simplifications to make the final coverage mappings
+    /// slightly smaller.
+    ///
+    /// This method mainly exists to preserve the simplifications that were
+    /// already being performed by the Rust-side expression renumbering, so that
+    /// the resulting coverage mappings don't get worse.
+    pub(crate) fn simplify_expressions(&mut self) {
+        // The set of expressions that either were optimized out entirely, or
+        // have zero as both of their operands, and will therefore always have
+        // a value of zero. Other expressions that refer to these as operands
+        // can have those operands replaced with `Operand::Zero`.
+        let mut zero_expressions = FxIndexSet::default();
+
+        // For each expression, perform simplifications based on lower-numbered
+        // expressions, and then update the set of always-zero expressions if
+        // necessary.
+        // (By construction, expressions can only refer to other expressions
+        // that have lower IDs, so one simplification pass is sufficient.)
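+        //
+        // (Illustrative example: if expression `e1` was optimized out and
+        // `e3 = e1 - c2`, then `e3`'s LHS is first rewritten to zero, the
+        // zero-subtraction rule below then forces its RHS to zero, and `e3`
+        // itself joins the always-zero set.)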
+        for (id, maybe_expression) in self.expressions.iter_enumerated_mut() {
+            let Some(expression) = maybe_expression else {
+                // If an expression is missing, it must have been optimized away,
+                // so any operand that refers to it can be replaced with zero.
+                zero_expressions.insert(id);
+                continue;
+            };
+
+            // If an operand refers to an expression that is always zero, then
+            // that operand can be replaced with `Operand::Zero`.
+            let maybe_set_operand_to_zero = |operand: &mut Operand| match &*operand {
+                Operand::Expression(id) if zero_expressions.contains(id) => {
+                    *operand = Operand::Zero;
+                }
+                _ => (),
+            };
+            maybe_set_operand_to_zero(&mut expression.lhs);
+            maybe_set_operand_to_zero(&mut expression.rhs);
+
+            // Coverage counter values cannot be negative, so if an expression
+            // involves subtraction from zero, assume that its RHS must also be zero.
+            // (Do this after simplifications that could set the LHS to zero.)
+            if let Expression { lhs: Operand::Zero, op: Op::Subtract, .. } = expression {
+                expression.rhs = Operand::Zero;
+            }
+
+            // After the above simplifications, if both operands are zero, then
+            // we know that this expression is always zero too.
+            if let Expression { lhs: Operand::Zero, rhs: Operand::Zero, .. } = expression {
+                zero_expressions.insert(id);
+            }
+        }
     }
 
     /// Return the source hash, generated from the HIR node structure, and used to indicate whether
@@ -160,8 +210,14 @@ impl<'tcx> FunctionCoverage<'tcx> {
             self.instance
         );
 
+        let counter_expressions = self.counter_expressions();
+        // Expression IDs are indices into `self.expressions`, and on the LLVM
+        // side they will be treated as indices into `counter_expressions`, so
+        // the two vectors should correspond 1:1.
+        assert_eq!(self.expressions.len(), counter_expressions.len());
+
         let counter_regions = self.counter_regions();
-        let (counter_expressions, expression_regions) = self.expressions_with_regions();
+        let expression_regions = self.expression_regions();
         let unreachable_regions = self.unreachable_regions();
 
         let counter_regions =
@@ -170,179 +226,69 @@ impl<'tcx> FunctionCoverage<'tcx> {
     }
 
     fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
-        self.counters.iter_enumerated().filter_map(|(index, entry)| {
-            // Option::map() will return None to filter out missing counters. This may happen
-            // if, for example, a MIR-instrumented counter is removed during an optimization.
-            entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
-        })
+        self.counters
+            .iter_enumerated()
+            // Filter out counter IDs that we never saw during MIR traversal.
+            // This can happen if a counter was optimized out by MIR transforms
+            // (and replaced with `CoverageKind::Unreachable` instead).
+            .filter_map(|(id, maybe_code_regions)| Some((id, maybe_code_regions.as_ref()?)))
+            .flat_map(|(id, code_regions)| {
+                let counter = Counter::counter_value_reference(id);
+                code_regions.iter().map(move |region| (counter, region))
+            })
     }
 
-    fn expressions_with_regions(
-        &self,
-    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
-        let mut counter_expressions = Vec::with_capacity(self.expressions.len());
-        let mut expression_regions = Vec::with_capacity(self.expressions.len());
-        let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
-
-        // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
-        // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
-        // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
-        // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
-        // matches the injected counter index); and any other value is converted into a
-        // `CounterKind::Expression` with the expression's `new_index`.
-        //
-        // Expressions will be returned from this function in a sequential vector (array) of
-        // `CounterExpression`, so the expression IDs must be mapped from their original,
-        // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
-        //
-        // An `Expression` as an operand will have already been encountered as an `Expression` with
-        // operands, so its new_index will already have been generated (as a 1-up index value).
-        // (If an `Expression` as an operand does not have a corresponding new_index, it was
-        // probably optimized out, after the expression was injected into the MIR, so it will
-        // get a `CounterKind::Zero` instead.)
-        //
-        // In other words, an `Expression`s at any given index can include other expressions as
-        // operands, but expression operands can only come from the subset of expressions having
-        // `expression_index`s lower than the referencing `Expression`. Therefore, it is
-        // reasonable to look up the new index of an expression operand while the `new_indexes`
-        // vector is only complete up to the current `ExpressionIndex`.
-        let id_to_counter = |new_indexes: &IndexSlice<
-            InjectedExpressionIndex,
-            Option<MappedExpressionIndex>,
-        >,
-                             id: ExpressionOperandId| {
-            if id == ExpressionOperandId::ZERO {
-                Some(Counter::zero())
-            } else if id.index() < self.counters.len() {
-                debug_assert!(
-                    id.index() > 0,
-                    "ExpressionOperandId indexes for counters are 1-based, but this id={}",
-                    id.index()
-                );
-                // Note: Some codegen-injected Counters may be only referenced by `Expression`s,
-                // and may not have their own `CodeRegion`s,
-                let index = CounterValueReference::from(id.index());
-                // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
-                Some(Counter::counter_value_reference(index))
-            } else {
-                let index = self.expression_index(u32::from(id));
-                self.expressions
-                    .get(index)
-                    .expect("expression id is out of range")
-                    .as_ref()
-                    // If an expression was optimized out, assume it would have produced a count
-                    // of zero. This ensures that expressions dependent on optimized-out
-                    // expressions are still valid.
-                    .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression))
-            }
-        };
-
-        for (original_index, expression) in
-            self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
-                // Option::map() will return None to filter out missing expressions. This may happen
-                // if, for example, a MIR-instrumented expression is removed during an optimization.
-                entry.as_ref().map(|expression| (original_index, expression))
-            })
-        {
-            let optional_region = &expression.region;
-            let Expression { lhs, op, rhs, .. } = *expression;
+    /// Convert this function's coverage expression data into a form that can be
+    /// passed through FFI to LLVM.
+    fn counter_expressions(&self) -> Vec<CounterExpression> {
+        // We know that LLVM will optimize out any unused expressions before
+        // producing the final coverage map, so there's no need to do the same
+        // thing on the Rust side unless we're confident we can do much better.
+        // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
 
-            if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
-                .map(|lhs_counter| {
-                    id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
-                })
-            {
-                if lhs_counter.is_zero() && op.is_subtract() {
-                    // The left side of a subtraction was probably optimized out. As an example,
-                    // a branch condition might be evaluated as a constant expression, and the
-                    // branch could be removed, dropping unused counters in the process.
-                    //
-                    // Since counters are unsigned, we must assume the result of the expression
-                    // can be no more and no less than zero. An expression known to evaluate to zero
-                    // does not need to be added to the coverage map.
-                    //
-                    // Coverage test `loops_branches.rs` includes multiple variations of branches
-                    // based on constant conditional (literal `true` or `false`), and demonstrates
-                    // that the expected counts are still correct.
-                    debug!(
-                        "Expression subtracts from zero (assume unreachable): \
-                        original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
-                        original_index, lhs, op, rhs, optional_region,
-                    );
-                    rhs_counter = Counter::zero();
+        self.expressions
+            .iter()
+            .map(|expression| match expression {
+                None => {
+                    // This expression ID was allocated, but we never saw the
+                    // actual expression, so it must have been optimized out.
+                    // Replace it with a dummy expression, and let LLVM take
+                    // care of omitting it from the expression list.
+                    CounterExpression::DUMMY
                 }
-                debug_assert!(
-                    lhs_counter.is_zero()
-                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
-                        || ((lhs_counter.zero_based_id() as usize)
-                            <= usize::max(self.counters.len(), self.expressions.len())),
-                    "lhs id={} > both counters.len()={} and expressions.len()={}
-                    ({:?} {:?} {:?})",
-                    lhs_counter.zero_based_id(),
-                    self.counters.len(),
-                    self.expressions.len(),
-                    lhs_counter,
-                    op,
-                    rhs_counter,
-                );
-
-                debug_assert!(
-                    rhs_counter.is_zero()
-                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
-                        || ((rhs_counter.zero_based_id() as usize)
-                            <= usize::max(self.counters.len(), self.expressions.len())),
-                    "rhs id={} > both counters.len()={} and expressions.len()={}
-                    ({:?} {:?} {:?})",
-                    rhs_counter.zero_based_id(),
-                    self.counters.len(),
-                    self.expressions.len(),
-                    lhs_counter,
-                    op,
-                    rhs_counter,
-                );
-
-                // Both operands exist. `Expression` operands exist in `self.expressions` and have
-                // been assigned a `new_index`.
-                let mapped_expression_index =
-                    MappedExpressionIndex::from(counter_expressions.len());
-                let expression = CounterExpression::new(
-                    lhs_counter,
-                    match op {
-                        Op::Add => ExprKind::Add,
-                        Op::Subtract => ExprKind::Subtract,
-                    },
-                    rhs_counter,
-                );
-                debug!(
-                    "Adding expression {:?} = {:?}, region: {:?}",
-                    mapped_expression_index, expression, optional_region
-                );
-                counter_expressions.push(expression);
-                new_indexes[original_index] = Some(mapped_expression_index);
-                if let Some(region) = optional_region {
-                    expression_regions.push((Counter::expression(mapped_expression_index), region));
+                &Some(Expression { lhs, op, rhs, .. }) => {
+                    // Convert the operands and operator as normal.
+                    CounterExpression::new(
+                        Counter::from_operand(lhs),
+                        match op {
+                            Op::Add => ExprKind::Add,
+                            Op::Subtract => ExprKind::Subtract,
+                        },
+                        Counter::from_operand(rhs),
+                    )
                 }
-            } else {
-                bug!(
-                    "expression has one or more missing operands \
-                      original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
-                    original_index,
-                    lhs,
-                    op,
-                    rhs,
-                    optional_region,
-                );
-            }
-        }
-        (counter_expressions, expression_regions.into_iter())
+            })
+            .collect::<Vec<_>>()
     }
 
-    fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
-        self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+    fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
+        // Find all of the expression IDs that weren't optimized out AND have
+        // one or more attached code regions, and return the corresponding
+        // mappings as counter/region pairs.
+        self.expressions
+            .iter_enumerated()
+            .filter_map(|(id, maybe_expression)| {
+                let code_regions = &maybe_expression.as_ref()?.code_regions;
+                Some((id, code_regions))
+            })
+            .flat_map(|(id, code_regions)| {
+                let counter = Counter::expression(id);
+                code_regions.iter().map(move |code_region| (counter, code_region))
+            })
+            .collect::<Vec<_>>()
     }
 
-    fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
-        debug_assert!(id_descending_from_max >= self.counters.len() as u32);
-        InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
+    fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+        self.unreachable_regions.iter().map(|region| (Counter::ZERO, region))
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 6e815ab3ff5..d4e77525698 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,20 +1,19 @@
 use crate::common::CodegenCx;
 use crate::coverageinfo;
-use crate::coverageinfo::map_data::{Counter, CounterExpression};
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::FunctionCoverage;
 use crate::llvm;
 
-use llvm::coverageinfo::CounterMappingRegion;
 use rustc_codegen_ssa::traits::ConstMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
-use rustc_llvm::RustString;
+use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::CodeRegion;
 use rustc_middle::ty::TyCtxt;
-
-use std::ffi::CString;
+use rustc_span::Symbol;
 
 /// Generates and exports the Coverage Map.
 ///
@@ -57,21 +56,21 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
         return;
     }
 
-    let mut mapgen = CoverageMapGenerator::new(tcx);
+    let mut global_file_table = GlobalFileTable::new(tcx);
 
     // Encode coverage mappings and generate function records
     let mut function_data = Vec::new();
-    for (instance, function_coverage) in function_coverage_map {
+    for (instance, mut function_coverage) in function_coverage_map {
         debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+        function_coverage.simplify_expressions();
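+        // Re-bind immutably, so the simplified data cannot change from here on.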
+        let function_coverage = function_coverage;
+
         let mangled_function_name = tcx.symbol_name(instance).name;
         let source_hash = function_coverage.source_hash();
         let is_used = function_coverage.is_used();
-        let (expressions, counter_regions) =
-            function_coverage.get_expressions_and_counter_regions();
 
-        let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
-            mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
-        });
+        let coverage_mapping_buffer =
+            encode_mappings_for_function(&mut global_file_table, &function_coverage);
 
         if coverage_mapping_buffer.is_empty() {
             if function_coverage.is_used() {
@@ -89,20 +88,20 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
     }
 
     // Encode all filenames referenced by counters/expressions in this module
-    let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
-        coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
-    });
+    let filenames_buffer = global_file_table.into_filenames_buffer();
 
     let filenames_size = filenames_buffer.len();
     let filenames_val = cx.const_bytes(&filenames_buffer);
     let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
 
     // Generate the LLVM IR representation of the coverage map and store it in a well-known global
-    let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
 
+    let covfun_section_name = coverageinfo::covfun_section_name(cx);
     for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
         save_function_record(
             cx,
+            &covfun_section_name,
             mangled_function_name,
             source_hash,
             filenames_ref,
@@ -115,112 +114,129 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
     coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
 }
 
-struct CoverageMapGenerator {
-    filenames: FxIndexSet<CString>,
+struct GlobalFileTable {
+    global_file_table: FxIndexSet<Symbol>,
 }
 
-impl CoverageMapGenerator {
+impl GlobalFileTable {
     fn new(tcx: TyCtxt<'_>) -> Self {
-        let mut filenames = FxIndexSet::default();
+        let mut global_file_table = FxIndexSet::default();
         // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
         // requires setting the first filename to the compilation directory.
         // Since rustc generates coverage maps with relative paths, the
         // compilation directory can be combined with the relative paths
         // to get absolute paths, if needed.
-        let working_dir =
-            tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy().to_string();
-        let c_filename =
-            CString::new(working_dir).expect("null error converting filename to C string");
-        filenames.insert(c_filename);
-        Self { filenames }
+        let working_dir = Symbol::intern(
+            &tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
+        );
+        global_file_table.insert(working_dir);
+        Self { global_file_table }
     }
 
-    /// Using the `expressions` and `counter_regions` collected for the current function, generate
-    /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
-    /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
-    /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
-    fn write_coverage_mapping<'a>(
-        &mut self,
-        expressions: Vec<CounterExpression>,
-        counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
-        coverage_mapping_buffer: &RustString,
-    ) {
-        let mut counter_regions = counter_regions.collect::<Vec<_>>();
-        if counter_regions.is_empty() {
-            return;
-        }
+    fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
+        let (global_file_id, _) = self.global_file_table.insert_full(file_name);
+        global_file_id as u32
+    }
 
-        let mut virtual_file_mapping = Vec::new();
-        let mut mapping_regions = Vec::new();
-        let mut current_file_name = None;
-        let mut current_file_id = 0;
-
-        // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
-        // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
-        // `file_id` (indexing files referenced by the current function), and construct the
-        // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
-        // `filenames` array.
-        counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
-        for (counter, region) in counter_regions {
-            let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
-            let same_file = current_file_name.is_some_and(|p| p == file_name);
-            if !same_file {
-                if current_file_name.is_some() {
-                    current_file_id += 1;
-                }
-                current_file_name = Some(file_name);
-                let c_filename = CString::new(file_name.to_string())
-                    .expect("null error converting filename to C string");
-                debug!("  file_id: {} = '{:?}'", current_file_id, c_filename);
-                let (filenames_index, _) = self.filenames.insert_full(c_filename);
-                virtual_file_mapping.push(filenames_index as u32);
-            }
-            debug!("Adding counter {:?} to map for {:?}", counter, region);
+    fn into_filenames_buffer(self) -> Vec<u8> {
+        // This method takes `self` so that the caller can't accidentally
+        // modify the original file table after encoding it into a buffer.
+
+        llvm::build_byte_buffer(|buffer| {
+            coverageinfo::write_filenames_section_to_buffer(
+                self.global_file_table.iter().map(Symbol::as_str),
+                buffer,
+            );
+        })
+    }
+}
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+    global_file_table: &mut GlobalFileTable,
+    function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+    let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
+
+    let mut counter_regions = counter_regions.collect::<Vec<_>>();
+    if counter_regions.is_empty() {
+        return Vec::new();
+    }
+
+    let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+    let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+    // Sort the list of (counter, region) mapping pairs by region, so that they
+    // can be grouped by filename. Prepare file IDs for each filename, and
+    // prepare the mapping data so that we can pass it through FFI to LLVM.
+    counter_regions.sort_by_key(|(_counter, region)| *region);
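+    // `group_by` splits the sorted slice into runs of consecutive pairs that
+    // share a file name; the sort above guarantees each file forms one run.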
+    for counter_regions_for_file in
+        counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+    {
+        // Look up (or allocate) the global file ID for this filename.
+        let file_name = counter_regions_for_file[0].1.file_name;
+        let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+        // Associate that global file ID with a local file ID for this function.
+        let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
+        debug!("  file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
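+        // (Illustrative: if this function's regions touch files with global
+        // IDs 7 and 3, its local table becomes [7, 3], and those regions then
+        // use local file IDs 0 and 1 respectively.)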
+
+        // For each counter/region pair in this function+file, convert it to a
+        // form suitable for FFI.
+        for &(counter, region) in counter_regions_for_file {
+            let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
+
+            debug!("Adding counter {counter:?} to map for {region:?}");
             mapping_regions.push(CounterMappingRegion::code_region(
                 counter,
-                current_file_id,
+                local_file_id,
                 start_line,
                 start_col,
                 end_line,
                 end_col,
             ));
         }
+    }
 
-        // Encode and append the current function's coverage mapping data
+    // Encode the function's coverage mappings into a buffer.
+    llvm::build_byte_buffer(|buffer| {
         coverageinfo::write_mapping_to_buffer(
-            virtual_file_mapping,
+            virtual_file_mapping.raw,
             expressions,
             mapping_regions,
-            coverage_mapping_buffer,
+            buffer,
         );
-    }
+    })
+}
 
-    /// Construct coverage map header and the array of function records, and combine them into the
-    /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
-    /// specific, well-known section and name.
-    fn generate_coverage_map<'ll>(
-        self,
-        cx: &CodegenCx<'ll, '_>,
-        version: u32,
-        filenames_size: usize,
-        filenames_val: &'ll llvm::Value,
-    ) -> &'ll llvm::Value {
-        debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
-
-        // Create the coverage data header (Note, fields 0 and 2 are now always zero,
-        // as of `llvm::coverage::CovMapVersion::Version4`.)
-        let zero_was_n_records_val = cx.const_u32(0);
-        let filenames_size_val = cx.const_u32(filenames_size as u32);
-        let zero_was_coverage_size_val = cx.const_u32(0);
-        let version_val = cx.const_u32(version);
-        let cov_data_header_val = cx.const_struct(
-            &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
-            /*packed=*/ false,
-        );
+/// Construct the coverage map header and combine it with the encoded filenames
+/// to form the complete coverage map value. The caller saves this value into
+/// the LLVM IR as a static global using a specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    version: u32,
+    filenames_size: usize,
+    filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+    debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+    // Create the coverage data header. (Note: fields 0 and 2 are now always
+    // zero, as of `llvm::coverage::CovMapVersion::Version4`.)
+    let zero_was_n_records_val = cx.const_u32(0);
+    let filenames_size_val = cx.const_u32(filenames_size as u32);
+    let zero_was_coverage_size_val = cx.const_u32(0);
+    let version_val = cx.const_u32(version);
+    let cov_data_header_val = cx.const_struct(
+        &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+        /*packed=*/ false,
+    );
 
-        // Create the complete LLVM coverage data value to add to the LLVM IR
-        cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
-    }
+    // Create the complete LLVM coverage data value to add to the LLVM IR
+    cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
 }
 
 /// Construct a function record and combine it with the function's coverage mapping data.
@@ -228,6 +244,7 @@ impl CoverageMapGenerator {
 /// specific, well-known section and name.
 fn save_function_record(
     cx: &CodegenCx<'_, '_>,
+    covfun_section_name: &str,
     mangled_function_name: &str,
     source_hash: u64,
     filenames_ref: u64,
@@ -254,7 +271,13 @@ fn save_function_record(
         /*packed=*/ true,
     );
 
-    coverageinfo::save_func_record_to_mod(cx, func_name_hash, func_record_val, is_used);
+    coverageinfo::save_func_record_to_mod(
+        cx,
+        covfun_section_name,
+        func_name_hash,
+        func_record_val,
+        is_used,
+    );
 }
 
 /// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
@@ -310,10 +333,10 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
     {
         let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
 
-        // If a function is marked `#[no_coverage]`, then skip generating a
+        // If a function is marked `#[coverage(off)]`, then skip generating a
         // dead code stub for it.
         if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
-            debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+            debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
             continue;
         }
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 1f8f4033931..dd2ce9b525b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -3,10 +3,10 @@ use crate::llvm;
 use crate::abi::Abi;
 use crate::builder::Builder;
 use crate::common::CodegenCx;
-use crate::coverageinfo::map_data::{CounterExpression, FunctionCoverage};
+use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::map_data::FunctionCoverage;
 
 use libc::c_uint;
-use llvm::coverageinfo::CounterMappingRegion;
 use rustc_codegen_ssa::traits::{
     BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods,
     StaticMethods,
@@ -16,9 +16,7 @@ use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_llvm::RustString;
 use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
-    CodeRegion, CounterValueReference, CoverageKind, ExpressionOperandId, InjectedExpressionId, Op,
-};
+use rustc_middle::mir::coverage::{CounterId, CoverageKind};
 use rustc_middle::mir::Coverage;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
@@ -27,13 +25,12 @@ use rustc_middle::ty::Instance;
 use rustc_middle::ty::Ty;
 
 use std::cell::RefCell;
-use std::ffi::CString;
 
-mod ffi;
+pub(crate) mod ffi;
 pub(crate) mod map_data;
 pub mod mapgen;
 
-const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
+const UNUSED_FUNCTION_COUNTER_ID: CounterId = CounterId::START;
 
 const VAR_ALIGN_BYTES: usize = 8;
 
@@ -92,9 +89,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     /// `function_coverage_map` (keyed by function `Instance`) during codegen.
     /// But in this case, since the unused function was _not_ previously
     /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
-    /// them. The first `CodeRegion` is used to add a single counter, with the
-    /// same counter ID used in the injected `instrprof.increment` intrinsic
-    /// call. Since the function is never called, all other `CodeRegion`s can be
+    /// them. Since the function is never called, all of its `CodeRegion`s can be
     /// added as `unreachable_region`s.
     fn define_unused_fn(&self, def_id: DefId) {
         let instance = declare_unused_fn(self, def_id);
@@ -107,144 +102,47 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
     fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
         let bx = self;
 
-        let Coverage { kind, code_region } = coverage.clone();
-        match kind {
+        let Some(coverage_context) = bx.coverage_context() else { return };
+        let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+        let func_coverage = coverage_map
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
+
+        let Coverage { kind, code_regions } = coverage;
+        match *kind {
             CoverageKind::Counter { function_source_hash, id } => {
-                if bx.set_function_source_hash(instance, function_source_hash) {
-                    // If `set_function_source_hash()` returned true, the coverage map is enabled,
-                    // so continue adding the counter.
-                    if let Some(code_region) = code_region {
-                        // Note: Some counters do not have code regions, but may still be referenced
-                        // from expressions. In that case, don't add the counter to the coverage map,
-                        // but do inject the counter intrinsic.
-                        bx.add_coverage_counter(instance, id, code_region);
-                    }
-
-                    let coverageinfo = bx.tcx().coverageinfo(instance.def);
-
-                    let fn_name = bx.get_pgo_func_name_var(instance);
-                    let hash = bx.const_u64(function_source_hash);
-                    let num_counters = bx.const_u32(coverageinfo.num_counters);
-                    let index = bx.const_u32(id.zero_based_index());
-                    debug!(
-                        "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
-                        fn_name, hash, num_counters, index,
-                    );
-                    bx.instrprof_increment(fn_name, hash, num_counters, index);
-                }
+                debug!(
+                    "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+                    instance, function_source_hash,
+                );
+                func_coverage.set_function_source_hash(function_source_hash);
+                func_coverage.add_counter(id, code_regions);
+                // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
+                // as that needs an exclusive borrow.
+                drop(coverage_map);
+
+                let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
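+                // Gather the operands of the `llvm.instrprof.increment`
+                // intrinsic: the function-name global, the IR hash, the total
+                // number of counters, and this counter's index.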
+                let fn_name = bx.get_pgo_func_name_var(instance);
+                let hash = bx.const_u64(function_source_hash);
+                let num_counters = bx.const_u32(coverageinfo.num_counters);
+                let index = bx.const_u32(id.as_u32());
+                debug!(
+                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                    fn_name, hash, num_counters, index,
+                );
+                bx.instrprof_increment(fn_name, hash, num_counters, index);
             }
             CoverageKind::Expression { id, lhs, op, rhs } => {
-                bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+                func_coverage.add_counter_expression(id, lhs, op, rhs, code_regions);
             }
             CoverageKind::Unreachable => {
-                bx.add_coverage_unreachable(
-                    instance,
-                    code_region.expect("unreachable regions always have code regions"),
-                );
+                func_coverage.add_unreachable_regions(code_regions);
             }
         }
     }
 }
 
-// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
-// after moving most coverage code out of SSA they are now just ordinary methods.
-impl<'tcx> Builder<'_, '_, 'tcx> {
-    /// Returns true if the function source hash was added to the coverage map (even if it had
-    /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
-    /// not enabled (a coverage map is not being generated).
-    fn set_function_source_hash(
-        &mut self,
-        instance: Instance<'tcx>,
-        function_source_hash: u64,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "ensuring function source hash is set for instance={:?}; function_source_hash={}",
-                instance, function_source_hash,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .set_function_source_hash(function_source_hash);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
-    /// is not enabled (a coverage map is not being generated).
-    fn add_coverage_counter(
-        &mut self,
-        instance: Instance<'tcx>,
-        id: CounterValueReference,
-        region: CodeRegion,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
-                instance, id, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_counter(id, region);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if the expression was added to the coverage map; false if
-    /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
-    fn add_coverage_counter_expression(
-        &mut self,
-        instance: Instance<'tcx>,
-        id: InjectedExpressionId,
-        lhs: ExpressionOperandId,
-        op: Op,
-        rhs: ExpressionOperandId,
-        region: Option<CodeRegion>,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
-                region: {:?}",
-                instance, id, lhs, op, rhs, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_counter_expression(id, lhs, op, rhs, region);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
-    /// is not enabled (a coverage map is not being generated).
-    fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding unreachable code to coverage_map: instance={:?}, at {:?}",
-                instance, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_unreachable_region(region);
-            true
-        } else {
-            false
-        }
-    }
-}
-
 fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
     let tcx = cx.tcx;
 
@@ -307,14 +205,9 @@ fn add_unused_function_coverage<'tcx>(
     let tcx = cx.tcx;
 
     let mut function_coverage = FunctionCoverage::unused(tcx, instance);
-    for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
-        if index == 0 {
-            // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
-            // definitions.
-            function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
-        } else {
-            function_coverage.add_unreachable_region(code_region.clone());
-        }
+    for &code_region in tcx.covered_code_regions(def_id) {
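+        // Wrap the single region in a one-element slice, since the
+        // `FunctionCoverage` API now takes `&[CodeRegion]`.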
+        let code_region = std::slice::from_ref(code_region);
+        function_coverage.add_unreachable_regions(code_region);
     }
 
     if let Some(coverage_context) = cx.coverage_context() {
@@ -334,21 +227,32 @@ fn create_pgo_func_name_var<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
 ) -> &'ll llvm::Value {
-    let mangled_fn_name = CString::new(cx.tcx.symbol_name(instance).name)
-        .expect("error converting function name to C string");
+    let mangled_fn_name: &str = cx.tcx.symbol_name(instance).name;
     let llfn = cx.get_fn(instance);
-    unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
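+    // The FFI function takes an explicit (pointer, length) pair, so the name
+    // no longer needs to be copied into a NUL-terminated `CString`.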
+    unsafe {
+        llvm::LLVMRustCoverageCreatePGOFuncNameVar(
+            llfn,
+            mangled_fn_name.as_ptr().cast(),
+            mangled_fn_name.len(),
+        )
+    }
 }
 
 pub(crate) fn write_filenames_section_to_buffer<'a>(
-    filenames: impl IntoIterator<Item = &'a CString>,
+    filenames: impl IntoIterator<Item = &'a str>,
     buffer: &RustString,
 ) {
-    let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
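+    // Pass the filenames through FFI as parallel pointer/length arrays,
+    // avoiding the fallible `CString` conversion that C strings required.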
+    let (pointers, lengths) = filenames
+        .into_iter()
+        .map(|s: &str| (s.as_ptr().cast(), s.len()))
+        .unzip::<_, _, Vec<_>, Vec<_>>();
+
     unsafe {
         llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
-            c_str_vec.as_ptr(),
-            c_str_vec.len(),
+            pointers.as_ptr(),
+            pointers.len(),
+            lengths.as_ptr(),
+            lengths.len(),
             buffer,
         );
     }
@@ -408,6 +312,7 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
 
 pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
+    covfun_section_name: &str,
     func_name_hash: u64,
     func_record_val: &'ll llvm::Value,
     is_used: bool,
@@ -423,20 +328,33 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
     let func_record_var_name =
         format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
     debug!("function record var name: {:?}", func_record_var_name);
-
-    let func_record_section_name = llvm::build_string(|s| unsafe {
-        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
-    })
-    .expect("Rust Coverage function record section name failed UTF-8 conversion");
-    debug!("function record section name: {:?}", func_record_section_name);
+    debug!("function record section name: {:?}", covfun_section_name);
 
     let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
     llvm::set_initializer(llglobal, func_record_val);
     llvm::set_global_constant(llglobal, true);
     llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
     llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
-    llvm::set_section(llglobal, &func_record_section_name);
+    llvm::set_section(llglobal, covfun_section_name);
     llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
     llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
     cx.add_used_global(llglobal);
 }
+
+/// Returns the section name string to pass through to the linker when embedding
+/// per-function coverage information in the object file, according to the target
+/// platform's object file format.
+///
+/// LLVM's coverage tools read coverage mapping details from this section when
+/// producing coverage reports.
+///
+/// Typical values are:
+/// - `__llvm_covfun` on Linux
+/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
+/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
+pub(crate) fn covfun_section_name(cx: &CodegenCx<'_, '_>) -> String {
+    llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
+    })
+    .expect("Rust Coverage function record section name failed UTF-8 conversion")
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index d174a3593b9..aff764f0224 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -20,7 +20,7 @@ pub fn compute_mir_scopes<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
 ) {
     // Find all scopes with variables defined in them.
     let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
@@ -51,7 +51,7 @@ fn make_mir_scope<'ll, 'tcx>(
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
     variables: &Option<BitSet<SourceScope>>,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
     instantiated: &mut BitSet<SourceScope>,
     scope: SourceScope,
 ) {
@@ -68,7 +68,7 @@ fn make_mir_scope<'ll, 'tcx>(
         let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
         debug_context.scopes[scope] = DebugScope {
             file_start_pos: file.start_pos,
-            file_end_pos: file.end_pos,
+            file_end_pos: file.end_position(),
             ..debug_context.scopes[scope]
         };
         instantiated.insert(scope);
@@ -86,27 +86,31 @@ fn make_mir_scope<'ll, 'tcx>(
     let loc = cx.lookup_debug_loc(scope_data.span.lo());
     let file_metadata = file_metadata(cx, &loc.file);
 
-    let dbg_scope = match scope_data.inlined {
+    let parent_dbg_scope = match scope_data.inlined {
         Some((callee, _)) => {
             // FIXME(eddyb) this would be `self.monomorphize(&callee)`
             // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
-            let callee = cx.tcx.subst_and_normalize_erasing_regions(
+            let callee = cx.tcx.instantiate_and_normalize_erasing_regions(
                 instance.args,
                 ty::ParamEnv::reveal_all(),
                 ty::EarlyBinder::bind(callee),
             );
-            let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
-            cx.dbg_scope_fn(callee, callee_fn_abi, None)
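+            // Create at most one scope per inlined callee, reusing it if the
+            // same function was inlined into this body more than once.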
+            debug_context.inlined_function_scopes.entry(callee).or_insert_with(|| {
+                let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+                cx.dbg_scope_fn(callee, callee_fn_abi, None)
+            })
         }
-        None => unsafe {
-            llvm::LLVMRustDIBuilderCreateLexicalBlock(
-                DIB(cx),
-                parent_scope.dbg_scope,
-                file_metadata,
-                loc.line,
-                loc.col,
-            )
-        },
+        None => parent_scope.dbg_scope,
+    };
+
+    let dbg_scope = unsafe {
+        llvm::LLVMRustDIBuilderCreateLexicalBlock(
+            DIB(cx),
+            parent_dbg_scope,
+            file_metadata,
+            loc.line,
+            loc.col,
+        )
     };
 
     let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
@@ -120,7 +124,7 @@ fn make_mir_scope<'ll, 'tcx>(
         dbg_scope,
         inlined_at: inlined_at.or(parent_scope.inlined_at),
         file_start_pos: loc.file.start_pos,
-        file_end_pos: loc.file.end_pos,
+        file_end_pos: loc.file.end_position(),
     };
     instantiated.insert(scope);
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 37f30917609..425e935bc9f 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -17,8 +17,7 @@ use rustc_span::symbol::sym;
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
     if needs_gdb_debug_scripts_section(bx) {
-        let gdb_debug_scripts_section =
-            bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
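+        // (With opaque pointers, the global no longer needs a cast to `i8*`
+        // before being loaded from.)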
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
         let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
@@ -54,7 +53,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '
             // The initial byte `4` instructs GDB that the following pretty printer
             // is defined inline as opposed to in a standalone file.
             section_contents.extend_from_slice(b"\x04");
-            let vis_name = format!("pretty-printer-{}-{}\n", crate_name, index);
+            let vis_name = format!("pretty-printer-{crate_name}-{index}\n");
             section_contents.extend_from_slice(vis_name.as_bytes());
             section_contents.extend_from_slice(&visualizer.src);
 
@@ -93,7 +92,7 @@ pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
     // each rlib could produce a different set of visualizers that would be embedded
     // in the `.debug_gdb_scripts` section. For that reason, we make sure that the
     // section is only emitted for leaf crates.
-    let embed_visualizers = cx.sess().crate_types().iter().any(|&crate_type| match crate_type {
+    let embed_visualizers = cx.tcx.crate_types().iter().any(|&crate_type| match crate_type {
         CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
             // These are crate types for which we will embed pretty printers since they
             // are treated as leaf crates.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 905e0e541a8..ed938761694 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -184,9 +184,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
             debug_assert_eq!(
                 (data_layout.pointer_size, data_layout.pointer_align.abi),
                 cx.size_and_align_of(ptr_type),
-                "ptr_type={}, pointee_type={}",
-                ptr_type,
-                pointee_type,
+                "ptr_type={ptr_type}, pointee_type={pointee_type}",
             );
 
             let di_node = unsafe {
@@ -447,9 +445,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
         ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
             build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
         }
-        // Box<T, A> may have a non-ZST allocator A. In that case, we
+        // Box<T, A> may have a non-1-ZST allocator A. In that case, we
         // cannot treat Box<T, A> as just an owned alias of `*mut T`.
-        ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
+        ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => {
             build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
         }
         ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
@@ -521,7 +519,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
 fn hex_encode(data: &[u8]) -> String {
     let mut hex_string = String::with_capacity(data.len() * 2);
     for byte in data.iter() {
-        write!(&mut hex_string, "{:02x}", byte).unwrap();
+        write!(&mut hex_string, "{byte:02x}").unwrap();
     }
     hex_string
 }
@@ -766,7 +764,7 @@ fn build_param_type_di_node<'ll, 'tcx>(
     t: Ty<'tcx>,
 ) -> DINodeCreationResult<'ll> {
     debug!("build_param_type_di_node: {:?}", t);
-    let name = format!("{:?}", t);
+    let name = format!("{t:?}");
     DINodeCreationResult {
         di_node: unsafe {
             llvm::LLVMRustDIBuilderCreateBasicType(
@@ -814,7 +812,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
     debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
     let rustc_producer = format!("rustc version {}", tcx.sess.cfg_version);
     // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
-    let producer = format!("clang LLVM ({})", rustc_producer);
+    let producer = format!("clang LLVM ({rustc_producer})");
 
     let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
     let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
@@ -992,14 +990,8 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
     closure_or_generator_di_node: &'ll DIType,
 ) -> SmallVec<&'ll DIType> {
     let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
-        ty::Generator(def_id, args, _) => {
-            let upvar_tys: SmallVec<_> = args.as_generator().prefix_tys().collect();
-            (def_id, upvar_tys)
-        }
-        ty::Closure(def_id, args) => {
-            let upvar_tys: SmallVec<_> = args.as_closure().upvar_tys().collect();
-            (def_id, upvar_tys)
-        }
+        ty::Generator(def_id, args, _) => (def_id, args.as_generator().prefix_tys()),
+        ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()),
         _ => {
             bug!(
                 "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
@@ -1009,9 +1001,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
     };
 
     debug_assert!(
-        up_var_tys
-            .iter()
-            .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+        up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
     );
 
     let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
@@ -1331,10 +1321,10 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
                             // Note: This code does not try to give a proper name to each method
                             //       because there might be multiple methods with the same name
                             //       (coming from different traits).
-                            (format!("__method{}", index), void_pointer_type_di_node)
+                            (format!("__method{index}"), void_pointer_type_di_node)
                         }
                         ty::VtblEntry::TraitVPtr(_) => {
-                            (format!("__super_trait_ptr{}", index), void_pointer_type_di_node)
+                            (format!("__super_trait_ptr{index}"), void_pointer_type_di_node)
                         }
                         ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
                         ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
@@ -1504,5 +1494,5 @@ pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
     TUPLE_FIELD_NAMES
         .get(field_index)
         .map(|s| Cow::from(*s))
-        .unwrap_or_else(|| Cow::from(format!("__{}", field_index)))
+        .unwrap_or_else(|| Cow::from(format!("__{field_index}")))
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index b4beb80ca8b..d3239d5c358 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -379,6 +379,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
             // Fields that are common to all states
             let common_fields: SmallVec<_> = generator_args
                 .prefix_tys()
+                .iter()
                 .zip(common_upvar_names)
                 .enumerate()
                 .map(|(index, (upvar_ty, upvar_name))| {
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 40714a0afe9..30cc9ea9b82 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -263,11 +263,11 @@ impl CodegenCx<'_, '_> {
     pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
         let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
             Ok(SourceFileAndLine { sf: file, line }) => {
-                let line_pos = file.lines(|lines| lines[line]);
+                let line_pos = file.lines()[line];
 
                 // Use 1-based indexing.
                 let line = (line + 1) as u32;
-                let col = (pos - line_pos).to_u32() + 1;
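+                // `lines()` yields positions relative to the file start, so the
+                // absolute `pos` must be converted with `relative_position` first.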
+                let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
 
                 (file, line, col)
             }
@@ -292,7 +292,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: &'ll Value,
         mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+    ) -> Option<FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>> {
         if self.sess().opts.debuginfo == DebugInfo::None {
             return None;
         }
@@ -304,8 +304,10 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             file_start_pos: BytePos(0),
             file_end_pos: BytePos(0),
         };
-        let mut fn_debug_context =
-            FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes),
+            inlined_function_scopes: Default::default(),
+        };
 
         // Fill in all the scopes, with the information from the MIR body.
         compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
@@ -347,6 +349,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         type_names::push_generic_params(
             tcx,
             tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+            enclosing_fn_def_id,
             &mut name,
         );
 
@@ -526,7 +529,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) {
                 // If the method does *not* belong to a trait, proceed
                 if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
-                    let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+                    let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions(
                         instance.args,
                         ty::ParamEnv::reveal_all(),
                         cx.tcx.type_of(impl_def_id),
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 7be83638676..c758010c581 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -91,8 +91,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
             // For all other pointee types we should already have returned None
             // at the beginning of the function.
             panic!(
-                "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {:?}",
-                pointee_tail_ty
+                "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {pointee_tail_ty:?}"
             )
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index fced6d504d2..665d195790c 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -139,6 +139,10 @@ pub(crate) struct LtoDisallowed;
 pub(crate) struct LtoDylib;
 
 #[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
 #[diag(codegen_llvm_lto_bitcode_from_rlib)]
 pub(crate) struct LtoBitcodeFromRlib {
     pub llvm_err: String,
@@ -226,3 +230,9 @@ pub(crate) struct WriteBytecode<'a> {
 pub(crate) struct CopyBitcode {
     pub err: std::io::Error,
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+    pub algorithm: &'static str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 6df1b708ccd..a97b803fc64 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,7 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir as hir;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, symbol::kw, Span, Symbol};
 use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
@@ -165,9 +165,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
                     let llty = ty.llvm_type(self);
-                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
                     self.volatile_load(llty, ptr)
                 } else {
                     self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
@@ -230,22 +229,22 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                         sym::ctlz | sym::cttz => {
                             let y = self.const_bool(false);
                             self.call_intrinsic(
-                                &format!("llvm.{}.i{}", name, width),
+                                &format!("llvm.{name}.i{width}"),
                                 &[args[0].immediate(), y],
                             )
                         }
                         sym::ctlz_nonzero => {
                             let y = self.const_bool(true);
-                            let llvm_name = &format!("llvm.ctlz.i{}", width);
+                            let llvm_name = &format!("llvm.ctlz.i{width}");
                             self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
                         }
                         sym::cttz_nonzero => {
                             let y = self.const_bool(true);
-                            let llvm_name = &format!("llvm.cttz.i{}", width);
+                            let llvm_name = &format!("llvm.cttz.i{width}");
                             self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
                         }
                         sym::ctpop => self.call_intrinsic(
-                            &format!("llvm.ctpop.i{}", width),
+                            &format!("llvm.ctpop.i{width}"),
                             &[args[0].immediate()],
                         ),
                         sym::bswap => {
@@ -253,13 +252,13 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                                 args[0].immediate() // byte swap a u8/i8 is just a no-op
                             } else {
                                 self.call_intrinsic(
-                                    &format!("llvm.bswap.i{}", width),
+                                    &format!("llvm.bswap.i{width}"),
                                     &[args[0].immediate()],
                                 )
                             }
                         }
                         sym::bitreverse => self.call_intrinsic(
-                            &format!("llvm.bitreverse.i{}", width),
+                            &format!("llvm.bitreverse.i{width}"),
                             &[args[0].immediate()],
                         ),
                         sym::rotate_left | sym::rotate_right => {
@@ -317,18 +316,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     self.const_bool(true)
                 } else if use_integer_compare {
                     let integer_ty = self.type_ix(layout.size().bits());
-                    let ptr_ty = self.type_ptr_to(integer_ty);
-                    let a_ptr = self.bitcast(a, ptr_ty);
-                    let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
-                    let b_ptr = self.bitcast(b, ptr_ty);
-                    let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
+                    let a_val = self.load(integer_ty, a, layout.align().abi);
+                    let b_val = self.load(integer_ty, b, layout.align().abi);
                     self.icmp(IntPredicate::IntEQ, a_val, b_val)
                 } else {
-                    let i8p_ty = self.type_i8p();
-                    let a_ptr = self.bitcast(a, i8p_ty);
-                    let b_ptr = self.bitcast(b, i8p_ty);
                     let n = self.const_usize(layout.size().bytes());
-                    let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
+                    let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
                     match self.cx.sess().target.arch.as_ref() {
                         "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
                         _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
@@ -336,6 +329,16 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
             }
 
+            sym::compare_bytes => {
+                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                let cmp = self.call_intrinsic(
+                    "memcmp",
+                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
+                );
+                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
+                self.sext(cmp, self.type_ix(32))
+            }
+
             sym::black_box => {
                 args[0].val.store(self, result);
                 let result_val_span = [result.llval];
@@ -373,7 +376,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             _ if name.as_str().starts_with("simd_") => {
-                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                match generic_simd_intrinsic(
+                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+                ) {
                     Ok(llval) => llval,
                     Err(()) => return,
                 }
@@ -383,10 +388,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
-                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
-                let ptr = self.pointercast(result.llval, ptr_llty);
-                self.store(llval, ptr, result.align);
+            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
+                self.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -410,9 +413,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
         // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
         // optimization pass replaces calls to this intrinsic with code to test type membership.
-        let i8p_ty = self.type_i8p();
-        let bitcast = self.bitcast(pointer, i8p_ty);
-        self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
     }
 
     fn type_checked_load(
@@ -444,7 +445,7 @@ fn try_intrinsic<'ll>(
     dest: &'ll Value,
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
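+        // With LLVM's opaque pointers there is a single pointer type, so the
+        // old `type_i8p` (`i8*`) calls become plain `type_ptr` throughout.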
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.call(try_func_ty, None, None, try_func, &[data], None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
@@ -544,8 +545,8 @@ fn codegen_msvc_try<'ll>(
         //
         // More information can be found in libstd's seh.rs implementation.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let slot = bx.alloca(bx.type_i8p(), ptr_align);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let slot = bx.alloca(bx.type_ptr(), ptr_align);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
 
         bx.switch_to_block(normal);
@@ -568,10 +569,10 @@ fn codegen_msvc_try<'ll>(
         //
         // When modifying, make sure that the type_name string exactly matches
         // the one used in library/panic_unwind/src/seh.rs.
-        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
         let type_name = bx.const_bytes(b"rust_panic\0");
         let type_info =
-            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
         let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
         unsafe {
             llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
@@ -588,15 +589,15 @@ fn codegen_msvc_try<'ll>(
         bx.switch_to_block(catchpad_rust);
         let flags = bx.const_i32(8);
         let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
         bx.catch_ret(&funclet, caught);
 
         // The flag value of 64 indicates a "catch-all".
         bx.switch_to_block(catchpad_foreign);
         let flags = bx.const_i32(64);
-        let null = bx.const_null(bx.type_i8p());
+        let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null, flags, null]);
         bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
         bx.catch_ret(&funclet, caught);
@@ -655,7 +656,7 @@ fn codegen_wasm_try<'ll>(
         //      ret i32 1
         //   }
         //
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
 
         bx.switch_to_block(normal);
@@ -665,13 +666,13 @@ fn codegen_wasm_try<'ll>(
         let cs = bx.catch_switch(None, None, &[catchpad]);
 
         bx.switch_to_block(catchpad);
-        let null = bx.const_null(bx.type_i8p());
+        let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null]);
 
         let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
         let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
 
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
         bx.catch_ret(&funclet, caught);
 
@@ -723,7 +724,7 @@ fn codegen_gnu_try<'ll>(
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
 
         bx.switch_to_block(then);
@@ -736,12 +737,12 @@ fn codegen_gnu_try<'ll>(
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
         bx.switch_to_block(catch);
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
-        let tydesc = bx.const_null(bx.type_i8p());
+        let tydesc = bx.const_null(bx.type_ptr());
         bx.add_clause(vals, tydesc);
         let ptr = bx.extract_value(vals, 0);
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
         bx.ret(bx.const_i32(1));
     });
@@ -787,7 +788,7 @@ fn codegen_emcc_try<'ll>(
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
 
         bx.switch_to_block(then);
@@ -800,10 +801,10 @@ fn codegen_emcc_try<'ll>(
         // the landing pad clauses the exception's type had been matched to.
         bx.switch_to_block(catch);
         let tydesc = bx.eh_catch_typeinfo();
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
         bx.add_clause(vals, tydesc);
-        bx.add_clause(vals, bx.const_null(bx.type_i8p()));
+        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
         let ptr = bx.extract_value(vals, 0);
         let selector = bx.extract_value(vals, 1);
 
@@ -816,7 +817,7 @@ fn codegen_emcc_try<'ll>(
         // create an alloca and pass a pointer to that.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+        let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
         let catch_data = bx.alloca(catch_data_type, ptr_align);
         let catch_data_0 =
             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
@@ -824,9 +825,8 @@ fn codegen_emcc_try<'ll>(
         let catch_data_1 =
             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
         bx.store(is_rust_panic, catch_data_1, i8_align);
-        let catch_data = bx.bitcast(catch_data, bx.type_i8p());
 
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
         bx.ret(bx.const_i32(1));
     });
@@ -913,6 +913,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     name: Symbol,
     callee_ty: Ty<'tcx>,
+    fn_args: GenericArgsRef<'tcx>,
     args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
     llret_ty: &'ll Type,
@@ -967,8 +968,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
                 let int_ty = bx.type_ix(expected_bytes * 8);
-                let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
-                bx.load(int_ty, ptr, Align::ONE)
+                bx.load(int_ty, place.llval, Align::ONE)
             }
             _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                 span,
@@ -1033,28 +1033,70 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         ));
     }
 
-    if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
-        // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
-        // If there is no suffix, use the index array length.
-        let n: u64 = if stripped.is_empty() {
-            // Make sure this is actually an array, since typeck only checks the length-suffixed
-            // version of this intrinsic.
-            match args[2].layout.ty.kind() {
-                ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
-                    len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
-                        || span_bug!(span, "could not evaluate shuffle index array length"),
-                    )
+    if name == sym::simd_shuffle_generic {
+        let idx = fn_args[2]
+            .expect_const()
+            .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+            .unwrap()
+            .unwrap_branch();
+        let n = idx.len() as u64;
+
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let total_len = in_len * 2;
+
+        let indices: Option<Vec<_>> = idx
+            .iter()
+            .enumerate()
+            .map(|(arg_idx, val)| {
+                let idx = val.unwrap_leaf().try_to_i32().unwrap();
+                if idx >= i32::try_from(total_len).unwrap() {
+                    bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+                        span,
+                        name,
+                        arg_idx: arg_idx as u64,
+                        total_len: total_len.into(),
+                    });
+                    None
+                } else {
+                    Some(bx.const_i32(idx))
                 }
-                _ => return_error!(InvalidMonomorphization::SimdShuffle {
-                    span,
-                    name,
-                    ty: args[2].layout.ty
-                }),
-            }
-        } else {
-            stripped.parse().unwrap_or_else(|_| {
-                span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
             })
+            .collect();
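+        // `None` means an out-of-bounds index was already reported above; return
+        // a dummy value so codegen can continue.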
+        let Some(indices) = indices else {
+            return Ok(bx.const_null(llret_ty));
+        };
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            bx.const_vector(&indices),
+        ));
+    }
+
+    if name == sym::simd_shuffle {
+        // Make sure this is actually an array, since typeck only checks the length-suffixed
+        // version of this intrinsic.
+        let n: u64 = match args[2].layout.ty.kind() {
+            ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+                len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+                    || span_bug!(span, "could not evaluate shuffle index array length"),
+                )
+            }
+            _ => return_error!(InvalidMonomorphization::SimdShuffle {
+                span,
+                name,
+                ty: args[2].layout.ty
+            }),
         };
 
         require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
@@ -1217,7 +1259,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
                 bx.store(ze, ptr, Align::ONE);
                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
-                let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
                 return Ok(bx.load(array_ty, ptr, Align::ONE));
             }
             _ => return_error!(InvalidMonomorphization::CannotReturn {
@@ -1283,7 +1324,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
             _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
         };
-        let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+        let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
         let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
         let c = bx.call(
             fn_ty,
@@ -1321,50 +1362,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     // FIXME: use:
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
-    fn llvm_vector_str(
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        no_pointers: usize,
-        bx: &Builder<'_, '_, '_>,
-    ) -> String {
-        let p0s: String = "p0".repeat(no_pointers);
+    fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
         match *elem_ty.kind() {
             ty::Int(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: IntTy::Isize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
             ty::Uint(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: UintTy::Usize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
-            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+            ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
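+            // With opaque pointers there is a single pointer type per address
+            // space, mangled as `p0` for address space 0, whatever the pointee.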
+            ty::RawPtr(_) => format!("v{}p0", vec_len),
             _ => unreachable!(),
         }
     }
 
-    fn llvm_vector_ty<'ll>(
-        cx: &CodegenCx<'ll, '_>,
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        mut no_pointers: usize,
-    ) -> &'ll Type {
-        // FIXME: use cx.layout_of(ty).llvm_type() ?
-        let mut elem_ty = match *elem_ty.kind() {
+    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
+        let elem_ty = match *elem_ty.kind() {
             ty::Int(v) => cx.type_int_from_ty(v),
             ty::Uint(v) => cx.type_uint_from_ty(v),
             ty::Float(v) => cx.type_float_from_ty(v),
+            ty::RawPtr(_) => cx.type_ptr(),
             _ => unreachable!(),
         };
-        while no_pointers > 0 {
-            elem_ty = cx.type_ptr_to(elem_ty);
-            no_pointers -= 1;
-        }
         cx.type_vector(elem_ty, vec_len)
     }
 
@@ -1419,47 +1444,26 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
         );
 
-        // This counts how many pointers
-        fn ptr_count(t: Ty<'_>) -> usize {
-            match t.kind() {
-                ty::RawPtr(p) => 1 + ptr_count(p.ty),
-                _ => 0,
-            }
-        }
-
-        // Non-ptr type
-        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-            match t.kind() {
-                ty::RawPtr(p) => non_ptr(p.ty),
-                _ => t,
-            }
-        }
-
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (pointer_count, underlying_ty) = match element_ty1.kind() {
-            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
-            _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ExpectedElementType {
-                        span,
-                        name,
-                        expected_element: element_ty1,
-                        second_arg: arg_tys[1],
-                        in_elem,
-                        in_ty,
-                        mutability: ExpectedPointerMutability::Not,
-                    }
-                );
-                unreachable!();
+
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Not,
             }
-        };
-        assert!(pointer_count > 0);
-        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-        assert_eq!(underlying_ty, non_ptr(element_ty0));
+        );
 
         // The element type of the third argument must be a signed integer type of any width:
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
@@ -1490,15 +1494,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         };
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
 
         let llvm_intrinsic =
-            format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+            format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
         let fn_ty = bx.type_func(
             &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
             llvm_elem_vec_ty,
@@ -1559,50 +1563,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         );
 
-        // This counts how many pointers
-        fn ptr_count(t: Ty<'_>) -> usize {
-            match t.kind() {
-                ty::RawPtr(p) => 1 + ptr_count(p.ty),
-                _ => 0,
-            }
-        }
-
-        // Non-ptr type
-        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-            match t.kind() {
-                ty::RawPtr(p) => non_ptr(p.ty),
-                _ => t,
-            }
-        }
-
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-        let (pointer_count, underlying_ty) = match element_ty1.kind() {
-            ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => {
-                (ptr_count(element_ty1), non_ptr(element_ty1))
-            }
-            _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ExpectedElementType {
-                        span,
-                        name,
-                        expected_element: element_ty1,
-                        second_arg: arg_tys[1],
-                        in_elem,
-                        in_ty,
-                        mutability: ExpectedPointerMutability::Mut,
-                    }
-                );
-                unreachable!();
+
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p)
+                    if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Mut,
             }
-        };
-        assert!(pointer_count > 0);
-        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-        assert_eq!(underlying_ty, non_ptr(element_ty0));
+        );
 
         // The element type of the third argument must be a signed integer type of any width:
         match element_ty2.kind() {
@@ -1634,15 +1616,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ret_t = bx.type_void();
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
 
         let llvm_intrinsic =
-            format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+            format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
         let fn_ty =
             bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
         let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
@@ -1857,11 +1839,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         }
 
-        if in_elem == out_elem {
-            return Ok(args[0].immediate());
-        } else {
-            return Ok(bx.pointercast(args[0].immediate(), llret_ty));
-        }
+        return Ok(args[0].immediate());
     }
 
     if name == sym::simd_expose_addr {
@@ -2074,6 +2052,52 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         simd_neg: Int => neg, Float => fneg;
     }
 
+    // Unary integer intrinsics
+    if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_cttz) {
+        let vec_ty = bx.cx.type_vector(
+            match *in_elem.kind() {
+                ty::Int(i) => bx.cx.type_int_from_ty(i),
+                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
+                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
+                    span,
+                    name,
+                    in_ty,
+                    in_elem
+                }),
+            },
+            in_len as u64,
+        );
+        let intrinsic_name = match name {
+            sym::simd_bswap => "bswap",
+            sym::simd_bitreverse => "bitreverse",
+            sym::simd_ctlz => "ctlz",
+            sym::simd_cttz => "cttz",
+            _ => unreachable!(),
+        };
+        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
+        let llvm_intrinsic = &format!("llvm.{intrinsic_name}.v{in_len}i{int_size}");
+
+        return if name == sym::simd_bswap && int_size == 8 {
+            // byte swap is a no-op for i8/u8
+            Ok(args[0].immediate())
+        } else if matches!(name, sym::simd_ctlz | sym::simd_cttz) {
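+            // `llvm.ctlz`/`llvm.cttz` take an extra `i1` flag ("is zero poison");
+            // pass 0 so that a zero input has a defined result.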
+            let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
+            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+            Ok(bx.call(
+                fn_ty,
+                None,
+                None,
+                f,
+                &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)],
+                None,
+            ))
+        } else {
+            let fn_ty = bx.type_func(&[vec_ty], vec_ty);
+            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None))
+        };
+    }
+
     if name == sym::simd_arith_offset {
         // This also checks that the first operand is a ptr type.
         let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 5aab06febe4..59d1ea05d8a 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -10,6 +10,7 @@
 #![feature(iter_intersperse)]
 #![feature(let_chains)]
 #![feature(never_type)]
+#![feature(slice_group_by)]
 #![feature(impl_trait_in_assoc_type)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]
@@ -21,6 +22,7 @@ extern crate rustc_macros;
 #[macro_use]
 extern crate tracing;
 
+use back::owned_target_machine::OwnedTargetMachine;
 use back::write::{create_informational_target_machine, create_target_machine};
 
 use errors::ParseTargetMachineConfig;
@@ -28,7 +30,7 @@ pub use llvm_util::target_features;
 use rustc_ast::expand::allocator::AllocatorKind;
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
 use rustc_codegen_ssa::back::write::{
-    CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
 };
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::ModuleCodegen;
@@ -38,8 +40,8 @@ use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, Subd
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
 use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
 use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
 use rustc_session::Session;
 use rustc_span::symbol::Symbol;
@@ -51,6 +53,7 @@ use std::io::Write;
 mod back {
     pub mod archive;
     pub mod lto;
+    pub mod owned_target_machine;
     mod profiling;
     pub mod write;
 }
@@ -141,18 +144,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
         back::write::target_machine_factory(sess, optlvl, target_features)
     }
 
-    fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
-    where
-        F: FnOnce() -> T,
-        F: Send + 'static,
-        T: Send + 'static,
-    {
-        std::thread::spawn(move || {
-            let _profiler = TimeTraceProfiler::new(time_trace);
-            f()
-        })
-    }
-
     fn spawn_named_thread<F, T>(
         time_trace: bool,
         name: String,
@@ -173,7 +164,7 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
 impl WriteBackendMethods for LlvmCodegenBackend {
     type Module = ModuleLlvm;
     type ModuleBuffer = back::lto::ModuleBuffer;
-    type TargetMachine = &'static mut llvm::TargetMachine;
+    type TargetMachine = OwnedTargetMachine;
     type TargetMachineError = crate::errors::LlvmError<'static>;
     type ThinData = back::lto::ThinData;
     type ThinBuffer = back::lto::ThinBuffer;
@@ -212,7 +203,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     }
     fn run_fat_lto(
         cgcx: &CodegenContext<Self>,
-        modules: Vec<FatLTOInput<Self>>,
+        modules: Vec<FatLtoInput<Self>>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
     ) -> Result<LtoModuleCodegen<Self>, FatalError> {
         back::lto::run_fat(cgcx, modules, cached_modules)
@@ -298,21 +289,21 @@ impl CodegenBackend for LlvmCodegenBackend {
                     "ropi-rwpi",
                     "default",
                 ] {
-                    writeln!(out, "    {}", name);
+                    writeln!(out, "    {name}");
                 }
                 writeln!(out);
             }
             PrintKind::CodeModels => {
                 writeln!(out, "Available code models:");
                 for name in &["tiny", "small", "kernel", "medium", "large"] {
-                    writeln!(out, "    {}", name);
+                    writeln!(out, "    {name}");
                 }
                 writeln!(out);
             }
             PrintKind::TlsModels => {
                 writeln!(out, "Available TLS models:");
                 for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
-                    writeln!(out, "    {}", name);
+                    writeln!(out, "    {name}");
                 }
                 writeln!(out);
             }
@@ -412,7 +403,9 @@ impl CodegenBackend for LlvmCodegenBackend {
 pub struct ModuleLlvm {
     llcx: &'static mut llvm::Context,
     llmod_raw: *const llvm::Module,
-    tm: &'static mut llvm::TargetMachine,
+
+    // Independent of llcx and llmod_raw; resources are disposed of by the Drop impl.
+    tm: OwnedTargetMachine,
 }
 
 unsafe impl Send for ModuleLlvm {}
@@ -464,7 +457,6 @@ impl ModuleLlvm {
 impl Drop for ModuleLlvm {
     fn drop(&mut self) {
         unsafe {
-            llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
             llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
index 45de284d22a..06e846a2b45 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -9,7 +9,7 @@ use libc::c_uint;
 use super::{DiagnosticInfo, SMDiagnostic};
 use rustc_span::InnerSpan;
 
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub enum OptimizationDiagnosticKind {
     OptimizationRemark,
     OptimizationMissed,
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 5fe6407ac4f..a038b3af03d 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,8 +1,6 @@
 #![allow(non_camel_case_types)]
 #![allow(non_upper_case_globals)]
 
-use crate::coverageinfo::map_data as coverage_map;
-
 use super::debuginfo::{
     DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
     DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
@@ -85,12 +83,17 @@ pub enum LLVMModFlagBehavior {
 // Consts for the LLVM CallConv type, pre-cast to usize.
 
 /// LLVM CallingConv::ID. Should we wrap this?
+///
+/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h>
 #[derive(Copy, Clone, PartialEq, Debug)]
 #[repr(C)]
 pub enum CallConv {
     CCallConv = 0,
     FastCallConv = 8,
     ColdCallConv = 9,
+    PreserveMost = 14,
+    PreserveAll = 15,
+    Tail = 18,
     X86StdcallCallConv = 64,
     X86FastcallCallConv = 65,
     ArmAapcsCallConv = 67,
@@ -477,6 +480,8 @@ pub enum OptStage {
 pub struct SanitizerOptions {
     pub sanitize_address: bool,
     pub sanitize_address_recover: bool,
+    pub sanitize_cfi: bool,
+    pub sanitize_kcfi: bool,
     pub sanitize_memory: bool,
     pub sanitize_memory_recover: bool,
     pub sanitize_memory_track_origins: c_int,
@@ -688,204 +693,6 @@ extern "C" {
 pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
 pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
 
-pub mod coverageinfo {
-    use super::coverage_map;
-
-    /// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
-    ///
-    /// Must match the layout of `LLVMRustCounterMappingRegionKind`.
-    #[derive(Copy, Clone, Debug)]
-    #[repr(C)]
-    pub enum RegionKind {
-        /// A CodeRegion associates some code with a counter
-        CodeRegion = 0,
-
-        /// An ExpansionRegion represents a file expansion region that associates
-        /// a source range with the expansion of a virtual source file, such as
-        /// for a macro instantiation or #include file.
-        ExpansionRegion = 1,
-
-        /// A SkippedRegion represents a source range with code that was skipped
-        /// by a preprocessor or similar means.
-        SkippedRegion = 2,
-
-        /// A GapRegion is like a CodeRegion, but its count is only set as the
-    /// line execution count when it's the only region in the line.
-        GapRegion = 3,
-
-        /// A BranchRegion represents leaf-level boolean expressions and is
-        /// associated with two counters, each representing the number of times the
-        /// expression evaluates to true or false.
-        BranchRegion = 4,
-    }
-
-    /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
-    /// coverage map, in accordance with the
-    /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
-    /// The struct composes fields representing the `Counter` type and value(s) (injected counter
-    /// ID, or expression type and operands), the source file (an indirect index into a "filenames
-    /// array", encoded separately), and source location (start and end positions of the represented
-    /// code region).
-    ///
-    /// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
-    ///
-    /// Must match the layout of `LLVMRustCounterMappingRegion`.
-    #[derive(Copy, Clone, Debug)]
-    #[repr(C)]
-    pub struct CounterMappingRegion {
-        /// The counter type and type-dependent counter data, if any.
-        counter: coverage_map::Counter,
-
-        /// If the `RegionKind` is a `BranchRegion`, this represents the counter
-        /// for the false branch of the region.
-        false_counter: coverage_map::Counter,
-
-        /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
-        /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
-        /// that, in turn, are used to look up the filename for this region.
-        file_id: u32,
-
-        /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
-        /// the mapping regions created as a result of macro expansion, by checking if their file id
-        /// matches the expanded file id.
-        expanded_file_id: u32,
-
-        /// 1-based starting line of the mapping region.
-        start_line: u32,
-
-        /// 1-based starting column of the mapping region.
-        start_col: u32,
-
-        /// 1-based ending line of the mapping region.
-        end_line: u32,
-
-        /// 1-based ending column of the mapping region. If the high bit is set, the current
-        /// mapping region is a gap area.
-        end_col: u32,
-
-        kind: RegionKind,
-    }
-
-    impl CounterMappingRegion {
-        pub(crate) fn code_region(
-            counter: coverage_map::Counter,
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter,
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::CodeRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn branch_region(
-            counter: coverage_map::Counter,
-            false_counter: coverage_map::Counter,
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter,
-                false_counter,
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::BranchRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn expansion_region(
-            file_id: u32,
-            expanded_file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter: coverage_map::Counter::zero(),
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::ExpansionRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn skipped_region(
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter: coverage_map::Counter::zero(),
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-                kind: RegionKind::SkippedRegion,
-            }
-        }
-
-        // This function might be used in the future; the LLVM API is still evolving, as is coverage
-        // support.
-        #[allow(dead_code)]
-        pub(crate) fn gap_region(
-            counter: coverage_map::Counter,
-            file_id: u32,
-            start_line: u32,
-            start_col: u32,
-            end_line: u32,
-            end_col: u32,
-        ) -> Self {
-            Self {
-                counter,
-                false_counter: coverage_map::Counter::zero(),
-                file_id,
-                expanded_file_id: 0,
-                start_line,
-                start_col,
-                end_line,
-                end_col: (1_u32 << 31) | end_col,
-                kind: RegionKind::GapRegion,
-            }
-        }
-    }
-}
-
 pub mod debuginfo {
     use super::{InvariantOpaque, Metadata};
     use bitflags::bitflags;
@@ -1073,7 +880,7 @@ extern "C" {
 
     // Operations on array, pointer, and vector types (sequence types)
     pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
-    pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+    pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type;
     pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
 
     pub fn LLVMGetElementType(Ty: &Type) -> &Type;
@@ -1094,6 +901,7 @@ extern "C" {
     pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
     pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
     pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>;
+    pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
 
     // Operations on constants of any type
     pub fn LLVMConstNull(Ty: &Type) -> &Value;
@@ -1904,6 +1712,8 @@ extern "C" {
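+    // `Lengths` holds the byte length of each entry in `Filenames`, so the
+    // strings need not be NUL-terminated.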
     pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
         Filenames: *const *const c_char,
         FilenamesLen: size_t,
+        Lengths: *const size_t,
+        LengthsLen: size_t,
         BufferOut: &RustString,
     );
 
@@ -1911,14 +1721,18 @@ extern "C" {
     pub fn LLVMRustCoverageWriteMappingToBuffer(
         VirtualFileMappingIDs: *const c_uint,
         NumVirtualFileMappingIDs: c_uint,
-        Expressions: *const coverage_map::CounterExpression,
+        Expressions: *const crate::coverageinfo::ffi::CounterExpression,
         NumExpressions: c_uint,
-        MappingRegions: *const coverageinfo::CounterMappingRegion,
+        MappingRegions: *const crate::coverageinfo::ffi::CounterMappingRegion,
         NumMappingRegions: c_uint,
         BufferOut: &RustString,
     );
 
-    pub fn LLVMRustCoverageCreatePGOFuncNameVar(F: &Value, FuncName: *const c_char) -> &Value;
+    pub fn LLVMRustCoverageCreatePGOFuncNameVar(
+        F: &Value,
+        FuncName: *const c_char,
+        FuncNameLen: size_t,
+    ) -> &Value;
     pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
 
     #[allow(improper_ctypes)]
@@ -2298,6 +2112,8 @@ extern "C" {
     );
 
     pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+
+    // This function makes copies of the pointed-to data, so the data's lifetime may end after this function returns.
     pub fn LLVMRustCreateTargetMachine(
         Triple: *const c_char,
         CPU: *const c_char,
@@ -2317,9 +2133,14 @@ extern "C" {
         RelaxELFRelocations: bool,
         UseInitArray: bool,
         SplitDwarfFile: *const c_char,
+        OutputObjFile: *const c_char,
+        DebugInfoCompression: *const c_char,
         ForceEmulatedTls: bool,
-    ) -> Option<&'static mut TargetMachine>;
-    pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+        ArgsCstrBuff: *const c_char,
+        ArgsCstrBuffLen: usize,
+    ) -> *mut TargetMachine;
+
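+    // The raw pointer from `LLVMRustCreateTargetMachine` is wrapped in
+    // `OwnedTargetMachine`, whose `Drop` impl disposes of it via this function.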
+    pub fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine);
     pub fn LLVMRustAddLibraryInfo<'a>(
         PM: &PassManager<'a>,
         M: &'a Module,
@@ -2338,6 +2159,7 @@ extern "C" {
         TM: &'a TargetMachine,
         OptLevel: PassBuilderOptLevel,
         OptStage: OptStage,
+        IsLinkerPluginLTO: bool,
         NoPrepopulatePasses: bool,
         VerifyIR: bool,
         UseThinLTOBuffers: bool,
@@ -2504,6 +2326,12 @@ extern "C" {
         len: usize,
         out_len: &mut usize,
     ) -> *const u8;
+    pub fn LLVMRustGetSliceFromObjectDataByName(
+        data: *const u8,
+        len: usize,
+        name: *const u8,
+        out_len: &mut usize,
+    ) -> *const u8;
 
     pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
     pub fn LLVMRustLinkerAdd(
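
`LLVMRustGetSliceFromObjectDataByName` returns a pointer/length pair into the object data. A hedged sketch of wrapping it into a safe slice; the section name is an assumption, and since no name length is passed, the name is assumed to be a NUL-terminated C string:

    let mut out_len = 0usize;
    let ptr = unsafe {
        LLVMRustGetSliceFromObjectDataByName(
            data.as_ptr(), data.len(), b".llvmbc\0".as_ptr(), &mut out_len,
        )
    };
    let section: Option<&[u8]> = (!ptr.is_null())
        .then(|| unsafe { std::slice::from_raw_parts(ptr, out_len) });
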
@@ -2532,6 +2360,7 @@ extern "C" {
         remark_passes: *const *const c_char,
         remark_passes_len: usize,
         remark_file: *const c_char,
+        pgo_available: bool,
     );
 
     #[allow(improper_ctypes)]
@@ -2541,6 +2370,10 @@ extern "C" {
 
     pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
 
+    pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+    pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
     pub fn LLVMRustGetSymbols(
         buf_ptr: *const u8,
         buf_len: usize,
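
The two compression probes report what the linked LLVM was built with; a sketch of gating a requested debug-info compression choice on them (the surrounding logic and `requested: &str` are illustrative):

    let compression_ok = match requested {
        "zlib" => unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() },
        "zstd" => unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() },
        _ => false,
    };
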
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 4f20fbf2045..7c8ef67ffd1 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -303,7 +303,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
             // check that all features in a given smallvec are enabled
             for llvm_feature in to_llvm_features(sess, feature) {
                 let cstr = SmallCStr::new(llvm_feature);
-                if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+                if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } {
                     return false;
                 }
             }
@@ -315,7 +315,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
 
 pub fn print_version() {
     let (major, minor, patch) = get_version();
-    println!("LLVM version: {}.{}.{}", major, minor, patch);
+    println!("LLVM version: {major}.{minor}.{patch}");
 }
 
 pub fn get_version() -> (u32, u32, u32) {
@@ -390,11 +390,11 @@ fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &ll
 
     writeln!(out, "Features supported by rustc for this target:");
     for (feature, desc) in &rustc_target_features {
-        writeln!(out, "    {1:0$} - {2}.", max_feature_len, feature, desc);
+        writeln!(out, "    {feature:max_feature_len$} - {desc}.");
     }
     writeln!(out, "\nCode-generation features supported by LLVM for this target:");
     for (feature, desc) in &llvm_target_features {
-        writeln!(out, "    {1:0$} - {2}.", max_feature_len, feature, desc);
+        writeln!(out, "    {feature:max_feature_len$} - {desc}.");
     }
     if llvm_target_features.is_empty() {
         writeln!(out, "    Target features listing is not supported by this LLVM version.");
@@ -422,14 +422,14 @@ pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess
             }
             unsafe {
                 llvm::LLVMRustPrintTargetCPUs(
-                    tm,
+                    &tm,
                     cpu_cstring.as_ptr(),
                     callback,
                     &mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
                 );
             }
         }
-        PrintKind::TargetFeatures => print_target_features(out, sess, tm),
+        PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
         _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
     }
 }
@@ -507,8 +507,6 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
             .features
             .split(',')
             .filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
-            // Drop +atomics-32 feature introduced in LLVM 15.
-            .filter(|v| *v != "+atomics-32" || get_version() >= (15, 0, 0))
             .map(String::from),
     );
 
@@ -575,7 +573,7 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
                         match (enable_disable, feat) {
                             ('-' | '+', TargetFeatureFoldStrength::Both(f))
                             | ('+', TargetFeatureFoldStrength::EnableOnly(f)) => {
-                                Some(format!("{}{}", enable_disable, f))
+                                Some(format!("{enable_disable}{f}"))
                             }
                             _ => None,
                         }
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index e8cda626f54..38e8220569a 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -111,7 +111,7 @@ impl CodegenCx<'_, '_> {
         }
 
         // Symbols from executables can't really be imported any further.
-        let all_exe = self.tcx.sess.crate_types().iter().all(|ty| *ty == CrateType::Executable);
+        let all_exe = self.tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable);
         let is_declaration_for_linker =
             is_declaration || linkage == llvm::Linkage::AvailableExternallyLinkage;
         if all_exe && !is_declaration_for_linker {
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 7e672a8dc33..06b7703672f 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -112,13 +112,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
     }
 
-    pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
-        // FIXME(eddyb) We could find a better approximation if ity.align < align.
-        let ity = Integer::approximate_align(self, align);
-        self.type_from_integer(ity)
-    }
-
-    /// Return a LLVM type that has at most the required alignment,
+    /// Return an LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
     pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
         let unit = Integer::approximate_align(self, align);
@@ -189,17 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
     }
 
-    fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
-        assert_ne!(
-            self.type_kind(ty),
-            TypeKind::Function,
-            "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
-        );
-        ty.ptr_to(AddressSpace::DATA)
+    fn type_ptr(&self) -> &'ll Type {
+        self.type_ptr_ext(AddressSpace::DATA)
     }
 
-    fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-        ty.ptr_to(address_space)
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
+        unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
     }
 
     fn element_type(&self, ty: &'ll Type) -> &'ll Type {
@@ -247,12 +236,8 @@ impl Type {
         unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
     }
 
-    pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
-        Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
-    }
-
-    fn ptr_to(&self, address_space: AddressSpace) -> &Type {
-        unsafe { llvm::LLVMPointerType(self, address_space.0) }
+    pub fn ptr_llcx(llcx: &llvm::Context) -> &Type {
+        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
     }
 }
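
Since opaque pointer types carry only an address space and LLVM interns types per context, `type_ptr` and `type_ptr_ext(AddressSpace::DATA)` hand back the same type. An illustrative check, assuming a `cx: &CodegenCx` is in scope:

    let p1 = cx.type_ptr();
    let p2 = cx.type_ptr_ext(AddressSpace::DATA);
    // Both references point at the one interned `ptr` type.
    assert!(std::ptr::eq(p1, p2));
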
 
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 2dbd467cc84..dcc62d314ff 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -3,7 +3,7 @@ use crate::context::TypeLowering;
 use crate::type_::Type;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
 use rustc_target::abi::HasDataLayout;
@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
         Abi::Vector { element, count } => {
-            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+            let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
         Abi::ScalarPair(..) => {
@@ -61,9 +61,6 @@ fn uncached_llvm_type<'a, 'tcx>(
             }
             Some(name)
         }
-        // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition
-        // might be recursive. Other cases are non-recursive and we can use literal structure types.
-        ty::Adt(..) => Some(String::new()),
         _ => None,
     };
 
@@ -179,12 +176,7 @@ pub trait LayoutLlvmExt<'tcx> {
     fn is_llvm_scalar_pair(&self) -> bool;
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
-    fn scalar_llvm_type_at<'a>(
-        &self,
-        cx: &CodegenCx<'a, 'tcx>,
-        scalar: Scalar,
-        offset: Size,
-    ) -> &'a Type;
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
     fn scalar_pair_element_llvm_type<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
@@ -223,24 +215,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         if let Abi::Scalar(scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                 return llty;
             }
-            let llty = match *self.ty.kind() {
-                ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
-                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
-                }
-                ty::Adt(def, _) if def.is_box() => {
-                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
-                }
-                ty::FnPtr(sig) => {
-                    cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
-                }
-                _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
-            };
+            let llty = self.scalar_llvm_type_at(cx, scalar);
             cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
             return llty;
         }
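
To see why the new comment insists on layout-only lowering: a `repr(transparent)` wrapper shares its ABI with its inner field, so both must map to one LLVM type. A hedged illustration:

    #[repr(transparent)]
    struct Meters(u64);
    // Both layouts are Abi::Scalar over a 64-bit integer, so
    // `llvm_type` must return `i64` for `Meters` and `u64` alike;
    // matching on `self.ty` (as the deleted arms did for pointers)
    // could have produced different types for the two.
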
@@ -300,25 +284,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         self.llvm_type(cx)
     }
 
-    fn scalar_llvm_type_at<'a>(
-        &self,
-        cx: &CodegenCx<'a, 'tcx>,
-        scalar: Scalar,
-        offset: Size,
-    ) -> &'a Type {
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
         match scalar.primitive() {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
-            Pointer(address_space) => {
-                // If we know the alignment, pick something better than i8.
-                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                    cx.type_pointee_for_align(pointee.align)
-                } else {
-                    cx.type_i8()
-                };
-                cx.type_ptr_to_ext(pointee, address_space)
-            }
+            Pointer(address_space) => cx.type_ptr_ext(address_space),
         }
     }
 
@@ -328,27 +299,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         index: usize,
         immediate: bool,
     ) -> &'a Type {
-        // HACK(eddyb) special-case fat pointers until LLVM removes
-        // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match *self.ty.kind() {
-            ty::Ref(..) | ty::RawPtr(_) => {
-                return self.field(cx, index).llvm_type(cx);
-            }
-            // only wide pointer boxes are handled as pointers
-            // thin pointer boxes with scalar allocators are handled by the general logic below
-            ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
-                let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
-                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
-            }
-            // `dyn* Trait` has the same ABI as `*mut dyn Trait`
-            ty::Dynamic(bounds, region, ty::DynStar) => {
-                let ptr_ty =
-                    Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
-                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
-            }
-            _ => {}
-        }
-
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         let Abi::ScalarPair(a, b) = self.abi else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
@@ -364,8 +317,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return cx.type_i1();
         }
 
-        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
-        self.scalar_llvm_type_at(cx, scalar, offset)
+        self.scalar_llvm_type_at(cx, scalar)
     }
 
     fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
@@ -431,7 +383,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
 
         // Vectors, even for non-power-of-two sizes, have the same layout as
         // arrays but don't count as aggregate types
+        // While LLVM theoretically supports non-power-of-two sizes, and they
+        // often work fine, sometimes x86-isel deals with them horribly
+        // (see #115212), so for now only use power-of-two ones.
         if let FieldsShape::Array { count, .. } = self.layout.fields()
+            && count.is_power_of_two()
             && let element = self.field(cx, 0)
             && element.ty.is_integral()
         {
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 8800caa71d6..172c66a7af1 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -5,7 +5,7 @@ use crate::value::Value;
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::{
     common::IntPredicate,
-    traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+    traits::{BaseTypeMethods, BuilderMethods, ConstMethods},
 };
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::Ty;
@@ -26,24 +26,18 @@ fn round_pointer_up_to_alignment<'ll>(
 fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
-    llty: &'ll Type,
     size: Size,
     align: Align,
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ty = bx.type_i8p();
-    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
-    let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
-        bx.bitcast(list.immediate(), va_list_ptr_ty)
-    } else {
-        list.immediate()
-    };
+    let va_list_ty = bx.type_ptr();
+    let va_list_addr = list.immediate();
 
     let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
-        (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
     } else {
         (ptr, slot_size)
     };
@@ -56,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
         let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
-        (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+        (adjusted, addr_align)
     } else {
-        (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+        (addr, addr_align)
     }
 }
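
With every pointer now the opaque `ptr`, `emit_direct_ptr_va_arg` no longer needs the `llty` it used to bitcast to; the loaded va_list pointer is usable directly. A sketch of the simplified flow, with `va_list_addr`, `ptr_align`, and `slot_bytes: i32` assumed in scope:

    // Load the current argument pointer straight out of the va_list...
    let ptr = bx.load(bx.type_ptr(), va_list_addr, ptr_align);
    // ...and advance it with a byte-wise GEP; no `i8*`/`T*` bitcasts
    // are required any more.
    let next = bx.inbounds_gep(bx.type_i8(), ptr, &[bx.const_i32(slot_bytes)]);
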
 
@@ -81,7 +75,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
         (layout.llvm_type(bx.cx), layout.size, layout.align)
     };
     let (addr, addr_align) =
-        emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
+        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
     if indirect {
         let tmp_ret = bx.load(llty, addr, addr_align);
         bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
@@ -146,7 +140,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     bx.cond_br(use_stack, on_stack, in_reg);
 
     bx.switch_to_block(in_reg);
-    let top_type = bx.type_i8p();
+    let top_type = bx.type_ptr();
     let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
     let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
@@ -158,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
         reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
     }
     let reg_type = layout.llvm_type(bx);
-    let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
     let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
     bx.br(end);
 
@@ -218,7 +211,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Work out the address of the value in the register save area.
     let reg_ptr =
         bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
-    let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
     let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
     let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
     let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
@@ -234,7 +227,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Work out the address of the value in the argument overflow area.
     let arg_ptr =
         bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
-    let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
     let arg_off = bx.const_u64(padding);
     let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
 
@@ -246,14 +239,12 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
 
     // Return the appropriate result.
     bx.switch_to_block(end);
-    let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
     let val_addr = if indirect {
-        let ptr_type = bx.cx.type_ptr_to(val_type);
-        let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type));
-        bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi)
+        bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi)
     } else {
-        bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type))
+        val_addr
     };
     bx.load(val_type, val_addr, layout.align.abi)
 }