Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--  compiler/rustc_codegen_llvm/Cargo.toml  1
-rw-r--r--  compiler/rustc_codegen_llvm/messages.ftl  11
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs  79
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs  33
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs  584
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs  44
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs  8
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs  54
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs  14
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/profiling.rs  2
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs  95
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs  6
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs  370
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs  90
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs  88
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs  183
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs  367
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs  402
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs  175
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs  211
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs  2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs  7
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs  157
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs  14
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs  16
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs  18
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs  16
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs  113
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs  9
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs  20
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs  25
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs  33
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs  494
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs  7
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs  426
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/mod.rs  118
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs  300
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs  34
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs  30
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs  42
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs  4
41 files changed, 2103 insertions, 2599 deletions
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index a93baf88413..03a871297c4 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -14,6 +14,7 @@ libc = "0.2"
 measureme = "11"
 object = { version = "0.36.3", default-features = false, features = ["std", "read"] }
 rustc-demangle = "0.1.21"
+rustc_abi = { path = "../rustc_abi" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_attr = { path = "../rustc_attr" }
 rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl
index df2198df14b..63c64269eb8 100644
--- a/compiler/rustc_codegen_llvm/messages.ftl
+++ b/compiler/rustc_codegen_llvm/messages.ftl
@@ -7,6 +7,11 @@ codegen_llvm_dynamic_linking_with_lto =
 
 codegen_llvm_fixed_x18_invalid_arch = the `-Zfixed-x18` flag is not supported on the `{$arch}` architecture
 
+codegen_llvm_forbidden_ctarget_feature =
+    target feature `{$feature}` cannot be toggled with `-Ctarget-feature`: {$reason}
+    .note = this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+codegen_llvm_forbidden_ctarget_feature_issue = for more information, see issue #116344 <https://github.com/rust-lang/rust/issues/116344>
+
 codegen_llvm_from_llvm_diag = {$message}
 
 codegen_llvm_from_llvm_optimization_diag = {$filename}:{$line}:{$column} {$pass_name} ({$kind}): {$message}
@@ -33,9 +38,6 @@ codegen_llvm_lto_proc_macro = lto cannot be used for `proc-macro` crate type wit
 codegen_llvm_mismatch_data_layout =
     data-layout for target `{$rustc_target}`, `{$rustc_layout}`, differs from LLVM target's `{$llvm_target}` default layout, `{$llvm_layout}`
 
-codegen_llvm_missing_features =
-    add the missing features in a `target_feature` attribute
-
 codegen_llvm_multiple_source_dicompileunit = multiple source DICompileUnits found
 codegen_llvm_multiple_source_dicompileunit_with_llvm_err = multiple source DICompileUnits found: {$llvm_err}
 
@@ -63,9 +65,6 @@ codegen_llvm_serialize_module_with_llvm_err = failed to serialize module {$name}
 codegen_llvm_symbol_already_defined =
     symbol `{$symbol_name}` is already defined
 
-codegen_llvm_target_feature_disable_or_enable =
-    the target features {$features} must all be either enabled or disabled together
-
 codegen_llvm_target_machine = could not create LLVM TargetMachine for triple: {$triple}
 codegen_llvm_target_machine_with_llvm_err = could not create LLVM TargetMachine for triple: {$triple}: {$llvm_err}
 
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 26718792f5f..1d35138b013 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -1,18 +1,20 @@
 use std::cmp;
 
 use libc::c_uint;
+use rustc_abi as abi;
+pub(crate) use rustc_abi::ExternAbi;
+use rustc_abi::Primitive::Int;
+use rustc_abi::{HasDataLayout, Size};
+use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
 use rustc_codegen_ssa::traits::*;
-use rustc_codegen_ssa::MemFlags;
-use rustc_middle::ty::layout::LayoutOf;
-pub(crate) use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
 use rustc_middle::ty::Ty;
+use rustc_middle::ty::layout::LayoutOf;
+pub(crate) use rustc_middle::ty::layout::{WIDE_PTR_ADDR, WIDE_PTR_EXTRA};
 use rustc_middle::{bug, ty};
 use rustc_session::config;
-pub(crate) use rustc_target::abi::call::*;
-use rustc_target::abi::{self, HasDataLayout, Int, Size};
-pub(crate) use rustc_target::spec::abi::Abi;
+pub(crate) use rustc_target::callconv::*;
 use rustc_target::spec::SanitizerSet;
 use smallvec::SmallVec;
 
@@ -413,7 +415,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         instance: Option<ty::Instance<'tcx>>,
     ) {
         let mut func_attrs = SmallVec::<[_; 3]>::new();
-        if self.ret.layout.abi.is_uninhabited() {
+        if self.ret.layout.is_uninhabited() {
             func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
         }
         if !self.can_unwind {
@@ -422,6 +424,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         if let Conv::RiscvInterrupt { kind } = self.conv {
             func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
         }
+        if let Conv::CCmseNonSecureEntry = self.conv {
+            func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
+        }
         attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
 
         let mut i = 0;
@@ -431,7 +436,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             i - 1
         };
 
-        let apply_range_attr = |idx: AttributePlace, scalar: rustc_target::abi::Scalar| {
+        let apply_range_attr = |idx: AttributePlace, scalar: rustc_abi::Scalar| {
             if cx.sess().opts.optimize != config::OptLevel::No
                 && llvm_util::get_version() >= (19, 0, 0)
                 && matches!(scalar.primitive(), Int(..))
@@ -442,18 +447,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 // LLVM also rejects full range.
                 && !scalar.is_always_valid(cx)
             {
-                attributes::apply_to_llfn(
-                    llfn,
-                    idx,
-                    &[llvm::CreateRangeAttr(cx.llcx, scalar.size(cx), scalar.valid_range(cx))],
-                );
+                attributes::apply_to_llfn(llfn, idx, &[llvm::CreateRangeAttr(
+                    cx.llcx,
+                    scalar.size(cx),
+                    scalar.valid_range(cx),
+                )]);
             }
         };
 
         match &self.ret.mode {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
-                if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                 }
             }
@@ -466,14 +471,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 );
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
                 if cx.sess().opts.optimize != config::OptLevel::No {
-                    attributes::apply_to_llfn(
-                        llfn,
-                        llvm::AttributePlace::Argument(i),
-                        &[
-                            llvm::AttributeKind::Writable.create_attr(cx.llcx),
-                            llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx),
-                        ],
-                    );
+                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[
+                        llvm::AttributeKind::Writable.create_attr(cx.llcx),
+                        llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx),
+                    ]);
                 }
             }
             PassMode::Cast { cast, pad_i32: _ } => {
@@ -494,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 }
                 PassMode::Direct(attrs) => {
                     let i = apply(attrs);
-                    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                     }
                 }
@@ -509,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Pair(a, b) => {
                     let i = apply(a);
                     let ii = apply(b);
-                    if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+                    if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+                        arg.layout.backend_repr
+                    {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                         apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                     }
@@ -531,7 +534,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
 
     fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
         let mut func_attrs = SmallVec::<[_; 2]>::new();
-        if self.ret.layout.abi.is_uninhabited() {
+        if self.ret.layout.is_uninhabited() {
             func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
         }
         if !self.can_unwind {
@@ -569,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         }
         if bx.cx.sess().opts.optimize != config::OptLevel::No
                 && llvm_util::get_version() < (19, 0, 0)
-                && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+                && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
                 && matches!(scalar.primitive(), Int(..))
                 // If the value is a boolean, the range is 0..2 and that ultimately
                 // become 0..0 when the type becomes i1, which would be rejected
@@ -589,11 +592,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                         bx.cx.llcx,
                         bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()),
                     );
-                    attributes::apply_to_callsite(
-                        callsite,
-                        llvm::AttributePlace::Argument(i),
-                        &[byval],
-                    );
+                    attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[
+                        byval,
+                    ]);
                 }
                 PassMode::Direct(attrs)
                 | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
@@ -625,11 +626,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             // This will probably get ignored on all targets but those supporting the TrustZone-M
             // extension (thumbv8m targets).
             let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
-            attributes::apply_to_callsite(
-                callsite,
-                llvm::AttributePlace::Function,
-                &[cmse_nonsecure_call],
-            );
+            attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &[
+                cmse_nonsecure_call,
+            ]);
         }
 
         // Some intrinsics require that an elementtype attribute (with the pointee type of a
@@ -659,9 +658,11 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
 impl From<Conv> for llvm::CallConv {
     fn from(conv: Conv) -> Self {
         match conv {
-            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
-                llvm::CCallConv
-            }
+            Conv::C
+            | Conv::Rust
+            | Conv::CCmseNonSecureCall
+            | Conv::CCmseNonSecureEntry
+            | Conv::RiscvInterrupt { .. } => llvm::CCallConv,
             Conv::Cold => llvm::ColdCallConv,
             Conv::PreserveMost => llvm::PreserveMost,
             Conv::PreserveAll => llvm::PreserveAll,
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index b4f3784a31a..149ded28356 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -1,14 +1,15 @@
 use libc::c_uint;
 use rustc_ast::expand::allocator::{
-    alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy,
-    ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE,
+    ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE,
+    alloc_error_handler_name, default_fn_name, global_fn_name,
 };
 use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{DebugInfo, OomStrategy};
 
+use crate::common::AsCCharPtr;
 use crate::llvm::{self, Context, False, Module, True, Type};
-use crate::{attributes, debuginfo, ModuleLlvm};
+use crate::{ModuleLlvm, attributes, debuginfo};
 
 pub(crate) unsafe fn codegen(
     tcx: TyCtxt<'_>,
@@ -76,19 +77,15 @@ pub(crate) unsafe fn codegen(
     unsafe {
         // __rust_alloc_error_handler_should_panic
         let name = OomStrategy::SYMBOL;
-        let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
-        if tcx.sess.default_hidden_visibility() {
-            llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
-        }
+        let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8);
+        llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
         let val = tcx.sess.opts.unstable_opts.oom.should_panic();
         let llval = llvm::LLVMConstInt(i8, val as u64, False);
         llvm::LLVMSetInitializer(ll_g, llval);
 
         let name = NO_ALLOC_SHIM_IS_UNSTABLE;
-        let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
-        if tcx.sess.default_hidden_visibility() {
-            llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
-        }
+        let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8);
+        llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
         let llval = llvm::LLVMConstInt(i8, 0, False);
         llvm::LLVMSetInitializer(ll_g, llval);
     }
@@ -119,7 +116,7 @@ fn create_wrapper_function(
         );
         let llfn = llvm::LLVMRustGetOrInsertFunction(
             llmod,
-            from_name.as_ptr().cast(),
+            from_name.as_c_char_ptr(),
             from_name.len(),
             ty,
         );
@@ -132,9 +129,8 @@ fn create_wrapper_function(
             None
         };
 
-        if tcx.sess.default_hidden_visibility() {
-            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-        }
+        llvm::set_visibility(llfn, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
+
         if tcx.sess.must_emit_unwind_tables() {
             let uwtable =
                 attributes::uwtable_attr(llcx, tcx.sess.opts.unstable_opts.use_sync_unwind);
@@ -142,12 +138,12 @@ fn create_wrapper_function(
         }
 
         let callee =
-            llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty);
+            llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_c_char_ptr(), to_name.len(), ty);
         if let Some(no_return) = no_return {
             // -> ! DIFlagNoReturn
             attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
         }
-        llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+        llvm::set_visibility(callee, llvm::Visibility::Hidden);
 
         let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr());
 
@@ -158,7 +154,7 @@ fn create_wrapper_function(
             .enumerate()
             .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
             .collect::<Vec<_>>();
-        let ret = llvm::LLVMRustBuildCall(
+        let ret = llvm::LLVMBuildCallWithOperandBundles(
             llbuilder,
             ty,
             callee,
@@ -166,6 +162,7 @@ fn create_wrapper_function(
             args.len() as c_uint,
             [].as_ptr(),
             0 as c_uint,
+            c"".as_ptr(),
         );
         llvm::LLVMSetTailCall(ret, True);
         if output.is_some() {
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 775266fec97..bb74dfe1487 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -1,21 +1,21 @@
 use std::assert_matches::assert_matches;
 
 use libc::{c_char, c_uint};
+use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar};
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_codegen_ssa::mir::operand::OperandValue;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::Instance;
+use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::{bug, span_bug};
-use rustc_span::{sym, Pos, Span, Symbol};
-use rustc_target::abi::*;
+use rustc_span::{Pos, Span, Symbol, sym};
 use rustc_target::asm::*;
 use smallvec::SmallVec;
 use tracing::debug;
 
 use crate::builder::Builder;
-use crate::common::Funclet;
+use crate::common::{AsCCharPtr, Funclet};
 use crate::context::CodegenCx;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
@@ -154,7 +154,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     // We prefer the latter because it matches the behavior of
                     // Clang.
                     if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
-                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)).to_string());
+                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)));
                     } else {
                         constraints.push(format!("{}", op_idx[&idx]));
                     }
@@ -268,6 +268,15 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 InlineAsmArch::S390x => {
                     constraints.push("~{cc}".to_string());
                 }
+                InlineAsmArch::Sparc | InlineAsmArch::Sparc64 => {
+                    // In LLVM, ~{icc} represents icc and xcc in 64-bit code.
+                    // https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/Target/Sparc/SparcRegisterInfo.td#L64
+                    constraints.push("~{icc}".to_string());
+                    constraints.push("~{fcc0}".to_string());
+                    constraints.push("~{fcc1}".to_string());
+                    constraints.push("~{fcc2}".to_string());
+                    constraints.push("~{fcc3}".to_string());
+                }
                 InlineAsmArch::SpirV => {}
                 InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                 InlineAsmArch::Bpf => {}
@@ -420,7 +429,7 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
         unsafe {
             llvm::LLVMAppendModuleInlineAsm(
                 self.llmod,
-                template_str.as_ptr().cast(),
+                template_str.as_c_char_ptr(),
                 template_str.len(),
             );
         }
@@ -458,14 +467,14 @@ pub(crate) fn inline_asm_call<'ll>(
     let fty = bx.cx.type_func(&argtys, output);
     unsafe {
         // Ask LLVM to verify that the constraints are well-formed.
-        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
+        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len());
         debug!("constraint verification result: {:?}", constraints_ok);
         if constraints_ok {
             let v = llvm::LLVMRustInlineAsm(
                 fty,
-                asm.as_ptr().cast(),
+                asm.as_c_char_ptr(),
                 asm.len(),
-                cons.as_ptr().cast(),
+                cons.as_c_char_ptr(),
                 cons.len(),
                 volatile,
                 alignstack,
@@ -504,10 +513,15 @@ pub(crate) fn inline_asm_call<'ll>(
                 // due to the asm template string coming from a macro. LLVM will
                 // default to the first srcloc for lines that don't have an
                 // associated srcloc.
-                srcloc.push(bx.const_i32(0));
+                srcloc.push(llvm::LLVMValueAsMetadata(bx.const_i32(0)));
             }
-            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
-            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
+            srcloc.extend(
+                line_spans
+                    .iter()
+                    .map(|span| llvm::LLVMValueAsMetadata(bx.const_i32(span.lo().to_u32() as i32))),
+            );
+            let md = llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len());
+            let md = llvm::LLVMMetadataAsValue(&bx.llcx, md);
             llvm::LLVMSetMetadata(call, kind, md);
 
             Some(call)
@@ -520,24 +534,16 @@ pub(crate) fn inline_asm_call<'ll>(
 
 /// If the register is an xmm/ymm/zmm register then return its index.
 fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
+    use X86InlineAsmReg::*;
     match reg {
-        InlineAsmReg::X86(reg)
-            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
-                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
-        {
-            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
+        InlineAsmReg::X86(reg) if reg as u32 >= xmm0 as u32 && reg as u32 <= xmm15 as u32 => {
+            Some(reg as u32 - xmm0 as u32)
         }
-        InlineAsmReg::X86(reg)
-            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
-                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
-        {
-            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
+        InlineAsmReg::X86(reg) if reg as u32 >= ymm0 as u32 && reg as u32 <= ymm15 as u32 => {
+            Some(reg as u32 - ymm0 as u32)
         }
-        InlineAsmReg::X86(reg)
-            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
-                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
-        {
-            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
+        InlineAsmReg::X86(reg) if reg as u32 >= zmm0 as u32 && reg as u32 <= zmm31 as u32 => {
+            Some(reg as u32 - zmm0 as u32)
         }
         _ => None,
     }
@@ -546,37 +552,7 @@ fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
 /// If the register is an AArch64 integer register then return its index.
 fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
     match reg {
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x0) => Some(0),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x1) => Some(1),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x2) => Some(2),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x3) => Some(3),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x4) => Some(4),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x5) => Some(5),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x6) => Some(6),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x7) => Some(7),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x8) => Some(8),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x9) => Some(9),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x10) => Some(10),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x11) => Some(11),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x12) => Some(12),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x13) => Some(13),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x14) => Some(14),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x15) => Some(15),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x16) => Some(16),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x17) => Some(17),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x18) => Some(18),
-        // x19 is reserved
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x20) => Some(20),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x21) => Some(21),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x22) => Some(22),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x23) => Some(23),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x24) => Some(24),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x25) => Some(25),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x26) => Some(26),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x27) => Some(27),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x28) => Some(28),
-        // x29 is reserved
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) => Some(30),
+        InlineAsmReg::AArch64(r) => r.reg_index(),
         _ => None,
     }
 }
@@ -584,18 +560,14 @@ fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
 /// If the register is an AArch64 vector register then return its index.
 fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
     match reg {
-        InlineAsmReg::AArch64(reg)
-            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
-                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
-        {
-            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
-        }
+        InlineAsmReg::AArch64(reg) => reg.vreg_index(),
         _ => None,
     }
 }
 
 /// Converts a register class to an LLVM constraint code.
 fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
+    use InlineAsmRegClass::*;
     match reg {
         // For vector registers LLVM wants the register name to match the type size.
         InlineAsmRegOrRegClass::Reg(reg) => {
@@ -652,75 +624,73 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
         // The constraints can be retrieved from
         // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
         InlineAsmRegOrRegClass::RegClass(reg) => match reg {
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
-            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
-            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            AArch64(AArch64InlineAsmRegClass::reg) => "r",
+            AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+            AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+            AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
+            Arm(ArmInlineAsmRegClass::reg) => "r",
+            Arm(ArmInlineAsmRegClass::sreg)
+            | Arm(ArmInlineAsmRegClass::dreg_low16)
+            | Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
+            Arm(ArmInlineAsmRegClass::sreg_low16)
+            | Arm(ArmInlineAsmRegClass::dreg_low8)
+            | Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
+            Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w",
+            Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+            LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
+            LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
+            Mips(MipsInlineAsmRegClass::reg) => "r",
+            Mips(MipsInlineAsmRegClass::freg) => "f",
+            Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+            Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+            Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+            PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+            PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+            PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+            PowerPC(PowerPCInlineAsmRegClass::cr)
+            | PowerPC(PowerPCInlineAsmRegClass::xer)
+            | PowerPC(PowerPCInlineAsmRegClass::vreg) => {
                 unreachable!("clobber-only")
             }
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
-            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
-            InlineAsmRegClass::X86(
+            RiscV(RiscVInlineAsmRegClass::reg) => "r",
+            RiscV(RiscVInlineAsmRegClass::freg) => "f",
+            RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
+            X86(X86InlineAsmRegClass::reg) => "r",
+            X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+            X86(X86InlineAsmRegClass::reg_byte) => "q",
+            X86(X86InlineAsmRegClass::xmm_reg) | X86(X86InlineAsmRegClass::ymm_reg) => "x",
+            X86(X86InlineAsmRegClass::zmm_reg) => "v",
+            X86(X86InlineAsmRegClass::kreg) => "^Yk",
+            X86(
                 X86InlineAsmRegClass::x87_reg
                 | X86InlineAsmRegClass::mmx_reg
                 | X86InlineAsmRegClass::kreg0
                 | X86InlineAsmRegClass::tmm_reg,
             ) => unreachable!("clobber-only"),
-            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
-            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
-            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-                bug!("LLVM backend does not support SPIR-V")
+            Wasm(WasmInlineAsmRegClass::local) => "r",
+            Bpf(BpfInlineAsmRegClass::reg) => "r",
+            Bpf(BpfInlineAsmRegClass::wreg) => "w",
+            Avr(AvrInlineAsmRegClass::reg) => "r",
+            Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+            Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+            Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+            Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+            S390x(S390xInlineAsmRegClass::reg) => "r",
+            S390x(S390xInlineAsmRegClass::reg_addr) => "a",
+            S390x(S390xInlineAsmRegClass::freg) => "f",
+            S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => {
+                unreachable!("clobber-only")
             }
-            InlineAsmRegClass::Err => unreachable!(),
+            Sparc(SparcInlineAsmRegClass::reg) => "r",
+            Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+            Msp430(Msp430InlineAsmRegClass::reg) => "r",
+            M68k(M68kInlineAsmRegClass::reg) => "r",
+            M68k(M68kInlineAsmRegClass::reg_addr) => "a",
+            M68k(M68kInlineAsmRegClass::reg_data) => "d",
+            CSKY(CSKYInlineAsmRegClass::reg) => "r",
+            CSKY(CSKYInlineAsmRegClass::freg) => "f",
+            SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
+            Err => unreachable!(),
         }
         .to_string(),
     }
@@ -732,44 +702,41 @@ fn modifier_to_llvm(
     reg: InlineAsmRegClass,
     modifier: Option<char>,
 ) -> Option<char> {
+    use InlineAsmRegClass::*;
     // The modifiers can be retrieved from
     // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
     match reg {
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
-        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
-            if modifier == Some('v') { None } else { modifier }
-        }
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
-            unreachable!("clobber-only")
+        AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            if modifier == Some('v') {
+                None
+            } else {
+                modifier
+            }
         }
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+        AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
+        Arm(ArmInlineAsmRegClass::reg) => None,
+        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+        Arm(ArmInlineAsmRegClass::dreg)
+        | Arm(ArmInlineAsmRegClass::dreg_low16)
+        | Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+        Arm(ArmInlineAsmRegClass::qreg)
+        | Arm(ArmInlineAsmRegClass::qreg_low8)
+        | Arm(ArmInlineAsmRegClass::qreg_low4) => {
             if modifier.is_none() {
                 Some('q')
             } else {
                 modifier
             }
         }
-        InlineAsmRegClass::Hexagon(_) => None,
-        InlineAsmRegClass::LoongArch(_) => None,
-        InlineAsmRegClass::Mips(_) => None,
-        InlineAsmRegClass::Nvptx(_) => None,
-        InlineAsmRegClass::PowerPC(_) => None,
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
-        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+        Hexagon(_) => None,
+        LoongArch(_) => None,
+        Mips(_) => None,
+        Nvptx(_) => None,
+        PowerPC(_) => None,
+        RiscV(RiscVInlineAsmRegClass::reg) | RiscV(RiscVInlineAsmRegClass::freg) => None,
+        RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
+        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
             None if arch == InlineAsmArch::X86_64 => Some('q'),
             None => Some('k'),
             Some('l') => Some('b'),
@@ -779,10 +746,10 @@ fn modifier_to_llvm(
             Some('r') => Some('q'),
             _ => unreachable!(),
         },
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
-        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
-        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
-        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+        X86(X86InlineAsmRegClass::reg_byte) => None,
+        X86(reg @ X86InlineAsmRegClass::xmm_reg)
+        | X86(reg @ X86InlineAsmRegClass::ymm_reg)
+        | X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
             (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
             (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
             (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
@@ -791,116 +758,105 @@ fn modifier_to_llvm(
             (_, Some('z')) => Some('g'),
             _ => unreachable!(),
         },
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
-        InlineAsmRegClass::X86(
+        X86(X86InlineAsmRegClass::kreg) => None,
+        X86(
             X86InlineAsmRegClass::x87_reg
             | X86InlineAsmRegClass::mmx_reg
             | X86InlineAsmRegClass::kreg0
             | X86InlineAsmRegClass::tmm_reg,
-        ) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
-        InlineAsmRegClass::Bpf(_) => None,
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
-        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
-        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
+        ) => unreachable!("clobber-only"),
+        Wasm(WasmInlineAsmRegClass::local) => None,
+        Bpf(_) => None,
+        Avr(AvrInlineAsmRegClass::reg_pair)
+        | Avr(AvrInlineAsmRegClass::reg_iw)
+        | Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
             Some('h') => Some('B'),
             Some('l') => Some('A'),
             _ => None,
         },
-        InlineAsmRegClass::Avr(_) => None,
-        InlineAsmRegClass::S390x(_) => None,
-        InlineAsmRegClass::Msp430(_) => None,
-        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-            bug!("LLVM backend does not support SPIR-V")
-        }
-        InlineAsmRegClass::M68k(_) => None,
-        InlineAsmRegClass::CSKY(_) => None,
-        InlineAsmRegClass::Err => unreachable!(),
+        Avr(_) => None,
+        S390x(_) => None,
+        Sparc(_) => None,
+        Msp430(_) => None,
+        SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
+        M68k(_) => None,
+        CSKY(_) => None,
+        Err => unreachable!(),
     }
 }
 
 /// Type to use for outputs that are discarded. It doesn't really matter what
 /// the type is, as long as it is valid for the constraint code.
 fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
+    use InlineAsmRegClass::*;
     match reg {
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
-        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+        AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             cx.type_vector(cx.type_i64(), 2)
         }
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+        AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
+        Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
+        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+        Arm(ArmInlineAsmRegClass::dreg)
+        | Arm(ArmInlineAsmRegClass::dreg_low16)
+        | Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+        Arm(ArmInlineAsmRegClass::qreg)
+        | Arm(ArmInlineAsmRegClass::qreg_low8)
+        | Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2),
+        Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
+        LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
+        Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+        Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+        Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+        Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+        Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+        PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+        PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+        PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+        PowerPC(PowerPCInlineAsmRegClass::cr)
+        | PowerPC(PowerPCInlineAsmRegClass::xer)
+        | PowerPC(PowerPCInlineAsmRegClass::vreg) => {
             unreachable!("clobber-only")
         }
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
-            cx.type_vector(cx.type_i64(), 2)
-        }
-        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
-        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
-        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
-        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
-        InlineAsmRegClass::X86(
+        RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+        RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+        RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
+        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+        X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+        X86(X86InlineAsmRegClass::xmm_reg)
+        | X86(X86InlineAsmRegClass::ymm_reg)
+        | X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+        X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        X86(
             X86InlineAsmRegClass::x87_reg
             | X86InlineAsmRegClass::mmx_reg
             | X86InlineAsmRegClass::kreg0
             | X86InlineAsmRegClass::tmm_reg,
-        ) => {
+        ) => unreachable!("clobber-only"),
+        Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+        Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
+        Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+        Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
+        Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
+        Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
+        Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
+        Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
+        S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(),
+        S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+        S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => {
             unreachable!("clobber-only")
         }
-        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
-        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
-        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
-        InlineAsmRegClass::S390x(
-            S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
-        ) => cx.type_i32(),
-        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
-        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
-        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
-        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
-        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-            bug!("LLVM backend does not support SPIR-V")
-        }
-        InlineAsmRegClass::Err => unreachable!(),
+        Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(),
+        Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+        Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
+        M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
+        M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
+        M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
+        SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
+        Err => unreachable!(),
     }
 }
 
@@ -940,9 +896,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
     layout: &TyAndLayout<'tcx>,
     instance: Instance<'_>,
 ) -> &'ll Value {
+    use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -950,7 +907,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -963,43 +920,42 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
-            Abi::Vector { element, count },
-        ) if layout.size.bytes() == 8 => {
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
+            if layout.size.bytes() == 8 =>
+        {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
             bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -1009,18 +965,18 @@ fn llvm_fixup_input<'ll, 'tcx>(
             bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
         (
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
-            Abi::Scalar(s),
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
@@ -1029,12 +985,12 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -1043,7 +999,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16
@@ -1051,11 +1007,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -1064,7 +1020,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1086,15 +1042,16 @@ fn llvm_fixup_output<'ll, 'tcx>(
     layout: &TyAndLayout<'tcx>,
     instance: Instance<'_>,
 ) -> &'ll Value {
-    match (reg, layout.abi) {
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    use InlineAsmRegClass::*;
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1103,60 +1060,59 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
-            Abi::Vector { element, count },
-        ) if layout.size.bytes() == 8 => {
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
+            if layout.size.bytes() == 8 =>
+        {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
             bx.bitcast(value, bx.type_f128())
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
         (
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
-            Abi::Scalar(s),
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
@@ -1165,12 +1121,12 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1179,7 +1135,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16
@@ -1187,11 +1143,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1201,7 +1157,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1220,70 +1176,70 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     layout: &TyAndLayout<'tcx>,
     instance: Instance<'_>,
 ) -> &'ll Type {
-    match (reg, layout.abi) {
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    use InlineAsmRegClass::*;
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
-            Abi::Vector { element, count },
-        ) if layout.size.bytes() == 8 => {
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
+            if layout.size.bytes() == 8 =>
+        {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
             cx.type_vector(cx.type_i32(), 4)
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
         (
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
-            Abi::Scalar(s),
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
@@ -1292,12 +1248,12 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
@@ -1306,7 +1262,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16
@@ -1314,11 +1270,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1327,7 +1283,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 _ => layout.llvm_type(cx),
             }
         }
-        (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 9d4497d73a8..cb958c1d4d7 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -6,12 +6,11 @@ use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
-use rustc_span::symbol::sym;
 use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
 use smallvec::SmallVec;
 
 use crate::context::CodegenCx;
-use crate::errors::{MissingFeatures, SanitizerMemtagRequiresMte, TargetFeatureDisableOrEnable};
+use crate::errors::SanitizerMemtagRequiresMte;
 use crate::llvm::AttributePlace::Function;
 use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects};
 use crate::value::Value;
@@ -233,11 +232,6 @@ fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
         return None;
     }
 
-    // probestack doesn't play nice either with gcov profiling.
-    if cx.sess().opts.unstable_opts.profile {
-        return None;
-    }
-
     let attr_value = match cx.sess().target.stack_probes {
         StackProbeType::None => return None,
         // Request LLVM to generate the probes inline. If the given LLVM version does not support
@@ -403,8 +397,9 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
         to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
         // HACK(jubilee): "indirect branch tracking" works by attaching prologues to functions.
-        // And it is a module-level attribute, so the alternative is pulling naked functions into new LLVM modules.
-        // Otherwise LLVM's "naked" functions come with endbr prefixes per https://github.com/rust-lang/rust/issues/98768
+        // And it is a module-level attribute, so the alternative is pulling naked functions into
+        // new LLVM modules. Otherwise LLVM's "naked" functions come with endbr prefixes per
+        // https://github.com/rust-lang/rust/issues/98768
         to_add.push(AttributeKind::NoCfCheck.create_attr(cx.llcx));
         if llvm_util::get_version() < (19, 0, 0) {
             // Prior to LLVM 19, branch-target-enforcement was disabled by setting the attribute to
@@ -424,7 +419,10 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
                 if bti {
                     to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
                 }
-                if let Some(PacRet { leaf, key }) = pac_ret {
+                if let Some(PacRet { leaf, pc, key }) = pac_ret {
+                    if pc {
+                        to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
+                    }
                     to_add.push(llvm::CreateAttrStringValue(
                         cx.llcx,
                         "sign-return-address",
@@ -454,7 +452,8 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
             flags |= AllocKindFlags::Zeroed;
         }
         to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
-        // apply to return place instead of function (unlike all other attributes applied in this function)
+        // apply to return place instead of function (unlike all other attributes applied in this
+        // function)
         let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
         attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
     }
@@ -481,9 +480,6 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
         let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
         attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
     }
-    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
-        to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
-    }
     if let Some(align) = codegen_fn_attrs.alignment {
         llvm::set_alignment(llfn, align);
     }
@@ -503,26 +499,6 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
     let function_features =
         codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();
 
-    if let Some(f) = llvm_util::check_tied_features(
-        cx.tcx.sess,
-        &function_features.iter().map(|f| (*f, true)).collect(),
-    ) {
-        let span = cx
-            .tcx
-            .get_attrs(instance.def_id(), sym::target_feature)
-            .next()
-            .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
-        cx.tcx
-            .dcx()
-            .create_err(TargetFeatureDisableOrEnable {
-                features: f,
-                span: Some(span),
-                missing_features: Some(MissingFeatures),
-            })
-            .emit();
-        return;
-    }
-
     let function_features = function_features
         .iter()
         // Convert to LLVMFeatures and filter out unavailable ones
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 4f2c83634a8..33a956e552f 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -1,12 +1,12 @@
 //! A helper class for dealing with static archives
 
-use std::ffi::{c_char, c_void, CStr, CString};
+use std::ffi::{CStr, CString, c_char, c_void};
 use std::path::{Path, PathBuf};
 use std::{io, mem, ptr, str};
 
 use rustc_codegen_ssa::back::archive::{
-    try_extract_macho_fat_archive, ArArchiveBuilder, ArchiveBuildFailure, ArchiveBuilder,
-    ArchiveBuilderBuilder, ObjectReader, UnknownArchiveKind, DEFAULT_OBJECT_READER,
+    ArArchiveBuilder, ArchiveBuildFailure, ArchiveBuilder, ArchiveBuilderBuilder,
+    DEFAULT_OBJECT_READER, ObjectReader, UnknownArchiveKind, try_extract_macho_fat_archive,
 };
 use rustc_session::Session;
 
@@ -123,7 +123,7 @@ fn get_llvm_object_symbols(
         llvm::LLVMRustGetSymbols(
             buf.as_ptr(),
             buf.len(),
-            std::ptr::addr_of_mut!(*state) as *mut c_void,
+            (&raw mut *state) as *mut c_void,
             callback,
             error_callback,
         )
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 2ebe0be53aa..48beb9be2b2 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -11,7 +11,7 @@ use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModul
 use rustc_codegen_ssa::back::symbol_export;
 use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig};
 use rustc_codegen_ssa::traits::*;
-use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::memmap::Mmap;
 use rustc_errors::{DiagCtxtHandle, FatalError};
@@ -23,7 +23,7 @@ use rustc_session::config::{self, CrateType, Lto};
 use tracing::{debug, info};
 
 use crate::back::write::{
-    self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+    self, CodegenDiagnosticsStage, DiagnosticHandlers, bitcode_section_name, save_temp_bitcode,
 };
 use crate::errors::{
     DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
@@ -156,22 +156,23 @@ fn get_bitcode_slice_from_object_data<'a>(
     obj: &'a [u8],
     cgcx: &CodegenContext<LlvmCodegenBackend>,
 ) -> Result<&'a [u8], LtoBitcodeFromRlib> {
-    // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
-    // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
-    // the relevant magic strings here and return.
+    // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR
+    // that won't work. Fortunately, if that's what we have we can just return the object directly,
+    // so we sniff the relevant magic strings here and return.
     if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
         return Ok(obj);
     }
-    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
-    // which in the public API for sections gets treated as part of the section name, but internally
-    // in MachOObjectFile.cpp gets treated separately.
-    let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
+    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment
+    // name" which in the public API for sections gets treated as part of the section name, but
+    // internally in MachOObjectFile.cpp gets treated separately.
+    let section_name = bitcode_section_name(cgcx).to_str().unwrap().trim_start_matches("__LLVM,");
     let mut len = 0;
     let data = unsafe {
         llvm::LLVMRustGetSliceFromObjectDataByName(
             obj.as_ptr(),
             obj.len(),
             section_name.as_ptr(),
+            section_name.len(),
             &mut len,
         )
     };
@@ -502,9 +503,9 @@ fn thin_lto(
         // upstream...
         let data = llvm::LLVMRustCreateThinLTOData(
             thin_modules.as_ptr(),
-            thin_modules.len() as u32,
+            thin_modules.len(),
             symbols_below_threshold.as_ptr(),
-            symbols_below_threshold.len() as u32,
+            symbols_below_threshold.len(),
         )
         .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
 
@@ -569,7 +570,7 @@ fn thin_lto(
 
             info!(" - {}: re-compiled", module_name);
             opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
-                shared: shared.clone(),
+                shared: Arc::clone(&shared),
                 idx: module_index,
             }));
         }
@@ -601,23 +602,9 @@ pub(crate) fn run_pass_manager(
     // This code is based off the code found in llvm's LTO code generator:
     //      llvm/lib/LTO/LTOCodeGenerator.cpp
     debug!("running the pass manager");
-    unsafe {
-        if !llvm::LLVMRustHasModuleFlag(
-            module.module_llvm.llmod(),
-            "LTOPostLink".as_ptr().cast(),
-            11,
-        ) {
-            llvm::LLVMRustAddModuleFlagU32(
-                module.module_llvm.llmod(),
-                llvm::LLVMModFlagBehavior::Error,
-                c"LTOPostLink".as_ptr(),
-                1,
-            );
-        }
-        let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
-        let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
-        write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage)?;
-    }
+    let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+    let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+    unsafe { write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage) }?;
     debug!("lto done");
     Ok(())
 }
@@ -808,8 +795,7 @@ struct ThinLTOKeysMap {
 impl ThinLTOKeysMap {
     fn save_to_file(&self, path: &Path) -> io::Result<()> {
         use std::io::Write;
-        let file = File::create(path)?;
-        let mut writer = io::BufWriter::new(file);
+        let mut writer = File::create_buffered(path)?;
         // The entries are loaded back into a hash map in `load_from_file()`, so
         // the order in which we write them to file here does not matter.
         for (module, key) in &self.keys {
@@ -821,8 +807,8 @@ impl ThinLTOKeysMap {
     fn load_from_file(path: &Path) -> io::Result<Self> {
         use std::io::BufRead;
         let mut keys = BTreeMap::default();
-        let file = File::open(path)?;
-        for line in io::BufReader::new(file).lines() {
+        let file = File::open_buffered(path)?;
+        for line in file.lines() {
             let line = line?;
             let mut split = line.split(' ');
             let module = split.next().unwrap();
@@ -844,7 +830,7 @@ impl ThinLTOKeysMap {
                     llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
                 })
                 .expect("Invalid ThinLTO module key");
-                (name.clone().into_string().unwrap(), key)
+                (module_name_to_str(name).to_string(), key)
             })
             .collect();
         Self { keys }
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
index 681ac75c877..44c30d22a9e 100644
--- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -1,4 +1,4 @@
-use std::ffi::{c_char, CStr};
+use std::ffi::{CStr, c_char};
 use std::marker::PhantomData;
 use std::ops::Deref;
 use std::ptr::NonNull;
@@ -30,7 +30,7 @@ impl OwnedTargetMachine {
         data_sections: bool,
         unique_section_names: bool,
         trap_unreachable: bool,
-        singletree: bool,
+        singlethread: bool,
         verbose_asm: bool,
         emit_stack_size_section: bool,
         relax_elf_relocations: bool,
@@ -62,7 +62,7 @@ impl OwnedTargetMachine {
                 data_sections,
                 unique_section_names,
                 trap_unreachable,
-                singletree,
+                singlethread,
                 verbose_asm,
                 emit_stack_size_section,
                 relax_elf_relocations,
@@ -86,15 +86,17 @@ impl Deref for OwnedTargetMachine {
     type Target = llvm::TargetMachine;
 
     fn deref(&self) -> &Self::Target {
-        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        // SAFETY: constructing ensures we have a valid pointer created by
+        // llvm::LLVMRustCreateTargetMachine.
         unsafe { self.tm_unique.as_ref() }
     }
 }
 
 impl Drop for OwnedTargetMachine {
     fn drop(&mut self) {
-        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
-        // OwnedTargetMachine is not copyable so there is no double free or use after free
+        // SAFETY: constructing ensures we have a valid pointer created by
+        // llvm::LLVMRustCreateTargetMachine. OwnedTargetMachine is not copyable, so there is no
+        // double free or use after free.
         unsafe {
             llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
         }
diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs
index 79794775b7b..73ae0072c42 100644
--- a/compiler/rustc_codegen_llvm/src/back/profiling.rs
+++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs
@@ -1,4 +1,4 @@
-use std::ffi::{c_void, CStr};
+use std::ffi::{CStr, c_void};
 use std::os::raw::c_char;
 use std::sync::Arc;
 
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index b1b692cc027..a65ae4df1e3 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,4 +1,4 @@
-use std::ffi::CString;
+use std::ffi::{CStr, CString};
 use std::io::{self, Write};
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
@@ -9,6 +9,7 @@ use llvm::{
     LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
 };
 use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::versioned_llvm_target;
 use rustc_codegen_ssa::back::write::{
     BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
     TargetMachineFactoryFn,
@@ -20,28 +21,29 @@ use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_errors::{DiagCtxtHandle, FatalError, Level};
 use rustc_fs_util::{link_or_copy, path_to_c_string};
 use rustc_middle::ty::TyCtxt;
+use rustc_session::Session;
 use rustc_session::config::{
     self, Lto, OutputType, Passes, RemapPathScopeComponents, SplitDwarfKind, SwitchWithOptPath,
 };
-use rustc_session::Session;
-use rustc_span::symbol::sym;
 use rustc_span::InnerSpan;
+use rustc_span::symbol::sym;
 use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo, TlsModel};
 use tracing::debug;
 
 use crate::back::lto::ThinBuffer;
 use crate::back::owned_target_machine::OwnedTargetMachine;
 use crate::back::profiling::{
-    selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
+    LlvmSelfProfiler, selfprofile_after_pass_callback, selfprofile_before_pass_callback,
 };
+use crate::common::AsCCharPtr;
 use crate::errors::{
     CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
     WithLlvmError, WriteBytecode,
 };
-use crate::llvm::diagnostic::OptimizationDiagnosticKind;
+use crate::llvm::diagnostic::OptimizationDiagnosticKind::*;
 use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::type_::Type;
-use crate::{base, common, llvm_util, LlvmCodegenBackend, ModuleLlvm};
+use crate::{LlvmCodegenBackend, ModuleLlvm, base, common, llvm_util};
 
 pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> FatalError {
     match llvm::last_error() {
@@ -157,7 +159,8 @@ fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel
 fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
     match relocation_model {
         RelocModel::Static => llvm::RelocModel::Static,
-        // LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra attribute.
+        // LLVM doesn't have a PIE relocation model; it represents PIE as PIC with an extra
+        // attribute.
         RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
         RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
         RelocModel::Ropi => llvm::RelocModel::ROPI,
@@ -188,8 +191,8 @@ pub(crate) fn target_machine_factory(
     let use_softfp = if sess.target.arch == "arm" && sess.target.abi == "eabihf" {
         sess.opts.cg.soft_float
     } else {
-        // `validate_commandline_args_with_session_available` has already warned about this being ignored.
-        // Let's make sure LLVM doesn't suddenly start using this flag on more targets.
+        // `validate_commandline_args_with_session_available` has already warned about this being
+        // ignored. Let's make sure LLVM doesn't suddenly start using this flag on more targets.
         false
     };
 
@@ -209,7 +212,7 @@ pub(crate) fn target_machine_factory(
         singlethread = false;
     }
 
-    let triple = SmallCStr::new(&sess.target.llvm_target);
+    let triple = SmallCStr::new(&versioned_llvm_target(sess));
     let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
     let features = CString::new(target_features.join(",")).unwrap();
     let abi = SmallCStr::new(&sess.target.llvm_abiname);
@@ -446,13 +449,12 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
                 column: opt.column,
                 pass_name: &opt.pass_name,
                 kind: match opt.kind {
-                    OptimizationDiagnosticKind::OptimizationRemark => "success",
-                    OptimizationDiagnosticKind::OptimizationMissed
-                    | OptimizationDiagnosticKind::OptimizationFailure => "missed",
-                    OptimizationDiagnosticKind::OptimizationAnalysis
-                    | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
-                    | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
-                    OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
+                    OptimizationRemark => "success",
+                    OptimizationMissed | OptimizationFailure => "missed",
+                    OptimizationAnalysis
+                    | OptimizationAnalysisFPCommute
+                    | OptimizationAnalysisAliasing => "analysis",
+                    OptimizationRemarkOther => "other",
                 },
                 message: &opt.message,
             });
@@ -590,15 +592,14 @@ pub(crate) unsafe fn llvm_optimize(
             pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
             config.instrument_coverage,
             instr_profile_output_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
-            config.instrument_gcov,
             pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
             config.debug_info_for_profiling,
             llvm_selfprofiler,
             selfprofile_before_pass_callback,
             selfprofile_after_pass_callback,
-            extra_passes.as_ptr().cast(),
+            extra_passes.as_c_char_ptr(),
             extra_passes.len(),
-            llvm_plugins.as_ptr().cast(),
+            llvm_plugins.as_c_char_ptr(),
             llvm_plugins.len(),
         )
     };
@@ -944,26 +945,13 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
     asm
 }
 
-fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
-    cgcx.opts.target_triple.triple().contains("-ios")
-        || cgcx.opts.target_triple.triple().contains("-darwin")
-        || cgcx.opts.target_triple.triple().contains("-tvos")
-        || cgcx.opts.target_triple.triple().contains("-watchos")
-        || cgcx.opts.target_triple.triple().contains("-visionos")
-}
-
-fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
-    cgcx.opts.target_triple.triple().contains("-aix")
-}
-
-//FIXME use c string literals here too
-pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
-    if target_is_apple(cgcx) {
-        "__LLVM,__bitcode\0"
-    } else if target_is_aix(cgcx) {
-        ".ipa\0"
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static CStr {
+    if cgcx.target_is_like_osx {
+        c"__LLVM,__bitcode"
+    } else if cgcx.target_is_like_aix {
+        c".ipa"
     } else {
-        ".llvmbc\0"
+        c".llvmbc"
     }
 }
 
@@ -1027,10 +1015,12 @@ unsafe fn embed_bitcode(
     // Unfortunately, LLVM provides no way to set custom section flags. For ELF
     // and COFF we emit the sections using module level inline assembly for that
     // reason (see issue #90326 for historical background).
-    let is_aix = target_is_aix(cgcx);
-    let is_apple = target_is_apple(cgcx);
     unsafe {
-        if is_apple || is_aix || cgcx.opts.target_triple.triple().starts_with("wasm") {
+        if cgcx.target_is_like_osx
+            || cgcx.target_is_like_aix
+            || cgcx.target_arch == "wasm32"
+            || cgcx.target_arch == "wasm64"
+        {
             // We don't need custom section flags, create LLVM globals.
             let llconst = common::bytes_in_context(llcx, bitcode);
             let llglobal = llvm::LLVMAddGlobal(
@@ -1040,9 +1030,8 @@ unsafe fn embed_bitcode(
             );
             llvm::LLVMSetInitializer(llglobal, llconst);
 
-            let section = bitcode_section_name(cgcx);
-            llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
-            llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+            llvm::set_section(llglobal, bitcode_section_name(cgcx));
+            llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
             llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
 
             let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
@@ -1052,22 +1041,22 @@ unsafe fn embed_bitcode(
                 c"rustc.embedded.cmdline".as_ptr(),
             );
             llvm::LLVMSetInitializer(llglobal, llconst);
-            let section = if is_apple {
+            let section = if cgcx.target_is_like_osx {
                 c"__LLVM,__cmdline"
-            } else if is_aix {
+            } else if cgcx.target_is_like_aix {
                 c".info"
             } else {
                 c".llvmcmd"
             };
-            llvm::LLVMSetSection(llglobal, section.as_ptr());
-            llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+            llvm::set_section(llglobal, section);
+            llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
         } else {
             // We need custom section flags, so emit module-level inline assembly.
             let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
             let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
-            llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+            llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len());
             let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
-            llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+            llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len());
         }
     }
 }
@@ -1095,7 +1084,7 @@ fn create_msvc_imps(
         let ptr_ty = Type::ptr_llcx(llcx);
         let globals = base::iter_globals(llmod)
             .filter(|&val| {
-                llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
+                llvm::get_linkage(val) == llvm::Linkage::ExternalLinkage
                     && llvm::LLVMIsDeclaration(val) == 0
             })
             .filter_map(|val| {
@@ -1114,7 +1103,7 @@ fn create_msvc_imps(
         for (imp_name, val) in globals {
             let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr());
             llvm::LLVMSetInitializer(imp, val);
-            llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+            llvm::set_linkage(imp, llvm::Linkage::ExternalLinkage);
         }
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 0ba8d82406a..32793894794 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -145,10 +145,8 @@ pub(crate) fn compile_codegen_unit(
 
 pub(crate) fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
     let Some(sect) = attrs.link_section else { return };
-    unsafe {
-        let buf = SmallCStr::new(sect.as_str());
-        llvm::LLVMSetSection(llval, buf.as_ptr());
-    }
+    let buf = SmallCStr::new(sect.as_str());
+    llvm::set_section(llval, &buf);
 }
 
 pub(crate) fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index c806d0c5499..751b2235dc8 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -3,11 +3,13 @@ use std::ops::Deref;
 use std::{iter, ptr};
 
 use libc::{c_char, c_uint};
+use rustc_abi as abi;
+use rustc_abi::{Align, Size, WrappingRange};
+use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
-use rustc_codegen_ssa::MemFlags;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
@@ -19,8 +21,7 @@ use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use rustc_sanitizers::{cfi, kcfi};
 use rustc_session::config::OptLevel;
 use rustc_span::Span;
-use rustc_target::abi::call::FnAbi;
-use rustc_target::abi::{self, Align, Size, WrappingRange};
+use rustc_target::callconv::FnAbi;
 use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
 use smallvec::SmallVec;
 use tracing::{debug, instrument};
@@ -56,6 +57,7 @@ const UNNAMED: *const c_char = c"".as_ptr();
 
 impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
     type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
+    type Metadata = <CodegenCx<'ll, 'tcx> as BackendTypes>::Metadata;
     type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
     type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
     type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
@@ -120,7 +122,7 @@ impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
     }
 }
 
-macro_rules! builder_methods_for_value_instructions {
+macro_rules! math_builder_methods {
     ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
         $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
             unsafe {
@@ -130,6 +132,18 @@ macro_rules! builder_methods_for_value_instructions {
     }
 }
 
+macro_rules! set_math_builder_methods {
+    ($($name:ident($($arg:ident),*) => ($llvm_capi:ident, $llvm_set_math:ident)),+ $(,)?) => {
+        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
+            unsafe {
+                let instr = llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED);
+                llvm::$llvm_set_math(instr);
+                instr
+            }
+        })+
+    }
+}
+
 impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     type CodegenCx = CodegenCx<'ll, 'tcx>;
 
@@ -225,7 +239,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         let args = self.check_call("invoke", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
-        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
         let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
         if let Some(funclet_bundle) = funclet_bundle {
             bundles.push(funclet_bundle);
@@ -236,13 +249,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        if let Some(kcfi_bundle) = kcfi_bundle {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
             bundles.push(kcfi_bundle);
         }
 
         let invoke = unsafe {
-            llvm::LLVMRustBuildInvoke(
+            llvm::LLVMBuildInvokeWithOperandBundles(
                 self.llbuilder,
                 llty,
                 llfn,
@@ -267,7 +279,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    builder_methods_for_value_instructions! {
+    math_builder_methods! {
         add(a, b) => LLVMBuildAdd,
         fadd(a, b) => LLVMBuildFAdd,
         sub(a, b) => LLVMBuildSub,
@@ -299,84 +311,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unchecked_umul(x, y) => LLVMBuildNUWMul,
     }
 
-    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetFastMath(instr);
-            instr
-        }
-    }
-
-    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetFastMath(instr);
-            instr
-        }
-    }
-
-    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetFastMath(instr);
-            instr
-        }
-    }
-
-    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetFastMath(instr);
-            instr
-        }
-    }
-
-    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetFastMath(instr);
-            instr
-        }
-    }
-
-    fn fadd_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetAlgebraicMath(instr);
-            instr
-        }
-    }
-
-    fn fsub_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetAlgebraicMath(instr);
-            instr
-        }
-    }
-
-    fn fmul_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetAlgebraicMath(instr);
-            instr
-        }
-    }
-
-    fn fdiv_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetAlgebraicMath(instr);
-            instr
-        }
-    }
-
-    fn frem_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
-        unsafe {
-            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
-            llvm::LLVMRustSetAlgebraicMath(instr);
-            instr
-        }
+    set_math_builder_methods! {
+        fadd_fast(x, y) => (LLVMBuildFAdd, LLVMRustSetFastMath),
+        fsub_fast(x, y) => (LLVMBuildFSub, LLVMRustSetFastMath),
+        fmul_fast(x, y) => (LLVMBuildFMul, LLVMRustSetFastMath),
+        fdiv_fast(x, y) => (LLVMBuildFDiv, LLVMRustSetFastMath),
+        frem_fast(x, y) => (LLVMBuildFRem, LLVMRustSetFastMath),
+        fadd_algebraic(x, y) => (LLVMBuildFAdd, LLVMRustSetAlgebraicMath),
+        fsub_algebraic(x, y) => (LLVMBuildFSub, LLVMRustSetAlgebraicMath),
+        fmul_algebraic(x, y) => (LLVMBuildFMul, LLVMRustSetAlgebraicMath),
+        fdiv_algebraic(x, y) => (LLVMBuildFDiv, LLVMRustSetAlgebraicMath),
+        frem_algebraic(x, y) => (LLVMBuildFRem, LLVMRustSetAlgebraicMath),
     }
 
     fn checked_binop(
@@ -459,6 +404,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             val
         }
     }
+
     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
             return self.trunc(val, self.cx().type_i1());
@@ -558,12 +504,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
 
             match scalar.primitive() {
-                abi::Int(..) => {
+                abi::Primitive::Int(..) => {
                     if !scalar.is_always_valid(bx) {
                         bx.range_metadata(load, scalar.valid_range(bx));
                     }
                 }
-                abi::Pointer(_) => {
+                abi::Primitive::Pointer(_) => {
                     if !scalar.valid_range(bx).contains(0) {
                         bx.nonnull_metadata(load);
                     }
@@ -574,7 +520,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                         }
                     }
                 }
-                abi::Float(_) => {}
+                abi::Primitive::Float(_) => {}
             }
         }
 
@@ -597,13 +543,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.val.llval, place.val.align);
-                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
@@ -677,26 +623,19 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         unsafe {
             let llty = self.cx.val_ty(load);
-            let v = [
-                self.cx.const_uint_big(llty, range.start),
-                self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
+            let md = [
+                llvm::LLVMValueAsMetadata(self.cx.const_uint_big(llty, range.start)),
+                llvm::LLVMValueAsMetadata(self.cx.const_uint_big(llty, range.end.wrapping_add(1))),
             ];
-
-            llvm::LLVMSetMetadata(
-                load,
-                llvm::MD_range as c_uint,
-                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
-            );
+            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, md.as_ptr(), md.len());
+            self.set_metadata(load, llvm::MD_range, md);
         }
     }
 
     fn nonnull_metadata(&mut self, load: &'ll Value) {
         unsafe {
-            llvm::LLVMSetMetadata(
-                load,
-                llvm::MD_nonnull as c_uint,
-                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
-            );
+            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
+            self.set_metadata(load, llvm::MD_nonnull, md);
         }
     }
 
@@ -727,11 +666,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 // for performance. LLVM doesn't seem to care about this, and will happily treat
                 // `!nontemporal` stores as-if they were normal stores (for reordering optimizations
                 // etc) even on x86, despite later lowering them to MOVNT which do *not* behave like
-                // regular stores but require special fences.
-                // So we keep a list of architectures where `!nontemporal` is known to be truly just
-                // a hint, and use regular stores everywhere else.
-                // (In the future, we could alternatively ensure that an sfence gets emitted after a sequence of movnt
-                // before any kind of synchronizing operation. But it's not clear how to do that with LLVM.)
+                // regular stores but require special fences. So we keep a list of architectures
+                // where `!nontemporal` is known to be truly just a hint, and use regular stores
+                // everywhere else. (In the future, we could alternatively ensure that an sfence
+                // gets emitted after a sequence of movnt before any kind of synchronizing
+                // operation. But it's not clear how to do that with LLVM.)
                 // For more context, see <https://github.com/rust-lang/rust/issues/114582> and
                 // <https://github.com/llvm/llvm-project/issues/64521>.
                 const WELL_BEHAVED_NONTEMPORAL_ARCHS: &[&str] =
@@ -744,9 +683,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     // *always* point to a metadata value of the integer 1.
                     //
                     // [1]: https://llvm.org/docs/LangRef.html#store-instruction
-                    let one = self.cx.const_i32(1);
-                    let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
-                    llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
+                    let one = llvm::LLVMValueAsMetadata(self.cx.const_i32(1));
+                    let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, &one, 1);
+                    self.set_metadata(store, llvm::MD_nontemporal, md);
                 }
             }
             store
@@ -1160,6 +1099,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             (val, success)
         }
     }
+
     fn atomic_rmw(
         &mut self,
         op: rustc_codegen_ssa::common::AtomicRmwBinOp,
@@ -1210,11 +1150,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn set_invariant_load(&mut self, load: &'ll Value) {
         unsafe {
-            llvm::LLVMSetMetadata(
-                load,
-                llvm::MD_invariant_load as c_uint,
-                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
-            );
+            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
+            self.set_metadata(load, llvm::MD_invariant_load, md);
         }
     }
 
@@ -1226,39 +1163,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
     }
 
-    fn instrprof_increment(
-        &mut self,
-        fn_name: &'ll Value,
-        hash: &'ll Value,
-        num_counters: &'ll Value,
-        index: &'ll Value,
-    ) {
-        debug!(
-            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
-            fn_name, hash, num_counters, index
-        );
-
-        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
-        let llty = self.cx.type_func(
-            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
-            self.cx.type_void(),
-        );
-        let args = &[fn_name, hash, num_counters, index];
-        let args = self.check_call("call", llty, llfn, args);
-
-        unsafe {
-            let _ = llvm::LLVMRustBuildCall(
-                self.llbuilder,
-                llty,
-                llfn,
-                args.as_ptr() as *const &llvm::Value,
-                args.len() as c_uint,
-                [].as_ptr(),
-                0 as c_uint,
-            );
-        }
-    }
-
     fn call(
         &mut self,
         llty: &'ll Type,
@@ -1273,7 +1177,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         let args = self.check_call("call", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
-        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
         let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
         if let Some(funclet_bundle) = funclet_bundle {
             bundles.push(funclet_bundle);
@@ -1284,13 +1187,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        if let Some(kcfi_bundle) = kcfi_bundle {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
             bundles.push(kcfi_bundle);
         }
 
         let call = unsafe {
-            llvm::LLVMRustBuildCall(
+            llvm::LLVMBuildCallWithOperandBundles(
                 self.llbuilder,
                 llty,
                 llfn,
@@ -1298,6 +1200,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 args.len() as c_uint,
                 bundles.as_ptr(),
                 bundles.len() as c_uint,
+                c"".as_ptr(),
             )
         };
         if let Some(fn_abi) = fn_abi {
@@ -1343,33 +1246,23 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
     fn align_metadata(&mut self, load: &'ll Value, align: Align) {
         unsafe {
-            let v = [self.cx.const_u64(align.bytes())];
-
-            llvm::LLVMSetMetadata(
-                load,
-                llvm::MD_align as c_uint,
-                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
-            );
+            let md = [llvm::LLVMValueAsMetadata(self.cx.const_u64(align.bytes()))];
+            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, md.as_ptr(), md.len());
+            self.set_metadata(load, llvm::MD_align, md);
         }
     }
 
     fn noundef_metadata(&mut self, load: &'ll Value) {
         unsafe {
-            llvm::LLVMSetMetadata(
-                load,
-                llvm::MD_noundef as c_uint,
-                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
-            );
+            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
+            self.set_metadata(load, llvm::MD_noundef, md);
         }
     }
 
     pub(crate) fn set_unpredictable(&mut self, inst: &'ll Value) {
         unsafe {
-            llvm::LLVMSetMetadata(
-                inst,
-                llvm::MD_unpredictable as c_uint,
-                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
-            );
+            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
+            self.set_metadata(inst, llvm::MD_unpredictable, md);
         }
     }
 
@@ -1613,7 +1506,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
         let args = self.check_call("callbr", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
-        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
         let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
         if let Some(funclet_bundle) = funclet_bundle {
             bundles.push(funclet_bundle);
@@ -1624,13 +1516,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        if let Some(kcfi_bundle) = kcfi_bundle {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
             bundles.push(kcfi_bundle);
         }
 
         let callbr = unsafe {
-            llvm::LLVMRustBuildCallBr(
+            llvm::LLVMBuildCallBr(
                 self.llbuilder,
                 llty,
                 llfn,
@@ -1705,7 +1596,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         instance: Option<Instance<'tcx>>,
         llfn: &'ll Value,
-    ) -> Option<llvm::OperandBundleDef<'ll>> {
+    ) -> Option<llvm::OperandBundleOwned<'ll>> {
         let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
         let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
             && let Some(fn_abi) = fn_abi
@@ -1731,13 +1622,25 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
                 kcfi::typeid_for_fnabi(self.tcx, fn_abi, options)
             };
 
-            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+            Some(llvm::OperandBundleOwned::new("kcfi", &[self.const_u32(kcfi_typeid)]))
         } else {
             None
         };
         kcfi_bundle
     }
 
+    /// Emits a call to `llvm.instrprof.increment`. Used by coverage instrumentation.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn instrprof_increment(
+        &mut self,
+        fn_name: &'ll Value,
+        hash: &'ll Value,
+        num_counters: &'ll Value,
+        index: &'ll Value,
+    ) {
+        self.call_intrinsic("llvm.instrprof.increment", &[fn_name, hash, num_counters, index]);
+    }
+
     /// Emits a call to `llvm.instrprof.mcdc.parameters`.
     ///
     /// This doesn't produce any code directly, but is used as input by
@@ -1747,111 +1650,50 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
     ///
     /// [`CodeGenPGO::emitMCDCParameters`]:
     ///     https://github.com/rust-lang/llvm-project/blob/5399a24/clang/lib/CodeGen/CodeGenPGO.cpp#L1124
+    #[instrument(level = "debug", skip(self))]
     pub(crate) fn mcdc_parameters(
         &mut self,
         fn_name: &'ll Value,
         hash: &'ll Value,
-        bitmap_bytes: &'ll Value,
+        bitmap_bits: &'ll Value,
     ) {
-        debug!("mcdc_parameters() with args ({:?}, {:?}, {:?})", fn_name, hash, bitmap_bytes);
-
-        let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCParametersIntrinsic(self.cx().llmod) };
-        let llty = self.cx.type_func(
-            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32()],
-            self.cx.type_void(),
+        assert!(
+            crate::llvm_util::get_version() >= (19, 0, 0),
+            "MCDC intrinsics require LLVM 19 or later"
         );
-        let args = &[fn_name, hash, bitmap_bytes];
-        let args = self.check_call("call", llty, llfn, args);
-
-        unsafe {
-            let _ = llvm::LLVMRustBuildCall(
-                self.llbuilder,
-                llty,
-                llfn,
-                args.as_ptr() as *const &llvm::Value,
-                args.len() as c_uint,
-                [].as_ptr(),
-                0 as c_uint,
-            );
-        }
+        self.call_intrinsic("llvm.instrprof.mcdc.parameters", &[fn_name, hash, bitmap_bits]);
     }
 
+    #[instrument(level = "debug", skip(self))]
     pub(crate) fn mcdc_tvbitmap_update(
         &mut self,
         fn_name: &'ll Value,
         hash: &'ll Value,
-        bitmap_bytes: &'ll Value,
         bitmap_index: &'ll Value,
         mcdc_temp: &'ll Value,
     ) {
-        debug!(
-            "mcdc_tvbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})",
-            fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp
+        assert!(
+            crate::llvm_util::get_version() >= (19, 0, 0),
+            "MCDC intrinsics require LLVM 19 or later"
         );
+        let args = &[fn_name, hash, bitmap_index, mcdc_temp];
+        self.call_intrinsic("llvm.instrprof.mcdc.tvbitmap.update", args);
+    }
 
-        let llfn =
-            unsafe { llvm::LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(self.cx().llmod) };
-        let llty = self.cx.type_func(
-            &[
-                self.cx.type_ptr(),
-                self.cx.type_i64(),
-                self.cx.type_i32(),
-                self.cx.type_i32(),
-                self.cx.type_ptr(),
-            ],
-            self.cx.type_void(),
-        );
-        let args = &[fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp];
-        let args = self.check_call("call", llty, llfn, args);
-        unsafe {
-            let _ = llvm::LLVMRustBuildCall(
-                self.llbuilder,
-                llty,
-                llfn,
-                args.as_ptr() as *const &llvm::Value,
-                args.len() as c_uint,
-                [].as_ptr(),
-                0 as c_uint,
-            );
-        }
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn mcdc_condbitmap_reset(&mut self, mcdc_temp: &'ll Value) {
         self.store(self.const_i32(0), mcdc_temp, self.tcx.data_layout.i32_align.abi);
     }
 
-    pub(crate) fn mcdc_condbitmap_update(
-        &mut self,
-        fn_name: &'ll Value,
-        hash: &'ll Value,
-        cond_loc: &'ll Value,
-        mcdc_temp: &'ll Value,
-        bool_value: &'ll Value,
-    ) {
-        debug!(
-            "mcdc_condbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})",
-            fn_name, hash, cond_loc, mcdc_temp, bool_value
-        );
-        let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(self.cx().llmod) };
-        let llty = self.cx.type_func(
-            &[
-                self.cx.type_ptr(),
-                self.cx.type_i64(),
-                self.cx.type_i32(),
-                self.cx.type_ptr(),
-                self.cx.type_i1(),
-            ],
-            self.cx.type_void(),
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn mcdc_condbitmap_update(&mut self, cond_index: &'ll Value, mcdc_temp: &'ll Value) {
+        assert!(
+            crate::llvm_util::get_version() >= (19, 0, 0),
+            "MCDC intrinsics require LLVM 19 or later"
         );
-        let args = &[fn_name, hash, cond_loc, mcdc_temp, bool_value];
-        self.check_call("call", llty, llfn, args);
-        unsafe {
-            let _ = llvm::LLVMRustBuildCall(
-                self.llbuilder,
-                llty,
-                llfn,
-                args.as_ptr() as *const &llvm::Value,
-                args.len() as c_uint,
-                [].as_ptr(),
-                0 as c_uint,
-            );
-        }
+        let align = self.tcx.data_layout.i32_align.abi;
+        let current_tv_index = self.load(self.cx.type_i32(), mcdc_temp, align);
+        let new_tv_index = self.add(current_tv_index, cond_index);
+        self.store(new_tv_index, mcdc_temp, align);
     }
 }
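
The reworked MC/DC helpers above split the old intrinsic-heavy sequence into three steps: `mcdc_condbitmap_reset` zeroes the per-decision i32 temporary, `mcdc_condbitmap_update` adds each condition's contribution to it via a load/add/store, and `mcdc_tvbitmap_update` hands the accumulated test-vector index to `llvm.instrprof.mcdc.tvbitmap.update`. A minimal standalone sketch of that bookkeeping, with a plain integer standing in for the i32 temporary (the condition contributions here are illustrative, not taken from this patch):

    // Models the test-vector index accumulation performed by the
    // mcdc_condbitmap_* helpers; a plain variable stands in for the
    // i32 temporary that the builder loads, adds to, and stores back.
    fn main() {
        let mut mcdc_temp: i32 = 0;  // mcdc_condbitmap_reset: store 0
        for cond_index in [1, 4] {   // per-condition contributions (illustrative)
            mcdc_temp += cond_index; // mcdc_condbitmap_update: load, add, store
        }
        // mcdc_tvbitmap_update would now pass this accumulated index to the
        // llvm.instrprof.mcdc.tvbitmap.update intrinsic.
        assert_eq!(mcdc_temp, 5);
    }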
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index db4e1bae942..dcea9d3b391 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -15,11 +15,6 @@ use crate::value::Value;
 
 /// Codegens a reference to a fn/method item, monomorphizing and
 /// inlining as it goes.
-///
-/// # Parameters
-///
-/// - `cx`: the crate context
-/// - `instance`: the instance to be instantiated
 pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
     let tcx = cx.tcx();
 
@@ -107,68 +102,47 @@ pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'t
         // whether we are sharing generics or not. The important thing here is
         // that the visibility we apply to the declaration is the same one that
         // has been applied to the definition (wherever that definition may be).
-        unsafe {
-            llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
-
-            let is_generic =
-                instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
-
-            if is_generic {
-                // This is a monomorphization. Its expected visibility depends
-                // on whether we are in share-generics mode.
 
-                if cx.tcx.sess.opts.share_generics() {
-                    // We are in share_generics mode.
+        llvm::set_linkage(llfn, llvm::Linkage::ExternalLinkage);
+        unsafe {
+            let is_generic = instance.args.non_erasable_generics().next().is_some();
 
+            let is_hidden = if is_generic {
+                // This is a monomorphization of a generic function.
+                if !cx.tcx.sess.opts.share_generics() {
+                    // When not sharing generics, all instances are in the same
+                    // crate and have hidden visibility.
+                    true
+                } else {
                     if let Some(instance_def_id) = instance_def_id.as_local() {
-                        // This is a definition from the current crate. If the
-                        // definition is unreachable for downstream crates or
-                        // the current crate does not re-export generics, the
-                        // definition of the instance will have been declared
-                        // as `hidden`.
-                        if cx.tcx.is_unreachable_local_definition(instance_def_id)
+                        // This is a monomorphization of a generic function
+                        // defined in the current crate. It is hidden if:
+                        // - the definition is unreachable for downstream
+                        //   crates, or
+                        // - the current crate does not re-export generics
+                        //   (because the crate is a C library or executable)
+                        cx.tcx.is_unreachable_local_definition(instance_def_id)
                             || !cx.tcx.local_crate_exports_generics()
-                        {
-                            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-                        }
                     } else {
                         // This is a monomorphization of a generic function
-                        // defined in an upstream crate.
-                        if instance.upstream_monomorphization(tcx).is_some() {
-                            // This is instantiated in another crate. It cannot
-                            // be `hidden`.
-                        } else {
-                            // This is a local instantiation of an upstream definition.
-                            // If the current crate does not re-export it
-                            // (because it is a C library or an executable), it
-                            // will have been declared `hidden`.
-                            if !cx.tcx.local_crate_exports_generics() {
-                                llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-                            }
-                        }
+                        // defined in an upstream crate. It is hidden if:
+                        // - it is instantiated in this crate, and
+                        // - the current crate does not re-export generics
+                        instance.upstream_monomorphization(tcx).is_none()
+                            && !cx.tcx.local_crate_exports_generics()
                     }
-                } else {
-                    // When not sharing generics, all instances are in the same
-                    // crate and have hidden visibility
-                    llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
                 }
             } else {
-                // This is a non-generic function
-                if cx.tcx.is_codegened_item(instance_def_id) {
-                    // This is a function that is instantiated in the local crate
-
-                    if instance_def_id.is_local() {
-                        // This is function that is defined in the local crate.
-                        // If it is not reachable, it is hidden.
-                        if !cx.tcx.is_reachable_non_generic(instance_def_id) {
-                            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-                        }
-                    } else {
-                        // This is a function from an upstream crate that has
-                        // been instantiated here. These are always hidden.
-                        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
-                    }
-                }
+                // This is a non-generic function. It is hidden if:
+                // - it is instantiated in the local crate, and
+                //   - it is defined in an upstream crate (non-local), or
+                //   - it is not reachable
+                cx.tcx.is_codegened_item(instance_def_id)
+                    && (!instance_def_id.is_local()
+                        || !cx.tcx.is_reachable_non_generic(instance_def_id))
+            };
+            if is_hidden {
+                llvm::set_visibility(llfn, llvm::Visibility::Hidden);
             }
 
             // MinGW: For backward compatibility we rely on the linker to decide whether it
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 203c63f0ae7..8852dec7d9f 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -1,7 +1,11 @@
 //! Code that is useful in various codegen modules.
 
 use libc::{c_char, c_uint};
+use rustc_abi as abi;
+use rustc_abi::Primitive::Pointer;
+use rustc_abi::{AddressSpace, HasDataLayout};
 use rustc_ast::Mutability;
+use rustc_codegen_ssa::common::TypeKind;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
 use rustc_hir::def_id::DefId;
@@ -9,12 +13,11 @@ use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
 use rustc_middle::ty::TyCtxt;
 use rustc_session::cstore::DllImport;
-use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
 use tracing::debug;
 
 use crate::consts::const_alloc_to_llvm;
 pub(crate) use crate::context::CodegenCx;
-use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True};
 use crate::type_::Type;
 use crate::value::Value;
 
@@ -60,25 +63,26 @@ use crate::value::Value;
 /// the `OperandBundleDef` value created for MSVC landing pads.
 pub(crate) struct Funclet<'ll> {
     cleanuppad: &'ll Value,
-    operand: OperandBundleDef<'ll>,
+    operand: llvm::OperandBundleOwned<'ll>,
 }
 
 impl<'ll> Funclet<'ll> {
     pub(crate) fn new(cleanuppad: &'ll Value) -> Self {
-        Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
+        Funclet { cleanuppad, operand: llvm::OperandBundleOwned::new("funclet", &[cleanuppad]) }
     }
 
     pub(crate) fn cleanuppad(&self) -> &'ll Value {
         self.cleanuppad
     }
 
-    pub(crate) fn bundle(&self) -> &OperandBundleDef<'ll> {
+    pub(crate) fn bundle(&self) -> &llvm::OperandBundle<'ll> {
         &self.operand
     }
 }
 
 impl<'ll> BackendTypes for CodegenCx<'ll, '_> {
     type Value = &'ll Value;
+    type Metadata = &'ll Metadata;
     // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
     type Function = &'ll Value;
 
@@ -126,25 +130,14 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMGetPoison(t) }
     }
 
-    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
-        unsafe { llvm::LLVMConstInt(t, i as u64, True) }
-    }
-
-    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
-        unsafe { llvm::LLVMConstInt(t, i, False) }
-    }
-
-    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
-        unsafe {
-            let words = [u as u64, (u >> 64) as u64];
-            llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
-        }
-    }
-
     fn const_bool(&self, val: bool) -> &'ll Value {
         self.const_uint(self.type_i1(), val as u64)
     }
 
+    fn const_i8(&self, i: i8) -> &'ll Value {
+        self.const_int(self.type_i8(), i as i64)
+    }
+
     fn const_i16(&self, i: i16) -> &'ll Value {
         self.const_int(self.type_i16(), i as i64)
     }
@@ -153,8 +146,16 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         self.const_int(self.type_i32(), i as i64)
     }
 
-    fn const_i8(&self, i: i8) -> &'ll Value {
-        self.const_int(self.type_i8(), i as i64)
+    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+        debug_assert!(
+            self.type_kind(t) == TypeKind::Integer,
+            "only allows integer types in const_int"
+        );
+        unsafe { llvm::LLVMConstInt(t, i as u64, True) }
+    }
+
+    fn const_u8(&self, i: u8) -> &'ll Value {
+        self.const_uint(self.type_i8(), i as u64)
     }
 
     fn const_u32(&self, i: u32) -> &'ll Value {
@@ -179,8 +180,23 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         self.const_uint(self.isize_ty, i)
     }
 
-    fn const_u8(&self, i: u8) -> &'ll Value {
-        self.const_uint(self.type_i8(), i as u64)
+    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+        debug_assert!(
+            self.type_kind(t) == TypeKind::Integer,
+            "only allows integer types in const_uint"
+        );
+        unsafe { llvm::LLVMConstInt(t, i, False) }
+    }
+
+    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+        debug_assert!(
+            self.type_kind(t) == TypeKind::Integer,
+            "only allows integer types in const_uint_big"
+        );
+        unsafe {
+            let words = [u as u64, (u >> 64) as u64];
+            llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+        }
     }
 
     fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
@@ -203,8 +219,8 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     llvm::LLVMSetInitializer(g, sc);
                     llvm::LLVMSetGlobalConstant(g, True);
                     llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global);
-                    llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
                 }
+                llvm::set_linkage(g, llvm::Linkage::InternalLinkage);
                 (s.to_owned(), g)
             })
             .1;
@@ -290,10 +306,10 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         self.get_fn_addr(instance.polymorphize(self.tcx)),
                         self.data_layout().instruction_address_space,
                     ),
-                    GlobalAlloc::VTable(ty, trait_ref) => {
+                    GlobalAlloc::VTable(ty, dyn_ty) => {
                         let alloc = self
                             .tcx
-                            .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
+                            .global_alloc(self.tcx.vtable_allocation((ty, dyn_ty.principal())))
                             .unwrap_memory();
                         let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
                         let value = self.static_addr_of(init, alloc.inner().align, None);
@@ -376,3 +392,21 @@ pub(crate) fn get_dllimport<'tcx>(
     tcx.native_library(id)
         .and_then(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name))
 }
+
+/// Extension trait for explicit casts to `*const c_char`.
+pub(crate) trait AsCCharPtr {
+    /// Equivalent to `self.as_ptr().cast()`, but only casts to `*const c_char`.
+    fn as_c_char_ptr(&self) -> *const c_char;
+}
+
+impl AsCCharPtr for str {
+    fn as_c_char_ptr(&self) -> *const c_char {
+        self.as_ptr().cast()
+    }
+}
+
+impl AsCCharPtr for [u8] {
+    fn as_c_char_ptr(&self) -> *const c_char {
+        self.as_ptr().cast()
+    }
+}
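
The `const_uint_big` body kept above passes a `u128` to `LLVMConstIntOfArbitraryPrecision` as two 64-bit words, least-significant word first. A small standalone check of that split (the values are arbitrary, chosen only to exercise both words):

    // The low word comes first, matching the `[u as u64, (u >> 64) as u64]`
    // construction in const_uint_big.
    fn split_u128(u: u128) -> [u64; 2] {
        [u as u64, (u >> 64) as u64]
    }

    fn main() {
        let u: u128 = (7u128 << 64) | 42;
        let [lo, hi] = split_u128(u);
        assert_eq!((lo, hi), (42, 7));
        // Reassembling the words recovers the original value.
        assert_eq!(((hi as u128) << 64) | lo as u128, u);
    }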
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index dc86ef22e37..7ab4f45cd73 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -1,25 +1,25 @@
 use std::ops::Range;
 
+use rustc_abi::{
+    Align, AlignFromBytesError, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
 use rustc_codegen_ssa::common;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::interpret::{
-    read_target_uint, Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer,
-    Scalar as InterpScalar,
+    Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer, Scalar as InterpScalar,
+    read_target_uint,
 };
 use rustc_middle::mir::mono::MonoItem;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Instance};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::Lto;
-use rustc_target::abi::{
-    Align, AlignFromBytesError, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
-};
 use tracing::{debug, instrument, trace};
 
-use crate::common::CodegenCx;
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::errors::{
     InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined,
 };
@@ -73,8 +73,8 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
 
         // Generating partially-uninit consts is limited to small numbers of chunks,
         // to avoid the cost of generating large complex const expressions.
-        // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
-        // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
+        // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element, and
+        // would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
         let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold;
         let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
 
@@ -172,29 +172,27 @@ fn check_and_apply_linkage<'ll, 'tcx>(
     if let Some(linkage) = attrs.import_linkage {
         debug!("get_static: sym={} linkage={:?}", sym, linkage);
 
-        unsafe {
-            // Declare a symbol `foo` with the desired linkage.
-            let g1 = cx.declare_global(sym, cx.type_i8());
-            llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
-
-            // Declare an internal global `extern_with_linkage_foo` which
-            // is initialized with the address of `foo`. If `foo` is
-            // discarded during linking (for example, if `foo` has weak
-            // linkage and there are no definitions), then
-            // `extern_with_linkage_foo` will instead be initialized to
-            // zero.
-            let mut real_name = "_rust_extern_with_linkage_".to_string();
-            real_name.push_str(sym);
-            let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
-                cx.sess().dcx().emit_fatal(SymbolAlreadyDefined {
-                    span: cx.tcx.def_span(def_id),
-                    symbol_name: sym,
-                })
-            });
-            llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
-            llvm::LLVMSetInitializer(g2, g1);
-            g2
-        }
+        // Declare a symbol `foo` with the desired linkage.
+        let g1 = cx.declare_global(sym, cx.type_i8());
+        llvm::set_linkage(g1, base::linkage_to_llvm(linkage));
+
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`. If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+        let mut real_name = "_rust_extern_with_linkage_".to_string();
+        real_name.push_str(sym);
+        let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
+            cx.sess().dcx().emit_fatal(SymbolAlreadyDefined {
+                span: cx.tcx.def_span(def_id),
+                symbol_name: sym,
+            })
+        });
+        llvm::set_linkage(g2, llvm::Linkage::InternalLinkage);
+        unsafe { llvm::LLVMSetInitializer(g2, g1) };
+        g2
     } else if cx.tcx.sess.target.arch == "x86"
         && common::is_mingw_gnu_toolchain(&cx.tcx.sess.target)
         && let Some(dllimport) = crate::common::get_dllimport(cx.tcx, def_id, sym)
@@ -218,23 +216,21 @@ impl<'ll> CodegenCx<'ll, '_> {
         align: Align,
         kind: Option<&str>,
     ) -> &'ll Value {
-        unsafe {
-            let gv = match kind {
-                Some(kind) if !self.tcx.sess.fewer_names() => {
-                    let name = self.generate_local_symbol_name(kind);
-                    let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
-                        bug!("symbol `{}` is already defined", name);
-                    });
-                    llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
-                    gv
-                }
-                _ => self.define_private_global(self.val_ty(cv)),
-            };
-            llvm::LLVMSetInitializer(gv, cv);
-            set_global_alignment(self, gv, align);
-            llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
-            gv
-        }
+        let gv = match kind {
+            Some(kind) if !self.tcx.sess.fewer_names() => {
+                let name = self.generate_local_symbol_name(kind);
+                let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
+                    bug!("symbol `{}` is already defined", name);
+                });
+                llvm::set_linkage(gv, llvm::Linkage::PrivateLinkage);
+                gv
+            }
+            _ => self.define_private_global(self.val_ty(cv)),
+        };
+        unsafe { llvm::LLVMSetInitializer(gv, cv) };
+        set_global_alignment(self, gv, align);
+        llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
+        gv
     }
 
     #[instrument(level = "debug", skip(self))]
@@ -243,8 +239,8 @@ impl<'ll> CodegenCx<'ll, '_> {
         trace!(?instance);
 
         let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
-        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
-        // the llvm type from the actual evaluated initializer.
+        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure
+        // out the llvm type from the actual evaluated initializer.
         let llty = if nested {
             self.type_i8()
         } else {
@@ -256,7 +252,7 @@ impl<'ll> CodegenCx<'ll, '_> {
     }
 
     #[instrument(level = "debug", skip(self, llty))]
-    pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value {
+    fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value {
         let instance = Instance::mono(self.tcx, def_id);
         if let Some(&g) = self.instances.borrow().get(&instance) {
             trace!("used cached value");
@@ -286,9 +282,7 @@ impl<'ll> CodegenCx<'ll, '_> {
             let g = self.declare_global(sym, llty);
 
             if !self.tcx.is_reachable_non_generic(def_id) {
-                unsafe {
-                    llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
-                }
+                llvm::set_visibility(g, llvm::Visibility::Hidden);
             }
 
             g
@@ -306,7 +300,7 @@ impl<'ll> CodegenCx<'ll, '_> {
             llvm::set_thread_local_mode(g, self.tls_model);
         }
 
-        let dso_local = unsafe { self.should_assume_dso_local(g, true) };
+        let dso_local = self.should_assume_dso_local(g, true);
         if dso_local {
             unsafe {
                 llvm::LLVMRustSetDSOLocal(g, true);
@@ -314,15 +308,16 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
 
         if !def_id.is_local() {
-            let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
+            let needs_dll_storage_attr = self.use_dll_storage_attrs
+                && !self.tcx.is_foreign_item(def_id)
                 // Local definitions can never be imported, so we must not apply
                 // the DLLImport annotation.
-                !dso_local &&
+                && !dso_local
                 // ThinLTO can't handle this workaround in all cases, so we don't
                 // emit the attrs. Instead we make them unnecessary by disallowing
                 // dynamic linking when linker plugin based LTO is enabled.
-                !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
-                self.tcx.sess.lto() != Lto::Thin;
+                && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+                && self.tcx.sess.lto() != Lto::Thin;
 
             // If this assertion triggers, there's something wrong with commandline
             // argument validation.
@@ -394,18 +389,18 @@ impl<'ll> CodegenCx<'ll, '_> {
                 let name = llvm::get_value_name(g).to_vec();
                 llvm::set_value_name(g, b"");
 
-                let linkage = llvm::LLVMRustGetLinkage(g);
-                let visibility = llvm::LLVMRustGetVisibility(g);
+                let linkage = llvm::get_linkage(g);
+                let visibility = llvm::get_visibility(g);
 
                 let new_g = llvm::LLVMRustGetOrInsertGlobal(
                     self.llmod,
-                    name.as_ptr().cast(),
+                    name.as_c_char_ptr(),
                     name.len(),
                     val_llty,
                 );
 
-                llvm::LLVMRustSetLinkage(new_g, linkage);
-                llvm::LLVMRustSetVisibility(new_g, visibility);
+                llvm::set_linkage(new_g, linkage);
+                llvm::set_visibility(new_g, visibility);
 
                 // The old global has had its name removed but is returned by
                 // get_static since it is in the instance cache. Provide an
@@ -436,58 +431,6 @@ impl<'ll> CodegenCx<'ll, '_> {
 
             if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
                 llvm::set_thread_local_mode(g, self.tls_model);
-
-                // Do not allow LLVM to change the alignment of a TLS on macOS.
-                //
-                // By default a global's alignment can be freely increased.
-                // This allows LLVM to generate more performant instructions
-                // e.g., using load-aligned into a SIMD register.
-                //
-                // However, on macOS 10.10 or below, the dynamic linker does not
-                // respect any alignment given on the TLS (radar 24221680).
-                // This will violate the alignment assumption, and causing segfault at runtime.
-                //
-                // This bug is very easy to trigger. In `println!` and `panic!`,
-                // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
-                // which the values would be `mem::replace`d on initialization.
-                // The implementation of `mem::replace` will use SIMD
-                // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
-                // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
-                // which macOS's dyld disregarded and causing crashes
-                // (see issues #51794, #51758, #50867, #48866 and #44056).
-                //
-                // To workaround the bug, we trick LLVM into not increasing
-                // the global's alignment by explicitly assigning a section to it
-                // (equivalent to automatically generating a `#[link_section]` attribute).
-                // See the comment in the `GlobalValue::canIncreaseAlignment()` function
-                // of `lib/IR/Globals.cpp` for why this works.
-                //
-                // When the alignment is not increased, the optimized `mem::replace`
-                // will use load-unaligned instructions instead, and thus avoiding the crash.
-                //
-                // We could remove this hack whenever we decide to drop macOS 10.10 support.
-                if self.tcx.sess.target.is_like_osx {
-                    // The `inspect` method is okay here because we checked for provenance, and
-                    // because we are doing this access to inspect the final interpreter state
-                    // (not as part of the interpreter execution).
-                    //
-                    // FIXME: This check requires that the (arbitrary) value of undefined bytes
-                    // happens to be zero. Instead, we should only check the value of defined bytes
-                    // and set all undefined bytes to zero if this allocation is headed for the
-                    // BSS.
-                    let all_bytes_are_zero = alloc.provenance().ptrs().is_empty()
-                        && alloc
-                            .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
-                            .iter()
-                            .all(|&byte| byte == 0);
-
-                    let sect_name = if all_bytes_are_zero {
-                        c"__DATA,__thread_bss"
-                    } else {
-                        c"__DATA,__thread_data"
-                    };
-                    llvm::LLVMSetSection(g, sect_name.as_ptr());
-                }
             }
 
             // Wasm statics with custom link sections get special treatment as they
@@ -502,7 +445,7 @@ impl<'ll> CodegenCx<'ll, '_> {
                 if let Some(section) = attrs.link_section {
                     let section = llvm::LLVMMDStringInContext2(
                         self.llcx,
-                        section.as_str().as_ptr().cast(),
+                        section.as_str().as_c_char_ptr(),
                         section.as_str().len(),
                     );
                     assert!(alloc.provenance().ptrs().is_empty());
@@ -513,7 +456,7 @@ impl<'ll> CodegenCx<'ll, '_> {
                     let bytes =
                         alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
                     let alloc =
-                        llvm::LLVMMDStringInContext2(self.llcx, bytes.as_ptr().cast(), bytes.len());
+                        llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len());
                     let data = [section, alloc];
                     let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len());
                     let val = llvm::LLVMMetadataAsValue(self.llcx, meta);
@@ -545,8 +488,8 @@ impl<'ll> CodegenCx<'ll, '_> {
                 // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
                 // on other targets, in particular MachO targets have *their* static constructor
                 // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
-                // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
-                // so we don't need to take care of it here.
+                // that check happens when assigning the `CodegenFnAttrFlags` in
+                // `rustc_hir_analysis`, so we don't need to take care of it here.
                 self.add_compiler_used_global(g);
             }
             if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
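
A recurring pattern in the consts.rs hunks above is shrinking `unsafe` blocks by routing through safe wrappers such as `llvm::set_linkage` and `llvm::set_visibility`, leaving only the raw FFI calls (e.g. `LLVMSetInitializer`) inside `unsafe`. A generic sketch of that wrapper shape, using a stand-in function rather than a real LLVM binding:

    // `raw_set_linkage` stands in for an unsafe LLVM-C binding; the safe
    // wrapper is the only place that needs an `unsafe` block, so call sites
    // like check_and_apply_linkage stay free of it.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Linkage { External, Internal }

    unsafe fn raw_set_linkage(slot: *mut Linkage, linkage: Linkage) {
        // In the real code this would be an extern "C" call into LLVM.
        unsafe { *slot = linkage }
    }

    fn set_linkage(value: std::ptr::NonNull<Linkage>, linkage: Linkage) {
        // The only precondition (a valid, non-null handle) is carried by the
        // parameter type, so the unsafety is contained here.
        unsafe { raw_set_linkage(value.as_ptr(), linkage) }
    }

    fn main() {
        let mut global = Linkage::External;
        set_linkage(std::ptr::NonNull::from(&mut global), Linkage::Internal);
        assert_eq!(global, Linkage::Internal);
    }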
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 03af31d36fb..b7ab5d6d5f0 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -1,13 +1,14 @@
 use std::borrow::Borrow;
 use std::cell::{Cell, RefCell};
-use std::ffi::CStr;
+use std::ffi::{CStr, c_uint};
 use std::str;
 
-use libc::c_uint;
+use rustc_abi::{HasDataLayout, TargetDataLayout, VariantIdx};
+use rustc_codegen_ssa::back::versioned_llvm_target;
 use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::base_n::{ToBaseN, ALPHANUMERIC_ONLY};
+use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
@@ -18,25 +19,26 @@ use rustc_middle::ty::layout::{
 };
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
+use rustc_session::Session;
 use rustc_session::config::{
-    BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, PAuthKey, PacRet,
+    BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, FunctionReturn, PAuthKey, PacRet,
 };
-use rustc_session::Session;
 use rustc_span::source_map::Spanned;
-use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::{HasDataLayout, TargetDataLayout, VariantIdx};
+use rustc_span::{DUMMY_SP, Span};
 use rustc_target::spec::{HasTargetSpec, RelocModel, SmallDataThresholdSupport, Target, TlsModel};
 use smallvec::SmallVec;
 
 use crate::back::write::to_llvm_code_model;
 use crate::callee::get_fn;
+use crate::common::AsCCharPtr;
 use crate::debuginfo::metadata::apply_vcall_visibility_metadata;
+use crate::llvm::{Metadata, MetadataType};
 use crate::type_::Type;
 use crate::value::Value;
 use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util};
 
-/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
-/// `llvm::Context` so that several compilation units may be optimized in parallel.
+/// There is one `CodegenCx` per codegen unit. Each one has its own LLVM
+/// `llvm::Context` so that several codegen units may be processed in parallel.
 /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
 pub(crate) struct CodegenCx<'ll, 'tcx> {
     pub tcx: TyCtxt<'tcx>,
@@ -80,6 +82,7 @@ pub(crate) struct CodegenCx<'ll, 'tcx> {
 
     pub isize_ty: &'ll Type,
 
+    /// Extra codegen state needed when coverage instrumentation is enabled.
     pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
     pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,
 
@@ -138,6 +141,21 @@ pub(crate) unsafe fn create_module<'ll>(
         }
     }
 
+    if llvm_version < (20, 0, 0) {
+        if sess.target.arch == "aarch64" || sess.target.arch.starts_with("arm64") {
+            // LLVM 20 defines three additional address spaces for alternate
+            // pointer kinds used in Windows.
+            // See https://github.com/llvm/llvm-project/pull/111879
+            target_data_layout =
+                target_data_layout.replace("-p270:32:32-p271:32:32-p272:64:64", "");
+        }
+        if sess.target.arch.starts_with("sparc") {
+            // LLVM 20 updates the sparc layout to correctly align 128 bit integers to 128 bit.
+            // See https://github.com/llvm/llvm-project/pull/106951
+            target_data_layout = target_data_layout.replace("-i128:128", "");
+        }
+    }
+
     // Ensure the data-layout values hardcoded remain the defaults.
     {
         let tm = crate::back::write::create_informational_target_machine(tcx.sess, false);
@@ -165,7 +183,7 @@ pub(crate) unsafe fn create_module<'ll>(
         llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
     }
 
-    let llvm_target = SmallCStr::new(&sess.target.llvm_target);
+    let llvm_target = SmallCStr::new(&versioned_llvm_target(sess));
     unsafe {
         llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
     }
@@ -198,132 +216,117 @@ pub(crate) unsafe fn create_module<'ll>(
     // If skipping the PLT is enabled, we need to add some module metadata
     // to ensure intrinsic calls don't use it.
     if !sess.needs_plt() {
-        let avoid_plt = c"RtLibUseGOT".as_ptr();
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
-        }
+        llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "RtLibUseGOT", 1);
     }
 
     // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
     if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
-        let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr();
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
-                llmod,
-                llvm::LLVMModFlagBehavior::Override,
-                canonical_jump_tables,
-                1,
-            );
-        }
+        llvm::add_module_flag_u32(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Override,
+            "CFI Canonical Jump Tables",
+            1,
+        );
     }
 
     // If we're normalizing integers with CFI, ensure LLVM generated functions do the same.
     // See https://github.com/llvm/llvm-project/pull/104826
     if sess.is_sanitizer_cfi_normalize_integers_enabled() {
-        let cfi_normalize_integers = c"cfi-normalize-integers".as_ptr().cast();
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
-                llmod,
-                llvm::LLVMModFlagBehavior::Override,
-                cfi_normalize_integers,
-                1,
-            );
-        }
+        llvm::add_module_flag_u32(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Override,
+            "cfi-normalize-integers",
+            1,
+        );
     }
 
-    // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
+    // Enable LTO unit splitting if specified or if CFI is enabled. (See
+    // https://reviews.llvm.org/D53891.)
     if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
-        let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr();
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
-                llmod,
-                llvm::LLVMModFlagBehavior::Override,
-                enable_split_lto_unit,
-                1,
-            );
-        }
+        llvm::add_module_flag_u32(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Override,
+            "EnableSplitLTOUnit",
+            1,
+        );
     }
 
     // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
     if sess.is_sanitizer_kcfi_enabled() {
-        let kcfi = c"kcfi".as_ptr();
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
-        }
+        llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Override, "kcfi", 1);
 
         // Add "kcfi-offset" module flag with -Z patchable-function-entry (See
         // https://reviews.llvm.org/D141172).
         let pfe =
             PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry);
         if pfe.prefix() > 0 {
-            let kcfi_offset = c"kcfi-offset".as_ptr().cast();
-            unsafe {
-                llvm::LLVMRustAddModuleFlagU32(
-                    llmod,
-                    llvm::LLVMModFlagBehavior::Override,
-                    kcfi_offset,
-                    pfe.prefix().into(),
-                );
-            }
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Override,
+                "kcfi-offset",
+                pfe.prefix().into(),
+            );
         }
     }
 
     // Control Flow Guard is currently only supported by the MSVC linker on Windows.
     if sess.target.is_like_msvc {
-        unsafe {
-            match sess.opts.cg.control_flow_guard {
-                CFGuard::Disabled => {}
-                CFGuard::NoChecks => {
-                    // Set `cfguard=1` module flag to emit metadata only.
-                    llvm::LLVMRustAddModuleFlagU32(
-                        llmod,
-                        llvm::LLVMModFlagBehavior::Warning,
-                        c"cfguard".as_ptr() as *const _,
-                        1,
-                    )
-                }
-                CFGuard::Checks => {
-                    // Set `cfguard=2` module flag to emit metadata and checks.
-                    llvm::LLVMRustAddModuleFlagU32(
-                        llmod,
-                        llvm::LLVMModFlagBehavior::Warning,
-                        c"cfguard".as_ptr() as *const _,
-                        2,
-                    )
-                }
+        match sess.opts.cg.control_flow_guard {
+            CFGuard::Disabled => {}
+            CFGuard::NoChecks => {
+                // Set `cfguard=1` module flag to emit metadata only.
+                llvm::add_module_flag_u32(
+                    llmod,
+                    llvm::ModuleFlagMergeBehavior::Warning,
+                    "cfguard",
+                    1,
+                );
+            }
+            CFGuard::Checks => {
+                // Set `cfguard=2` module flag to emit metadata and checks.
+                llvm::add_module_flag_u32(
+                    llmod,
+                    llvm::ModuleFlagMergeBehavior::Warning,
+                    "cfguard",
+                    2,
+                );
             }
         }
     }
 
     if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
         if sess.target.arch == "aarch64" {
-            unsafe {
-                llvm::LLVMRustAddModuleFlagU32(
-                    llmod,
-                    llvm::LLVMModFlagBehavior::Min,
-                    c"branch-target-enforcement".as_ptr(),
-                    bti.into(),
-                );
-                llvm::LLVMRustAddModuleFlagU32(
-                    llmod,
-                    llvm::LLVMModFlagBehavior::Min,
-                    c"sign-return-address".as_ptr(),
-                    pac_ret.is_some().into(),
-                );
-                let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
-                llvm::LLVMRustAddModuleFlagU32(
-                    llmod,
-                    llvm::LLVMModFlagBehavior::Min,
-                    c"sign-return-address-all".as_ptr(),
-                    pac_opts.leaf.into(),
-                );
-                llvm::LLVMRustAddModuleFlagU32(
-                    llmod,
-                    llvm::LLVMModFlagBehavior::Min,
-                    c"sign-return-address-with-bkey".as_ptr(),
-                    u32::from(pac_opts.key == PAuthKey::B),
-                );
-            }
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Min,
+                "branch-target-enforcement",
+                bti.into(),
+            );
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Min,
+                "sign-return-address",
+                pac_ret.is_some().into(),
+            );
+            let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, pc: false, key: PAuthKey::A });
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Min,
+                "branch-protection-pauth-lr",
+                pac_opts.pc.into(),
+            );
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Min,
+                "sign-return-address-all",
+                pac_opts.leaf.into(),
+            );
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Min,
+                "sign-return-address-with-bkey",
+                u32::from(pac_opts.key == PAuthKey::B),
+            );
         } else {
             bug!(
                 "branch-protection used on non-AArch64 target; \
@@ -334,46 +337,45 @@ pub(crate) unsafe fn create_module<'ll>(
 
     // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
     if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
-                llmod,
-                llvm::LLVMModFlagBehavior::Override,
-                c"cf-protection-branch".as_ptr(),
-                1,
-            );
-        }
+        llvm::add_module_flag_u32(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Override,
+            "cf-protection-branch",
+            1,
+        );
     }
     if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
-                llmod,
-                llvm::LLVMModFlagBehavior::Override,
-                c"cf-protection-return".as_ptr(),
-                1,
-            );
-        }
+        llvm::add_module_flag_u32(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Override,
+            "cf-protection-return",
+            1,
+        );
     }
 
     if sess.opts.unstable_opts.virtual_function_elimination {
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
-                llmod,
-                llvm::LLVMModFlagBehavior::Error,
-                c"Virtual Function Elim".as_ptr(),
-                1,
-            );
-        }
+        llvm::add_module_flag_u32(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Error,
+            "Virtual Function Elim",
+            1,
+        );
     }
 
     // Set module flag to enable Windows EHCont Guard (/guard:ehcont).
     if sess.opts.unstable_opts.ehcont_guard {
-        unsafe {
-            llvm::LLVMRustAddModuleFlagU32(
+        llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "ehcontguard", 1);
+    }
+
+    match sess.opts.unstable_opts.function_return {
+        FunctionReturn::Keep => {}
+        FunctionReturn::ThunkExtern => {
+            llvm::add_module_flag_u32(
                 llmod,
-                llvm::LLVMModFlagBehavior::Warning,
-                c"ehcontguard".as_ptr() as *const _,
+                llvm::ModuleFlagMergeBehavior::Override,
+                "function_return_thunk_extern",
                 1,
-            )
+            );
         }
     }
 
@@ -382,15 +384,12 @@ pub(crate) unsafe fn create_module<'ll>(
         // Set up the small-data optimization limit for architectures that use
         // an LLVM module flag to control this.
         (Some(threshold), SmallDataThresholdSupport::LlvmModuleFlag(flag)) => {
-            let flag = SmallCStr::new(flag.as_ref());
-            unsafe {
-                llvm::LLVMRustAddModuleFlagU32(
-                    llmod,
-                    llvm::LLVMModFlagBehavior::Error,
-                    flag.as_c_str().as_ptr(),
-                    threshold as u32,
-                )
-            }
+            llvm::add_module_flag_u32(
+                llmod,
+                llvm::ModuleFlagMergeBehavior::Error,
+                &flag,
+                threshold as u32,
+            );
         }
         _ => (),
     };
@@ -403,17 +402,17 @@ pub(crate) unsafe fn create_module<'ll>(
     let rustc_producer =
         format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"));
     let name_metadata = unsafe {
-        llvm::LLVMMDStringInContext(
+        llvm::LLVMMDStringInContext2(
             llcx,
-            rustc_producer.as_ptr().cast(),
-            rustc_producer.as_bytes().len() as c_uint,
+            rustc_producer.as_c_char_ptr(),
+            rustc_producer.as_bytes().len(),
         )
     };
     unsafe {
         llvm::LLVMAddNamedMetadataOperand(
             llmod,
             c"llvm.ident".as_ptr(),
-            llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1),
+            &llvm::LLVMMetadataAsValue(llcx, llvm::LLVMMDNodeInContext2(llcx, &name_metadata, 1)),
         );
     }
 
@@ -424,33 +423,29 @@ pub(crate) unsafe fn create_module<'ll>(
     // If llvm_abiname is empty, emit nothing.
     let llvm_abiname = &sess.target.options.llvm_abiname;
     if matches!(sess.target.arch.as_ref(), "riscv32" | "riscv64") && !llvm_abiname.is_empty() {
-        unsafe {
-            llvm::LLVMRustAddModuleFlagString(
-                llmod,
-                llvm::LLVMModFlagBehavior::Error,
-                c"target-abi".as_ptr(),
-                llvm_abiname.as_ptr().cast(),
-                llvm_abiname.len(),
-            );
-        }
+        llvm::add_module_flag_str(
+            llmod,
+            llvm::ModuleFlagMergeBehavior::Error,
+            "target-abi",
+            llvm_abiname,
+        );
     }
 
     // Add module flags specified via -Z llvm_module_flag
-    for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag {
-        let key = format!("{key}\0");
-        let behavior = match behavior.as_str() {
-            "error" => llvm::LLVMModFlagBehavior::Error,
-            "warning" => llvm::LLVMModFlagBehavior::Warning,
-            "require" => llvm::LLVMModFlagBehavior::Require,
-            "override" => llvm::LLVMModFlagBehavior::Override,
-            "append" => llvm::LLVMModFlagBehavior::Append,
-            "appendunique" => llvm::LLVMModFlagBehavior::AppendUnique,
-            "max" => llvm::LLVMModFlagBehavior::Max,
-            "min" => llvm::LLVMModFlagBehavior::Min,
+    for (key, value, merge_behavior) in &sess.opts.unstable_opts.llvm_module_flag {
+        let merge_behavior = match merge_behavior.as_str() {
+            "error" => llvm::ModuleFlagMergeBehavior::Error,
+            "warning" => llvm::ModuleFlagMergeBehavior::Warning,
+            "require" => llvm::ModuleFlagMergeBehavior::Require,
+            "override" => llvm::ModuleFlagMergeBehavior::Override,
+            "append" => llvm::ModuleFlagMergeBehavior::Append,
+            "appendunique" => llvm::ModuleFlagMergeBehavior::AppendUnique,
+            "max" => llvm::ModuleFlagMergeBehavior::Max,
+            "min" => llvm::ModuleFlagMergeBehavior::Min,
             // We already checked this during option parsing
             _ => unreachable!(),
         };
-        unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_ptr().cast(), *value) }
+        llvm::add_module_flag_u32(llmod, merge_behavior, key, *value);
     }
 
     llmod
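
A note on the `-Z llvm_module_flag` handling in the hunk above: the merge behavior arrives as a plain string, invalid spellings are rejected during option parsing, and the codegen-side match can therefore end in `unreachable!()`. Below is a minimal standalone sketch of that string-to-enum mapping; the `MergeBehavior` enum and `parse_merge_behavior` function are illustrative stand-ins, not rustc's actual `llvm::ModuleFlagMergeBehavior` API.

    #[derive(Debug, PartialEq)]
    enum MergeBehavior {
        Error,
        Warning,
        Require,
        Override,
        Append,
        AppendUnique,
        Max,
        Min,
    }

    // Mirrors the accepted spellings in the match above; `None` stands in for
    // the "rejected during option parsing" case.
    fn parse_merge_behavior(s: &str) -> Option<MergeBehavior> {
        Some(match s {
            "error" => MergeBehavior::Error,
            "warning" => MergeBehavior::Warning,
            "require" => MergeBehavior::Require,
            "override" => MergeBehavior::Override,
            "append" => MergeBehavior::Append,
            "appendunique" => MergeBehavior::AppendUnique,
            "max" => MergeBehavior::Max,
            "min" => MergeBehavior::Min,
            _ => return None,
        })
    }

    fn main() {
        assert_eq!(parse_merge_behavior("override"), Some(MergeBehavior::Override));
        assert_eq!(parse_merge_behavior("bogus"), None);
    }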
@@ -569,11 +564,11 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         &self.statics_to_rauw
     }
 
+    /// Extra state that is only available when coverage instrumentation is enabled.
     #[inline]
-    pub(crate) fn coverage_context(
-        &self,
-    ) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
-        self.coverage_cx.as_ref()
+    #[track_caller]
+    pub(crate) fn coverage_cx(&self) -> &coverageinfo::CrateCoverageContext<'ll, 'tcx> {
+        self.coverage_cx.as_ref().expect("only called when coverage instrumentation is enabled")
     }
 
     pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
@@ -582,8 +577,8 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         unsafe {
             let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
             llvm::LLVMSetInitializer(g, array);
-            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
-            llvm::LLVMSetSection(g, c"llvm.metadata".as_ptr());
+            llvm::set_linkage(g, llvm::Linkage::AppendingLinkage);
+            llvm::set_section(g, c"llvm.metadata");
         }
     }
 }
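
The new `coverage_cx` accessor in the hunk above swaps an `Option`-returning getter for one that asserts the state exists, and `#[track_caller]` makes any panic point at the misuse site rather than at the accessor itself. Here is a small standalone sketch of that pattern; the `Ctx` and `CoverageState` types are invented for illustration and are not rustc's.

    struct CoverageState {
        counters_seen: usize,
    }

    struct Ctx {
        // Only populated when coverage instrumentation is enabled.
        coverage: Option<CoverageState>,
    }

    impl Ctx {
        #[track_caller]
        fn coverage(&self) -> &CoverageState {
            // Panics with the *caller's* location, which makes misuse easy to find.
            self.coverage.as_ref().expect("only called when coverage instrumentation is enabled")
        }
    }

    fn main() {
        let cx = Ctx { coverage: Some(CoverageState { counters_seen: 3 }) };
        assert_eq!(cx.coverage().counters_seen, 3);
    }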
@@ -871,6 +866,11 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
         ifn!("llvm.fma.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.fmuladd.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
+        ifn!("llvm.fmuladd.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+        ifn!("llvm.fmuladd.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+        ifn!("llvm.fmuladd.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
+
         ifn!("llvm.fabs.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
@@ -1071,6 +1071,10 @@ impl<'ll> CodegenCx<'ll, '_> {
 
         if self.sess().instrument_coverage() {
             ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
+            if crate::llvm_util::get_version() >= (19, 0, 0) {
+                ifn!("llvm.instrprof.mcdc.parameters", fn(ptr, t_i64, t_i32) -> void);
+                ifn!("llvm.instrprof.mcdc.tvbitmap.update", fn(ptr, t_i64, t_i32, ptr) -> void);
+            }
         }
 
         ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
@@ -1118,6 +1122,14 @@ impl CodegenCx<'_, '_> {
         name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY));
         name
     }
+
+    /// A wrapper for [`llvm::LLVMSetMetadata`] that takes `Metadata` as a parameter instead of a `Value`.
+    pub(crate) fn set_metadata<'a>(&self, val: &'a Value, kind_id: MetadataType, md: &'a Metadata) {
+        unsafe {
+            let node = llvm::LLVMMetadataAsValue(&self.llcx, md);
+            llvm::LLVMSetMetadata(val, kind_id as c_uint, node);
+        }
+    }
 }
 
 impl HasDataLayout for CodegenCx<'_, '_> {
@@ -1166,10 +1178,11 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
         span: Span,
         fn_abi_request: FnAbiRequest<'tcx>,
     ) -> ! {
-        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
-            self.tcx.dcx().emit_fatal(Spanned { span, node: err })
-        } else {
-            match fn_abi_request {
+        match err {
+            FnAbiError::Layout(LayoutError::SizeOverflow(_) | LayoutError::Cycle(_)) => {
+                self.tcx.dcx().emit_fatal(Spanned { span, node: err });
+            }
+            _ => match fn_abi_request {
                 FnAbiRequest::OfFnPtr { sig, extra_args } => {
                     span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}",);
                 }
@@ -1179,7 +1192,7 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
                         "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}",
                     );
                 }
-            }
+            },
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index d7a4f105f3c..feac97f3e2b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,11 +1,9 @@
-use rustc_middle::mir::coverage::{
-    ConditionInfo, CounterId, CovTerm, DecisionInfo, ExpressionId, MappingKind, SourceRegion,
-};
+use rustc_middle::mir::coverage::{CounterId, CovTerm, ExpressionId, SourceRegion};
 
 /// Must match the layout of `LLVMRustCounterKind`.
 #[derive(Copy, Clone, Debug)]
 #[repr(C)]
-pub enum CounterKind {
+pub(crate) enum CounterKind {
     Zero = 0,
     CounterValueReference = 1,
     Expression = 2,
@@ -25,9 +23,9 @@ pub enum CounterKind {
 /// Must match the layout of `LLVMRustCounter`.
 #[derive(Copy, Clone, Debug)]
 #[repr(C)]
-pub struct Counter {
+pub(crate) struct Counter {
     // Important: The layout (order and types of fields) must match its C++ counterpart.
-    pub kind: CounterKind,
+    pub(crate) kind: CounterKind,
     id: u32,
 }
 
@@ -36,7 +34,7 @@ impl Counter {
     pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
 
     /// Constructs a new `Counter` of kind `CounterValueReference`.
-    pub fn counter_value_reference(counter_id: CounterId) -> Self {
+    pub(crate) fn counter_value_reference(counter_id: CounterId) -> Self {
         Self { kind: CounterKind::CounterValueReference, id: counter_id.as_u32() }
     }
 
@@ -59,7 +57,7 @@ impl Counter {
 /// Must match the layout of `LLVMRustCounterExprKind`.
 #[derive(Copy, Clone, Debug)]
 #[repr(C)]
-pub enum ExprKind {
+pub(crate) enum ExprKind {
     Subtract = 0,
     Add = 1,
 }
@@ -69,49 +67,14 @@ pub enum ExprKind {
 /// Must match the layout of `LLVMRustCounterExpression`.
 #[derive(Copy, Clone, Debug)]
 #[repr(C)]
-pub struct CounterExpression {
-    pub kind: ExprKind,
-    pub lhs: Counter,
-    pub rhs: Counter,
+pub(crate) struct CounterExpression {
+    pub(crate) kind: ExprKind,
+    pub(crate) lhs: Counter,
+    pub(crate) rhs: Counter,
 }
 
-/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
-///
-/// Must match the layout of `LLVMRustCounterMappingRegionKind`.
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-enum RegionKind {
-    /// A CodeRegion associates some code with a counter
-    CodeRegion = 0,
-
-    /// An ExpansionRegion represents a file expansion region that associates
-    /// a source range with the expansion of a virtual source file, such as
-    /// for a macro instantiation or #include file.
-    ExpansionRegion = 1,
-
-    /// A SkippedRegion represents a source range with code that was skipped
-    /// by a preprocessor or similar means.
-    SkippedRegion = 2,
-
-    /// A GapRegion is like a CodeRegion, but its count is only set as the
-    /// line execution count when its the only region in the line.
-    GapRegion = 3,
-
-    /// A BranchRegion represents leaf-level boolean expressions and is
-    /// associated with two counters, each representing the number of times the
-    /// expression evaluates to true or false.
-    BranchRegion = 4,
-
-    /// A DecisionRegion represents a top-level boolean expression and is
-    /// associated with a variable length bitmap index and condition number.
-    MCDCDecisionRegion = 5,
-
-    /// A Branch Region can be extended to include IDs to facilitate MC/DC.
-    MCDCBranchRegion = 6,
-}
-
-mod mcdc {
-    use rustc_middle::mir::coverage::{ConditionInfo, DecisionInfo};
+pub(crate) mod mcdc {
+    use rustc_middle::mir::coverage::{ConditionId, ConditionInfo, DecisionInfo};
 
     /// Must match the layout of `LLVMRustMCDCDecisionParameters`.
     #[repr(C)]
@@ -121,7 +84,6 @@ mod mcdc {
         num_conditions: u16,
     }
 
-    // ConditionId in llvm is `unsigned int` at 18 while `int16_t` at [19](https://github.com/llvm/llvm-project/pull/81257)
     type LLVMConditionId = i16;
 
     /// Must match the layout of `LLVMRustMCDCBranchParameters`.
@@ -132,46 +94,15 @@ mod mcdc {
         condition_ids: [LLVMConditionId; 2],
     }
 
-    #[repr(C)]
-    #[derive(Clone, Copy, Debug)]
-    enum ParameterTag {
-        None = 0,
-        Decision = 1,
-        Branch = 2,
-    }
-    /// Same layout with `LLVMRustMCDCParameters`
-    #[repr(C)]
-    #[derive(Clone, Copy, Debug)]
-    pub(crate) struct Parameters {
-        tag: ParameterTag,
-        decision_params: DecisionParameters,
-        branch_params: BranchParameters,
-    }
-
-    impl Parameters {
-        pub(crate) fn none() -> Self {
-            Self {
-                tag: ParameterTag::None,
-                decision_params: Default::default(),
-                branch_params: Default::default(),
-            }
-        }
-        pub(crate) fn decision(decision_params: DecisionParameters) -> Self {
-            Self { tag: ParameterTag::Decision, decision_params, branch_params: Default::default() }
-        }
-        pub(crate) fn branch(branch_params: BranchParameters) -> Self {
-            Self { tag: ParameterTag::Branch, decision_params: Default::default(), branch_params }
-        }
-    }
-
     impl From<ConditionInfo> for BranchParameters {
         fn from(value: ConditionInfo) -> Self {
+            let to_llvm_cond_id = |cond_id: Option<ConditionId>| {
+                cond_id.and_then(|id| LLVMConditionId::try_from(id.as_usize()).ok()).unwrap_or(-1)
+            };
+            let ConditionInfo { condition_id, true_next_id, false_next_id } = value;
             Self {
-                condition_id: value.condition_id.as_u32() as LLVMConditionId,
-                condition_ids: [
-                    value.false_next_id.as_u32() as LLVMConditionId,
-                    value.true_next_id.as_u32() as LLVMConditionId,
-                ],
+                condition_id: to_llvm_cond_id(Some(condition_id)),
+                condition_ids: [to_llvm_cond_id(false_next_id), to_llvm_cond_id(true_next_id)],
             }
         }
     }
@@ -184,267 +115,68 @@ mod mcdc {
     }
 }
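
The `From<ConditionInfo>` impl above folds both "no next condition" and "index doesn't fit in `i16`" into the `-1` sentinel expected on the LLVM side. A standalone sketch of that conversion, using a plain `usize` where rustc uses `ConditionId`:

    type LlvmConditionId = i16;

    /// Maps an optional condition index to the LLVM-side representation:
    /// -1 when the condition is absent, and -1 as a fallback when the index
    /// does not fit in an i16.
    fn to_llvm_cond_id(cond_id: Option<usize>) -> LlvmConditionId {
        cond_id.and_then(|id| LlvmConditionId::try_from(id).ok()).unwrap_or(-1)
    }

    fn main() {
        assert_eq!(to_llvm_cond_id(Some(2)), 2);
        assert_eq!(to_llvm_cond_id(None), -1);
        assert_eq!(to_llvm_cond_id(Some(usize::MAX)), -1);
    }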
 
-/// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
-/// coverage map, in accordance with the
-/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
-/// The struct composes fields representing the `Counter` type and value(s) (injected counter
-/// ID, or expression type and operands), the source file (an indirect index into a "filenames
-/// array", encoded separately), and source location (start and end positions of the represented
-/// code region).
+/// A span of source code coordinates to be embedded in coverage metadata.
 ///
-/// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
-///
-/// Must match the layout of `LLVMRustCounterMappingRegion`.
-#[derive(Copy, Clone, Debug)]
+/// Must match the layout of `LLVMRustCoverageSpan`.
+#[derive(Clone, Debug)]
 #[repr(C)]
-pub struct CounterMappingRegion {
-    /// The counter type and type-dependent counter data, if any.
-    counter: Counter,
-
-    /// If the `RegionKind` is a `BranchRegion`, this represents the counter
-    /// for the false branch of the region.
-    false_counter: Counter,
-
-    mcdc_params: mcdc::Parameters,
-    /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
-    /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
-    /// that, in turn, are used to look up the filename for this region.
+pub(crate) struct CoverageSpan {
+    /// Local index into the function's local-to-global file ID table.
+    /// The value at that index is itself an index into the coverage filename
+    /// table in the CGU's `__llvm_covmap` section.
     file_id: u32,
 
-    /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
-    /// the mapping regions created as a result of macro expansion, by checking if their file id
-    /// matches the expanded file id.
-    expanded_file_id: u32,
-
-    /// 1-based starting line of the mapping region.
+    /// 1-based starting line of the source code span.
     start_line: u32,
-
-    /// 1-based starting column of the mapping region.
+    /// 1-based starting column of the source code span.
     start_col: u32,
-
-    /// 1-based ending line of the mapping region.
+    /// 1-based ending line of the source code span.
     end_line: u32,
-
-    /// 1-based ending column of the mapping region. If the high bit is set, the current
-    /// mapping region is a gap area.
+    /// 1-based ending column of the source code span. High bit must be unset.
     end_col: u32,
-
-    kind: RegionKind,
 }
 
-impl CounterMappingRegion {
-    pub(crate) fn from_mapping(
-        mapping_kind: &MappingKind,
-        local_file_id: u32,
-        source_region: &SourceRegion,
-    ) -> Self {
-        let &SourceRegion { file_name: _, start_line, start_col, end_line, end_col } =
-            source_region;
-        match *mapping_kind {
-            MappingKind::Code(term) => Self::code_region(
-                Counter::from_term(term),
-                local_file_id,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-            ),
-            MappingKind::Branch { true_term, false_term } => Self::branch_region(
-                Counter::from_term(true_term),
-                Counter::from_term(false_term),
-                local_file_id,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-            ),
-            MappingKind::MCDCBranch { true_term, false_term, mcdc_params } => {
-                Self::mcdc_branch_region(
-                    Counter::from_term(true_term),
-                    Counter::from_term(false_term),
-                    mcdc_params,
-                    local_file_id,
-                    start_line,
-                    start_col,
-                    end_line,
-                    end_col,
-                )
-            }
-            MappingKind::MCDCDecision(decision_info) => Self::decision_region(
-                decision_info,
-                local_file_id,
-                start_line,
-                start_col,
-                end_line,
-                end_col,
-            ),
-        }
-    }
-
-    pub(crate) fn code_region(
-        counter: Counter,
-        file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        Self {
-            counter,
-            false_counter: Counter::ZERO,
-            mcdc_params: mcdc::Parameters::none(),
-            file_id,
-            expanded_file_id: 0,
-            start_line,
-            start_col,
-            end_line,
-            end_col,
-            kind: RegionKind::CodeRegion,
-        }
+impl CoverageSpan {
+    pub(crate) fn from_source_region(file_id: u32, code_region: &SourceRegion) -> Self {
+        let &SourceRegion { file_name: _, start_line, start_col, end_line, end_col } = code_region;
+        // Internally, LLVM uses the high bit of `end_col` to distinguish between
+        // code regions and gap regions, so it can't be used by the column number.
+        assert!(end_col & (1u32 << 31) == 0, "high bit of `end_col` must be unset: {end_col:#X}");
+        Self { file_id, start_line, start_col, end_line, end_col }
     }
+}
 
-    pub(crate) fn branch_region(
-        counter: Counter,
-        false_counter: Counter,
-        file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        Self {
-            counter,
-            false_counter,
-            mcdc_params: mcdc::Parameters::none(),
-            file_id,
-            expanded_file_id: 0,
-            start_line,
-            start_col,
-            end_line,
-            end_col,
-            kind: RegionKind::BranchRegion,
-        }
-    }
-
-    pub(crate) fn mcdc_branch_region(
-        counter: Counter,
-        false_counter: Counter,
-        condition_info: ConditionInfo,
-        file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        Self {
-            counter,
-            false_counter,
-            mcdc_params: mcdc::Parameters::branch(condition_info.into()),
-            file_id,
-            expanded_file_id: 0,
-            start_line,
-            start_col,
-            end_line,
-            end_col,
-            kind: RegionKind::MCDCBranchRegion,
-        }
-    }
-
-    pub(crate) fn decision_region(
-        decision_info: DecisionInfo,
-        file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        let mcdc_params = mcdc::Parameters::decision(decision_info.into());
-
-        Self {
-            counter: Counter::ZERO,
-            false_counter: Counter::ZERO,
-            mcdc_params,
-            file_id,
-            expanded_file_id: 0,
-            start_line,
-            start_col,
-            end_line,
-            end_col,
-            kind: RegionKind::MCDCDecisionRegion,
-        }
-    }
+/// Must match the layout of `LLVMRustCoverageCodeRegion`.
+#[derive(Clone, Debug)]
+#[repr(C)]
+pub(crate) struct CodeRegion {
+    pub(crate) span: CoverageSpan,
+    pub(crate) counter: Counter,
+}
 
-    // This function might be used in the future; the LLVM API is still evolving, as is coverage
-    // support.
-    #[allow(dead_code)]
-    pub(crate) fn expansion_region(
-        file_id: u32,
-        expanded_file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        Self {
-            counter: Counter::ZERO,
-            false_counter: Counter::ZERO,
-            mcdc_params: mcdc::Parameters::none(),
-            file_id,
-            expanded_file_id,
-            start_line,
-            start_col,
-            end_line,
-            end_col,
-            kind: RegionKind::ExpansionRegion,
-        }
-    }
+/// Must match the layout of `LLVMRustCoverageBranchRegion`.
+#[derive(Clone, Debug)]
+#[repr(C)]
+pub(crate) struct BranchRegion {
+    pub(crate) span: CoverageSpan,
+    pub(crate) true_counter: Counter,
+    pub(crate) false_counter: Counter,
+}
 
-    // This function might be used in the future; the LLVM API is still evolving, as is coverage
-    // support.
-    #[allow(dead_code)]
-    pub(crate) fn skipped_region(
-        file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        Self {
-            counter: Counter::ZERO,
-            false_counter: Counter::ZERO,
-            mcdc_params: mcdc::Parameters::none(),
-            file_id,
-            expanded_file_id: 0,
-            start_line,
-            start_col,
-            end_line,
-            end_col,
-            kind: RegionKind::SkippedRegion,
-        }
-    }
+/// Must match the layout of `LLVMRustCoverageMCDCBranchRegion`.
+#[derive(Clone, Debug)]
+#[repr(C)]
+pub(crate) struct MCDCBranchRegion {
+    pub(crate) span: CoverageSpan,
+    pub(crate) true_counter: Counter,
+    pub(crate) false_counter: Counter,
+    pub(crate) mcdc_branch_params: mcdc::BranchParameters,
+}
 
-    // This function might be used in the future; the LLVM API is still evolving, as is coverage
-    // support.
-    #[allow(dead_code)]
-    pub(crate) fn gap_region(
-        counter: Counter,
-        file_id: u32,
-        start_line: u32,
-        start_col: u32,
-        end_line: u32,
-        end_col: u32,
-    ) -> Self {
-        Self {
-            counter,
-            false_counter: Counter::ZERO,
-            mcdc_params: mcdc::Parameters::none(),
-            file_id,
-            expanded_file_id: 0,
-            start_line,
-            start_col,
-            end_line,
-            end_col: (1_u32 << 31) | end_col,
-            kind: RegionKind::GapRegion,
-        }
-    }
+/// Must match the layout of `LLVMRustCoverageMCDCDecisionRegion`.
+#[derive(Clone, Debug)]
+#[repr(C)]
+pub(crate) struct MCDCDecisionRegion {
+    pub(crate) span: CoverageSpan,
+    pub(crate) mcdc_decision_params: mcdc::DecisionParameters,
 }
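
Context for the `end_col` assertion in `CoverageSpan::from_source_region` above: LLVM's coverage mapping format uses the high bit of the ending column to mark a gap region (the removed `gap_region` constructor set it explicitly), so an ordinary code span must leave that bit clear. A standalone sketch of the check and of the gap encoding, assuming plain `u32` column values:

    const GAP_REGION_BIT: u32 = 1 << 31;

    /// Returns the column if the gap bit is clear, or `None` if the value
    /// cannot be used as an ordinary column number.
    fn plain_end_col(end_col: u32) -> Option<u32> {
        if end_col & GAP_REGION_BIT == 0 { Some(end_col) } else { None }
    }

    /// Encodes a gap region's ending column by setting the high bit,
    /// mirroring what the removed `gap_region` constructor did.
    fn gap_end_col(end_col: u32) -> u32 {
        GAP_REGION_BIT | end_col
    }

    fn main() {
        assert_eq!(plain_end_col(17), Some(17));
        assert_eq!(plain_end_col(gap_end_col(17)), None);
    }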
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index a9f65ee8a93..f6378199fe2 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,42 +1,40 @@
+use std::ffi::CString;
+
 use itertools::Itertools as _;
-use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, ConstCodegenMethods};
+use rustc_abi::Align;
+use rustc_codegen_ssa::traits::{
+    BaseTypeCodegenMethods, ConstCodegenMethods, StaticCodegenMethods,
+};
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_index::IndexVec;
+use rustc_middle::mir::coverage::MappingKind;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::{bug, mir};
-use rustc_span::def_id::DefIdSet;
 use rustc_span::Symbol;
+use rustc_span::def_id::DefIdSet;
+use rustc_target::spec::HasTargetSpec;
 use tracing::debug;
 
 use crate::common::CodegenCx;
-use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::ffi;
 use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector};
 use crate::{coverageinfo, llvm};
 
-/// Generates and exports the Coverage Map.
+/// Generates and exports the coverage map, which is embedded in special
+/// linker sections in the final binary.
 ///
-/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
-/// 6 and 7 (encoded as 5 and 6 respectively), as described at
-/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/18.0-2024-02-13/llvm/docs/CoverageMappingFormat.rst).
-/// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
-/// distributed in the `llvm-tools-preview` rustup component.
-///
-/// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
-/// the same version. Clang's implementation of Coverage Map generation was referenced when
-/// implementing this Rust version, and though the format documentation is very explicit and
-/// detailed, some undocumented details in Clang's implementation (that may or may not be important)
-/// were also replicated for Rust's Coverage Map.
+/// Those sections are then read and understood by LLVM's `llvm-cov` tool,
+/// which is distributed in the `llvm-tools` rustup component.
 pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
     let tcx = cx.tcx;
 
     // Ensure that LLVM is using a version of the coverage mapping format that
     // agrees with our Rust-side code. Expected versions (encoded as n-1) are:
-    // - `CovMapVersion::Version6` (5) used by LLVM 13-17
-    // - `CovMapVersion::Version7` (6) used by LLVM 18
+    // - `CovMapVersion::Version7` (6) used by LLVM 18-19
     let covmap_version = {
         let llvm_covmap_version = coverageinfo::mapping_version();
-        let expected_versions = 5..=6;
+        let expected_versions = 6..=6;
         assert!(
             expected_versions.contains(&llvm_covmap_version),
             "Coverage mapping version exposed by `llvm-wrapper` is out of sync; \
@@ -56,11 +54,11 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
         add_unused_functions(cx);
     }
 
-    let function_coverage_map = match cx.coverage_context() {
-        Some(ctx) => ctx.take_function_coverage_map(),
+    // FIXME(#132395): Can this be `None` even when coverage is enabled?
+    let function_coverage_map = match cx.coverage_cx {
+        Some(ref cx) => cx.take_function_coverage_map(),
         None => return,
     };
-
     if function_coverage_map.is_empty() {
         // This module has no functions with coverage instrumentation
         return;
@@ -84,11 +82,9 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
 
     // Generate the coverage map header, which contains the filenames used by
     // this CGU's coverage mappings, and store it in a well-known global.
-    let cov_data_val = generate_coverage_map(cx, covmap_version, filenames_size, filenames_val);
-    coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
+    generate_covmap_record(cx, covmap_version, filenames_size, filenames_val);
 
     let mut unused_function_names = Vec::new();
-    let covfun_section_name = coverageinfo::covfun_section_name(cx);
 
     // Encode coverage mappings and generate function records
     for (instance, function_coverage) in function_coverage_entries {
@@ -117,9 +113,8 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
             unused_function_names.push(mangled_function_name);
         }
 
-        save_function_record(
+        generate_covfun_record(
             cx,
-            &covfun_section_name,
             mangled_function_name,
             source_hash,
             filenames_ref,
@@ -141,7 +136,7 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
             .collect::<Vec<_>>();
         let initializer = cx.const_array(cx.type_ptr(), &name_globals);
 
-        let array = llvm::add_global(cx.llmod, cx.val_ty(initializer), "__llvm_coverage_names");
+        let array = llvm::add_global(cx.llmod, cx.val_ty(initializer), c"__llvm_coverage_names");
         llvm::set_global_constant(array, true);
         llvm::set_linkage(array, llvm::Linkage::InternalLinkage);
         llvm::set_initializer(array, initializer);
@@ -183,8 +178,8 @@ impl GlobalFileTable {
         // Since rustc generates coverage maps with relative paths, the
         // compilation directory can be combined with the relative paths
         // to get absolute paths, if needed.
-        use rustc_session::config::RemapPathScopeComponents;
         use rustc_session::RemapFileNameExt;
+        use rustc_session::config::RemapPathScopeComponents;
         let working_dir: &str = &tcx
             .sess
             .opts
@@ -244,7 +239,10 @@ fn encode_mappings_for_function(
     let expressions = function_coverage.counter_expressions().collect::<Vec<_>>();
 
     let mut virtual_file_mapping = VirtualFileMapping::default();
-    let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+    let mut code_regions = vec![];
+    let mut branch_regions = vec![];
+    let mut mcdc_branch_regions = vec![];
+    let mut mcdc_decision_regions = vec![];
 
     // Group mappings into runs with the same filename, preserving the order
     // yielded by `FunctionCoverage`.
@@ -264,11 +262,36 @@ fn encode_mappings_for_function(
         // form suitable for FFI.
         for (mapping_kind, region) in counter_regions_for_file {
             debug!("Adding counter {mapping_kind:?} to map for {region:?}");
-            mapping_regions.push(CounterMappingRegion::from_mapping(
-                &mapping_kind,
-                local_file_id.as_u32(),
-                region,
-            ));
+            let span = ffi::CoverageSpan::from_source_region(local_file_id.as_u32(), region);
+            match mapping_kind {
+                MappingKind::Code(term) => {
+                    code_regions
+                        .push(ffi::CodeRegion { span, counter: ffi::Counter::from_term(term) });
+                }
+                MappingKind::Branch { true_term, false_term } => {
+                    branch_regions.push(ffi::BranchRegion {
+                        span,
+                        true_counter: ffi::Counter::from_term(true_term),
+                        false_counter: ffi::Counter::from_term(false_term),
+                    });
+                }
+                MappingKind::MCDCBranch { true_term, false_term, mcdc_params } => {
+                    mcdc_branch_regions.push(ffi::MCDCBranchRegion {
+                        span,
+                        true_counter: ffi::Counter::from_term(true_term),
+                        false_counter: ffi::Counter::from_term(false_term),
+                        mcdc_branch_params: ffi::mcdc::BranchParameters::from(mcdc_params),
+                    });
+                }
+                MappingKind::MCDCDecision(mcdc_decision_params) => {
+                    mcdc_decision_regions.push(ffi::MCDCDecisionRegion {
+                        span,
+                        mcdc_decision_params: ffi::mcdc::DecisionParameters::from(
+                            mcdc_decision_params,
+                        ),
+                    });
+                }
+            }
         }
     }
 
@@ -277,21 +300,24 @@ fn encode_mappings_for_function(
         coverageinfo::write_mapping_to_buffer(
             virtual_file_mapping.into_vec(),
             expressions,
-            mapping_regions,
+            &code_regions,
+            &branch_regions,
+            &mcdc_branch_regions,
+            &mcdc_decision_regions,
             buffer,
         );
     })
 }
 
-/// Construct coverage map header and the array of function records, and combine them into the
-/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
-/// specific, well-known section and name.
-fn generate_coverage_map<'ll>(
+/// Generates the contents of the covmap record for this CGU, which mostly
+/// consists of a header and a list of filenames. The record is then stored
+/// as a global variable in the `__llvm_covmap` section.
+fn generate_covmap_record<'ll>(
     cx: &CodegenCx<'ll, '_>,
     version: u32,
     filenames_size: usize,
     filenames_val: &'ll llvm::Value,
-) -> &'ll llvm::Value {
+) {
     debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
 
     // Create the coverage data header (Note, fields 0 and 2 are now always zero,
@@ -306,15 +332,37 @@ fn generate_coverage_map<'ll>(
     );
 
     // Create the complete LLVM coverage data value to add to the LLVM IR
-    cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
+    let covmap_data =
+        cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false);
+
+    let covmap_var_name = CString::new(llvm::build_byte_buffer(|s| unsafe {
+        llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
+    }))
+    .unwrap();
+    debug!("covmap var name: {:?}", covmap_var_name);
+
+    let covmap_section_name = CString::new(llvm::build_byte_buffer(|s| unsafe {
+        llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
+    }))
+    .expect("covmap section name should not contain NUL");
+    debug!("covmap section name: {:?}", covmap_section_name);
+
+    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(covmap_data), &covmap_var_name);
+    llvm::set_initializer(llglobal, covmap_data);
+    llvm::set_global_constant(llglobal, true);
+    llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
+    llvm::set_section(llglobal, &covmap_section_name);
+    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
+    // <https://llvm.org/docs/CoverageMappingFormat.html>
+    llvm::set_alignment(llglobal, Align::EIGHT);
+    cx.add_used_global(llglobal);
 }
 
-/// Construct a function record and combine it with the function's coverage mapping data.
-/// Save the function record into the LLVM IR as a static global using a
-/// specific, well-known section and name.
-fn save_function_record(
+/// Generates the contents of the covfun record for this function, which
+/// contains the function's coverage mapping data. The record is then stored
+/// as a global variable in the `__llvm_covfun` section.
+fn generate_covfun_record(
     cx: &CodegenCx<'_, '_>,
-    covfun_section_name: &str,
     mangled_function_name: &str,
     source_hash: u64,
     filenames_ref: u64,
@@ -341,13 +389,28 @@ fn save_function_record(
         /*packed=*/ true,
     );
 
-    coverageinfo::save_func_record_to_mod(
-        cx,
-        covfun_section_name,
-        func_name_hash,
-        func_record_val,
-        is_used,
-    );
+    // Choose a variable name to hold this function's covfun data.
+    // Functions that are used have a suffix ("u") to distinguish them from
+    // unused copies of the same function (from different CGUs), so that if a
+    // linker sees both it won't discard the used copy's data.
+    let func_record_var_name =
+        CString::new(format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" }))
+            .unwrap();
+    debug!("function record var name: {:?}", func_record_var_name);
+
+    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
+    llvm::set_initializer(llglobal, func_record_val);
+    llvm::set_global_constant(llglobal, true);
+    llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
+    llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
+    llvm::set_section(llglobal, cx.covfun_section_name());
+    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
+    // <https://llvm.org/docs/CoverageMappingFormat.html>
+    llvm::set_alignment(llglobal, Align::EIGHT);
+    if cx.target_spec().supports_comdat() {
+        llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
+    }
+    cx.add_used_global(llglobal);
 }
 
 /// Each CGU will normally only emit coverage metadata for the functions that it actually generates.
@@ -479,9 +542,5 @@ fn add_unused_function_coverage<'tcx>(
     // zero, because none of its counters/expressions are marked as seen.
     let function_coverage = FunctionCoverageCollector::unused(instance, function_coverage_info);
 
-    if let Some(coverage_context) = cx.coverage_context() {
-        coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
-    } else {
-        bug!("Could not get the `coverage_context`");
-    }
+    cx.coverage_cx().function_coverage_map.borrow_mut().insert(instance, function_coverage);
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 2ca5fc3300b..aaba0684c12 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -1,22 +1,20 @@
-use std::cell::RefCell;
+use std::cell::{OnceCell, RefCell};
+use std::ffi::{CStr, CString};
 
 use libc::c_uint;
+use rustc_abi::Size;
 use rustc_codegen_ssa::traits::{
-    BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods,
-    MiscCodegenMethods, StaticCodegenMethods,
+    BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods,
 };
 use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
 use rustc_llvm::RustString;
-use rustc_middle::bug;
 use rustc_middle::mir::coverage::CoverageKind;
-use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::Instance;
-use rustc_target::abi::{Align, Size};
+use rustc_middle::ty::layout::HasTyCtxt;
 use tracing::{debug, instrument};
 
 use crate::builder::Builder;
-use crate::common::CodegenCx;
-use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion};
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::coverageinfo::map_data::FunctionCoverageCollector;
 use crate::llvm;
 
@@ -31,6 +29,8 @@ pub(crate) struct CrateCoverageContext<'ll, 'tcx> {
         RefCell<FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>,
     pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
     pub(crate) mcdc_condition_bitmap_map: RefCell<FxHashMap<Instance<'tcx>, Vec<&'ll llvm::Value>>>,
+
+    covfun_section_name: OnceCell<CString>,
 }
 
 impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
@@ -39,6 +39,7 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
             function_coverage_map: Default::default(),
             pgo_func_name_var_map: Default::default(),
             mcdc_condition_bitmap_map: Default::default(),
+            covfun_section_name: Default::default(),
         }
     }
 
@@ -48,11 +49,10 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
         self.function_coverage_map.replace(FxIndexMap::default())
     }
 
-    /// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is called condition bitmap.
-    /// In order to handle nested decisions, several condition bitmaps can be
-    /// allocated for a function body.
-    /// These values are named `mcdc.addr.{i}` and are a 32-bit integers.
-    /// They respectively hold the condition bitmaps for decisions with a depth of `i`.
+    /// LLVM uses a temp value to record the evaluated MC/DC test vector of each decision, which
+    /// is called the condition bitmap. In order to handle nested decisions, several condition
+    /// bitmaps can be allocated for a function body. These values are named `mcdc.addr.{i}` and
+    /// are 32-bit integers. They respectively hold the condition bitmaps for decisions with a depth of `i`.
     fn try_get_mcdc_condition_bitmap(
         &self,
         instance: &Instance<'tcx>,
@@ -66,27 +66,38 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
     }
 }
 
-// These methods used to be part of trait `CoverageInfoMethods`, which no longer
-// exists after most coverage code was moved out of SSA.
 impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     pub(crate) fn coverageinfo_finalize(&self) {
         mapgen::finalize(self)
     }
 
+    /// Returns the section name to use when embedding per-function coverage information
+    /// in the object file, according to the target's object file format. LLVM's coverage
+    /// tools use information from this section when producing coverage reports.
+    ///
+    /// Typical values are:
+    /// - `__llvm_covfun` on Linux
+    /// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
+    /// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
+    fn covfun_section_name(&self) -> &CStr {
+        self.coverage_cx().covfun_section_name.get_or_init(|| {
+            CString::new(llvm::build_byte_buffer(|s| unsafe {
+                llvm::LLVMRustCoverageWriteFuncSectionNameToString(self.llmod, s);
+            }))
+            .expect("covfun section name should not contain NUL")
+        })
+    }
+
     /// For LLVM codegen, returns a function-specific `Value` for a global
     /// string, to hold the function name passed to LLVM intrinsic
     /// `instrprof.increment()`. The `Value` is only created once per instance.
     /// Multiple invocations with the same instance return the same `Value`.
     fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!("getting pgo_func_name_var for instance={:?}", instance);
-            let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
-            pgo_func_name_var_map
-                .entry(instance)
-                .or_insert_with(|| create_pgo_func_name_var(self, instance))
-        } else {
-            bug!("Could not get the `coverage_context`");
-        }
+        debug!("getting pgo_func_name_var for instance={:?}", instance);
+        let mut pgo_func_name_var_map = self.coverage_cx().pgo_func_name_var_map.borrow_mut();
+        pgo_func_name_var_map
+            .entry(instance)
+            .or_insert_with(|| create_pgo_func_name_var(self, instance))
     }
 }
 
@@ -99,14 +110,14 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
         };
 
         // If there are no MC/DC bitmaps to set up, return immediately.
-        if function_coverage_info.mcdc_bitmap_bytes == 0 {
+        if function_coverage_info.mcdc_bitmap_bits == 0 {
             return;
         }
 
         let fn_name = self.get_pgo_func_name_var(instance);
         let hash = self.const_u64(function_coverage_info.function_source_hash);
-        let bitmap_bytes = self.const_u32(function_coverage_info.mcdc_bitmap_bytes);
-        self.mcdc_parameters(fn_name, hash, bitmap_bytes);
+        let bitmap_bits = self.const_u32(function_coverage_info.mcdc_bitmap_bits as u32);
+        self.mcdc_parameters(fn_name, hash, bitmap_bits);
 
         // Create pointers named `mcdc.addr.{i}` to stack-allocated condition bitmaps.
         let mut cond_bitmaps = vec![];
@@ -120,11 +131,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             cond_bitmaps.push(cond_bitmap);
         }
 
-        self.coverage_context()
-            .expect("always present when coverage is enabled")
-            .mcdc_condition_bitmap_map
-            .borrow_mut()
-            .insert(instance, cond_bitmaps);
+        self.coverage_cx().mcdc_condition_bitmap_map.borrow_mut().insert(instance, cond_bitmaps);
     }
 
     #[instrument(level = "debug", skip(self))]
@@ -145,8 +152,12 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             return;
         };
 
-        let Some(coverage_context) = bx.coverage_context() else { return };
-        let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+        // FIXME(#132395): Unwrapping `coverage_cx` here has led to ICEs in the
+        // wild, so keep this early-return until we understand why.
+        let mut coverage_map = match bx.coverage_cx {
+            Some(ref cx) => cx.function_coverage_map.borrow_mut(),
+            None => return,
+        };
         let func_coverage = coverage_map
             .entry(instance)
             .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
@@ -157,8 +168,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             ),
             CoverageKind::CounterIncrement { id } => {
                 func_coverage.mark_counter_id_seen(id);
-                // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
-                // as that needs an exclusive borrow.
+                // We need to explicitly drop the `RefMut` before calling into
+                // `instrprof_increment`, as that needs an exclusive borrow.
                 drop(coverage_map);
 
                 // The number of counters passed to `llvm.instrprof.increment` might
@@ -186,35 +197,30 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             CoverageKind::ExpressionUsed { id } => {
                 func_coverage.mark_expression_id_seen(id);
             }
-            CoverageKind::CondBitmapUpdate { id, value, decision_depth } => {
+            CoverageKind::CondBitmapUpdate { index, decision_depth } => {
                 drop(coverage_map);
-                assert_ne!(
-                    id.as_u32(),
-                    0,
-                    "ConditionId of evaluated conditions should never be zero"
-                );
-                let cond_bitmap = coverage_context
+                let cond_bitmap = bx
+                    .coverage_cx()
                     .try_get_mcdc_condition_bitmap(&instance, decision_depth)
                     .expect("mcdc cond bitmap should have been allocated for updating");
-                let cond_loc = bx.const_i32(id.as_u32() as i32 - 1);
-                let bool_value = bx.const_bool(value);
-                let fn_name = bx.get_pgo_func_name_var(instance);
-                let hash = bx.const_u64(function_coverage_info.function_source_hash);
-                bx.mcdc_condbitmap_update(fn_name, hash, cond_loc, cond_bitmap, bool_value);
+                let cond_index = bx.const_i32(index as i32);
+                bx.mcdc_condbitmap_update(cond_index, cond_bitmap);
             }
             CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth } => {
                 drop(coverage_map);
-                let cond_bitmap = coverage_context
+                let cond_bitmap = bx.coverage_cx()
                                     .try_get_mcdc_condition_bitmap(&instance, decision_depth)
                                     .expect("mcdc cond bitmap should have been allocated for merging into the global bitmap");
-                let bitmap_bytes = function_coverage_info.mcdc_bitmap_bytes;
-                assert!(bitmap_idx < bitmap_bytes, "bitmap index of the decision out of range");
+                assert!(
+                    bitmap_idx as usize <= function_coverage_info.mcdc_bitmap_bits,
+                    "bitmap index of the decision out of range"
+                );
 
                 let fn_name = bx.get_pgo_func_name_var(instance);
                 let hash = bx.const_u64(function_coverage_info.function_source_hash);
-                let bitmap_bytes = bx.const_u32(bitmap_bytes);
                 let bitmap_index = bx.const_u32(bitmap_idx);
-                bx.mcdc_tvbitmap_update(fn_name, hash, bitmap_bytes, bitmap_index, cond_bitmap);
+                bx.mcdc_tvbitmap_update(fn_name, hash, bitmap_index, cond_bitmap);
+                bx.mcdc_condbitmap_reset(cond_bitmap);
             }
         }
     }
@@ -235,7 +241,7 @@ fn create_pgo_func_name_var<'ll, 'tcx>(
     unsafe {
         llvm::LLVMRustCoverageCreatePGOFuncNameVar(
             llfn,
-            mangled_fn_name.as_ptr().cast(),
+            mangled_fn_name.as_c_char_ptr(),
             mangled_fn_name.len(),
         )
     }
@@ -247,7 +253,7 @@ pub(crate) fn write_filenames_section_to_buffer<'a>(
 ) {
     let (pointers, lengths) = filenames
         .into_iter()
-        .map(|s: &str| (s.as_ptr().cast(), s.len()))
+        .map(|s: &str| (s.as_c_char_ptr(), s.len()))
         .unzip::<_, _, Vec<_>, Vec<_>>();
 
     unsafe {
@@ -263,8 +269,11 @@ pub(crate) fn write_filenames_section_to_buffer<'a>(
 
 pub(crate) fn write_mapping_to_buffer(
     virtual_file_mapping: Vec<u32>,
-    expressions: Vec<CounterExpression>,
-    mapping_regions: Vec<CounterMappingRegion>,
+    expressions: Vec<ffi::CounterExpression>,
+    code_regions: &[ffi::CodeRegion],
+    branch_regions: &[ffi::BranchRegion],
+    mcdc_branch_regions: &[ffi::MCDCBranchRegion],
+    mcdc_decision_regions: &[ffi::MCDCDecisionRegion],
     buffer: &RustString,
 ) {
     unsafe {
@@ -273,93 +282,23 @@ pub(crate) fn write_mapping_to_buffer(
             virtual_file_mapping.len() as c_uint,
             expressions.as_ptr(),
             expressions.len() as c_uint,
-            mapping_regions.as_ptr(),
-            mapping_regions.len() as c_uint,
+            code_regions.as_ptr(),
+            code_regions.len() as c_uint,
+            branch_regions.as_ptr(),
+            branch_regions.len() as c_uint,
+            mcdc_branch_regions.as_ptr(),
+            mcdc_branch_regions.len() as c_uint,
+            mcdc_decision_regions.as_ptr(),
+            mcdc_decision_regions.len() as c_uint,
             buffer,
         );
     }
 }
 
 pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
-    unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
+    unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_c_char_ptr(), bytes.len()) }
 }
 
 pub(crate) fn mapping_version() -> u32 {
     unsafe { llvm::LLVMRustCoverageMappingVersion() }
 }
-
-pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
-    cx: &CodegenCx<'ll, 'tcx>,
-    cov_data_val: &'ll llvm::Value,
-) {
-    let covmap_var_name = llvm::build_string(|s| unsafe {
-        llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
-    })
-    .expect("Rust Coverage Mapping var name failed UTF-8 conversion");
-    debug!("covmap var name: {:?}", covmap_var_name);
-
-    let covmap_section_name = llvm::build_string(|s| unsafe {
-        llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
-    })
-    .expect("Rust Coverage section name failed UTF-8 conversion");
-    debug!("covmap section name: {:?}", covmap_section_name);
-
-    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
-    llvm::set_initializer(llglobal, cov_data_val);
-    llvm::set_global_constant(llglobal, true);
-    llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
-    llvm::set_section(llglobal, &covmap_section_name);
-    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
-    llvm::set_alignment(llglobal, Align::EIGHT);
-    cx.add_used_global(llglobal);
-}
-
-pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
-    cx: &CodegenCx<'ll, 'tcx>,
-    covfun_section_name: &str,
-    func_name_hash: u64,
-    func_record_val: &'ll llvm::Value,
-    is_used: bool,
-) {
-    // Assign a name to the function record. This is used to merge duplicates.
-    //
-    // In LLVM, a "translation unit" (effectively, a `Crate` in Rust) can describe functions that
-    // are included-but-not-used. If (or when) Rust generates functions that are
-    // included-but-not-used, note that a dummy description for a function included-but-not-used
-    // in a Crate can be replaced by full description provided by a different Crate. The two kinds
-    // of descriptions play distinct roles in LLVM IR; therefore, assign them different names (by
-    // appending "u" to the end of the function record var name, to prevent `linkonce_odr` merging.
-    let func_record_var_name =
-        format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
-    debug!("function record var name: {:?}", func_record_var_name);
-    debug!("function record section name: {:?}", covfun_section_name);
-
-    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
-    llvm::set_initializer(llglobal, func_record_val);
-    llvm::set_global_constant(llglobal, true);
-    llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
-    llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
-    llvm::set_section(llglobal, covfun_section_name);
-    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
-    llvm::set_alignment(llglobal, Align::EIGHT);
-    llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
-    cx.add_used_global(llglobal);
-}
-
-/// Returns the section name string to pass through to the linker when embedding
-/// per-function coverage information in the object file, according to the target
-/// platform's object file format.
-///
-/// LLVM's coverage tools read coverage mapping details from this section when
-/// producing coverage reports.
-///
-/// Typical values are:
-/// - `__llvm_covfun` on Linux
-/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
-/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
-pub(crate) fn covfun_section_name(cx: &CodegenCx<'_, '_>) -> String {
-    llvm::build_string(|s| unsafe {
-        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
-    })
-    .expect("Rust Coverage function record section name failed UTF-8 conversion")
-}
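
The MC/DC plumbing above keeps one stack-allocated condition bitmap per decision depth (named `mcdc.addr.{i}`), stored in a per-instance vector, and looks one up by depth whenever a condition or test-vector update is emitted. A standalone sketch of that lookup, with a string key standing in for rustc's `Instance` and `u32` standing in for the LLVM values:

    use std::collections::HashMap;

    struct McdcBitmaps {
        // instance name -> condition bitmaps, indexed by decision depth
        by_instance: HashMap<String, Vec<u32>>,
    }

    impl McdcBitmaps {
        fn try_get(&self, instance: &str, decision_depth: u16) -> Option<u32> {
            self.by_instance.get(instance)?.get(decision_depth as usize).copied()
        }
    }

    fn main() {
        let mut map = HashMap::new();
        // Two nested decisions => two bitmaps for this function.
        map.insert("f".to_string(), vec![0b0000, 0b0000]);
        let bitmaps = McdcBitmaps { by_instance: map };
        assert_eq!(bitmaps.try_get("f", 1), Some(0));
        assert_eq!(bitmaps.try_get("f", 2), None);
        assert_eq!(bitmaps.try_get("g", 0), None);
    }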
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index c3087d8ec30..ac6c2fb1b83 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -1,7 +1,7 @@
 use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
 use rustc_codegen_ssa::traits::*;
-use rustc_index::bit_set::BitSet;
 use rustc_index::Idx;
+use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::{Body, SourceScope};
 use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::{self, Instance};
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index dc228e94811..aef8642f199 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -44,7 +44,8 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
         // Add the pretty printers for the standard library first.
         section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0");
 
-        // Next, add the pretty printers that were specified via the `#[debugger_visualizer]` attribute.
+        // Next, add the pretty printers that were specified via the `#[debugger_visualizer]`
+        // attribute.
         let visualizers = collect_debugger_visualizers_transitive(
             cx.tcx,
             DebuggerVisualizerType::GdbPrettyPrinter,
@@ -71,11 +72,11 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
             let section_var = cx
                 .define_global(section_var_name, llvm_type)
                 .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
-            llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr());
+            llvm::set_section(section_var, c".debug_gdb_scripts");
             llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
             llvm::LLVMSetGlobalConstant(section_var, llvm::True);
             llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
-            llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
+            llvm::set_linkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
             // This should make sure that the whole section is not larger than
             // the string it contains. Otherwise we get a warning from GDB.
             llvm::LLVMSetAlignment(section_var, 1);
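
For context on the `.debug_gdb_scripts` handling above: each entry in that section is a kind byte followed by a NUL-terminated string (the code uses `\x01` for a script file name), and the global is given alignment 1 so the section is exactly as large as its contents. A standalone sketch of building one such entry; the helper name is invented for illustration:

    /// Builds a `.debug_gdb_scripts` entry of the "script file" kind (0x01):
    /// one kind byte, the file name, then a NUL terminator.
    fn gdb_script_file_entry(file_name: &str) -> Vec<u8> {
        let mut entry = Vec::with_capacity(file_name.len() + 2);
        entry.push(0x01);
        entry.extend_from_slice(file_name.as_bytes());
        entry.push(0);
        entry
    }

    fn main() {
        let entry = gdb_script_file_entry("gdb_load_rust_pretty_printers.py");
        assert_eq!(entry.first(), Some(&0x01));
        assert_eq!(entry.last(), Some(&0x00));
    }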
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index d231b103964..151923a3bd2 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -5,9 +5,9 @@ use std::path::{Path, PathBuf};
 use std::{iter, ptr};
 
 use libc::{c_char, c_longlong, c_uint};
-use rustc_codegen_ssa::debuginfo::type_names::{cpp_like_debuginfo, VTableNameKind};
+use rustc_abi::{Align, Size};
+use rustc_codegen_ssa::debuginfo::type_names::{VTableNameKind, cpp_like_debuginfo};
 use rustc_codegen_ssa::traits::*;
-use rustc_fs_util::path_to_c_string;
 use rustc_hir::def::{CtorKind, DefKind};
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_middle::bug;
@@ -18,23 +18,22 @@ use rustc_middle::ty::{
 };
 use rustc_session::config::{self, DebugInfo, Lto};
 use rustc_span::symbol::Symbol;
-use rustc_span::{hygiene, FileName, FileNameDisplayPreference, SourceFile, DUMMY_SP};
+use rustc_span::{DUMMY_SP, FileName, FileNameDisplayPreference, SourceFile, hygiene};
 use rustc_symbol_mangling::typeid_for_trait_ref;
-use rustc_target::abi::{Align, Size};
 use rustc_target::spec::DebuginfoKind;
 use smallvec::smallvec;
 use tracing::{debug, instrument};
 
 use self::type_map::{DINodeCreationResult, Stub, UniqueTypeId};
+use super::CodegenUnitDebugContext;
 use super::namespace::mangled_name_of_instance;
 use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name};
 use super::utils::{
-    create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB,
+    DIB, create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit,
 };
-use super::CodegenUnitDebugContext;
-use crate::common::CodegenCx;
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::debuginfo::metadata::type_map::build_type_with_children;
-use crate::debuginfo::utils::{fat_pointer_kind, FatPtrKind};
+use crate::debuginfo::utils::{WidePtrKind, wide_pointer_kind};
 use crate::llvm::debuginfo::{
     DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind,
     DebugNameTableKind,
@@ -125,7 +124,9 @@ fn build_fixed_size_array_di_node<'ll, 'tcx>(
 
     let (size, align) = cx.size_and_align_of(array_type);
 
-    let upper_bound = len.eval_target_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
+    let upper_bound = len
+        .try_to_target_usize(cx.tcx)
+        .expect("expected monomorphic const in codegen") as c_longlong;
 
     let subrange =
         unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
@@ -159,7 +160,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     unique_type_id: UniqueTypeId<'tcx>,
 ) -> DINodeCreationResult<'ll> {
     // The debuginfo generated by this function is only valid if `ptr_type` is really just
-    // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
+    // a (wide) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
     assert_eq!(
         cx.size_and_align_of(ptr_type),
         cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
@@ -172,7 +173,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     let data_layout = &cx.tcx.data_layout;
     let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
 
-    match fat_pointer_kind(cx, pointee_type) {
+    match wide_pointer_kind(cx, pointee_type) {
         None => {
             // This is a thin pointer. Create a regular pointer type and give it the correct name.
             assert_eq!(
@@ -188,14 +189,14 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                     data_layout.pointer_size.bits(),
                     data_layout.pointer_align.abi.bits() as u32,
                     0, // Ignore DWARF address space.
-                    ptr_type_debuginfo_name.as_ptr().cast(),
+                    ptr_type_debuginfo_name.as_c_char_ptr(),
                     ptr_type_debuginfo_name.len(),
                 )
             };
 
             DINodeCreationResult { di_node, already_stored_in_typemap: false }
         }
-        Some(fat_pointer_kind) => {
+        Some(wide_pointer_kind) => {
             type_map::build_type_with_children(
                 cx,
                 type_map::stub(
@@ -208,7 +209,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                     DIFlags::FlagZero,
                 ),
                 |cx, owner| {
-                    // FIXME: If this fat pointer is a `Box` then we don't want to use its
+                    // FIXME: If this wide pointer is a `Box` then we don't want to use its
                     //        type layout and instead use the layout of the raw pointer inside
                     //        of it.
                     //        The proper way to handle this is to not treat Box as a pointer
@@ -216,24 +217,25 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                     //        need to make sure that we don't break existing debuginfo consumers
                     //        by doing that (at least not without a warning period).
                     let layout_type = if ptr_type.is_box() {
-                        // The assertion at the start of this function ensures we have a ZST allocator.
-                        // We'll make debuginfo "skip" all ZST allocators, not just the default allocator.
+                        // The assertion at the start of this function ensures we have a ZST
+                        // allocator. We'll make debuginfo "skip" all ZST allocators, not just the
+                        // default allocator.
                         Ty::new_mut_ptr(cx.tcx, pointee_type)
                     } else {
                         ptr_type
                     };
 
                     let layout = cx.layout_of(layout_type);
-                    let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
-                    let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);
+                    let addr_field = layout.field(cx, abi::WIDE_PTR_ADDR);
+                    let extra_field = layout.field(cx, abi::WIDE_PTR_EXTRA);
 
-                    let (addr_field_name, extra_field_name) = match fat_pointer_kind {
-                        FatPtrKind::Dyn => ("pointer", "vtable"),
-                        FatPtrKind::Slice => ("data_ptr", "length"),
+                    let (addr_field_name, extra_field_name) = match wide_pointer_kind {
+                        WidePtrKind::Dyn => ("pointer", "vtable"),
+                        WidePtrKind::Slice => ("data_ptr", "length"),
                     };
 
-                    assert_eq!(abi::FAT_PTR_ADDR, 0);
-                    assert_eq!(abi::FAT_PTR_EXTRA, 1);
+                    assert_eq!(abi::WIDE_PTR_ADDR, 0);
+                    assert_eq!(abi::WIDE_PTR_EXTRA, 1);
 
                     // The data pointer type is a regular, thin pointer, regardless of whether this
                     // is a slice or a trait object.
@@ -255,7 +257,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                             owner,
                             addr_field_name,
                             (addr_field.size, addr_field.align.abi),
-                            layout.fields.offset(abi::FAT_PTR_ADDR),
+                            layout.fields.offset(abi::WIDE_PTR_ADDR),
                             DIFlags::FlagZero,
                             data_ptr_type_di_node,
                         ),
@@ -264,7 +266,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                             owner,
                             extra_field_name,
                             (extra_field.size, extra_field.align.abi),
-                            layout.fields.offset(abi::FAT_PTR_EXTRA),
+                            layout.fields.offset(abi::WIDE_PTR_EXTRA),
                             DIFlags::FlagZero,
                             type_di_node(cx, extra_field.ty),
                         ),
@@ -280,8 +282,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     unique_type_id: UniqueTypeId<'tcx>,
 ) -> DINodeCreationResult<'ll> {
-    // It's possible to create a self-referential
-    // type in Rust by using 'impl trait':
+    // It's possible to create a self-referential type in Rust by using 'impl trait':
     //
     // fn foo() -> impl Copy { foo }
     //
@@ -346,7 +347,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
             size,
             align,
             0, // Ignore DWARF address space.
-            name.as_ptr().cast(),
+            name.as_c_char_ptr(),
             name.len(),
         )
     };
@@ -389,7 +390,7 @@ fn build_dyn_type_di_node<'ll, 'tcx>(
 ///
 /// NOTE: We currently just emit the debuginfo for the element type here
 /// (i.e. `T` for slices and `u8` for `str`), so that we end up with
-/// `*const T` for the `data_ptr` field of the corresponding fat-pointer
+/// `*const T` for the `data_ptr` field of the corresponding wide-pointer
 /// debuginfo of `&[T]`.
 ///
 /// It would be preferable and more accurate if we emitted a DIArray of T
@@ -516,7 +517,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
             let name = "<recur_type>";
             llvm::LLVMRustDIBuilderCreateBasicType(
                 DIB(cx),
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
                 cx.tcx.data_layout.pointer_size.bits(),
                 DW_ATE_unsigned,
@@ -573,14 +574,14 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi
                     {
                         // If the compiler's working directory (which also is the DW_AT_comp_dir of
                         // the compilation unit) is a prefix of the path we are about to emit, then
-                        // only emit the part relative to the working directory.
-                        // Because of path remapping we sometimes see strange things here: `abs_path`
-                        // might actually look like a relative path
-                        // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
-                        // taking the working directory into account, downstream tooling will
-                        // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
-                        // which makes no sense. Usually in such cases the working directory will also
-                        // be remapped to `<crate-name-and-version>` or some other prefix of the path
+                        // only emit the part relative to the working directory. Because of path
+                        // remapping we sometimes see strange things here: `abs_path` might
+                        // actually look like a relative path (e.g.
+                        // `<crate-name-and-version>/src/lib.rs`), so if we emit it without taking
+                        // the working directory into account, downstream tooling will interpret it
+                        // as `<working-directory>/<crate-name-and-version>/src/lib.rs`, which
+                        // makes no sense. Usually in such cases the working directory will also be
+                        // remapped to `<crate-name-and-version>` or some other prefix of the path
                         // we are remapping, so we end up with
                         // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
                         // By moving the working directory portion into the `directory` part of the
@@ -628,6 +629,7 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi
             rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
             rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
             rustc_span::SourceFileHashAlgorithm::Sha256 => llvm::ChecksumKind::SHA256,
+            rustc_span::SourceFileHashAlgorithm::Blake3 => llvm::ChecksumKind::None,
         };
         let hash_value = hex_encode(source_file.src_hash.hash_bytes());
 
@@ -637,14 +639,14 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi
         unsafe {
             llvm::LLVMRustDIBuilderCreateFile(
                 DIB(cx),
-                file_name.as_ptr().cast(),
+                file_name.as_c_char_ptr(),
                 file_name.len(),
-                directory.as_ptr().cast(),
+                directory.as_c_char_ptr(),
                 directory.len(),
                 hash_kind,
-                hash_value.as_ptr().cast(),
+                hash_value.as_c_char_ptr(),
                 hash_value.len(),
-                source.map_or(ptr::null(), |x| x.as_ptr().cast()),
+                source.map_or(ptr::null(), |x| x.as_c_char_ptr()),
                 source.map_or(0, |x| x.len()),
             )
         }
@@ -659,12 +661,12 @@ fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
 
         llvm::LLVMRustDIBuilderCreateFile(
             DIB(cx),
-            file_name.as_ptr().cast(),
+            file_name.as_c_char_ptr(),
             file_name.len(),
-            directory.as_ptr().cast(),
+            directory.as_c_char_ptr(),
             directory.len(),
             llvm::ChecksumKind::None,
-            hash_value.as_ptr().cast(),
+            hash_value.as_c_char_ptr(),
             hash_value.len(),
             ptr::null(),
             0,
@@ -785,7 +787,7 @@ fn build_basic_type_di_node<'ll, 'tcx>(
     let ty_di_node = unsafe {
         llvm::LLVMRustDIBuilderCreateBasicType(
             DIB(cx),
-            name.as_ptr().cast(),
+            name.as_c_char_ptr(),
             name.len(),
             cx.size_of(t).bits(),
             encoding,
@@ -807,7 +809,7 @@ fn build_basic_type_di_node<'ll, 'tcx>(
         llvm::LLVMRustDIBuilderCreateTypedef(
             DIB(cx),
             ty_di_node,
-            typedef_name.as_ptr().cast(),
+            typedef_name.as_c_char_ptr(),
             typedef_name.len(),
             unknown_file_metadata(cx),
             0,
@@ -858,7 +860,7 @@ fn build_param_type_di_node<'ll, 'tcx>(
         di_node: unsafe {
             llvm::LLVMRustDIBuilderCreateBasicType(
                 DIB(cx),
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
                 Size::ZERO.bits(),
                 DW_ATE_unsigned,
@@ -873,8 +875,8 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
     codegen_unit_name: &str,
     debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
 ) -> &'ll DIDescriptor {
-    use rustc_session::config::RemapPathScopeComponents;
     use rustc_session::RemapFileNameExt;
+    use rustc_session::config::RemapPathScopeComponents;
     let mut name_in_debuginfo = tcx
         .sess
         .local_crate_source_file()
@@ -945,9 +947,9 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
     unsafe {
         let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile(
             debug_context.builder,
-            name_in_debuginfo.as_ptr().cast(),
+            name_in_debuginfo.as_c_char_ptr(),
             name_in_debuginfo.len(),
-            work_dir.as_ptr().cast(),
+            work_dir.as_c_char_ptr(),
             work_dir.len(),
             llvm::ChecksumKind::None,
             ptr::null(),
@@ -960,7 +962,7 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
             debug_context.builder,
             DW_LANG_RUST,
             compile_unit_file,
-            producer.as_ptr().cast(),
+            producer.as_c_char_ptr(),
             producer.len(),
             tcx.sess.opts.optimize != config::OptLevel::No,
             c"".as_ptr(),
@@ -968,7 +970,7 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
             // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
             // put the path supplied to `MCSplitDwarfFile` into the debug info of the final
             // output(s).
-            split_name.as_ptr().cast(),
+            split_name.as_c_char_ptr(),
             split_name.len(),
             kind,
             0,
@@ -976,33 +978,8 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
             debug_name_table_kind,
         );
 
-        if tcx.sess.opts.unstable_opts.profile {
-            let default_gcda_path = &output_filenames.with_extension("gcda");
-            let gcda_path =
-                tcx.sess.opts.unstable_opts.profile_emit.as_ref().unwrap_or(default_gcda_path);
-
-            let gcov_cu_info = [
-                path_to_mdstring(debug_context.llcontext, &output_filenames.with_extension("gcno")),
-                path_to_mdstring(debug_context.llcontext, gcda_path),
-                unit_metadata,
-            ];
-            let gcov_metadata = llvm::LLVMMDNodeInContext2(
-                debug_context.llcontext,
-                gcov_cu_info.as_ptr(),
-                gcov_cu_info.len(),
-            );
-            let val = llvm::LLVMMetadataAsValue(debug_context.llcontext, gcov_metadata);
-
-            llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, c"llvm.gcov".as_ptr(), val);
-        }
-
         return unit_metadata;
     };
-
-    fn path_to_mdstring<'ll>(llcx: &'ll llvm::Context, path: &Path) -> &'ll llvm::Metadata {
-        let path_str = path_to_c_string(path);
-        unsafe { llvm::LLVMMDStringInContext2(llcx, path_str.as_ptr(), path_str.as_bytes().len()) }
-    }
 }
 
 /// Creates a `DW_TAG_member` entry inside the DIE represented by the given `type_di_node`.
@@ -1019,7 +996,7 @@ fn build_field_di_node<'ll, 'tcx>(
         llvm::LLVMRustDIBuilderCreateMemberType(
             DIB(cx),
             owner,
-            name.as_ptr().cast(),
+            name.as_c_char_ptr(),
             name.len(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
@@ -1303,7 +1280,7 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>(
                             llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
                                 DIB(cx),
                                 None,
-                                name.as_ptr().cast(),
+                                name.as_c_char_ptr(),
                                 name.len(),
                                 actual_type_di_node,
                             )
@@ -1379,9 +1356,9 @@ pub(crate) fn build_global_var_di_node<'ll>(
         llvm::LLVMRustDIBuilderCreateStaticVariable(
             DIB(cx),
             Some(var_scope),
-            var_name.as_ptr().cast(),
+            var_name.as_c_char_ptr(),
             var_name.len(),
-            linkage_name.as_ptr().cast(),
+            linkage_name.as_c_char_ptr(),
             linkage_name.len(),
             file_metadata,
             line_number,
@@ -1545,20 +1522,16 @@ pub(crate) fn apply_vcall_visibility_metadata<'ll, 'tcx>(
     let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref);
 
     unsafe {
-        let typeid = llvm::LLVMMDStringInContext(
+        let typeid = llvm::LLVMMDStringInContext2(
             cx.llcx,
             trait_ref_typeid.as_ptr() as *const c_char,
-            trait_ref_typeid.as_bytes().len() as c_uint,
+            trait_ref_typeid.as_bytes().len(),
         );
-        let v = [cx.const_usize(0), typeid];
+        let v = [llvm::LLVMValueAsMetadata(cx.const_usize(0)), typeid];
         llvm::LLVMRustGlobalAddMetadata(
             vtable,
             llvm::MD_type as c_uint,
-            llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
-                cx.llcx,
-                v.as_ptr(),
-                v.len() as c_uint,
-            )),
+            llvm::LLVMMDNodeInContext2(cx.llcx, v.as_ptr(), v.len()),
         );
         let vcall_visibility = llvm::LLVMValueAsMetadata(cx.const_u64(vcall_visibility as u64));
         let vcall_visibility_metadata = llvm::LLVMMDNodeInContext2(cx.llcx, &vcall_visibility, 1);
@@ -1603,9 +1576,9 @@ pub(crate) fn create_vtable_di_node<'ll, 'tcx>(
         llvm::LLVMRustDIBuilderCreateStaticVariable(
             DIB(cx),
             NO_SCOPE_METADATA,
-            vtable_name.as_ptr().cast(),
+            vtable_name.as_c_char_ptr(),
             vtable_name.len(),
-            linkage_name.as_ptr().cast(),
+            linkage_name.as_c_char_ptr(),
             linkage_name.len(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
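
Most of the mechanical churn in this file is the switch from `name.as_ptr().cast()` to `name.as_c_char_ptr()`, using the `AsCCharPtr` helper now imported from `crate::common`. A minimal, self-contained sketch of what such a helper amounts to (the real trait in `common.rs` may carry additional impls) is:

    use std::ffi::c_char;

    // Sketch only: re-cast a string's `*const u8` to the `*const c_char`
    // expected by the LLVM FFI, instead of sprinkling `.as_ptr().cast()`.
    pub trait AsCCharPtr {
        fn as_c_char_ptr(&self) -> *const c_char;
    }

    impl AsCCharPtr for str {
        fn as_c_char_ptr(&self) -> *const c_char {
            self.as_ptr().cast()
        }
    }

    fn main() {
        let name = "<recur_type>";
        assert!(!name.as_c_char_ptr().is_null());
    }
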
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 8a132f89aa3..100b046cee2 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -1,6 +1,7 @@
 use std::borrow::Cow;
 
 use libc::c_uint;
+use rustc_abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
 use rustc_codegen_ssa::debuginfo::type_names::compute_debuginfo_type_name;
 use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo};
 use rustc_codegen_ssa::traits::ConstCodegenMethods;
@@ -8,16 +9,15 @@ use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, AdtDef, CoroutineArgs, CoroutineArgsExt, Ty};
-use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
 use smallvec::smallvec;
 
-use crate::common::CodegenCx;
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::debuginfo::metadata::enums::DiscrResult;
 use crate::debuginfo::metadata::type_map::{self, Stub, UniqueTypeId};
 use crate::debuginfo::metadata::{
+    DINodeCreationResult, NO_GENERICS, NO_SCOPE_METADATA, SmallVec, UNKNOWN_LINE_NUMBER,
     build_field_di_node, file_metadata, size_and_align_of, type_di_node, unknown_file_metadata,
-    visibility_di_flags, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
-    UNKNOWN_LINE_NUMBER,
+    visibility_di_flags,
 };
 use crate::debuginfo::utils::DIB;
 use crate::llvm::debuginfo::{DIFile, DIFlags, DIType};
@@ -359,7 +359,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>(
             llvm::LLVMRustDIBuilderCreateStaticMemberType(
                 DIB(cx),
                 enum_type_di_node,
-                TAG_FIELD_NAME.as_ptr().cast(),
+                TAG_FIELD_NAME.as_c_char_ptr(),
                 TAG_FIELD_NAME.len(),
                 unknown_file_metadata(cx),
                 UNKNOWN_LINE_NUMBER,
@@ -537,7 +537,7 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
                     llvm::LLVMRustDIBuilderCreateStaticMemberType(
                         DIB(cx),
                         wrapper_struct_type_di_node,
-                        name.as_ptr().cast(),
+                        name.as_c_char_ptr(),
                         name.len(),
                         unknown_file_metadata(cx),
                         UNKNOWN_LINE_NUMBER,
@@ -785,7 +785,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
             llvm::LLVMRustDIBuilderCreateMemberType(
                 DIB(cx),
                 enum_type_di_node,
-                field_name.as_ptr().cast(),
+                field_name.as_c_char_ptr(),
                 field_name.len(),
                 file_di_node,
                 line_number,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index edaf73b74a2..b3d4a6642a1 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -1,5 +1,6 @@
 use std::borrow::Cow;
 
+use rustc_abi::{FieldIdx, TagEncoding, VariantIdx, Variants};
 use rustc_codegen_ssa::debuginfo::type_names::{compute_debuginfo_type_name, cpp_like_debuginfo};
 use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo};
 use rustc_hir::def::CtorKind;
@@ -9,17 +10,16 @@ use rustc_middle::mir::CoroutineLayout;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, AdtDef, CoroutineArgs, CoroutineArgsExt, Ty, VariantDef};
 use rustc_span::Symbol;
-use rustc_target::abi::{FieldIdx, TagEncoding, VariantIdx, Variants};
 
 use super::type_map::{DINodeCreationResult, UniqueTypeId};
-use super::{size_and_align_of, SmallVec};
-use crate::common::CodegenCx;
+use super::{SmallVec, size_and_align_of};
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::debuginfo::metadata::type_map::{self, Stub};
 use crate::debuginfo::metadata::{
-    build_field_di_node, build_generic_type_param_di_nodes, type_di_node, unknown_file_metadata,
-    UNKNOWN_LINE_NUMBER,
+    UNKNOWN_LINE_NUMBER, build_field_di_node, build_generic_type_param_di_nodes, type_di_node,
+    unknown_file_metadata,
 };
-use crate::debuginfo::utils::{create_DIArray, get_namespace_for_item, DIB};
+use crate::debuginfo::utils::{DIB, create_DIArray, get_namespace_for_item};
 use crate::llvm::debuginfo::{DIFlags, DIType};
 use crate::llvm::{self};
 
@@ -106,7 +106,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
             let value = [value as u64, (value >> 64) as u64];
             Some(llvm::LLVMRustDIBuilderCreateEnumerator(
                 DIB(cx),
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
                 value.as_ptr(),
                 size.bits() as libc::c_uint,
@@ -119,7 +119,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
         llvm::LLVMRustDIBuilderCreateEnumerationType(
             DIB(cx),
             containing_scope,
-            type_name.as_ptr().cast(),
+            type_name.as_c_char_ptr(),
             type_name.len(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 0b3140cc91f..d4006691d37 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -1,22 +1,22 @@
 use std::borrow::Cow;
 
 use libc::c_uint;
+use rustc_abi::{Size, TagEncoding, VariantIdx, Variants};
 use rustc_codegen_ssa::debuginfo::type_names::compute_debuginfo_type_name;
 use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo};
 use rustc_codegen_ssa::traits::ConstCodegenMethods;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self};
-use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
 use smallvec::smallvec;
 
-use crate::common::CodegenCx;
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::debuginfo::metadata::type_map::{self, Stub, StubInfo, UniqueTypeId};
 use crate::debuginfo::metadata::{
-    file_metadata, size_and_align_of, type_di_node, unknown_file_metadata, visibility_di_flags,
-    DINodeCreationResult, SmallVec, NO_GENERICS, UNKNOWN_LINE_NUMBER,
+    DINodeCreationResult, NO_GENERICS, SmallVec, UNKNOWN_LINE_NUMBER, file_metadata,
+    size_and_align_of, type_di_node, unknown_file_metadata, visibility_di_flags,
 };
-use crate::debuginfo::utils::{create_DIArray, get_namespace_for_item, DIB};
+use crate::debuginfo::utils::{DIB, create_DIArray, get_namespace_for_item};
 use crate::llvm::debuginfo::{DIFile, DIFlags, DIType};
 use crate::llvm::{self};
 
@@ -244,7 +244,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
             llvm::LLVMRustDIBuilderCreateVariantPart(
                 DIB(cx),
                 enum_type_di_node,
-                variant_part_name.as_ptr().cast(),
+                variant_part_name.as_c_char_ptr(),
                 variant_part_name.len(),
                 unknown_file_metadata(cx),
                 UNKNOWN_LINE_NUMBER,
@@ -253,7 +253,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
                 DIFlags::FlagZero,
                 tag_member_di_node,
                 create_DIArray(DIB(cx), &[]),
-                variant_part_unique_type_id_str.as_ptr().cast(),
+                variant_part_unique_type_id_str.as_c_char_ptr(),
                 variant_part_unique_type_id_str.len(),
             )
         },
@@ -327,7 +327,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
                 Some(llvm::LLVMRustDIBuilderCreateMemberType(
                     DIB(cx),
                     containing_scope,
-                    tag_name.as_ptr().cast(),
+                    tag_name.as_c_char_ptr(),
                     tag_name.len(),
                     unknown_file_metadata(cx),
                     UNKNOWN_LINE_NUMBER,
@@ -399,7 +399,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
         llvm::LLVMRustDIBuilderCreateVariantMemberType(
             DIB(cx),
             variant_part_di_node,
-            variant_member_info.variant_name.as_ptr().cast(),
+            variant_member_info.variant_name.as_c_char_ptr(),
             variant_member_info.variant_name.len(),
             file_di_node,
             line_number,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
index 6d21f4204e3..5120b63d173 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -1,16 +1,16 @@
 use std::cell::RefCell;
 
+use rustc_abi::{Align, Size, VariantIdx};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_macros::HashStable;
 use rustc_middle::bug;
 use rustc_middle::ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
-use rustc_target::abi::{Align, Size, VariantIdx};
 
-use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER};
-use crate::common::CodegenCx;
-use crate::debuginfo::utils::{create_DIArray, debug_context, DIB};
+use super::{SmallVec, UNKNOWN_LINE_NUMBER, unknown_file_metadata};
+use crate::common::{AsCCharPtr, CodegenCx};
+use crate::debuginfo::utils::{DIB, create_DIArray, debug_context};
 use crate::llvm::debuginfo::{DIFlags, DIScope, DIType};
 use crate::llvm::{self};
 
@@ -191,7 +191,7 @@ pub(super) fn stub<'ll, 'tcx>(
                 llvm::LLVMRustDIBuilderCreateStructType(
                     DIB(cx),
                     containing_scope,
-                    name.as_ptr().cast(),
+                    name.as_c_char_ptr(),
                     name.len(),
                     unknown_file_metadata(cx),
                     UNKNOWN_LINE_NUMBER,
@@ -202,7 +202,7 @@ pub(super) fn stub<'ll, 'tcx>(
                     empty_array,
                     0,
                     vtable_holder,
-                    unique_type_id_str.as_ptr().cast(),
+                    unique_type_id_str.as_c_char_ptr(),
                     unique_type_id_str.len(),
                 )
             }
@@ -211,7 +211,7 @@ pub(super) fn stub<'ll, 'tcx>(
             llvm::LLVMRustDIBuilderCreateUnionType(
                 DIB(cx),
                 containing_scope,
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
                 unknown_file_metadata(cx),
                 UNKNOWN_LINE_NUMBER,
@@ -220,7 +220,7 @@ pub(super) fn stub<'ll, 'tcx>(
                 flags,
                 Some(empty_array),
                 0,
-                unique_type_id_str.as_ptr().cast(),
+                unique_type_id_str.as_c_char_ptr(),
                 unique_type_id_str.len(),
             )
         },
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 920c9e06be4..9e1e5127e80 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -5,6 +5,7 @@ use std::ops::Range;
 use std::{iter, ptr};
 
 use libc::c_uint;
+use rustc_abi::Size;
 use rustc_codegen_ssa::debuginfo::type_names;
 use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
 use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
@@ -16,22 +17,21 @@ use rustc_index::IndexVec;
 use rustc_middle::mir;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, GenericArgsRef, Instance, ParamEnv, Ty, TypeVisitableExt};
-use rustc_session::config::{self, DebugInfo};
 use rustc_session::Session;
+use rustc_session::config::{self, DebugInfo};
 use rustc_span::symbol::Symbol;
 use rustc_span::{
     BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span, StableSourceFileId,
 };
-use rustc_target::abi::Size;
 use smallvec::SmallVec;
 use tracing::debug;
 
-use self::metadata::{file_metadata, type_di_node, UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER, file_metadata, type_di_node};
 use self::namespace::mangled_name_of_instance;
-use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
+use self::utils::{DIB, create_DIArray, is_node_local_to_unit};
 use crate::abi::FnAbi;
 use crate::builder::Builder;
-use crate::common::CodegenCx;
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::llvm;
 use crate::llvm::debuginfo::{
     DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
@@ -55,7 +55,6 @@ const DW_TAG_arg_variable: c_uint = 0x101;
 
 /// A context object for maintaining all state needed by the debuginfo module.
 pub(crate) struct CodegenUnitDebugContext<'ll, 'tcx> {
-    llcontext: &'ll llvm::Context,
     llmod: &'ll llvm::Module,
     builder: &'ll mut DIBuilder<'ll>,
     created_files: RefCell<UnordMap<Option<(StableSourceFileId, SourceFileHash)>, &'ll DIFile>>,
@@ -78,9 +77,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
         debug!("CodegenUnitDebugContext::new");
         let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
         // DIBuilder inherits context from the module, so we'd better use the same one
-        let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
         CodegenUnitDebugContext {
-            llcontext,
             llmod,
             builder,
             created_files: Default::default(),
@@ -91,45 +88,39 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
     }
 
     pub(crate) fn finalize(&self, sess: &Session) {
-        unsafe {
-            llvm::LLVMRustDIBuilderFinalize(self.builder);
-
-            if !sess.target.is_like_msvc {
-                // Debuginfo generation in LLVM by default uses a higher
-                // version of dwarf than macOS currently understands. We can
-                // instruct LLVM to emit an older version of dwarf, however,
-                // for macOS to understand. For more info see #11352
-                // This can be overridden using --llvm-opts -dwarf-version,N.
-                // Android has the same issue (#22398)
-                let dwarf_version = sess
-                    .opts
-                    .unstable_opts
-                    .dwarf_version
-                    .unwrap_or(sess.target.default_dwarf_version);
-                llvm::LLVMRustAddModuleFlagU32(
-                    self.llmod,
-                    llvm::LLVMModFlagBehavior::Warning,
-                    c"Dwarf Version".as_ptr(),
-                    dwarf_version,
-                );
-            } else {
-                // Indicate that we want CodeView debug information on MSVC
-                llvm::LLVMRustAddModuleFlagU32(
-                    self.llmod,
-                    llvm::LLVMModFlagBehavior::Warning,
-                    c"CodeView".as_ptr(),
-                    1,
-                )
-            }
-
-            // Prevent bitcode readers from deleting the debug info.
-            llvm::LLVMRustAddModuleFlagU32(
+        unsafe { llvm::LLVMRustDIBuilderFinalize(self.builder) };
+        if !sess.target.is_like_msvc {
+            // Debuginfo generation in LLVM by default uses a higher
+            // version of dwarf than macOS currently understands. We can
+            // instruct LLVM to emit an older version of dwarf, however,
+            // for macOS to understand. For more info see #11352
+            // This can be overridden using --llvm-opts -dwarf-version,N.
+            // Android has the same issue (#22398)
+            let dwarf_version =
+                sess.opts.unstable_opts.dwarf_version.unwrap_or(sess.target.default_dwarf_version);
+            llvm::add_module_flag_u32(
                 self.llmod,
-                llvm::LLVMModFlagBehavior::Warning,
-                c"Debug Info Version".as_ptr(),
-                llvm::LLVMRustDebugMetadataVersion(),
+                llvm::ModuleFlagMergeBehavior::Warning,
+                "Dwarf Version",
+                dwarf_version,
+            );
+        } else {
+            // Indicate that we want CodeView debug information on MSVC
+            llvm::add_module_flag_u32(
+                self.llmod,
+                llvm::ModuleFlagMergeBehavior::Warning,
+                "CodeView",
+                1,
             );
         }
+
+        // Prevent bitcode readers from deleting the debug info.
+        llvm::add_module_flag_u32(
+            self.llmod,
+            llvm::ModuleFlagMergeBehavior::Warning,
+            "Debug Info Version",
+            unsafe { llvm::LLVMRustDebugMetadataVersion() },
+        );
     }
 }
 
@@ -350,7 +341,6 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         type_names::push_generic_params(
             tcx,
             tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
-            enclosing_fn_def_id,
             &mut name,
         );
 
@@ -365,7 +355,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 
         let mut flags = DIFlags::FlagPrototyped;
 
-        if fn_abi.ret.layout.abi.is_uninhabited() {
+        if fn_abi.ret.layout.is_uninhabited() {
             flags |= DIFlags::FlagNoReturn;
         }
 
@@ -390,9 +380,9 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             llvm::LLVMRustDIBuilderCreateMethod(
                 DIB(self),
                 containing_scope,
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
-                linkage_name.as_ptr().cast(),
+                linkage_name.as_c_char_ptr(),
                 linkage_name.len(),
                 file_metadata,
                 loc.line,
@@ -407,9 +397,9 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             llvm::LLVMRustDIBuilderCreateFunction(
                 DIB(self),
                 containing_scope,
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
-                linkage_name.as_ptr().cast(),
+                linkage_name.as_c_char_ptr(),
                 linkage_name.len(),
                 file_metadata,
                 loc.line,
@@ -495,7 +485,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                                 Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
                                     DIB(cx),
                                     None,
-                                    name.as_ptr().cast(),
+                                    name.as_c_char_ptr(),
                                     name.len(),
                                     actual_type_metadata,
                                 ))
@@ -555,17 +545,14 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 }
             }
 
-            let scope = namespace::item_namespace(
-                cx,
-                DefId {
-                    krate: instance.def_id().krate,
-                    index: cx
-                        .tcx
-                        .def_key(instance.def_id())
-                        .parent
-                        .expect("get_containing_scope: missing parent?"),
-                },
-            );
+            let scope = namespace::item_namespace(cx, DefId {
+                krate: instance.def_id().krate,
+                index: cx
+                    .tcx
+                    .def_key(instance.def_id())
+                    .parent
+                    .expect("get_containing_scope: missing parent?"),
+            });
             (scope, false)
         }
     }
@@ -639,7 +626,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 DIB(self),
                 dwarf_tag,
                 scope_metadata,
-                name.as_ptr().cast(),
+                name.as_c_char_ptr(),
                 name.len(),
                 file_metadata,
                 loc.line,
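
`finalize` now goes through `llvm::add_module_flag_u32` instead of calling `LLVMRustAddModuleFlagU32` with C string literals at every site, which is also why the `llcontext` field can be dropped. The following is only a plausible sketch of such a helper, assuming the crate's `Module` and `ModuleFlagMergeBehavior` types and the pre-existing FFI shape; the real version in `llvm/mod.rs` may pass the key differently.

    // Plausible sketch, not the actual implementation: take a Rust &str key,
    // do the C-string conversion once, and keep the unsafe FFI call local.
    fn add_module_flag_u32(
        module: &Module,
        merge_behavior: ModuleFlagMergeBehavior,
        key: &str,
        value: u32,
    ) {
        let key = std::ffi::CString::new(key).expect("module flag key must not contain NUL");
        unsafe {
            LLVMRustAddModuleFlagU32(module, merge_behavior, key.as_ptr(), value);
        }
    }
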
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
index 83d7a82dadc..33d9bc23890 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -4,8 +4,8 @@ use rustc_codegen_ssa::debuginfo::type_names;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::{self, Instance};
 
-use super::utils::{debug_context, DIB};
-use crate::common::CodegenCx;
+use super::utils::{DIB, debug_context};
+use crate::common::{AsCCharPtr, CodegenCx};
 use crate::llvm;
 use crate::llvm::debuginfo::DIScope;
 
@@ -13,8 +13,7 @@ pub(crate) fn mangled_name_of_instance<'a, 'tcx>(
     cx: &CodegenCx<'a, 'tcx>,
     instance: Instance<'tcx>,
 ) -> ty::SymbolName<'tcx> {
-    let tcx = cx.tcx;
-    tcx.symbol_name(instance)
+    cx.tcx.symbol_name(instance)
 }
 
 pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
@@ -37,7 +36,7 @@ pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'l
         llvm::LLVMRustDIBuilderCreateNameSpace(
             DIB(cx),
             parent_scope,
-            namespace_name_string.as_ptr().cast(),
+            namespace_name_string.as_c_char_ptr(),
             namespace_name_string.len(),
             false, // ExportSymbols (only relevant for C++ anonymous namespaces)
         )
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 321553a3df0..960487ada16 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -5,8 +5,8 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty};
 use tracing::trace;
 
-use super::namespace::item_namespace;
 use super::CodegenUnitDebugContext;
+use super::namespace::item_namespace;
 use crate::common::CodegenCx;
 use crate::llvm;
 use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope};
@@ -49,23 +49,23 @@ pub(crate) fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId
 }
 
 #[derive(Debug, PartialEq, Eq)]
-pub(crate) enum FatPtrKind {
+pub(crate) enum WidePtrKind {
     Slice,
     Dyn,
 }
 
 /// Determines if `pointee_ty` is slice-like or trait-object-like, i.e.
-/// if the second field of the fat pointer is a length or a vtable-pointer.
-/// If `pointee_ty` does not require a fat pointer (because it is Sized) then
+/// if the second field of the wide pointer is a length or a vtable-pointer.
+/// If `pointee_ty` does not require a wide pointer (because it is Sized) then
 /// the function returns `None`.
-pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
+pub(crate) fn wide_pointer_kind<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     pointee_ty: Ty<'tcx>,
-) -> Option<FatPtrKind> {
+) -> Option<WidePtrKind> {
     let pointee_tail_ty = cx.tcx.struct_tail_for_codegen(pointee_ty, cx.param_env());
     let layout = cx.layout_of(pointee_tail_ty);
     trace!(
-        "fat_pointer_kind: {:?} has layout {:?} (is_unsized? {})",
+        "wide_pointer_kind: {:?} has layout {:?} (is_unsized? {})",
         pointee_tail_ty,
         layout,
         layout.is_unsized()
@@ -76,8 +76,8 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
     }
 
     match *pointee_tail_ty.kind() {
-        ty::Str | ty::Slice(_) => Some(FatPtrKind::Slice),
-        ty::Dynamic(..) => Some(FatPtrKind::Dyn),
+        ty::Str | ty::Slice(_) => Some(WidePtrKind::Slice),
+        ty::Dynamic(..) => Some(WidePtrKind::Dyn),
         ty::Foreign(_) => {
             // Assert that pointers to foreign types really are thin:
             assert_eq!(
@@ -90,7 +90,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
             // For all other pointee types we should already have returned None
             // at the beginning of the function.
             panic!(
-                "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {pointee_tail_ty:?}"
+                "wide_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {pointee_tail_ty:?}"
             )
         }
     }
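
The `FatPtrKind` -> `WidePtrKind` rename is terminology-only: the two-word pointers to `[T]`/`str` and to `dyn Trait` are now consistently called "wide" pointers. A small standalone illustration of the layouts the two variants describe (ordinary Rust, unrelated to compiler internals):

    use std::mem::size_of;

    trait Draw {
        fn draw(&self);
    }

    fn main() {
        let word = size_of::<usize>();
        // Thin pointer: one word.
        assert_eq!(size_of::<&u32>(), word);
        // WidePtrKind::Slice: (data_ptr, length).
        assert_eq!(size_of::<&[u32]>(), 2 * word);
        // WidePtrKind::Dyn: (pointer, vtable).
        assert_eq!(size_of::<&dyn Draw>(), 2 * word);
        println!("thin = {word} bytes, wide = {} bytes", 2 * word);
    }
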
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index b0b29ca1280..d338c848754 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -20,8 +20,10 @@ use smallvec::SmallVec;
 use tracing::debug;
 
 use crate::abi::{FnAbi, FnAbiLlvmExt};
+use crate::common::AsCCharPtr;
 use crate::context::CodegenCx;
 use crate::llvm::AttributePlace::Function;
+use crate::llvm::Visibility;
 use crate::type_::Type;
 use crate::value::Value;
 use crate::{attributes, llvm};
@@ -40,7 +42,7 @@ fn declare_raw_fn<'ll>(
 ) -> &'ll Value {
     debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
     let llfn = unsafe {
-        llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
+        llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_c_char_ptr(), name.len(), ty)
     };
 
     llvm::SetFunctionCallConv(llfn, callconv);
@@ -67,7 +69,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     /// return its Value instead.
     pub(crate) fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
         debug!("declare_global(name={:?})", name);
-        unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
+        unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_c_char_ptr(), name.len(), ty) }
     }
 
     /// Declare a C ABI function.
@@ -83,14 +85,9 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         unnamed: llvm::UnnamedAddr,
         fn_type: &'ll Type,
     ) -> &'ll Value {
-        // Declare C ABI functions with the visibility used by C by default.
-        let visibility = if self.tcx.sess.default_hidden_visibility() {
-            llvm::Visibility::Hidden
-        } else {
-            llvm::Visibility::Default
-        };
-
-        declare_raw_fn(self, name, llvm::CCallConv, unnamed, visibility, fn_type)
+        // Visibility should always be default for declarations, otherwise the linker may report an
+        // error.
+        declare_raw_fn(self, name, llvm::CCallConv, unnamed, Visibility::Default, fn_type)
     }
 
     /// Declare an entry Function
@@ -107,11 +104,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         unnamed: llvm::UnnamedAddr,
         fn_type: &'ll Type,
     ) -> &'ll Value {
-        let visibility = if self.tcx.sess.default_hidden_visibility() {
-            llvm::Visibility::Hidden
-        } else {
-            llvm::Visibility::Default
-        };
+        let visibility = Visibility::from_generic(self.tcx.sess.default_visibility());
         declare_raw_fn(self, name, callconv, unnamed, visibility, fn_type)
     }
 
@@ -217,7 +210,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     /// Gets declared value by name.
     pub(crate) fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
         debug!("get_declared_value(name={:?})", name);
-        unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
+        unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_c_char_ptr(), name.len()) }
     }
 
     /// Gets defined or externally defined (AvailableExternally linkage) value by
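
`declare_entry_fn` now derives the LLVM visibility from the session's `default_visibility()` via `Visibility::from_generic`, rather than the old hidden/default toggle. Roughly, such a conversion maps the target-independent visibility setting onto LLVM's enum; the sketch below is hypothetical and the concrete enum names may differ slightly from what this commit uses.

    // Hypothetical mapping sketch; `SymbolVisibility` is assumed to be the
    // session-level enum behind `default_visibility()`.
    impl Visibility {
        pub fn from_generic(visibility: SymbolVisibility) -> Self {
            match visibility {
                SymbolVisibility::Hidden => Visibility::Hidden,
                SymbolVisibility::Protected => Visibility::Protected,
                SymbolVisibility::Interposable => Visibility::Default,
            }
        }
    }
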
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index bb481d2a308..3cdb5b971d9 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -31,6 +31,15 @@ pub(crate) struct UnstableCTargetFeature<'a> {
     pub feature: &'a str,
 }
 
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_forbidden_ctarget_feature)]
+#[note]
+#[note(codegen_llvm_forbidden_ctarget_feature_issue)]
+pub(crate) struct ForbiddenCTargetFeature<'a> {
+    pub feature: &'a str,
+    pub reason: &'a str,
+}
+
 #[derive(Subdiagnostic)]
 pub(crate) enum PossibleFeature<'a> {
     #[help(codegen_llvm_possible_feature)]
@@ -80,30 +89,6 @@ impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> {
     }
 }
 
-pub(crate) struct TargetFeatureDisableOrEnable<'a> {
-    pub features: &'a [&'a str],
-    pub span: Option<Span>,
-    pub missing_features: Option<MissingFeatures>,
-}
-
-#[derive(Subdiagnostic)]
-#[help(codegen_llvm_missing_features)]
-pub(crate) struct MissingFeatures;
-
-impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
-    fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
-        let mut diag = Diag::new(dcx, level, fluent::codegen_llvm_target_feature_disable_or_enable);
-        if let Some(span) = self.span {
-            diag.span(span);
-        };
-        if let Some(missing_features) = self.missing_features {
-            diag.subdiagnostic(missing_features);
-        }
-        diag.arg("features", self.features.join(", "));
-        diag
-    }
-}
-
 #[derive(Diagnostic)]
 #[diag(codegen_llvm_lto_disallowed)]
 pub(crate) struct LtoDisallowed;
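
`ForbiddenCTargetFeature` follows the same `#[derive(Diagnostic)]` pattern as the existing `UnstableCTargetFeature`: the `#[diag(...)]` slug names a Fluent message in `messages.ftl`, and the struct fields become `{$feature}`/`{$reason}` arguments of that message. Emitting it is then a single call wherever `-Ctarget-feature` values are validated; the line below is illustrative only, and the real call site in `llvm_util.rs` decides whether this surfaces as a hard error or a warning.

    // Illustrative, hypothetical call site; `feature` and `reason` are &str here.
    sess.dcx().emit_err(ForbiddenCTargetFeature { feature, reason });
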
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index cc921aa87bc..e9c687d75e3 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1,6 +1,7 @@
 use std::assert_matches::assert_matches;
 use std::cmp::Ordering;
 
+use rustc_abi::{self as abi, Align, Float, HasDataLayout, Primitive, Size};
 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
@@ -12,15 +13,14 @@ use rustc_middle::mir::BinOp;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
-use rustc_span::{sym, Span, Symbol};
-use rustc_target::abi::{self, Align, Float, HasDataLayout, Primitive, Size};
+use rustc_span::{Span, Symbol, sym};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
 use tracing::debug;
 
-use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
+use crate::abi::{ExternAbi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
 use crate::builder::Builder;
 use crate::context::CodegenCx;
-use crate::llvm;
+use crate::llvm::{self, Metadata};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::va_arg::emit_va_arg;
@@ -86,6 +86,11 @@ fn get_simple_intrinsic<'ll>(
         sym::fmaf64 => "llvm.fma.f64",
         sym::fmaf128 => "llvm.fma.f128",
 
+        sym::fmuladdf16 => "llvm.fmuladd.f16",
+        sym::fmuladdf32 => "llvm.fmuladd.f32",
+        sym::fmuladdf64 => "llvm.fmuladd.f64",
+        sym::fmuladdf128 => "llvm.fmuladd.f128",
+
         sym::fabsf16 => "llvm.fabs.f16",
         sym::fabsf32 => "llvm.fabs.f32",
         sym::fabsf64 => "llvm.fabs.f64",
@@ -253,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(scalar) => {
+                match fn_abi.ret.layout.backend_repr {
+                    abi::BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -330,15 +335,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     sym::prefetch_write_instruction => (1, 0),
                     _ => bug!(),
                 };
-                self.call_intrinsic(
-                    "llvm.prefetch",
-                    &[
-                        args[0].immediate(),
-                        self.const_i32(rw),
-                        args[1].immediate(),
-                        self.const_i32(cache_type),
-                    ],
-                )
+                self.call_intrinsic("llvm.prefetch", &[
+                    args[0].immediate(),
+                    self.const_i32(rw),
+                    args[1].immediate(),
+                    self.const_i32(cache_type),
+                ])
             }
             sym::ctlz
             | sym::ctlz_nonzero
@@ -356,10 +358,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     Some((width, signed)) => match name {
                         sym::ctlz | sym::cttz => {
                             let y = self.const_bool(false);
-                            let ret = self.call_intrinsic(
-                                &format!("llvm.{name}.i{width}"),
-                                &[args[0].immediate(), y],
-                            );
+                            let ret = self.call_intrinsic(&format!("llvm.{name}.i{width}"), &[
+                                args[0].immediate(),
+                                y,
+                            ]);
 
                             self.intcast(ret, llret_ty, false)
                         }
@@ -376,26 +378,24 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             self.intcast(ret, llret_ty, false)
                         }
                         sym::ctpop => {
-                            let ret = self.call_intrinsic(
-                                &format!("llvm.ctpop.i{width}"),
-                                &[args[0].immediate()],
-                            );
+                            let ret = self.call_intrinsic(&format!("llvm.ctpop.i{width}"), &[
+                                args[0].immediate(),
+                            ]);
                             self.intcast(ret, llret_ty, false)
                         }
                         sym::bswap => {
                             if width == 8 {
                                 args[0].immediate() // byte swap a u8/i8 is just a no-op
                             } else {
-                                self.call_intrinsic(
-                                    &format!("llvm.bswap.i{width}"),
-                                    &[args[0].immediate()],
-                                )
+                                self.call_intrinsic(&format!("llvm.bswap.i{width}"), &[
+                                    args[0].immediate()
+                                ])
                             }
                         }
-                        sym::bitreverse => self.call_intrinsic(
-                            &format!("llvm.bitreverse.i{width}"),
-                            &[args[0].immediate()],
-                        ),
+                        sym::bitreverse => self
+                            .call_intrinsic(&format!("llvm.bitreverse.i{width}"), &[
+                                args[0].immediate()
+                            ]),
                         sym::rotate_left | sym::rotate_right => {
                             let is_left = name == sym::rotate_left;
                             let val = args[0].immediate();
@@ -404,7 +404,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             let llvm_name =
                                 &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
 
-                            // llvm expects shift to be the same type as the values, but rust always uses `u32`
+                            // llvm expects shift to be the same type as the values, but rust
+                            // always uses `u32`.
                             let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);
 
                             self.call_intrinsic(llvm_name, &[val, val, raw_shift])
@@ -435,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             sym::raw_eq => {
-                use abi::Abi::*;
+                use abi::BackendRepr::*;
                 let tp_ty = fn_args.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
-                let use_integer_compare = match layout.abi() {
+                let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
                     Uninhabited | Vector { .. } => false,
-                    Aggregate { .. } => {
+                    Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
@@ -470,10 +471,11 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
             sym::compare_bytes => {
                 // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
-                let cmp = self.call_intrinsic(
-                    "memcmp",
-                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
-                );
+                let cmp = self.call_intrinsic("memcmp", &[
+                    args[0].immediate(),
+                    args[1].immediate(),
+                    args[2].immediate(),
+                ]);
                 // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                 self.sext(cmp, self.type_ix(32))
             }
@@ -547,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
 
                 let llret_ty = if ret_ty.is_simd()
-                    && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+                    && let abi::BackendRepr::Memory { .. } =
+                        self.layout_of(ret_ty).layout.backend_repr
                 {
                     let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
                     let elem_ll_ty = match elem_ty.kind() {
@@ -573,8 +576,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     span,
                 ) {
                     Ok(llval) => llval,
-                    // If there was an error, just skip this invocation... we'll abort compilation anyway,
-                    // but we can keep codegen'ing to find more errors.
+                    // If there was an error, just skip this invocation... we'll abort compilation
+                    // anyway, but we can keep codegen'ing to find more errors.
                     Err(()) => return Ok(()),
                 }
             }
@@ -616,9 +619,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
     }
 
-    fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
+    fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value {
         // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
         // optimization pass replaces calls to this intrinsic with code to test type membership.
+        let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) };
         self.call_intrinsic("llvm.type.test", &[pointer, typeid])
     }
 
@@ -626,8 +630,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         &mut self,
         llvtable: &'ll Value,
         vtable_byte_offset: u64,
-        typeid: &'ll Value,
+        typeid: &'ll Metadata,
     ) -> Self::Value {
+        let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) };
         let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
         let type_checked_load =
             self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
@@ -781,11 +786,12 @@ fn codegen_msvc_try<'ll>(
         let type_info =
             bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
         let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
-        unsafe {
-            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
+
+        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
+        if bx.cx.tcx.sess.target.supports_comdat() {
             llvm::SetUniqueComdat(bx.llmod, tydesc);
-            llvm::LLVMSetInitializer(tydesc, type_info);
         }
+        unsafe { llvm::LLVMSetInitializer(tydesc, type_info) };
 
         // The flag value of 8 indicates that we are catching the exception by
         // reference instead of by value. We can't use catch by value because
@@ -1058,7 +1064,7 @@ fn gen_fn<'ll, 'tcx>(
     cx.set_frame_pointer_type(llfn);
     cx.apply_target_cpu_attr(llfn);
     // FIXME(eddyb) find a nicer way to do this.
-    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
+    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
     let llbb = Builder::append_block(cx, llfn, "entry-block");
     let bx = Builder::build(cx, llbb);
     codegen(bx);
@@ -1088,7 +1094,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
             tcx.types.unit,
             false,
             hir::Safety::Unsafe,
-            Abi::Rust,
+            ExternAbi::Rust,
         )),
     );
     // `unsafe fn(*mut i8, *mut i8) -> ()`
@@ -1099,7 +1105,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
             tcx.types.unit,
             false,
             hir::Safety::Unsafe,
-            Abi::Rust,
+            ExternAbi::Rust,
         )),
     );
     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
@@ -1108,7 +1114,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
         tcx.types.i32,
         false,
         hir::Safety::Unsafe,
-        Abi::Rust,
+        ExternAbi::Rust,
     ));
     let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
     cx.rust_try_fn.set(Some(rust_try));
@@ -1173,8 +1179,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
             ty::Array(elem, len)
                 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
-                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
-                        == Some(expected_bytes) =>
+                    && len
+                        .try_to_target_usize(bx.tcx)
+                        .expect("expected monomorphic const in codegen")
+                        == expected_bytes =>
             {
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
@@ -1215,17 +1223,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if let Some(cmp_op) = comparison {
         let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
 
-        require!(
-            in_len == out_len,
-            InvalidMonomorphization::ReturnLengthInputType {
-                span,
-                name,
-                in_len,
-                in_ty,
-                ret_ty,
-                out_len
-            }
-        );
+        require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType {
+            span,
+            name,
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        });
         require!(
             bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
             InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
@@ -1242,23 +1247,25 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     if name == sym::simd_shuffle_generic {
-        let idx = fn_args[2]
-            .expect_const()
-            .eval(tcx, ty::ParamEnv::reveal_all(), span)
-            .unwrap()
-            .1
-            .unwrap_branch();
+        let idx = fn_args[2].expect_const().try_to_valtree().unwrap().0.unwrap_branch();
         let n = idx.len() as u64;
 
         let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
-        require!(
-            out_len == n,
-            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
-        );
-        require!(
-            in_elem == out_ty,
-            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
-        );
+        require!(out_len == n, InvalidMonomorphization::ReturnLength {
+            span,
+            name,
+            in_len: n,
+            ret_ty,
+            out_len
+        });
+        require!(in_elem == out_ty, InvalidMonomorphization::ReturnElement {
+            span,
+            name,
+            in_elem,
+            in_ty,
+            ret_ty,
+            out_ty
+        });
 
         let total_len = in_len * 2;
 
@@ -1303,14 +1310,21 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         };
 
         let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
-        require!(
-            out_len == n,
-            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
-        );
-        require!(
-            in_elem == out_ty,
-            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
-        );
+        require!(out_len == n, InvalidMonomorphization::ReturnLength {
+            span,
+            name,
+            in_len: n,
+            ret_ty,
+            out_len
+        });
+        require!(in_elem == out_ty, InvalidMonomorphization::ReturnElement {
+            span,
+            name,
+            in_elem,
+            in_ty,
+            ret_ty,
+            out_ty
+        });
 
         let total_len = u128::from(in_len) * 2;
 
@@ -1335,16 +1349,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     if name == sym::simd_insert {
-        require!(
-            in_elem == arg_tys[2],
-            InvalidMonomorphization::InsertedType {
-                span,
-                name,
-                in_elem,
-                in_ty,
-                out_ty: arg_tys[2]
-            }
-        );
+        require!(in_elem == arg_tys[2], InvalidMonomorphization::InsertedType {
+            span,
+            name,
+            in_elem,
+            in_ty,
+            out_ty: arg_tys[2]
+        });
         let idx = bx
             .const_to_opt_u128(args[1].immediate(), false)
             .expect("typeck should have ensure that this is a const");
@@ -1363,10 +1374,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         ));
     }
     if name == sym::simd_extract {
-        require!(
-            ret_ty == in_elem,
-            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
-        );
+        require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType {
+            span,
+            name,
+            in_elem,
+            in_ty,
+            ret_ty
+        });
         let idx = bx
             .const_to_opt_u128(args[1].immediate(), false)
             .expect("typeck should have ensure that this is a const");
@@ -1385,10 +1399,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let m_elem_ty = in_elem;
         let m_len = in_len;
         let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
-        require!(
-            m_len == v_len,
-            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
-        );
+        require!(m_len == v_len, InvalidMonomorphization::MismatchedLengths {
+            span,
+            name,
+            m_len,
+            v_len
+        });
         match m_elem_ty.kind() {
             ty::Int(_) => {}
             _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
@@ -1450,8 +1466,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
             ty::Array(elem, len)
                 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
-                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
-                        == Some(expected_bytes) =>
+                    && len
+                        .try_to_target_usize(bx.tcx)
+                        .expect("expected monomorphic const in codegen")
+                        == expected_bytes =>
             {
                 // Zero-extend iN to the array length:
                 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
@@ -1615,34 +1633,30 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         require_simd!(ret_ty, SimdReturn);
 
         // Of the same length:
-        require!(
-            in_len == out_len,
-            InvalidMonomorphization::SecondArgumentLength {
-                span,
-                name,
-                in_len,
-                in_ty,
-                arg_ty: arg_tys[1],
-                out_len
-            }
-        );
-        require!(
-            in_len == out_len2,
-            InvalidMonomorphization::ThirdArgumentLength {
-                span,
-                name,
-                in_len,
-                in_ty,
-                arg_ty: arg_tys[2],
-                out_len: out_len2
-            }
-        );
+        require!(in_len == out_len, InvalidMonomorphization::SecondArgumentLength {
+            span,
+            name,
+            in_len,
+            in_ty,
+            arg_ty: arg_tys[1],
+            out_len
+        });
+        require!(in_len == out_len2, InvalidMonomorphization::ThirdArgumentLength {
+            span,
+            name,
+            in_len,
+            in_ty,
+            arg_ty: arg_tys[2],
+            out_len: out_len2
+        });
 
         // The return type must match the first argument type
-        require!(
-            ret_ty == in_ty,
-            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
-        );
+        require!(ret_ty == in_ty, InvalidMonomorphization::ExpectedReturnType {
+            span,
+            name,
+            in_ty,
+            ret_ty
+        });
 
         require!(
             matches!(
@@ -1733,23 +1747,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         require_simd!(ret_ty, SimdReturn);
 
         // Of the same length:
-        require!(
-            values_len == mask_len,
-            InvalidMonomorphization::ThirdArgumentLength {
-                span,
-                name,
-                in_len: mask_len,
-                in_ty: mask_ty,
-                arg_ty: values_ty,
-                out_len: values_len
-            }
-        );
+        require!(values_len == mask_len, InvalidMonomorphization::ThirdArgumentLength {
+            span,
+            name,
+            in_len: mask_len,
+            in_ty: mask_ty,
+            arg_ty: values_ty,
+            out_len: values_len
+        });
 
         // The return type must match the last argument type
-        require!(
-            ret_ty == values_ty,
-            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
-        );
+        require!(ret_ty == values_ty, InvalidMonomorphization::ExpectedReturnType {
+            span,
+            name,
+            in_ty: values_ty,
+            ret_ty
+        });
 
         require!(
             matches!(
@@ -1831,23 +1844,21 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         // Of the same length:
-        require!(
-            values_len == mask_len,
-            InvalidMonomorphization::ThirdArgumentLength {
-                span,
-                name,
-                in_len: mask_len,
-                in_ty: mask_ty,
-                arg_ty: values_ty,
-                out_len: values_len
-            }
-        );
+        require!(values_len == mask_len, InvalidMonomorphization::ThirdArgumentLength {
+            span,
+            name,
+            in_len: mask_len,
+            in_ty: mask_ty,
+            arg_ty: values_ty,
+            out_len: values_len
+        });
 
         // The second argument must be a mutable pointer type matching the element type
         require!(
             matches!(
                 *pointer_ty.kind(),
-                ty::RawPtr(p_ty, p_mutbl) if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
+                ty::RawPtr(p_ty, p_mutbl)
+                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1919,28 +1930,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
 
         // Of the same length:
-        require!(
-            in_len == element_len1,
-            InvalidMonomorphization::SecondArgumentLength {
-                span,
-                name,
-                in_len,
-                in_ty,
-                arg_ty: arg_tys[1],
-                out_len: element_len1
-            }
-        );
-        require!(
-            in_len == element_len2,
-            InvalidMonomorphization::ThirdArgumentLength {
-                span,
-                name,
-                in_len,
-                in_ty,
-                arg_ty: arg_tys[2],
-                out_len: element_len2
-            }
-        );
+        require!(in_len == element_len1, InvalidMonomorphization::SecondArgumentLength {
+            span,
+            name,
+            in_len,
+            in_ty,
+            arg_ty: arg_tys[1],
+            out_len: element_len1
+        });
+        require!(in_len == element_len2, InvalidMonomorphization::ThirdArgumentLength {
+            span,
+            name,
+            in_len,
+            in_ty,
+            arg_ty: arg_tys[2],
+            out_len: element_len2
+        });
 
         require!(
             matches!(
@@ -2014,10 +2019,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
          $identity:expr) => {
             if name == sym::$name {
-                require!(
-                    ret_ty == in_elem,
-                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
-                );
+                require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType {
+                    span,
+                    name,
+                    in_elem,
+                    in_ty,
+                    ret_ty
+                });
                 return match in_elem.kind() {
                     ty::Int(_) | ty::Uint(_) => {
                         let r = bx.$integer_reduce(args[0].immediate());
@@ -2086,10 +2094,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     macro_rules! minmax_red {
         ($name:ident: $int_red:ident, $float_red:ident) => {
             if name == sym::$name {
-                require!(
-                    ret_ty == in_elem,
-                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
-                );
+                require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType {
+                    span,
+                    name,
+                    in_elem,
+                    in_ty,
+                    ret_ty
+                });
                 return match in_elem.kind() {
                     ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                     ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
@@ -2114,10 +2125,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         ($name:ident : $red:ident, $boolean:expr) => {
             if name == sym::$name {
                 let input = if !$boolean {
-                    require!(
-                        ret_ty == in_elem,
-                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
-                    );
+                    require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType {
+                        span,
+                        name,
+                        in_elem,
+                        in_ty,
+                        ret_ty
+                    });
                     args[0].immediate()
                 } else {
                     match in_elem.kind() {
@@ -2163,27 +2177,25 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_cast_ptr {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
-        require!(
-            in_len == out_len,
-            InvalidMonomorphization::ReturnLengthInputType {
-                span,
-                name,
-                in_len,
-                in_ty,
-                ret_ty,
-                out_len
-            }
-        );
+        require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType {
+            span,
+            name,
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        });
 
         match in_elem.kind() {
             ty::RawPtr(p_ty, _) => {
                 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
-                require!(
-                    metadata.is_unit(),
-                    InvalidMonomorphization::CastFatPointer { span, name, ty: in_elem }
-                );
+                require!(metadata.is_unit(), InvalidMonomorphization::CastWidePointer {
+                    span,
+                    name,
+                    ty: in_elem
+                });
             }
             _ => {
                 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
@@ -2194,10 +2206,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
-                require!(
-                    metadata.is_unit(),
-                    InvalidMonomorphization::CastFatPointer { span, name, ty: out_elem }
-                );
+                require!(metadata.is_unit(), InvalidMonomorphization::CastWidePointer {
+                    span,
+                    name,
+                    ty: out_elem
+                });
             }
             _ => {
                 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
@@ -2209,17 +2222,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_expose_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
-        require!(
-            in_len == out_len,
-            InvalidMonomorphization::ReturnLengthInputType {
-                span,
-                name,
-                in_len,
-                in_ty,
-                ret_ty,
-                out_len
-            }
-        );
+        require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType {
+            span,
+            name,
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        });
 
         match in_elem.kind() {
             ty::RawPtr(_, _) => {}
@@ -2237,17 +2247,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_with_exposed_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
-        require!(
-            in_len == out_len,
-            InvalidMonomorphization::ReturnLengthInputType {
-                span,
-                name,
-                in_len,
-                in_ty,
-                ret_ty,
-                out_len
-            }
-        );
+        require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType {
+            span,
+            name,
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        });
 
         match in_elem.kind() {
             ty::Uint(ty::UintTy::Usize) => {}
@@ -2265,17 +2272,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_cast || name == sym::simd_as {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
-        require!(
-            in_len == out_len,
-            InvalidMonomorphization::ReturnLengthInputType {
-                span,
-                name,
-                in_len,
-                in_ty,
-                ret_ty,
-                out_len
-            }
-        );
+        require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType {
+            span,
+            name,
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        });
         // casting cares about nominal type, not just structural type
         if in_elem == out_elem {
             return Ok(args[0].immediate());
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 7f26bbd7f87..b85d28a2f1f 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -11,6 +11,7 @@
 #![feature(assert_matches)]
 #![feature(exact_size_is_empty)]
 #![feature(extern_types)]
+#![feature(file_buffered)]
 #![feature(hash_raw_entry)]
 #![feature(impl_trait_in_assoc_type)]
 #![feature(iter_intersperse)]
@@ -41,8 +42,8 @@ use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_middle::ty::TyCtxt;
 use rustc_middle::util::Providers;
-use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
 use rustc_session::Session;
+use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
 use rustc_span::symbol::Symbol;
 
 mod back {
@@ -166,7 +167,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     fn print_pass_timings(&self) {
         unsafe {
             let mut size = 0;
-            let cstr = llvm::LLVMRustPrintPassTimings(std::ptr::addr_of_mut!(size));
+            let cstr = llvm::LLVMRustPrintPassTimings(&raw mut size);
             if cstr.is_null() {
                 println!("failed to get pass timings");
             } else {
@@ -179,7 +180,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     fn print_statistics(&self) {
         unsafe {
             let mut size = 0;
-            let cstr = llvm::LLVMRustPrintStatistics(std::ptr::addr_of_mut!(size));
+            let cstr = llvm::LLVMRustPrintStatistics(&raw mut size);
             if cstr.is_null() {
                 println!("failed to get pass stats");
             } else {
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index e84ab0aa538..d84ae8d8836 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,23 +1,51 @@
 #![allow(non_camel_case_types)]
 #![allow(non_upper_case_globals)]
 
+use std::fmt::Debug;
 use std::marker::PhantomData;
+use std::ptr;
 
 use libc::{c_char, c_int, c_uint, c_ulonglong, c_void, size_t};
+use rustc_macros::TryFromU32;
+use rustc_target::spec::SymbolVisibility;
 
+use super::RustString;
 use super::debuginfo::{
     DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
     DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
     DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable,
     DebugEmissionKind, DebugNameTableKind,
 };
-use super::RustString;
 
 pub type Bool = c_uint;
 
 pub const True: Bool = 1 as Bool;
 pub const False: Bool = 0 as Bool;
 
+/// Wrapper for a raw enum value returned from LLVM's C APIs.
+///
+/// For C enums returned by LLVM, it's risky to use a Rust enum as the return
+/// type, because it would be UB if a later version of LLVM adds a new enum
+/// value and returns it. Instead, return this raw wrapper, then convert to the
+/// Rust-side enum explicitly.
+#[repr(transparent)]
+pub struct RawEnum<T> {
+    value: u32,
+    /// We don't own or consume a `T`, but we can produce one.
+    _rust_side_type: PhantomData<fn() -> T>,
+}
+
+impl<T: TryFrom<u32>> RawEnum<T> {
+    #[track_caller]
+    pub(crate) fn to_rust(self) -> T
+    where
+        T::Error: Debug,
+    {
+        // If this fails, the Rust-side enum is out of sync with LLVM's enum.
+        T::try_from(self.value).expect("enum value returned by LLVM should be known")
+    }
+}
+
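A minimal sketch of how a `RawEnum` return value is consumed, assuming a `some_global: &Value` is already in scope (the real conversions happen in safe wrappers such as `get_linkage`, added later in this diff):

    // Hypothetical call site: `LLVMGetLinkage` now returns RawEnum<Linkage>,
    // and `to_rust()` performs the checked u32 -> Linkage conversion,
    // panicking if LLVM hands back a value the Rust enum does not know.
    let linkage: Linkage = unsafe { LLVMGetLinkage(some_global) }.to_rust();
    assert!(linkage == Linkage::ExternalLinkage);
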
 #[derive(Copy, Clone, PartialEq)]
 #[repr(C)]
 #[allow(dead_code)] // Variants constructed by C++.
@@ -58,7 +86,7 @@ pub enum LLVMMachineType {
     ARM = 0x01c0,
 }
 
-/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h.
+/// Must match the layout of `LLVMRustModuleFlagMergeBehavior`.
 ///
 /// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are
 /// resolved according to the merge behaviors specified here. Flags differing only in merge
@@ -66,9 +94,13 @@ pub enum LLVMMachineType {
 ///
 /// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably,
 /// 'Error' and 'Warning' cannot be mixed for a given flag.
+///
+/// There is a stable LLVM-C version of this enum (`LLVMModuleFlagBehavior`),
+/// but as of LLVM 19 it does not support all of the enum values in the unstable
+/// C++ API.
 #[derive(Copy, Clone, PartialEq)]
 #[repr(C)]
-pub enum LLVMModFlagBehavior {
+pub enum ModuleFlagMergeBehavior {
     Error = 1,
     Warning = 2,
     Require = 3,
@@ -107,32 +139,52 @@ pub enum CallConv {
     AvrInterrupt = 85,
 }
 
-/// LLVMRustLinkage
-#[derive(Copy, Clone, PartialEq)]
+/// Must match the layout of `LLVMLinkage`.
+#[derive(Copy, Clone, PartialEq, TryFromU32)]
 #[repr(C)]
 pub enum Linkage {
     ExternalLinkage = 0,
     AvailableExternallyLinkage = 1,
     LinkOnceAnyLinkage = 2,
     LinkOnceODRLinkage = 3,
-    WeakAnyLinkage = 4,
-    WeakODRLinkage = 5,
-    AppendingLinkage = 6,
-    InternalLinkage = 7,
-    PrivateLinkage = 8,
-    ExternalWeakLinkage = 9,
-    CommonLinkage = 10,
+    #[deprecated = "marked obsolete by LLVM"]
+    LinkOnceODRAutoHideLinkage = 4,
+    WeakAnyLinkage = 5,
+    WeakODRLinkage = 6,
+    AppendingLinkage = 7,
+    InternalLinkage = 8,
+    PrivateLinkage = 9,
+    #[deprecated = "marked obsolete by LLVM"]
+    DLLImportLinkage = 10,
+    #[deprecated = "marked obsolete by LLVM"]
+    DLLExportLinkage = 11,
+    ExternalWeakLinkage = 12,
+    #[deprecated = "marked obsolete by LLVM"]
+    GhostLinkage = 13,
+    CommonLinkage = 14,
+    LinkerPrivateLinkage = 15,
+    LinkerPrivateWeakLinkage = 16,
 }
 
-// LLVMRustVisibility
+/// Must match the layout of `LLVMVisibility`.
 #[repr(C)]
-#[derive(Copy, Clone, PartialEq)]
+#[derive(Copy, Clone, PartialEq, TryFromU32)]
 pub enum Visibility {
     Default = 0,
     Hidden = 1,
     Protected = 2,
 }
 
+impl Visibility {
+    pub fn from_generic(visibility: SymbolVisibility) -> Self {
+        match visibility {
+            SymbolVisibility::Hidden => Visibility::Hidden,
+            SymbolVisibility::Protected => Visibility::Protected,
+            SymbolVisibility::Interposable => Visibility::Default,
+        }
+    }
+}
+
 /// LLVMUnnamedAddr
 #[repr(C)]
 pub enum UnnamedAddr {
@@ -220,17 +272,18 @@ pub enum IntPredicate {
 
 impl IntPredicate {
     pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
+        use rustc_codegen_ssa::common::IntPredicate as Common;
         match intpre {
-            rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ,
-            rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE,
-            rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT,
-            rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE,
-            rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT,
-            rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE,
-            rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT,
-            rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE,
-            rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT,
-            rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE,
+            Common::IntEQ => Self::IntEQ,
+            Common::IntNE => Self::IntNE,
+            Common::IntUGT => Self::IntUGT,
+            Common::IntUGE => Self::IntUGE,
+            Common::IntULT => Self::IntULT,
+            Common::IntULE => Self::IntULE,
+            Common::IntSGT => Self::IntSGT,
+            Common::IntSGE => Self::IntSGE,
+            Common::IntSLT => Self::IntSLT,
+            Common::IntSLE => Self::IntSLE,
         }
     }
 }
@@ -259,27 +312,24 @@ pub enum RealPredicate {
 
 impl RealPredicate {
     pub fn from_generic(realp: rustc_codegen_ssa::common::RealPredicate) -> Self {
+        use rustc_codegen_ssa::common::RealPredicate as Common;
         match realp {
-            rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
-                RealPredicate::RealPredicateFalse
-            }
-            rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
-            rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
-            rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
-            rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
-            rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
-            rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
-            rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
-            rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
-            rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
-            rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
-            rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
-            rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
-            rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
-            rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
-            rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
-                RealPredicate::RealPredicateTrue
-            }
+            Common::RealPredicateFalse => Self::RealPredicateFalse,
+            Common::RealOEQ => Self::RealOEQ,
+            Common::RealOGT => Self::RealOGT,
+            Common::RealOGE => Self::RealOGE,
+            Common::RealOLT => Self::RealOLT,
+            Common::RealOLE => Self::RealOLE,
+            Common::RealONE => Self::RealONE,
+            Common::RealORD => Self::RealORD,
+            Common::RealUNO => Self::RealUNO,
+            Common::RealUEQ => Self::RealUEQ,
+            Common::RealUGT => Self::RealUGT,
+            Common::RealUGE => Self::RealUGE,
+            Common::RealULT => Self::RealULT,
+            Common::RealULE => Self::RealULE,
+            Common::RealUNE => Self::RealUNE,
+            Common::RealPredicateTrue => Self::RealPredicateTrue,
         }
     }
 }
@@ -311,26 +361,27 @@ pub enum TypeKind {
 
 impl TypeKind {
     pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
+        use rustc_codegen_ssa::common::TypeKind as Common;
         match self {
-            TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
-            TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
-            TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
-            TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
-            TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
-            TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
-            TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
-            TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
-            TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
-            TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
-            TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
-            TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
-            TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
-            TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
-            TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
-            TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
-            TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
-            TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
-            TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX,
+            Self::Void => Common::Void,
+            Self::Half => Common::Half,
+            Self::Float => Common::Float,
+            Self::Double => Common::Double,
+            Self::X86_FP80 => Common::X86_FP80,
+            Self::FP128 => Common::FP128,
+            Self::PPC_FP128 => Common::PPC_FP128,
+            Self::Label => Common::Label,
+            Self::Integer => Common::Integer,
+            Self::Function => Common::Function,
+            Self::Struct => Common::Struct,
+            Self::Array => Common::Array,
+            Self::Pointer => Common::Pointer,
+            Self::Vector => Common::Vector,
+            Self::Metadata => Common::Metadata,
+            Self::Token => Common::Token,
+            Self::ScalableVector => Common::ScalableVector,
+            Self::BFloat => Common::BFloat,
+            Self::X86_AMX => Common::X86_AMX,
         }
     }
 }
@@ -354,18 +405,19 @@ pub enum AtomicRmwBinOp {
 
 impl AtomicRmwBinOp {
     pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
+        use rustc_codegen_ssa::common::AtomicRmwBinOp as Common;
         match op {
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
-            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin,
+            Common::AtomicXchg => Self::AtomicXchg,
+            Common::AtomicAdd => Self::AtomicAdd,
+            Common::AtomicSub => Self::AtomicSub,
+            Common::AtomicAnd => Self::AtomicAnd,
+            Common::AtomicNand => Self::AtomicNand,
+            Common::AtomicOr => Self::AtomicOr,
+            Common::AtomicXor => Self::AtomicXor,
+            Common::AtomicMax => Self::AtomicMax,
+            Common::AtomicMin => Self::AtomicMin,
+            Common::AtomicUMax => Self::AtomicUMax,
+            Common::AtomicUMin => Self::AtomicUMin,
         }
     }
 }
@@ -387,17 +439,14 @@ pub enum AtomicOrdering {
 
 impl AtomicOrdering {
     pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
+        use rustc_codegen_ssa::common::AtomicOrdering as Common;
         match ao {
-            rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
-            rustc_codegen_ssa::common::AtomicOrdering::Relaxed => AtomicOrdering::Monotonic,
-            rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire,
-            rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release,
-            rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => {
-                AtomicOrdering::AcquireRelease
-            }
-            rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => {
-                AtomicOrdering::SequentiallyConsistent
-            }
+            Common::Unordered => Self::Unordered,
+            Common::Relaxed => Self::Monotonic,
+            Common::Acquire => Self::Acquire,
+            Common::Release => Self::Release,
+            Common::AcquireRelease => Self::AcquireRelease,
+            Common::SequentiallyConsistent => Self::SequentiallyConsistent,
         }
     }
 }
@@ -563,13 +612,11 @@ pub enum ArchiveKind {
     K_AIXBIG,
 }
 
-// LLVMRustThinLTOData
 unsafe extern "C" {
+    // LLVMRustThinLTOData
     pub type ThinLTOData;
-}
 
-// LLVMRustThinLTOBuffer
-unsafe extern "C" {
+    // LLVMRustThinLTOBuffer
     pub type ThinLTOBuffer;
 }
 
@@ -633,27 +680,14 @@ struct InvariantOpaque<'a> {
 // Opaque pointer types
 unsafe extern "C" {
     pub type Module;
-}
-unsafe extern "C" {
     pub type Context;
-}
-unsafe extern "C" {
     pub type Type;
-}
-unsafe extern "C" {
     pub type Value;
-}
-unsafe extern "C" {
     pub type ConstantInt;
-}
-unsafe extern "C" {
     pub type Attribute;
-}
-unsafe extern "C" {
     pub type Metadata;
-}
-unsafe extern "C" {
     pub type BasicBlock;
+    pub type Comdat;
 }
 #[repr(C)]
 pub struct Builder<'a>(InvariantOpaque<'a>);
@@ -661,11 +695,7 @@ pub struct Builder<'a>(InvariantOpaque<'a>);
 pub struct PassManager<'a>(InvariantOpaque<'a>);
 unsafe extern "C" {
     pub type Pass;
-}
-unsafe extern "C" {
     pub type TargetMachine;
-}
-unsafe extern "C" {
     pub type Archive;
 }
 #[repr(C)]
@@ -674,17 +704,14 @@ pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
 pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
 unsafe extern "C" {
     pub type Twine;
-}
-unsafe extern "C" {
     pub type DiagnosticInfo;
-}
-unsafe extern "C" {
     pub type SMDiagnostic;
 }
 #[repr(C)]
 pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
+/// Opaque pointee of `LLVMOperandBundleRef`.
 #[repr(C)]
-pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
+pub(crate) struct OperandBundle<'a>(InvariantOpaque<'a>);
 #[repr(C)]
 pub struct Linker<'a>(InvariantOpaque<'a>);
 
@@ -912,17 +939,7 @@ unsafe extern "C" {
     pub fn LLVMGetPoison(Ty: &Type) -> &Value;
 
     // Operations on metadata
-    // FIXME: deprecated, replace with LLVMMDStringInContext2
-    pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
-
     pub fn LLVMMDStringInContext2(C: &Context, Str: *const c_char, SLen: size_t) -> &Metadata;
-
-    // FIXME: deprecated, replace with LLVMMDNodeInContext2
-    pub fn LLVMMDNodeInContext<'a>(
-        C: &'a Context,
-        Vals: *const &'a Value,
-        Count: c_uint,
-    ) -> &'a Value;
     pub fn LLVMMDNodeInContext2<'a>(
         C: &'a Context,
         Vals: *const &'a Metadata,
@@ -970,7 +987,11 @@ unsafe extern "C" {
 
     // Operations on global variables, functions, and aliases (globals)
     pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
+    pub fn LLVMGetLinkage(Global: &Value) -> RawEnum<Linkage>;
+    pub fn LLVMSetLinkage(Global: &Value, RustLinkage: Linkage);
     pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
+    pub fn LLVMGetVisibility(Global: &Value) -> RawEnum<Visibility>;
+    pub fn LLVMSetVisibility(Global: &Value, Viz: Visibility);
     pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
     pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
     pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
@@ -1516,6 +1537,53 @@ unsafe extern "C" {
     pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr);
 
     pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
+
+    pub fn LLVMGetOrInsertComdat(M: &Module, Name: *const c_char) -> &Comdat;
+    pub fn LLVMSetComdat(V: &Value, C: &Comdat);
+
+    pub(crate) fn LLVMCreateOperandBundle(
+        Tag: *const c_char,
+        TagLen: size_t,
+        Args: *const &'_ Value,
+        NumArgs: c_uint,
+    ) -> *mut OperandBundle<'_>;
+    pub(crate) fn LLVMDisposeOperandBundle(Bundle: ptr::NonNull<OperandBundle<'_>>);
+
+    pub(crate) fn LLVMBuildCallWithOperandBundles<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Bundles: *const &OperandBundle<'a>,
+        NumBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub(crate) fn LLVMBuildInvokeWithOperandBundles<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Then: &'a BasicBlock,
+        Catch: &'a BasicBlock,
+        Bundles: *const &OperandBundle<'a>,
+        NumBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub(crate) fn LLVMBuildCallBr<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        DefaultDest: &'a BasicBlock,
+        IndirectDests: *const &'a BasicBlock,
+        NumIndirectDests: c_uint,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Bundles: *const &OperandBundle<'a>,
+        NumBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
 }
 
 #[link(name = "llvm-wrapper", kind = "static")]
@@ -1543,10 +1611,6 @@ unsafe extern "C" {
     ) -> bool;
 
     // Operations on global variables, functions, and aliases (globals)
-    pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
-    pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
-    pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
-    pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
     pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool);
 
     // Operations on global variables
@@ -1605,52 +1669,11 @@ unsafe extern "C" {
         AttrsLen: size_t,
     );
 
-    pub fn LLVMRustBuildInvoke<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Fn: &'a Value,
-        Args: *const &'a Value,
-        NumArgs: c_uint,
-        Then: &'a BasicBlock,
-        Catch: &'a BasicBlock,
-        OpBundles: *const &OperandBundleDef<'a>,
-        NumOpBundles: c_uint,
-        Name: *const c_char,
-    ) -> &'a Value;
-
-    pub fn LLVMRustBuildCallBr<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Fn: &'a Value,
-        DefaultDest: &'a BasicBlock,
-        IndirectDests: *const &'a BasicBlock,
-        NumIndirectDests: c_uint,
-        Args: *const &'a Value,
-        NumArgs: c_uint,
-        OpBundles: *const &OperandBundleDef<'a>,
-        NumOpBundles: c_uint,
-        Name: *const c_char,
-    ) -> &'a Value;
-
     pub fn LLVMRustSetFastMath(Instr: &Value);
     pub fn LLVMRustSetAlgebraicMath(Instr: &Value);
     pub fn LLVMRustSetAllowReassoc(Instr: &Value);
 
     // Miscellaneous instructions
-    pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value;
-    pub fn LLVMRustGetInstrProfMCDCParametersIntrinsic(M: &Module) -> &Value;
-    pub fn LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(M: &Module) -> &Value;
-    pub fn LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(M: &Module) -> &Value;
-
-    pub fn LLVMRustBuildCall<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Fn: &'a Value,
-        Args: *const &'a Value,
-        NumArgs: c_uint,
-        OpBundles: *const &OperandBundleDef<'a>,
-        NumOpBundles: c_uint,
-    ) -> &'a Value;
     pub fn LLVMRustBuildMemCpy<'a>(
         B: &Builder<'a>,
         Dst: &'a Value,
@@ -1767,7 +1790,7 @@ unsafe extern "C" {
     ) -> bool;
 
     #[allow(improper_ctypes)]
-    pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
+    pub(crate) fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
         Filenames: *const *const c_char,
         FilenamesLen: size_t,
         Lengths: *const size_t,
@@ -1776,33 +1799,39 @@ unsafe extern "C" {
     );
 
     #[allow(improper_ctypes)]
-    pub fn LLVMRustCoverageWriteMappingToBuffer(
+    pub(crate) fn LLVMRustCoverageWriteMappingToBuffer(
         VirtualFileMappingIDs: *const c_uint,
         NumVirtualFileMappingIDs: c_uint,
         Expressions: *const crate::coverageinfo::ffi::CounterExpression,
         NumExpressions: c_uint,
-        MappingRegions: *const crate::coverageinfo::ffi::CounterMappingRegion,
-        NumMappingRegions: c_uint,
+        CodeRegions: *const crate::coverageinfo::ffi::CodeRegion,
+        NumCodeRegions: c_uint,
+        BranchRegions: *const crate::coverageinfo::ffi::BranchRegion,
+        NumBranchRegions: c_uint,
+        MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion,
+        NumMCDCBranchRegions: c_uint,
+        MCDCDecisionRegions: *const crate::coverageinfo::ffi::MCDCDecisionRegion,
+        NumMCDCDecisionRegions: c_uint,
         BufferOut: &RustString,
     );
 
-    pub fn LLVMRustCoverageCreatePGOFuncNameVar(
+    pub(crate) fn LLVMRustCoverageCreatePGOFuncNameVar(
         F: &Value,
         FuncName: *const c_char,
         FuncNameLen: size_t,
     ) -> &Value;
-    pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
+    pub(crate) fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
 
     #[allow(improper_ctypes)]
-    pub fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString);
+    pub(crate) fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString);
 
     #[allow(improper_ctypes)]
-    pub fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString);
+    pub(crate) fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString);
 
     #[allow(improper_ctypes)]
-    pub fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
+    pub(crate) fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
 
-    pub fn LLVMRustCoverageMappingVersion() -> u32;
+    pub(crate) fn LLVMRustCoverageMappingVersion() -> u32;
     pub fn LLVMRustDebugMetadataVersion() -> u32;
     pub fn LLVMRustVersionMajor() -> u32;
     pub fn LLVMRustVersionMinor() -> u32;
@@ -1814,21 +1843,21 @@ unsafe extern "C" {
     /// "compatible" means depends on the merge behaviors involved.
     pub fn LLVMRustAddModuleFlagU32(
         M: &Module,
-        merge_behavior: LLVMModFlagBehavior,
-        name: *const c_char,
-        value: u32,
+        MergeBehavior: ModuleFlagMergeBehavior,
+        Name: *const c_char,
+        NameLen: size_t,
+        Value: u32,
     );
 
     pub fn LLVMRustAddModuleFlagString(
         M: &Module,
-        merge_behavior: LLVMModFlagBehavior,
-        name: *const c_char,
-        value: *const c_char,
-        value_len: size_t,
+        MergeBehavior: ModuleFlagMergeBehavior,
+        Name: *const c_char,
+        NameLen: size_t,
+        Value: *const c_char,
+        ValueLen: size_t,
     );
 
-    pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
-
     pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
 
     pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>);
@@ -2161,12 +2190,8 @@ unsafe extern "C" {
 
     pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
 
-    pub fn LLVMRustPrintTargetCPUs(
-        T: &TargetMachine,
-        cpu: *const c_char,
-        print: unsafe extern "C" fn(out: *mut c_void, string: *const c_char, len: usize),
-        out: *mut c_void,
-    );
+    #[allow(improper_ctypes)]
+    pub(crate) fn LLVMRustPrintTargetCPUs(TM: &TargetMachine, OutStr: &RustString);
     pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
     pub fn LLVMRustGetTargetFeature(
         T: &TargetMachine,
@@ -2175,9 +2200,10 @@ unsafe extern "C" {
         Desc: &mut *const c_char,
     );
 
-    pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+    pub fn LLVMRustGetHostCPUName(LenOut: &mut size_t) -> *const u8;
 
-    // This function makes copies of pointed to data, so the data's lifetime may end after this function returns
+    // This function makes copies of pointed-to data, so the data's lifetime may end after this
+    // function returns.
     pub fn LLVMRustCreateTargetMachine(
         Triple: *const c_char,
         CPU: *const c_char,
@@ -2239,7 +2265,6 @@ unsafe extern "C" {
         PGOUsePath: *const c_char,
         InstrumentCoverage: bool,
         InstrProfileOutput: *const c_char,
-        InstrumentGCOV: bool,
         PGOSampleUsePath: *const c_char,
         DebugInfoForProfiling: bool,
         llvm_selfprofiler: *mut c_void,
@@ -2337,16 +2362,8 @@ unsafe extern "C" {
 
     pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
 
-    pub fn LLVMRustBuildOperandBundleDef(
-        Name: *const c_char,
-        Inputs: *const &'_ Value,
-        NumInputs: c_uint,
-    ) -> &mut OperandBundleDef<'_>;
-    pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
-
     pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
 
-    pub fn LLVMRustSetComdat<'a>(M: &'a Module, V: &'a Value, Name: *const c_char, NameLen: size_t);
     pub fn LLVMRustSetModulePICLevel(M: &Module);
     pub fn LLVMRustSetModulePIELevel(M: &Module);
     pub fn LLVMRustSetModuleCodeModel(M: &Module, Model: CodeModel);
@@ -2370,9 +2387,9 @@ unsafe extern "C" {
     pub fn LLVMRustThinLTOBufferThinLinkDataLen(M: &ThinLTOBuffer) -> size_t;
     pub fn LLVMRustCreateThinLTOData(
         Modules: *const ThinLTOModule,
-        NumModules: c_uint,
+        NumModules: size_t,
         PreservedSymbols: *const *const c_char,
-        PreservedSymbolsLen: c_uint,
+        PreservedSymbolsLen: size_t,
     ) -> Option<&'static mut ThinLTOData>;
     pub fn LLVMRustPrepareThinLTORename(
         Data: &ThinLTOData,
@@ -2397,6 +2414,7 @@ unsafe extern "C" {
         data: *const u8,
         len: usize,
         name: *const u8,
+        name_len: usize,
         out_len: &mut usize,
     ) -> *const u8;
 
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index d0db350a149..3b0bf47366e 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -2,13 +2,14 @@
 
 use std::cell::RefCell;
 use std::ffi::{CStr, CString};
+use std::ops::Deref;
+use std::ptr;
 use std::str::FromStr;
 use std::string::FromUtf8Error;
 
 use libc::c_uint;
-use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_abi::{Align, Size, WrappingRange};
 use rustc_llvm::RustString;
-use rustc_target::abi::{Align, Size, WrappingRange};
 
 pub use self::AtomicRmwBinOp::*;
 pub use self::CallConv::*;
@@ -17,13 +18,13 @@ pub use self::IntPredicate::*;
 pub use self::Linkage::*;
 pub use self::MetadataType::*;
 pub use self::RealPredicate::*;
+pub use self::ffi::*;
+use crate::common::AsCCharPtr;
 
 pub mod archive_ro;
 pub mod diagnostic;
 mod ffi;
 
-pub use self::ffi::*;
-
 impl LLVMRustResult {
     pub fn into_result(self) -> Result<(), ()> {
         match self {
@@ -53,9 +54,9 @@ pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &str, value: &str) -
     unsafe {
         LLVMCreateStringAttribute(
             llcx,
-            attr.as_ptr().cast(),
+            attr.as_c_char_ptr(),
             attr.len().try_into().unwrap(),
-            value.as_ptr().cast(),
+            value.as_c_char_ptr(),
             value.len().try_into().unwrap(),
         )
     }
@@ -65,7 +66,7 @@ pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute {
     unsafe {
         LLVMCreateStringAttribute(
             llcx,
-            attr.as_ptr().cast(),
+            attr.as_c_char_ptr(),
             attr.len().try_into().unwrap(),
             std::ptr::null(),
             0,
@@ -178,10 +179,10 @@ pub fn SetFunctionCallConv(fn_: &Value, cc: CallConv) {
 // function.
 // For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52
 pub fn SetUniqueComdat(llmod: &Module, val: &Value) {
-    unsafe {
-        let name = get_value_name(val);
-        LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
-    }
+    let name_buf = get_value_name(val).to_vec();
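+    // The raw value name may or may not already end in a nul byte; accept both forms.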
+    let name =
+        CString::from_vec_with_nul(name_buf).or_else(|buf| CString::new(buf.into_bytes())).unwrap();
+    set_comdat(llmod, val, &name);
 }
 
 pub fn SetUnnamedAddress(global: &Value, unnamed: UnnamedAddr) {
@@ -210,15 +211,13 @@ impl MemoryEffects {
     }
 }
 
-pub fn set_section(llglobal: &Value, section_name: &str) {
-    let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
+pub fn set_section(llglobal: &Value, section_name: &CStr) {
     unsafe {
-        LLVMSetSection(llglobal, section_name_cstr.as_ptr());
+        LLVMSetSection(llglobal, section_name.as_ptr());
     }
 }
 
-pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
-    let name_cstr = CString::new(name).expect("unexpected CString error");
+pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name_cstr: &CStr) -> &'a Value {
     unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
 }
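With `set_section` and `add_global` now taking `&CStr`, call sites can pass a C-string literal and avoid the `CString` allocation that used to happen inside these helpers. A hedged sketch, assuming `llmod: &Module` and `ty: &Type` are in scope and using made-up names:

    // Sketch: create a global in an illustrative section.
    let llglobal = add_global(llmod, ty, c"__example_global");
    set_section(llglobal, c"__example_section");
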
 
@@ -234,15 +233,23 @@ pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
     }
 }
 
+pub fn get_linkage(llglobal: &Value) -> Linkage {
+    unsafe { LLVMGetLinkage(llglobal) }.to_rust()
+}
+
 pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
     unsafe {
-        LLVMRustSetLinkage(llglobal, linkage);
+        LLVMSetLinkage(llglobal, linkage);
     }
 }
 
+pub fn get_visibility(llglobal: &Value) -> Visibility {
+    unsafe { LLVMGetVisibility(llglobal) }.to_rust()
+}
+
 pub fn set_visibility(llglobal: &Value, visibility: Visibility) {
     unsafe {
-        LLVMRustSetVisibility(llglobal, visibility);
+        LLVMSetVisibility(llglobal, visibility);
     }
 }
 
@@ -252,9 +259,14 @@ pub fn set_alignment(llglobal: &Value, align: Align) {
     }
 }
 
-pub fn set_comdat(llmod: &Module, llglobal: &Value, name: &str) {
+/// Get the `name`d comdat from `llmod` and assign it to `llglobal`.
+///
+/// Inserts the comdat into `llmod` if it does not exist.
+/// It is an error to call this if the target does not support comdat.
+pub fn set_comdat(llmod: &Module, llglobal: &Value, name: &CStr) {
     unsafe {
-        LLVMRustSetComdat(llmod, llglobal, name.as_ptr().cast(), name.len());
+        let comdat = LLVMGetOrInsertComdat(llmod, name.as_ptr());
+        LLVMSetComdat(llglobal, comdat);
     }
 }
 
@@ -283,7 +295,7 @@ pub fn get_value_name(value: &Value) -> &[u8] {
 /// Safe wrapper for `LLVMSetValueName2` from a byte slice
 pub fn set_value_name(value: &Value, name: &[u8]) {
     unsafe {
-        let data = name.as_ptr().cast();
+        let data = name.as_c_char_ptr();
         LLVMSetValueName2(value, data, name.len());
     }
 }
@@ -320,24 +332,68 @@ pub fn last_error() -> Option<String> {
     }
 }
 
-pub struct OperandBundleDef<'a> {
-    pub raw: &'a mut ffi::OperandBundleDef<'a>,
+/// Owns an [`OperandBundle`], and will dispose of it when dropped.
+pub(crate) struct OperandBundleOwned<'a> {
+    raw: ptr::NonNull<OperandBundle<'a>>,
 }
 
-impl<'a> OperandBundleDef<'a> {
-    pub fn new(name: &str, vals: &[&'a Value]) -> Self {
-        let name = SmallCStr::new(name);
-        let def = unsafe {
-            LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
+impl<'a> OperandBundleOwned<'a> {
+    pub(crate) fn new(name: &str, vals: &[&'a Value]) -> Self {
+        let raw = unsafe {
+            LLVMCreateOperandBundle(
+                name.as_c_char_ptr(),
+                name.len(),
+                vals.as_ptr(),
+                vals.len() as c_uint,
+            )
         };
-        OperandBundleDef { raw: def }
+        OperandBundleOwned { raw: ptr::NonNull::new(raw).unwrap() }
     }
 }
 
-impl Drop for OperandBundleDef<'_> {
+impl Drop for OperandBundleOwned<'_> {
     fn drop(&mut self) {
         unsafe {
-            LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
+            LLVMDisposeOperandBundle(self.raw);
         }
     }
 }
+
+impl<'a> Deref for OperandBundleOwned<'a> {
+    type Target = OperandBundle<'a>;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: The returned reference is opaque and can only be used for FFI.
+        // It is valid for as long as `&self` is.
+        unsafe { self.raw.as_ref() }
+    }
+}
+
+pub(crate) fn add_module_flag_u32(
+    module: &Module,
+    merge_behavior: ModuleFlagMergeBehavior,
+    key: &str,
+    value: u32,
+) {
+    unsafe {
+        LLVMRustAddModuleFlagU32(module, merge_behavior, key.as_c_char_ptr(), key.len(), value);
+    }
+}
+
+pub(crate) fn add_module_flag_str(
+    module: &Module,
+    merge_behavior: ModuleFlagMergeBehavior,
+    key: &str,
+    value: &str,
+) {
+    unsafe {
+        LLVMRustAddModuleFlagString(
+            module,
+            merge_behavior,
+            key.as_c_char_ptr(),
+            key.len(),
+            value.as_c_char_ptr(),
+            value.len(),
+        );
+    }
+}
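
The `OperandBundleOwned` wrapper introduced above follows the usual owned-FFI-handle pattern: hold a `NonNull` to the raw object, free it in `Drop`, and hand out borrows through `Deref`. Below is a minimal standalone sketch of that pattern only; `Opaque`, `create_handle`, and `dispose_handle` are hypothetical stand-ins for the LLVM-side type and its create/dispose calls, not the real bindings.

    use std::ops::Deref;
    use std::ptr::NonNull;

    // Hypothetical opaque handle type, standing in for `OperandBundle`.
    struct Opaque(u32);

    // Stand-ins for the FFI create/dispose pair (`LLVMCreateOperandBundle` /
    // `LLVMDisposeOperandBundle` in the hunk above); here they just box/unbox.
    fn create_handle(v: u32) -> *mut Opaque {
        Box::into_raw(Box::new(Opaque(v)))
    }
    unsafe fn dispose_handle(p: NonNull<Opaque>) {
        drop(unsafe { Box::from_raw(p.as_ptr()) });
    }

    /// Owns the raw handle and disposes of it when dropped.
    struct Owned {
        raw: NonNull<Opaque>,
    }

    impl Owned {
        fn new(v: u32) -> Self {
            // Turn a null return into a hard error, as the wrapper above does.
            Owned { raw: NonNull::new(create_handle(v)).unwrap() }
        }
    }

    impl Drop for Owned {
        fn drop(&mut self) {
            unsafe { dispose_handle(self.raw) }
        }
    }

    impl Deref for Owned {
        type Target = Opaque;
        fn deref(&self) -> &Opaque {
            // The borrow is valid for as long as `&self` is.
            unsafe { self.raw.as_ref() }
        }
    }

    fn main() {
        let bundle = Owned::new(7);
        // `Deref` lets callers borrow the underlying handle without taking ownership.
        assert_eq!(bundle.0, 7);
    }
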
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 358bfcb1573..6f2d86cc601 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,4 +1,5 @@
-use std::ffi::{c_char, c_void, CStr, CString};
+use std::collections::VecDeque;
+use std::ffi::{CStr, CString};
 use std::fmt::Write;
 use std::path::Path;
 use std::sync::Once;
@@ -6,20 +7,21 @@ use std::{ptr, slice, str};
 
 use libc::c_int;
 use rustc_codegen_ssa::base::wants_wasm_eh;
+use rustc_codegen_ssa::codegen_attrs::check_tied_features;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_data_structures::unord::UnordSet;
 use rustc_fs_util::path_to_c_string;
 use rustc_middle::bug;
-use rustc_session::config::{PrintKind, PrintRequest};
 use rustc_session::Session;
+use rustc_session::config::{PrintKind, PrintRequest};
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::{MergeFunctions, PanicStrategy, SmallDataThresholdSupport};
-use rustc_target::target_features::{RUSTC_SPECIAL_FEATURES, RUSTC_SPECIFIC_FEATURES};
+use rustc_target::target_features::{RUSTC_SPECIAL_FEATURES, RUSTC_SPECIFIC_FEATURES, Stability};
 
 use crate::back::write::create_informational_target_machine;
 use crate::errors::{
-    FixedX18InvalidArch, InvalidTargetFeaturePrefix, PossibleFeature, TargetFeatureDisableOrEnable,
+    FixedX18InvalidArch, ForbiddenCTargetFeature, InvalidTargetFeaturePrefix, PossibleFeature,
     UnknownCTargetFeature, UnknownCTargetFeaturePrefix, UnstableCTargetFeature,
 };
 use crate::llvm;
@@ -217,10 +219,10 @@ impl<'a> IntoIterator for LLVMFeature<'a> {
 // where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`.
 //
 // Check the current rustc fork of LLVM in the repo at https://github.com/rust-lang/llvm-project/.
-// The commit in use can be found via the `llvm-project` submodule in https://github.com/rust-lang/rust/tree/master/src
-// Though note that Rust can also be build with an external precompiled version of LLVM
-// which might lead to failures if the oldest tested / supported LLVM version
-// doesn't yet support the relevant intrinsics
+// The commit in use can be found via the `llvm-project` submodule in
+// https://github.com/rust-lang/rust/tree/master/src. Though note that Rust can also be built
+// with an external precompiled version of LLVM, which might lead to failures if the oldest
+// tested / supported LLVM version doesn't yet support the relevant intrinsics.
 pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFeature<'a>> {
     let arch = if sess.target.arch == "x86_64" {
         "x86"
@@ -247,7 +249,10 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
         ("aarch64", "pmuv3") => Some(LLVMFeature::new("perfmon")),
         ("aarch64", "paca") => Some(LLVMFeature::new("pauth")),
         ("aarch64", "pacg") => Some(LLVMFeature::new("pauth")),
-        ("aarch64", "sve-b16b16") => Some(LLVMFeature::new("b16b16")),
+        ("aarch64", "pauth-lr") if get_version().0 < 19 => None,
+        // Before LLVM 20, these two features were packaged together as b16b16.
+        ("aarch64", "sve-b16b16") if get_version().0 < 20 => Some(LLVMFeature::new("b16b16")),
+        ("aarch64", "sme-b16b16") if get_version().0 < 20 => Some(LLVMFeature::new("b16b16")),
         ("aarch64", "flagm2") => Some(LLVMFeature::new("altnzcv")),
         // Rust ties fp and neon together.
         ("aarch64", "neon") => {
@@ -259,51 +264,49 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
         ("aarch64", "fp16") => Some(LLVMFeature::new("fullfp16")),
         // Filter out features that are not supported by the current LLVM version
         ("aarch64", "fpmr") if get_version().0 != 18 => None,
-        // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single feature called
-        // `fast-unaligned-access`. In LLVM 19, it was split back out.
+        // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single
+        // feature called `fast-unaligned-access`. In LLVM 19, it was split back out.
         ("riscv32" | "riscv64", "unaligned-scalar-mem") if get_version().0 == 18 => {
             Some(LLVMFeature::new("fast-unaligned-access"))
         }
+        // Filter out features that are not supported by the current LLVM version
+        ("riscv32" | "riscv64", "zaamo") if get_version().0 < 19 => None,
+        ("riscv32" | "riscv64", "zabha") if get_version().0 < 19 => None,
+        ("riscv32" | "riscv64", "zalrsc") if get_version().0 < 19 => None,
         // Enable the evex512 target feature if an avx512 target feature is enabled.
         ("x86", s) if s.starts_with("avx512") => {
             Some(LLVMFeature::with_dependency(s, TargetFeatureFoldStrength::EnableOnly("evex512")))
         }
+        // Support for `wide-arithmetic` will first land in LLVM 20 as part of
+        // llvm/llvm-project#111598.
+        ("wasm32" | "wasm64", "wide-arithmetic") if get_version() < (20, 0, 0) => None,
         (_, s) => Some(LLVMFeature::new(s)),
     }
 }
 
-/// Given a map from target_features to whether they are enabled or disabled,
-/// ensure only valid combinations are allowed.
-pub(crate) fn check_tied_features(
-    sess: &Session,
-    features: &FxHashMap<&str, bool>,
-) -> Option<&'static [&'static str]> {
-    if !features.is_empty() {
-        for tied in sess.target.tied_target_features() {
-            // Tied features must be set to the same value, or not set at all
-            let mut tied_iter = tied.iter();
-            let enabled = features.get(tied_iter.next().unwrap());
-            if tied_iter.any(|f| enabled != features.get(f)) {
-                return Some(tied);
-            }
-        }
-    }
-    None
-}
-
-/// Used to generate cfg variables and apply features
-/// Must express features in the way Rust understands them
+/// Used to generate cfg variables and apply features.
+/// Must express features in the way Rust understands them.
+///
+/// We do not have to worry about RUSTC_SPECIFIC_FEATURES here; those are handled outside codegen.
 pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
-    let mut features = vec![];
-
-    // Add base features for the target
+    let mut features: FxHashSet<Symbol> = Default::default();
+
+    // Add base features for the target.
+    // We do *not* add the -Ctarget-features there, and instead duplicate the logic for that below.
+    // The reason is that if LLVM considers a feature implied but we do not, we don't want that to
+    // show up in `cfg`. That way, `cfg` is entirely under our control -- except for the handling of
+    // the target CPU, which is still expanded to target features (with all their implied features) by
+    // LLVM.
     let target_machine = create_informational_target_machine(sess, true);
+    // Compute which of the known target features are enabled in the 'base' target machine.
+    // We only consider "supported" features; "forbidden" features are not reflected in `cfg` as of now.
     features.extend(
         sess.target
-            .supported_target_features()
+            .rust_target_features()
             .iter()
+            .filter(|(_, gate, _)| gate.is_supported())
             .filter(|(feature, _, _)| {
-                // skip checking special features, as LLVM may not understands them
+                // skip checking special features, as LLVM may not understand them
                 if RUSTC_SPECIAL_FEATURES.contains(feature) {
                     return true;
                 }
@@ -334,7 +337,12 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
         if enabled {
             features.extend(sess.target.implied_target_features(std::iter::once(feature)));
         } else {
+            // We don't care about the order in `features` since the only thing we use it for is the
+            // `features.contains` below.
+            #[allow(rustc::potential_query_instability)]
             features.retain(|f| {
+                // Keep a feature if it does not imply `feature`. Or, equivalently,
+                // remove the reverse-dependencies of `feature`.
                 !sess.target.implied_target_features(std::iter::once(*f)).contains(&feature)
             });
         }
@@ -342,8 +350,9 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
 
     // Filter enabled features based on feature gates
     sess.target
-        .supported_target_features()
+        .rust_target_features()
         .iter()
+        .filter(|(_, gate, _)| gate.is_supported())
         .filter_map(|&(feature, gate, _)| {
             if sess.is_nightly_build() || allow_unstable || gate.is_stable() {
                 Some(feature)
@@ -398,15 +407,78 @@ fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
     ret
 }
 
-fn print_target_features(out: &mut String, sess: &Session, tm: &llvm::TargetMachine) {
+pub(crate) fn print(req: &PrintRequest, out: &mut String, sess: &Session) {
+    require_inited();
+    let tm = create_informational_target_machine(sess, false);
+    match req.kind {
+        PrintKind::TargetCPUs => print_target_cpus(sess, &tm, out),
+        PrintKind::TargetFeatures => print_target_features(sess, &tm, out),
+        _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
+    }
+}
+
+fn print_target_cpus(sess: &Session, tm: &llvm::TargetMachine, out: &mut String) {
+    let cpu_names = llvm::build_string(|s| unsafe {
+        llvm::LLVMRustPrintTargetCPUs(&tm, s);
+    })
+    .unwrap();
+
+    struct Cpu<'a> {
+        cpu_name: &'a str,
+        remark: String,
+    }
+    // Compare CPU against current target to label the default.
+    let target_cpu = handle_native(&sess.target.cpu);
+    let make_remark = |cpu_name| {
+        if cpu_name == target_cpu {
+            // FIXME(#132514): This prints the LLVM target string, which can be
+            // different from the Rust target string. Is that intended?
+            let target = &sess.target.llvm_target;
+            format!(
+                " - This is the default target CPU for the current build target (currently {target})."
+            )
+        } else {
+            "".to_owned()
+        }
+    };
+    let mut cpus = cpu_names
+        .lines()
+        .map(|cpu_name| Cpu { cpu_name, remark: make_remark(cpu_name) })
+        .collect::<VecDeque<_>>();
+
+    // Only print the "native" entry when host and target are the same arch,
+    // since otherwise it could be wrong or misleading.
+    if sess.host.arch == sess.target.arch {
+        let host = get_host_cpu_name();
+        cpus.push_front(Cpu {
+            cpu_name: "native",
+            remark: format!(" - Select the CPU of the current host (currently {host})."),
+        });
+    }
+
+    let max_name_width = cpus.iter().map(|cpu| cpu.cpu_name.len()).max().unwrap_or(0);
+    writeln!(out, "Available CPUs for this target:").unwrap();
+    for Cpu { cpu_name, remark } in cpus {
+        // Only pad the CPU name if there's a remark to print after it.
+        let width = if remark.is_empty() { 0 } else { max_name_width };
+        writeln!(out, "    {cpu_name:<width$}{remark}").unwrap();
+    }
+}
+
+fn print_target_features(sess: &Session, tm: &llvm::TargetMachine, out: &mut String) {
     let mut llvm_target_features = llvm_target_features(tm);
     let mut known_llvm_target_features = FxHashSet::<&'static str>::default();
     let mut rustc_target_features = sess
         .target
-        .supported_target_features()
+        .rust_target_features()
         .iter()
-        .filter_map(|(feature, _gate, _implied)| {
-            // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+        .filter_map(|(feature, gate, _implied)| {
+            if !gate.is_supported() {
+                // Only list (experimentally) supported features.
+                return None;
+            }
+            // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these
+            // strings.
             let llvm_feature = to_llvm_features(sess, *feature)?.llvm_feature_name;
             let desc =
                 match llvm_target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok() {
@@ -457,52 +529,31 @@ fn print_target_features(out: &mut String, sess: &Session, tm: &llvm::TargetMach
     writeln!(out, "and may be renamed or removed in a future version of LLVM or rustc.\n").unwrap();
 }
 
-pub(crate) fn print(req: &PrintRequest, mut out: &mut String, sess: &Session) {
-    require_inited();
-    let tm = create_informational_target_machine(sess, false);
-    match req.kind {
-        PrintKind::TargetCPUs => {
-            // SAFETY generate a C compatible string from a byte slice to pass
-            // the target CPU name into LLVM, the lifetime of the reference is
-            // at least as long as the C function
-            let cpu_cstring = CString::new(handle_native(sess.target.cpu.as_ref()))
-                .unwrap_or_else(|e| bug!("failed to convert to cstring: {}", e));
-            unsafe extern "C" fn callback(out: *mut c_void, string: *const c_char, len: usize) {
-                let out = unsafe { &mut *(out as *mut &mut String) };
-                let bytes = unsafe { slice::from_raw_parts(string as *const u8, len) };
-                write!(out, "{}", String::from_utf8_lossy(bytes)).unwrap();
-            }
-            unsafe {
-                llvm::LLVMRustPrintTargetCPUs(
-                    &tm,
-                    cpu_cstring.as_ptr(),
-                    callback,
-                    std::ptr::addr_of_mut!(out) as *mut c_void,
-                );
-            }
-        }
-        PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
-        _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
-    }
+/// Returns the host CPU name, according to LLVM.
+fn get_host_cpu_name() -> &'static str {
+    let mut len = 0;
+    // SAFETY: The underlying C++ global function returns a `StringRef` that
+    // isn't tied to any particular backing buffer, so it must be 'static.
+    let slice: &'static [u8] = unsafe {
+        let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
+        assert!(!ptr.is_null());
+        slice::from_raw_parts(ptr, len)
+    };
+    str::from_utf8(slice).expect("host CPU name should be UTF-8")
 }
 
-fn handle_native(name: &str) -> &str {
-    if name != "native" {
-        return name;
-    }
-
-    unsafe {
-        let mut len = 0;
-        let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
-        str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
+/// If the given string is `"native"`, returns the host CPU name according to
+/// LLVM. Otherwise, the string is returned as-is.
+fn handle_native(cpu_name: &str) -> &str {
+    match cpu_name {
+        "native" => get_host_cpu_name(),
+        _ => cpu_name,
     }
 }
 
 pub(crate) fn target_cpu(sess: &Session) -> &str {
-    match sess.opts.cg.target_cpu {
-        Some(ref name) => handle_native(name),
-        None => handle_native(sess.target.cpu.as_ref()),
-    }
+    let cpu_name = sess.opts.cg.target_cpu.as_deref().unwrap_or_else(|| &sess.target.cpu);
+    handle_native(cpu_name)
 }
 
 /// The list of LLVM features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
@@ -535,6 +586,11 @@ pub(crate) fn global_llvm_features(
     // -Ctarget-cpu=native
     match sess.opts.cg.target_cpu {
         Some(ref s) if s == "native" => {
+            // We have already figured out the actual CPU name with `LLVMRustGetHostCPUName` and set
+            // that for LLVM, so the features implied by that CPU name will be available everywhere.
+            // However, that is not sufficient: e.g. `skylake` alone does not tell us whether
+            // some of the instructions are available or not. So we have to also explicitly ask for
+            // the exact set of features available on the host, and enable all of them.
             let features_string = unsafe {
                 let ptr = llvm::LLVMGetHostCPUFeatures();
                 let features_string = if !ptr.is_null() {
@@ -572,7 +628,7 @@ pub(crate) fn global_llvm_features(
 
     // -Ctarget-features
     if !only_base_features {
-        let supported_features = sess.target.supported_target_features();
+        let known_features = sess.target.rust_target_features();
         let mut featsmap = FxHashMap::default();
 
         // insert implied features
@@ -606,50 +662,53 @@ pub(crate) fn global_llvm_features(
                     }
                 };
 
+                // Get the backend feature name, if any.
+                // This excludes rustc-specific features, which do not get passed to LLVM.
                 let feature = backend_feature_name(sess, s)?;
                 // Warn against use of LLVM specific feature names and unstable features on the CLI.
                 if diagnostics {
-                    let feature_state = supported_features.iter().find(|&&(v, _, _)| v == feature);
-                    if feature_state.is_none() {
-                        let rust_feature =
-                            supported_features.iter().find_map(|&(rust_feature, _, _)| {
-                                let llvm_features = to_llvm_features(sess, rust_feature)?;
-                                if llvm_features.contains(feature)
-                                    && !llvm_features.contains(rust_feature)
-                                {
-                                    Some(rust_feature)
-                                } else {
-                                    None
+                    let feature_state = known_features.iter().find(|&&(v, _, _)| v == feature);
+                    match feature_state {
+                        None => {
+                            let rust_feature =
+                                known_features.iter().find_map(|&(rust_feature, _, _)| {
+                                    let llvm_features = to_llvm_features(sess, rust_feature)?;
+                                    if llvm_features.contains(feature)
+                                        && !llvm_features.contains(rust_feature)
+                                    {
+                                        Some(rust_feature)
+                                    } else {
+                                        None
+                                    }
+                                });
+                            let unknown_feature = if let Some(rust_feature) = rust_feature {
+                                UnknownCTargetFeature {
+                                    feature,
+                                    rust_feature: PossibleFeature::Some { rust_feature },
                                 }
-                            });
-                        let unknown_feature = if let Some(rust_feature) = rust_feature {
-                            UnknownCTargetFeature {
-                                feature,
-                                rust_feature: PossibleFeature::Some { rust_feature },
-                            }
-                        } else {
-                            UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
-                        };
-                        sess.dcx().emit_warn(unknown_feature);
-                    } else if feature_state
-                        .is_some_and(|(_name, feature_gate, _implied)| !feature_gate.is_stable())
-                    {
-                        // An unstable feature. Warn about using it.
-                        sess.dcx().emit_warn(UnstableCTargetFeature { feature });
+                            } else {
+                                UnknownCTargetFeature {
+                                    feature,
+                                    rust_feature: PossibleFeature::None,
+                                }
+                            };
+                            sess.dcx().emit_warn(unknown_feature);
+                        }
+                        Some((_, Stability::Stable, _)) => {}
+                        Some((_, Stability::Unstable(_), _)) => {
+                            // An unstable feature. Warn about using it.
+                            sess.dcx().emit_warn(UnstableCTargetFeature { feature });
+                        }
+                        Some((_, Stability::Forbidden { reason }, _)) => {
+                            sess.dcx().emit_warn(ForbiddenCTargetFeature { feature, reason });
+                        }
                     }
-                }
 
-                if diagnostics {
                     // FIXME(nagisa): figure out how to not allocate a full hashset here.
                     featsmap.insert(feature, enable_disable == '+');
                 }
 
-                // rustc-specific features do not get passed down to LLVM…
-                if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
-                    return None;
-                }
-
-                // ... otherwise though we run through `to_llvm_features` when
+                // We run through `to_llvm_features` when
                 // passing requests down to LLVM. This means that all in-language
                 // features also work on the command line instead of having two
                 // different names when the LLVM name and the Rust name differ.
@@ -675,7 +734,7 @@ pub(crate) fn global_llvm_features(
         features.extend(feats);
 
         if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) {
-            sess.dcx().emit_err(TargetFeatureDisableOrEnable {
+            sess.dcx().emit_err(rustc_codegen_ssa::errors::TargetFeatureDisableOrEnable {
                 features: f,
                 span: None,
                 missing_features: None,
@@ -703,12 +762,9 @@ fn backend_feature_name<'a>(sess: &Session, s: &'a str) -> Option<&'a str> {
     let feature = s
         .strip_prefix(&['+', '-'][..])
         .unwrap_or_else(|| sess.dcx().emit_fatal(InvalidTargetFeaturePrefix { feature: s }));
-    if s.is_empty() {
-        return None;
-    }
     // Rustc-specific feature requests like `+crt-static` or `-crt-static`
     // are not passed down to LLVM.
-    if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+    if s.is_empty() || RUSTC_SPECIFIC_FEATURES.contains(&feature) {
         return None;
     }
     Some(feature)
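
For reference, `check_tied_features` (moved to `rustc_codegen_ssa::codegen_attrs` by this diff) enforces that all features in a tied group are set to the same value, or not set at all. The following is a minimal standalone sketch of that rule using plain slices and a `HashMap` rather than the real `Session`/target types; the `fp`/`neon` group is borrowed from the comment above ("Rust ties fp and neon together") purely for illustration.

    use std::collections::HashMap;

    /// Returns the first tied group whose members are not all set to the same value.
    fn first_broken_tie<'a>(
        tied_groups: &'a [&'a [&'a str]],
        features: &HashMap<&str, bool>,
    ) -> Option<&'a [&'a str]> {
        if features.is_empty() {
            return None;
        }
        for &tied in tied_groups {
            // Tied features must be set to the same value, or not set at all.
            let mut iter = tied.iter();
            let first = features.get(iter.next().unwrap());
            if iter.any(|f| features.get(f) != first) {
                return Some(tied);
            }
        }
        None
    }

    fn main() {
        let tied: &[&[&str]] = &[&["fp", "neon"]];

        // Both enabled: consistent, nothing to report.
        let ok: HashMap<&str, bool> = [("fp", true), ("neon", true)].into_iter().collect();
        assert!(first_broken_tie(tied, &ok).is_none());

        // Only one member of the pair mentioned: the whole group is flagged.
        let bad: HashMap<&str, bool> = [("neon", false)].into_iter().collect();
        assert_eq!(first_broken_tie(tied, &bad), Some(&["fp", "neon"][..]));
    }
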
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 9bd491664d8..ea8857b4739 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -24,8 +24,8 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
     ) {
         let instance = Instance::mono(self.tcx, def_id);
         let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
-        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
-        // the llvm type from the actual evaluated initializer.
+        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure
+        // out the llvm type from the actual evaluated initializer.
         let ty = if nested {
             self.tcx.types.unit
         } else {
@@ -39,9 +39,9 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
                 .emit_fatal(SymbolAlreadyDefined { span: self.tcx.def_span(def_id), symbol_name })
         });
 
+        llvm::set_linkage(g, base::linkage_to_llvm(linkage));
+        llvm::set_visibility(g, base::visibility_to_llvm(visibility));
         unsafe {
-            llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
-            llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
             if self.should_assume_dso_local(g, false) {
                 llvm::LLVMRustSetDSOLocal(g, true);
             }
@@ -61,10 +61,12 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
 
         let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
         let lldecl = self.declare_fn(symbol_name, fn_abi, Some(instance));
-        unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
+        llvm::set_linkage(lldecl, base::linkage_to_llvm(linkage));
         let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
         base::set_link_section(lldecl, attrs);
-        if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
+        if (linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR)
+            && self.tcx.sess.target.supports_comdat()
+        {
             llvm::SetUniqueComdat(self.llmod, lldecl);
         }
 
@@ -76,21 +78,15 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
             && linkage != Linkage::Private
             && self.tcx.is_compiler_builtins(LOCAL_CRATE)
         {
-            unsafe {
-                llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
-            }
+            llvm::set_visibility(lldecl, llvm::Visibility::Hidden);
         } else {
-            unsafe {
-                llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility));
-            }
+            llvm::set_visibility(lldecl, base::visibility_to_llvm(visibility));
         }
 
         debug!("predefine_fn: instance = {:?}", instance);
 
-        unsafe {
-            if self.should_assume_dso_local(lldecl, false) {
-                llvm::LLVMRustSetDSOLocal(lldecl, true);
-            }
+        if self.should_assume_dso_local(lldecl, false) {
+            unsafe { llvm::LLVMRustSetDSOLocal(lldecl, true) };
         }
 
         self.instances.borrow_mut().insert(instance, lldecl);
@@ -100,13 +96,13 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
 impl CodegenCx<'_, '_> {
     /// Whether a definition or declaration can be assumed to be local to a group of
     /// libraries that form a single DSO or executable.
-    pub(crate) unsafe fn should_assume_dso_local(
+    pub(crate) fn should_assume_dso_local(
         &self,
         llval: &llvm::Value,
         is_declaration: bool,
     ) -> bool {
-        let linkage = unsafe { llvm::LLVMRustGetLinkage(llval) };
-        let visibility = unsafe { llvm::LLVMRustGetVisibility(llval) };
+        let linkage = llvm::get_linkage(llval);
+        let visibility = llvm::get_visibility(llval);
 
         if matches!(linkage, llvm::Linkage::InternalLinkage | llvm::Linkage::PrivateLinkage) {
             return true;
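
`SetUniqueComdat`, which the hunk above now calls only for `LinkOnceODR`/`WeakODR` items on targets that support comdat, normalizes the raw value-name bytes (which may or may not already carry a trailing NUL) into a `CString` before looking up the comdat (see the llvm/mod.rs hunk earlier in this diff). A standalone sketch of just that conversion, using only `std`:

    use std::ffi::CString;

    // Accepts a byte buffer that may or may not already be NUL-terminated and
    // produces a CString either way, mirroring the conversion in `SetUniqueComdat`.
    fn to_cstring(buf: Vec<u8>) -> CString {
        CString::from_vec_with_nul(buf)
            .or_else(|err| CString::new(err.into_bytes()))
            .unwrap()
    }

    fn main() {
        // Already NUL-terminated: `from_vec_with_nul` succeeds directly.
        assert_eq!(to_cstring(b"foo\0".to_vec()).as_bytes(), b"foo");
        // Not NUL-terminated: fall back to `CString::new`, which appends the NUL.
        assert_eq!(to_cstring(b"foo".to_vec()).as_bytes(), b"foo");
    }
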
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 2c2b9030b7c..6aec078e0de 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -1,19 +1,19 @@
 use std::{fmt, ptr};
 
 use libc::{c_char, c_uint};
+use rustc_abi::{AddressSpace, Align, Integer, Size};
 use rustc_codegen_ssa::common::TypeKind;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
-use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+use rustc_target::callconv::{CastTarget, FnAbi, Reg};
 
 use crate::abi::{FnAbiLlvmExt, LlvmType};
 use crate::context::CodegenCx;
 pub(crate) use crate::llvm::Type;
-use crate::llvm::{Bool, False, True};
+use crate::llvm::{Bool, False, Metadata, True};
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 use crate::{common, llvm};
@@ -283,43 +283,31 @@ impl<'ll, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn add_type_metadata(&self, function: &'ll Value, typeid: String) {
         let typeid_metadata = self.typeid_metadata(typeid).unwrap();
-        let v = [self.const_usize(0), typeid_metadata];
         unsafe {
+            let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata];
             llvm::LLVMRustGlobalAddMetadata(
                 function,
                 llvm::MD_type as c_uint,
-                llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
-                    self.llcx,
-                    v.as_ptr(),
-                    v.len() as c_uint,
-                )),
+                llvm::LLVMMDNodeInContext2(self.llcx, v.as_ptr(), v.len()),
             )
         }
     }
 
     fn set_type_metadata(&self, function: &'ll Value, typeid: String) {
         let typeid_metadata = self.typeid_metadata(typeid).unwrap();
-        let v = [self.const_usize(0), typeid_metadata];
         unsafe {
+            let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata];
             llvm::LLVMGlobalSetMetadata(
                 function,
                 llvm::MD_type as c_uint,
-                llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
-                    self.llcx,
-                    v.as_ptr(),
-                    v.len() as c_uint,
-                )),
+                llvm::LLVMMDNodeInContext2(self.llcx, v.as_ptr(), v.len()),
             )
         }
     }
 
-    fn typeid_metadata(&self, typeid: String) -> Option<&'ll Value> {
+    fn typeid_metadata(&self, typeid: String) -> Option<&'ll Metadata> {
         Some(unsafe {
-            llvm::LLVMMDStringInContext(
-                self.llcx,
-                typeid.as_ptr() as *const c_char,
-                typeid.len() as c_uint,
-            )
+            llvm::LLVMMDStringInContext2(self.llcx, typeid.as_ptr() as *const c_char, typeid.len())
         })
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 6e429a1674a..2b05e24a7ba 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,11 +1,12 @@
 use std::fmt::Write;
 
+use rustc_abi::Primitive::{Float, Int, Pointer};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt};
-use rustc_target::abi::{Abi, Align, FieldsShape, Float, Int, Pointer, Scalar, Size, Variants};
 use tracing::debug;
 
 use crate::common::*;
@@ -16,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
 ) -> &'a Type {
-    match layout.abi {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { element, count } => {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
     }
 
     let name = match layout.ty.kind() {
@@ -169,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }
 
     fn is_llvm_scalar_pair(&self) -> bool {
-        match self.abi {
-            Abi::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
 
@@ -190,16 +196,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
     /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
     /// If the type is an unsized struct, the regular layout is generated,
-    /// with the inner-most trailing unsized field using the "minimal unit"
+    /// with the innermost trailing unsized field using the "minimal unit"
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(scalar) = self.abi {
+        if let BackendRepr::Scalar(scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
-            // can be either fat or thin (data pointers of fat pointers).
+            // can be either wide or thin (data pointers of wide pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                 return llty;
             }
@@ -247,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        match self.abi {
-            Abi::Scalar(scalar) => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => {
                 if scalar.is_bool() {
                     return cx.type_i1();
                 }
             }
-            Abi::ScalarPair(..) => {
+            BackendRepr::ScalarPair(..) => {
                 // An immediate pair always contains just the two elements, without any padding
                 // filler, as it should never be stored to memory.
                 return cx.type_struct(
@@ -286,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let Abi::ScalarPair(a, b) = self.abi else {
+        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
         let scalar = [a, b][index];
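
The `Abi` → `BackendRepr` rename above leaves the classification logic unchanged: `Scalar` and `Vector` layouts are LLVM immediates, `ScalarPair` is passed as two elements, and `Uninhabited`/`Memory` layouts live in memory. A minimal standalone sketch of that classification using a simplified local enum (not the real `rustc_abi::BackendRepr`, whose variants carry payloads):

    // Simplified stand-in for `rustc_abi::BackendRepr`; variant payloads are omitted.
    enum Repr {
        Scalar,
        ScalarPair,
        Vector,
        Memory,
        Uninhabited,
    }

    /// Mirrors `is_llvm_immediate`: only scalars and vectors are immediates.
    fn is_immediate(repr: &Repr) -> bool {
        matches!(repr, Repr::Scalar | Repr::Vector)
    }

    /// Mirrors `is_llvm_scalar_pair`: only scalar pairs are passed as two elements.
    fn is_scalar_pair(repr: &Repr) -> bool {
        matches!(repr, Repr::ScalarPair)
    }

    fn main() {
        assert!(is_immediate(&Repr::Scalar));
        assert!(is_immediate(&Repr::Vector));
        assert!(!is_immediate(&Repr::Memory));
        assert!(is_scalar_pair(&Repr::ScalarPair));
        assert!(!is_scalar_pair(&Repr::Uninhabited));
    }
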
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 781cee81180..e4c3e748cb5 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -1,9 +1,9 @@
+use rustc_abi::{Align, Endian, HasDataLayout, Size};
 use rustc_codegen_ssa::common::IntPredicate;
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods};
-use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::Ty;
-use rustc_target::abi::{Align, Endian, HasDataLayout, Size};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 
 use crate::builder::Builder;
 use crate::type_::Type;