author     Laurențiu Nicola <lnicola@users.noreply.github.com>    2025-05-20 07:15:48 +0000
committer  GitHub <noreply@github.com>                            2025-05-20 07:15:48 +0000
commit     2147783b79641e76a7e5edde92d01ab2b7b1c929 (patch)
tree       f3320b3f6da82ecd6addc82012c3800de01aa3d2 /compiler/rustc_codegen_llvm/src
parent     a1d75fb0d092355557fefed43ceb1bea1432400c (diff)
parent     a6674952979445a81a890a936d968f81cf766c61 (diff)
Merge pull request #19826 from lnicola/sync-from-rust
minor: Sync from downstream
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs                        | 118
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs                 |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs                   |   3
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs                 |   4
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs                    |  10
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs                     |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs                     |   6
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs                    |  16
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs           |  26
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs |  44
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs  |  28
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/unused.rs |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs              |   6
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs                  |  14
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs                        |   1
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs                   |  43
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/mod.rs                   |  27
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs                  |  58
18 files changed, 247 insertions, 163 deletions
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index e481b99afcc..9e3893d5314 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -14,7 +14,7 @@ use smallvec::SmallVec;
 use tracing::debug;
 
 use crate::builder::Builder;
-use crate::common::{AsCCharPtr, Funclet};
+use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
@@ -435,13 +435,7 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
             template_str.push_str("\n.att_syntax\n");
         }
 
-        unsafe {
-            llvm::LLVMAppendModuleInlineAsm(
-                self.llmod,
-                template_str.as_c_char_ptr(),
-                template_str.len(),
-            );
-        }
+        llvm::append_module_inline_asm(self.llmod, template_str.as_bytes());
     }
 
     fn mangled_name(&self, instance: Instance<'tcx>) -> String {
@@ -482,67 +476,67 @@ pub(crate) fn inline_asm_call<'ll>(
 
     debug!("Asm Output Type: {:?}", output);
     let fty = bx.cx.type_func(&argtys, output);
+
     // Ask LLVM to verify that the constraints are well-formed.
-    let constraints_ok =
-        unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()) };
+    let constraints_ok = unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr(), cons.len()) };
     debug!("constraint verification result: {:?}", constraints_ok);
-    if constraints_ok {
-        let v = unsafe {
-            llvm::LLVMRustInlineAsm(
-                fty,
-                asm.as_c_char_ptr(),
-                asm.len(),
-                cons.as_c_char_ptr(),
-                cons.len(),
-                volatile,
-                alignstack,
-                dia,
-                can_throw,
-            )
-        };
+    if !constraints_ok {
+        // LLVM has detected an issue with our constraints, so bail out.
+        return None;
+    }
 
-        let call = if !labels.is_empty() {
-            assert!(catch_funclet.is_none());
-            bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None)
-        } else if let Some((catch, funclet)) = catch_funclet {
-            bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None)
-        } else {
-            bx.call(fty, None, None, v, inputs, None, None)
-        };
+    let v = unsafe {
+        llvm::LLVMGetInlineAsm(
+            fty,
+            asm.as_ptr(),
+            asm.len(),
+            cons.as_ptr(),
+            cons.len(),
+            volatile,
+            alignstack,
+            dia,
+            can_throw,
+        )
+    };
 
-        // Store mark in a metadata node so we can map LLVM errors
-        // back to source locations. See #17552.
-        let key = "srcloc";
-        let kind = bx.get_md_kind_id(key);
+    let call = if !labels.is_empty() {
+        assert!(catch_funclet.is_none());
+        bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None)
+    } else if let Some((catch, funclet)) = catch_funclet {
+        bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None)
+    } else {
+        bx.call(fty, None, None, v, inputs, None, None)
+    };
 
-        // `srcloc` contains one 64-bit integer for each line of assembly code,
-        // where the lower 32 bits hold the lo byte position and the upper 32 bits
-        // hold the hi byte position.
-        let mut srcloc = vec![];
-        if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
-            // LLVM inserts an extra line to add the ".intel_syntax", so add
-            // a dummy srcloc entry for it.
-            //
-            // Don't do this if we only have 1 line span since that may be
-            // due to the asm template string coming from a macro. LLVM will
-            // default to the first srcloc for lines that don't have an
-            // associated srcloc.
-            srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0)));
-        }
-        srcloc.extend(line_spans.iter().map(|span| {
-            llvm::LLVMValueAsMetadata(
-                bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)),
-            )
-        }));
-        let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) };
-        let md = bx.get_metadata_value(md);
-        llvm::LLVMSetMetadata(call, kind, md);
+    // Store mark in a metadata node so we can map LLVM errors
+    // back to source locations. See #17552.
+    let key = "srcloc";
+    let kind = bx.get_md_kind_id(key);
 
-        Some(call)
-    } else {
-        // LLVM has detected an issue with our constraints, bail out
-        None
+    // `srcloc` contains one 64-bit integer for each line of assembly code,
+    // where the lower 32 bits hold the lo byte position and the upper 32 bits
+    // hold the hi byte position.
+    let mut srcloc = vec![];
+    if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
+        // LLVM inserts an extra line to add the ".intel_syntax", so add
+        // a dummy srcloc entry for it.
+        //
+        // Don't do this if we only have 1 line span since that may be
+        // due to the asm template string coming from a macro. LLVM will
+        // default to the first srcloc for lines that don't have an
+        // associated srcloc.
+        srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0)));
     }
+    srcloc.extend(line_spans.iter().map(|span| {
+        llvm::LLVMValueAsMetadata(
+            bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)),
+        )
+    }));
+    let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) };
+    let md = bx.get_metadata_value(md);
+    llvm::LLVMSetMetadata(call, kind, md);
+
+    Some(call)
 }
 
 /// If the register is an xmm/ymm/zmm register then return its index.
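
As an aside on the hunk above: each `srcloc` entry packs one line span into a single u64, with the span's lo byte position in the lower 32 bits and the hi byte position in the upper 32 bits. A minimal standalone sketch of that encoding (illustrative only, not part of this patch):

// Pack/unpack a line span the way the `srcloc` metadata entries are built:
// lo byte position in bits 0..32, hi byte position in bits 32..64.
fn pack_srcloc(lo: u32, hi: u32) -> u64 {
    u64::from(lo) | (u64::from(hi) << 32)
}

fn unpack_srcloc(packed: u64) -> (u32, u32) {
    (packed as u32, (packed >> 32) as u32)
}

fn main() {
    let packed = pack_srcloc(10, 25);
    assert_eq!(unpack_srcloc(packed), (10, 25));
}
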
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 176fb72dfdc..443c2eace55 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -1,5 +1,5 @@
 //! Set and unset common attributes on LLVM values.
-use rustc_attr_parsing::{InlineAttr, InstructionSetAttr, OptimizeAttr};
+use rustc_attr_data_structures::{InlineAttr, InstructionSetAttr, OptimizeAttr};
 use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry};
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 39b3a23e0b1..cb329323f5d 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -42,7 +42,8 @@ fn crate_type_allows_lto(crate_type: CrateType) -> bool {
         | CrateType::Dylib
         | CrateType::Staticlib
         | CrateType::Cdylib
-        | CrateType::ProcMacro => true,
+        | CrateType::ProcMacro
+        | CrateType::Sdylib => true,
         CrateType::Rlib => false,
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 4ac77c8f7f1..20721c74608 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1148,9 +1148,9 @@ unsafe fn embed_bitcode(
             // We need custom section flags, so emit module-level inline assembly.
             let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
             let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
-            llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len());
+            llvm::append_module_inline_asm(llmod, &asm);
             let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
-            llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len());
+            llvm::append_module_inline_asm(llmod, &asm);
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 04c8118b616..5238755c8eb 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -361,7 +361,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.raw()) {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.as_ref()) {
             bundles.push(kcfi_bundle);
         }
 
@@ -1416,7 +1416,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.raw()) {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.as_ref()) {
             bundles.push(kcfi_bundle);
         }
 
@@ -1749,7 +1749,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.raw()) {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.as_ref()) {
             bundles.push(kcfi_bundle);
         }
 
@@ -1836,7 +1836,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         instance: Option<Instance<'tcx>>,
         llfn: &'ll Value,
-    ) -> Option<llvm::OperandBundleOwned<'ll>> {
+    ) -> Option<llvm::OperandBundleBox<'ll>> {
         let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
         let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
             && let Some(fn_abi) = fn_abi
@@ -1862,7 +1862,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
                 kcfi::typeid_for_fnabi(self.tcx, fn_abi, options)
             };
 
-            Some(llvm::OperandBundleOwned::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+            Some(llvm::OperandBundleBox::new("kcfi", &[self.const_u32(kcfi_typeid)]))
         } else {
             None
         };
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index ea9ab5c02bd..6d68eca60af 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -103,7 +103,7 @@ pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'t
             // This is a monomorphization of a generic function.
             if !(cx.tcx.sess.opts.share_generics()
                 || tcx.codegen_fn_attrs(instance_def_id).inline
-                    == rustc_attr_parsing::InlineAttr::Never)
+                    == rustc_attr_data_structures::InlineAttr::Never)
             {
                 // When not sharing generics, all instances are in the same
                 // crate and have hidden visibility.
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index a6f277e4455..3cfa96393e9 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -67,12 +67,12 @@ use crate::value::Value;
 /// the `OperandBundleDef` value created for MSVC landing pads.
 pub(crate) struct Funclet<'ll> {
     cleanuppad: &'ll Value,
-    operand: llvm::OperandBundleOwned<'ll>,
+    operand: llvm::OperandBundleBox<'ll>,
 }
 
 impl<'ll> Funclet<'ll> {
     pub(crate) fn new(cleanuppad: &'ll Value) -> Self {
-        Funclet { cleanuppad, operand: llvm::OperandBundleOwned::new("funclet", &[cleanuppad]) }
+        Funclet { cleanuppad, operand: llvm::OperandBundleBox::new("funclet", &[cleanuppad]) }
     }
 
     pub(crate) fn cleanuppad(&self) -> &'ll Value {
@@ -80,7 +80,7 @@ impl<'ll> Funclet<'ll> {
     }
 
     pub(crate) fn bundle(&self) -> &llvm::OperandBundle<'ll> {
-        self.operand.raw()
+        self.operand.as_ref()
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index ed50515b707..b0d8e11d1fb 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -1009,11 +1009,27 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
         ifn!("llvm.minnum.f128", fn(t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.minimum.f16", fn(t_f16, t_f16) -> t_f16);
+        ifn!("llvm.minimum.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.minimum.f64", fn(t_f64, t_f64) -> t_f64);
+        // There are issues on x86_64 and aarch64 with the f128 variant.
+        //  - https://github.com/llvm/llvm-project/issues/139380
+        //  - https://github.com/llvm/llvm-project/issues/139381
+        // ifn!("llvm.minimum.f128", fn(t_f128, t_f128) -> t_f128);
+
         ifn!("llvm.maxnum.f16", fn(t_f16, t_f16) -> t_f16);
         ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
         ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
         ifn!("llvm.maxnum.f128", fn(t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.maximum.f16", fn(t_f16, t_f16) -> t_f16);
+        ifn!("llvm.maximum.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.maximum.f64", fn(t_f64, t_f64) -> t_f64);
+        // There are issues on x86_64 and aarch64 with the f128 variant.
+        //  - https://github.com/llvm/llvm-project/issues/139380
+        //  - https://github.com/llvm/llvm-project/issues/139381
+        // ifn!("llvm.maximum.f128", fn(t_f128, t_f128) -> t_f128);
+
         ifn!("llvm.floor.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index f6000e72840..c207df2fb0b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -155,6 +155,20 @@ pub(crate) struct Regions {
 impl Regions {
     /// Returns true if none of this structure's tables contain any regions.
     pub(crate) fn has_no_regions(&self) -> bool {
+        // Every region has a span, so if there are no spans then there are no regions.
+        self.all_cov_spans().next().is_none()
+    }
+
+    pub(crate) fn all_cov_spans(&self) -> impl Iterator<Item = &CoverageSpan> {
+        macro_rules! iter_cov_spans {
+            ( $( $regions:expr ),* $(,)? ) => {
+                std::iter::empty()
+                $(
+                    .chain( $regions.iter().map(|region| &region.cov_span) )
+                )*
+            }
+        }
+
         let Self {
             code_regions,
             expansion_regions,
@@ -163,11 +177,13 @@ impl Regions {
             mcdc_decision_regions,
         } = self;
 
-        code_regions.is_empty()
-            && expansion_regions.is_empty()
-            && branch_regions.is_empty()
-            && mcdc_branch_regions.is_empty()
-            && mcdc_decision_regions.is_empty()
+        iter_cov_spans!(
+            code_regions,
+            expansion_regions,
+            branch_regions,
+            mcdc_branch_regions,
+            mcdc_decision_regions,
+        )
     }
 }
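
The `iter_cov_spans!` macro above simply chains the per-table iterators into one, so `has_no_regions` reduces to checking whether that chained iterator is empty. A self-contained sketch of the same pattern (the `Span`/`Region` types here are stand-ins, not the real coverage types):

// Illustrative sketch of the iterator-chaining macro pattern used by
// `all_cov_spans`; the types are hypothetical stand-ins.
struct Span(u32);
struct Region { span: Span }

macro_rules! iter_spans {
    ( $( $table:expr ),* $(,)? ) => {
        std::iter::empty()
        $( .chain($table.iter().map(|r| &r.span)) )*
    };
}

fn main() {
    let code = vec![Region { span: Span(1) }];
    let branch = vec![Region { span: Span(2) }];
    let all: Vec<u32> = iter_spans!(code, branch).map(|s| s.0).collect();
    assert_eq!(all, vec![1, 2]);
    // The "no regions" check reduces to:
    assert!(!iter_spans!(code, branch).next().is_none());
}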
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
index 7bdbc685952..d3a815fabe7 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
@@ -11,6 +11,7 @@ use rustc_abi::Align;
 use rustc_codegen_ssa::traits::{
     BaseTypeCodegenMethods as _, ConstCodegenMethods, StaticCodegenMethods,
 };
+use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::{
     BasicCoverageBlock, CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping,
     MappingKind, Op,
@@ -104,6 +105,16 @@ fn fill_region_tables<'tcx>(
     ids_info: &'tcx CoverageIdsInfo,
     covfun: &mut CovfunRecord<'tcx>,
 ) {
+    // If this function is unused, replace all counters with zero.
+    let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter {
+        let term = if covfun.is_used {
+            ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
+        } else {
+            CovTerm::Zero
+        };
+        ffi::Counter::from_term(term)
+    };
+
     // Currently a function's mappings must all be in the same file, so use the
     // first mapping's span to determine the file.
     let source_map = tcx.sess.source_map();
@@ -115,6 +126,12 @@ fn fill_region_tables<'tcx>(
 
     let local_file_id = covfun.virtual_file_mapping.push_file(&source_file);
 
+    // If this testing flag is set, add an extra unused entry to the local
+    // file table, to help test the code for detecting unused file IDs.
+    if tcx.sess.coverage_inject_unused_local_file() {
+        covfun.virtual_file_mapping.push_file(&source_file);
+    }
+
     // In rare cases, _all_ of a function's spans are discarded, and coverage
     // codegen needs to handle that gracefully to avoid #133606.
     // It's hard for tests to trigger this organically, so instead we set
@@ -135,16 +152,6 @@ fn fill_region_tables<'tcx>(
     // For each counter/region pair in this function+file, convert it to a
     // form suitable for FFI.
     for &Mapping { ref kind, span } in &fn_cov_info.mappings {
-        // If this function is unused, replace all counters with zero.
-        let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter {
-            let term = if covfun.is_used {
-                ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
-            } else {
-                CovTerm::Zero
-            };
-            ffi::Counter::from_term(term)
-        };
-
         let Some(coords) = make_coords(span) else { continue };
         let cov_span = coords.make_coverage_span(local_file_id);
 
@@ -177,6 +184,19 @@ fn fill_region_tables<'tcx>(
     }
 }
 
+/// LLVM requires all local file IDs to have at least one mapping region.
+/// If that's not the case, skip this function, to avoid an assertion failure
+/// (or worse) in LLVM.
+fn check_local_file_table(covfun: &CovfunRecord<'_>) -> bool {
+    let mut local_file_id_seen =
+        IndexVec::<u32, _>::from_elem_n(false, covfun.virtual_file_mapping.local_file_table.len());
+    for cov_span in covfun.regions.all_cov_spans() {
+        local_file_id_seen[cov_span.file_id] = true;
+    }
+
+    local_file_id_seen.into_iter().all(|seen| seen)
+}
+
 /// Generates the contents of the covfun record for this function, which
 /// contains the function's coverage mapping data. The record is then stored
 /// as a global variable in the `__llvm_covfun` section.
@@ -185,6 +205,10 @@ pub(crate) fn generate_covfun_record<'tcx>(
     global_file_table: &GlobalFileTable,
     covfun: &CovfunRecord<'tcx>,
 ) {
+    if !check_local_file_table(covfun) {
+        return;
+    }
+
     let &CovfunRecord {
         mangled_function_name,
         source_hash,
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
index 39a59560c9d..574463be7ff 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
@@ -39,7 +39,10 @@ impl Coords {
 /// or other expansions), and if it does happen then skipping a span or function is
 /// better than an ICE or `llvm-cov` failure that the user might have no way to avoid.
 pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span) -> Option<Coords> {
-    let span = ensure_non_empty_span(source_map, span)?;
+    if span.is_empty() {
+        debug_assert!(false, "can't make coords from empty span: {span:?}");
+        return None;
+    }
 
     let lo = span.lo();
     let hi = span.hi();
@@ -70,29 +73,6 @@ pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span)
     })
 }
 
-fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> {
-    if !span.is_empty() {
-        return Some(span);
-    }
-
-    // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'.
-    source_map
-        .span_to_source(span, |src, start, end| try {
-            // Adjusting span endpoints by `BytePos(1)` is normally a bug,
-            // but in this case we have specifically checked that the character
-            // we're skipping over is one of two specific ASCII characters, so
-            // adjusting by exactly 1 byte is correct.
-            if src.as_bytes().get(end).copied() == Some(b'{') {
-                Some(span.with_hi(span.hi() + BytePos(1)))
-            } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
-                Some(span.with_lo(span.lo() - BytePos(1)))
-            } else {
-                None
-            }
-        })
-        .ok()?
-}
-
 /// If `llvm-cov` sees a source region that is improperly ordered (end < start),
 /// it will immediately exit with a fatal error. To prevent that from happening,
 /// discard regions that are improperly ordered, or might be interpreted in a
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/unused.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/unused.rs
index 68f60f169b5..fe3a7a1580b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/unused.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/unused.rs
@@ -157,7 +157,7 @@ fn make_dummy_instance<'tcx>(tcx: TyCtxt<'tcx>, local_def_id: LocalDefId) -> ty:
     let def_id = local_def_id.to_def_id();
 
     // Make a dummy instance that fills in all generics with placeholders.
-    ty::Instance::new(
+    ty::Instance::new_raw(
         def_id,
         ty::GenericArgs::for_item(tcx, def_id, |param, _| {
             if let ty::GenericParamDefKind::Lifetime = param.kind {
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 4ffe551df09..8f0948b8183 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -95,7 +95,11 @@ pub(crate) fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
     // in the `.debug_gdb_scripts` section. For that reason, we make sure that the
     // section is only emitted for leaf crates.
     let embed_visualizers = cx.tcx.crate_types().iter().any(|&crate_type| match crate_type {
-        CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Cdylib
+        | CrateType::Staticlib
+        | CrateType::Sdylib => {
             // These are crate types for which we will embed pretty printers since they
             // are treated as leaf crates.
             true
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index ffeab59b05c..5ca57375292 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -103,11 +103,23 @@ fn get_simple_intrinsic<'ll>(
         sym::minnumf64 => "llvm.minnum.f64",
         sym::minnumf128 => "llvm.minnum.f128",
 
+        sym::minimumf16 => "llvm.minimum.f16",
+        sym::minimumf32 => "llvm.minimum.f32",
+        sym::minimumf64 => "llvm.minimum.f64",
+        // There are issues on x86_64 and aarch64 with the f128 variant,
+        // so use the intrinsic fallback body instead.
+        // sym::minimumf128 => "llvm.minimum.f128",
         sym::maxnumf16 => "llvm.maxnum.f16",
         sym::maxnumf32 => "llvm.maxnum.f32",
         sym::maxnumf64 => "llvm.maxnum.f64",
         sym::maxnumf128 => "llvm.maxnum.f128",
 
+        sym::maximumf16 => "llvm.maximum.f16",
+        sym::maximumf32 => "llvm.maximum.f32",
+        sym::maximumf64 => "llvm.maximum.f64",
+        // There are issues on x86_64 and aarch64 with the f128 variant,
+        // so use the intrinsic fallback body instead.
+        // sym::maximumf128 => "llvm.maximum.f128",
         sym::copysignf16 => "llvm.copysign.f16",
         sym::copysignf32 => "llvm.copysign.f32",
         sym::copysignf64 => "llvm.copysign.f64",
@@ -613,7 +625,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             _ => {
                 debug!("unknown intrinsic '{}' -- falling back to default body", name);
                 // Call the fallback body instead of generating the intrinsic code
-                return Err(ty::Instance::new(instance.def_id(), instance.args));
+                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
             }
         };
 
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index e8010ec9fc4..5736314b96a 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -6,7 +6,6 @@
 
 // tidy-alphabetical-start
 #![allow(internal_features)]
-#![cfg_attr(bootstrap, feature(let_chains))]
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![doc(rust_logo)]
 #![feature(assert_matches)]
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index ffb490dcdc2..67a66e6ec79 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,7 +1,7 @@
 //! Bindings to the LLVM-C API (`LLVM*`), and to our own `extern "C"` wrapper
 //! functions around the unstable LLVM C++ API (`LLVMRust*`).
 //!
-//! ## Passing pointer/length strings as `*const c_uchar`
+//! ## Passing pointer/length strings as `*const c_uchar` (PTR_LEN_STR)
 //!
 //! Normally it's a good idea for Rust-side bindings to match the corresponding
 //! C-side function declarations as closely as possible. But when passing `&str`
@@ -415,6 +415,7 @@ impl AtomicRmwBinOp {
 pub(crate) enum AtomicOrdering {
     #[allow(dead_code)]
     NotAtomic = 0,
+    #[allow(dead_code)]
     Unordered = 1,
     Monotonic = 2,
     // Consume = 3,  // Not specified yet.
@@ -428,7 +429,6 @@ impl AtomicOrdering {
     pub(crate) fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
         use rustc_codegen_ssa::common::AtomicOrdering as Common;
         match ao {
-            Common::Unordered => Self::Unordered,
             Common::Relaxed => Self::Monotonic,
             Common::Acquire => Self::Acquire,
             Common::Release => Self::Release,
@@ -471,7 +471,7 @@ pub(crate) enum MetadataType {
     MD_kcfi_type = 36,
 }
 
-/// LLVMRustAsmDialect
+/// Must match the layout of `LLVMInlineAsmDialect`.
 #[derive(Copy, Clone, PartialEq)]
 #[repr(C)]
 pub(crate) enum AsmDialect {
@@ -1014,8 +1014,25 @@ unsafe extern "C" {
     pub(crate) fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char;
     pub(crate) fn LLVMSetDataLayout(M: &Module, Triple: *const c_char);
 
-    /// See Module::setModuleInlineAsm.
-    pub(crate) fn LLVMAppendModuleInlineAsm(M: &Module, Asm: *const c_char, Len: size_t);
+    /// Append inline assembly to a module. See `Module::appendModuleInlineAsm`.
+    pub(crate) fn LLVMAppendModuleInlineAsm(
+        M: &Module,
+        Asm: *const c_uchar, // See "PTR_LEN_STR".
+        Len: size_t,
+    );
+
+    /// Create the specified uniqued inline asm string. See `InlineAsm::get()`.
+    pub(crate) fn LLVMGetInlineAsm<'ll>(
+        Ty: &'ll Type,
+        AsmString: *const c_uchar, // See "PTR_LEN_STR".
+        AsmStringSize: size_t,
+        Constraints: *const c_uchar, // See "PTR_LEN_STR".
+        ConstraintsSize: size_t,
+        HasSideEffects: llvm::Bool,
+        IsAlignStack: llvm::Bool,
+        Dialect: AsmDialect,
+        CanThrow: llvm::Bool,
+    ) -> &'ll Value;
 
     // Operations on integer types
     pub(crate) fn LLVMInt1TypeInContext(C: &Context) -> &Type;
@@ -1766,7 +1783,7 @@ unsafe extern "C" {
     pub(crate) fn LLVMDIBuilderCreateNameSpace<'ll>(
         Builder: &DIBuilder<'ll>,
         ParentScope: Option<&'ll Metadata>,
-        Name: *const c_uchar,
+        Name: *const c_uchar, // See "PTR_LEN_STR".
         NameLen: size_t,
         ExportSymbols: llvm::Bool,
     ) -> &'ll Metadata;
@@ -1994,21 +2011,9 @@ unsafe extern "C" {
     /// Prints the statistics collected by `-Zprint-codegen-stats`.
     pub(crate) fn LLVMRustPrintStatistics(OutStr: &RustString);
 
-    /// Prepares inline assembly.
-    pub(crate) fn LLVMRustInlineAsm(
-        Ty: &Type,
-        AsmString: *const c_char,
-        AsmStringLen: size_t,
-        Constraints: *const c_char,
-        ConstraintsLen: size_t,
-        SideEffects: Bool,
-        AlignStack: Bool,
-        Dialect: AsmDialect,
-        CanThrow: Bool,
-    ) -> &Value;
     pub(crate) fn LLVMRustInlineAsmVerify(
         Ty: &Type,
-        Constraints: *const c_char,
+        Constraints: *const c_uchar, // See "PTR_LEN_STR".
         ConstraintsLen: size_t,
     ) -> bool;
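
The "PTR_LEN_STR" convention referenced in these bindings means strings cross the FFI boundary as a byte pointer plus an explicit length, with no NUL terminator. A hedged sketch of both sides of that convention (the receiver here is a hypothetical stand-in for the C++ wrappers):

// Receiver side (stand-in for the C++ wrapper): rebuild the string from a
// pointer + length pair, which is what the `*const c_uchar` + `size_t`
// parameter pairs above describe.
unsafe fn demo_take_str(ptr: *const u8, len: usize) -> String {
    let bytes = unsafe { std::slice::from_raw_parts(ptr, len) };
    String::from_utf8_lossy(bytes).into_owned()
}

fn main() {
    let s = "no NUL terminator needed";
    // Caller side: a `&str` already carries UTF-8 bytes and a length, so it
    // can be passed directly instead of being copied into a CString first.
    let roundtrip = unsafe { demo_take_str(s.as_ptr(), s.len()) };
    assert_eq!(roundtrip, s);
}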
 
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index d14aab06073..ed23f911930 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -363,12 +363,13 @@ pub(crate) fn last_error() -> Option<String> {
     }
 }
 
-/// Owns an [`OperandBundle`], and will dispose of it when dropped.
-pub(crate) struct OperandBundleOwned<'a> {
+/// Owning pointer to an [`OperandBundle`] that will dispose of the bundle
+/// when dropped.
+pub(crate) struct OperandBundleBox<'a> {
     raw: ptr::NonNull<OperandBundle<'a>>,
 }
 
-impl<'a> OperandBundleOwned<'a> {
+impl<'a> OperandBundleBox<'a> {
     pub(crate) fn new(name: &str, vals: &[&'a Value]) -> Self {
         let raw = unsafe {
             LLVMCreateOperandBundle(
@@ -378,21 +379,21 @@ impl<'a> OperandBundleOwned<'a> {
                 vals.len() as c_uint,
             )
         };
-        OperandBundleOwned { raw: ptr::NonNull::new(raw).unwrap() }
+        Self { raw: ptr::NonNull::new(raw).unwrap() }
     }
 
-    /// Returns inner `OperandBundle` type.
+    /// Dereferences to the underlying `&OperandBundle`.
     ///
-    /// This could be a `Deref` implementation, but `OperandBundle` contains an extern type and
-    /// `Deref::Target: ?Sized`.
-    pub(crate) fn raw(&self) -> &OperandBundle<'a> {
+    /// This can't be a `Deref` implementation because `OperandBundle` transitively
+    /// contains an extern type, which is incompatible with `Deref::Target: ?Sized`.
+    pub(crate) fn as_ref(&self) -> &OperandBundle<'a> {
         // SAFETY: The returned reference is opaque and can only be used for FFI.
         // It is valid for as long as `&self` is.
         unsafe { self.raw.as_ref() }
     }
 }
 
-impl Drop for OperandBundleOwned<'_> {
+impl Drop for OperandBundleBox<'_> {
     fn drop(&mut self) {
         unsafe {
             LLVMDisposeOperandBundle(self.raw);
@@ -440,3 +441,11 @@ pub(crate) fn set_dso_local<'ll>(v: &'ll Value) {
         LLVMRustSetDSOLocal(v, true);
     }
 }
+
+/// Safe wrapper for `LLVMAppendModuleInlineAsm`, which delegates to
+/// `Module::appendModuleInlineAsm`.
+pub(crate) fn append_module_inline_asm<'ll>(llmod: &'ll Module, asm: &[u8]) {
+    unsafe {
+        LLVMAppendModuleInlineAsm(llmod, asm.as_ptr(), asm.len());
+    }
+}
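
The `OperandBundleBox` rename highlights the underlying pattern: an owning `NonNull` wrapper that disposes of the foreign object on `Drop` and hands out borrows through an explicit `as_ref`, since a type containing an extern type can't be a `Deref` target. A self-contained sketch of that pattern with a stand-in payload (everything below is hypothetical, not the real LLVM bindings):

use std::ptr::NonNull;

// Stand-in for an FFI-owned object; in the real code this is an LLVM
// `OperandBundle` created and disposed via `LLVMCreateOperandBundle` /
// `LLVMDisposeOperandBundle`.
struct Payload(u32);

struct PayloadBox {
    raw: NonNull<Payload>,
}

impl PayloadBox {
    fn new(value: u32) -> Self {
        // Simulate "create" on the foreign side with a heap allocation.
        let raw = NonNull::new(Box::into_raw(Box::new(Payload(value)))).unwrap();
        Self { raw }
    }

    // Explicit borrow instead of `Deref`, mirroring `OperandBundleBox::as_ref`.
    fn as_ref(&self) -> &Payload {
        // SAFETY: `raw` stays valid for as long as `&self` is borrowed.
        unsafe { self.raw.as_ref() }
    }
}

impl Drop for PayloadBox {
    fn drop(&mut self) {
        // Simulate "dispose" on the foreign side.
        unsafe { drop(Box::from_raw(self.raw.as_ptr())) };
    }
}

fn main() {
    let b = PayloadBox::new(7);
    assert_eq!(b.as_ref().0, 7);
} // `b` is disposed here.
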
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 6412a537a79..8f57f0983ab 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -19,6 +19,7 @@ use rustc_session::config::{PrintKind, PrintRequest};
 use rustc_span::Symbol;
 use rustc_target::spec::{MergeFunctions, PanicStrategy, SmallDataThresholdSupport};
 use rustc_target::target_features::{RUSTC_SPECIAL_FEATURES, RUSTC_SPECIFIC_FEATURES};
+use smallvec::{SmallVec, smallvec};
 
 use crate::back::write::create_informational_target_machine;
 use crate::errors::{
@@ -180,27 +181,27 @@ impl<'a> TargetFeatureFoldStrength<'a> {
 
 pub(crate) struct LLVMFeature<'a> {
     llvm_feature_name: &'a str,
-    dependency: Option<TargetFeatureFoldStrength<'a>>,
+    dependencies: SmallVec<[TargetFeatureFoldStrength<'a>; 1]>,
 }
 
 impl<'a> LLVMFeature<'a> {
     fn new(llvm_feature_name: &'a str) -> Self {
-        Self { llvm_feature_name, dependency: None }
+        Self { llvm_feature_name, dependencies: SmallVec::new() }
     }
 
-    fn with_dependency(
+    fn with_dependencies(
         llvm_feature_name: &'a str,
-        dependency: TargetFeatureFoldStrength<'a>,
+        dependencies: SmallVec<[TargetFeatureFoldStrength<'a>; 1]>,
     ) -> Self {
-        Self { llvm_feature_name, dependency: Some(dependency) }
+        Self { llvm_feature_name, dependencies }
     }
 
-    fn contains(&self, feat: &str) -> bool {
+    fn contains(&'a self, feat: &str) -> bool {
         self.iter().any(|dep| dep == feat)
     }
 
     fn iter(&'a self) -> impl Iterator<Item = &'a str> {
-        let dependencies = self.dependency.iter().map(|feat| feat.as_str());
+        let dependencies = self.dependencies.iter().map(|feat| feat.as_str());
         std::iter::once(self.llvm_feature_name).chain(dependencies)
     }
 }
@@ -210,7 +211,7 @@ impl<'a> IntoIterator for LLVMFeature<'a> {
     type IntoIter = impl Iterator<Item = &'a str>;
 
     fn into_iter(self) -> Self::IntoIter {
-        let dependencies = self.dependency.into_iter().map(|feat| feat.as_str());
+        let dependencies = self.dependencies.into_iter().map(|feat| feat.as_str());
         std::iter::once(self.llvm_feature_name).chain(dependencies)
     }
 }
@@ -240,9 +241,9 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
         &*sess.target.arch
     };
     match (arch, s) {
-        ("x86", "sse4.2") => Some(LLVMFeature::with_dependency(
+        ("x86", "sse4.2") => Some(LLVMFeature::with_dependencies(
             "sse4.2",
-            TargetFeatureFoldStrength::EnableOnly("crc32"),
+            smallvec![TargetFeatureFoldStrength::EnableOnly("crc32")],
         )),
         ("x86", "pclmulqdq") => Some(LLVMFeature::new("pclmul")),
         ("x86", "rdrand") => Some(LLVMFeature::new("rdrnd")),
@@ -262,9 +263,10 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
         ("aarch64", "sme-b16b16") if get_version().0 < 20 => Some(LLVMFeature::new("b16b16")),
         ("aarch64", "flagm2") => Some(LLVMFeature::new("altnzcv")),
         // Rust ties fp and neon together.
-        ("aarch64", "neon") => {
-            Some(LLVMFeature::with_dependency("neon", TargetFeatureFoldStrength::Both("fp-armv8")))
-        }
+        ("aarch64", "neon") => Some(LLVMFeature::with_dependencies(
+            "neon",
+            smallvec![TargetFeatureFoldStrength::Both("fp-armv8")],
+        )),
         // In LLVM neon implicitly enables fp, but we manually enable
         // neon when a feature only implicitly enables fp
         ("aarch64", "fhm") => Some(LLVMFeature::new("fp16fml")),
@@ -273,11 +275,18 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
         ("aarch64", "fpmr") => None, // only existed in 18
         ("arm", "fp16") => Some(LLVMFeature::new("fullfp16")),
         // Filter out features that are not supported by the current LLVM version
+        ("loongarch64", "div32" | "lam-bh" | "lamcas" | "ld-seq-sa" | "scq")
+            if get_version().0 < 20 =>
+        {
+            None
+        }
+        // Filter out features that are not supported by the current LLVM version
         ("riscv32" | "riscv64", "zacas") if get_version().0 < 20 => None,
         // Enable the evex512 target feature if an avx512 target feature is enabled.
-        ("x86", s) if s.starts_with("avx512") => {
-            Some(LLVMFeature::with_dependency(s, TargetFeatureFoldStrength::EnableOnly("evex512")))
-        }
+        ("x86", s) if s.starts_with("avx512") => Some(LLVMFeature::with_dependencies(
+            s,
+            smallvec![TargetFeatureFoldStrength::EnableOnly("evex512")],
+        )),
         // Support for `wide-arithmetic` will first land in LLVM 20 as part of
         // llvm/llvm-project#111598
         ("wasm32" | "wasm64", "wide-arithmetic") if get_version() < (20, 0, 0) => None,
@@ -295,6 +304,21 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
             None
         }
         ("x86", "movrs") if get_version().0 < 20 => None,
+        ("x86", "avx10.1") => Some(LLVMFeature::new("avx10.1-512")),
+        ("x86", "avx10.2") if get_version().0 < 20 => None,
+        ("x86", "avx10.2") if get_version().0 >= 20 => Some(LLVMFeature::new("avx10.2-512")),
+        ("x86", "apxf") => Some(LLVMFeature::with_dependencies(
+            "egpr",
+            smallvec![
+                TargetFeatureFoldStrength::Both("push2pop2"),
+                TargetFeatureFoldStrength::Both("ppx"),
+                TargetFeatureFoldStrength::Both("ndd"),
+                TargetFeatureFoldStrength::Both("ccmp"),
+                TargetFeatureFoldStrength::Both("cf"),
+                TargetFeatureFoldStrength::Both("nf"),
+                TargetFeatureFoldStrength::Both("zu"),
+            ],
+        )),
         (_, s) => Some(LLVMFeature::new(s)),
     }
 }
@@ -844,7 +868,7 @@ pub(crate) fn global_llvm_features(
                         "{}{}",
                         enable_disable, llvm_feature.llvm_feature_name
                     ))
-                    .chain(llvm_feature.dependency.into_iter().filter_map(
+                    .chain(llvm_feature.dependencies.into_iter().filter_map(
                         move |feat| match (enable, feat) {
                             (_, TargetFeatureFoldStrength::Both(f))
                             | (true, TargetFeatureFoldStrength::EnableOnly(f)) => {
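
To make the `with_dependencies` change concrete: a single Rust target feature can now expand into its main LLVM feature plus any number of dependency features, which are then folded into the feature string handed to LLVM. A simplified sketch of that expansion (it ignores the `Both` vs `EnableOnly` distinction that applies when disabling features):

// Simplified illustration of expanding one Rust feature into LLVM feature
// flags; names and folding rules are reduced for brevity.
fn expand(enable: bool, llvm_name: &str, deps: &[&str]) -> Vec<String> {
    let sign = if enable { '+' } else { '-' };
    std::iter::once(llvm_name)
        .chain(deps.iter().copied())
        .map(|f| format!("{sign}{f}"))
        .collect()
}

fn main() {
    // e.g. Rust's x86 "sse4.2" maps to LLVM "sse4.2" plus the "crc32" dependency.
    assert_eq!(expand(true, "sse4.2", &["crc32"]), vec!["+sse4.2", "+crc32"]);
}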