Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/abi.rs                 |  58
 compiler/rustc_codegen_llvm/src/back/lto.rs            |   2
 compiler/rustc_codegen_llvm/src/back/write.rs          |  23
 compiler/rustc_codegen_llvm/src/callee.rs              |   3
 compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 195
 compiler/rustc_codegen_llvm/src/debuginfo/mod.rs       |   1
 compiler/rustc_codegen_llvm/src/errors.rs              |   6
 compiler/rustc_codegen_llvm/src/intrinsic.rs           |   4
 compiler/rustc_codegen_llvm/src/lib.rs                 |   1
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs            |   5
 10 files changed, 179 insertions(+), 119 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 64587f98b8a..9e834b83df4 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,7 +211,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        } else if let PassMode::Cast(cast, _) = &self.mode {
+        } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -274,12 +274,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
-            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             }
             PassMode::Direct(_)
-            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
-            | PassMode::Cast(..) => {
+            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+            | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             }
@@ -332,7 +332,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         let llreturn_ty = match &self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
-            PassMode::Cast(cast, _) => cast.llvm_type(cx),
+            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
                 llargument_tys.push(cx.type_ptr());
                 cx.type_void()
@@ -351,6 +351,11 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
                     // aggregates...
                     if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+                        assert!(
+                            arg.layout.is_sized(),
+                            "`PassMode::Direct` for unsized type: {}",
+                            arg.layout.ty
+                        );
                         // This really shouldn't happen, since `immediate_llvm_type` will use
                         // `layout.fields` to turn this Rust type into an LLVM type. This means all
                         // sorts of Rust type details leak into the ABI. However wasm sadly *does*
@@ -378,8 +383,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
-                    assert!(arg.layout.is_unsized());
+                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+                    // `Indirect` with metadata is only for unsized types, and doesn't work with
+                    // on-stack passing.
+                    assert!(arg.layout.is_unsized() && !on_stack);
                     // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
                     // Any two ABI-compatible unsized types have the same metadata type and
                     // moreover the same metadata value leads to the same dynamic size and
@@ -390,7 +397,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+                    assert!(arg.layout.is_sized());
+                    cx.type_ptr()
+                }
+                PassMode::Cast { cast, pad_i32 } => {
+                    // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+                    assert!(arg.layout.is_sized());
                     // add padding
                     if *pad_i32 {
                         llargument_tys.push(Reg::i32().llvm_type(cx));
@@ -399,7 +412,6 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     // We assume here that ABI-compatible Rust types have the same cast type.
                     cast.llvm_type(cx)
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
             };
             llargument_tys.push(llarg_ty);
         }
@@ -442,13 +454,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(attrs);
                 let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
             _ => {}
@@ -456,25 +468,25 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(attrs);
                     let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                     attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                     assert!(!on_stack);
                     apply(attrs);
-                    apply(extra_attrs);
+                    apply(meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(a);
                     apply(b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(&ArgAttributes::new());
                     }
@@ -504,13 +516,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(bx.cx, attrs);
                 let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                 attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_callsite(
                     llvm::AttributePlace::ReturnValue,
                     &bx.cx,
@@ -532,7 +544,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(bx.cx, attrs);
                     let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                     attributes::apply_to_callsite(
@@ -542,18 +554,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     );
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(bx.cx, attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                     apply(bx.cx, attrs);
-                    apply(bx.cx, extra_attrs);
+                    apply(bx.cx, meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(bx.cx, a);
                     apply(bx.cx, b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(bx.cx, &ArgAttributes::new());
                     }
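
Taken together, the abi.rs changes are mechanical: `PassMode::Cast` becomes a struct variant, and `extra_attrs` is renamed to `meta_attrs` (the attribute set for the metadata half of a wide pointer). The following is a reduced, illustrative model of the enum shape these matches assume — stub types only, not the real `rustc_target` definitions:

    // Stub types standing in for the real rustc_target definitions.
    struct ArgAttributes;
    struct CastTarget;

    enum PassMode {
        Ignore,
        Direct(ArgAttributes),
        Pair(ArgAttributes, ArgAttributes),
        Cast { cast: CastTarget, pad_i32: bool },
        Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
    }

    fn describe(mode: &PassMode) -> &'static str {
        match mode {
            PassMode::Ignore => "not passed at all",
            PassMode::Direct(_) | PassMode::Pair(..) => "passed directly, as immediates",
            PassMode::Cast { pad_i32: true, .. } => "transmuted to the cast type, after an i32 pad",
            PassMode::Cast { pad_i32: false, .. } => "transmuted to the cast type",
            // Metadata attributes present: a wide pointer split into two arguments.
            PassMode::Indirect { meta_attrs: Some(_), .. } => "wide pointer: data pointer plus metadata",
            PassMode::Indirect { on_stack: true, .. } => "byval: caller-allocated stack copy",
            PassMode::Indirect { .. } => "passed by reference",
        }
    }

    fn main() {
        let mode = PassMode::Cast { cast: CastTarget, pad_i32: false };
        println!("{}", describe(&mode));
    }

The new assertions in the diff encode the same invariants the model suggests: `Indirect` with `meta_attrs: Some(_)` is only for unsized types and never on-stack, while `Direct` and `Cast` require sized layouts.
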
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 5cf83b1accb..ba263296bb4 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -605,7 +605,7 @@ pub(crate) fn run_pass_manager(
     module: &mut ModuleCodegen<ModuleLlvm>,
     thin: bool,
 ) -> Result<(), FatalError> {
-    let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
     let config = cgcx.config(module.kind);
 
     // Now we have one massive module inside of llmod. Time to run the
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index de6541635cf..1f394a7335c 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -5,13 +5,17 @@ use crate::back::profiling::{
 use crate::base;
 use crate::common;
 use crate::errors::{
-    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+    WithLlvmError, WriteBytecode,
 };
 use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::llvm_util;
 use crate::type_::Type;
 use crate::LlvmCodegenBackend;
 use crate::ModuleLlvm;
+use llvm::{
+    LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
 use rustc_codegen_ssa::back::link::ensure_removed;
 use rustc_codegen_ssa::back::write::{
     BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
@@ -234,6 +238,22 @@ pub fn target_machine_factory(
         args_cstr_buff
     };
 
+    let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+    match sess.opts.debuginfo_compression {
+        rustc_session::config::DebugInfoCompression::Zlib => {
+            if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zlib" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::Zstd => {
+            if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zstd" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::None => {}
+    };
+    let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
     Arc::new(move |config: TargetMachineFactoryConfig| {
         let split_dwarf_file =
             path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
@@ -259,6 +279,7 @@ pub fn target_machine_factory(
                 relax_elf_relocations,
                 use_init_array,
                 split_dwarf_file.as_ptr(),
+                debuginfo_compression.as_ptr(),
                 force_emulated_tls,
                 args_cstr_buff.as_ptr() as *const c_char,
                 args_cstr_buff.len(),
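
The new block in `target_machine_factory` probes LLVM's build-time capabilities once, before the factory closure is created, so an unsupported `-Zdebuginfo-compression` request warns at most once per session rather than once per target machine. Below is a minimal standalone sketch of that probe-then-warn pattern, with plain functions standing in for the real `LLVMRust*` FFI probes declared in ffi.rs further down (the booleans here are placeholders, not LLVM's actual configuration):

    // Stand-ins for the unsafe FFI probes into LLVM's build configuration.
    fn has_zlib_support() -> bool { true }
    fn has_zstd_support() -> bool { false }

    #[derive(Clone, Copy)]
    enum DebugInfoCompression { None, Zlib, Zstd }

    fn validate_compression(requested: DebugInfoCompression) {
        let (supported, name) = match requested {
            DebugInfoCompression::None => return, // nothing to validate
            DebugInfoCompression::Zlib => (has_zlib_support(), "zlib"),
            DebugInfoCompression::Zstd => (has_zstd_support(), "zstd"),
        };
        if !supported {
            // rustc emits this as a structured diagnostic (UnknownCompression below).
            eprintln!("warning: unknown debuginfo compression algorithm {name}");
        }
    }

    fn main() {
        validate_compression(DebugInfoCompression::Zstd); // warns in this sketch
    }

Note that the validated string is then converted to a `SmallCStr` and moved into the factory closure, so each created target machine receives the same C-string pointer.
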
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 36c098218cf..5254c3f9c9a 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -95,7 +95,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
         unsafe {
             llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
 
-            let is_generic = instance.args.non_erasable_generics().next().is_some();
+            let is_generic =
+                instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
 
             if is_generic {
                 // This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 97a99e51056..8ba7a11abe5 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,13 +1,14 @@
 use crate::common::CodegenCx;
 use crate::coverageinfo;
-use crate::coverageinfo::ffi::{Counter, CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::FunctionCoverage;
 use crate::llvm;
 
 use rustc_codegen_ssa::traits::ConstMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
-use rustc_llvm::RustString;
+use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::CodeRegion;
@@ -55,7 +56,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
         return;
     }
 
-    let mut mapgen = CoverageMapGenerator::new(tcx);
+    let mut global_file_table = GlobalFileTable::new(tcx);
 
     // Encode coverage mappings and generate function records
     let mut function_data = Vec::new();
@@ -64,12 +65,9 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
         let mangled_function_name = tcx.symbol_name(instance).name;
         let source_hash = function_coverage.source_hash();
         let is_used = function_coverage.is_used();
-        let (expressions, counter_regions) =
-            function_coverage.get_expressions_and_counter_regions();
 
-        let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
-            mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
-        });
+        let coverage_mapping_buffer =
+            encode_mappings_for_function(&mut global_file_table, &function_coverage);
 
         if coverage_mapping_buffer.is_empty() {
             if function_coverage.is_used() {
@@ -87,19 +85,14 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
     }
 
     // Encode all filenames referenced by counters/expressions in this module
-    let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
-        coverageinfo::write_filenames_section_to_buffer(
-            mapgen.filenames.iter().map(Symbol::as_str),
-            filenames_buffer,
-        );
-    });
+    let filenames_buffer = global_file_table.into_filenames_buffer();
 
     let filenames_size = filenames_buffer.len();
     let filenames_val = cx.const_bytes(&filenames_buffer);
     let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
 
     // Generate the LLVM IR representation of the coverage map and store it in a well-known global
-    let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
 
     let covfun_section_name = coverageinfo::covfun_section_name(cx);
     for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
@@ -118,13 +111,13 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
     coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
 }
 
-struct CoverageMapGenerator {
-    filenames: FxIndexSet<Symbol>,
+struct GlobalFileTable {
+    global_file_table: FxIndexSet<Symbol>,
 }
 
-impl CoverageMapGenerator {
+impl GlobalFileTable {
     fn new(tcx: TyCtxt<'_>) -> Self {
-        let mut filenames = FxIndexSet::default();
+        let mut global_file_table = FxIndexSet::default();
         // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
         // requires setting the first filename to the compilation directory.
         // Since rustc generates coverage maps with relative paths, the
@@ -133,94 +126,114 @@ impl CoverageMapGenerator {
         let working_dir = Symbol::intern(
             &tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
         );
-        filenames.insert(working_dir);
-        Self { filenames }
+        global_file_table.insert(working_dir);
+        Self { global_file_table }
     }
 
-    /// Using the `expressions` and `counter_regions` collected for the current function, generate
-    /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
-    /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
-    /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
-    fn write_coverage_mapping<'a>(
-        &mut self,
-        expressions: Vec<CounterExpression>,
-        counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
-        coverage_mapping_buffer: &RustString,
-    ) {
-        let mut counter_regions = counter_regions.collect::<Vec<_>>();
-        if counter_regions.is_empty() {
-            return;
-        }
+    fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
+        let (global_file_id, _) = self.global_file_table.insert_full(file_name);
+        global_file_id as u32
+    }
 
-        let mut virtual_file_mapping = Vec::new();
-        let mut mapping_regions = Vec::new();
-        let mut current_file_name = None;
-        let mut current_file_id = 0;
-
-        // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
-        // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
-        // `file_id` (indexing files referenced by the current function), and construct the
-        // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
-        // `filenames` array.
-        counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
-        for (counter, region) in counter_regions {
-            let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
-            let same_file = current_file_name.is_some_and(|p| p == file_name);
-            if !same_file {
-                if current_file_name.is_some() {
-                    current_file_id += 1;
-                }
-                current_file_name = Some(file_name);
-                debug!("  file_id: {} = '{:?}'", current_file_id, file_name);
-                let (filenames_index, _) = self.filenames.insert_full(file_name);
-                virtual_file_mapping.push(filenames_index as u32);
-            }
-            debug!("Adding counter {:?} to map for {:?}", counter, region);
+    fn into_filenames_buffer(self) -> Vec<u8> {
+        // This method takes `self` so that the caller can't accidentally
+        // modify the original file table after encoding it into a buffer.
+
+        llvm::build_byte_buffer(|buffer| {
+            coverageinfo::write_filenames_section_to_buffer(
+                self.global_file_table.iter().map(Symbol::as_str),
+                buffer,
+            );
+        })
+    }
+}
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+    global_file_table: &mut GlobalFileTable,
+    function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+    let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
+
+    let mut counter_regions = counter_regions.collect::<Vec<_>>();
+    if counter_regions.is_empty() {
+        return Vec::new();
+    }
+
+    let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+    let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+    // Sort the list of (counter, region) mapping pairs by region, so that they
+    // can be grouped by filename. Prepare file IDs for each filename, and
+    // prepare the mapping data so that we can pass it through FFI to LLVM.
+    counter_regions.sort_by_key(|(_counter, region)| *region);
+    for counter_regions_for_file in
+        counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+    {
+        // Look up (or allocate) the global file ID for this filename.
+        let file_name = counter_regions_for_file[0].1.file_name;
+        let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+        // Associate that global file ID with a local file ID for this function.
+        let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
+        debug!("  file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
+
+        // For each counter/region pair in this function+file, convert it to a
+        // form suitable for FFI.
+        for &(counter, region) in counter_regions_for_file {
+            let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
+
+            debug!("Adding counter {counter:?} to map for {region:?}");
             mapping_regions.push(CounterMappingRegion::code_region(
                 counter,
-                current_file_id,
+                local_file_id,
                 start_line,
                 start_col,
                 end_line,
                 end_col,
             ));
         }
+    }
 
-        // Encode and append the current function's coverage mapping data
+    // Encode the function's coverage mappings into a buffer.
+    llvm::build_byte_buffer(|buffer| {
         coverageinfo::write_mapping_to_buffer(
-            virtual_file_mapping,
+            virtual_file_mapping.raw,
             expressions,
             mapping_regions,
-            coverage_mapping_buffer,
+            buffer,
         );
-    }
+    })
+}
 
-    /// Construct coverage map header and the array of function records, and combine them into the
-    /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
-    /// specific, well-known section and name.
-    fn generate_coverage_map<'ll>(
-        self,
-        cx: &CodegenCx<'ll, '_>,
-        version: u32,
-        filenames_size: usize,
-        filenames_val: &'ll llvm::Value,
-    ) -> &'ll llvm::Value {
-        debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
-
-        // Create the coverage data header (Note, fields 0 and 2 are now always zero,
-        // as of `llvm::coverage::CovMapVersion::Version4`.)
-        let zero_was_n_records_val = cx.const_u32(0);
-        let filenames_size_val = cx.const_u32(filenames_size as u32);
-        let zero_was_coverage_size_val = cx.const_u32(0);
-        let version_val = cx.const_u32(version);
-        let cov_data_header_val = cx.const_struct(
-            &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
-            /*packed=*/ false,
-        );
+/// Construct coverage map header and the array of function records, and combine them into the
+/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    version: u32,
+    filenames_size: usize,
+    filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+    debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+    // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+    // as of `llvm::coverage::CovMapVersion::Version4`.)
+    let zero_was_n_records_val = cx.const_u32(0);
+    let filenames_size_val = cx.const_u32(filenames_size as u32);
+    let zero_was_coverage_size_val = cx.const_u32(0);
+    let version_val = cx.const_u32(version);
+    let cov_data_header_val = cx.const_struct(
+        &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+        /*packed=*/ false,
+    );
 
-        // Create the complete LLVM coverage data value to add to the LLVM IR
-        cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
-    }
+    // Create the complete LLVM coverage data value to add to the LLVM IR
+    cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
 }
 
 /// Construct a function record and combine it with the function's coverage mapping data.
@@ -317,10 +330,10 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
     {
         let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
 
-        // If a function is marked `#[no_coverage]`, then skip generating a
+        // If a function is marked `#[coverage(off)]`, then skip generating a
         // dead code stub for it.
         if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
-            debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+            debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
             continue;
         }
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 52481a1090c..c862acdc7de 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -349,6 +349,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         type_names::push_generic_params(
             tcx,
             tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+            enclosing_fn_def_id,
             &mut name,
         );
 
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index fced6d504d2..264c273ba30 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -226,3 +226,9 @@ pub(crate) struct WriteBytecode<'a> {
 pub(crate) struct CopyBitcode {
     pub err: std::io::Error,
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+    pub algorithm: &'static str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a9b06030e70..9289c37d763 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -165,7 +165,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
                     let llty = ty.llvm_type(self);
                     self.volatile_load(llty, ptr)
                 } else {
@@ -386,7 +386,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
+            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                 self.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index d283299ac46..ac199624e34 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -10,6 +10,7 @@
 #![feature(iter_intersperse)]
 #![feature(let_chains)]
 #![feature(never_type)]
+#![feature(slice_group_by)]
 #![feature(impl_trait_in_assoc_type)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]
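
The newly enabled `slice_group_by` feature provides the `group_by` call used in `encode_mappings_for_function`: it splits a slice into maximal runs of adjacent elements for which a predicate holds, so a slice sorted by filename yields exactly one run per distinct file. A self-contained illustration of those semantics (requires a nightly toolchain, as in rustc itself):

    #![feature(slice_group_by)]

    fn main() {
        let sorted = ["a.rs", "a.rs", "b.rs", "c.rs", "c.rs"];
        // Adjacent equal filenames form one group; sorting first guarantees
        // each distinct filename appears in exactly one run.
        let runs: Vec<&[&str]> = sorted.group_by(|x, y| x == y).collect();
        assert_eq!(runs.len(), 3);
        assert_eq!(runs[0], ["a.rs", "a.rs"]);
        assert_eq!(runs[2], ["c.rs", "c.rs"]);
    }
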
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 8e96410deaf..2ebfdae39e8 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2131,6 +2131,7 @@ extern "C" {
         RelaxELFRelocations: bool,
         UseInitArray: bool,
         SplitDwarfFile: *const c_char,
+        DebugInfoCompression: *const c_char,
         ForceEmulatedTls: bool,
         ArgsCstrBuff: *const c_char,
         ArgsCstrBuffLen: usize,
@@ -2366,6 +2367,10 @@ extern "C" {
 
     pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
 
+    pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+    pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
     pub fn LLVMRustGetSymbols(
         buf_ptr: *const u8,
         buf_len: usize,