Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs           | 26
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs | 44
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs  | 28
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs                  | 80
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs                     | 83
5 files changed, 132 insertions(+), 129 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index c207df2fb0b..f6000e72840 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -155,20 +155,6 @@ pub(crate) struct Regions {
 impl Regions {
     /// Returns true if none of this structure's tables contain any regions.
     pub(crate) fn has_no_regions(&self) -> bool {
-        // Every region has a span, so if there are no spans then there are no regions.
-        self.all_cov_spans().next().is_none()
-    }
-
-    pub(crate) fn all_cov_spans(&self) -> impl Iterator<Item = &CoverageSpan> {
-        macro_rules! iter_cov_spans {
-            ( $( $regions:expr ),* $(,)? ) => {
-                std::iter::empty()
-                $(
-                    .chain( $regions.iter().map(|region| &region.cov_span) )
-                )*
-            }
-        }
-
         let Self {
             code_regions,
             expansion_regions,
@@ -177,13 +163,11 @@ impl Regions {
             mcdc_decision_regions,
         } = self;
 
-        iter_cov_spans!(
-            code_regions,
-            expansion_regions,
-            branch_regions,
-            mcdc_branch_regions,
-            mcdc_decision_regions,
-        )
+        code_regions.is_empty()
+            && expansion_regions.is_empty()
+            && branch_regions.is_empty()
+            && mcdc_branch_regions.is_empty()
+            && mcdc_decision_regions.is_empty()
     }
 }
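
A minimal standalone sketch of the exhaustiveness trick the new `has_no_regions` body relies on, with made-up field names rather than the real `Regions` tables: destructuring `self` into all of its fields turns any newly added table into a compile error until the check covers it.

```rust
// Minimal sketch, with hypothetical field names, of the pattern used above:
// destructure `self` so every table must be named. Adding a field to the
// struct then fails to compile here until the check accounts for it.
struct Tables {
    code: Vec<u32>,
    branches: Vec<u32>,
}

impl Tables {
    fn is_empty(&self) -> bool {
        let Self { code, branches } = self;
        code.is_empty() && branches.is_empty()
    }
}

fn main() {
    let t = Tables { code: vec![], branches: vec![3] };
    assert!(!t.is_empty());
}
```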
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
index d3a815fabe7..7bdbc685952 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
@@ -11,7 +11,6 @@ use rustc_abi::Align;
 use rustc_codegen_ssa::traits::{
     BaseTypeCodegenMethods as _, ConstCodegenMethods, StaticCodegenMethods,
 };
-use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::{
     BasicCoverageBlock, CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping,
     MappingKind, Op,
@@ -105,16 +104,6 @@ fn fill_region_tables<'tcx>(
     ids_info: &'tcx CoverageIdsInfo,
     covfun: &mut CovfunRecord<'tcx>,
 ) {
-    // If this function is unused, replace all counters with zero.
-    let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter {
-        let term = if covfun.is_used {
-            ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
-        } else {
-            CovTerm::Zero
-        };
-        ffi::Counter::from_term(term)
-    };
-
     // Currently a function's mappings must all be in the same file, so use the
     // first mapping's span to determine the file.
     let source_map = tcx.sess.source_map();
@@ -126,12 +115,6 @@ fn fill_region_tables<'tcx>(
 
     let local_file_id = covfun.virtual_file_mapping.push_file(&source_file);
 
-    // If this testing flag is set, add an extra unused entry to the local
-    // file table, to help test the code for detecting unused file IDs.
-    if tcx.sess.coverage_inject_unused_local_file() {
-        covfun.virtual_file_mapping.push_file(&source_file);
-    }
-
     // In rare cases, _all_ of a function's spans are discarded, and coverage
     // codegen needs to handle that gracefully to avoid #133606.
     // It's hard for tests to trigger this organically, so instead we set
@@ -152,6 +135,16 @@ fn fill_region_tables<'tcx>(
     // For each counter/region pair in this function+file, convert it to a
     // form suitable for FFI.
     for &Mapping { ref kind, span } in &fn_cov_info.mappings {
+        // If this function is unused, replace all counters with zero.
+        let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter {
+            let term = if covfun.is_used {
+                ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
+            } else {
+                CovTerm::Zero
+            };
+            ffi::Counter::from_term(term)
+        };
+
         let Some(coords) = make_coords(span) else { continue };
         let cov_span = coords.make_coverage_span(local_file_id);
 
@@ -184,19 +177,6 @@ fn fill_region_tables<'tcx>(
     }
 }
 
-/// LLVM requires all local file IDs to have at least one mapping region.
-/// If that's not the case, skip this function, to avoid an assertion failure
-/// (or worse) in LLVM.
-fn check_local_file_table(covfun: &CovfunRecord<'_>) -> bool {
-    let mut local_file_id_seen =
-        IndexVec::<u32, _>::from_elem_n(false, covfun.virtual_file_mapping.local_file_table.len());
-    for cov_span in covfun.regions.all_cov_spans() {
-        local_file_id_seen[cov_span.file_id] = true;
-    }
-
-    local_file_id_seen.into_iter().all(|seen| seen)
-}
-
 /// Generates the contents of the covfun record for this function, which
 /// contains the function's coverage mapping data. The record is then stored
 /// as a global variable in the `__llvm_covfun` section.
@@ -205,10 +185,6 @@ pub(crate) fn generate_covfun_record<'tcx>(
     global_file_table: &GlobalFileTable,
     covfun: &CovfunRecord<'tcx>,
 ) {
-    if !check_local_file_table(covfun) {
-        return;
-    }
-
     let &CovfunRecord {
         mangled_function_name,
         source_hash,
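
A hypothetical sketch of the counter-selection step that the relocated `counter_for_bcb` closure performs for each mapping: an unused function never consults the per-BCB term table and reports the zero counter for every region. `Term` and `counter_for` below are illustrative stand-ins, not the compiler's FFI types.

```rust
// Illustrative stand-ins (not the compiler's FFI types) for the
// counter-selection step: an unused function skips the real term lookup
// and reports the zero counter for every region.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Term {
    Zero,
    Counter(u32),
}

fn counter_for(is_used: bool, lookup: impl Fn() -> Term) -> Term {
    if is_used { lookup() } else { Term::Zero }
}

fn main() {
    assert_eq!(counter_for(false, || Term::Counter(7)), Term::Zero);
    assert_eq!(counter_for(true, || Term::Counter(7)), Term::Counter(7));
}
```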
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
index 574463be7ff..39a59560c9d 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
@@ -39,10 +39,7 @@ impl Coords {
 /// or other expansions), and if it does happen then skipping a span or function is
 /// better than an ICE or `llvm-cov` failure that the user might have no way to avoid.
 pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span) -> Option<Coords> {
-    if span.is_empty() {
-        debug_assert!(false, "can't make coords from empty span: {span:?}");
-        return None;
-    }
+    let span = ensure_non_empty_span(source_map, span)?;
 
     let lo = span.lo();
     let hi = span.hi();
@@ -73,6 +70,29 @@ pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span)
     })
 }
 
+fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> {
+    if !span.is_empty() {
+        return Some(span);
+    }
+
+    // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'.
+    source_map
+        .span_to_source(span, |src, start, end| try {
+            // Adjusting span endpoints by `BytePos(1)` is normally a bug,
+            // but in this case we have specifically checked that the character
+            // we're skipping over is one of two specific ASCII characters, so
+            // adjusting by exactly 1 byte is correct.
+            if src.as_bytes().get(end).copied() == Some(b'{') {
+                Some(span.with_hi(span.hi() + BytePos(1)))
+            } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
+                Some(span.with_lo(span.lo() - BytePos(1)))
+            } else {
+                None
+            }
+        })
+        .ok()?
+}
+
 /// If `llvm-cov` sees a source region that is improperly ordered (end < start),
 /// it will immediately exit with a fatal error. To prevent that from happening,
 /// discard regions that are improperly ordered, or might be interpreted in a
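
The same widening rule as `ensure_non_empty_span`, restated as a standalone sketch over a plain string and byte offsets instead of rustc's `Span`/`SourceMap` types; the only assumption carried over is that '{' and '}' are single ASCII bytes, so adjusting by exactly one byte is safe.

```rust
// Widen an empty byte range to cover an adjacent '{' on the right or '}'
// on the left; otherwise report that no usable range exists.
fn widen_empty_range(src: &str, start: usize, end: usize) -> Option<(usize, usize)> {
    if start != end {
        return Some((start, end));
    }
    let bytes = src.as_bytes();
    if bytes.get(end).copied() == Some(b'{') {
        // Widening by exactly one byte is sound because '{' is ASCII.
        Some((start, end + 1))
    } else if start > 0 && bytes[start - 1] == b'}' {
        Some((start - 1, end))
    } else {
        None
    }
}

fn main() {
    assert_eq!(widen_empty_range("fn f() {}", 7, 7), Some((7, 8)));
    assert_eq!(widen_empty_range("fn f() {}", 9, 9), Some((8, 9)));
    assert_eq!(widen_empty_range("fn f()   ", 7, 7), None);
}
```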
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 5ca57375292..e8629aeebb9 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,11 +15,10 @@ use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{Span, Symbol, sym};
 use rustc_symbol_mangling::mangle_internal_symbol;
-use rustc_target::callconv::{FnAbi, PassMode};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
 use tracing::debug;
 
-use crate::abi::{FnAbiLlvmExt, LlvmType};
+use crate::abi::FnAbiLlvmExt;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
 use crate::llvm::{self, Metadata};
@@ -165,9 +164,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     fn codegen_intrinsic_call(
         &mut self,
         instance: ty::Instance<'tcx>,
-        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, &'ll Value>],
-        llresult: &'ll Value,
+        result: PlaceRef<'tcx, &'ll Value>,
         span: Span,
     ) -> Result<(), ty::Instance<'tcx>> {
         let tcx = self.tcx;
@@ -184,7 +182,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         let name = tcx.item_name(def_id);
 
         let llret_ty = self.layout_of(ret_ty).llvm_type(self);
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 
         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
@@ -255,7 +252,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     args[0].immediate(),
                     args[1].immediate(),
                     args[2].immediate(),
-                    llresult,
+                    result,
                 );
                 return Ok(());
             }
@@ -264,7 +261,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.backend_repr {
+                match result.layout.backend_repr {
                     BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
@@ -299,18 +296,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             sym::volatile_load | sym::unaligned_volatile_load => {
-                let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
-                    let llty = ty.llvm_type(self);
-                    self.volatile_load(llty, ptr)
-                } else {
-                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
-                };
+                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
-                    self.align_of(tp_ty).bytes() as u32
+                    result.layout.align.abi.bytes() as u32
                 };
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
@@ -629,14 +620,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
         };
 
-        if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.val.llval, result.val.align);
-            } else {
-                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
-                    .val
-                    .store(self, result);
-            }
+        if result.layout.ty.is_bool() {
+            OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+                .val
+                .store(self, result);
+        } else if !result.layout.ty.is_unit() {
+            self.store_to_place(llval, result.val);
         }
         Ok(())
     }
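
One representation detail relevant to the `is_bool` special case above, sketched with plain Rust stand-ins rather than the codegen types: in LLVM codegen a `bool` is an `i1` as an SSA immediate but is stored as an `i8` in memory, so store and load paths convert between the two.

```rust
// Plain-Rust stand-in for the i1-vs-i8 distinction: `bool` values are
// 1-bit immediates in SSA form but occupy a full byte in memory, so a
// widening/narrowing step sits between the two representations.
fn to_memory_repr(immediate: bool) -> u8 {
    immediate as u8 // widen i1 -> i8 when storing
}

fn to_immediate_repr(byte: u8) -> bool {
    byte != 0 // narrow i8 -> i1 when loading
}

fn main() {
    assert_eq!(to_memory_repr(true), 1);
    assert!(to_immediate_repr(to_memory_repr(true)));
}
```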
@@ -688,20 +677,19 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     }
 }
 
-fn catch_unwind_intrinsic<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn catch_unwind_intrinsic<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.call(try_func_ty, None, None, try_func, &[data], None, None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
-        let ret_align = bx.tcx().data_layout.i32_align.abi;
-        bx.store(bx.const_i32(0), dest, ret_align);
+        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, try_func, data, catch_func, dest);
     } else if wants_wasm_eh(bx.sess()) {
@@ -720,12 +708,12 @@ fn catch_unwind_intrinsic<'ll>(
 // instructions are meant to work for all targets, as of the time of this
 // writing, however, LLVM does not recommend the usage of these new instructions
 // as the old ones are still more optimized.
-fn codegen_msvc_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_msvc_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         bx.set_personality_fn(bx.eh_personality());
@@ -865,17 +853,16 @@ fn codegen_msvc_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // WASM's definition of the `rust_try` function.
-fn codegen_wasm_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_wasm_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         bx.set_personality_fn(bx.eh_personality());
@@ -939,8 +926,7 @@ fn codegen_wasm_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Definition of the standard `try` function for Rust using the GNU-like model
@@ -954,12 +940,12 @@ fn codegen_wasm_try<'ll>(
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-fn codegen_gnu_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_gnu_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         // Codegens the shims described above:
@@ -1006,19 +992,18 @@ fn codegen_gnu_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Variant of codegen_gnu_try used for emscripten where Rust panics are
 // implemented using C++ exceptions. Here we use exceptions of a specific type
 // (`struct rust_panic`) to represent Rust panics.
-fn codegen_emcc_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_emcc_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         // Codegens the shims described above:
@@ -1089,8 +1074,7 @@ fn codegen_emcc_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Helper function to give a Block to a closure to codegen a shim function.
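
The recurring change in this file, sketched with hypothetical types rather than the real `PlaceRef`/`OperandValue` API: callees receive one destination that carries its own layout, instead of a raw pointer plus a separately computed `i32_align` at every store site.

```rust
// Hypothetical destination-with-layout; the point is only that alignment
// travels with the destination instead of being recomputed per store.
#[derive(Clone, Copy)]
struct Layout {
    size: usize,
    align: usize,
}

struct Dest<'a> {
    buf: &'a mut [u8],
    layout: Layout,
}

fn store_i32(dest: &mut Dest<'_>, value: i32) {
    // The checks use the layout the destination already knows, rather
    // than a separate alignment lookup at the call site.
    assert!(dest.layout.align >= std::mem::align_of::<i32>());
    assert!(dest.layout.size >= 4);
    dest.buf[..4].copy_from_slice(&value.to_ne_bytes());
}

fn main() {
    let mut raw = [0u8; 4];
    let mut dest = Dest { buf: &mut raw, layout: Layout { size: 4, align: 4 } };
    store_i32(&mut dest, 7);
    assert_eq!(raw, 7i32.to_ne_bytes());
}
```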
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index c216f0f4a09..b91b6efed45 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -63,14 +63,33 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     }
 }
 
+enum PassMode {
+    Direct,
+    Indirect,
+}
+
+enum SlotSize {
+    Bytes8 = 8,
+    Bytes4 = 4,
+}
+
+enum AllowHigherAlign {
+    No,
+    Yes,
+}
+
 fn emit_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
     target_ty: Ty<'tcx>,
-    indirect: bool,
-    slot_size: Align,
-    allow_higher_align: bool,
+    pass_mode: PassMode,
+    slot_size: SlotSize,
+    allow_higher_align: AllowHigherAlign,
 ) -> &'ll Value {
+    let indirect = matches!(pass_mode, PassMode::Indirect);
+    let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
+    let slot_size = Align::from_bytes(slot_size as u64).unwrap();
+
     let layout = bx.cx.layout_of(target_ty);
     let (llty, size, align) = if indirect {
         (
@@ -179,8 +198,14 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
 
     // On Stack block
     bx.switch_to_block(on_stack);
-    let stack_value =
-        emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
+    let stack_value = emit_ptr_va_arg(
+        bx,
+        list,
+        target_ty,
+        PassMode::Direct,
+        SlotSize::Bytes8,
+        AllowHigherAlign::Yes,
+    );
     bx.br(end);
 
     bx.switch_to_block(end);
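
A standalone sketch of the call-site-readability pattern `emit_ptr_va_arg` adopts here: boolean flags and magic alignment numbers become dedicated enums, so calls like the one above name their intent. The `describe_slot` function below is illustrative only; the enum definitions mirror the ones added in this file.

```rust
// Boolean flags and magic numbers replaced by enums, then converted back
// to the primitive forms an implementation would use internally.
enum PassMode {
    Direct,
    Indirect,
}

enum SlotSize {
    Bytes4 = 4,
    Bytes8 = 8,
}

enum AllowHigherAlign {
    No,
    Yes,
}

fn describe_slot(pass_mode: PassMode, slot_size: SlotSize, allow: AllowHigherAlign) -> String {
    let indirect = matches!(pass_mode, PassMode::Indirect);
    let slot_bytes = slot_size as u64;
    let allow_higher_align = matches!(allow, AllowHigherAlign::Yes);
    format!("indirect={indirect}, slot={slot_bytes}B, higher_align_ok={allow_higher_align}")
}

fn main() {
    // Compare with `describe_slot(false, 8, true)`: each argument now says
    // what it means without consulting the signature.
    println!("{}", describe_slot(PassMode::Direct, SlotSize::Bytes8, AllowHigherAlign::Yes));
    println!("{}", describe_slot(PassMode::Indirect, SlotSize::Bytes4, AllowHigherAlign::No));
}
```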
@@ -386,29 +411,43 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
     // Determine the va_arg implementation to use. The LLVM va_arg instruction
     // is lacking in some instances, so we should only use it as a fallback.
     let target = &bx.cx.tcx.sess.target;
-    let arch = &bx.cx.tcx.sess.target.arch;
-    match &**arch {
-        // Windows x86
-        "x86" if target.is_like_windows => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
-        }
-        // Generic x86
-        "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
-        // Windows AArch64
-        "aarch64" | "arm64ec" if target.is_like_windows => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
-        }
-        // macOS / iOS AArch64
-        "aarch64" if target.is_like_darwin => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
+
+    match &*target.arch {
+        "x86" => emit_ptr_va_arg(
+            bx,
+            addr,
+            target_ty,
+            PassMode::Direct,
+            SlotSize::Bytes4,
+            if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+        ),
+        "aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => {
+            emit_ptr_va_arg(
+                bx,
+                addr,
+                target_ty,
+                PassMode::Direct,
+                SlotSize::Bytes8,
+                if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+            )
         }
         "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
         "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
         // Windows x86_64
         "x86_64" if target.is_like_windows => {
             let target_ty_size = bx.cx.size_of(target_ty).bytes();
-            let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
-            emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
+            emit_ptr_va_arg(
+                bx,
+                addr,
+                target_ty,
+                if target_ty_size > 8 || !target_ty_size.is_power_of_two() {
+                    PassMode::Indirect
+                } else {
+                    PassMode::Direct
+                },
+                SlotSize::Bytes8,
+                AllowHigherAlign::No,
+            )
         }
         "xtensa" => emit_xtensa_va_arg(bx, addr, target_ty),
         // For all other architecture/OS combinations fall back to using