Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
4 files changed, 100 insertions, 81 deletions
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index c207df2fb0b..f6000e72840 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -155,20 +155,6 @@ pub(crate) struct Regions {
 impl Regions {
     /// Returns true if none of this structure's tables contain any regions.
     pub(crate) fn has_no_regions(&self) -> bool {
-        // Every region has a span, so if there are no spans then there are no regions.
-        self.all_cov_spans().next().is_none()
-    }
-
-    pub(crate) fn all_cov_spans(&self) -> impl Iterator<Item = &CoverageSpan> {
-        macro_rules! iter_cov_spans {
-            ( $( $regions:expr ),* $(,)? ) => {
-                std::iter::empty()
-                $(
-                    .chain( $regions.iter().map(|region| &region.cov_span) )
-                )*
-            }
-        }
-
         let Self {
             code_regions,
             expansion_regions,
@@ -177,13 +163,11 @@ impl Regions {
             mcdc_decision_regions,
         } = self;
 
-        iter_cov_spans!(
-            code_regions,
-            expansion_regions,
-            branch_regions,
-            mcdc_branch_regions,
-            mcdc_decision_regions,
-        )
+        code_regions.is_empty()
+            && expansion_regions.is_empty()
+            && branch_regions.is_empty()
+            && mcdc_branch_regions.is_empty()
+            && mcdc_decision_regions.is_empty()
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
index d3a815fabe7..7bdbc685952 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs
@@ -11,7 +11,6 @@ use rustc_abi::Align;
 use rustc_codegen_ssa::traits::{
     BaseTypeCodegenMethods as _, ConstCodegenMethods, StaticCodegenMethods,
 };
-use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::{
     BasicCoverageBlock, CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping,
     MappingKind, Op,
@@ -105,16 +104,6 @@ fn fill_region_tables<'tcx>(
     ids_info: &'tcx CoverageIdsInfo,
     covfun: &mut CovfunRecord<'tcx>,
 ) {
-    // If this function is unused, replace all counters with zero.
-    let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter {
-        let term = if covfun.is_used {
-            ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
-        } else {
-            CovTerm::Zero
-        };
-        ffi::Counter::from_term(term)
-    };
-
     // Currently a function's mappings must all be in the same file, so use the
     // first mapping's span to determine the file.
     let source_map = tcx.sess.source_map();
@@ -126,12 +115,6 @@ fn fill_region_tables<'tcx>(
 
     let local_file_id = covfun.virtual_file_mapping.push_file(&source_file);
 
-    // If this testing flag is set, add an extra unused entry to the local
-    // file table, to help test the code for detecting unused file IDs.
-    if tcx.sess.coverage_inject_unused_local_file() {
-        covfun.virtual_file_mapping.push_file(&source_file);
-    }
-
     // In rare cases, _all_ of a function's spans are discarded, and coverage
     // codegen needs to handle that gracefully to avoid #133606.
     // It's hard for tests to trigger this organically, so instead we set
@@ -152,6 +135,16 @@ fn fill_region_tables<'tcx>(
     // For each counter/region pair in this function+file, convert it to a
     // form suitable for FFI.
     for &Mapping { ref kind, span } in &fn_cov_info.mappings {
+        // If this function is unused, replace all counters with zero.
+        let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter {
+            let term = if covfun.is_used {
+                ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
+            } else {
+                CovTerm::Zero
+            };
+            ffi::Counter::from_term(term)
+        };
+
         let Some(coords) = make_coords(span) else { continue };
         let cov_span = coords.make_coverage_span(local_file_id);
 
@@ -184,19 +177,6 @@ fn fill_region_tables<'tcx>(
     }
 }
 
-/// LLVM requires all local file IDs to have at least one mapping region.
-/// If that's not the case, skip this function, to avoid an assertion failure
-/// (or worse) in LLVM.
-fn check_local_file_table(covfun: &CovfunRecord<'_>) -> bool {
-    let mut local_file_id_seen =
-        IndexVec::<u32, _>::from_elem_n(false, covfun.virtual_file_mapping.local_file_table.len());
-    for cov_span in covfun.regions.all_cov_spans() {
-        local_file_id_seen[cov_span.file_id] = true;
-    }
-
-    local_file_id_seen.into_iter().all(|seen| seen)
-}
-
 /// Generates the contents of the covfun record for this function, which
 /// contains the function's coverage mapping data. The record is then stored
 /// as a global variable in the `__llvm_covfun` section.
@@ -205,10 +185,6 @@ pub(crate) fn generate_covfun_record<'tcx>(
     global_file_table: &GlobalFileTable,
     covfun: &CovfunRecord<'tcx>,
 ) {
-    if !check_local_file_table(covfun) {
-        return;
-    }
-
     let &CovfunRecord {
         mangled_function_name,
         source_hash,
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
index 574463be7ff..39a59560c9d 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs
@@ -39,10 +39,7 @@ impl Coords {
 /// or other expansions), and if it does happen then skipping a span or function is
 /// better than an ICE or `llvm-cov` failure that the user might have no way to avoid.
 pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span) -> Option<Coords> {
-    if span.is_empty() {
-        debug_assert!(false, "can't make coords from empty span: {span:?}");
-        return None;
-    }
+    let span = ensure_non_empty_span(source_map, span)?;
 
     let lo = span.lo();
     let hi = span.hi();
@@ -73,6 +70,29 @@ pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span)
     })
 }
 
+fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> {
+    if !span.is_empty() {
+        return Some(span);
+    }
+
+    // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'.
+    source_map
+        .span_to_source(span, |src, start, end| try {
+            // Adjusting span endpoints by `BytePos(1)` is normally a bug,
+            // but in this case we have specifically checked that the character
+            // we're skipping over is one of two specific ASCII characters, so
+            // adjusting by exactly 1 byte is correct.
+            if src.as_bytes().get(end).copied() == Some(b'{') {
+                Some(span.with_hi(span.hi() + BytePos(1)))
+            } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
+                Some(span.with_lo(span.lo() - BytePos(1)))
+            } else {
+                None
+            }
+        })
+        .ok()?
+}
+
 /// If `llvm-cov` sees a source region that is improperly ordered (end < start),
 /// it will immediately exit with a fatal error. To prevent that from happening,
 /// discard regions that are improperly ordered, or might be interpreted in a
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index c216f0f4a09..b91b6efed45 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -63,14 +63,33 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     }
 }
 
+enum PassMode {
+    Direct,
+    Indirect,
+}
+
+enum SlotSize {
+    Bytes8 = 8,
+    Bytes4 = 4,
+}
+
+enum AllowHigherAlign {
+    No,
+    Yes,
+}
+
 fn emit_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
     target_ty: Ty<'tcx>,
-    indirect: bool,
-    slot_size: Align,
-    allow_higher_align: bool,
+    pass_mode: PassMode,
+    slot_size: SlotSize,
+    allow_higher_align: AllowHigherAlign,
 ) -> &'ll Value {
+    let indirect = matches!(pass_mode, PassMode::Indirect);
+    let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
+    let slot_size = Align::from_bytes(slot_size as u64).unwrap();
+
     let layout = bx.cx.layout_of(target_ty);
     let (llty, size, align) = if indirect {
         (
@@ -179,8 +198,14 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
 
     // On Stack block
     bx.switch_to_block(on_stack);
-    let stack_value =
-        emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
+    let stack_value = emit_ptr_va_arg(
+        bx,
+        list,
+        target_ty,
+        PassMode::Direct,
+        SlotSize::Bytes8,
+        AllowHigherAlign::Yes,
+    );
     bx.br(end);
 
     bx.switch_to_block(end);
@@ -386,29 +411,43 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
     // Determine the va_arg implementation to use. The LLVM va_arg instruction
     // is lacking in some instances, so we should only use it as a fallback.
     let target = &bx.cx.tcx.sess.target;
-    let arch = &bx.cx.tcx.sess.target.arch;
-    match &**arch {
-        // Windows x86
-        "x86" if target.is_like_windows => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
-        }
-        // Generic x86
-        "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
-        // Windows AArch64
-        "aarch64" | "arm64ec" if target.is_like_windows => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
-        }
-        // macOS / iOS AArch64
-        "aarch64" if target.is_like_darwin => {
-            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
+
+    match &*target.arch {
+        "x86" => emit_ptr_va_arg(
+            bx,
+            addr,
+            target_ty,
+            PassMode::Direct,
+            SlotSize::Bytes4,
+            if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+        ),
+        "aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => {
+            emit_ptr_va_arg(
+                bx,
+                addr,
+                target_ty,
+                PassMode::Direct,
+                SlotSize::Bytes8,
+                if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+            )
         }
         "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
         "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
         // Windows x86_64
         "x86_64" if target.is_like_windows => {
            let target_ty_size = bx.cx.size_of(target_ty).bytes();
-            let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
-            emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
+            emit_ptr_va_arg(
+                bx,
+                addr,
+                target_ty,
+                if target_ty_size > 8 || !target_ty_size.is_power_of_two() {
+                    PassMode::Indirect
+                } else {
+                    PassMode::Direct
+                },
+                SlotSize::Bytes8,
+                AllowHigherAlign::No,
+            )
         }
         "xtensa" => emit_xtensa_va_arg(bx, addr, target_ty),
         // For all other architecture/OS combinations fall back to using
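The va_arg.rs change replaces the positional `indirect: bool`, `slot_size: Align`, and `allow_higher_align: bool` parameters of `emit_ptr_va_arg` with the `PassMode`, `SlotSize`, and `AllowHigherAlign` enums, so each call site names what it is passing. Below is a minimal, self-contained sketch of the same pattern; the enum definitions mirror the ones in the diff, while `emit_ptr_va_arg_demo` and its body are hypothetical stand-ins for the real codegen function, which operates on a builder and LLVM values.

// Standalone sketch of the "enums instead of bare bools" pattern from va_arg.rs.
enum PassMode {
    Direct,
    Indirect,
}

enum SlotSize {
    Bytes8 = 8,
    Bytes4 = 4,
}

enum AllowHigherAlign {
    No,
    Yes,
}

fn emit_ptr_va_arg_demo(pass_mode: PassMode, slot_size: SlotSize, allow_higher_align: AllowHigherAlign) {
    // The callee converts the enums back into the primitive values it needs,
    // just as the real function does with `matches!` and `Align::from_bytes`.
    let indirect = matches!(pass_mode, PassMode::Indirect);
    let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
    let slot_size_bytes = slot_size as u64;
    println!("indirect={indirect}, slot={slot_size_bytes} bytes, allow higher align={allow_higher_align}");
}

fn main() {
    // Before the change, a call site like this read roughly
    // `emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true)`,
    // leaving the reader to decode which bool is which.
    emit_ptr_va_arg_demo(PassMode::Direct, SlotSize::Bytes8, AllowHigherAlign::Yes);
    // The Windows x86 case from the diff: 4-byte slots, no higher alignment allowed.
    emit_ptr_va_arg_demo(PassMode::Direct, SlotSize::Bytes4, AllowHigherAlign::No);
    // The Windows x86_64 case for large or non-power-of-two types: passed indirectly.
    emit_ptr_va_arg_demo(PassMode::Indirect, SlotSize::Bytes8, AllowHigherAlign::No);
}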
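The new `ensure_non_empty_span` helper in spans.rs widens an empty span by exactly one byte when the adjacent character is `{` (extend right) or `}` (extend left), instead of rejecting the span outright as the old `debug_assert!` path did. The following simplified sketch expresses the same widening rule on plain byte offsets into a string; `ensure_non_empty` and its tuple spans are illustrative stand-ins for the real `SourceMap`/`BytePos` machinery.

// Simplified model of the span-widening rule: a span is a (lo, hi) pair of
// byte offsets into `src`, and an empty span has lo == hi.
fn ensure_non_empty(src: &str, lo: usize, hi: usize) -> Option<(usize, usize)> {
    if lo != hi {
        // Already non-empty; keep it unchanged.
        return Some((lo, hi));
    }
    let bytes = src.as_bytes();
    if bytes.get(hi).copied() == Some(b'{') {
        // The empty span sits just before a '{': extend one byte to the right.
        Some((lo, hi + 1))
    } else if lo > 0 && bytes[lo - 1] == b'}' {
        // The empty span sits just after a '}': extend one byte to the left.
        Some((lo - 1, hi))
    } else {
        // No adjacent brace; the caller drops this span.
        None
    }
}

fn main() {
    let src = "fn f() {}";
    // Empty span at offset 7, immediately before '{': widened to cover it.
    assert_eq!(ensure_non_empty(src, 7, 7), Some((7, 8)));
    // Empty span at offset 9, immediately after '}': widened to cover it.
    assert_eq!(ensure_non_empty(src, 9, 9), Some((8, 9)));
    // Empty span inside an identifier: no adjacent brace, so it is dropped.
    assert_eq!(ensure_non_empty(src, 1, 1), None);
    println!("span-widening checks passed");
}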
