Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs                   | 106
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs              |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs            |  25
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs                  |   9
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs                |   5
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs                |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs               |   8
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs |  72
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs   |   9
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs      |  57
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs              |   4
11 files changed, 160 insertions(+), 139 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index bb74dfe1487..d1804cb49ad 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -45,7 +45,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             match *op {
                 InlineAsmOperandRef::Out { reg, late, place } => {
                     let is_target_supported = |reg_class: InlineAsmRegClass| {
-                        for &(_, feature) in reg_class.supported_types(asm_arch) {
+                        for &(_, feature) in reg_class.supported_types(asm_arch, true) {
                             if let Some(feature) = feature {
                                 if self
                                     .tcx
@@ -85,7 +85,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                         }
                         continue;
                     } else if !is_target_supported(reg.reg_class())
-                        || reg.reg_class().is_clobber_only(asm_arch)
+                        || reg.reg_class().is_clobber_only(asm_arch, true)
                     {
                         // We turn discarded outputs into clobber constraints
                         // if the target feature needed by the register class is
@@ -342,24 +342,32 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
         attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
 
-        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
-        if let Some(dest) = dest {
-            self.switch_to_block(dest);
-        }
+        // Write results to outputs. We need to do this for all possible control flow.
+        //
+        // Note that `dest` maybe populated with unreachable_block when asm goto with outputs
+        // is used (because we need to codegen callbr which always needs a destination), so
+        // here we use the NORETURN option to determine if `dest` should be used.
+        for block in (if options.contains(InlineAsmOptions::NORETURN) { None } else { Some(dest) })
+            .into_iter()
+            .chain(labels.iter().copied().map(Some))
+        {
+            if let Some(block) = block {
+                self.switch_to_block(block);
+            }
 
-        // Write results to outputs
-        for (idx, op) in operands.iter().enumerate() {
-            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
-            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
-            {
-                let value = if output_types.len() == 1 {
-                    result
-                } else {
-                    self.extract_value(result, op_idx[&idx] as u64)
-                };
-                let value =
-                    llvm_fixup_output(self, value, reg.reg_class(), &place.layout, instance);
-                OperandValue::Immediate(value).store(self, place);
+            for (idx, op) in operands.iter().enumerate() {
+                if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
+                | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
+                {
+                    let value = if output_types.len() == 1 {
+                        result
+                    } else {
+                        self.extract_value(result, op_idx[&idx] as u64)
+                    };
+                    let value =
+                        llvm_fixup_output(self, value, reg.reg_class(), &place.layout, instance);
+                    OperandValue::Immediate(value).store(self, place);
+                }
             }
         }
     }
@@ -637,6 +645,7 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
         | Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
         Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w",
         Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+        Hexagon(HexagonInlineAsmRegClass::preg) => unreachable!("clobber-only"),
         LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
         LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
         Mips(MipsInlineAsmRegClass::reg) => "r",
@@ -647,9 +656,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
         PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
         PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
         PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
-        PowerPC(PowerPCInlineAsmRegClass::cr)
-        | PowerPC(PowerPCInlineAsmRegClass::xer)
-        | PowerPC(PowerPCInlineAsmRegClass::vreg) => {
+        PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
+        PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => {
             unreachable!("clobber-only")
         }
         RiscV(RiscVInlineAsmRegClass::reg) => "r",
@@ -678,7 +686,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
         S390x(S390xInlineAsmRegClass::reg) => "r",
         S390x(S390xInlineAsmRegClass::reg_addr) => "a",
         S390x(S390xInlineAsmRegClass::freg) => "f",
-        S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => {
+        S390x(S390xInlineAsmRegClass::vreg) => "v",
+        S390x(S390xInlineAsmRegClass::areg) => {
            unreachable!("clobber-only")
         }
         Sparc(SparcInlineAsmRegClass::reg) => "r",
@@ -804,6 +813,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
         | Arm(ArmInlineAsmRegClass::qreg_low8)
         | Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2),
         Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        Hexagon(HexagonInlineAsmRegClass::preg) => unreachable!("clobber-only"),
         LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
         LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
         Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
@@ -814,9 +824,8 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
         PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
         PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
         PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
-        PowerPC(PowerPCInlineAsmRegClass::cr)
-        | PowerPC(PowerPCInlineAsmRegClass::xer)
-        | PowerPC(PowerPCInlineAsmRegClass::vreg) => {
+        PowerPC(PowerPCInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i32(), 4),
+        PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => {
             unreachable!("clobber-only")
         }
         RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
@@ -844,7 +853,8 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
         Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
         S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(),
         S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
-        S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => {
+        S390x(S390xInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i64(), 2),
+        S390x(S390xInlineAsmRegClass::areg) => {
             unreachable!("clobber-only")
         }
         Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(),
@@ -1030,6 +1040,26 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let value = bx.or(value, bx.const_u32(0xFFFF_0000));
             bx.bitcast(value, bx.type_f32())
         }
+        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
+            if s.primitive() == Primitive::Float(Float::F32) =>
+        {
+            let value = bx.insert_element(
+                bx.const_undef(bx.type_vector(bx.type_f32(), 4)),
+                value,
+                bx.const_usize(0),
+            );
+            bx.bitcast(value, bx.type_vector(bx.type_f32(), 4))
+        }
+        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
+            if s.primitive() == Primitive::Float(Float::F64) =>
+        {
+            let value = bx.insert_element(
+                bx.const_undef(bx.type_vector(bx.type_f64(), 2)),
+                value,
+                bx.const_usize(0),
+            );
+            bx.bitcast(value, bx.type_vector(bx.type_f64(), 2))
+        }
         _ => value,
     }
 }
@@ -1165,6 +1195,18 @@ fn llvm_fixup_output<'ll, 'tcx>(
             let value = bx.trunc(value, bx.type_i16());
             bx.bitcast(value, bx.type_f16())
         }
+        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
+            if s.primitive() == Primitive::Float(Float::F32) =>
+        {
+            let value = bx.bitcast(value, bx.type_vector(bx.type_f32(), 4));
+            bx.extract_element(value, bx.const_usize(0))
+        }
+        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
+            if s.primitive() == Primitive::Float(Float::F64) =>
+        {
+            let value = bx.bitcast(value, bx.type_vector(bx.type_f64(), 2));
+            bx.extract_element(value, bx.const_usize(0))
+        }
         _ => value,
     }
 }
@@ -1289,6 +1331,16 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
         {
             cx.type_f32()
         }
+        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
+            if s.primitive() == Primitive::Float(Float::F32) =>
+        {
+            cx.type_vector(cx.type_f32(), 4)
+        }
+        (PowerPC(PowerPCInlineAsmRegClass::vreg), BackendRepr::Scalar(s))
+            if s.primitive() == Primitive::Float(Float::F64) =>
+        {
+            cx.type_vector(cx.type_f64(), 2)
+        }
         _ => layout.llvm_type(cx),
     }
 }
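
Note: the asm.rs rewrite above is needed because `asm goto` with outputs lowers to LLVM's `callbr`, which always needs a fallthrough destination, so output registers must now be stored in the fallthrough block and in every label block. A minimal sketch of what this enables at the language level, assuming a nightly toolchain with the `asm_goto`/`asm_goto_with_outputs` feature gates on x86-64 (the function and asm body are illustrative, not taken from this patch):

    #![feature(asm_goto, asm_goto_with_outputs)]
    use std::arch::asm;

    /// Adds 1 to `x`; jumps to `overflow` on carry. The output register
    /// must be meaningful on both control-flow edges, which is exactly
    /// what the per-block stores added above provide.
    fn add_one(x: u64) -> Option<u64> {
        let out: u64;
        unsafe {
            asm!(
                "add {x}, 1",
                "jc {overflow}",
                x = inout(reg) x => out,
                overflow = label { return None },
            );
        }
        Some(out)
    }

    fn main() {
        assert_eq!(add_one(1), Some(2));
        assert_eq!(add_one(u64::MAX), None);
    }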
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 48beb9be2b2..f6d2ec24da6 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -148,7 +148,7 @@ fn prepare_lto(
     // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
     // __llvm_profile_runtime, therefore we won't know until link time if this symbol
     // should have default visibility.
-    symbols_below_threshold.push(CString::new("__llvm_profile_counter_bias").unwrap());
+    symbols_below_threshold.push(c"__llvm_profile_counter_bias".to_owned());
 
     Ok((symbols_below_threshold, upstream_modules))
 }
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index a65ae4df1e3..bde6668929c 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -61,6 +61,7 @@ fn write_output_file<'ll>(
     dwo_output: Option<&Path>,
     file_type: llvm::FileType,
     self_profiler_ref: &SelfProfilerRef,
+    verify_llvm_ir: bool,
 ) -> Result<(), FatalError> {
     debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
     unsafe {
@@ -79,6 +80,7 @@ fn write_output_file<'ll>(
             output_c.as_ptr(),
             dwo_output_ptr,
             file_type,
+            verify_llvm_ir,
         );
 
         // Record artifact sizes for self-profiling
@@ -507,7 +509,7 @@ fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
 }
 
 fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
-    config.instrument_coverage.then(|| CString::new("default_%m_%p.profraw").unwrap())
+    config.instrument_coverage.then(|| c"default_%m_%p.profraw".to_owned())
 }
 
 pub(crate) unsafe fn llvm_optimize(
@@ -840,6 +842,7 @@ pub(crate) unsafe fn codegen(
                     None,
                     llvm::FileType::AssemblyFile,
                     &cgcx.prof,
+                    config.verify_llvm_ir,
                 )
             })?;
         }
@@ -877,6 +880,7 @@ pub(crate) unsafe fn codegen(
                         dwo_out,
                         llvm::FileType::ObjectFile,
                         &cgcx.prof,
+                        config.verify_llvm_ir,
                     )
                 })?;
             }
@@ -955,24 +959,7 @@ pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) ->
     }
 }
 
-/// Embed the bitcode of an LLVM module in the LLVM module itself.
-///
-/// This is done primarily for iOS where it appears to be standard to compile C
-/// code at least with `-fembed-bitcode` which creates two sections in the
-/// executable:
-///
-/// * __LLVM,__bitcode
-/// * __LLVM,__cmdline
-///
-/// It appears *both* of these sections are necessary to get the linker to
-/// recognize what's going on. A suitable cmdline value is taken from the
-/// target spec.
-///
-/// Furthermore debug/O1 builds don't actually embed bitcode but rather just
-/// embed an empty section.
-///
-/// Basically all of this is us attempting to follow in the footsteps of clang
-/// on iOS. See #35968 for lots more info.
+/// Embed the bitcode of an LLVM module for LTO in the LLVM module itself.
 unsafe fn embed_bitcode(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     llcx: &llvm::Context,
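
Note: the two `CString::new(...).unwrap()` removals above (in lto.rs and write.rs) rely on C-string literals, stabilized in Rust 1.77: `c"…"` produces a `&'static CStr` whose lack of interior NUL bytes is checked at compile time, so the fallible constructor and its `unwrap()` disappear, and `.to_owned()` converts to an owned `CString` only where one is required. A standalone illustration (not part of the patch):

    use std::ffi::{CStr, CString};

    fn main() {
        // Compile-time checked: a stray `\0` inside the literal is a build error.
        let lit: &'static CStr = c"__llvm_profile_counter_bias";
        // Convert to an owned `CString` only at the point one is needed.
        let owned: CString = lit.to_owned();
        assert_eq!(owned.as_bytes_with_nul().last(), Some(&0u8));
    }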
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 32793894794..f62310bd948 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -172,3 +172,12 @@ pub(crate) fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
         Visibility::Protected => llvm::Visibility::Protected,
     }
 }
+
+pub(crate) fn set_variable_sanitizer_attrs(llval: &Value, attrs: &CodegenFnAttrs) {
+    if attrs.no_sanitize.contains(SanitizerSet::ADDRESS) {
+        unsafe { llvm::LLVMRustSetNoSanitizeAddress(llval) };
+    }
+    if attrs.no_sanitize.contains(SanitizerSet::HWADDRESS) {
+        unsafe { llvm::LLVMRustSetNoSanitizeHWAddress(llval) };
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index e0a2de3366c..ec77f32caf4 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -104,7 +104,10 @@ pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'t
 
             let is_hidden = if is_generic {
                 // This is a monomorphization of a generic function.
-                if !cx.tcx.sess.opts.share_generics() {
+                if !(cx.tcx.sess.opts.share_generics()
+                    || tcx.codegen_fn_attrs(instance_def_id).inline
+                        == rustc_attr::InlineAttr::Never)
+                {
                     // When not sharing generics, all instances are in the same
                     // crate and have hidden visibility.
                     true
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 6f5ffbb4b34..c7114480d8b 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -470,6 +470,8 @@ impl<'ll> CodegenCx<'ll, '_> {
             base::set_link_section(g, attrs);
         }
 
+        base::set_variable_sanitizer_attrs(g, attrs);
+
         if attrs.flags.contains(CodegenFnAttrFlags::USED) {
             // `USED` and `USED_LINKER` can't be used together.
             assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER));
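
Note: the new `base::set_variable_sanitizer_attrs` helper (wired into consts.rs above, with its FFI declarations added to llvm/ffi.rs below) lets codegen exempt individual globals from AddressSanitizer and HWAddressSanitizer instrumentation. Presumably this serves the unstable `#[no_sanitize]` attribute when applied to statics; a hypothetical usage sketch, assuming a nightly compiler and a sanitizer-enabled build:

    #![feature(no_sanitize)]

    // Ask codegen to skip ASan instrumentation for this one global, e.g.
    // because it is deliberately accessed with out-of-bounds-looking
    // patterns from hand-written code. (Illustrative; not from the patch.)
    #[no_sanitize(address)]
    static SCRATCH: [u8; 64] = [0; 64];

    fn main() {
        println!("{}", SCRATCH[0]);
    }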
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 841c110b3c8..8218126ea29 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -82,8 +82,8 @@ pub(crate) struct CodegenCx<'ll, 'tcx> {
 
     pub isize_ty: &'ll Type,
 
-    /// Extra codegen state needed when coverage instrumentation is enabled.
-    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
+    /// Extra per-CGU codegen state needed when coverage instrumentation is enabled.
+    pub coverage_cx: Option<coverageinfo::CguCoverageContext<'ll, 'tcx>>,
 
     pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,
 
     eh_personality: Cell<Option<&'ll Value>>,
@@ -525,7 +525,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
 
         let coverage_cx =
-            tcx.sess.instrument_coverage().then(coverageinfo::CrateCoverageContext::new);
+            tcx.sess.instrument_coverage().then(coverageinfo::CguCoverageContext::new);
 
         let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
             let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
@@ -576,7 +576,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     /// Extra state that is only available when coverage instrumentation is enabled.
     #[inline]
     #[track_caller]
-    pub(crate) fn coverage_cx(&self) -> &coverageinfo::CrateCoverageContext<'ll, 'tcx> {
+    pub(crate) fn coverage_cx(&self) -> &coverageinfo::CguCoverageContext<'ll, 'tcx> {
         self.coverage_cx.as_ref().expect("only called when coverage instrumentation is enabled")
     }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index e3c0df27883..0752c718c70 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,12 +1,13 @@
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::CoverageIdsInfo;
 use rustc_middle::mir::coverage::{
     CounterId, CovTerm, Expression, ExpressionId, FunctionCoverageInfo, Mapping, MappingKind, Op,
     SourceRegion,
 };
 use rustc_middle::ty::Instance;
-use tracing::{debug, instrument};
+use tracing::debug;
 
 use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
 
@@ -16,17 +17,8 @@ use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
 pub(crate) struct FunctionCoverageCollector<'tcx> {
     /// Coverage info that was attached to this function by the instrumentor.
     function_coverage_info: &'tcx FunctionCoverageInfo,
+    ids_info: &'tcx CoverageIdsInfo,
     is_used: bool,
-
-    /// Tracks which counters have been seen, so that we can identify mappings
-    /// to counters that were optimized out, and set them to zero.
-    counters_seen: BitSet<CounterId>,
-    /// Contains all expression IDs that have been seen in an `ExpressionUsed`
-    /// coverage statement, plus all expression IDs that aren't directly used
-    /// by any mappings (and therefore do not have expression-used statements).
-    /// After MIR traversal is finished, we can conclude that any IDs missing
-    /// from this set must have had their statements deleted by MIR opts.
-    expressions_seen: BitSet<ExpressionId>,
 }
 
 impl<'tcx> FunctionCoverageCollector<'tcx> {
@@ -34,21 +26,24 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
     pub(crate) fn new(
         instance: Instance<'tcx>,
         function_coverage_info: &'tcx FunctionCoverageInfo,
+        ids_info: &'tcx CoverageIdsInfo,
     ) -> Self {
-        Self::create(instance, function_coverage_info, true)
+        Self::create(instance, function_coverage_info, ids_info, true)
     }
 
     /// Creates a new set of coverage data for an unused (never called) function.
     pub(crate) fn unused(
         instance: Instance<'tcx>,
         function_coverage_info: &'tcx FunctionCoverageInfo,
+        ids_info: &'tcx CoverageIdsInfo,
     ) -> Self {
-        Self::create(instance, function_coverage_info, false)
+        Self::create(instance, function_coverage_info, ids_info, false)
     }
 
     fn create(
         instance: Instance<'tcx>,
         function_coverage_info: &'tcx FunctionCoverageInfo,
+        ids_info: &'tcx CoverageIdsInfo,
         is_used: bool,
     ) -> Self {
         let num_counters = function_coverage_info.num_counters;
@@ -58,44 +53,7 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
             num_counters={num_counters}, num_expressions={num_expressions}, is_used={is_used}"
         );
 
-        // Create a filled set of expression IDs, so that expressions not
-        // directly used by mappings will be treated as "seen".
-        // (If they end up being unused, LLVM will delete them for us.)
-        let mut expressions_seen = BitSet::new_filled(num_expressions);
-
-        // For each expression ID that is directly used by one or more mappings,
-        // mark it as not-yet-seen. This indicates that we expect to see a
-        // corresponding `ExpressionUsed` statement during MIR traversal.
-        for mapping in function_coverage_info.mappings.iter() {
-            // Currently we only worry about ordinary code mappings.
-            // For branch and MC/DC mappings, expressions might not correspond
-            // to any particular point in the control-flow graph.
-            // (Keep this in sync with the injection of `ExpressionUsed`
-            // statements in the `InstrumentCoverage` MIR pass.)
-            if let MappingKind::Code(term) = mapping.kind
-                && let CovTerm::Expression(id) = term
-            {
-                expressions_seen.remove(id);
-            }
-        }
-
-        Self {
-            function_coverage_info,
-            is_used,
-            counters_seen: BitSet::new_empty(num_counters),
-            expressions_seen,
-        }
-    }
-
-    /// Marks a counter ID as having been seen in a counter-increment statement.
-    #[instrument(level = "debug", skip(self))]
-    pub(crate) fn mark_counter_id_seen(&mut self, id: CounterId) {
-        self.counters_seen.insert(id);
-    }
-
-    /// Marks an expression ID as having been seen in an expression-used statement.
-    #[instrument(level = "debug", skip(self))]
-    pub(crate) fn mark_expression_id_seen(&mut self, id: ExpressionId) {
-        self.expressions_seen.insert(id);
+        Self { function_coverage_info, ids_info, is_used }
     }
 
     /// Identify expressions that will always have a value of zero, and note
@@ -117,7 +75,7 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
         // (By construction, expressions can only refer to other expressions
         // that have lower IDs, so one pass is sufficient.)
         for (id, expression) in self.function_coverage_info.expressions.iter_enumerated() {
-            if !self.expressions_seen.contains(id) {
+            if !self.is_used || !self.ids_info.expressions_seen.contains(id) {
                 // If an expression was not seen, it must have been optimized away,
                 // so any operand that refers to it can be replaced with zero.
                 zero_expressions.insert(id);
@@ -146,7 +104,7 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
                 assert_operand_expression_is_lower(id);
             }
 
-            if is_zero_term(&self.counters_seen, &zero_expressions, *operand) {
+            if is_zero_term(&self.ids_info.counters_seen, &zero_expressions, *operand) {
                 *operand = CovTerm::Zero;
             }
         };
@@ -172,17 +130,17 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
     pub(crate) fn into_finished(self) -> FunctionCoverage<'tcx> {
         let zero_expressions = self.identify_zero_expressions();
-        let FunctionCoverageCollector { function_coverage_info, is_used, counters_seen, .. } = self;
+        let FunctionCoverageCollector { function_coverage_info, ids_info, is_used, .. } = self;
 
-        FunctionCoverage { function_coverage_info, is_used, counters_seen, zero_expressions }
+        FunctionCoverage { function_coverage_info, ids_info, is_used, zero_expressions }
     }
 }
 
 pub(crate) struct FunctionCoverage<'tcx> {
     pub(crate) function_coverage_info: &'tcx FunctionCoverageInfo,
+    ids_info: &'tcx CoverageIdsInfo,
     is_used: bool,
 
-    counters_seen: BitSet<CounterId>,
     zero_expressions: ZeroExpressions,
 }
 
@@ -238,7 +196,7 @@ impl<'tcx> FunctionCoverage<'tcx> {
     }
 
     fn is_zero_term(&self, term: CovTerm) -> bool {
-        is_zero_term(&self.counters_seen, &self.zero_expressions, term)
+        !self.is_used || is_zero_term(&self.ids_info.counters_seen, &self.zero_expressions, term)
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index b582dd967a7..8c24579fa7c 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -535,9 +535,12 @@ fn add_unused_function_coverage<'tcx>(
         }),
     );
 
-    // An unused function's mappings will automatically be rewritten to map to
-    // zero, because none of its counters/expressions are marked as seen.
-    let function_coverage = FunctionCoverageCollector::unused(instance, function_coverage_info);
+    // An unused function's mappings will all be rewritten to map to zero.
+    let function_coverage = FunctionCoverageCollector::unused(
+        instance,
+        function_coverage_info,
+        tcx.coverage_ids_info(instance.def),
+    );
 
     cx.coverage_cx().function_coverage_map.borrow_mut().insert(instance, function_coverage);
 }
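
Note: with the mutable `counters_seen`/`expressions_seen` tracking gone, the liveness facts now arrive pre-computed from the `coverage_ids_info` query, but the zeroing rule they feed is unchanged. Judging from the call sites above, the untouched `is_zero_term` helper in map_data.rs presumably still has this shape (a condensed sketch using the file's own types, not a verbatim copy):

    // A coverage term maps to zero if it is the literal zero term, if its
    // counter never appeared in a `CounterIncrement` statement (i.e. MIR
    // opts deleted it), or if it is an expression already proven zero.
    fn is_zero_term(
        counters_seen: &BitSet<CounterId>,
        zero_expressions: &ZeroExpressions,
        term: CovTerm,
    ) -> bool {
        match term {
            CovTerm::Zero => true,
            CovTerm::Counter(id) => !counters_seen.contains(id),
            CovTerm::Expression(id) => zero_expressions.contains(id),
        }
    }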
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index bf773cd2667..c2fcb33f98b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -21,8 +21,8 @@ mod llvm_cov;
 pub(crate) mod map_data;
 mod mapgen;
 
-/// A context object for maintaining all state needed by the coverageinfo module.
-pub(crate) struct CrateCoverageContext<'ll, 'tcx> {
+/// Extra per-CGU context/state needed for coverage instrumentation.
+pub(crate) struct CguCoverageContext<'ll, 'tcx> {
     /// Coverage data for each instrumented function identified by DefId.
     pub(crate) function_coverage_map:
         RefCell<FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>,
@@ -32,7 +32,7 @@ pub(crate) struct CrateCoverageContext<'ll, 'tcx> {
     covfun_section_name: OnceCell<CString>,
 }
 
-impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
+impl<'ll, 'tcx> CguCoverageContext<'ll, 'tcx> {
     pub(crate) fn new() -> Self {
         Self {
             function_coverage_map: Default::default(),
@@ -143,6 +143,13 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
 
         let bx = self;
 
+        // Due to LocalCopy instantiation or MIR inlining, coverage statements
+        // can end up in a crate that isn't doing coverage instrumentation.
+        // When that happens, we currently just discard those statements, so
+        // the corresponding code will be undercounted.
+        // FIXME(Zalathar): Find a better solution for mixed-coverage builds.
+        let Some(coverage_cx) = &bx.cx.coverage_cx else { return };
+
         let Some(function_coverage_info) =
             bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref()
         else {
@@ -150,32 +157,28 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             return;
         };
 
-        // FIXME(#132395): Unwrapping `coverage_cx` here has led to ICEs in the
-        // wild, so keep this early-return until we understand why.
-        let mut coverage_map = match bx.coverage_cx {
-            Some(ref cx) => cx.function_coverage_map.borrow_mut(),
-            None => return,
-        };
-        let func_coverage = coverage_map
-            .entry(instance)
-            .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
+        // Mark the instance as used in this CGU, for coverage purposes.
+        // This includes functions that were not partitioned into this CGU,
+        // but were MIR-inlined into one of this CGU's functions.
+        coverage_cx.function_coverage_map.borrow_mut().entry(instance).or_insert_with(|| {
+            FunctionCoverageCollector::new(
+                instance,
+                function_coverage_info,
+                bx.tcx.coverage_ids_info(instance.def),
+            )
+        });
 
         match *kind {
             CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => unreachable!(
                 "marker statement {kind:?} should have been removed by CleanupPostBorrowck"
             ),
             CoverageKind::CounterIncrement { id } => {
-                func_coverage.mark_counter_id_seen(id);
-                // We need to explicitly drop the `RefMut` before calling into
-                // `instrprof_increment`, as that needs an exclusive borrow.
-                drop(coverage_map);
-
                 // The number of counters passed to `llvm.instrprof.increment` might
                 // be smaller than the number originally inserted by the instrumentor,
                 // if some high-numbered counters were removed by MIR optimizations.
                 // If so, LLVM's profiler runtime will use fewer physical counters.
                 let num_counters =
-                    bx.tcx().coverage_ids_info(instance.def).max_counter_id.as_u32() + 1;
+                    bx.tcx().coverage_ids_info(instance.def).num_counters_after_mir_opts();
                 assert!(
                     num_counters as usize <= function_coverage_info.num_counters,
                     "num_counters disagreement: query says {num_counters} but function info only has {}",
@@ -192,23 +195,23 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
                 );
                 bx.instrprof_increment(fn_name, hash, num_counters, index);
             }
-            CoverageKind::ExpressionUsed { id } => {
-                func_coverage.mark_expression_id_seen(id);
+            CoverageKind::ExpressionUsed { id: _ } => {
+                // Expression-used statements are markers that are handled by
+                // `coverage_ids_info`, so there's nothing to codegen here.
             }
             CoverageKind::CondBitmapUpdate { index, decision_depth } => {
-                drop(coverage_map);
-                let cond_bitmap = bx
-                    .coverage_cx()
+                let cond_bitmap = coverage_cx
                     .try_get_mcdc_condition_bitmap(&instance, decision_depth)
                     .expect("mcdc cond bitmap should have been allocated for updating");
                 let cond_index = bx.const_i32(index as i32);
                 bx.mcdc_condbitmap_update(cond_index, cond_bitmap);
             }
             CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth } => {
-                drop(coverage_map);
-                let cond_bitmap = bx.coverage_cx()
-                    .try_get_mcdc_condition_bitmap(&instance, decision_depth)
-                    .expect("mcdc cond bitmap should have been allocated for merging into the global bitmap");
+                let cond_bitmap =
+                    coverage_cx.try_get_mcdc_condition_bitmap(&instance, decision_depth).expect(
+                        "mcdc cond bitmap should have been allocated for merging \
+                         into the global bitmap",
+                    );
                 assert!(
                     bitmap_idx as usize <= function_coverage_info.mcdc_bitmap_bits,
                     "bitmap index of the decision out of range"
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 7f59264824e..b1ace0033ba 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2240,6 +2240,7 @@ unsafe extern "C" {
         Output: *const c_char,
         DwoOutput: *const c_char,
         FileType: FileType,
+        VerifyIR: bool,
     ) -> LLVMRustResult;
     pub fn LLVMRustOptimize<'a>(
         M: &'a Module,
@@ -2460,4 +2461,7 @@ unsafe extern "C" {
 
     pub fn LLVMRustIs64BitSymbolicFile(buf_ptr: *const u8, buf_len: usize) -> bool;
     pub fn LLVMRustIsECObject(buf_ptr: *const u8, buf_len: usize) -> bool;
+
+    pub fn LLVMRustSetNoSanitizeAddress(Global: &Value);
+    pub fn LLVMRustSetNoSanitizeHWAddress(Global: &Value);
 }
