Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/back/lto.rs              | 15
 compiler/rustc_codegen_llvm/src/builder.rs               | 14
 compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs | 93
 compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs      | 45
 compiler/rustc_codegen_llvm/src/errors.rs                |  4
 compiler/rustc_codegen_llvm/src/intrinsic.rs             | 57
 compiler/rustc_codegen_llvm/src/type_.rs                 |  2
7 files changed, 150 insertions(+), 80 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index ba263296bb4..cb5acf79135 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -2,7 +2,7 @@ use crate::back::write::{
     self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
 };
 use crate::errors::{
-    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
 };
 use crate::llvm::{self, build_string};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
@@ -36,8 +36,12 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
-        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
-        CrateType::Rlib | CrateType::ProcMacro => false,
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Staticlib
+        | CrateType::Cdylib
+        | CrateType::ProcMacro => true,
+        CrateType::Rlib => false,
     }
 }
 
@@ -87,6 +91,11 @@ fn prepare_lto(
                 diag_handler.emit_err(LtoDylib);
                 return Err(FatalError);
             }
+        } else if *crate_type == CrateType::ProcMacro {
+            if !cgcx.opts.unstable_opts.dylib_lto {
+                diag_handler.emit_err(LtoProcMacro);
+                return Err(FatalError);
+            }
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index ac6d8f84142..4f9b86ec20a 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -3,6 +3,7 @@ use crate::attributes;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
+use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -1225,9 +1226,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
-    fn do_not_inline(&mut self, llret: &'ll Value) {
-        let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
-        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+    fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+        if llvm_util::get_version() < (17, 0, 2) {
+            // Work around https://github.com/llvm/llvm-project/issues/66984.
+            let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+        } else {
+            // Cleanup is always the cold path.
+            let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
+        }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index e83110dcad4..55f43aa5341 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -11,7 +11,7 @@ pub struct Expression {
     lhs: Operand,
     op: Op,
     rhs: Operand,
-    region: Option<CodeRegion>,
+    code_regions: Vec<CodeRegion>,
 }
 
 /// Collects all of the coverage regions associated with (a) injected counters, (b) counter
@@ -30,7 +30,7 @@ pub struct FunctionCoverage<'tcx> {
     instance: Instance<'tcx>,
     source_hash: u64,
     is_used: bool,
-    counters: IndexVec<CounterId, Option<CodeRegion>>,
+    counters: IndexVec<CounterId, Option<Vec<CodeRegion>>>,
     expressions: IndexVec<ExpressionId, Option<Expression>>,
     unreachable_regions: Vec<CodeRegion>,
 }
@@ -77,28 +77,40 @@ impl<'tcx> FunctionCoverage<'tcx> {
         }
     }
 
-    /// Adds a code region to be counted by an injected counter intrinsic.
-    pub fn add_counter(&mut self, id: CounterId, region: CodeRegion) {
-        if let Some(previous_region) = self.counters[id].replace(region.clone()) {
-            assert_eq!(previous_region, region, "add_counter: code region for id changed");
+    /// Adds code regions to be counted by an injected counter intrinsic.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_counter(&mut self, id: CounterId, code_regions: &[CodeRegion]) {
+        if code_regions.is_empty() {
+            return;
+        }
+
+        let slot = &mut self.counters[id];
+        match slot {
+            None => *slot = Some(code_regions.to_owned()),
+            // If this counter ID slot has already been filled, it should
+            // contain identical information.
+            Some(ref previous_regions) => assert_eq!(
+                previous_regions, code_regions,
+                "add_counter: code regions for id changed"
+            ),
         }
     }
 
+    /// Adds information about a coverage expression, along with zero or more
+    /// code regions mapped to that expression.
+    ///
     /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
     /// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity
     /// between operands that are counter IDs and operands that are expression IDs.
-    pub fn add_counter_expression(
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_counter_expression(
         &mut self,
         expression_id: ExpressionId,
         lhs: Operand,
         op: Op,
         rhs: Operand,
-        region: Option<CodeRegion>,
+        code_regions: &[CodeRegion],
     ) {
-        debug!(
-            "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
-            expression_id, lhs, op, rhs, region
-        );
         debug_assert!(
             expression_id.as_usize() < self.expressions.len(),
             "expression_id {} is out of range for expressions.len() = {}
@@ -107,23 +119,25 @@ impl<'tcx> FunctionCoverage<'tcx> {
             self.expressions.len(),
             self,
         );
-        if let Some(previous_expression) = self.expressions[expression_id].replace(Expression {
-            lhs,
-            op,
-            rhs,
-            region: region.clone(),
-        }) {
-            assert_eq!(
-                previous_expression,
-                Expression { lhs, op, rhs, region },
+
+        let expression = Expression { lhs, op, rhs, code_regions: code_regions.to_owned() };
+        let slot = &mut self.expressions[expression_id];
+        match slot {
+            None => *slot = Some(expression),
+            // If this expression ID slot has already been filled, it should
+            // contain identical information.
+            Some(ref previous_expression) => assert_eq!(
+                previous_expression, &expression,
                 "add_counter_expression: expression for id changed"
-            );
+            ),
         }
     }
 
-    /// Add a region that will be marked as "unreachable", with a constant "zero counter".
-    pub fn add_unreachable_region(&mut self, region: CodeRegion) {
-        self.unreachable_regions.push(region)
+    /// Adds regions that will be marked as "unreachable", with a constant "zero counter".
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn add_unreachable_regions(&mut self, code_regions: &[CodeRegion]) {
+        assert!(!code_regions.is_empty(), "unreachable regions always have code regions");
+        self.unreachable_regions.extend_from_slice(code_regions);
     }
 
     /// Perform some simplifications to make the final coverage mappings
@@ -212,11 +226,16 @@ impl<'tcx> FunctionCoverage<'tcx> {
     }
 
     fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
-        self.counters.iter_enumerated().filter_map(|(index, entry)| {
-            // Option::map() will return None to filter out missing counters. This may happen
-            // if, for example, a MIR-instrumented counter is removed during an optimization.
-            entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
-        })
+        self.counters
+            .iter_enumerated()
+            // Filter out counter IDs that we never saw during MIR traversal.
+            // This can happen if a counter was optimized out by MIR transforms
+            // (and replaced with `CoverageKind::Unreachable` instead).
+            .filter_map(|(id, maybe_code_regions)| Some((id, maybe_code_regions.as_ref()?)))
+            .flat_map(|(id, code_regions)| {
+                let counter = Counter::counter_value_reference(id);
+                code_regions.iter().map(move |region| (counter, region))
+            })
     }
 
     /// Convert this function's coverage expression data into a form that can be
@@ -254,13 +273,17 @@ impl<'tcx> FunctionCoverage<'tcx> {
 
     fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
         // Find all of the expression IDs that weren't optimized out AND have
-        // an attached code region, and return the corresponding mapping as a
-        // counter/region pair.
+        // one or more attached code regions, and return the corresponding
+        // mappings as counter/region pairs.
         self.expressions
             .iter_enumerated()
-            .filter_map(|(id, expression)| {
-                let code_region = expression.as_ref()?.region.as_ref()?;
-                Some((Counter::expression(id), code_region))
+            .filter_map(|(id, maybe_expression)| {
+                let code_regions = &maybe_expression.as_ref()?.code_regions;
+                Some((id, code_regions))
+            })
+            .flat_map(|(id, code_regions)| {
+                let counter = Counter::expression(id);
+                code_regions.iter().map(move |code_region| (counter, code_region))
             })
             .collect::<Vec<_>>()
     }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index c70cb670e96..dd2ce9b525b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -89,9 +89,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     /// `function_coverage_map` (keyed by function `Instance`) during codegen.
     /// But in this case, since the unused function was _not_ previously
     /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
-    /// them. The first `CodeRegion` is used to add a single counter, with the
-    /// same counter ID used in the injected `instrprof.increment` intrinsic
-    /// call. Since the function is never called, all other `CodeRegion`s can be
+    /// them. Since the function is never called, all of its `CodeRegion`s can be
     /// added as `unreachable_region`s.
     fn define_unused_fn(&self, def_id: DefId) {
         let instance = declare_unused_fn(self, def_id);
@@ -110,25 +108,15 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             .entry(instance)
             .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
 
-        let Coverage { kind, code_region } = coverage.clone();
-        match kind {
+        let Coverage { kind, code_regions } = coverage;
+        match *kind {
             CoverageKind::Counter { function_source_hash, id } => {
                 debug!(
                     "ensuring function source hash is set for instance={:?}; function_source_hash={}",
                     instance, function_source_hash,
                 );
                 func_coverage.set_function_source_hash(function_source_hash);
-
-                if let Some(code_region) = code_region {
-                    // Note: Some counters do not have code regions, but may still be referenced
-                    // from expressions. In that case, don't add the counter to the coverage map,
-                    // but do inject the counter intrinsic.
-                    debug!(
-                        "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
-                        instance, id, code_region,
-                    );
-                    func_coverage.add_counter(id, code_region);
-                }
+                func_coverage.add_counter(id, code_regions);
 
                 // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
                 // as that needs an exclusive borrow.
                 drop(coverage_map);
@@ -146,20 +134,10 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
                 bx.instrprof_increment(fn_name, hash, num_counters, index);
             }
             CoverageKind::Expression { id, lhs, op, rhs } => {
-                debug!(
-                    "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
-                    instance, id, lhs, op, rhs, code_region,
-                );
-                func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
+                func_coverage.add_counter_expression(id, lhs, op, rhs, code_regions);
             }
             CoverageKind::Unreachable => {
-                let code_region =
-                    code_region.expect("unreachable regions always have code regions");
-                debug!(
-                    "adding unreachable code to coverage_map: instance={:?}, at {:?}",
-                    instance, code_region,
-                );
-                func_coverage.add_unreachable_region(code_region);
+                func_coverage.add_unreachable_regions(code_regions);
             }
         }
     }
@@ -227,14 +205,9 @@ fn add_unused_function_coverage<'tcx>(
     let tcx = cx.tcx;
     let mut function_coverage = FunctionCoverage::unused(tcx, instance);
 
-    for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
-        if index == 0 {
-            // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
-            // definitions.
-            function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
-        } else {
-            function_coverage.add_unreachable_region(code_region.clone());
-        }
+    for &code_region in tcx.covered_code_regions(def_id) {
+        let code_region = std::slice::from_ref(code_region);
+        function_coverage.add_unreachable_regions(code_region);
     }
 
     if let Some(coverage_context) = cx.coverage_context() {
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index 264c273ba30..665d195790c 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -139,6 +139,10 @@ pub(crate) struct LtoDisallowed;
 pub(crate) struct LtoDylib;
 
 #[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
 #[diag(codegen_llvm_lto_bitcode_from_rlib)]
 pub(crate) struct LtoBitcodeFromRlib {
     pub llvm_err: String,
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9289c37d763..a97b803fc64 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,7 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir as hir;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, symbol::kw, Span, Symbol};
 use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
@@ -376,7 +376,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             _ if name.as_str().starts_with("simd_") => {
-                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                match generic_simd_intrinsic(
+                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+                ) {
                     Ok(llval) => llval,
                     Err(()) => return,
                 }
@@ -911,6 +913,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    callee_ty: Ty<'tcx>,
+   fn_args: GenericArgsRef<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
@@ -1030,6 +1033,56 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         ));
     }
 
+    if name == sym::simd_shuffle_generic {
+        let idx = fn_args[2]
+            .expect_const()
+            .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+            .unwrap()
+            .unwrap_branch();
+        let n = idx.len() as u64;
+
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let total_len = in_len * 2;
+
+        let indices: Option<Vec<_>> = idx
+            .iter()
+            .enumerate()
+            .map(|(arg_idx, val)| {
+                let idx = val.unwrap_leaf().try_to_i32().unwrap();
+                if idx >= i32::try_from(total_len).unwrap() {
+                    bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+                        span,
+                        name,
+                        arg_idx: arg_idx as u64,
+                        total_len: total_len.into(),
+                    });
+                    None
+                } else {
+                    Some(bx.const_i32(idx))
+                }
+            })
+            .collect();
+        let Some(indices) = indices else {
+            return Ok(bx.const_null(llret_ty));
+        };
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            bx.const_vector(&indices),
+        ));
+    }
+
     if name == sym::simd_shuffle {
         // Make sure this is actually an array, since typeck only checks the length-suffixed
         // version of this intrinsic.
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 8db6195d931..06b7703672f 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -112,7 +112,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
     }
 
-    /// Return a LLVM type that has at most the required alignment,
+    /// Return an LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
     pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
         let unit = Integer::approximate_align(self, align);
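
Note on the builder.rs hunk: codegen invokes `apply_attrs_to_cleanup_callsite` for calls that land in MIR cleanup blocks, i.e. code reached only while a panic unwinds, so marking them `cold` matches how they actually execute. A small sketch of the kind of source that produces such a callsite (illustrative only; which attribute LLVM sees depends on the version gate above):

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        // During unwinding, the call to this drop glue sits in a cleanup
        // block, so its callsite gets `cold` (or `noinline` on LLVM older
        // than 17.0.2).
        eprintln!("cleaning up");
    }
}

fn may_panic(fail: bool) {
    if fail {
        panic!("boom");
    }
}

fn main() {
    let _g = Guard;
    // `may_panic` has an unwind edge into a cleanup block that drops `_g`.
    may_panic(std::env::args().count() > 1);
}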
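Note on the map_data.rs hunks: `add_counter` and `add_counter_expression` now share a fill-once discipline — the first call for an ID fills its `Option` slot, and any repeat call must carry identical data. A minimal self-contained sketch of that pattern (illustrative names, not rustc APIs):

fn fill_slot<T: PartialEq + std::fmt::Debug>(slot: &mut Option<T>, value: T) {
    match slot {
        // First sighting of this ID: store the data.
        None => *slot = Some(value),
        // Already filled: a refill must match what was stored before.
        Some(previous) => assert_eq!(previous, &value, "slot refilled with different data"),
    }
}

fn main() {
    let mut slot: Option<Vec<&str>> = None;
    fill_slot(&mut slot, vec!["region-a", "region-b"]);
    fill_slot(&mut slot, vec!["region-a", "region-b"]); // OK: identical refill.
    // fill_slot(&mut slot, vec!["region-c"]);          // Would panic.
    println!("{slot:?}");
}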
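Relatedly, `counter_regions` and `expression_regions` change shape: a filled slot now holds a `Vec<CodeRegion>` rather than a single region, so one ID can yield several (counter, region) pairs. The iterator shape, sketched with plain types (again illustrative, not the rustc types):

fn main() {
    // Index = counter ID; `None` = a counter the MIR passes optimized out.
    let counters: Vec<Option<Vec<&str>>> =
        vec![Some(vec!["region-a", "region-b"]), None, Some(vec!["region-c"])];

    let pairs: Vec<(usize, &str)> = counters
        .iter()
        .enumerate()
        // Skip IDs whose slots were never filled.
        .filter_map(|(id, maybe_regions)| Some((id, maybe_regions.as_ref()?)))
        // Expand each ID into one pair per region, as the new `flat_map` does.
        .flat_map(|(id, regions)| regions.iter().map(move |&region| (id, region)))
        .collect();

    assert_eq!(pairs, vec![(0, "region-a"), (0, "region-b"), (2, "region-c")]);
}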
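Note on the new `simd_shuffle_generic` branch in intrinsic.rs: the const index array picks lanes out of the concatenation of the two input vectors, which is why the bounds check compares against `total_len = in_len * 2`. A plain-Rust model of those semantics (the real intrinsic lowers to LLVM's `shufflevector`, and an out-of-bounds index is a compile-time error rather than the panic modeled here):

fn shuffle_model<const N: usize>(x: [f32; N], y: [f32; N], idx: &[u32]) -> Vec<f32> {
    // Lane indices 0..N read from `x`; indices N..2*N read from `y`.
    let total_len = 2 * N as u32;
    idx.iter()
        .map(|&i| {
            assert!(i < total_len, "shuffle index {i} out of bounds for {total_len} lanes");
            if (i as usize) < N { x[i as usize] } else { y[i as usize - N] }
        })
        .collect()
}

fn main() {
    let a = [0.0_f32, 1.0, 2.0, 3.0];
    let b = [4.0, 5.0, 6.0, 7.0];
    // Interleave the low halves of `a` and `b`.
    assert_eq!(shuffle_model(a, b, &[0, 4, 1, 5]), vec![0.0, 4.0, 1.0, 5.0]);
}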
