Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/abi.rs                        |  47
 compiler/rustc_codegen_llvm/src/back/lto.rs                   |  29
 compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs  |   2
 compiler/rustc_codegen_llvm/src/back/write.rs                 |  41
 compiler/rustc_codegen_llvm/src/base.rs                       |   2
 compiler/rustc_codegen_llvm/src/builder/autodiff.rs           |  72
 compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs |  55
 compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs           |  23
 compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs         |  11
 compiler/rustc_codegen_llvm/src/debuginfo/mod.rs              |  27
 compiler/rustc_codegen_llvm/src/intrinsic.rs                  | 471
 compiler/rustc_codegen_llvm/src/lib.rs                        |   3
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs                   |  30
 compiler/rustc_codegen_llvm/src/llvm_util.rs                  |   2
 compiler/rustc_codegen_llvm/src/mono_item.rs                  |   5
 compiler/rustc_codegen_llvm/src/type_.rs                      |   8
16 files changed, 409 insertions(+), 419 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 31ee0eeca11..13846c8ab4b 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -4,8 +4,7 @@ use std::cmp; use libc::c_uint; use rustc_abi as abi; pub(crate) use rustc_abi::ExternAbi; -use rustc_abi::Primitive::Int; -use rustc_abi::{HasDataLayout, Size}; +use rustc_abi::{HasDataLayout, Primitive, Reg, RegKind, Size}; use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; @@ -440,7 +439,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { let apply_range_attr = |idx: AttributePlace, scalar: rustc_abi::Scalar| { if cx.sess().opts.optimize != config::OptLevel::No && llvm_util::get_version() >= (19, 0, 0) - && matches!(scalar.primitive(), Int(..)) + && matches!(scalar.primitive(), Primitive::Int(..)) // If the value is a boolean, the range is 0..2 and that ultimately // become 0..0 when the type becomes i1, which would be rejected // by the LLVM verifier. @@ -448,11 +447,11 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { // LLVM also rejects full range. && !scalar.is_always_valid(cx) { - attributes::apply_to_llfn(llfn, idx, &[llvm::CreateRangeAttr( - cx.llcx, - scalar.size(cx), - scalar.valid_range(cx), - )]); + attributes::apply_to_llfn( + llfn, + idx, + &[llvm::CreateRangeAttr(cx.llcx, scalar.size(cx), scalar.valid_range(cx))], + ); } }; @@ -472,10 +471,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { ); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]); if cx.sess().opts.optimize != config::OptLevel::No { - attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[ - llvm::AttributeKind::Writable.create_attr(cx.llcx), - llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx), - ]); + attributes::apply_to_llfn( + llfn, + llvm::AttributePlace::Argument(i), + &[ + llvm::AttributeKind::Writable.create_attr(cx.llcx), + llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx), + ], + ); } } PassMode::Cast { cast, pad_i32: _ } => { @@ -574,7 +577,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { if bx.cx.sess().opts.optimize != config::OptLevel::No && llvm_util::get_version() < (19, 0, 0) && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr - && matches!(scalar.primitive(), Int(..)) + && matches!(scalar.primitive(), Primitive::Int(..)) // If the value is a boolean, the range is 0..2 and that ultimately // become 0..0 when the type becomes i1, which would be rejected // by the LLVM verifier. @@ -593,9 +596,11 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { bx.cx.llcx, bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()), ); - attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[ - byval, - ]); + attributes::apply_to_callsite( + callsite, + llvm::AttributePlace::Argument(i), + &[byval], + ); } PassMode::Direct(attrs) | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => { @@ -627,9 +632,11 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { // This will probably get ignored on all targets but those supporting the TrustZone-M // extension (thumbv8m targets). 
let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call"); - attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &[ - cmse_nonsecure_call, - ]); + attributes::apply_to_callsite( + callsite, + llvm::AttributePlace::Function, + &[cmse_nonsecure_call], + ); } // Some intrinsics require that an elementtype attribute (with the pointee type of a @@ -657,7 +664,7 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { } impl llvm::CallConv { - pub fn from_conv(conv: Conv, arch: &str) -> Self { + pub(crate) fn from_conv(conv: Conv, arch: &str) -> Self { match conv { Conv::C | Conv::Rust diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 78c759bbe8c..7262fce4911 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -606,10 +606,31 @@ pub(crate) fn run_pass_manager( // If this rustc version was build with enzyme/autodiff enabled, and if users applied the // `#[autodiff]` macro at least once, then we will later call llvm_optimize a second time. - let first_run = true; debug!("running llvm pm opt pipeline"); unsafe { - write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, first_run)?; + write::llvm_optimize( + cgcx, + dcx, + module, + config, + opt_level, + opt_stage, + write::AutodiffStage::DuringAD, + )?; + } + // FIXME(ZuseZ4): Make this more granular + if cfg!(llvm_enzyme) && !thin { + unsafe { + write::llvm_optimize( + cgcx, + dcx, + module, + config, + opt_level, + llvm::OptStage::FatLTO, + write::AutodiffStage::PostAD, + )?; + } } debug!("lto done"); Ok(()) @@ -621,7 +642,7 @@ unsafe impl Send for ModuleBuffer {} unsafe impl Sync for ModuleBuffer {} impl ModuleBuffer { - pub fn new(m: &llvm::Module) -> ModuleBuffer { + pub(crate) fn new(m: &llvm::Module) -> ModuleBuffer { ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) }) } } @@ -663,7 +684,7 @@ unsafe impl Send for ThinBuffer {} unsafe impl Sync for ThinBuffer {} impl ThinBuffer { - pub fn new(m: &llvm::Module, is_thin: bool, emit_summary: bool) -> ThinBuffer { + pub(crate) fn new(m: &llvm::Module, is_thin: bool, emit_summary: bool) -> ThinBuffer { unsafe { let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin, emit_summary); ThinBuffer(buffer) diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs index 4cbd49aa44d..f075f332462 100644 --- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs +++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs @@ -17,7 +17,7 @@ pub struct OwnedTargetMachine { } impl OwnedTargetMachine { - pub fn new( + pub(crate) fn new( triple: &CStr, cpu: &CStr, features: &CStr, diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 4706744f353..ae4c4d5876e 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -530,6 +530,16 @@ fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> { config.instrument_coverage.then(|| c"default_%m_%p.profraw".to_owned()) } +// PreAD will run llvm opts but disable size increasing opts (vectorization, loop unrolling) +// DuringAD is the same as above, but also runs the enzyme opt and autodiff passes. +// PostAD will run all opts, including size increasing opts. 
+#[derive(Debug, Eq, PartialEq)] +pub(crate) enum AutodiffStage { + PreAD, + DuringAD, + PostAD, +} + pub(crate) unsafe fn llvm_optimize( cgcx: &CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'_>, @@ -537,7 +547,7 @@ pub(crate) unsafe fn llvm_optimize( config: &ModuleConfig, opt_level: config::OptLevel, opt_stage: llvm::OptStage, - skip_size_increasing_opts: bool, + autodiff_stage: AutodiffStage, ) -> Result<(), FatalError> { // Enzyme: // The whole point of compiler based AD is to differentiate optimized IR instead of unoptimized @@ -550,12 +560,16 @@ pub(crate) unsafe fn llvm_optimize( let unroll_loops; let vectorize_slp; let vectorize_loop; + let run_enzyme = cfg!(llvm_enzyme) && autodiff_stage == AutodiffStage::DuringAD; // When we build rustc with enzyme/autodiff support, we want to postpone size-increasing - // optimizations until after differentiation. FIXME(ZuseZ4): Before shipping on nightly, + // optimizations until after differentiation. Our pipeline is thus: (opt + enzyme), (full opt). + // We therefore have two calls to llvm_optimize, if autodiff is used. + // + // FIXME(ZuseZ4): Before shipping on nightly, // we should make this more granular, or at least check that the user has at least one autodiff // call in their code, to justify altering the compilation pipeline. - if skip_size_increasing_opts && cfg!(llvm_enzyme) { + if cfg!(llvm_enzyme) && autodiff_stage != AutodiffStage::PostAD { unroll_loops = false; vectorize_slp = false; vectorize_loop = false; @@ -565,7 +579,7 @@ pub(crate) unsafe fn llvm_optimize( vectorize_slp = config.vectorize_slp; vectorize_loop = config.vectorize_loop; } - trace!(?unroll_loops, ?vectorize_slp, ?vectorize_loop); + trace!(?unroll_loops, ?vectorize_slp, ?vectorize_loop, ?run_enzyme); let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed(); let pgo_gen_path = get_pgo_gen_path(config); let pgo_use_path = get_pgo_use_path(config); @@ -633,6 +647,7 @@ pub(crate) unsafe fn llvm_optimize( vectorize_loop, config.no_builtins, config.emit_lifetime_markers, + run_enzyme, sanitizer_options.as_ref(), pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()), pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()), @@ -684,18 +699,14 @@ pub(crate) unsafe fn optimize( _ => llvm::OptStage::PreLinkNoLTO, }; - // If we know that we will later run AD, then we disable vectorization and loop unrolling - let skip_size_increasing_opts = cfg!(llvm_enzyme); + // If we know that we will later run AD, then we disable vectorization and loop unrolling. + // Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD). + // FIXME(ZuseZ4): Make this more granular, only set PreAD if we actually have autodiff + // usages, not just if we build rustc with autodiff support. 
+ let autodiff_stage = + if cfg!(llvm_enzyme) { AutodiffStage::PreAD } else { AutodiffStage::PostAD }; return unsafe { - llvm_optimize( - cgcx, - dcx, - module, - config, - opt_level, - opt_stage, - skip_size_increasing_opts, - ) + llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, autodiff_stage) }; } Ok(()) diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index d05faf5577b..d35c7945bae 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -157,9 +157,7 @@ pub(crate) fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage, Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage, Linkage::WeakODR => llvm::Linkage::WeakODRLinkage, - Linkage::Appending => llvm::Linkage::AppendingLinkage, Linkage::Internal => llvm::Linkage::InternalLinkage, - Linkage::Private => llvm::Linkage::PrivateLinkage, Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage, Linkage::Common => llvm::Linkage::CommonLinkage, } diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index 9e8e4e1c567..b2c1088e3fc 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -4,10 +4,9 @@ use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, AutoDiffItem, DiffActivit use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::back::write::ModuleConfig; use rustc_errors::FatalError; -use rustc_session::config::Lto; use tracing::{debug, trace}; -use crate::back::write::{llvm_err, llvm_optimize}; +use crate::back::write::llvm_err; use crate::builder::SBuilder; use crate::context::SimpleCx; use crate::declare::declare_simple_fn; @@ -53,8 +52,6 @@ fn generate_enzyme_call<'ll>( let mut ad_name: String = match attrs.mode { DiffMode::Forward => "__enzyme_fwddiff", DiffMode::Reverse => "__enzyme_autodiff", - DiffMode::ForwardFirst => "__enzyme_fwddiff", - DiffMode::ReverseFirst => "__enzyme_autodiff", _ => panic!("logic bug in autodiff, unrecognized mode"), } .to_string(); @@ -153,7 +150,7 @@ fn generate_enzyme_call<'ll>( _ => {} } - trace!("matching autodiff arguments"); + debug!("matching autodiff arguments"); // We now handle the issue that Rust level arguments not always match the llvm-ir level // arguments. A slice, `&[f32]`, for example, is represented as a pointer and a length on // llvm-ir level. The number of activities matches the number of Rust level arguments, so we @@ -164,10 +161,10 @@ fn generate_enzyme_call<'ll>( let mut activity_pos = 0; let outer_args: Vec<&llvm::Value> = get_params(outer_fn); while activity_pos < inputs.len() { - let activity = inputs[activity_pos as usize]; + let diff_activity = inputs[activity_pos as usize]; // Duplicated arguments received a shadow argument, into which enzyme will write the // gradient. - let (activity, duplicated): (&Metadata, bool) = match activity { + let (activity, duplicated): (&Metadata, bool) = match diff_activity { DiffActivity::None => panic!("not a valid input activity"), DiffActivity::Const => (enzyme_const, false), DiffActivity::Active => (enzyme_out, false), @@ -222,7 +219,15 @@ fn generate_enzyme_call<'ll>( // A duplicated pointer will have the following two outer_fn arguments: // (..., ptr, ptr, ...). We add the following llvm-ir to our __enzyme call: // (..., metadata! enzyme_dup, ptr, ptr, ...). 
- assert!(llvm::LLVMRustGetTypeKind(next_outer_ty) == llvm::TypeKind::Pointer); + if matches!( + diff_activity, + DiffActivity::Duplicated | DiffActivity::DuplicatedOnly + ) { + assert!( + llvm::LLVMRustGetTypeKind(next_outer_ty) == llvm::TypeKind::Pointer + ); + } + // In the case of Dual we don't have assumptions, e.g. f32 would be valid. args.push(next_outer_arg); outer_pos += 2; activity_pos += 1; @@ -277,7 +282,7 @@ pub(crate) fn differentiate<'ll>( module: &'ll ModuleCodegen<ModuleLlvm>, cgcx: &CodegenContext<LlvmCodegenBackend>, diff_items: Vec<AutoDiffItem>, - config: &ModuleConfig, + _config: &ModuleConfig, ) -> Result<(), FatalError> { for item in &diff_items { trace!("{}", item); @@ -291,20 +296,26 @@ pub(crate) fn differentiate<'ll>( let name = item.source.clone(); let fn_def: Option<&llvm::Value> = cx.get_function(&name); let Some(fn_def) = fn_def else { - return Err(llvm_err(diag_handler.handle(), LlvmError::PrepareAutoDiff { - src: item.source.clone(), - target: item.target.clone(), - error: "could not find source function".to_owned(), - })); + return Err(llvm_err( + diag_handler.handle(), + LlvmError::PrepareAutoDiff { + src: item.source.clone(), + target: item.target.clone(), + error: "could not find source function".to_owned(), + }, + )); }; debug!(?item.target); let fn_target: Option<&llvm::Value> = cx.get_function(&item.target); let Some(fn_target) = fn_target else { - return Err(llvm_err(diag_handler.handle(), LlvmError::PrepareAutoDiff { - src: item.source.clone(), - target: item.target.clone(), - error: "could not find target function".to_owned(), - })); + return Err(llvm_err( + diag_handler.handle(), + LlvmError::PrepareAutoDiff { + src: item.source.clone(), + target: item.target.clone(), + error: "could not find target function".to_owned(), + }, + )); }; generate_enzyme_call(&cx, fn_def, fn_target, item.attrs.clone()); @@ -312,29 +323,6 @@ pub(crate) fn differentiate<'ll>( // FIXME(ZuseZ4): support SanitizeHWAddress and prevent illegal/unsupported opts - if let Some(opt_level) = config.opt_level { - let opt_stage = match cgcx.lto { - Lto::Fat => llvm::OptStage::PreLinkFatLTO, - Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO, - _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO, - _ => llvm::OptStage::PreLinkNoLTO, - }; - // This is our second opt call, so now we run all opts, - // to make sure we get the best performance. - let skip_size_increasing_opts = false; - trace!("running Module Optimization after differentiation"); - unsafe { - llvm_optimize( - cgcx, - diag_handler.handle(), - module, - config, - opt_level, - opt_stage, - skip_size_increasing_opts, - )? 
- }; - } trace!("done with differentiate()"); Ok(()) diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs index 460a4664615..c53ea6d4666 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs @@ -11,7 +11,8 @@ use rustc_codegen_ssa::traits::{ BaseTypeCodegenMethods, ConstCodegenMethods, StaticCodegenMethods, }; use rustc_middle::mir::coverage::{ - CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping, MappingKind, Op, + BasicCoverageBlock, CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping, + MappingKind, Op, }; use rustc_middle::ty::{Instance, TyCtxt}; use rustc_span::Span; @@ -53,7 +54,7 @@ pub(crate) fn prepare_covfun_record<'tcx>( let fn_cov_info = tcx.instance_mir(instance.def).function_coverage_info.as_deref()?; let ids_info = tcx.coverage_ids_info(instance.def)?; - let expressions = prepare_expressions(fn_cov_info, ids_info, is_used); + let expressions = prepare_expressions(ids_info); let mut covfun = CovfunRecord { mangled_function_name: tcx.symbol_name(instance).name, @@ -75,26 +76,14 @@ pub(crate) fn prepare_covfun_record<'tcx>( } /// Convert the function's coverage-counter expressions into a form suitable for FFI. -fn prepare_expressions( - fn_cov_info: &FunctionCoverageInfo, - ids_info: &CoverageIdsInfo, - is_used: bool, -) -> Vec<ffi::CounterExpression> { - // If any counters or expressions were removed by MIR opts, replace their - // terms with zero. - let counter_for_term = |term| { - if !is_used || ids_info.is_zero_term(term) { - ffi::Counter::ZERO - } else { - ffi::Counter::from_term(term) - } - }; +fn prepare_expressions(ids_info: &CoverageIdsInfo) -> Vec<ffi::CounterExpression> { + let counter_for_term = ffi::Counter::from_term; // We know that LLVM will optimize out any unused expressions before // producing the final coverage map, so there's no need to do the same // thing on the Rust side unless we're confident we can do much better. // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.) - fn_cov_info + ids_info .expressions .iter() .map(move |&Expression { lhs, op, rhs }| ffi::CounterExpression { @@ -136,11 +125,16 @@ fn fill_region_tables<'tcx>( // For each counter/region pair in this function+file, convert it to a // form suitable for FFI. - let is_zero_term = |term| !covfun.is_used || ids_info.is_zero_term(term); for &Mapping { ref kind, span } in &fn_cov_info.mappings { - // If the mapping refers to counters/expressions that were removed by - // MIR opts, replace those occurrences with zero. - let kind = kind.map_terms(|term| if is_zero_term(term) { CovTerm::Zero } else { term }); + // If this function is unused, replace all counters with zero. + let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter { + let term = if covfun.is_used { + ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term") + } else { + CovTerm::Zero + }; + ffi::Counter::from_term(term) + }; // Convert the `Span` into coordinates that we can pass to LLVM, or // discard the span if conversion fails. 
In rare, cases _all_ of a @@ -154,23 +148,22 @@ fn fill_region_tables<'tcx>( continue; } - match kind { - MappingKind::Code(term) => { - code_regions - .push(ffi::CodeRegion { cov_span, counter: ffi::Counter::from_term(term) }); + match *kind { + MappingKind::Code { bcb } => { + code_regions.push(ffi::CodeRegion { cov_span, counter: counter_for_bcb(bcb) }); } - MappingKind::Branch { true_term, false_term } => { + MappingKind::Branch { true_bcb, false_bcb } => { branch_regions.push(ffi::BranchRegion { cov_span, - true_counter: ffi::Counter::from_term(true_term), - false_counter: ffi::Counter::from_term(false_term), + true_counter: counter_for_bcb(true_bcb), + false_counter: counter_for_bcb(false_bcb), }); } - MappingKind::MCDCBranch { true_term, false_term, mcdc_params } => { + MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params } => { mcdc_branch_regions.push(ffi::MCDCBranchRegion { cov_span, - true_counter: ffi::Counter::from_term(true_term), - false_counter: ffi::Counter::from_term(false_term), + true_counter: counter_for_bcb(true_bcb), + false_counter: counter_for_bcb(false_bcb), mcdc_branch_params: ffi::mcdc::BranchParameters::from(mcdc_params), }); } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 021108cd51c..ea7f581a3cb 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -160,21 +160,12 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => unreachable!( "marker statement {kind:?} should have been removed by CleanupPostBorrowck" ), - CoverageKind::CounterIncrement { id } => { - // The number of counters passed to `llvm.instrprof.increment` might - // be smaller than the number originally inserted by the instrumentor, - // if some high-numbered counters were removed by MIR optimizations. - // If so, LLVM's profiler runtime will use fewer physical counters. - let num_counters = ids_info.num_counters_after_mir_opts(); - assert!( - num_counters as usize <= function_coverage_info.num_counters, - "num_counters disagreement: query says {num_counters} but function info only has {}", - function_coverage_info.num_counters - ); - + CoverageKind::VirtualCounter { bcb } + if let Some(&id) = ids_info.phys_counter_for_node.get(&bcb) => + { let fn_name = bx.get_pgo_func_name_var(instance); let hash = bx.const_u64(function_coverage_info.function_source_hash); - let num_counters = bx.const_u32(num_counters); + let num_counters = bx.const_u32(ids_info.num_counters); let index = bx.const_u32(id.as_u32()); debug!( "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})", @@ -182,10 +173,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { ); bx.instrprof_increment(fn_name, hash, num_counters, index); } - CoverageKind::ExpressionUsed { id: _ } => { - // Expression-used statements are markers that are handled by - // `coverage_ids_info`, so there's nothing to codegen here. - } + // If a BCB doesn't have an associated physical counter, there's nothing to codegen. + CoverageKind::VirtualCounter { .. 
} => {} CoverageKind::CondBitmapUpdate { index, decision_depth } => { let cond_bitmap = coverage_cx .try_get_mcdc_condition_bitmap(&instance, decision_depth) diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index f497ba95661..59c3fe635d0 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -319,19 +319,16 @@ fn build_subroutine_type_di_node<'ll, 'tcx>( // This is actually a function pointer, so wrap it in pointer DI. let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false); let (size, align) = match fn_ty.kind() { - ty::FnDef(..) => (0, 1), - ty::FnPtr(..) => ( - cx.tcx.data_layout.pointer_size.bits(), - cx.tcx.data_layout.pointer_align.abi.bits() as u32, - ), + ty::FnDef(..) => (Size::ZERO, Align::ONE), + ty::FnPtr(..) => (cx.tcx.data_layout.pointer_size, cx.tcx.data_layout.pointer_align.abi), _ => unreachable!(), }; let di_node = unsafe { llvm::LLVMRustDIBuilderCreatePointerType( DIB(cx), fn_di_node, - size, - align, + size.bits(), + align.bits() as u32, 0, // Ignore DWARF address space. name.as_c_char_ptr(), name.len(), diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index a9737583037..17f2d5f4e73 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -97,7 +97,11 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { // Android has the same issue (#22398) llvm::add_module_flag_u32( self.llmod, - llvm::ModuleFlagMergeBehavior::Warning, + // In the case where multiple CGUs with different dwarf version + // values are being merged together, such as with cross-crate + // LTO, then we want to use the highest version of dwarf + // we can. This matches Clang's behavior as well. 
+ llvm::ModuleFlagMergeBehavior::Max, "Dwarf Version", sess.dwarf_version(), ); @@ -544,14 +548,17 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - let scope = namespace::item_namespace(cx, DefId { - krate: instance.def_id().krate, - index: cx - .tcx - .def_key(instance.def_id()) - .parent - .expect("get_containing_scope: missing parent?"), - }); + let scope = namespace::item_namespace( + cx, + DefId { + krate: instance.def_id().krate, + index: cx + .tcx + .def_key(instance.def_id()) + .parent + .expect("get_containing_scope: missing parent?"), + }, + ); (scope, false) } } @@ -633,7 +640,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { true, DIFlags::FlagZero, argument_index, - align.bytes() as u32, + align.bits() as u32, ) } } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 43d6ccfcb4a..3200c94d977 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -333,12 +333,15 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::prefetch_write_instruction => (1, 0), _ => bug!(), }; - self.call_intrinsic("llvm.prefetch", &[ - args[0].immediate(), - self.const_i32(rw), - args[1].immediate(), - self.const_i32(cache_type), - ]) + self.call_intrinsic( + "llvm.prefetch", + &[ + args[0].immediate(), + self.const_i32(rw), + args[1].immediate(), + self.const_i32(cache_type), + ], + ) } sym::carrying_mul_add => { let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx); @@ -396,10 +399,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { match name { sym::ctlz | sym::cttz => { let y = self.const_bool(false); - let ret = self.call_intrinsic(&format!("llvm.{name}.i{width}"), &[ - args[0].immediate(), - y, - ]); + let ret = self.call_intrinsic( + &format!("llvm.{name}.i{width}"), + &[args[0].immediate(), y], + ); self.intcast(ret, llret_ty, false) } @@ -416,24 +419,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.intcast(ret, llret_ty, false) } sym::ctpop => { - let ret = self.call_intrinsic(&format!("llvm.ctpop.i{width}"), &[ - args[0].immediate() - ]); + let ret = self.call_intrinsic( + &format!("llvm.ctpop.i{width}"), + &[args[0].immediate()], + ); self.intcast(ret, llret_ty, false) } sym::bswap => { if width == 8 { args[0].immediate() // byte swap a u8/i8 is just a no-op } else { - self.call_intrinsic(&format!("llvm.bswap.i{width}"), &[ - args[0].immediate() - ]) + self.call_intrinsic( + &format!("llvm.bswap.i{width}"), + &[args[0].immediate()], + ) } } - sym::bitreverse => self - .call_intrinsic(&format!("llvm.bitreverse.i{width}"), &[ - args[0].immediate() - ]), + sym::bitreverse => self.call_intrinsic( + &format!("llvm.bitreverse.i{width}"), + &[args[0].immediate()], + ), sym::rotate_left | sym::rotate_right => { let is_left = name == sym::rotate_left; let val = args[0].immediate(); @@ -500,11 +505,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::compare_bytes => { // Here we assume that the `memcmp` provided by the target is a NOP for size 0. - let cmp = self.call_intrinsic("memcmp", &[ - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - ]); + let cmp = self.call_intrinsic( + "memcmp", + &[args[0].immediate(), args[1].immediate(), args[2].immediate()], + ); // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`. 
self.sext(cmp, self.type_ix(32)) } @@ -1305,14 +1309,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if let Some(cmp_op) = comparison { let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn); - require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - }); + require!( + in_len == out_len, + InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); require!( bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty } @@ -1333,21 +1340,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let n = idx.len() as u64; let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn); - require!(out_len == n, InvalidMonomorphization::ReturnLength { - span, - name, - in_len: n, - ret_ty, - out_len - }); - require!(in_elem == out_ty, InvalidMonomorphization::ReturnElement { - span, - name, - in_elem, - in_ty, - ret_ty, - out_ty - }); + require!( + out_len == n, + InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len } + ); + require!( + in_elem == out_ty, + InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } + ); let total_len = in_len * 2; @@ -1392,21 +1392,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>( }; let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn); - require!(out_len == n, InvalidMonomorphization::ReturnLength { - span, - name, - in_len: n, - ret_ty, - out_len - }); - require!(in_elem == out_ty, InvalidMonomorphization::ReturnElement { - span, - name, - in_elem, - in_ty, - ret_ty, - out_ty - }); + require!( + out_len == n, + InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len } + ); + require!( + in_elem == out_ty, + InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } + ); let total_len = u128::from(in_len) * 2; @@ -1431,13 +1424,16 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } if name == sym::simd_insert { - require!(in_elem == arg_tys[2], InvalidMonomorphization::InsertedType { - span, - name, - in_elem, - in_ty, - out_ty: arg_tys[2] - }); + require!( + in_elem == arg_tys[2], + InvalidMonomorphization::InsertedType { + span, + name, + in_elem, + in_ty, + out_ty: arg_tys[2] + } + ); let idx = bx .const_to_opt_u128(args[1].immediate(), false) .expect("typeck should have ensure that this is a const"); @@ -1456,13 +1452,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( )); } if name == sym::simd_extract { - require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { - span, - name, - in_elem, - in_ty, - ret_ty - }); + require!( + ret_ty == in_elem, + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } + ); let idx = bx .const_to_opt_u128(args[1].immediate(), false) .expect("typeck should have ensure that this is a const"); @@ -1481,18 +1474,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let m_elem_ty = in_elem; let m_len = in_len; let (v_len, _) = require_simd!(arg_tys[1], SimdArgument); - require!(m_len == v_len, InvalidMonomorphization::MismatchedLengths { - span, - name, - m_len, - v_len - }); - let in_elem_bitwidth = - require_int_ty!(m_elem_ty.kind(), InvalidMonomorphization::MaskType { - span, - name, - ty: m_elem_ty - }); + require!( + m_len == v_len, + InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } + ); + let in_elem_bitwidth = require_int_ty!( + m_elem_ty.kind(), + InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty } + ); 
let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1510,13 +1499,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let expected_bytes = in_len.div_ceil(8); // Integer vector <i{in_bitwidth} x in_len>: - let in_elem_bitwidth = - require_int_or_uint_ty!(in_elem.kind(), InvalidMonomorphization::VectorArgument { - span, - name, - in_ty, - in_elem - }); + let in_elem_bitwidth = require_int_or_uint_ty!( + in_elem.kind(), + InvalidMonomorphization::VectorArgument { span, name, in_ty, in_elem } + ); let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len); // Bitcast <i1 x N> to iN: @@ -1698,30 +1684,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>( require_simd!(ret_ty, SimdReturn); // Of the same length: - require!(in_len == out_len, InvalidMonomorphization::SecondArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[1], - out_len - }); - require!(in_len == out_len2, InvalidMonomorphization::ThirdArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[2], - out_len: out_len2 - }); + require!( + in_len == out_len, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len + } + ); + require!( + in_len == out_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: out_len2 + } + ); // The return type must match the first argument type - require!(ret_ty == in_ty, InvalidMonomorphization::ExpectedReturnType { - span, - name, - in_ty, - ret_ty - }); + require!( + ret_ty == in_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty } + ); require!( matches!( @@ -1739,13 +1729,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } ); - let mask_elem_bitwidth = - require_int_ty!(element_ty2.kind(), InvalidMonomorphization::ThirdArgElementType { + let mask_elem_bitwidth = require_int_ty!( + element_ty2.kind(), + InvalidMonomorphization::ThirdArgElementType { span, name, expected_element: element_ty2, third_arg: arg_tys[2] - }); + } + ); // Alignment of T, must be a constant integer value: let alignment_ty = bx.type_i32(); @@ -1805,22 +1797,23 @@ fn generic_simd_intrinsic<'ll, 'tcx>( require_simd!(ret_ty, SimdReturn); // Of the same length: - require!(values_len == mask_len, InvalidMonomorphization::ThirdArgumentLength { - span, - name, - in_len: mask_len, - in_ty: mask_ty, - arg_ty: values_ty, - out_len: values_len - }); + require!( + values_len == mask_len, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len: mask_len, + in_ty: mask_ty, + arg_ty: values_ty, + out_len: values_len + } + ); // The return type must match the last argument type - require!(ret_ty == values_ty, InvalidMonomorphization::ExpectedReturnType { - span, - name, - in_ty: values_ty, - ret_ty - }); + require!( + ret_ty == values_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty } + ); require!( matches!( @@ -1838,13 +1831,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } ); - let m_elem_bitwidth = - require_int_ty!(mask_elem.kind(), InvalidMonomorphization::ThirdArgElementType { + let m_elem_bitwidth = require_int_ty!( + mask_elem.kind(), + InvalidMonomorphization::ThirdArgElementType { span, name, expected_element: values_elem, third_arg: mask_ty, - }); + } + ); let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len); let mask_ty = 
bx.type_vector(bx.type_i1(), mask_len); @@ -1896,14 +1891,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let (values_len, values_elem) = require_simd!(values_ty, SimdThird); // Of the same length: - require!(values_len == mask_len, InvalidMonomorphization::ThirdArgumentLength { - span, - name, - in_len: mask_len, - in_ty: mask_ty, - arg_ty: values_ty, - out_len: values_len - }); + require!( + values_len == mask_len, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len: mask_len, + in_ty: mask_ty, + arg_ty: values_ty, + out_len: values_len + } + ); // The second argument must be a mutable pointer type matching the element type require!( @@ -1923,13 +1921,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } ); - let m_elem_bitwidth = - require_int_ty!(mask_elem.kind(), InvalidMonomorphization::ThirdArgElementType { + let m_elem_bitwidth = require_int_ty!( + mask_elem.kind(), + InvalidMonomorphization::ThirdArgElementType { span, name, expected_element: values_elem, third_arg: mask_ty, - }); + } + ); let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len); let mask_ty = bx.type_vector(bx.type_i1(), mask_len); @@ -1976,22 +1976,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird); // Of the same length: - require!(in_len == element_len1, InvalidMonomorphization::SecondArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[1], - out_len: element_len1 - }); - require!(in_len == element_len2, InvalidMonomorphization::ThirdArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[2], - out_len: element_len2 - }); + require!( + in_len == element_len1, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len: element_len1 + } + ); + require!( + in_len == element_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: element_len2 + } + ); require!( matches!( @@ -2011,13 +2017,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>( ); // The element type of the third argument must be a signed integer type of any width: - let mask_elem_bitwidth = - require_int_ty!(element_ty2.kind(), InvalidMonomorphization::ThirdArgElementType { + let mask_elem_bitwidth = require_int_ty!( + element_ty2.kind(), + InvalidMonomorphization::ThirdArgElementType { span, name, expected_element: element_ty2, third_arg: arg_tys[2] - }); + } + ); // Alignment of T, must be a constant integer value: let alignment_ty = bx.type_i32(); @@ -2058,13 +2066,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident, $identity:expr) => { if name == sym::$name { - require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { - span, - name, - in_elem, - in_ty, - ret_ty - }); + require!( + ret_ty == in_elem, + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } + ); return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { let r = bx.$integer_reduce(args[0].immediate()); @@ -2133,13 +2138,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( macro_rules! 
minmax_red { ($name:ident: $int_red:ident, $float_red:ident) => { if name == sym::$name { - require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { - span, - name, - in_elem, - in_ty, - ret_ty - }); + require!( + ret_ty == in_elem, + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } + ); return match in_elem.kind() { ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)), ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)), @@ -2164,13 +2166,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( ($name:ident : $red:ident, $boolean:expr) => { if name == sym::$name { let input = if !$boolean { - require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { - span, - name, - in_elem, - in_ty, - ret_ty - }); + require!( + ret_ty == in_elem, + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } + ); args[0].immediate() } else { let bitwidth = match in_elem.kind() { @@ -2218,25 +2217,27 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_cast_ptr { let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn); - require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - }); + require!( + in_len == out_len, + InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); match in_elem.kind() { ty::RawPtr(p_ty, _) => { let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| { bx.tcx.normalize_erasing_regions(bx.typing_env(), ty) }); - require!(metadata.is_unit(), InvalidMonomorphization::CastWidePointer { - span, - name, - ty: in_elem - }); + require!( + metadata.is_unit(), + InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem } + ); } _ => { return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem }) @@ -2247,11 +2248,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| { bx.tcx.normalize_erasing_regions(bx.typing_env(), ty) }); - require!(metadata.is_unit(), InvalidMonomorphization::CastWidePointer { - span, - name, - ty: out_elem - }); + require!( + metadata.is_unit(), + InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem } + ); } _ => { return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem }) @@ -2263,14 +2263,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_expose_provenance { let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn); - require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - }); + require!( + in_len == out_len, + InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); match in_elem.kind() { ty::RawPtr(_, _) => {} @@ -2288,14 +2291,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_with_exposed_provenance { let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn); - require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - }); + require!( + in_len == out_len, + InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); match in_elem.kind() { ty::Uint(ty::UintTy::Usize) => {} @@ -2313,14 +2319,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_cast || name == sym::simd_as { let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn); - require!(in_len == out_len, 
InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - }); + require!( + in_len == out_len, + InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); // casting cares about nominal type, not just structural type if in_elem == out_elem { return Ok(args[0].immediate()); diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 14346795fda..bcb1cc72b56 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -13,6 +13,7 @@ #![feature(extern_types)] #![feature(file_buffered)] #![feature(hash_raw_entry)] +#![feature(if_let_guard)] #![feature(impl_trait_in_assoc_type)] #![feature(iter_intersperse)] #![feature(let_chains)] @@ -29,7 +30,7 @@ use std::mem::ManuallyDrop; use back::owned_target_machine::OwnedTargetMachine; use back::write::{create_informational_target_machine, create_target_machine}; use errors::{AutoDiffWithoutLTO, ParseTargetMachineConfig}; -pub use llvm_util::target_features_cfg; +pub(crate) use llvm_util::target_features_cfg; use rustc_ast::expand::allocator::AllocatorKind; use rustc_ast::expand::autodiff_attrs::AutoDiffItem; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 0d04f770bc6..4d6a76b23ea 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -69,19 +69,6 @@ pub enum LLVMRustResult { Failure, } -/// Translation of LLVM's MachineTypes enum, defined in llvm\include\llvm\BinaryFormat\COFF.h. -/// -/// We include only architectures supported on Windows. -#[derive(Copy, Clone, PartialEq)] -#[repr(C)] -pub enum LLVMMachineType { - AMD64 = 0x8664, - I386 = 0x14c, - ARM64 = 0xaa64, - ARM64EC = 0xa641, - ARM = 0x01c0, -} - /// Must match the layout of `LLVMRustModuleFlagMergeBehavior`. /// /// When merging modules (e.g. during LTO), their metadata flags are combined. 
Conflicts are @@ -645,16 +632,6 @@ pub enum ThreadLocalMode { LocalExec, } -/// LLVMRustTailCallKind -#[derive(Copy, Clone)] -#[repr(C)] -pub enum TailCallKind { - None, - Tail, - MustTail, - NoTail, -} - /// LLVMRustChecksumKind #[derive(Copy, Clone)] #[repr(C)] @@ -773,7 +750,6 @@ pub struct Builder<'a>(InvariantOpaque<'a>); #[repr(C)] pub struct PassManager<'a>(InvariantOpaque<'a>); unsafe extern "C" { - pub type Pass; pub type TargetMachine; pub type Archive; } @@ -799,7 +775,6 @@ unsafe extern "C" { } pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void); -pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint); pub mod debuginfo { use std::ptr; @@ -853,7 +828,6 @@ pub mod debuginfo { pub type DIFile = DIScope; pub type DILexicalBlock = DIScope; pub type DISubprogram = DIScope; - pub type DINameSpace = DIScope; pub type DIType = DIDescriptor; pub type DIBasicType = DIType; pub type DIDerivedType = DIType; @@ -1809,7 +1783,6 @@ unsafe extern "C" { Name: *const c_char, NameLen: size_t, ) -> Option<&Value>; - pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind); // Operations on attributes pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute; @@ -2409,6 +2382,7 @@ unsafe extern "C" { LoopVectorize: bool, DisableSimplifyLibCalls: bool, EmitLifetimeMarkers: bool, + RunEnzyme: bool, SanitizerOptions: Option<&SanitizerOptions>, PGOGenPath: *const c_char, PGOUsePath: *const c_char, @@ -2586,8 +2560,6 @@ unsafe extern "C" { pub fn LLVMRustGetElementTypeArgIndex(CallSite: &Value) -> i32; - pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool; - pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool; pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool; diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index ee8c59015e6..b4b5d6a5b19 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -304,7 +304,7 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea /// Must express features in the way Rust understands them. /// /// We do not have to worry about RUSTC_SPECIFIC_FEATURES here, those are handled outside codegen. -pub fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { +pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { let mut features: FxHashSet<Symbol> = Default::default(); // Add base features for the target. diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs index 33789c6261f..70edee21bd6 100644 --- a/compiler/rustc_codegen_llvm/src/mono_item.rs +++ b/compiler/rustc_codegen_llvm/src/mono_item.rs @@ -71,10 +71,7 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { // compiler-rt, then we want to implicitly compile everything with hidden // visibility as we're going to link this object all over the place but // don't want the symbols to get exported. 
- if linkage != Linkage::Internal - && linkage != Linkage::Private - && self.tcx.is_compiler_builtins(LOCAL_CRATE) - { + if linkage != Linkage::Internal && self.tcx.is_compiler_builtins(LOCAL_CRATE) { llvm::set_visibility(lldecl, llvm::Visibility::Hidden); } else { llvm::set_visibility(lldecl, base::visibility_to_llvm(visibility)); } diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index c56ad886120..d61ce417562 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -1,14 +1,14 @@ use std::{fmt, ptr}; use libc::{c_char, c_uint}; -use rustc_abi::{AddressSpace, Align, Integer, Size}; +use rustc_abi::{AddressSpace, Align, Integer, Reg, Size}; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::traits::*; use rustc_data_structures::small_c_str::SmallCStr; use rustc_middle::bug; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{self, Ty}; -use rustc_target::callconv::{CastTarget, FnAbi, Reg}; +use rustc_target::callconv::{CastTarget, FnAbi}; use crate::abi::{FnAbiLlvmExt, LlvmType}; use crate::context::{CodegenCx, SimpleCx}; @@ -237,11 +237,11 @@ impl<'ll, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { impl Type { /// Creates an integer type with the given number of bits, e.g., i24 - pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { + pub(crate) fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } } - pub fn ptr_llcx(llcx: &llvm::Context) -> &Type { + pub(crate) fn ptr_llcx(llcx: &llvm::Context) -> &Type { unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) } } }
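A few illustrative sketches follow; none of this code is part of the diff itself. First, the abi.rs hunks: a range attribute is attached to an integer scalar argument only when optimizing with LLVM 19 or newer, and both booleans (whose 0..2 range would collapse to 0..0 on i1) and always-valid ranges are excluded, because the LLVM verifier rejects those attributes. A hedged restatement of that guard, where WrappingRange and the bool/always-valid checks are simplified stand-ins for the rustc_abi types elided from the hunk:

#[derive(Clone, Copy)]
struct WrappingRange {
    start: u128,
    end: u128,
}

fn wants_range_attr(
    optimize: bool,
    llvm_version: (u32, u32, u32),
    is_int: bool,
    is_bool: bool,
    valid: WrappingRange,
    full: WrappingRange,
) -> bool {
    // A range equal to the full range of the type carries no information,
    // and LLVM rejects it outright.
    let always_valid = valid.start == full.start && valid.end == full.end;
    optimize
        && llvm_version >= (19, 0, 0) // the range attribute needs LLVM 19+
        && is_int                     // only integer scalars are annotated
        && !is_bool                   // bool's 0..2 becomes 0..0 on i1: verifier error
        && !always_valid
}

fn main() {
    let full = WrappingRange { start: 0, end: u32::MAX as u128 };
    let valid = WrappingRange { start: 0, end: 10 };
    assert!(wants_range_attr(true, (19, 1, 0), true, false, valid, full));
    assert!(!wants_range_attr(true, (18, 1, 0), true, false, valid, full)); // needs LLVM 19+
}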
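The largest behavioral change is the autodiff staging in back/write.rs and back/lto.rs: the old skip_size_increasing_opts boolean becomes a three-state AutodiffStage, LLVMRustOptimize gains a RunEnzyme flag, and the post-differentiation optimization run moves out of differentiate() into run_pass_manager (a DuringAD pass first, then a PostAD pass for fat LTO). A minimal, self-contained sketch of the staging decision, assuming OptFlags as an invented stand-in for the relevant ModuleConfig fields:

#[derive(Debug, Eq, PartialEq)]
enum AutodiffStage {
    PreAD,    // llvm opts, but size-increasing opts (vectorize, unroll) held back
    DuringAD, // like PreAD, plus the Enzyme differentiation pass
    PostAD,   // differentiation done: run everything, including size-increasing opts
}

struct OptFlags {
    unroll_loops: bool,
    vectorize_slp: bool,
    vectorize_loop: bool,
    run_enzyme: bool,
}

fn opt_flags(llvm_enzyme: bool, stage: AutodiffStage, config: &OptFlags) -> OptFlags {
    // Mirrors the branch in llvm_optimize: before PostAD, size-increasing
    // opts are forced off so Enzyme differentiates simpler IR.
    let postpone = llvm_enzyme && stage != AutodiffStage::PostAD;
    OptFlags {
        unroll_loops: !postpone && config.unroll_loops,
        vectorize_slp: !postpone && config.vectorize_slp,
        vectorize_loop: !postpone && config.vectorize_loop,
        run_enzyme: llvm_enzyme && stage == AutodiffStage::DuringAD,
    }
}

fn main() {
    let config =
        OptFlags { unroll_loops: true, vectorize_slp: true, vectorize_loop: true, run_enzyme: false };
    let during = opt_flags(true, AutodiffStage::DuringAD, &config);
    assert!(during.run_enzyme && !during.vectorize_loop);
    let post = opt_flags(true, AutodiffStage::PostAD, &config);
    assert!(!post.run_enzyme && post.vectorize_loop);
}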
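In builder/autodiff.rs, two rules are worth calling out. The Enzyme entry-point symbol now depends only on the differentiation mode (the ForwardFirst/ReverseFirst variants were dropped), and the pointer-kind assertion on a shadow argument now applies only to Duplicated/DuplicatedOnly, since for Dual no such assumption holds (an f32 shadow, for example, is valid). A hedged sketch with simplified stand-in enums:

#[derive(Clone, Copy, PartialEq)]
enum DiffMode { Forward, Reverse }

#[derive(Clone, Copy, PartialEq)]
enum DiffActivity { Const, Active, Dual, Duplicated, DuplicatedOnly }

fn enzyme_symbol(mode: DiffMode) -> &'static str {
    // The symbol is chosen purely by mode now.
    match mode {
        DiffMode::Forward => "__enzyme_fwddiff",
        DiffMode::Reverse => "__enzyme_autodiff",
    }
}

fn takes_shadow_arg(activity: DiffActivity) -> bool {
    // Duplicated arguments receive a shadow argument into which Enzyme
    // writes the gradient, so they consume two outer-function parameters.
    matches!(
        activity,
        DiffActivity::Dual | DiffActivity::Duplicated | DiffActivity::DuplicatedOnly
    )
}

fn shadow_must_be_pointer(activity: DiffActivity) -> bool {
    // The new assertion applies only to Duplicated/DuplicatedOnly.
    matches!(activity, DiffActivity::Duplicated | DiffActivity::DuplicatedOnly)
}

fn main() {
    assert_eq!(enzyme_symbol(DiffMode::Reverse), "__enzyme_autodiff");
    assert!(takes_shadow_arg(DiffActivity::Dual));
    assert!(!shadow_must_be_pointer(DiffActivity::Dual));
}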
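The coverage changes in coverageinfo/mapgen/covfun.rs and coverageinfo/mod.rs move from per-term bookkeeping (CounterIncrement/ExpressionUsed statements and is_zero_term checks) to BCB-keyed tables: each mapping region resolves a BasicCoverageBlock through term_for_bcb (zero for unused functions), and CoverageKind::VirtualCounter emits llvm.instrprof.increment only when phys_counter_for_node has an entry for that block. A toy model with invented container types (rustc uses index maps rather than HashMap):

use std::collections::HashMap;

type Bcb = u32;

#[derive(Clone, Copy, Debug, PartialEq)]
enum CovTerm {
    Zero,
    Counter(u32),
    Expression(u32),
}

struct CoverageIdsInfo {
    term_for_bcb: HashMap<Bcb, CovTerm>,
    phys_counter_for_node: HashMap<Bcb, u32>,
    num_counters: u32,
}

fn counter_for_bcb(info: &CoverageIdsInfo, is_used: bool, bcb: Bcb) -> CovTerm {
    if is_used {
        *info.term_for_bcb.get(&bcb).expect("every BCB in a mapping was given a term")
    } else {
        // If this function is unused, all counters collapse to zero.
        CovTerm::Zero
    }
}

fn codegen_counter(info: &CoverageIdsInfo, bcb: Bcb) {
    if let Some(&id) = info.phys_counter_for_node.get(&bcb) {
        // Stands in for bx.instrprof_increment(fn_name, hash, num_counters, id).
        println!("instrprof.increment(num_counters={}, index={id})", info.num_counters);
    }
    // BCBs without a physical counter lower to nothing.
}

fn main() {
    let mut info = CoverageIdsInfo {
        term_for_bcb: HashMap::new(),
        phys_counter_for_node: HashMap::new(),
        num_counters: 1,
    };
    info.term_for_bcb.insert(0, CovTerm::Counter(0));
    info.phys_counter_for_node.insert(0, 0);
    assert_eq!(counter_for_bcb(&info, true, 0), CovTerm::Counter(0));
    assert_eq!(counter_for_bcb(&info, false, 0), CovTerm::Zero); // unused fn => zero
    codegen_counter(&info, 0); // emits an increment; a BCB with no entry would emit nothing
}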
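Finally, two small debuginfo fixes are easy to miss: the "Dwarf Version" module flag now merges with Max semantics, so that when CGUs carrying different DWARF versions are combined (e.g. by cross-crate LTO) the merged module keeps the highest version, matching Clang; and alignment is now passed to LLVM's DIBuilder in bits (align.bits()) where one call site previously passed align.bytes(). A toy model of the Max merge rule:

fn merge_dwarf_version(ours: u32, theirs: u32) -> u32 {
    // ModuleFlagMergeBehavior::Max keeps the larger value instead of warning.
    ours.max(theirs)
}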
