| author | Sean Cross <sean@xobs.io> | 2021-04-25 00:35:25 +0800 |
|---|---|---|
| committer | Sean Cross <sean@xobs.io> | 2021-04-25 00:35:25 +0800 |
| commit | f9d390d14ad891c4ce9fe108b86d6756ea5154ee (patch) | |
| tree | 5a12452fef7481362a5fcd06beb491ca4bcf7a69 /compiler/rustc_codegen_ssa/src | |
| parent | 8f73fe91f5db7de6e42ad7824a00b9729d2925b2 (diff) | |
| parent | e11a9fa52a3f372dadd6db3d3f2ed7dc2621dcc4 (diff) | |
Merge remote-tracking branch 'upstream/master' into impl-16351-nightly
Signed-off-by: Sean Cross <sean@xobs.io>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
25 files changed, 471 insertions, 416 deletions
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index caa6a6a8e3a..ea75943d6f3 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -6,7 +6,7 @@
 use rustc_hir::def_id::CrateNum;
 use rustc_middle::middle::cstore::{EncodedMetadata, LibSource};
 use rustc_middle::middle::dependency_format::Linkage;
 use rustc_session::config::{self, CFGuard, CrateType, DebugInfo};
-use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SanitizerSet};
+use rustc_session::config::{OutputFilenames, OutputType, PrintRequest};
 use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
 use rustc_session::search_paths::PathKind;
 use rustc_session::utils::NativeLibKind;
@@ -16,7 +16,7 @@
 use rustc_session::{filesearch, Session};
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
 use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor, SplitDebuginfo};
-use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel, Target};
+use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel, SanitizerSet, Target};
 use super::archive::ArchiveBuilder;
 use super::command::Command;
@@ -711,7 +711,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>(
         status.signal() == Some(libc::SIGILL)
     }
 
-    #[cfg(windows)]
+    #[cfg(not(unix))]
     fn is_illegal_instruction(_status: &ExitStatus) -> bool {
         false
     }
@@ -922,28 +922,20 @@ fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
         .map(|channel| format!("-{}", channel))
         .unwrap_or_default();
 
-    match sess.opts.target_triple.triple() {
-        "aarch64-apple-darwin" | "x86_64-apple-darwin" => {
-            // On Apple platforms, the sanitizer is always built as a dylib, and
-            // LLVM will link to `@rpath/*.dylib`, so we need to specify an
-            // rpath to the library as well (the rpath should be absolute, see
-            // PR #41352 for details).
-            let filename = format!("rustc{}_rt.{}", channel, name);
-            let path = find_sanitizer_runtime(&sess, &filename);
-            let rpath = path.to_str().expect("non-utf8 component in path");
-            linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
-            linker.link_dylib(Symbol::intern(&filename));
-        }
-        "aarch64-fuchsia"
-        | "aarch64-unknown-linux-gnu"
-        | "x86_64-fuchsia"
-        | "x86_64-unknown-freebsd"
-        | "x86_64-unknown-linux-gnu" => {
-            let filename = format!("librustc{}_rt.{}.a", channel, name);
-            let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
-            linker.link_whole_rlib(&path);
-        }
-        _ => {}
+    if sess.target.is_like_osx {
+        // On Apple platforms, the sanitizer is always built as a dylib, and
+        // LLVM will link to `@rpath/*.dylib`, so we need to specify an
+        // rpath to the library as well (the rpath should be absolute, see
+        // PR #41352 for details).
+        let filename = format!("rustc{}_rt.{}", channel, name);
+        let path = find_sanitizer_runtime(&sess, &filename);
+        let rpath = path.to_str().expect("non-utf8 component in path");
+        linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
+        linker.link_dylib(Symbol::intern(&filename));
+    } else {
+        let filename = format!("librustc{}_rt.{}.a", channel, name);
+        let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
+        linker.link_whole_rlib(&path);
     }
 }
@@ -1198,7 +1190,7 @@ fn exec_linker(
     flush_linked_file(&output, out_filename)?;
     return output;
 
-    #[cfg(unix)]
+    #[cfg(not(windows))]
     fn flush_linked_file(_: &io::Result<Output>, _: &Path) -> io::Result<()> {
         Ok(())
     }
@@ -1238,6 +1230,11 @@ fn exec_linker(
         err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE)
     }
 
+    #[cfg(not(any(unix, windows)))]
+    fn command_line_too_big(_: &io::Error) -> bool {
+        false
+    }
+
     struct Escape<'a> {
         arg: &'a str,
         is_like_msvc: bool,
@@ -1414,15 +1411,10 @@ fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_ty
     }
 }
 
-/// Add arbitrary "user defined" args defined from command line and by `#[link_args]` attributes.
+/// Add arbitrary "user defined" args defined from command line.
 /// FIXME: Determine where exactly these args need to be inserted.
-fn add_user_defined_link_args(
-    cmd: &mut dyn Linker,
-    sess: &Session,
-    codegen_results: &CodegenResults,
-) {
+fn add_user_defined_link_args(cmd: &mut dyn Linker, sess: &Session) {
     cmd.args(&sess.opts.cg.link_args);
-    cmd.args(&*codegen_results.crate_info.link_args);
 }
 
 /// Add arbitrary "late link" args defined by the target spec.
@@ -1646,6 +1638,16 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>(
         cmd.add_eh_frame_header();
     }
 
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    // Make the binary compatible with data execution prevention schemes.
+    cmd.add_no_exec();
+
+    // NO-OPT-OUT, OBJECT-FILES-NO
+    // Avoid linking to dynamic libraries unless they satisfy some undefined symbols
+    // at the point at which they are specified on the command line.
+    // Must be passed before any dynamic libraries.
+    cmd.add_as_needed();
+
     // NO-OPT-OUT, OBJECT-FILES-NO
     if crt_objects_fallback {
         cmd.no_crt_objects();
@@ -1741,7 +1743,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>(
     );
 
     // OBJECT-FILES-NO, AUDIT-ORDER
-    if sess.opts.cg.profile_generate.enabled() || sess.opts.debugging_opts.instrument_coverage {
+    if sess.opts.cg.profile_generate.enabled() || sess.instrument_coverage() {
         cmd.pgo_gen();
     }
@@ -1754,7 +1756,7 @@
     add_rpath_args(cmd, sess, codegen_results, out_filename);
 
     // OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT
-    add_user_defined_link_args(cmd, sess, codegen_results);
+    add_user_defined_link_args(cmd, sess);
 
     // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
     cmd.finalize();
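The rewritten `link_sanitizer_runtime` replaces the hard-coded target list with the `is_like_osx` test, and the two branches then differ only in how the runtime library is named and linked. A minimal standalone sketch of that naming rule (the `is_like_osx` argument stands in for `sess.target.is_like_osx`; `channel` is empty on stable and `-nightly` on nightly):

```rust
/// Sketch of the runtime-library naming in `link_sanitizer_runtime` above.
fn sanitizer_runtime_filename(channel: &str, name: &str, is_like_osx: bool) -> String {
    if is_like_osx {
        // Apple: a dylib, resolved by the linker through `@rpath/*.dylib`.
        format!("rustc{}_rt.{}", channel, name)
    } else {
        // Elsewhere: a static archive that is linked in whole.
        format!("librustc{}_rt.{}.a", channel, name)
    }
}

fn main() {
    assert_eq!(sanitizer_runtime_filename("-nightly", "asan", true), "rustc-nightly_rt.asan");
    assert_eq!(sanitizer_runtime_filename("-nightly", "asan", false), "librustc-nightly_rt.asan.a");
}
```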
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index bb35e7ec894..77d8ab49ff2 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -130,6 +130,8 @@ pub trait Linker {
     fn group_end(&mut self);
     fn linker_plugin_lto(&mut self);
     fn add_eh_frame_header(&mut self) {}
+    fn add_no_exec(&mut self) {}
+    fn add_as_needed(&mut self) {}
     fn finalize(&mut self);
 }
@@ -184,7 +186,7 @@ impl<'a> GccLinker<'a> {
         // * On OSX they have their own linker, not binutils'
         // * For WebAssembly the only functional linker is LLD, which doesn't
         //   support hint flags
-        !self.sess.target.is_like_osx && self.sess.target.arch != "wasm32"
+        !self.sess.target.is_like_osx && !self.sess.target.is_like_wasm
     }
 
     // Some platforms take hints about whether a library is static or dynamic.
@@ -641,6 +643,20 @@ impl<'a> Linker for GccLinker<'a> {
     fn add_eh_frame_header(&mut self) {
         self.linker_arg("--eh-frame-hdr");
     }
+
+    fn add_no_exec(&mut self) {
+        if self.sess.target.is_like_windows {
+            self.linker_arg("--nxcompat");
+        } else if self.sess.target.linker_is_gnu {
+            self.linker_arg("-znoexecstack");
+        }
+    }
+
+    fn add_as_needed(&mut self) {
+        if self.sess.target.linker_is_gnu {
+            self.linker_arg("--as-needed");
+        }
+    }
 }
 
 pub struct MsvcLinker<'a> {
@@ -878,6 +894,10 @@ impl<'a> Linker for MsvcLinker<'a> {
     fn linker_plugin_lto(&mut self) {
         // Do nothing
     }
+
+    fn add_no_exec(&mut self) {
+        self.cmd.arg("/NXCOMPAT");
+    }
 }
 
 pub struct EmLinker<'a> {
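The new `add_no_exec` and `add_as_needed` hooks route their flags through `linker_arg`, because `GccLinker` drives the link through a C compiler front end rather than invoking the linker directly. A simplified sketch of that forwarding, assuming the usual `-Wl,` pass-through convention of cc-style drivers (the real `linker_arg` is more involved):

```rust
use std::process::Command;

/// Simplified stand-in for a GccLinker-style `linker_arg`: wrap a raw linker
/// flag so a cc-style driver forwards it to the underlying linker.
fn linker_arg(cmd: &mut Command, arg: &str) {
    cmd.arg(format!("-Wl,{}", arg));
}

fn main() {
    let mut cmd = Command::new("cc");
    linker_arg(&mut cmd, "--as-needed");   // must precede the libraries it filters
    linker_arg(&mut cmd, "-znoexecstack"); // non-executable stack on GNU linkers
    cmd.args(&["main.o", "-lm", "-o", "main"]);
    println!("{:?}", cmd);
}
```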
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
index 0d7f4447696..0ff05229466 100644
--- a/compiler/rustc_codegen_ssa/src/back/lto.rs
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -2,6 +2,7 @@
 use super::write::CodegenContext;
 use crate::traits::*;
 use crate::ModuleCodegen;
+use rustc_data_structures::memmap::Mmap;
 use rustc_errors::FatalError;
 
 use std::ffi::CString;
@@ -93,7 +94,7 @@ impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
 pub enum SerializedModule<M: ModuleBufferMethods> {
     Local(M),
     FromRlib(Vec<u8>),
-    FromUncompressedFile(memmap::Mmap),
+    FromUncompressedFile(Mmap),
 }
 
 impl<M: ModuleBufferMethods> SerializedModule<M> {
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
index 005d2efdd3b..5f21046b05e 100644
--- a/compiler/rustc_codegen_ssa/src/back/rpath.rs
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -24,7 +24,7 @@ pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
     debug!("preparing the RPATH!");
 
-    let libs = config.used_crates.clone();
+    let libs = config.used_crates;
     let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::<Vec<_>>();
     let rpaths = get_rpaths(config, &libs);
     let mut flags = rpaths_to_flags(&rpaths);
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 9a6f8cde1b2..b8f277c8ff5 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -15,7 +15,8 @@
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::{SymbolName, TyCtxt};
-use rustc_session::config::{CrateType, SanitizerSet};
+use rustc_session::config::CrateType;
+use rustc_target::spec::SanitizerSet;
 
 pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
     crates_export_threshold(&tcx.sess.crate_types())
@@ -188,9 +189,7 @@ fn exported_symbols_provider_local(
         }
     }
 
-    if tcx.sess.opts.debugging_opts.instrument_coverage
-        || tcx.sess.opts.cg.profile_generate.enabled()
-    {
+    if tcx.sess.instrument_coverage() || tcx.sess.opts.cg.profile_generate.enabled() {
         // These are weak symbols that point to the profile version and the
         // profile name, which need to be treated as exported so LTO doesn't nix
         // them.
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 7b8ce157fc2..c8688faa80b 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -10,6 +10,7 @@ use crate::{
 use crate::traits::*;
 use jobserver::{Acquired, Client};
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::profiling::TimingGuard;
 use rustc_data_structures::profiling::VerboseTimingGuard;
@@ -27,12 +28,12 @@
 use rustc_middle::middle::exported_symbols::SymbolExportLevel;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::cgu_reuse_tracker::CguReuseTracker;
 use rustc_session::config::{self, CrateType, Lto, OutputFilenames, OutputType};
-use rustc_session::config::{Passes, SanitizerSet, SwitchWithOptPath};
+use rustc_session::config::{Passes, SwitchWithOptPath};
 use rustc_session::Session;
 use rustc_span::source_map::SourceMap;
 use rustc_span::symbol::{sym, Symbol};
 use rustc_span::{BytePos, FileName, InnerSpan, Pos, Span};
-use rustc_target::spec::{MergeFunctions, PanicStrategy};
+use rustc_target::spec::{MergeFunctions, PanicStrategy, SanitizerSet};
 
 use std::any::Any;
 use std::fs;
@@ -106,7 +107,7 @@ pub struct ModuleConfig {
     pub vectorize_loop: bool,
     pub vectorize_slp: bool,
     pub merge_functions: bool,
-    pub inline_threshold: Option<usize>,
+    pub inline_threshold: Option<u32>,
     pub new_llvm_pass_manager: bool,
     pub emit_lifetime_markers: bool,
 }
@@ -176,7 +177,7 @@ impl ModuleConfig {
         // The rustc option `-Zinstrument_coverage` injects intrinsic calls to
         // `llvm.instrprof.increment()`, which requires the LLVM `instrprof` pass.
-        if sess.opts.debugging_opts.instrument_coverage {
+        if sess.instrument_coverage() {
             passes.push("instrprof".to_owned());
         }
         passes
@@ -433,12 +434,10 @@ pub fn start_async_codegen<B: ExtraBackendMethods>(
     let sess = tcx.sess;
     let crate_name = tcx.crate_name(LOCAL_CRATE);
-    let no_builtins = tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::no_builtins);
-    let is_compiler_builtins =
-        tcx.sess.contains_name(&tcx.hir().krate().item.attrs, sym::compiler_builtins);
-    let subsystem = tcx
-        .sess
-        .first_attr_value_str_by_name(&tcx.hir().krate().item.attrs, sym::windows_subsystem);
+    let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+    let no_builtins = tcx.sess.contains_name(crate_attrs, sym::no_builtins);
+    let is_compiler_builtins = tcx.sess.contains_name(crate_attrs, sym::compiler_builtins);
+    let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
     let windows_subsystem = subsystem.map(|subsystem| {
         if subsystem != sym::windows && subsystem != sym::console {
             tcx.sess.fatal(&format!(
@@ -1095,7 +1094,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
     //   only place where we have access to the compiler `Session`.
     // - LLVM work can be done on any thread.
     // - Codegen can only happen on the main thread.
-    // - Each thread doing substantial work most be in possession of a `Token`
+    // - Each thread doing substantial work must be in possession of a `Token`
     //   from the `Jobserver`.
     // - The compiler process always holds one `Token`. Any additional `Tokens`
     //   have to be requested from the `Jobserver`.
@@ -1147,7 +1146,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
     // if possible. These two goals are at odds with each other: If memory
     // consumption were not an issue, we could just let the main thread produce
     // LLVM WorkItems at full speed, assuring maximal utilization of
-    // Tokens/LLVM worker threads. However, since codegen usual is faster
+    // Tokens/LLVM worker threads. However, since codegen is usually faster
    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
     // WorkItem potentially holds on to a substantial amount of memory.
     //
@@ -1960,7 +1959,7 @@ pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
         .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
 
     let mmap = unsafe {
-        memmap::Mmap::map(&file).unwrap_or_else(|e| {
+        Mmap::map(file).unwrap_or_else(|e| {
             panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
         })
    };
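The scheduling comments above revolve around jobserver tokens: the process starts with one implicit token, and every thread doing substantial work must hold one. A minimal sketch of that discipline using the `jobserver` crate the code already imports (a toy setup; rustc normally inherits the client from the build system via `Client::from_env` rather than creating its own):

```rust
use jobserver::Client;

fn main() -> std::io::Result<()> {
    // A private jobserver with two tokens, standing in for the one rustc
    // would share with cargo or make.
    let client = Client::new(2)?;

    // Each unit of substantial work holds a token for its duration...
    let token_a = client.acquire()?;
    let token_b = client.acquire()?;

    // ...and releases it (here via drop) so that further work can proceed.
    drop(token_a);
    let token_c = client.acquire()?; // would block while both tokens were held
    drop(token_b);
    drop(token_c);
    Ok(())
}
```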
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 08e31c3b37f..318eed76acf 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -754,7 +754,6 @@ impl CrateInfo {
             is_no_builtins: Default::default(),
             native_libraries: Default::default(),
             used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
-            link_args: tcx.link_args(LOCAL_CRATE),
             crate_name: Default::default(),
             used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
             used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
index af6c476292b..962c01c2ee7 100644
--- a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
@@ -24,21 +24,39 @@ pub enum CounterKind {
 pub struct Counter {
     // Important: The layout (order and types of fields) must match its C++ counterpart.
     pub kind: CounterKind,
-    pub id: u32,
+    id: u32,
 }
 
 impl Counter {
+    /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
+    /// `id` is not used.
     pub fn zero() -> Self {
         Self { kind: CounterKind::Zero, id: 0 }
     }
 
+    /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
+    /// the given 1-based counter_id to the required 0-based equivalent for
+    /// the `Counter` encoding.
     pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
-        Self { kind: CounterKind::CounterValueReference, id: counter_id.into() }
+        Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
     }
 
+    /// Constructs a new `Counter` of kind `Expression`.
     pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
         Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
     }
+
+    /// Returns true if the `Counter` kind is `Zero`.
+    pub fn is_zero(&self) -> bool {
+        matches!(self.kind, CounterKind::Zero)
+    }
+
+    /// An explicitly-named function to get the ID value, making it more obvious
+    /// that the stored value is now 0-based.
+    pub fn zero_based_id(&self) -> u32 {
+        debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
+        self.id
+    }
 }
 
 /// Aligns with [llvm::coverage::CounterExpression::ExprKind](https://github.com/rust-lang/llvm-project/blob/rustc/11.0-2020-10-12/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L147)
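The `Counter` changes above pivot on an off-by-one convention: `CounterValueReference` ids are 1-based (operand id 0 is reserved to mean "zero counter"), while the LLVM-side `Counter` encoding is 0-based. A small sketch of that conversion, with a hypothetical newtype standing in for rustc's index type:

```rust
/// Hypothetical stand-in for rustc's 1-based counter index newtype.
#[derive(Clone, Copy, Debug, PartialEq)]
struct CounterValueReference(u32);

impl CounterValueReference {
    fn new(id: u32) -> Self {
        assert!(id > 0, "counter ids are 1-based; 0 is reserved for the Zero kind");
        CounterValueReference(id)
    }

    /// What `zero_based_index()` provides: the id shifted down to the
    /// 0-based numbering the LLVM `Counter` encoding expects.
    fn zero_based_index(self) -> u32 {
        self.0 - 1
    }
}

fn main() {
    let first = CounterValueReference::new(1);
    assert_eq!(first.zero_based_index(), 0);
}
```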
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
index 549b8d41f51..4458fd68678 100644
--- a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -8,7 +8,7 @@ use rustc_middle::mir::coverage::{
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::TyCtxt;
 
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct Expression {
     lhs: ExpressionOperandId,
     op: Op,
@@ -31,27 +31,44 @@ pub struct Expression {
 pub struct FunctionCoverage<'tcx> {
     instance: Instance<'tcx>,
     source_hash: u64,
+    is_used: bool,
     counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
     expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
     unreachable_regions: Vec<CodeRegion>,
 }
 
 impl<'tcx> FunctionCoverage<'tcx> {
+    /// Creates a new set of coverage data for a used (called) function.
     pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        Self::create(tcx, instance, true)
+    }
+
+    /// Creates a new set of coverage data for an unused (never called) function.
+    pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        Self::create(tcx, instance, false)
+    }
+
+    fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
         let coverageinfo = tcx.coverageinfo(instance.def_id());
         debug!(
-            "FunctionCoverage::new(instance={:?}) has coverageinfo={:?}",
-            instance, coverageinfo
+            "FunctionCoverage::new(instance={:?}) has coverageinfo={:?}. is_used={}",
+            instance, coverageinfo, is_used
         );
         Self {
             instance,
             source_hash: 0, // will be set with the first `add_counter()`
+            is_used,
             counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
             expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
             unreachable_regions: Vec::new(),
         }
     }
 
+    /// Returns true for a used (called) function, and false for an unused function.
+    pub fn is_used(&self) -> bool {
+        self.is_used
+    }
+
     /// Sets the function source hash value. If called multiple times for the same function, all
     /// calls should have the same hash value.
     pub fn set_function_source_hash(&mut self, source_hash: u64) {
@@ -64,7 +81,9 @@ impl<'tcx> FunctionCoverage<'tcx> {
     /// Adds a code region to be counted by an injected counter intrinsic.
     pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
-        self.counters[id].replace(region).expect_none("add_counter called with duplicate `id`");
+        if let Some(previous_region) = self.counters[id].replace(region.clone()) {
+            assert_eq!(previous_region, region, "add_counter: code region for id changed");
+        }
     }
 
     /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
@@ -94,9 +113,18 @@ impl<'tcx> FunctionCoverage<'tcx> {
             expression_id, lhs, op, rhs, region
         );
         let expression_index = self.expression_index(u32::from(expression_id));
-        self.expressions[expression_index]
-            .replace(Expression { lhs, op, rhs, region })
-            .expect_none("add_counter_expression called with duplicate `id_descending_from_max`");
+        if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+            lhs,
+            op,
+            rhs,
+            region: region.clone(),
+        }) {
+            assert_eq!(
+                previous_expression,
+                Expression { lhs, op, rhs, region },
+                "add_counter_expression: expression for id changed"
+            );
+        }
     }
 
     /// Add a region that will be marked as "unreachable", with a constant "zero counter".
@@ -117,8 +145,8 @@ impl<'tcx> FunctionCoverage<'tcx> {
         &'a self,
     ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &'a CodeRegion)>) {
         assert!(
-            self.source_hash != 0,
-            "No counters provided the source_hash for function: {:?}",
+            self.source_hash != 0 || !self.is_used,
+            "No counters provided the source_hash for used function: {:?}",
             self.instance
         );
@@ -135,9 +163,7 @@ impl<'tcx> FunctionCoverage<'tcx> {
         self.counters.iter_enumerated().filter_map(|(index, entry)| {
             // Option::map() will return None to filter out missing counters. This may happen
             // if, for example, a MIR-instrumented counter is removed during an optimization.
-            entry.as_ref().map(|region| {
-                (Counter::counter_value_reference(index as CounterValueReference), region)
-            })
+            entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
         })
     }
@@ -178,9 +204,15 @@ impl<'tcx> FunctionCoverage<'tcx> {
         if id == ExpressionOperandId::ZERO {
             Some(Counter::zero())
         } else if id.index() < self.counters.len() {
+            debug_assert!(
+                id.index() > 0,
+                "ExpressionOperandId indexes for counters are 1-based, but this id={}",
+                id.index()
+            );
             // Note: Some codegen-injected Counters may be only referenced by `Expression`s,
             // and may not have their own `CodeRegion`s,
             let index = CounterValueReference::from(id.index());
+            // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
             Some(Counter::counter_value_reference(index))
         } else {
             let index = self.expression_index(u32::from(id));
@@ -205,19 +237,60 @@ impl<'tcx> FunctionCoverage<'tcx> {
             let optional_region = &expression.region;
             let Expression { lhs, op, rhs, .. } = *expression;
 
-            if let Some(Some((lhs_counter, rhs_counter))) =
-                id_to_counter(&new_indexes, lhs).map(|lhs_counter| {
+            if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
+                .map(|lhs_counter| {
                     id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
                 })
             {
+                if lhs_counter.is_zero() && op.is_subtract() {
+                    // The left side of a subtraction was probably optimized out. As an example,
+                    // a branch condition might be evaluated as a constant expression, and the
+                    // branch could be removed, dropping unused counters in the process.
+                    //
+                    // Since counters are unsigned, we must assume the result of the expression
+                    // can be no more and no less than zero. An expression known to evaluate to zero
+                    // does not need to be added to the coverage map.
+                    //
+                    // Coverage test `loops_branches.rs` includes multiple variations of branches
+                    // based on constant conditional (literal `true` or `false`), and demonstrates
+                    // that the expected counts are still correct.
+                    debug!(
+                        "Expression subtracts from zero (assume unreachable): \
+                        original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+                        original_index, lhs, op, rhs, optional_region,
+                    );
+                    rhs_counter = Counter::zero();
+                }
                 debug_assert!(
-                    (lhs_counter.id as usize)
-                        < usize::max(self.counters.len(), self.expressions.len())
+                    lhs_counter.is_zero()
+                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+                        || ((lhs_counter.zero_based_id() as usize)
+                            <= usize::max(self.counters.len(), self.expressions.len())),
+                    "lhs id={} > both counters.len()={} and expressions.len()={}
+                    ({:?} {:?} {:?})",
+                    lhs_counter.zero_based_id(),
+                    self.counters.len(),
+                    self.expressions.len(),
+                    lhs_counter,
+                    op,
+                    rhs_counter,
                 );
+
                 debug_assert!(
-                    (rhs_counter.id as usize)
-                        < usize::max(self.counters.len(), self.expressions.len())
+                    rhs_counter.is_zero()
+                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+                        || ((rhs_counter.zero_based_id() as usize)
+                            <= usize::max(self.counters.len(), self.expressions.len())),
+                    "rhs id={} > both counters.len()={} and expressions.len()={}
+                    ({:?} {:?} {:?})",
+                    rhs_counter.zero_based_id(),
+                    self.counters.len(),
+                    self.expressions.len(),
+                    lhs_counter,
+                    op,
+                    rhs_counter,
                 );
+
                 // Both operands exist. `Expression` operands exist in `self.expressions` and have
                 // been assigned a `new_index`.
                 let mapped_expression_index =
@@ -240,11 +313,15 @@ impl<'tcx> FunctionCoverage<'tcx> {
                     expression_regions.push((Counter::expression(mapped_expression_index), region));
                 }
             } else {
-                debug!(
-                    "Ignoring expression with one or more missing operands: \
-                    original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
-                    original_index, lhs, op, rhs, optional_region,
-                )
+                bug!(
+                    "expression has one or more missing operands \
+                    original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+                    original_index,
+                    lhs,
+                    op,
+                    rhs,
+                    optional_region,
+                );
             }
         }
         (counter_expressions, expression_regions.into_iter())
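The "subtracts from zero" branch above can be read as a normalization rule: counters are unsigned, so once the left operand of a subtraction is known to be zero, the whole expression is zero and the right operand can be replaced by zero as well. A toy model of that rule on a simplified counter type (not the real rustc types):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Counter {
    Zero,
    Id(u32),
}

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Op {
    Add,
    Subtract,
}

/// Unsigned counters cannot go below zero, so `Zero - rhs` is treated as an
/// always-zero (assumed unreachable) expression by zeroing the rhs operand too.
fn normalize(lhs: Counter, op: Op, rhs: Counter) -> (Counter, Counter) {
    match (lhs, op) {
        (Counter::Zero, Op::Subtract) => (Counter::Zero, Counter::Zero),
        _ => (lhs, rhs),
    }
}

fn main() {
    // A branch folded away as a constant condition leaves `0 - c3` behind:
    let normalized = normalize(Counter::Zero, Op::Subtract, Counter::Id(3));
    assert_eq!(normalized, (Counter::Zero, Counter::Zero));
}
```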
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index 0307117e1c8..f0f45b067b3 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -1,14 +1,16 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(assert_matches)]
 #![feature(bool_to_option)]
-#![feature(option_expect_none)]
 #![feature(box_patterns)]
 #![feature(drain_filter)]
 #![feature(try_blocks)]
 #![feature(in_band_lifetimes)]
 #![feature(nll)]
-#![feature(or_patterns)]
+#![cfg_attr(bootstrap, feature(or_patterns))]
 #![feature(associated_type_bounds)]
+#![feature(iter_zip)]
 #![recursion_limit = "256"]
+#![feature(box_syntax)]
 
 //! This crate contains codegen code that is used by all codegen backends (LLVM and others).
 //! The backend-agnostic functions of this crate use functions defined in various traits that
@@ -137,7 +139,6 @@ pub struct CrateInfo {
     pub native_libraries: FxHashMap<CrateNum, Vec<NativeLib>>,
     pub crate_name: FxHashMap<CrateNum, String>,
     pub used_libraries: Vec<NativeLib>,
-    pub link_args: Lrc<Vec<String>>,
     pub used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
     pub used_crates_static: Vec<(CrateNum, LibSource)>,
     pub used_crates_dynamic: Vec<(CrateNum, LibSource)>,
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 289629d9215..38e928145a8 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -231,7 +231,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
     fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
         let check = match terminator.kind {
             mir::TerminatorKind::Call { func: mir::Operand::Constant(ref c), ref args, .. } => {
-                match *c.literal.ty.kind() {
+                match *c.ty().kind() {
                     ty::FnDef(did, _) => Some((did, args)),
                     _ => None,
                 }
@@ -281,7 +281,18 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
             Some(assignment_location) => {
                 assignment_location.dominates(location, &self.dominators)
             }
-            None => false,
+            None => {
+                debug!("No first assignment found for {:?}", local);
+                // We have not seen any assignment to the local yet,
+                // but before marking not_ssa, check if it is a ZST,
+                // in which case we don't need to initialize the local.
+                let ty = self.fx.mir.local_decls[local].ty;
+                let ty = self.fx.monomorphize(ty);
+
+                let is_zst = self.fx.cx.layout_of(ty).is_zst();
+                debug!("is_zst: {}", is_zst);
+                is_zst
+            }
         };
         if !ssa_read {
             self.not_ssa(local);
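The new `None` arm in `analyze.rs` is justified by a simple property: a zero-sized value occupies no memory, so a read that precedes any assignment can never observe uninitialized bytes. A trivial illustration of the ZSTs the check is about:

```rust
fn main() {
    // Zero-sized types: no bytes to initialize, nothing to read back.
    assert_eq!(std::mem::size_of::<()>(), 0);
    assert_eq!(std::mem::size_of::<[u64; 0]>(), 0);

    struct Marker; // a user-defined ZST
    assert_eq!(std::mem::size_of::<Marker>(), 0);
}
```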
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 9ce90669800..fd3f89a2aee 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -146,24 +146,6 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             }
         }
     }
-
-    // Generate sideeffect intrinsic if jumping to any of the targets can form
-    // a loop.
-    fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
-        &self,
-        mir: &'tcx mir::Body<'tcx>,
-        bx: &mut Bx,
-        targets: &[mir::BasicBlock],
-    ) {
-        if bx.tcx().sess.opts.debugging_opts.insert_sideeffect {
-            if targets.iter().any(|&target| {
-                target <= self.bb
-                    && target.start_location().is_predecessor_of(self.bb.start_location(), mir)
-            }) {
-                bx.sideeffect(false);
-            }
-        }
-    }
 }
 
 /// Codegen implementations for some terminator variants.
@@ -198,8 +180,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let discr = self.codegen_operand(&mut bx, &discr);
         // `switch_ty` is redundant, sanity-check that.
         assert_eq!(discr.layout.ty, switch_ty);
-        helper.maybe_sideeffect(self.mir, &mut bx, targets.all_targets());
-
         let mut target_iter = targets.iter();
         if target_iter.len() == 1 {
             // If there are two targets (one conditional, one fallback), emit br instead of switch
@@ -308,7 +288,6 @@
         if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
             // we don't actually need to drop anything.
-            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
             helper.funclet_br(self, &mut bx, target);
             return;
         }
@@ -337,7 +316,6 @@
             }
             _ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])),
         };
-        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
         helper.do_call(
             self,
             &mut bx,
@@ -379,7 +357,6 @@
         // Don't codegen the panic block if success if known.
         if const_cond == Some(expected) {
-            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
             helper.funclet_br(self, &mut bx, target);
             return;
         }
@@ -390,7 +367,6 @@
         // Create the failure block and the conditional branch to it.
         let lltarget = helper.llblock(self, target);
         let panic_block = self.new_block("panic");
-        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
         if expected {
             bx.cond_br(cond, lltarget, panic_block.llbb());
         } else {
@@ -491,9 +467,6 @@
             let fn_abi = FnAbi::of_instance(bx, instance, &[]);
             let llfn = bx.get_fn_addr(instance);
 
-            if let Some((_, target)) = destination.as_ref() {
-                helper.maybe_sideeffect(self.mir, bx, &[*target]);
-            }
             // Codegen the actual panic invoke/call.
             helper.do_call(
                 self,
@@ -507,7 +480,6 @@
         } else {
             // a NOP
             let target = destination.as_ref().unwrap().1;
-            helper.maybe_sideeffect(self.mir, bx, &[target]);
             helper.funclet_br(self, bx, target)
         }
         true
@@ -551,7 +523,6 @@
         if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
             // Empty drop glue; a no-op.
             let &(_, target) = destination.as_ref().unwrap();
-            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
             helper.funclet_br(self, &mut bx, target);
             return;
         }
@@ -586,7 +557,6 @@
         if let Some(destination_ref) = destination.as_ref() {
             let &(dest, target) = destination_ref;
             self.codegen_transmute(&mut bx, &args[0], dest);
-            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
             helper.funclet_br(self, &mut bx, target);
         } else {
             // If we are trying to transmute to an uninhabited type,
@@ -634,74 +604,73 @@
                     location.val.store(&mut bx, tmp);
                 }
                 self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
-
-                helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
                 helper.funclet_br(self, &mut bx, *target);
             }
             return;
         }
 
-        if intrinsic.is_some() && intrinsic != Some(sym::drop_in_place) {
-            let intrinsic = intrinsic.unwrap();
-            let dest = match ret_dest {
-                _ if fn_abi.ret.is_indirect() => llargs[0],
-                ReturnDest::Nothing => {
-                    bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
-                }
-                ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
-                ReturnDest::DirectOperand(_) => {
-                    bug!("Cannot use direct operand with an intrinsic call")
-                }
-            };
+        match intrinsic {
+            None | Some(sym::drop_in_place) => {}
+            Some(sym::copy_nonoverlapping) => unreachable!(),
+            Some(intrinsic) => {
+                let dest = match ret_dest {
+                    _ if fn_abi.ret.is_indirect() => llargs[0],
+                    ReturnDest::Nothing => {
+                        bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
+                    }
+                    ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+                    ReturnDest::DirectOperand(_) => {
+                        bug!("Cannot use direct operand with an intrinsic call")
+                    }
+                };
 
-            let args: Vec<_> = args
-                .iter()
-                .enumerate()
-                .map(|(i, arg)| {
-                    // The indices passed to simd_shuffle* in the
-                    // third argument must be constant. This is
-                    // checked by const-qualification, which also
-                    // promotes any complex rvalues to constants.
-                    if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
-                        if let mir::Operand::Constant(constant) = arg {
-                            let c = self.eval_mir_constant(constant);
-                            let (llval, ty) = self.simd_shuffle_indices(
-                                &bx,
-                                constant.span,
-                                constant.literal.ty,
-                                c,
-                            );
-                            return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty) };
-                        } else {
-                            span_bug!(span, "shuffle indices must be constant");
-                        }
-                    }
-
-                    self.codegen_operand(&mut bx, arg)
-                })
-                .collect();
+                let args: Vec<_> = args
+                    .iter()
+                    .enumerate()
+                    .map(|(i, arg)| {
+                        // The indices passed to simd_shuffle* in the
+                        // third argument must be constant. This is
+                        // checked by const-qualification, which also
+                        // promotes any complex rvalues to constants.
+                        if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+                            if let mir::Operand::Constant(constant) = arg {
+                                let c = self.eval_mir_constant(constant);
+                                let (llval, ty) =
+                                    self.simd_shuffle_indices(&bx, constant.span, constant.ty(), c);
+                                return OperandRef {
+                                    val: Immediate(llval),
+                                    layout: bx.layout_of(ty),
+                                };
+                            } else {
+                                span_bug!(span, "shuffle indices must be constant");
+                            }
+                        }
+
+                        self.codegen_operand(&mut bx, arg)
+                    })
+                    .collect();
+
+                Self::codegen_intrinsic_call(
+                    &mut bx,
+                    *instance.as_ref().unwrap(),
+                    &fn_abi,
+                    &args,
+                    dest,
+                    span,
+                );
 
-            Self::codegen_intrinsic_call(
-                &mut bx,
-                *instance.as_ref().unwrap(),
-                &fn_abi,
-                &args,
-                dest,
-                span,
-            );
+                if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                    self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+                }
 
-            if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
-            }
+                if let Some((_, target)) = *destination {
+                    helper.funclet_br(self, &mut bx, target);
+                } else {
+                    bx.unreachable();
+                }
 
-            if let Some((_, target)) = *destination {
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-                helper.funclet_br(self, &mut bx, target);
-            } else {
-                bx.unreachable();
+                return;
             }
-
-            return;
         }
 
         // Split the rust-call tupled arguments off.
@@ -811,9 +780,6 @@
             _ => span_bug!(span, "no llfn for call"),
         };
 
-        if let Some((_, target)) = destination.as_ref() {
-            helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
-        }
         helper.do_call(
             self,
             &mut bx,
@@ -856,45 +822,41 @@
                     InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                 }
                 mir::InlineAsmOperand::Const { ref value } => {
-                    if let mir::Operand::Constant(constant) = value {
-                        let const_value = self
-                            .eval_mir_constant(constant)
-                            .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
-                        let ty = constant.literal.ty;
-                        let size = bx.layout_of(ty).size;
-                        let scalar = match const_value {
-                            ConstValue::Scalar(s) => s,
-                            _ => span_bug!(
-                                span,
-                                "expected Scalar for promoted asm const, but got {:#?}",
-                                const_value
-                            ),
-                        };
-                        let value = scalar.assert_bits(size);
-                        let string = match ty.kind() {
-                            ty::Uint(_) => value.to_string(),
-                            ty::Int(int_ty) => {
-                                match int_ty.normalize(bx.tcx().sess.target.pointer_width) {
-                                    ty::IntTy::I8 => (value as i8).to_string(),
-                                    ty::IntTy::I16 => (value as i16).to_string(),
-                                    ty::IntTy::I32 => (value as i32).to_string(),
-                                    ty::IntTy::I64 => (value as i64).to_string(),
-                                    ty::IntTy::I128 => (value as i128).to_string(),
-                                    ty::IntTy::Isize => unreachable!(),
-                                }
-                            }
-                            ty::Float(ty::FloatTy::F32) => f32::from_bits(value as u32).to_string(),
-                            ty::Float(ty::FloatTy::F64) => f64::from_bits(value as u64).to_string(),
-                            _ => span_bug!(span, "asm const has bad type {}", ty),
-                        };
-                        InlineAsmOperandRef::Const { string }
-                    } else {
-                        span_bug!(span, "asm const is not a constant");
-                    }
+                    let const_value = self
+                        .eval_mir_constant(value)
+                        .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+                    let ty = value.ty();
+                    let size = bx.layout_of(ty).size;
+                    let scalar = match const_value {
+                        ConstValue::Scalar(s) => s,
+                        _ => span_bug!(
+                            span,
+                            "expected Scalar for promoted asm const, but got {:#?}",
+                            const_value
+                        ),
+                    };
+                    let value = scalar.assert_bits(size);
+                    let string = match ty.kind() {
+                        ty::Uint(_) => value.to_string(),
+                        ty::Int(int_ty) => {
+                            match int_ty.normalize(bx.tcx().sess.target.pointer_width) {
+                                ty::IntTy::I8 => (value as i8).to_string(),
+                                ty::IntTy::I16 => (value as i16).to_string(),
+                                ty::IntTy::I32 => (value as i32).to_string(),
+                                ty::IntTy::I64 => (value as i64).to_string(),
+                                ty::IntTy::I128 => (value as i128).to_string(),
+                                ty::IntTy::Isize => unreachable!(),
+                            }
+                        }
+                        ty::Float(ty::FloatTy::F32) => f32::from_bits(value as u32).to_string(),
+                        ty::Float(ty::FloatTy::F64) => f64::from_bits(value as u64).to_string(),
+                        _ => span_bug!(span, "asm const has bad type {}", ty),
+                    };
+                    InlineAsmOperandRef::Const { string }
                 }
                 mir::InlineAsmOperand::SymFn { ref value } => {
                     let literal = self.monomorphize(value.literal);
-                    if let ty::FnDef(def_id, substs) = *literal.ty.kind() {
+                    if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
                         let instance = ty::Instance::resolve_for_fn_ptr(
                             bx.tcx(),
                             ty::ParamEnv::reveal_all(),
@@ -963,22 +925,16 @@
             mir::TerminatorKind::Goto { target } => {
                 if bb == target {
-                    // This is an unconditional branch back to this same basic
-                    // block. That means we have something like a `loop {}`
-                    // statement. Currently LLVM miscompiles this because it
-                    // assumes forward progress. We want to prevent this in all
-                    // cases, but that has a fairly high cost to compile times
-                    // currently. Instead, try to handle this specific case
-                    // which comes up commonly in practice (e.g., in embedded
-                    // code).
+                    // This is an unconditional branch back to this same basic block. That means we
+                    // have something like a `loop {}` statement. LLVM versions before 12.0
+                    // miscompile this because they assume forward progress. For older versions
+                    // try to handle just this specific case which comes up commonly in practice
+                    // (e.g., in embedded code).
                     //
-                    // The `true` here means we insert side effects regardless
-                    // of -Zinsert-sideeffect being passed on unconditional
-                    // branching to the same basic block.
-                    bx.sideeffect(true);
-                } else {
-                    helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+                    // NB: the `sideeffect` currently checks for the LLVM version used internally.
+                    bx.sideeffect();
                 }
+
                 helper.funclet_br(self, &mut bx, target);
             }
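The `Goto` special case above fires for exactly this kind of source, common in embedded code, where a block branches unconditionally to itself; before LLVM 12 such a loop could be miscompiled under the forward-progress assumption unless a `sideeffect` intrinsic keeps it alive:

```rust
/// The shape of code the self-branch check protects: an intentional,
/// side-effect-free infinite loop lowering to one self-branching block.
fn park() -> ! {
    loop {}
}

fn main() {
    // Referenced but not called, so this example terminates.
    let _parked: fn() -> ! = park;
}
```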
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index b79a221a0e7..fa8a53e60b1 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -16,7 +16,7 @@
         constant: &mir::Constant<'tcx>,
     ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
         let val = self.eval_mir_constant(constant)?;
-        let ty = self.monomorphize(constant.literal.ty);
+        let ty = self.monomorphize(constant.ty());
         Ok(OperandRef::from_const(bx, val, ty))
     }
 
@@ -24,11 +24,16 @@
         &self,
         constant: &mir::Constant<'tcx>,
     ) -> Result<ConstValue<'tcx>, ErrorHandled> {
-        match self.monomorphize(constant.literal).val {
-            ty::ConstKind::Unevaluated(def, substs, promoted) => self
+        let ct = self.monomorphize(constant.literal);
+        let ct = match ct {
+            mir::ConstantKind::Ty(ct) => ct,
+            mir::ConstantKind::Val(val, _) => return Ok(val),
+        };
+        match ct.val {
+            ty::ConstKind::Unevaluated(ct) => self
                 .cx
                 .tcx()
-                .const_eval_resolve(ty::ParamEnv::reveal_all(), def, substs, promoted, None)
+                .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
                 .map_err(|err| {
                     self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
                     err
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
index a115d358666..621ec0519c9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -2,30 +2,41 @@
 use crate::traits::*;
 
 use rustc_middle::mir::coverage::*;
 use rustc_middle::mir::Coverage;
+use rustc_middle::mir::SourceScope;
 
 use super::FunctionCx;
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage) {
+    pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage, scope: SourceScope) {
+        // Determine the instance that coverage data was originally generated for.
+        let scope_data = &self.mir.source_scopes[scope];
+        let instance = if let Some((inlined_instance, _)) = scope_data.inlined {
+            self.monomorphize(inlined_instance)
+        } else if let Some(inlined_scope) = scope_data.inlined_parent_scope {
+            self.monomorphize(self.mir.source_scopes[inlined_scope].inlined.unwrap().0)
+        } else {
+            self.instance
+        };
+
         let Coverage { kind, code_region } = coverage;
         match kind {
             CoverageKind::Counter { function_source_hash, id } => {
-                if bx.set_function_source_hash(self.instance, function_source_hash) {
+                if bx.set_function_source_hash(instance, function_source_hash) {
                     // If `set_function_source_hash()` returned true, the coverage map is enabled,
                     // so continue adding the counter.
                     if let Some(code_region) = code_region {
                         // Note: Some counters do not have code regions, but may still be referenced
                         // from expressions. In that case, don't add the counter to the coverage map,
                         // but do inject the counter intrinsic.
-                        bx.add_coverage_counter(self.instance, id, code_region);
+                        bx.add_coverage_counter(instance, id, code_region);
                     }
 
-                    let coverageinfo = bx.tcx().coverageinfo(self.instance.def_id());
+                    let coverageinfo = bx.tcx().coverageinfo(instance.def_id());
 
-                    let fn_name = bx.create_pgo_func_name_var(self.instance);
+                    let fn_name = bx.get_pgo_func_name_var(instance);
                     let hash = bx.const_u64(function_source_hash);
                     let num_counters = bx.const_u32(coverageinfo.num_counters);
-                    let index = bx.const_u32(u32::from(id));
+                    let index = bx.const_u32(id.zero_based_index());
                     debug!(
                         "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
                         fn_name, hash, num_counters, index,
@@ -34,11 +45,11 @@
                 }
             }
             CoverageKind::Expression { id, lhs, op, rhs } => {
-                bx.add_coverage_counter_expression(self.instance, id, lhs, op, rhs, code_region);
+                bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
             }
             CoverageKind::Unreachable => {
                 bx.add_coverage_unreachable(
-                    self.instance,
+                    instance,
                     code_region.expect("unreachable regions always have code regions"),
                 );
             }
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index ea59e183118..6bb20545f07 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -6,7 +6,7 @@
 use rustc_middle::ty;
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{kw, Symbol};
 use rustc_span::{BytePos, Span};
-use rustc_target::abi::{LayoutOf, Size};
+use rustc_target::abi::Size;
 
 use super::operand::{OperandRef, OperandValue};
 use super::place::PlaceRef;
@@ -265,33 +265,25 @@
                 None => continue,
             };
 
-            let mut layout = base.layout;
             let mut direct_offset = Size::ZERO;
             // FIXME(eddyb) use smallvec here.
             let mut indirect_offsets = vec![];
+            let mut place = base;
 
             for elem in &var.projection[..] {
                 match *elem {
                     mir::ProjectionElem::Deref => {
                         indirect_offsets.push(Size::ZERO);
-                        layout = bx.cx().layout_of(
-                            layout
-                                .ty
-                                .builtin_deref(true)
-                                .unwrap_or_else(|| {
-                                    span_bug!(var.source_info.span, "cannot deref `{}`", layout.ty)
-                                })
-                                .ty,
-                        );
+                        place = place.project_deref(bx);
                     }
                     mir::ProjectionElem::Field(field, _) => {
                         let i = field.index();
                         let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
-                        *offset += layout.fields.offset(i);
-                        layout = layout.field(bx.cx(), i);
+                        *offset += place.layout.fields.offset(i);
+                        place = place.project_field(bx, i);
                     }
                     mir::ProjectionElem::Downcast(_, variant) => {
-                        layout = layout.for_variant(bx.cx(), variant);
+                        place = place.project_downcast(bx, variant);
                     }
                     _ => span_bug!(
                         var.source_info.span,
@@ -301,7 +293,39 @@
                 }
             }
 
-            bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
+            // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
+            // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
+            // not DWARF and LLVM doesn't support translating the resulting
+            // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
+            // Creating extra allocas on the stack makes the resulting debug info simple enough
+            // that LLVM can generate correct CodeView records and thus the values appear in the
+            // debugger. (#83709)
+            let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
+                && self.mir.local_kind(local) == mir::LocalKind::Arg
+                // LLVM can handle simple things but anything more complex than just a direct
+                // offset or one indirect offset of 0 is too complex for it to generate CV records
+                // correctly.
+                && (direct_offset != Size::ZERO
+                    || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
+
+            if should_create_individual_allocas {
+                // Create a variable which will be a pointer to the actual value
+                let ptr_ty = bx.tcx().mk_ty(ty::RawPtr(ty::TypeAndMut {
+                    mutbl: mir::Mutability::Mut,
+                    ty: place.layout.ty,
+                }));
+                let ptr_layout = bx.layout_of(ptr_ty);
+                let alloca = PlaceRef::alloca(bx, ptr_layout);
+                bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+
+                // Write the pointer to the variable
+                bx.store(place.llval, alloca.llval, alloca.align);
+
+                // Point the debug info to `*alloca` for the current variable
+                bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO]);
+            } else {
+                bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
+            }
         }
     }
 
@@ -372,7 +396,7 @@
                 (var_ty, var_kind)
             }
             mir::VarDebugInfoContents::Const(c) => {
-                let ty = self.monomorphize(c.literal.ty);
+                let ty = self.monomorphize(c.ty());
                 (ty, VariableKind::LocalVariable)
             }
         };
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 80e3ed75b85..8502309b90e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -125,19 +125,6 @@
             let offset = args[1].immediate();
             bx.gep(ptr, &[offset])
         }
-
-        sym::copy_nonoverlapping => {
-            copy_intrinsic(
-                bx,
-                false,
-                false,
-                substs.type_at(0),
-                args[1].immediate(),
-                args[0].immediate(),
-                args[2].immediate(),
-            );
-            return;
-        }
         sym::copy => {
             copy_intrinsic(
                 bx,
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index d31ececf130..91df67b53d2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -149,8 +149,6 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         bx.set_personality_fn(cx.eh_personality());
     }
 
-    bx.sideeffect(false);
-
     let cleanup_kinds = analyze::cleanup_kinds(&mir);
     // Allocate a `Block` for every basic block, except
     // the start block, if nothing loops back to it.
@@ -284,9 +282,7 @@ fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
     IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
 ) {
-    block_bxs
-        .iter_enumerated()
-        .zip(cleanup_kinds)
+    iter::zip(block_bxs.iter_enumerated(), cleanup_kinds)
         .map(|((bb, &llbb), cleanup_kind)| {
             match *cleanup_kind {
                 CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 66d9d1a1e0c..a9e7ebf6d43 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -402,6 +402,18 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }
 
+    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
+        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
+        let layout = bx.layout_of(target_ty.ty);
+
+        PlaceRef {
+            llval: bx.load(self.llval, self.align),
+            llextra: None,
+            layout,
+            align: layout.align.abi,
+        }
+    }
+
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index e3a6cabd600..9917c23f121 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -11,7 +11,7 @@
 use rustc_apfloat::{ieee, Float, Round, Status};
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir;
 use rustc_middle::ty::cast::{CastTy, IntTy};
-use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
 use rustc_span::source_map::{Span, DUMMY_SP};
 use rustc_span::symbol::sym;
@@ -325,7 +325,7 @@
                             let er = scalar.valid_range_exclusive(bx.cx());
                             if er.end != er.start
-                                && scalar.valid_range.end() > scalar.valid_range.start()
+                                && scalar.valid_range.end() >= scalar.valid_range.start()
                             {
                                 // We want `table[e as usize ± k]` to not
                                 // have bound checks, and this is the most
@@ -385,10 +385,10 @@
                         bx.inttoptr(usize_llval, ll_t_out)
                     }
                     (CastTy::Float, CastTy::Int(IntTy::I)) => {
-                        cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out, cast)
+                        cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out)
                     }
                     (CastTy::Float, CastTy::Int(_)) => {
-                        cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out, cast)
+                        cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out)
                     }
                     _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                 };
@@ -424,7 +424,7 @@
                 (bx, operand)
             }
 
-            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                 let lhs = self.codegen_operand(&mut bx, lhs);
                 let rhs = self.codegen_operand(&mut bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
@@ -453,7 +453,7 @@
                 };
                 (bx, operand)
             }
-            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                 let lhs = self.codegen_operand(&mut bx, lhs);
                 let rhs = self.codegen_operand(&mut bx, rhs);
                 let result = self.codegen_scalar_checked_binop(
@@ -790,7 +790,6 @@ fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     x: Bx::Value,
     float_ty: Bx::Type,
     int_ty: Bx::Type,
-    int_layout: TyAndLayout<'tcx>,
 ) -> Bx::Value {
     if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
         return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
@@ -891,134 +890,39 @@ fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
     let zero = bx.cx().const_uint(int_ty, 0);
 
-    // The codegen here differs quite a bit depending on whether our builder's
-    // `fptosi` and `fptoui` instructions may trap for out-of-bounds values. If
-    // they don't trap then we can start doing everything inline with a
-    // `select` instruction because it's ok to execute `fptosi` and `fptoui`
-    // even if we don't use the results.
-    if !bx.fptosui_may_trap(x, int_ty) {
-        // Step 1 ...
-        let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
-        let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
-        let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
-
-        // Step 2: We use two comparisons and two selects, with %s1 being the
-        // result:
-        // %less_or_nan = fcmp ult %x, %f_min
-        // %greater = fcmp olt %x, %f_max
-        // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
-        // %s1 = select %greater, int_ty::MAX, %s0
-        // Note that %less_or_nan uses an *unordered* comparison. This
-        // comparison is true if the operands are not comparable (i.e., if x is
-        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
-        // x is NaN.
-        //
-        // Performance note: Unordered comparison can be lowered to a "flipped"
-        // comparison and a negation, and the negation can be merged into the
-        // select. Therefore, it not necessarily any more expensive than a
-        // ordered ("normal") comparison. Whether these optimizations will be
-        // performed is ultimately up to the backend, but at least x86 does
-        // perform them.
-        let s0 = bx.select(less_or_nan, int_min, fptosui_result);
-        let s1 = bx.select(greater, int_max, s0);
-
-        // Step 3: NaN replacement.
-        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
-        // Therefore we only need to execute this step for signed integer types.
-        if signed {
-            // LLVM has no isNaN predicate, so we use (x == x) instead
-            let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
-            bx.select(cmp, s1, zero)
-        } else {
-            s1
-        }
+    // Step 1 ...
+    let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
+    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
+    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
+
+    // Step 2: We use two comparisons and two selects, with %s1 being the
+    // result:
+    // %less_or_nan = fcmp ult %x, %f_min
+    // %greater = fcmp olt %x, %f_max
+    // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+    // %s1 = select %greater, int_ty::MAX, %s0
+    // Note that %less_or_nan uses an *unordered* comparison. This
+    // comparison is true if the operands are not comparable (i.e., if x is
+    // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+    // x is NaN.
+    //
+    // Performance note: Unordered comparison can be lowered to a "flipped"
+    // comparison and a negation, and the negation can be merged into the
+    // select. Therefore, it not necessarily any more expensive than a
+    // ordered ("normal") comparison. Whether these optimizations will be
+    // performed is ultimately up to the backend, but at least x86 does
+    // perform them.
+    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
+    let s1 = bx.select(greater, int_max, s0);
+
+    // Step 3: NaN replacement.
+    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
+    // Therefore we only need to execute this step for signed integer types.
+    if signed {
+        // LLVM has no isNaN predicate, so we use (x == x) instead
+        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
+        bx.select(cmp, s1, zero)
     } else {
-        // In this case we cannot execute `fptosi` or `fptoui` and then later
-        // discard the result. The builder is telling us that these instructions
-        // will trap on out-of-bounds values, so we need to use basic blocks and
-        // control flow to avoid executing the `fptosi` and `fptoui`
-        // instructions.
-        //
-        // The general idea of what we're constructing here is, for f64 -> i32:
-        //
-        // ;; block so far... %0 is the argument
-        // %result = alloca i32, align 4
-        // %inbound_lower = fcmp oge double %0, 0xC1E0000000000000
-        // %inbound_upper = fcmp ole double %0, 0x41DFFFFFFFC00000
-        // ;; match (inbound_lower, inbound_upper) {
-        // ;;     (true, true) => %0 can be converted without trapping
-        // ;;     (false, false) => %0 is a NaN
-        // ;;     (true, false) => %0 is too large
-        // ;;     (false, true) => %0 is too small
-        // ;; }
-        // ;;
-        // ;; The (true, true) check, go to %convert if so.
-        // %inbounds = and i1 %inbound_lower, %inbound_upper
-        // br i1 %inbounds, label %convert, label %specialcase
-        //
-        // convert:
-        // %cvt = call i32 @llvm.wasm.trunc.signed.i32.f64(double %0)
-        // store i32 %cvt, i32* %result, align 4
-        // br label %done
-        //
-        // specialcase:
-        // ;; Handle the cases where the number is NaN, too large or too small
-        //
-        // ;; Either (true, false) or (false, true)
-        // %is_not_nan = or i1 %inbound_lower, %inbound_upper
-        // ;; Figure out which saturated value we are interested in if not `NaN`
-        // %saturated = select i1 %inbound_lower, i32 2147483647, i32 -2147483648
-        // ;; Figure out between saturated and NaN representations
-        // %result_nan = select i1 %is_not_nan, i32 %saturated, i32 0
-        // store i32 %result_nan, i32* %result, align 4
-        // br label %done
-        //
-        // done:
-        // %r = load i32, i32* %result, align 4
-        // ;; ...
-        let done = bx.build_sibling_block("float_cast_done");
-        let mut convert = bx.build_sibling_block("float_cast_convert");
-        let mut specialcase = bx.build_sibling_block("float_cast_specialcase");
-
-        let result = PlaceRef::alloca(bx, int_layout);
-        result.storage_live(bx);
-
-        // Use control flow to figure out whether we can execute `fptosi` in a
-        // basic block, or whether we go to a different basic block to implement
-        // the saturating logic.
-        let inbound_lower = bx.fcmp(RealPredicate::RealOGE, x, f_min);
-        let inbound_upper = bx.fcmp(RealPredicate::RealOLE, x, f_max);
-        let inbounds = bx.and(inbound_lower, inbound_upper);
-        bx.cond_br(inbounds, convert.llbb(), specialcase.llbb());
-
-        // Translation of the `convert` basic block
-        let cvt = if signed { convert.fptosi(x, int_ty) } else { convert.fptoui(x, int_ty) };
-        convert.store(cvt, result.llval, result.align);
-        convert.br(done.llbb());
-
-        // Translation of the `specialcase` basic block. Note that like above
-        // we try to be a bit clever here for unsigned conversions. In those
-        // cases the `int_min` is zero so we don't need two select instructions,
-        // just one to choose whether we need `int_max` or not. If
-        // `inbound_lower` is true then we're guaranteed to not be `NaN` and
-        // since we're greater than zero we must be saturating to `int_max`. If
-        // `inbound_lower` is false then we're either NaN or less than zero, so
-        // we saturate to zero.
-        let result_nan = if signed {
-            let is_not_nan = specialcase.or(inbound_lower, inbound_upper);
-            let saturated = specialcase.select(inbound_lower, int_max, int_min);
-            specialcase.select(is_not_nan, saturated, zero)
-        } else {
-            specialcase.select(inbound_lower, int_max, int_min)
-        };
-        specialcase.store(result_nan, result.llval, result.align);
-        specialcase.br(done.llbb());
-
-        // Translation of the `done` basic block, positioning ourselves to
-        // continue from that point as well.
-        *bx = done;
-        let ret = bx.load(result.llval, result.align);
-        result.storage_dead(bx);
-        ret
+        s1
     }
 }
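The retained select-based lowering gives Rust's saturating `as`-cast semantics: clamp out-of-range values to the integer bounds and map NaN to zero. A scalar model of the three commented steps for f64 to i32 (assuming, as holds for this type pair, that the `f_min`/`f_max` bounds coincide with `i32::MIN`/`i32::MAX`):

```rust
/// Scalar model of the fptosi + two selects + NaN replacement sequence.
fn cast_f64_to_i32_saturating(x: f64) -> i32 {
    // Steps 1-2: the %less_or_nan (unordered) and %greater selects.
    let s1 = if !(x >= i32::MIN as f64) {
        i32::MIN // taken for too-small values and, via the unordered compare, NaN
    } else if x > i32::MAX as f64 {
        i32::MAX
    } else {
        x as i32 // in range: plain truncation
    };
    // Step 3: NaN replacement; (x == x) is false only for NaN.
    if x == x { s1 } else { 0 }
}

fn main() {
    assert_eq!(cast_f64_to_i32_saturating(f64::NAN), 0);
    assert_eq!(cast_f64_to_i32_saturating(1e99), i32::MAX);
    assert_eq!(cast_f64_to_i32_saturating(-1e99), i32::MIN);
    assert_eq!(cast_f64_to_i32_saturating(-3.7), -3);
    assert_eq!(f64::NAN as i32, 0); // matches the language-level `as` behavior
}
```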
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 6f74ba77d4c..fe7f6288adb 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -112,7 +112,27 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                bx
            }
            mir::StatementKind::Coverage(box ref coverage) => {
-                self.codegen_coverage(&mut bx, coverage.clone());
+                self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
+                bx
+            }
+            mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
+                ref src,
+                ref dst,
+                ref count,
+            }) => {
+                let dst_val = self.codegen_operand(&mut bx, dst);
+                let src_val = self.codegen_operand(&mut bx, src);
+                let count = self.codegen_operand(&mut bx, count).immediate();
+                let pointee_layout = dst_val
+                    .layout
+                    .pointee_info_at(&mut bx, rustc_target::abi::Size::ZERO)
+                    .expect("Expected pointer");
+                let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
+
+                let align = pointee_layout.align;
+                let dst = dst_val.immediate();
+                let src = src_val.immediate();
+                bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
                bx
            }
            mir::StatementKind::FakeRead(..)
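Note: the `CopyNonOverlapping` lowering above turns an element count into a byte count (`count * size_of::<pointee>()`) before emitting the memcpy. A minimal model of those semantics in plain Rust (a hypothetical helper for illustration, not the rustc code):

use std::{mem, ptr};

// Sketch: copy `count` elements, i.e. count * size_of::<T>() bytes, from
// `src` to `dst`. The byte-count line mirrors the
// `bx.mul(count, bx.const_usize(pointee_layout.size.bytes()))` above.
//
// Safety: `src` and `dst` must each be valid for `count` elements and must
// not overlap.
unsafe fn copy_nonoverlapping_model<T>(src: *const T, dst: *mut T, count: usize) {
    let _bytes = count.checked_mul(mem::size_of::<T>()).expect("byte count overflow");
    ptr::copy_nonoverlapping(src, dst, count);
}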
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
index fd18f42f2dd..4e987908b4e 100644
--- a/compiler/rustc_codegen_ssa/src/target_features.rs
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -26,6 +26,7 @@ const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
    ("vfp2", Some(sym::arm_target_feature)),
    ("vfp3", Some(sym::arm_target_feature)),
    ("vfp4", Some(sym::arm_target_feature)),
+    ("fp-armv8", Some(sym::arm_target_feature)),
    // This is needed for inline assembly, but shouldn't be stabilized as-is
    // since it should be enabled per-function using #[instruction_set], not
    // #[target_feature].
@@ -160,7 +161,7 @@ pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Opt
        "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
        "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
        "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
-        "wasm32" => WASM_ALLOWED_FEATURES,
+        "wasm32" | "wasm64" => WASM_ALLOWED_FEATURES,
        _ => &[],
    }
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index d5bd2780388..1bc05f30e5c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -171,7 +171,6 @@ pub trait BuilderMethods<'a, 'tcx>:
    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
-    fn fptosui_may_trap(&self, val: Self::Value, dest_ty: Self::Type) -> bool;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
diff --git a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
index 95bddfb4b41..cbf570dba4c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
@@ -1,14 +1,26 @@
 use super::BackendTypes;
+use rustc_hir::def_id::DefId;
 use rustc_middle::mir::coverage::*;
 use rustc_middle::ty::Instance;

-pub trait CoverageInfoMethods: BackendTypes {
+pub trait CoverageInfoMethods<'tcx>: BackendTypes {
    fn coverageinfo_finalize(&self);
+
+    /// Codegen a small function that will never be called, with one counter
+    /// that will never be incremented, that gives LLVM coverage tools a
+    /// function definition it needs in order to resolve coverage map references
+    /// to unused functions. This is necessary so unused functions will appear
+    /// as uncovered (coverage execution count `0`) in LLVM coverage reports.
+    fn define_unused_fn(&self, def_id: DefId);
+
+    /// For LLVM codegen, returns a function-specific `Value` for a global
+    /// string, to hold the function name passed to LLVM intrinsic
+    /// `instrprof.increment()`. The `Value` is only created once per instance.
+    /// Multiple invocations with the same instance return the same `Value`.
+    fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
}

pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
-    fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
-
    /// Returns true if the function source hash was added to the coverage map (even if it had
    /// already been added, for this instance). Returns false *only* if `-Z instrument-coverage` is
    /// not enabled (a coverage map is not being generated).
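Note: the doc comment on `get_pgo_func_name_var` promises a once-per-instance value, which is essentially memoization. A backend could satisfy that contract with a cache along these lines (a hypothetical sketch with stand-in types, not the LLVM backend's implementation):

use std::collections::HashMap;

// Stand-ins for the backend's real Instance and Value types.
type Instance = u64;
type Value = usize;

struct PgoFuncNameVars {
    cache: HashMap<Instance, Value>,
}

impl PgoFuncNameVars {
    // The first call for an instance creates the global name string;
    // every later call with the same instance returns the same Value.
    fn get_pgo_func_name_var(&mut self, instance: Instance) -> Value {
        *self.cache.entry(instance).or_insert_with(|| create_global_name_string(instance))
    }
}

// Hypothetical constructor: the real backend interns the mangled function name.
fn create_global_name_string(_instance: Instance) -> Value {
    0
}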
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
index ac3c99f9c90..777436ad2ae 100644
--- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -20,9 +20,10 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
    fn abort(&mut self);
    fn assume(&mut self, val: Self::Value);
    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
-    /// Normally, sideeffect is only emitted if -Zinsert-sideeffect is passed;
-    /// in some cases though we want to emit it regardless.
-    fn sideeffect(&mut self, unconditional: bool);
+    /// Emits a forced side effect.
+    ///
+    /// Currently has any effect only when LLVM versions prior to 12.0 are used as the backend.
+    fn sideeffect(&mut self);
    /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
    /// Rust defined C-variadic functions.
    fn va_start(&mut self, val: Self::Value) -> Self::Value;
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
index 8ada6c10479..be2e0ea230f 100644
--- a/compiler/rustc_codegen_ssa/src/traits/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -58,7 +58,7 @@ pub trait CodegenMethods<'tcx>:
    + MiscMethods<'tcx>
    + ConstMethods<'tcx>
    + StaticMethods
-    + CoverageInfoMethods
+    + CoverageInfoMethods<'tcx>
    + DebugInfoMethods<'tcx>
    + AsmMethods
    + PreDefineMethods<'tcx>
@@ -74,7 +74,7 @@ impl<'tcx, T> CodegenMethods<'tcx> for T where
    + MiscMethods<'tcx>
    + ConstMethods<'tcx>
    + StaticMethods
-    + CoverageInfoMethods
+    + CoverageInfoMethods<'tcx>
    + DebugInfoMethods<'tcx>
    + AsmMethods
    + PreDefineMethods<'tcx>
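Note: the two `mod.rs` hunks change in lockstep because `CodegenMethods` is a trait alias built from supertraits plus a blanket impl, so the new `'tcx` parameter on `CoverageInfoMethods` must appear in both bound lists. The pattern itself, reduced to a minimal sketch (trait names here are stand-ins, not the rustc traits):

// Sketch of the trait-alias-via-blanket-impl pattern: any type implementing
// every component trait automatically implements the combined trait, so a
// new parameter on one component only touches the two bound lists.
trait CoverageInfo<'tcx> {}
trait Misc {}

trait Codegen<'tcx>: CoverageInfo<'tcx> + Misc {}
impl<'tcx, T: CoverageInfo<'tcx> + Misc> Codegen<'tcx> for T {}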
