Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
18 files changed, 343 insertions, 276 deletions
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index 0dfb41f42f0..cd6201648ee 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -12,8 +12,8 @@ use rustc_metadata::fs::{copy_to_stdout, emit_wrapper_file, METADATA_FILENAME}; use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile; use rustc_middle::middle::dependency_format::Linkage; use rustc_middle::middle::exported_symbols::SymbolExportKind; -use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, Strip}; -use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind}; +use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, OutFileName, Strip}; +use rustc_session::config::{OutputFilenames, OutputType, PrintKind, SplitDwarfKind}; use rustc_session::cstore::DllImport; use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename}; use rustc_session::search_paths::PathKind; @@ -596,8 +596,10 @@ fn link_staticlib<'a>( all_native_libs.extend_from_slice(&codegen_results.crate_info.used_libraries); - if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) { - print_native_static_libs(sess, &all_native_libs, &all_rust_dylibs); + for print in &sess.opts.prints { + if print.kind == PrintKind::NativeStaticLibs { + print_native_static_libs(sess, &print.out, &all_native_libs, &all_rust_dylibs); + } } Ok(()) @@ -744,8 +746,11 @@ fn link_natively<'a>( cmd.env_remove(k.as_ref()); } - if sess.opts.prints.contains(&PrintRequest::LinkArgs) { - println!("{:?}", &cmd); + for print in &sess.opts.prints { + if print.kind == PrintKind::LinkArgs { + let content = format!("{cmd:?}"); + print.out.overwrite(&content, sess); + } } // May have not found libraries in the right formats. @@ -1231,22 +1236,21 @@ fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) { } } - let channel = option_env!("CFG_RELEASE_CHANNEL") - .map(|channel| format!("-{}", channel)) - .unwrap_or_default(); + let channel = + option_env!("CFG_RELEASE_CHANNEL").map(|channel| format!("-{channel}")).unwrap_or_default(); if sess.target.is_like_osx { // On Apple platforms, the sanitizer is always built as a dylib, and // LLVM will link to `@rpath/*.dylib`, so we need to specify an // rpath to the library as well (the rpath should be absolute, see // PR #41352 for details). - let filename = format!("rustc{}_rt.{}", channel, name); + let filename = format!("rustc{channel}_rt.{name}"); let path = find_sanitizer_runtime(&sess, &filename); let rpath = path.to_str().expect("non-utf8 component in path"); linker.args(&["-Wl,-rpath", "-Xlinker", rpath]); linker.link_dylib(&filename, false, true); } else { - let filename = format!("librustc{}_rt.{}.a", channel, name); + let filename = format!("librustc{channel}_rt.{name}.a"); let path = find_sanitizer_runtime(&sess, &filename).join(&filename); linker.link_whole_rlib(&path); } @@ -1386,6 +1390,7 @@ enum RlibFlavor { fn print_native_static_libs( sess: &Session, + out: &OutFileName, all_native_libs: &[NativeLib], all_rust_dylibs: &[&Path], ) { @@ -1409,12 +1414,12 @@ fn print_native_static_libs( } else if sess.target.linker_flavor.is_gnu() { Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name)) } else { - Some(format!("-l{}", name)) + Some(format!("-l{name}")) } } NativeLibKind::Framework { .. 
} => { // ld-only syntax, since there are no frameworks in MSVC - Some(format!("-framework {}", name)) + Some(format!("-framework {name}")) } // These are included, no need to print them NativeLibKind::Static { bundle: None | Some(true), .. } @@ -1451,19 +1456,30 @@ fn print_native_static_libs( // `foo.lib` file if the dll doesn't actually export any symbols, so we // check to see if the file is there and just omit linking to it if it's // not present. - let name = format!("{}.dll.lib", lib); + let name = format!("{lib}.dll.lib"); if path.join(&name).exists() { lib_args.push(name); } } else { - lib_args.push(format!("-l{}", lib)); + lib_args.push(format!("-l{lib}")); } } - if !lib_args.is_empty() { - sess.emit_note(errors::StaticLibraryNativeArtifacts); - // Prefix for greppability - // Note: This must not be translated as tools are allowed to depend on this exact string. - sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" "))); + + match out { + OutFileName::Real(path) => { + out.overwrite(&lib_args.join(" "), sess); + if !lib_args.is_empty() { + sess.emit_note(errors::StaticLibraryNativeArtifactsToFile { path }); + } + } + OutFileName::Stdout => { + if !lib_args.is_empty() { + sess.emit_note(errors::StaticLibraryNativeArtifacts); + // Prefix for greppability + // Note: This must not be translated as tools are allowed to depend on this exact string. + sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" "))); + } + } } } @@ -1611,8 +1627,8 @@ fn exec_linker( write!(f, "\"")?; for c in self.arg.chars() { match c { - '"' => write!(f, "\\{}", c)?, - c => write!(f, "{}", c)?, + '"' => write!(f, "\\{c}")?, + c => write!(f, "{c}")?, } } write!(f, "\"")?; @@ -1629,8 +1645,8 @@ fn exec_linker( // ensure the line is interpreted as one whole argument. 
for c in self.arg.chars() { match c { - '\\' | ' ' => write!(f, "\\{}", c)?, - c => write!(f, "{}", c)?, + '\\' | ' ' => write!(f, "\\{c}")?, + c => write!(f, "{c}")?, } } } @@ -2267,7 +2283,7 @@ fn add_order_independent_options( } else { "" }; - cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix)); + cmd.arg(format!("--dynamic-linker={prefix}ld.so.1")); } if sess.target.eh_frame_header { diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index cd56f85cccd..4c04fc60b98 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -309,7 +309,7 @@ impl<'a> GccLinker<'a> { self.linker_arg(&format!("-plugin-opt=sample-profile={}", path.display())); }; self.linker_args(&[ - &format!("-plugin-opt={}", opt_level), + &format!("-plugin-opt={opt_level}"), &format!("-plugin-opt=mcpu={}", self.target_cpu), ]); } @@ -487,7 +487,7 @@ impl<'a> Linker for GccLinker<'a> { fn link_rust_dylib(&mut self, lib: &str, _path: &Path) { self.hint_dynamic(); - self.cmd.arg(format!("-l{}", lib)); + self.cmd.arg(format!("-l{lib}")); } fn link_framework(&mut self, framework: &str, as_needed: bool) { @@ -671,8 +671,8 @@ impl<'a> Linker for GccLinker<'a> { let res: io::Result<()> = try { let mut f = BufWriter::new(File::create(&path)?); for sym in symbols { - debug!(" _{}", sym); - writeln!(f, "_{}", sym)?; + debug!(" _{sym}"); + writeln!(f, "_{sym}")?; } }; if let Err(error) = res { @@ -686,8 +686,8 @@ impl<'a> Linker for GccLinker<'a> { // because LD doesn't like when it's empty writeln!(f, "EXPORTS")?; for symbol in symbols { - debug!(" _{}", symbol); - writeln!(f, " {}", symbol)?; + debug!(" _{symbol}"); + writeln!(f, " {symbol}")?; } }; if let Err(error) = res { @@ -701,8 +701,8 @@ impl<'a> Linker for GccLinker<'a> { if !symbols.is_empty() { writeln!(f, " global:")?; for sym in symbols { - debug!(" {};", sym); - writeln!(f, " {};", sym)?; + debug!(" {sym};"); + writeln!(f, " {sym};")?; } } writeln!(f, "\n local:\n *;\n}};")?; @@ -837,7 +837,7 @@ impl<'a> Linker for MsvcLinker<'a> { // `foo.lib` file if the dll doesn't actually export any symbols, so we // check to see if the file is there and just omit linking to it if it's // not present. - let name = format!("{}.dll.lib", lib); + let name = format!("{lib}.dll.lib"); if path.join(&name).exists() { self.cmd.arg(name); } @@ -977,8 +977,8 @@ impl<'a> Linker for MsvcLinker<'a> { writeln!(f, "LIBRARY")?; writeln!(f, "EXPORTS")?; for symbol in symbols { - debug!(" _{}", symbol); - writeln!(f, " {}", symbol)?; + debug!(" _{symbol}"); + writeln!(f, " {symbol}")?; } }; if let Err(error) = res { @@ -992,7 +992,7 @@ impl<'a> Linker for MsvcLinker<'a> { fn subsystem(&mut self, subsystem: &str) { // Note that previous passes of the compiler validated this subsystem, // so we just blindly pass it to the linker. - self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); + self.cmd.arg(&format!("/SUBSYSTEM:{subsystem}")); // Windows has two subsystems we're interested in right now, the console // and windows subsystems. 
These both implicitly have different entry @@ -1147,7 +1147,7 @@ impl<'a> Linker for EmLinker<'a> { &symbols.iter().map(|sym| "_".to_owned() + sym).collect::<Vec<_>>(), ) .unwrap(); - debug!("{}", encoded); + debug!("{encoded}"); arg.push(encoded); @@ -1350,7 +1350,7 @@ impl<'a> Linker for L4Bender<'a> { } fn link_staticlib(&mut self, lib: &str, _verbatim: bool) { self.hint_static(); - self.cmd.arg(format!("-PC{}", lib)); + self.cmd.arg(format!("-PC{lib}")); } fn link_rlib(&mut self, lib: &Path) { self.hint_static(); @@ -1399,7 +1399,7 @@ impl<'a> Linker for L4Bender<'a> { fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) { self.hint_static(); - self.cmd.arg("--whole-archive").arg(format!("-l{}", lib)); + self.cmd.arg("--whole-archive").arg(format!("-l{lib}")); self.cmd.arg("--no-whole-archive"); } @@ -1453,7 +1453,7 @@ impl<'a> Linker for L4Bender<'a> { } fn subsystem(&mut self, subsystem: &str) { - self.cmd.arg(&format!("--subsystem {}", subsystem)); + self.cmd.arg(&format!("--subsystem {subsystem}")); } fn reset_per_library_state(&mut self) { @@ -1518,12 +1518,12 @@ impl<'a> AixLinker<'a> { impl<'a> Linker for AixLinker<'a> { fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) { self.hint_dynamic(); - self.cmd.arg(format!("-l{}", lib)); + self.cmd.arg(format!("-l{lib}")); } fn link_staticlib(&mut self, lib: &str, _verbatim: bool) { self.hint_static(); - self.cmd.arg(format!("-l{}", lib)); + self.cmd.arg(format!("-l{lib}")); } fn link_rlib(&mut self, lib: &Path) { @@ -1573,7 +1573,7 @@ impl<'a> Linker for AixLinker<'a> { fn link_rust_dylib(&mut self, lib: &str, _: &Path) { self.hint_dynamic(); - self.cmd.arg(format!("-l{}", lib)); + self.cmd.arg(format!("-l{lib}")); } fn link_framework(&mut self, _framework: &str, _as_needed: bool) { @@ -1626,12 +1626,12 @@ impl<'a> Linker for AixLinker<'a> { let mut f = BufWriter::new(File::create(&path)?); // FIXME: use llvm-nm to generate export list. for symbol in symbols { - debug!(" _{}", symbol); - writeln!(f, " {}", symbol)?; + debug!(" _{symbol}"); + writeln!(f, " {symbol}")?; } }; if let Err(e) = res { - self.sess.fatal(format!("failed to write export file: {}", e)); + self.sess.fatal(format!("failed to write export file: {e}")); } self.cmd.arg(format!("-bE:{}", path.to_str().unwrap())); } @@ -1908,7 +1908,7 @@ impl<'a> Linker for BpfLinker<'a> { let res: io::Result<()> = try { let mut f = BufWriter::new(File::create(&path)?); for sym in symbols { - writeln!(f, "{}", sym)?; + writeln!(f, "{sym}")?; } }; if let Err(error) = res { diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs index c4bb51edade..5c7df29444b 100644 --- a/compiler/rustc_codegen_ssa/src/back/metadata.rs +++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs @@ -158,20 +158,19 @@ pub(super) fn get_metadata_xcoff<'a>(path: &Path, data: &'a [u8]) -> Result<&'a { let offset = metadata_symbol.address() as usize; if offset < 4 { - return Err(format!("Invalid metadata symbol offset: {}", offset)); + return Err(format!("Invalid metadata symbol offset: {offset}")); } // The offset specifies the location of rustc metadata in the comment section. // The metadata is preceded by a 4-byte length field. 
let len = u32::from_be_bytes(info_data[(offset - 4)..offset].try_into().unwrap()) as usize; if offset + len > (info_data.len() as usize) { return Err(format!( - "Metadata at offset {} with size {} is beyond .info section", - offset, len + "Metadata at offset {offset} with size {len} is beyond .info section" )); } return Ok(&info_data[offset..(offset + len)]); } else { - return Err(format!("Unable to find symbol {}", AIX_METADATA_SYMBOL_NAME)); + return Err(format!("Unable to find symbol {AIX_METADATA_SYMBOL_NAME}")); }; } diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs index 0b5656c9ad1..18268622341 100644 --- a/compiler/rustc_codegen_ssa/src/back/rpath.rs +++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs @@ -86,7 +86,7 @@ fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> Str output.pop(); // strip filename let output = fs::canonicalize(&output).unwrap_or(output); let relative = path_relative_from(&lib, &output) - .unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib)); + .unwrap_or_else(|| panic!("couldn't create relative path from {output:?} to {lib:?}")); // FIXME (#9639): This needs to handle non-utf8 paths format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path")) } diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 1c5d7a7c68e..40718525741 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -349,8 +349,6 @@ pub struct CodegenContext<B: WriteBackendMethods> { /// Directory into which should the LLVM optimization remarks be written. /// If `None`, they will be written to stderr. pub remark_dir: Option<PathBuf>, - /// Worker thread number - pub worker: usize, /// The incremental compilation session directory, or None if we are not /// compiling incrementally pub incr_comp_session_dir: Option<PathBuf>, @@ -362,7 +360,7 @@ pub struct CodegenContext<B: WriteBackendMethods> { impl<B: WriteBackendMethods> CodegenContext<B> { pub fn create_diag_handler(&self) -> Handler { - Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()), None) + Handler::with_emitter(Box::new(self.diag_emitter.clone())) } pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { @@ -376,38 +374,39 @@ impl<B: WriteBackendMethods> CodegenContext<B> { fn generate_lto_work<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, - needs_fat_lto: Vec<FatLTOInput<B>>, + needs_fat_lto: Vec<FatLtoInput<B>>, needs_thin_lto: Vec<(String, B::ThinBuffer)>, import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>, ) -> Vec<(WorkItem<B>, u64)> { let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work"); - let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() { + if !needs_fat_lto.is_empty() { assert!(needs_thin_lto.is_empty()); - let lto_module = + let module = B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise()); - (vec![lto_module], vec![]) + // We are adding a single work item, so the cost doesn't matter. 
+ vec![(WorkItem::LTO(module), 0)] } else { assert!(needs_fat_lto.is_empty()); - B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise()) - }; - - lto_modules - .into_iter() - .map(|module| { - let cost = module.cost(); - (WorkItem::LTO(module), cost) - }) - .chain(copy_jobs.into_iter().map(|wp| { - ( - WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { - name: wp.cgu_name.clone(), - source: wp, - }), - 0, - ) - })) - .collect() + let (lto_modules, copy_jobs) = B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules) + .unwrap_or_else(|e| e.raise()); + lto_modules + .into_iter() + .map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }) + .chain(copy_jobs.into_iter().map(|wp| { + ( + WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { + name: wp.cgu_name.clone(), + source: wp, + }), + 0, // copying is very cheap + ) + })) + .collect() + } } pub struct CompiledModules { @@ -709,7 +708,7 @@ impl<B: WriteBackendMethods> WorkItem<B> { fn desc(short: &str, _long: &str, name: &str) -> String { // The short label is three bytes, and is followed by a space. That // leaves 11 bytes for the CGU name. How we obtain those 11 bytes - // depends on the the CGU name form. + // depends on the CGU name form. // // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part // before the `-cgu.0` is the same for every CGU, so use the @@ -742,22 +741,32 @@ impl<B: WriteBackendMethods> WorkItem<B> { } match self { - WorkItem::Optimize(m) => desc("opt", "optimize module {}", &m.name), - WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for {}", &m.name), - WorkItem::LTO(m) => desc("lto", "LTO module {}", m.name()), + WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name), + WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name), + WorkItem::LTO(m) => desc("lto", "LTO module", m.name()), } } } /// A result produced by the backend. pub(crate) enum WorkItemResult<B: WriteBackendMethods> { - Compiled(CompiledModule), + /// The backend has finished compiling a CGU, nothing more required. + Finished(CompiledModule), + + /// The backend has finished compiling a CGU, which now needs linking + /// because `-Zcombine-cgu` was specified. NeedsLink(ModuleCodegen<B::Module>), - NeedsFatLTO(FatLTOInput<B>), - NeedsThinLTO(String, B::ThinBuffer), + + /// The backend has finished compiling a CGU, which now needs to go through + /// fat LTO. + NeedsFatLto(FatLtoInput<B>), + + /// The backend has finished compiling a CGU, which now needs to go through + /// thin LTO. 
+ NeedsThinLto(String, B::ThinBuffer), } -pub enum FatLTOInput<B: WriteBackendMethods> { +pub enum FatLtoInput<B: WriteBackendMethods> { Serialized { name: String, buffer: B::ModuleBuffer }, InMemory(ModuleCodegen<B::Module>), } @@ -846,7 +855,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e); }); } - Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer)) + Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) } ComputedLtoType::Fat => match bitcode { Some(path) => { @@ -854,9 +863,9 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( fs::write(&path, buffer.data()).unwrap_or_else(|e| { panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e); }); - Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer })) + Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer })) } - None => Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module))), + None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))), }, } } @@ -906,7 +915,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>( load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file) }); - WorkItemResult::Compiled(CompiledModule { + WorkItemResult::Finished(CompiledModule { name: module.name, kind: ModuleKind::Regular, object, @@ -936,7 +945,7 @@ fn finish_intra_module_work<B: ExtraBackendMethods>( || module.kind == ModuleKind::Allocator { let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? }; - Ok(WorkItemResult::Compiled(module)) + Ok(WorkItemResult::Finished(module)) } else { Ok(WorkItemResult::NeedsLink(module)) } @@ -987,10 +996,15 @@ struct Diagnostic { } #[derive(PartialEq, Clone, Copy, Debug)] -enum MainThreadWorkerState { +enum MainThreadState { + /// Doing nothing. Idle, + + /// Doing codegen, i.e. MIR-to-LLVM-IR conversion. Codegenning, - LLVMing, + + /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work. + Lending, } fn start_executing_work<B: ExtraBackendMethods>( @@ -1089,7 +1103,6 @@ fn start_executing_work<B: ExtraBackendMethods>( exported_symbols, remark: sess.opts.cg.remark.clone(), remark_dir, - worker: 0, incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), coordinator_send, @@ -1242,7 +1255,7 @@ fn start_executing_work<B: ExtraBackendMethods>( // Each LLVM module is automatically sent back to the coordinator for LTO if // necessary. There's already optimizations in place to avoid sending work // back to the coordinator if LTO isn't requested. - return B::spawn_thread(cgcx.time_trace, move || { + return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || { let mut worker_id_counter = 0; let mut free_worker_ids = Vec::new(); let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| { @@ -1285,10 +1298,19 @@ fn start_executing_work<B: ExtraBackendMethods>( // the implicit Token the compiler process owns no matter what. let mut tokens = Vec::new(); - let mut main_thread_worker_state = MainThreadWorkerState::Idle; - let mut running = 0; + let mut main_thread_state = MainThreadState::Idle; + + // How many LLVM worker threads are running while holding a Token. This + // *excludes* any that the main thread is lending a Token to. + let mut running_with_own_token = 0; + + // How many LLVM worker threads are running in total. This *includes* + // any that the main thread is lending a Token to. 
+ let running_with_any_token = |main_thread_state, running_with_own_token| { + running_with_own_token + + if main_thread_state == MainThreadState::Lending { 1 } else { 0 } + }; - let prof = &cgcx.prof; let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None; // Run the message loop while there's still anything that needs message @@ -1296,66 +1318,62 @@ fn start_executing_work<B: ExtraBackendMethods>( // wait for all existing work to finish, so many of the conditions here // only apply if codegen hasn't been aborted as they represent pending // work to be done. - while codegen_state == Ongoing - || running > 0 - || main_thread_worker_state == MainThreadWorkerState::LLVMing - || (codegen_state == Completed - && !(work_items.is_empty() - && needs_fat_lto.is_empty() - && needs_thin_lto.is_empty() - && lto_import_only_modules.is_empty() - && main_thread_worker_state == MainThreadWorkerState::Idle)) - { + loop { // While there are still CGUs to be codegened, the coordinator has // to decide how to utilize the compiler processes implicit Token: // For codegenning more CGU or for running them through LLVM. if codegen_state == Ongoing { - if main_thread_worker_state == MainThreadWorkerState::Idle { + if main_thread_state == MainThreadState::Idle { // Compute the number of workers that will be running once we've taken as many // items from the work queue as we can, plus one for the main thread. It's not - // critically important that we use this instead of just `running`, but it - // prevents the `queue_full_enough` heuristic from fluctuating just because a - // worker finished up and we decreased the `running` count, even though we're - // just going to increase it right after this when we put a new worker to work. - let extra_tokens = tokens.len().checked_sub(running).unwrap(); + // critically important that we use this instead of just + // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic + // from fluctuating just because a worker finished up and we decreased the + // `running_with_own_token` count, even though we're just going to increase it + // right after this when we put a new worker to work. + let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap(); let additional_running = std::cmp::min(extra_tokens, work_items.len()); - let anticipated_running = running + additional_running + 1; + let anticipated_running = running_with_own_token + additional_running + 1; if !queue_full_enough(work_items.len(), anticipated_running) { // The queue is not full enough, process more codegen units: if codegen_worker_send.send(CguMessage).is_err() { panic!("Could not send CguMessage to main thread") } - main_thread_worker_state = MainThreadWorkerState::Codegenning; + main_thread_state = MainThreadState::Codegenning; } else { // The queue is full enough to not let the worker // threads starve. Use the implicit Token to do some // LLVM work too. 
let (item, _) = work_items.pop().expect("queue empty - queue_full_enough() broken?"); - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - ..cgcx.clone() - }; - maybe_start_llvm_timer( - prof, - cgcx.config(item.module_kind()), + main_thread_state = MainThreadState::Lending; + spawn_work( + &cgcx, &mut llvm_start_time, + get_worker_id(&mut free_worker_ids), + item, ); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); } } } else if codegen_state == Completed { - // If we've finished everything related to normal codegen - // then it must be the case that we've got some LTO work to do. - // Perform the serial work here of figuring out what we're - // going to LTO and then push a bunch of work items onto our - // queue to do LTO - if work_items.is_empty() - && running == 0 - && main_thread_worker_state == MainThreadWorkerState::Idle + if running_with_any_token(main_thread_state, running_with_own_token) == 0 + && work_items.is_empty() { + // All codegen work is done. Do we have LTO work to do? + if needs_fat_lto.is_empty() + && needs_thin_lto.is_empty() + && lto_import_only_modules.is_empty() + { + // Nothing more to do! + break; + } + + // We have LTO work to do. Perform the serial work here of + // figuring out what we're going to LTO and then push a + // bunch of work items onto our queue to do LTO. This all + // happens on the coordinator thread but it's very quick so + // we don't worry about tokens. assert!(!started_lto); started_lto = true; @@ -1379,20 +1397,16 @@ fn start_executing_work<B: ExtraBackendMethods>( // In this branch, we know that everything has been codegened, // so it's just a matter of determining whether the implicit // Token is free to use for LLVM work. - match main_thread_worker_state { - MainThreadWorkerState::Idle => { + match main_thread_state { + MainThreadState::Idle => { if let Some((item, _)) = work_items.pop() { - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - ..cgcx.clone() - }; - maybe_start_llvm_timer( - prof, - cgcx.config(item.module_kind()), + main_thread_state = MainThreadState::Lending; + spawn_work( + &cgcx, &mut llvm_start_time, + get_worker_id(&mut free_worker_ids), + item, ); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); } else { // There is no unstarted work, so let the main thread // take over for a running worker. Otherwise the @@ -1400,16 +1414,16 @@ fn start_executing_work<B: ExtraBackendMethods>( // We reduce the `running` counter by one. The // `tokens.truncate()` below will take care of // giving the Token back. - debug_assert!(running > 0); - running -= 1; - main_thread_worker_state = MainThreadWorkerState::LLVMing; + debug_assert!(running_with_own_token > 0); + running_with_own_token -= 1; + main_thread_state = MainThreadState::Lending; } } - MainThreadWorkerState::Codegenning => bug!( + MainThreadState::Codegenning => bug!( "codegen worker should not be codegenning after \ codegen was already completed" ), - MainThreadWorkerState::LLVMing => { + MainThreadState::Lending => { // Already making good use of that token } } @@ -1417,35 +1431,39 @@ fn start_executing_work<B: ExtraBackendMethods>( // Don't queue up any more work if codegen was aborted, we're // just waiting for our existing children to finish. 
assert!(codegen_state == Aborted); + if running_with_any_token(main_thread_state, running_with_own_token) == 0 { + break; + } } // Spin up what work we can, only doing this while we've got available // parallelism slots and work left to spawn. - while codegen_state != Aborted && !work_items.is_empty() && running < tokens.len() { - let (item, _) = work_items.pop().unwrap(); - - maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time); - - let cgcx = - CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() }; - - spawn_work(cgcx, item); - running += 1; + if codegen_state != Aborted { + while !work_items.is_empty() && running_with_own_token < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + spawn_work( + &cgcx, + &mut llvm_start_time, + get_worker_id(&mut free_worker_ids), + item, + ); + running_with_own_token += 1; + } } - // Relinquish accidentally acquired extra tokens - tokens.truncate(running); + // Relinquish accidentally acquired extra tokens. + tokens.truncate(running_with_own_token); // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" + // with that worker and update our `running_with_own_token` count. + // We may later re-acquire a token to continue running more work. + // We may also not actually drop a token here if the worker was + // running with an "ephemeral token". let mut free_worker = |worker_id| { - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; + if main_thread_state == MainThreadState::Lending { + main_thread_state = MainThreadState::Idle; } else { - running -= 1; + running_with_own_token -= 1; } free_worker_ids.push(worker_id); @@ -1461,17 +1479,17 @@ fn start_executing_work<B: ExtraBackendMethods>( Ok(token) => { tokens.push(token); - if main_thread_worker_state == MainThreadWorkerState::LLVMing { + if main_thread_state == MainThreadState::Lending { // If the main thread token is used for LLVM work // at the moment, we turn that thread into a regular // LLVM worker thread, so the main thread is free // to react to codegen demand. 
- main_thread_worker_state = MainThreadWorkerState::Idle; - running += 1; + main_thread_state = MainThreadState::Idle; + running_with_own_token += 1; } } Err(e) => { - let msg = &format!("failed to acquire jobserver token: {}", e); + let msg = &format!("failed to acquire jobserver token: {e}"); shared_emitter.fatal(msg); codegen_state = Aborted; } @@ -1496,16 +1514,16 @@ fn start_executing_work<B: ExtraBackendMethods>( if !cgcx.opts.unstable_opts.no_parallel_llvm { helper.request_token(); } - assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; + assert_eq!(main_thread_state, MainThreadState::Codegenning); + main_thread_state = MainThreadState::Idle; } Message::CodegenComplete => { if codegen_state != Aborted { codegen_state = Completed; } - assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; + assert_eq!(main_thread_state, MainThreadState::Codegenning); + main_thread_state = MainThreadState::Idle; } // If codegen is aborted that means translation was aborted due @@ -1513,7 +1531,8 @@ fn start_executing_work<B: ExtraBackendMethods>( // to exit as soon as possible, but we want to make sure all // existing work has finished. Flag codegen as being done, and // then conditions above will ensure no more work is spawned but - // we'll keep executing this loop until `running` hits 0. + // we'll keep executing this loop until `running_with_own_token` + // hits 0. Message::CodegenAborted => { codegen_state = Aborted; } @@ -1522,9 +1541,10 @@ fn start_executing_work<B: ExtraBackendMethods>( free_worker(worker_id); match result { - Ok(WorkItemResult::Compiled(compiled_module)) => { + Ok(WorkItemResult::Finished(compiled_module)) => { match compiled_module.kind { ModuleKind::Regular => { + assert!(needs_link.is_empty()); compiled_modules.push(compiled_module); } ModuleKind::Allocator => { @@ -1535,14 +1555,17 @@ fn start_executing_work<B: ExtraBackendMethods>( } } Ok(WorkItemResult::NeedsLink(module)) => { + assert!(compiled_modules.is_empty()); needs_link.push(module); } - Ok(WorkItemResult::NeedsFatLTO(fat_lto_input)) => { + Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => { assert!(!started_lto); + assert!(needs_thin_lto.is_empty()); needs_fat_lto.push(fat_lto_input); } - Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer)) => { + Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => { assert!(!started_lto); + assert!(needs_fat_lto.is_empty()); needs_thin_lto.push((name, thin_buffer)); } Err(Some(WorkerFatalError)) => { @@ -1560,9 +1583,9 @@ fn start_executing_work<B: ExtraBackendMethods>( Message::AddImportOnlyModule { module_data, work_product } => { assert!(!started_lto); assert_eq!(codegen_state, Ongoing); - assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning); + assert_eq!(main_thread_state, MainThreadState::Codegenning); lto_import_only_modules.push((module_data, work_product)); - main_thread_worker_state = MainThreadWorkerState::Idle; + main_thread_state = MainThreadState::Idle; } } } @@ -1595,7 +1618,8 @@ fn start_executing_work<B: ExtraBackendMethods>( modules: compiled_modules, allocator_module: compiled_allocator_module, }) - }); + }) + .expect("failed to spawn coordinator thread"); // A heuristic that determines if we have enough LLVM WorkItems in the // queue so that the main thread can do LLVM work instead of codegen @@ -1653,23 +1677,24 @@ fn start_executing_work<B: ExtraBackendMethods>( let 
quarter_of_workers = workers_running - 3 * workers_running / 4; items_in_queue > 0 && items_in_queue >= quarter_of_workers } - - fn maybe_start_llvm_timer<'a>( - prof: &'a SelfProfilerRef, - config: &ModuleConfig, - llvm_start_time: &mut Option<VerboseTimingGuard<'a>>, - ) { - if config.time_module && llvm_start_time.is_none() { - *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes")); - } - } } /// `FatalError` is explicitly not `Send`. #[must_use] pub struct WorkerFatalError; -fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) { +fn spawn_work<'a, B: ExtraBackendMethods>( + cgcx: &'a CodegenContext<B>, + llvm_start_time: &mut Option<VerboseTimingGuard<'a>>, + worker_id: usize, + work: WorkItem<B>, +) { + if cgcx.config(work.module_kind()).time_module && llvm_start_time.is_none() { + *llvm_start_time = Some(cgcx.prof.verbose_generic_activity("LLVM_passes")); + } + + let cgcx = cgcx.clone(); + B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || { // Set up a destructor which will fire off a message that we're done as // we exit. @@ -1692,11 +1717,8 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B> } } - let mut bomb = Bomb::<B> { - coordinator_send: cgcx.coordinator_send.clone(), - result: None, - worker_id: cgcx.worker, - }; + let mut bomb = + Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None, worker_id }; // Execute the work itself, and if it finishes successfully then flag // ourselves as a success as well. @@ -1728,7 +1750,7 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B> }) }; }) - .expect("failed to spawn thread"); + .expect("failed to spawn work thread"); } enum SharedEmitterMessage { @@ -1945,6 +1967,10 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> { self.backend.print_pass_timings() } + if sess.print_llvm_stats() { + self.backend.print_statistics() + } + ( CodegenResults { metadata: self.metadata, @@ -1958,19 +1984,6 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> { ) } - pub fn submit_pre_codegened_module_to_llvm( - &self, - tcx: TyCtxt<'_>, - module: ModuleCodegen<B::Module>, - ) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - - // These are generally cheap and won't throw off scheduling. 
- let cost = 0; - submit_codegened_module_to_llvm(&self.backend, &self.coordinator.sender, module, cost); - } - pub fn codegen_finished(&self, tcx: TyCtxt<'_>) { self.wait_for_signal_to_codegen_item(); self.check_for_errors(tcx.sess); @@ -2036,8 +2049,8 @@ pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>( }))); } -pub fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_LTO_BC_EXT) +fn pre_lto_bitcode_filename(module_name: &str) -> String { + format!("{module_name}.{PRE_LTO_BC_EXT}") } fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool { diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index dc862803274..0ccb08c78f7 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -38,6 +38,7 @@ use rustc_span::symbol::sym; use rustc_span::Symbol; use rustc_target::abi::{Align, FIRST_VARIANT}; +use std::cmp; use std::collections::BTreeSet; use std::time::{Duration, Instant}; @@ -663,9 +664,16 @@ pub fn codegen_crate<B: ExtraBackendMethods>( ) }); - ongoing_codegen.submit_pre_codegened_module_to_llvm( - tcx, + ongoing_codegen.wait_for_signal_to_codegen_item(); + ongoing_codegen.check_for_errors(tcx.sess); + + // These modules are generally cheap and won't throw off scheduling. + let cost = 0; + submit_codegened_module_to_llvm( + &backend, + &ongoing_codegen.coordinator.sender, ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator }, + cost, ); } @@ -682,10 +690,10 @@ pub fn codegen_crate<B: ExtraBackendMethods>( // are large size variations, this can reduce memory usage significantly. let codegen_units: Vec<_> = { let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>(); - sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate()); + sorted_cgus.sort_by_key(|cgu| cmp::Reverse(cgu.size_estimate())); let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2); - second_half.iter().rev().interleave(first_half).copied().collect() + first_half.iter().interleave(second_half.iter().rev()).copied().collect() }; // Calculate the CGU reuse @@ -760,7 +768,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>( module, cost, ); - false } CguReuse::PreLto => { submit_pre_lto_module_to_llvm( @@ -772,7 +779,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>( source: cgu.previous_work_product(tcx), }, ); - true } CguReuse::PostLto => { submit_post_lto_module_to_llvm( @@ -783,9 +789,8 @@ pub fn codegen_crate<B: ExtraBackendMethods>( source: cgu.previous_work_product(tcx), }, ); - true } - }; + } } ongoing_codegen.codegen_finished(tcx); diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs index 0c7b8a79612..92792ab6477 100644 --- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs +++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs @@ -501,7 +501,22 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { }); // #73631: closures inherit `#[target_feature]` annotations - if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) { + // + // If this closure is marked `#[inline(always)]`, simply skip adding `#[target_feature]`. + // + // At this point, `unsafe` has already been checked and `#[target_feature]` only affects codegen. + // Emitting both `#[inline(always)]` and `#[target_feature]` can potentially result in an + // ICE, because LLVM errors when the function fails to be inlined due to a target feature + // mismatch. 
+ // + // Using `#[inline(always)]` implies that this closure will most likely be inlined into + // its parent function, which effectively inherits the features anyway. Boxing this closure + // would result in this closure being compiled without the inherited target features, but this + // is probably a poor usage of `#[inline(always)]` and easily avoided by not using the attribute. + if tcx.features().target_feature_11 + && tcx.is_closure(did.to_def_id()) + && codegen_fn_attrs.inline != InlineAttr::Always + { let owner_id = tcx.parent(did.to_def_id()); if tcx.def_kind(owner_id).has_codegen_attrs() { codegen_fn_attrs diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs index 64f799bb1e6..067c824aba0 100644 --- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs +++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs @@ -414,7 +414,7 @@ fn push_debuginfo_type_name<'tcx>( } // Type parameters from polymorphized functions. ty::Param(_) => { - write!(output, "{:?}", t).unwrap(); + write!(output, "{t:?}").unwrap(); } ty::Error(_) | ty::Infer(_) @@ -565,9 +565,9 @@ fn push_disambiguated_special_name( output: &mut String, ) { if cpp_like_debuginfo { - write!(output, "{}${}", label, disambiguator).unwrap(); + write!(output, "{label}${disambiguator}").unwrap(); } else { - write!(output, "{{{}#{}}}", label, disambiguator).unwrap(); + write!(output, "{{{label}#{disambiguator}}}").unwrap(); } } @@ -651,15 +651,15 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S ty::Int(ity) => { let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty()); let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128; - write!(output, "{}", val) + write!(output, "{val}") } ty::Uint(_) => { let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty()); - write!(output, "{}", val) + write!(output, "{val}") } ty::Bool => { let val = ct.try_eval_bool(tcx, ty::ParamEnv::reveal_all()).unwrap(); - write!(output, "{}", val) + write!(output, "{val}") } _ => { // If we cannot evaluate the constant to a known type, we fall back @@ -678,9 +678,9 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S }); if cpp_like_debuginfo(tcx) { - write!(output, "CONST${:x}", hash_short) + write!(output, "CONST${hash_short:x}") } else { - write!(output, "{{CONST#{:x}}}", hash_short) + write!(output, "{{CONST#{hash_short:x}}}") } } }, @@ -752,7 +752,7 @@ fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) { } fn pop_close_angle_bracket(output: &mut String) { - assert!(output.ends_with('>'), "'output' does not end with '>': {}", output); + assert!(output.ends_with('>'), "'output' does not end with '>': {output}"); output.pop(); if output.ends_with(' ') { output.pop(); diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index 056b4abd235..b7d8b9b45bf 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -177,31 +177,31 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper { } thorin::Error::NamelessSection(_, offset) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_section_without_name); - diag.set_arg("offset", format!("0x{:08x}", offset)); + diag.set_arg("offset", format!("0x{offset:08x}")); diag } thorin::Error::RelocationWithInvalidSymbol(section, offset) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_relocation_with_invalid_symbol); 
diag.set_arg("section", section); - diag.set_arg("offset", format!("0x{:08x}", offset)); + diag.set_arg("offset", format!("0x{offset:08x}")); diag } thorin::Error::MultipleRelocations(section, offset) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_multiple_relocations); diag.set_arg("section", section); - diag.set_arg("offset", format!("0x{:08x}", offset)); + diag.set_arg("offset", format!("0x{offset:08x}")); diag } thorin::Error::UnsupportedRelocation(section, offset) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_unsupported_relocation); diag.set_arg("section", section); - diag.set_arg("offset", format!("0x{:08x}", offset)); + diag.set_arg("offset", format!("0x{offset:08x}")); diag } thorin::Error::MissingDwoName(id) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_dwo_name); - diag.set_arg("id", format!("0x{:08x}", id)); + diag.set_arg("id", format!("0x{id:08x}")); diag } thorin::Error::NoCompilationUnits => { @@ -251,7 +251,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper { } thorin::Error::StrAtOffset(_, offset) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_str_at_offset); - diag.set_arg("offset", format!("0x{:08x}", offset)); + diag.set_arg("offset", format!("0x{offset:08x}")); diag } thorin::Error::ParseIndex(_, section) => { @@ -261,7 +261,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper { } thorin::Error::UnitNotInIndex(unit) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_unit_not_in_index); - diag.set_arg("unit", format!("0x{:08x}", unit)); + diag.set_arg("unit", format!("0x{unit:08x}")); diag } thorin::Error::RowNotInIndex(_, row) => { @@ -275,7 +275,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper { } thorin::Error::EmptyUnit(unit) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_empty_unit); - diag.set_arg("unit", format!("0x{:08x}", unit)); + diag.set_arg("unit", format!("0x{unit:08x}")); diag } thorin::Error::MultipleDebugInfoSection => { @@ -292,12 +292,12 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper { } thorin::Error::DuplicateUnit(unit) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_duplicate_unit); - diag.set_arg("unit", format!("0x{:08x}", unit)); + diag.set_arg("unit", format!("0x{unit:08x}")); diag } thorin::Error::MissingReferencedUnit(unit) => { diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_referenced_unit); - diag.set_arg("unit", format!("0x{:08x}", unit)); + diag.set_arg("unit", format!("0x{unit:08x}")); diag } thorin::Error::NoOutputObjectCreated => { @@ -353,7 +353,7 @@ impl IntoDiagnostic<'_> for LinkingFailed<'_> { let contains_undefined_ref = self.escaped_output.contains("undefined reference to"); - diag.note(format!("{:?}", self.command)).note(self.escaped_output.to_string()); + diag.note(format!("{:?}", self.command)).note(self.escaped_output); // Trying to match an error from OS linkers // which by now we have no way to translate. 
@@ -456,6 +456,12 @@ pub struct LinkerFileStem; pub struct StaticLibraryNativeArtifacts; #[derive(Diagnostic)] +#[diag(codegen_ssa_static_library_native_artifacts_to_file)] +pub struct StaticLibraryNativeArtifactsToFile<'a> { + pub path: &'a Path, +} + +#[derive(Diagnostic)] #[diag(codegen_ssa_link_script_unavailable)] pub struct LinkScriptUnavailable; diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index ed608bdbe9a..23c736c1579 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -703,13 +703,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { with_no_trimmed_paths!({ if layout.abi.is_uninhabited() { // Use this error even for the other intrinsics as it is more precise. - format!("attempted to instantiate uninhabited type `{}`", ty) + format!("attempted to instantiate uninhabited type `{ty}`") } else if requirement == ValidityRequirement::Zero { - format!("attempted to zero-initialize type `{}`, which is invalid", ty) + format!("attempted to zero-initialize type `{ty}`, which is invalid") } else { format!( - "attempted to leave type `{}` uninitialized, which is invalid", - ty + "attempted to leave type `{ty}` uninitialized, which is invalid" ) } }) @@ -1045,10 +1044,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { assert_eq!( fn_abi.args.len(), mir_args + 1, - "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}", - instance, - fn_span, - fn_abi, + "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {instance:?} {fn_span:?} {fn_abi:?}", ); let location = self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info }); @@ -1555,7 +1551,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock { let llbb = self.llbb(bb); if base::wants_new_eh_instructions(self.cx.sess()) { - let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb)); + let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{bb:?}")); let mut cleanup_bx = Bx::build(self.cx, cleanup_bb); let funclet = cleanup_bx.cleanup_pad(None, &[]); cleanup_bx.br(llbb); @@ -1675,7 +1671,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match self.cached_llbbs[bb] { CachedLlbb::None => { // FIXME(eddyb) only name the block if `fewer_names` is `false`. 
- let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb)); + let llbb = Bx::append_block(self.cx, self.llfn, &format!("{bb:?}")); self.cached_llbbs[bb] = CachedLlbb::Some(llbb); Some(llbb) } diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 34b8d8b5a6f..4167a85ccd5 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -337,7 +337,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } else { Some(match whole_local_var.or(fallback_var.clone()) { Some(var) if var.name != kw::Empty => var.name.to_string(), - _ => format!("{:?}", local), + _ => format!("{local:?}"), }) }; diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 1f90a28eb8e..e7539da0a05 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -397,8 +397,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ) -> OperandRef<'tcx, Bx::Value> { assert!( self.rvalue_creates_operand(rvalue, DUMMY_SP), - "cannot codegen {:?} to operand", - rvalue, + "cannot codegen {rvalue:?} to operand", ); match *rvalue { diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs index b72a1a07866..6fbf992eda9 100644 --- a/compiler/rustc_codegen_ssa/src/mono_item.rs +++ b/compiler/rustc_codegen_ssa/src/mono_item.rs @@ -140,8 +140,8 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> { MonoItem::Fn(instance) => { format!("Fn({:?}, {})", instance.def, instance.args.as_ptr().addr()) } - MonoItem::Static(id) => format!("Static({:?})", id), - MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id), + MonoItem::Static(id) => format!("Static({id:?})"), + MonoItem::GlobalAsm(id) => format!("GlobalAsm({id:?})"), } } } diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs index 48c6c75bb1a..98e561b0aef 100644 --- a/compiler/rustc_codegen_ssa/src/target_features.rs +++ b/compiler/rustc_codegen_ssa/src/target_features.rs @@ -369,9 +369,9 @@ pub fn from_target_feature( // We allow comma separation to enable multiple features. 
target_features.extend(value.as_str().split(',').filter_map(|feature| { let Some(feature_gate) = supported_target_features.get(feature) else { - let msg = format!("the feature named `{}` is not valid for this target", feature); + let msg = format!("the feature named `{feature}` is not valid for this target"); let mut err = tcx.sess.struct_span_err(item.span(), msg); - err.span_label(item.span(), format!("`{}` is not valid for this target", feature)); + err.span_label(item.span(), format!("`{feature}` is not valid for this target")); if let Some(stripped) = feature.strip_prefix('+') { let valid = supported_target_features.contains_key(stripped); if valid { @@ -405,7 +405,7 @@ pub fn from_target_feature( &tcx.sess.parse_sess, feature_gate.unwrap(), item.span(), - format!("the target feature `{}` is currently unstable", feature), + format!("the target feature `{feature}` is currently unstable"), ) .emit(); } diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs index b3c9ecf8b93..0a02ca6b317 100644 --- a/compiler/rustc_codegen_ssa/src/traits/backend.rs +++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs @@ -23,6 +23,8 @@ use rustc_span::symbol::Symbol; use rustc_target::abi::call::FnAbi; use rustc_target::spec::Target; +use std::fmt; + pub trait BackendTypes { type Value: CodegenObject; type Function: CodegenObject; @@ -61,7 +63,7 @@ pub trait CodegenBackend { fn locale_resource(&self) -> &'static str; fn init(&self, _sess: &Session) {} - fn print(&self, _req: PrintRequest, _sess: &Session) {} + fn print(&self, _req: &PrintRequest, _out: &mut dyn PrintBackendInfo, _sess: &Session) {} fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> { vec![] } @@ -140,15 +142,6 @@ pub trait ExtraBackendMethods: target_features: &[String], ) -> TargetMachineFactoryFn<Self>; - fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T> - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send + 'static, - { - std::thread::spawn(f) - } - fn spawn_named_thread<F, T>( _time_trace: bool, name: String, @@ -162,3 +155,19 @@ pub trait ExtraBackendMethods: std::thread::Builder::new().name(name).spawn(f) } } + +pub trait PrintBackendInfo { + fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>); +} + +impl PrintBackendInfo for String { + fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>) { + fmt::Write::write_fmt(self, args).unwrap(); + } +} + +impl dyn PrintBackendInfo + '_ { + pub fn write_fmt(&mut self, args: fmt::Arguments<'_>) { + self.infallible_write_fmt(args); + } +} diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs index d6e9bfce1a4..822c19155e3 100644 --- a/compiler/rustc_codegen_ssa/src/traits/consts.rs +++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs @@ -5,7 +5,13 @@ use rustc_target::abi; pub trait ConstMethods<'tcx>: BackendTypes { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; + /// Generate an uninitialized value (matching uninitialized memory in MIR). + /// Whether memory is initialized or not is tracked byte-for-byte. fn const_undef(&self, t: Self::Type) -> Self::Value; + /// Generate a fake value. Poison always affects the entire value, even if just a single byte is + /// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code + /// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a + /// poison value. 
fn const_poison(&self, t: Self::Type) -> Self::Value; fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value; diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs index 8cb58bd4c70..728c2bc8c49 100644 --- a/compiler/rustc_codegen_ssa/src/traits/mod.rs +++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs @@ -30,7 +30,9 @@ mod write; pub use self::abi::AbiBuilderMethods; pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef}; -pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods}; +pub use self::backend::{ + Backend, BackendTypes, CodegenBackend, ExtraBackendMethods, PrintBackendInfo, +}; pub use self::builder::{BuilderMethods, OverflowOp}; pub use self::consts::ConstMethods; pub use self::coverageinfo::CoverageInfoBuilderMethods; diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs index 9826256a4c5..ecf5095d8a3 100644 --- a/compiler/rustc_codegen_ssa/src/traits/write.rs +++ b/compiler/rustc_codegen_ssa/src/traits/write.rs @@ -1,5 +1,5 @@ use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; -use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig}; +use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig}; use crate::{CompiledModule, ModuleCodegen}; use rustc_errors::{FatalError, Handler}; @@ -23,7 +23,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { /// for further optimization. fn run_fat_lto( cgcx: &CodegenContext<Self>, - modules: Vec<FatLTOInput<Self>>, + modules: Vec<FatLtoInput<Self>>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, ) -> Result<LtoModuleCodegen<Self>, FatalError>; /// Performs thin LTO by performing necessary global analysis and returning two @@ -35,6 +35,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>; fn print_pass_timings(&self); + fn print_statistics(&self); unsafe fn optimize( cgcx: &CodegenContext<Self>, diag_handler: &Handler, |