Diffstat (limited to 'compiler/rustc_codegen_ssa')
-rw-r--r--  compiler/rustc_codegen_ssa/messages.ftl          10
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/lto.rs        91
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs     184
-rw-r--r--  compiler/rustc_codegen_ssa/src/errors.rs          17
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/write.rs     7
5 files changed, 203 insertions, 106 deletions
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index c7bd6ffd1f2..a70d0011d16 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -35,6 +35,10 @@ codegen_ssa_dlltool_fail_import_library =
     {$stdout}
     {$stderr}
 
+codegen_ssa_dynamic_linking_with_lto =
+    cannot prefer dynamic linking when performing LTO
+    .note = only 'staticlib', 'bin', and 'cdylib' outputs are supported with LTO
+
 codegen_ssa_error_calling_dlltool =
     Error calling dlltool '{$dlltool_path}': {$error}
 
@@ -191,6 +195,12 @@ codegen_ssa_linker_unsupported_modifier = `as-needed` modifier not supported for
 
 codegen_ssa_linking_failed = linking with `{$linker_path}` failed: {$exit_status}
 
+codegen_ssa_lto_disallowed = lto can only be run for executables, cdylibs and static library outputs
+
+codegen_ssa_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`
+
+codegen_ssa_lto_proc_macro = lto cannot be used for `proc-macro` crate type without `-Zdylib-lto`
+
 codegen_ssa_malformed_cgu_name =
     found malformed codegen unit name `{$user_path}`. codegen units names must always start with the name of the crate (`{$crate_name}` in this case).
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
index b49b6783bbd..c95038375a1 100644
--- a/compiler/rustc_codegen_ssa/src/back/lto.rs
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -2,7 +2,15 @@ use std::ffi::CString;
 use std::sync::Arc;
 
 use rustc_data_structures::memmap::Mmap;
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportLevel};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{CrateType, Lto};
+use tracing::info;
 
+use crate::back::symbol_export::{self, symbol_name_for_instance_in_crate};
+use crate::back::write::CodegenContext;
+use crate::errors::{DynamicLinkingWithLTO, LtoDisallowed, LtoDylib, LtoProcMacro};
 use crate::traits::*;
 
 pub struct ThinModule<B: WriteBackendMethods> {
@@ -52,3 +60,86 @@ impl<M: ModuleBufferMethods> SerializedModule<M> {
         }
     }
 }
+
+fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+    match crate_type {
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Staticlib
+        | CrateType::Cdylib
+        | CrateType::ProcMacro
+        | CrateType::Sdylib => true,
+        CrateType::Rlib => false,
+    }
+}
+
+pub(super) fn exported_symbols_for_lto(
+    tcx: TyCtxt<'_>,
+    each_linked_rlib_for_lto: &[CrateNum],
+) -> Vec<String> {
+    let export_threshold = match tcx.sess.lto() {
+        // We're just doing LTO for our one crate
+        Lto::ThinLocal => SymbolExportLevel::Rust,
+
+        // We're doing LTO for the entire crate graph
+        Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&tcx.crate_types()),
+
+        Lto::No => return vec![],
+    };
+
+    let copy_symbols = |cnum| {
+        tcx.exported_non_generic_symbols(cnum)
+            .iter()
+            .chain(tcx.exported_generic_symbols(cnum))
+            .filter_map(|&(s, info): &(ExportedSymbol<'_>, SymbolExportInfo)| {
+                if info.level.is_below_threshold(export_threshold) || info.used {
+                    Some(symbol_name_for_instance_in_crate(tcx, s, cnum))
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>()
+    };
+    let mut symbols_below_threshold = {
+        let _timer = tcx.prof.generic_activity("lto_generate_symbols_below_threshold");
+        copy_symbols(LOCAL_CRATE)
+    };
+    info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+    // If we're performing LTO for the entire crate graph, then for each of our
+    // upstream dependencies, include their exported symbols.
+    if tcx.sess.lto() != Lto::ThinLocal {
+        for &cnum in each_linked_rlib_for_lto {
+            let _timer = tcx.prof.generic_activity("lto_generate_symbols_below_threshold");
+            symbols_below_threshold.extend(copy_symbols(cnum));
+        }
+    }
+
+    symbols_below_threshold
+}
+
+pub(super) fn check_lto_allowed<B: WriteBackendMethods>(cgcx: &CodegenContext<B>) {
+    if cgcx.lto == Lto::ThinLocal {
+        // Crate local LTO is always allowed
+        return;
+    }
+
+    let dcx = cgcx.create_dcx();
+
+    // Make sure we actually can run LTO
+    for crate_type in cgcx.crate_types.iter() {
+        if !crate_type_allows_lto(*crate_type) {
+            dcx.handle().emit_fatal(LtoDisallowed);
+        } else if *crate_type == CrateType::Dylib {
+            if !cgcx.opts.unstable_opts.dylib_lto {
+                dcx.handle().emit_fatal(LtoDylib);
+            }
+        } else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto {
+            dcx.handle().emit_fatal(LtoProcMacro);
+        }
+    }
+
+    if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+        dcx.handle().emit_fatal(DynamicLinkingWithLTO);
+    }
+}
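For context, the crate-type gate enforced by the new lto.rs helpers can be exercised on its own. The sketch below is not compiler code: it redeclares a minimal CrateType enum (rustc_session is not available outside the compiler), folds the dylib and proc-macro branches of check_lto_allowed into one -Zdylib-lto check, returns an error string instead of emitting a fatal diagnostic, and omits the ThinLocal early return and the prefer_dynamic check.

// Standalone sketch, not rustc_codegen_ssa code: a simplified model of the
// `crate_type_allows_lto` / `check_lto_allowed` helpers added in the diff above.
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Debug)]
enum CrateType {
    Executable,
    Dylib,
    Rlib,
    Staticlib,
    Cdylib,
    ProcMacro,
    Sdylib,
}

// Same arms as the new helper in lto.rs: everything except rlibs may take part in LTO.
fn crate_type_allows_lto(crate_type: CrateType) -> bool {
    match crate_type {
        CrateType::Executable
        | CrateType::Dylib
        | CrateType::Staticlib
        | CrateType::Cdylib
        | CrateType::ProcMacro
        | CrateType::Sdylib => true,
        CrateType::Rlib => false,
    }
}

// Simplified gate: returns an error string instead of emitting LtoDisallowed /
// LtoDylib / LtoProcMacro as fatal diagnostics.
fn check_lto_allowed(crate_types: &[CrateType], dylib_lto: bool) -> Result<(), String> {
    for &crate_type in crate_types {
        if !crate_type_allows_lto(crate_type) {
            return Err("lto can only be run for executables, cdylibs and static library outputs".into());
        }
        if (crate_type == CrateType::Dylib || crate_type == CrateType::ProcMacro) && !dylib_lto {
            return Err(format!("lto cannot be used for `{crate_type:?}` crate type without `-Zdylib-lto`"));
        }
    }
    Ok(())
}

fn main() {
    // An rlib in the crate-type list rejects (non-local) LTO outright.
    assert!(check_lto_allowed(&[CrateType::Executable, CrateType::Rlib], false).is_err());
    // dylib and proc-macro outputs are only accepted together with `-Zdylib-lto`.
    assert!(check_lto_allowed(&[CrateType::Dylib], false).is_err());
    assert!(check_lto_allowed(&[CrateType::Dylib], true).is_ok());
    // Executables, cdylibs and staticlibs are fine.
    assert!(check_lto_allowed(&[CrateType::Executable, CrateType::Cdylib], false).is_ok());
}

The assertions mirror the three diagnostics added to messages.ftl: rlib outputs reject LTO outright, dylib and proc-macro outputs require -Zdylib-lto, and the remaining crate types are accepted.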
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 24e0a4eb533..7be274df1d4 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -9,7 +9,7 @@ use std::{fs, io, mem, str, thread};
 use rustc_abi::Size;
 use rustc_ast::attr;
 use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
-use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::jobserver::{self, Acquired};
 use rustc_data_structures::memmap::Mmap;
 use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
@@ -20,14 +20,12 @@ use rustc_errors::{
     Suggestions,
 };
 use rustc_fs_util::link_or_copy;
-use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
 use rustc_incremental::{
     copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
 };
 use rustc_metadata::fs::copy_to_stdout;
 use rustc_middle::bug;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::middle::exported_symbols::SymbolExportInfo;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::Session;
 use rustc_session::config::{
@@ -40,7 +38,7 @@ use tracing::debug;
 
 use super::link::{self, ensure_removed};
 use super::lto::{self, SerializedModule};
-use super::symbol_export::symbol_name_for_instance_in_crate;
+use crate::back::lto::check_lto_allowed;
 use crate::errors::{AutodiffWithoutLto, ErrorCreatingRemarkDir};
 use crate::traits::*;
 use crate::{
@@ -332,8 +330,6 @@ pub type TargetMachineFactoryFn<B> = Arc<
         + Sync,
 >;
 
-type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportInfo)>>>;
-
 /// Additional resources used by optimize_and_codegen (not module specific)
 #[derive(Clone)]
 pub struct CodegenContext<B: WriteBackendMethods> {
@@ -343,10 +339,8 @@ pub struct CodegenContext<B: WriteBackendMethods> {
     pub save_temps: bool,
     pub fewer_names: bool,
     pub time_trace: bool,
-    pub exported_symbols: Option<Arc<ExportedSymbols>>,
     pub opts: Arc<config::Options>,
     pub crate_types: Vec<CrateType>,
-    pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
     pub output_filenames: Arc<OutputFilenames>,
     pub invocation_temp: Option<String>,
     pub regular_module_config: Arc<ModuleConfig>,
@@ -401,13 +395,21 @@ impl<B: WriteBackendMethods> CodegenContext<B> {
 
 fn generate_thin_lto_work<B: ExtraBackendMethods>(
     cgcx: &CodegenContext<B>,
+    exported_symbols_for_lto: &[String],
+    each_linked_rlib_for_lto: &[PathBuf],
     needs_thin_lto: Vec<(String, B::ThinBuffer)>,
     import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
 ) -> Vec<(WorkItem<B>, u64)> {
     let _prof_timer = cgcx.prof.generic_activity("codegen_thin_generate_lto_work");
 
-    let (lto_modules, copy_jobs) =
-        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise());
+    let (lto_modules, copy_jobs) = B::run_thin_lto(
+        cgcx,
+        exported_symbols_for_lto,
+        each_linked_rlib_for_lto,
+        needs_thin_lto,
+        import_only_modules,
+    )
+    .unwrap_or_else(|e| e.raise());
     lto_modules
         .into_iter()
         .map(|module| {
@@ -723,6 +725,8 @@ pub(crate) enum WorkItem<B: WriteBackendMethods> {
     CopyPostLtoArtifacts(CachedModuleCodegen),
     /// Performs fat LTO on the given module.
     FatLto {
+        exported_symbols_for_lto: Arc<Vec<String>>,
+        each_linked_rlib_for_lto: Vec<PathBuf>,
         needs_fat_lto: Vec<FatLtoInput<B>>,
         import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
         autodiff: Vec<AutoDiffItem>,
@@ -810,7 +814,7 @@ pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
 }
 
 pub enum FatLtoInput<B: WriteBackendMethods> {
-    Serialized { name: String, buffer: B::ModuleBuffer },
+    Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
     InMemory(ModuleCodegen<B::Module>),
 }
 
@@ -899,7 +903,10 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
                 fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                     panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                 });
-                Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer }))
+                Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
+                    name,
+                    buffer: SerializedModule::Local(buffer),
+                }))
             }
             None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))),
         },
@@ -992,12 +999,24 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
 
 fn execute_fat_lto_work_item<B: ExtraBackendMethods>(
     cgcx: &CodegenContext<B>,
-    needs_fat_lto: Vec<FatLtoInput<B>>,
+    exported_symbols_for_lto: &[String],
+    each_linked_rlib_for_lto: &[PathBuf],
+    mut needs_fat_lto: Vec<FatLtoInput<B>>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
    autodiff: Vec<AutoDiffItem>,
    module_config: &ModuleConfig,
 ) -> Result<WorkItemResult<B>, FatalError> {
-    let module = B::run_and_optimize_fat_lto(cgcx, needs_fat_lto, import_only_modules, autodiff)?;
+    for (module, wp) in import_only_modules {
+        needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
+    }
+
+    let module = B::run_and_optimize_fat_lto(
+        cgcx,
+        exported_symbols_for_lto,
+        each_linked_rlib_for_lto,
+        needs_fat_lto,
+        autodiff,
+    )?;
     let module = B::codegen(cgcx, module, module_config)?;
     Ok(WorkItemResult::Finished(module))
 }
@@ -1032,7 +1051,7 @@ pub(crate) enum Message<B: WriteBackendMethods> {
 
     /// The backend has finished processing a work item for a codegen unit.
     /// Sent from a backend worker thread.
-    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>>, worker_id: usize },
+    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },
 
     /// The frontend has finished generating something (backend IR or a
     /// post-LTO artifact) for a codegen unit, and it should be passed to the
@@ -1113,42 +1132,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
     let autodiff_items = autodiff_items.to_vec();
 
     let mut each_linked_rlib_for_lto = Vec::new();
+    let mut each_linked_rlib_file_for_lto = Vec::new();
     drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
         if link::ignored_for_lto(sess, crate_info, cnum) {
             return;
         }
-        each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
+        each_linked_rlib_for_lto.push(cnum);
+        each_linked_rlib_file_for_lto.push(path.to_path_buf());
     }));
 
     // Compute the set of symbols we need to retain when doing LTO (if we need to)
-    let exported_symbols = {
-        let mut exported_symbols = FxHashMap::default();
-
-        let copy_symbols = |cnum| {
-            let symbols = tcx
-                .exported_non_generic_symbols(cnum)
-                .iter()
-                .chain(tcx.exported_generic_symbols(cnum))
-                .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
-                .collect();
-            Arc::new(symbols)
-        };
-
-        match sess.lto() {
-            Lto::No => None,
-            Lto::ThinLocal => {
-                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
-                Some(Arc::new(exported_symbols))
-            }
-            Lto::Fat | Lto::Thin => {
-                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
-                for &(cnum, ref _path) in &each_linked_rlib_for_lto {
-                    exported_symbols.insert(cnum, copy_symbols(cnum));
-                }
-                Some(Arc::new(exported_symbols))
-            }
-        }
-    };
+    let exported_symbols_for_lto =
+        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));
 
     // First up, convert our jobserver into a helper thread so we can use normal
     // mpsc channels to manage our messages and such.
@@ -1183,14 +1178,12 @@ fn start_executing_work<B: ExtraBackendMethods>(
 
     let cgcx = CodegenContext::<B> {
         crate_types: tcx.crate_types().to_vec(),
-        each_linked_rlib_for_lto,
         lto: sess.lto(),
         fewer_names: sess.fewer_names(),
         save_temps: sess.opts.cg.save_temps,
         time_trace: sess.opts.unstable_opts.llvm_time_trace,
         opts: Arc::new(sess.opts.clone()),
         prof: sess.prof.clone(),
-        exported_symbols,
         remark: sess.opts.cg.remark.clone(),
         remark_dir,
         incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
@@ -1350,18 +1343,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
     // necessary. There's already optimizations in place to avoid sending work
     // back to the coordinator if LTO isn't requested.
     return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
-        let mut worker_id_counter = 0;
-        let mut free_worker_ids = Vec::new();
-        let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
-            if let Some(id) = free_worker_ids.pop() {
-                id
-            } else {
-                let id = worker_id_counter;
-                worker_id_counter += 1;
-                id
-            }
-        };
-
         // This is where we collect codegen units that have gone all the way
         // through codegen and LLVM.
         let mut compiled_modules = vec![];
@@ -1442,12 +1423,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
                     let (item, _) =
                         work_items.pop().expect("queue empty - queue_full_enough() broken?");
                     main_thread_state = MainThreadState::Lending;
-                    spawn_work(
-                        &cgcx,
-                        &mut llvm_start_time,
-                        get_worker_id(&mut free_worker_ids),
-                        item,
-                    );
+                    spawn_work(&cgcx, &mut llvm_start_time, item);
                 }
             }
         } else if codegen_state == Completed {
@@ -1474,12 +1450,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
                     let needs_fat_lto = mem::take(&mut needs_fat_lto);
                     let needs_thin_lto = mem::take(&mut needs_thin_lto);
                     let import_only_modules = mem::take(&mut lto_import_only_modules);
+                    let each_linked_rlib_file_for_lto =
+                        mem::take(&mut each_linked_rlib_file_for_lto);
+
+                    check_lto_allowed(&cgcx);
 
                     if !needs_fat_lto.is_empty() {
                         assert!(needs_thin_lto.is_empty());
 
                         work_items.push((
                             WorkItem::FatLto {
+                                exported_symbols_for_lto: Arc::clone(&exported_symbols_for_lto),
+                                each_linked_rlib_for_lto: each_linked_rlib_file_for_lto,
                                 needs_fat_lto,
                                 import_only_modules,
                                 autodiff: autodiff_items.clone(),
@@ -1495,9 +1477,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
                            dcx.handle().emit_fatal(AutodiffWithoutLto {});
                        }
 
-                        for (work, cost) in
-                            generate_thin_lto_work(&cgcx, needs_thin_lto, import_only_modules)
-                        {
+                        for (work, cost) in generate_thin_lto_work(
+                            &cgcx,
+                            &exported_symbols_for_lto,
+                            &each_linked_rlib_file_for_lto,
+                            needs_thin_lto,
+                            import_only_modules,
+                        ) {
                             let insertion_index = work_items
                                 .binary_search_by_key(&cost, |&(_, cost)| cost)
                                 .unwrap_or_else(|e| e);
@@ -1516,12 +1502,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
                     MainThreadState::Idle => {
                         if let Some((item, _)) = work_items.pop() {
                             main_thread_state = MainThreadState::Lending;
-                            spawn_work(
-                                &cgcx,
-                                &mut llvm_start_time,
-                                get_worker_id(&mut free_worker_ids),
-                                item,
-                            );
+                            spawn_work(&cgcx, &mut llvm_start_time, item);
                         } else {
                             // There is no unstarted work, so let the main thread
                             // take over for a running worker. Otherwise the
@@ -1557,12 +1538,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
                 while running_with_own_token < tokens.len()
                     && let Some((item, _)) = work_items.pop()
                 {
-                    spawn_work(
-                        &cgcx,
-                        &mut llvm_start_time,
-                        get_worker_id(&mut free_worker_ids),
-                        item,
-                    );
+                    spawn_work(&cgcx, &mut llvm_start_time, item);
                     running_with_own_token += 1;
                 }
             }
@@ -1570,21 +1546,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
             // Relinquish accidentally acquired extra tokens.
             tokens.truncate(running_with_own_token);
 
-            // If a thread exits successfully then we drop a token associated
-            // with that worker and update our `running_with_own_token` count.
-            // We may later re-acquire a token to continue running more work.
-            // We may also not actually drop a token here if the worker was
-            // running with an "ephemeral token".
-            let mut free_worker = |worker_id| {
-                if main_thread_state == MainThreadState::Lending {
-                    main_thread_state = MainThreadState::Idle;
-                } else {
-                    running_with_own_token -= 1;
-                }
-
-                free_worker_ids.push(worker_id);
-            };
-
             let msg = coordinator_receive.recv().unwrap();
             match *msg.downcast::<Message<B>>().ok().unwrap() {
                 // Save the token locally and the next turn of the loop will use
@@ -1653,8 +1614,17 @@ fn start_executing_work<B: ExtraBackendMethods>(
                     codegen_state = Aborted;
                 }
 
-                Message::WorkItem { result, worker_id } => {
-                    free_worker(worker_id);
+                Message::WorkItem { result } => {
+                    // If a thread exits successfully then we drop a token associated
+                    // with that worker and update our `running_with_own_token` count.
+                    // We may later re-acquire a token to continue running more work.
+                    // We may also not actually drop a token here if the worker was
+                    // running with an "ephemeral token".
+                    if main_thread_state == MainThreadState::Lending {
+                        main_thread_state = MainThreadState::Idle;
+                    } else {
+                        running_with_own_token -= 1;
+                    }
 
                     match result {
                         Ok(WorkItemResult::Finished(compiled_module)) => {
@@ -1800,7 +1770,6 @@ pub(crate) struct WorkerFatalError;
 fn spawn_work<'a, B: ExtraBackendMethods>(
     cgcx: &'a CodegenContext<B>,
     llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
-    worker_id: usize,
     work: WorkItem<B>,
 ) {
     if cgcx.config(work.module_kind()).time_module && llvm_start_time.is_none() {
@@ -1815,24 +1784,21 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
     struct Bomb<B: ExtraBackendMethods> {
         coordinator_send: Sender<Box<dyn Any + Send>>,
         result: Option<Result<WorkItemResult<B>, FatalError>>,
-        worker_id: usize,
     }
     impl<B: ExtraBackendMethods> Drop for Bomb<B> {
         fn drop(&mut self) {
-            let worker_id = self.worker_id;
             let msg = match self.result.take() {
-                Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result), worker_id },
+                Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result) },
                 Some(Err(FatalError)) => {
-                    Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)), worker_id }
+                    Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
                 }
-                None => Message::WorkItem::<B> { result: Err(None), worker_id },
+                None => Message::WorkItem::<B> { result: Err(None) },
             };
             drop(self.coordinator_send.send(Box::new(msg)));
         }
     }
 
-    let mut bomb =
-        Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None, worker_id };
+    let mut bomb = Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None };
 
     // Execute the work itself, and if it finishes successfully then flag
     // ourselves as a success as well.
@@ -1856,12 +1822,20 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
                     );
                     Ok(execute_copy_from_cache_work_item(&cgcx, m, module_config))
                 }
-                WorkItem::FatLto { needs_fat_lto, import_only_modules, autodiff } => {
+                WorkItem::FatLto {
+                    exported_symbols_for_lto,
+                    each_linked_rlib_for_lto,
+                    needs_fat_lto,
+                    import_only_modules,
+                    autodiff,
+                } => {
                    let _timer = cgcx
                        .prof
                        .generic_activity_with_arg("codegen_module_perform_lto", "everything");
                    execute_fat_lto_work_item(
                        &cgcx,
+                        &exported_symbols_for_lto,
+                        &each_linked_rlib_for_lto,
                        needs_fat_lto,
                        import_only_modules,
                        autodiff,
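With the worker_id plumbing gone, the coordinator adjusts main_thread_state and running_with_own_token directly when it receives Message::WorkItem, so the message no longer has to identify the sending worker. What still guarantees that every worker reports back, even after a fatal error or panic, is the Bomb drop guard in spawn_work. Below is a standalone sketch of that drop-guard pattern, with the compiler's message and result types replaced by simple placeholders; it is an illustration of the pattern, not the rustc implementation.

// Standalone sketch of the spawn_work "Bomb" drop guard: the worker always
// sends a message back to the coordinator when the guard is dropped, whether
// or not a result was recorded.
use std::sync::mpsc::{channel, Sender};
use std::thread;

enum Message {
    WorkItem { result: Result<String, Option<String>> },
}

struct Bomb {
    coordinator_send: Sender<Message>,
    result: Option<Result<String, String>>,
}

impl Drop for Bomb {
    fn drop(&mut self) {
        // Mirrors the diff: Ok -> finished work, Err(fatal) -> Some(error),
        // never set -> None (for example, the worker panicked).
        let msg = match self.result.take() {
            Some(Ok(result)) => Message::WorkItem { result: Ok(result) },
            Some(Err(e)) => Message::WorkItem { result: Err(Some(e)) },
            None => Message::WorkItem { result: Err(None) },
        };
        // Ignore send errors, like the original `drop(self.coordinator_send.send(..))`.
        drop(self.coordinator_send.send(msg));
    }
}

fn main() {
    let (tx, rx) = channel();

    let worker = thread::spawn(move || {
        let mut bomb = Bomb { coordinator_send: tx, result: None };
        // Execute the work itself; on success, flag the bomb as a success.
        bomb.result = Some(Ok("codegen unit compiled".to_string()));
        // `bomb` is dropped here and the coordinator is notified.
    });

    match rx.recv().unwrap() {
        Message::WorkItem { result: Ok(m) } => println!("worker finished: {m}"),
        Message::WorkItem { result: Err(_) } => println!("worker failed"),
    }
    worker.join().unwrap();
}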
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 9040915b6af..3d787d8bdbd 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -1294,3 +1294,20 @@ pub(crate) struct FeatureNotValid<'a> {
     #[help]
     pub plus_hint: bool,
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_lto_disallowed)]
+pub(crate) struct LtoDisallowed;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_lto_dylib)]
+pub(crate) struct LtoDylib;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_dynamic_linking_with_lto)]
+#[note]
+pub(crate) struct DynamicLinkingWithLTO;
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
index 5e993640472..8e78cbe1963 100644
--- a/compiler/rustc_codegen_ssa/src/traits/write.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -1,3 +1,5 @@
+use std::path::PathBuf;
+
 use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
 use rustc_errors::{DiagCtxtHandle, FatalError};
 use rustc_middle::dep_graph::WorkProduct;
@@ -24,8 +26,9 @@ pub trait WriteBackendMethods: Clone + 'static {
     /// if necessary and running any further optimizations
     fn run_and_optimize_fat_lto(
         cgcx: &CodegenContext<Self>,
+        exported_symbols_for_lto: &[String],
+        each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec<FatLtoInput<Self>>,
-        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
         diff_fncs: Vec<AutoDiffItem>,
     ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
     /// Performs thin LTO by performing necessary global analysis and returning two
@@ -33,6 +36,8 @@ pub trait WriteBackendMethods: Clone + 'static {
     /// can simply be copied over from the incr. comp. cache.
     fn run_thin_lto(
         cgcx: &CodegenContext<Self>,
+        exported_symbols_for_lto: &[String],
+        each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec<(String, Self::ThinBuffer)>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
     ) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError>;
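The trait change above means both LTO entry points now receive the symbols to preserve and the upstream rlib paths as explicit slices rather than reading them from fields on CodegenContext. The sketch below is a simplified standalone model of the new run_thin_lto shape, with placeholder types standing in for the real rustc_codegen_ssa types, showing how a backend implementation would consume the two new parameters.

// Standalone model, not the real trait: placeholder types illustrate the new
// `run_thin_lto` parameter list after this change.
#![allow(dead_code)]
use std::path::PathBuf;

struct CodegenContext;      // stand-in for CodegenContext<B>
struct ThinBuffer(Vec<u8>); // stand-in for B::ThinBuffer
struct Module(String);      // stand-in for ModuleCodegen<B::Module>

trait WriteBackendMethods {
    fn run_thin_lto(
        cgcx: &CodegenContext,
        exported_symbols_for_lto: &[String],
        each_linked_rlib_for_lto: &[PathBuf],
        modules: Vec<(String, ThinBuffer)>,
    ) -> Result<Vec<Module>, String>;
}

struct DummyBackend;

impl WriteBackendMethods for DummyBackend {
    fn run_thin_lto(
        _cgcx: &CodegenContext,
        exported_symbols_for_lto: &[String],
        each_linked_rlib_for_lto: &[PathBuf],
        modules: Vec<(String, ThinBuffer)>,
    ) -> Result<Vec<Module>, String> {
        // A real backend would mark `exported_symbols_for_lto` as preserved and
        // pull additional bitcode out of `each_linked_rlib_for_lto`; here we
        // only show the data flowing through the new parameters.
        println!(
            "thin LTO over {} modules, preserving {} symbols, {} upstream rlibs",
            modules.len(),
            exported_symbols_for_lto.len(),
            each_linked_rlib_for_lto.len()
        );
        Ok(modules.into_iter().map(|(name, _)| Module(name)).collect())
    }
}

fn main() {
    let modules = vec![("crate.0".to_string(), ThinBuffer(vec![]))];
    let exported = vec!["main".to_string()];
    let rlibs = vec![PathBuf::from("libcore.rlib")];
    let out = DummyBackend::run_thin_lto(&CodegenContext, &exported, &rlibs, modules).unwrap();
    assert_eq!(out.len(), 1);
}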
