Diffstat (limited to 'compiler/rustc_codegen_ssa')
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs  20
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs        11
2 files changed, 13 insertions, 18 deletions
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 8586615f7c7..d6f289b3eee 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -809,19 +809,12 @@ pub(crate) fn compute_per_cgu_lto_type(
     sess_lto: &Lto,
     opts: &config::Options,
     sess_crate_types: &[CrateType],
-    module_kind: ModuleKind,
 ) -> ComputedLtoType {
     // If the linker does LTO, we don't have to do it. Note that we
     // keep doing full LTO, if it is requested, as not to break the
     // assumption that the output will be a single module.
     let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
 
-    // When we're automatically doing ThinLTO for multi-codegen-unit
-    // builds we don't actually want to LTO the allocator modules if
-    // it shows up. This is due to various linker shenanigans that
-    // we'll encounter later.
-    let is_allocator = module_kind == ModuleKind::Allocator;
-
     // We ignore a request for full crate graph LTO if the crate type
     // is only an rlib, as there is no full crate graph to process,
     // that'll happen later.
@@ -833,7 +826,7 @@ pub(crate) fn compute_per_cgu_lto_type(
     let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
 
     match sess_lto {
-        Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
+        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
         Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
         Lto::Fat if !is_rlib => ComputedLtoType::Fat,
         _ => ComputedLtoType::No,
@@ -855,7 +848,16 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
     // back to the coordinator thread for further LTO processing (which
     // has to wait for all the initial modules to be optimized).
 
-    let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
+    // When we're automatically doing ThinLTO for multi-codegen-unit
+    // builds we don't actually want to LTO the allocator modules if
+    // it shows up. This is due to various linker shenanigans that
+    // we'll encounter later.
+    if module.kind == ModuleKind::Allocator {
+        let module = B::codegen(cgcx, module, module_config);
+        return WorkItemResult::Finished(module);
+    }
+
+    let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types);
 
     // If we're doing some form of incremental LTO then we need to be sure to
     // save our module to disk first.
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 97cdf8b6973..071bd09249a 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -46,9 +46,7 @@ use crate::meth::load_vtable;
 use crate::mir::operand::OperandValue;
 use crate::mir::place::PlaceRef;
 use crate::traits::*;
-use crate::{
-    CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, ModuleKind, errors, meth, mir,
-};
+use crate::{CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, errors, meth, mir};
 
 pub(crate) fn bin_op_to_icmp_predicate(op: BinOp, signed: bool) -> IntPredicate {
     match (op, signed) {
@@ -1135,12 +1133,7 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
     // We can re-use either the pre- or the post-thinlto state. If no LTO is
     // being performed then we can use post-LTO artifacts, otherwise we must
     // reuse pre-LTO artifacts
-    match compute_per_cgu_lto_type(
-        &tcx.sess.lto(),
-        &tcx.sess.opts,
-        tcx.crate_types(),
-        ModuleKind::Regular,
-    ) {
+    match compute_per_cgu_lto_type(&tcx.sess.lto(), &tcx.sess.opts, tcx.crate_types()) {
         ComputedLtoType::No => CguReuse::PostLto,
         _ => CguReuse::PreLto,
     }
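For readers skimming the change, here is a minimal, standalone Rust sketch of the decision logic as it stands after this patch. The enums and the lto_type_for_module helper below are simplified stand-ins invented for illustration, not the actual rustc_codegen_ssa or rustc_session items; only the match in compute_per_cgu_lto_type mirrors the code in the diff. The point of the refactor is that compute_per_cgu_lto_type no longer takes a ModuleKind: the allocator special case is handled at the call site (execute_optimize_work_item), which codegens allocator modules directly instead of feeding them into LTO.

    // Standalone sketch with simplified stand-in types; only the shape of the
    // logic follows the diff above.

    #[derive(Clone, Copy, PartialEq)]
    enum Lto { No, ThinLocal, Thin, Fat }

    #[derive(Clone, Copy, PartialEq)]
    enum CrateType { Rlib, Bin }

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum ComputedLtoType { No, Thin, Fat }

    #[derive(Clone, Copy, PartialEq)]
    enum ModuleKind { Regular, Allocator }

    // After the change the per-CGU LTO type depends only on session-wide
    // state, so it gives the same answer for every regular module.
    fn compute_per_cgu_lto_type(
        sess_lto: Lto,
        linker_does_lto: bool,
        crate_types: &[CrateType],
    ) -> ComputedLtoType {
        // Full crate-graph LTO is deferred when we are only building an rlib.
        let is_rlib = matches!(crate_types, [CrateType::Rlib]);

        match sess_lto {
            Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
            Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
            Lto::Fat if !is_rlib => ComputedLtoType::Fat,
            _ => ComputedLtoType::No,
        }
    }

    // The allocator special case now lives at the call site. In the real code
    // the allocator module is codegenned immediately and returned as a
    // finished work item; this sketch just models that as "no LTO".
    fn lto_type_for_module(
        kind: ModuleKind,
        sess_lto: Lto,
        linker_does_lto: bool,
        crate_types: &[CrateType],
    ) -> ComputedLtoType {
        if kind == ModuleKind::Allocator {
            return ComputedLtoType::No;
        }
        compute_per_cgu_lto_type(sess_lto, linker_does_lto, crate_types)
    }

    fn main() {
        // Local ThinLTO build of a binary: regular modules get thin LTO,
        // allocator modules skip it.
        let crate_types = [CrateType::Bin];
        assert_eq!(
            lto_type_for_module(ModuleKind::Regular, Lto::ThinLocal, false, &crate_types),
            ComputedLtoType::Thin
        );
        assert_eq!(
            lto_type_for_module(ModuleKind::Allocator, Lto::ThinLocal, false, &crate_types),
            ComputedLtoType::No
        );
    }

With the allocator check moved to the caller, determine_cgu_reuse in base.rs can call compute_per_cgu_lto_type without passing a placeholder ModuleKind::Regular, which is what the second file's hunks remove.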
