| author | bors <bors@rust-lang.org> | 2024-03-11 16:51:54 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2024-03-11 16:51:54 +0000 |
| commit | 6554a5645a13e4d9331fd028960d69be91d7492d (patch) | |
| tree | 99769b8987cd4046411fa0e9e0f2a08e4fa45941 /compiler | |
| parent | 65cd843ae06ad00123c131a431ed5304e4cd577a (diff) | |
| parent | 7fa6fa4a1f5d48ac46e22756b27cb56f2aebf02d (diff) | |
| download | rust-6554a5645a13e4d9331fd028960d69be91d7492d.tar.gz, rust-6554a5645a13e4d9331fd028960d69be91d7492d.zip | |
Auto merge of #122338 - workingjubilee:rollup-xzpt4v4, r=workingjubilee
Rollup of 15 pull requests

Successful merges:

- #116791 (Allow codegen backends to opt-out of parallel codegen)
- #116793 (Allow targets to override default codegen backend)
- #117458 (LLVM Bitcode Linker: A self contained linker for nvptx and other targets)
- #119385 (Fix type resolution of associated const equality bounds (take 2))
- #121438 (std support for wasm32 panic=unwind)
- #121893 (Add tests (and a bit of cleanup) for interior mut handling in promotion and const-checking)
- #122080 (Clarity improvements to `DropTree`)
- #122152 (Improve diagnostics for parenthesized type arguments)
- #122166 (Remove the unused `field_remapping` field from `TypeLowering`)
- #122249 (interpret: do not call machine read hooks during validation)
- #122299 (Store backtrace for `must_produce_diag`)
- #122318 (Revision-related tweaks for next-solver tests)
- #122320 (Use ptradd for vtable indexing)
- #122328 (unix_sigpipe: Replace `inherit` with `sig_dfl` in syntax tests)
- #122330 (bootstrap readme: fix, improve, update)

r? `@ghost`
`@rustbot` modify labels: rollup
Diffstat (limited to 'compiler')
32 files changed, 577 insertions, 239 deletions
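One of the larger codegen changes below (#122320, in the `rustc_codegen_ssa/src/meth.rs` and `base.rs` hunks) replaces typed `inbounds_gep` vtable indexing with a single byte-offset `inbounds_ptradd`. A minimal sketch of the offset arithmetic the new code performs; `vtable_slot_byte_offset` is a hypothetical stand-in for the inline computation in the diff, not a rustc API:

```rust
// Illustrative only: models the byte-offset arithmetic; the real change feeds
// this offset to the codegen builder's `inbounds_ptradd` followed by a `load`.
fn vtable_slot_byte_offset(slot_index: u64, pointer_size_bytes: u64) -> u64 {
    // Every vtable entry is pointer-sized, so slot N starts at N * pointer_size.
    slot_index * pointer_size_bytes
}

fn main() {
    // On a 64-bit target, slots 0..4 (drop-in-place, size, align, first method)
    // start at byte offsets 0, 8, 16 and 24.
    for slot in 0..4 {
        println!("slot {slot} -> byte offset {}", vtable_slot_byte_offset(slot, 8));
    }
    assert_eq!(vtable_slot_byte_offset(3, 8), 24);
}
```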
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 37c690a0128..f89c8c9f836 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -77,8 +77,8 @@ pub struct CodegenCx<'ll, 'tcx> { /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details pub compiler_used_statics: RefCell<Vec<&'ll Value>>, - /// Mapping of non-scalar types to llvm types and field remapping if needed. - pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>, + /// Mapping of non-scalar types to llvm types. + pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>, /// Mapping of scalar types to llvm types. pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>, @@ -105,15 +105,6 @@ pub struct CodegenCx<'ll, 'tcx> { pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>, } -pub struct TypeLowering<'ll> { - /// Associated LLVM type - pub lltype: &'ll Type, - - /// If padding is used the slice maps fields from source order - /// to llvm order. - pub field_remapping: Option<SmallVec<[u32; 4]>>, -} - fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { match tls_model { TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 78d47f36f91..e32c38644aa 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -5,6 +5,7 @@ use crate::errors::{ }; use crate::llvm; use libc::c_int; +use rustc_codegen_ssa::base::wants_wasm_eh; use rustc_codegen_ssa::traits::PrintBackendInfo; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; @@ -98,6 +99,10 @@ unsafe fn configure_llvm(sess: &Session) { } } + if wants_wasm_eh(sess) { + add("-wasm-enable-eh", false); + } + if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind { add("-enable-emscripten-cxx-exceptions", false); } @@ -523,6 +528,10 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str .map(String::from), ); + if wants_wasm_eh(sess) && sess.panic_strategy() == PanicStrategy::Unwind { + features.push("+exception-handling".into()); + } + // -Ctarget-features let supported_features = sess.target.supported_target_features(); let mut featsmap = FxHashMap::default(); diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 587adefe1d2..d10a083765b 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -1,5 +1,4 @@ use crate::common::*; -use crate::context::TypeLowering; use crate::type_::Type; use rustc_codegen_ssa::traits::*; use rustc_middle::bug; @@ -10,7 +9,6 @@ use rustc_target::abi::HasDataLayout; use rustc_target::abi::{Abi, Align, FieldsShape}; use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64}; use rustc_target::abi::{Scalar, Size, Variants}; -use smallvec::{smallvec, SmallVec}; use std::fmt::Write; @@ -18,7 +16,6 @@ fn uncached_llvm_type<'a, 'tcx>( cx: &CodegenCx<'a, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>, - field_remapping: &mut Option<SmallVec<[u32; 4]>>, ) -> &'a Type { match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), @@ -71,8 +68,7 @@ fn uncached_llvm_type<'a, 'tcx>( FieldsShape::Array { count, .. 
} => cx.type_array(layout.field(cx, 0).llvm_type(cx), count), FieldsShape::Arbitrary { .. } => match name { None => { - let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout); - *field_remapping = new_field_remapping; + let (llfields, packed) = struct_llfields(cx, layout); cx.type_struct(&llfields, packed) } Some(ref name) => { @@ -87,7 +83,7 @@ fn uncached_llvm_type<'a, 'tcx>( fn struct_llfields<'a, 'tcx>( cx: &CodegenCx<'a, 'tcx>, layout: TyAndLayout<'tcx>, -) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) { +) -> (Vec<&'a Type>, bool) { debug!("struct_llfields: {:#?}", layout); let field_count = layout.fields.count(); @@ -95,7 +91,6 @@ fn struct_llfields<'a, 'tcx>( let mut offset = Size::ZERO; let mut prev_effective_align = layout.align.abi; let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); - let mut field_remapping = smallvec![0; field_count]; for i in layout.fields.index_by_increasing_offset() { let target_offset = layout.fields.offset(i as usize); let field = layout.field(cx, i); @@ -120,12 +115,10 @@ fn struct_llfields<'a, 'tcx>( result.push(cx.type_padding_filler(padding, padding_align)); debug!(" padding before: {:?}", padding); } - field_remapping[i] = result.len() as u32; result.push(field.llvm_type(cx)); offset = target_offset + field.size; prev_effective_align = effective_field_align; } - let padding_used = result.len() > field_count; if layout.is_sized() && field_count > 0 { if offset > layout.size { bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); @@ -143,8 +136,7 @@ fn struct_llfields<'a, 'tcx>( } else { debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size); } - let field_remapping = padding_used.then_some(field_remapping); - (result, packed, field_remapping) + (result, packed) } impl<'a, 'tcx> CodegenCx<'a, 'tcx> { @@ -224,7 +216,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { _ => None, }; if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) { - return llty.lltype; + return llty; } debug!("llvm_type({:#?})", self); @@ -236,7 +228,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { let normal_ty = cx.tcx.erase_regions(self.ty); let mut defer = None; - let mut field_remapping = None; let llty = if self.ty != normal_ty { let mut layout = cx.layout_of(normal_ty); if let Some(v) = variant_index { @@ -244,22 +235,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { } layout.llvm_type(cx) } else { - uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping) + uncached_llvm_type(cx, *self, &mut defer) }; debug!("--> mapped {:#?} to llty={:?}", self, llty); - cx.type_lowering - .borrow_mut() - .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping }); + cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty); if let Some((llty, layout)) = defer { - let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout); + let (llfields, packed) = struct_llfields(cx, layout); cx.set_struct_body(llty, &llfields, packed); - cx.type_lowering - .borrow_mut() - .get_mut(&(self.ty, variant_index)) - .unwrap() - .field_remapping = new_field_remapping; } llty } diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index fcb3602b734..e70cc9b6216 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -24,6 +24,7 @@ use rustc_span::symbol::Symbol; use rustc_target::spec::crt_objects::CrtObjects; use 
rustc_target::spec::LinkSelfContainedComponents; use rustc_target::spec::LinkSelfContainedDefault; +use rustc_target::spec::LinkerFlavorCli; use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, Lld, PanicStrategy}; use rustc_target::spec::{RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo}; @@ -1350,6 +1351,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { } } LinkerFlavor::Bpf => "bpf-linker", + LinkerFlavor::Llbc => "llvm-bitcode-linker", LinkerFlavor::Ptx => "rust-ptx-linker", }), flavor, @@ -1367,8 +1369,17 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { // linker and linker flavor specified via command line have precedence over what the target // specification specifies - let linker_flavor = - sess.opts.cg.linker_flavor.map(|flavor| sess.target.linker_flavor.with_cli_hints(flavor)); + let linker_flavor = match sess.opts.cg.linker_flavor { + // The linker flavors that are non-target specific can be directly translated to LinkerFlavor + Some(LinkerFlavorCli::Llbc) => Some(LinkerFlavor::Llbc), + Some(LinkerFlavorCli::Ptx) => Some(LinkerFlavor::Ptx), + // The linker flavors that corresponds to targets needs logic that keeps the base LinkerFlavor + _ => sess + .opts + .cg + .linker_flavor + .map(|flavor| sess.target.linker_flavor.with_cli_hints(flavor)), + }; if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), linker_flavor) { return ret; } @@ -2338,8 +2349,12 @@ fn add_order_independent_options( }); } - if flavor == LinkerFlavor::Ptx { - // Provide the linker with fallback to internal `target-cpu`. + if flavor == LinkerFlavor::Llbc { + cmd.arg("--target"); + cmd.arg(sess.target.llvm_target.as_ref()); + cmd.arg("--target-cpu"); + cmd.arg(&codegen_results.crate_info.target_cpu); + } else if flavor == LinkerFlavor::Ptx { cmd.arg("--fallback-arch"); cmd.arg(&codegen_results.crate_info.target_cpu); } else if flavor == LinkerFlavor::Bpf { diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index e52efd86955..b4e054417f3 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -153,6 +153,7 @@ pub fn get_linker<'a>( LinkerFlavor::Msvc(..) 
=> Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>, LinkerFlavor::EmCc => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>, LinkerFlavor::Bpf => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>, + LinkerFlavor::Llbc => Box::new(LlbcLinker { cmd, sess }) as Box<dyn Linker>, LinkerFlavor::Ptx => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>, } } @@ -1824,7 +1825,7 @@ impl<'a> Linker for PtxLinker<'a> { } Lto::No => {} - }; + } } fn output_filename(&mut self, path: &Path) { @@ -1862,6 +1863,104 @@ impl<'a> Linker for PtxLinker<'a> { fn linker_plugin_lto(&mut self) {} } +/// The `self-contained` LLVM bitcode linker +pub struct LlbcLinker<'a> { + cmd: Command, + sess: &'a Session, +} + +impl<'a> Linker for LlbcLinker<'a> { + fn cmd(&mut self) -> &mut Command { + &mut self.cmd + } + + fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {} + + fn link_dylib_by_name(&mut self, _name: &str, _verbatim: bool, _as_needed: bool) { + panic!("external dylibs not supported") + } + + fn link_staticlib_by_name( + &mut self, + _name: &str, + _verbatim: bool, + _whole_archive: bool, + _search_paths: &SearchPaths, + ) { + panic!("staticlibs not supported") + } + + fn link_staticlib_by_path(&mut self, path: &Path, _whole_archive: bool) { + self.cmd.arg(path); + } + + fn include_path(&mut self, path: &Path) { + self.cmd.arg("-L").arg(path); + } + + fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) { + self.cmd.arg("--debug"); + } + + fn add_object(&mut self, path: &Path) { + self.cmd.arg(path); + } + + fn optimize(&mut self) { + match self.sess.opts.optimize { + OptLevel::No => "-O0", + OptLevel::Less => "-O1", + OptLevel::Default => "-O2", + OptLevel::Aggressive => "-O3", + OptLevel::Size => "-Os", + OptLevel::SizeMin => "-Oz", + }; + } + + fn output_filename(&mut self, path: &Path) { + self.cmd.arg("-o").arg(path); + } + + fn framework_path(&mut self, _path: &Path) { + panic!("frameworks not supported") + } + + fn full_relro(&mut self) {} + + fn partial_relro(&mut self) {} + + fn no_relro(&mut self) {} + + fn gc_sections(&mut self, _keep_metadata: bool) {} + + fn no_gc_sections(&mut self) {} + + fn pgo_gen(&mut self) {} + + fn no_crt_objects(&mut self) {} + + fn no_default_libraries(&mut self) {} + + fn control_flow_guard(&mut self) {} + + fn ehcont_guard(&mut self) {} + + fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) { + match _crate_type { + CrateType::Cdylib => { + for sym in symbols { + self.cmd.arg("--export-symbol").arg(sym); + } + } + _ => (), + } + } + + fn subsystem(&mut self, _subsystem: &str) {} + + fn linker_plugin_lto(&mut self) {} +} + pub struct BpfLinker<'a> { cmd: Command, sess: &'a Session, diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index bbb04652119..4eda4c2f08c 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -374,6 +374,10 @@ pub struct CodegenContext<B: WriteBackendMethods> { pub incr_comp_session_dir: Option<PathBuf>, /// Channel back to the main control thread to send messages to pub coordinator_send: Sender<Box<dyn Any + Send>>, + /// `true` if the codegen should be run in parallel. + /// + /// Depends on [`CodegenBackend::supports_parallel()`] and `-Zno_parallel_backend`. 
+ pub parallel: bool, } impl<B: WriteBackendMethods> CodegenContext<B> { @@ -1152,6 +1156,7 @@ fn start_executing_work<B: ExtraBackendMethods>( target_arch: tcx.sess.target.arch.to_string(), split_debuginfo: tcx.sess.split_debuginfo(), split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind, + parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend, }; // This is the "main loop" of parallel work happening for parallel codegen. @@ -1422,7 +1427,7 @@ fn start_executing_work<B: ExtraBackendMethods>( .binary_search_by_key(&cost, |&(_, cost)| cost) .unwrap_or_else(|e| e); work_items.insert(insertion_index, (work, cost)); - if !cgcx.opts.unstable_opts.no_parallel_llvm { + if cgcx.parallel { helper.request_token(); } } @@ -1545,7 +1550,7 @@ fn start_executing_work<B: ExtraBackendMethods>( }; work_items.insert(insertion_index, (llvm_work_item, cost)); - if !cgcx.opts.unstable_opts.no_parallel_llvm { + if cgcx.parallel { helper.request_token(); } assert_eq!(main_thread_state, MainThreadState::Codegenning); diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 5cba14a5dda..c316d19e041 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -165,14 +165,11 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target)); if let Some(entry_idx) = vptr_entry_idx { - let ptr_ty = cx.type_ptr(); - let ptr_align = cx.tcx().data_layout.pointer_align.abi; - let gep = bx.inbounds_gep( - ptr_ty, - old_info, - &[bx.const_usize(u64::try_from(entry_idx).unwrap())], - ); - let new_vptr = bx.load(ptr_ty, gep, ptr_align); + let ptr_size = bx.data_layout().pointer_size; + let ptr_align = bx.data_layout().pointer_align.abi; + let vtable_byte_offset = u64::try_from(entry_idx).unwrap() * ptr_size.bytes(); + let gep = bx.inbounds_ptradd(old_info, bx.const_usize(vtable_byte_offset)); + let new_vptr = bx.load(bx.type_ptr(), gep, ptr_align); bx.nonnull_metadata(new_vptr); // VTable loads are invariant. bx.set_invariant_load(new_vptr); diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs index 12146a54d3b..4f7dc9968a1 100644 --- a/compiler/rustc_codegen_ssa/src/meth.rs +++ b/compiler/rustc_codegen_ssa/src/meth.rs @@ -20,9 +20,13 @@ impl<'a, 'tcx> VirtualIndex { ty: Ty<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, ) -> Bx::Value { - // Load the data pointer from the object. + // Load the function pointer from the object. debug!("get_fn({llvtable:?}, {ty:?}, {self:?})"); + let llty = bx.fn_ptr_backend_type(fn_abi); + let ptr_size = bx.data_layout().pointer_size; + let ptr_align = bx.data_layout().pointer_align.abi; + let vtable_byte_offset = self.0 * ptr_size.bytes(); if bx.cx().sess().opts.unstable_opts.virtual_function_elimination && bx.cx().sess().lto() == Lto::Fat @@ -30,12 +34,10 @@ impl<'a, 'tcx> VirtualIndex { let typeid = bx .typeid_metadata(typeid_for_trait_ref(bx.tcx(), expect_dyn_trait_in_self(ty))) .unwrap(); - let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes(); let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid); func } else { - let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]); + let gep = bx.inbounds_ptradd(llvtable, bx.const_usize(vtable_byte_offset)); let ptr = bx.load(llty, gep, ptr_align); bx.nonnull_metadata(ptr); // VTable loads are invariant. 
@@ -53,9 +55,12 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_int({:?}, {:?})", llvtable, self); let llty = bx.type_isize(); - let usize_align = bx.tcx().data_layout.pointer_align.abi; - let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]); - let ptr = bx.load(llty, gep, usize_align); + let ptr_size = bx.data_layout().pointer_size; + let ptr_align = bx.data_layout().pointer_align.abi; + let vtable_byte_offset = self.0 * ptr_size.bytes(); + + let gep = bx.inbounds_ptradd(llvtable, bx.const_usize(vtable_byte_offset)); + let ptr = bx.load(llty, gep, ptr_align); // VTable loads are invariant. bx.set_invariant_load(ptr); ptr diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 89a44d2897a..d3f5de25d9a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -1597,7 +1597,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let funclet; let llbb; let mut bx; - if base::wants_msvc_seh(self.cx.sess()) { + if base::wants_new_eh_instructions(self.cx.sess()) { // This is a basic block that we're aborting the program for, // notably in an `extern` function. These basic blocks are inserted // so that we assert that `extern` functions do indeed not panic, diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs index cb1feff1336..e6d42c596d2 100644 --- a/compiler/rustc_codegen_ssa/src/traits/backend.rs +++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs @@ -111,6 +111,13 @@ pub trait CodegenBackend { codegen_results: CodegenResults, outputs: &OutputFilenames, ) -> Result<(), ErrorGuaranteed>; + + /// Returns `true` if this backend can be safely called from multiple threads. + /// + /// Defaults to `true`. + fn supports_parallel(&self) -> bool { + true + } } pub trait ExtraBackendMethods: diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 8ee3a0cc967..90884adb28c 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -380,16 +380,12 @@ pub fn eval_in_interpreter<'mir, 'tcx>( } Ok(mplace) => { // Since evaluation had no errors, validate the resulting constant. - - // Temporarily allow access to the static_root_alloc_id for the purpose of validation. - let static_root_alloc_id = ecx.machine.static_root_alloc_id.take(); - let validation = const_validate_mplace(&ecx, &mplace, cid); - ecx.machine.static_root_alloc_id = static_root_alloc_id; + let res = const_validate_mplace(&ecx, &mplace, cid); let alloc_id = mplace.ptr().provenance.unwrap().alloc_id(); // Validation failed, report an error. - if let Err(error) = validation { + if let Err(error) = res { Err(const_report_error(&ecx, error, alloc_id)) } else { // Convert to raw constant @@ -412,10 +408,10 @@ pub fn const_validate_mplace<'mir, 'tcx>( _ if cid.promoted.is_some() => CtfeValidationMode::Promoted, Some(mutbl) => CtfeValidationMode::Static { mutbl }, // a `static` None => { - // In normal `const` (not promoted), the outermost allocation is always only copied, - // so having `UnsafeCell` in there is okay despite them being in immutable memory. - let allow_immutable_unsafe_cell = cid.promoted.is_none() && !inner; - CtfeValidationMode::Const { allow_immutable_unsafe_cell } + // This is a normal `const` (not promoted). 
+ // The outermost allocation is always only copied, so having `UnsafeCell` in there + // is okay despite them being in immutable memory. + CtfeValidationMode::Const { allow_immutable_unsafe_cell: !inner } } }; ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?; diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs index 90a654a1229..305f7ade101 100644 --- a/compiler/rustc_const_eval/src/interpret/machine.rs +++ b/compiler/rustc_const_eval/src/interpret/machine.rs @@ -391,6 +391,8 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Hook for performing extra checks on a memory read access. /// + /// This will *not* be called during validation! + /// /// Takes read-only access to the allocation so we can keep all the memory read /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you /// need to mutate. @@ -410,6 +412,8 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Hook for performing extra checks on any memory read access, /// that involves an allocation, even ZST reads. /// + /// This will *not* be called during validation! + /// /// Used to prevent statics from self-initializing by reading from their own memory /// as it is being initialized. fn before_alloc_read( diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 3b2208b8caa..cf7f165b87c 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -8,6 +8,7 @@ use std::assert_matches::assert_matches; use std::borrow::Cow; +use std::cell::Cell; use std::collections::VecDeque; use std::fmt; use std::ptr; @@ -111,6 +112,11 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> { /// that do not exist any more. // FIXME: this should not be public, but interning currently needs access to it pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>, + + /// This stores whether we are currently doing reads purely for the purpose of validation. + /// Those reads do not trigger the machine's hooks for memory reads. + /// Needless to say, this must only be set with great care! + validation_in_progress: Cell<bool>, } /// A reference to some allocation that was already bounds-checked for the given region @@ -137,6 +143,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { alloc_map: M::MemoryMap::default(), extra_fn_ptr_map: FxIndexMap::default(), dead_alloc_map: FxIndexMap::default(), + validation_in_progress: Cell::new(false), } } @@ -624,10 +631,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { size, CheckInAllocMsg::MemoryAccessTest, |alloc_id, offset, prov| { - // We want to call the hook on *all* accesses that involve an AllocId, - // including zero-sized accesses. That means we have to do it here - // rather than below in the `Some` branch. - M::before_alloc_read(self, alloc_id)?; + if !self.memory.validation_in_progress.get() { + // We want to call the hook on *all* accesses that involve an AllocId, + // including zero-sized accesses. That means we have to do it here + // rather than below in the `Some` branch. 
+ M::before_alloc_read(self, alloc_id)?; + } let alloc = self.get_alloc_raw(alloc_id)?; Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc))) }, @@ -635,7 +644,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc { let range = alloc_range(offset, size); - M::before_memory_read(self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?; + if !self.memory.validation_in_progress.get() { + M::before_memory_read( + self.tcx, + &self.machine, + &alloc.extra, + (alloc_id, prov), + range, + )?; + } Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id })) } else { Ok(None) @@ -909,6 +926,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } }) } + + /// Runs the close in "validation" mode, which means the machine's memory read hooks will be + /// suppressed. Needless to say, this must only be set with great care! Cannot be nested. + pub(super) fn run_for_validation<R>(&self, f: impl FnOnce() -> R) -> R { + assert!( + self.memory.validation_in_progress.replace(true) == false, + "`validation_in_progress` was already set" + ); + let res = f(); + assert!( + self.memory.validation_in_progress.replace(false) == true, + "`validation_in_progress` was unset by someone else" + ); + res + } } #[doc(hidden)] @@ -1154,6 +1186,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; let src_alloc = self.get_alloc_raw(src_alloc_id)?; let src_range = alloc_range(src_offset, size); + assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation"); M::before_memory_read( tcx, &self.machine, diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index c568f9acfd3..972424bccfa 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -971,7 +971,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self }; // Run it. - match visitor.visit_value(op) { + match self.run_for_validation(|| visitor.visit_value(op)) { Ok(()) => Ok(()), // Pass through validation failures and "invalid program" issues. Err(err) diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs index 53308cd7f90..a93e8138aa4 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs @@ -30,7 +30,7 @@ type QualifResults<'mir, 'tcx, Q> = rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>; #[derive(Default)] -pub struct Qualifs<'mir, 'tcx> { +pub(crate) struct Qualifs<'mir, 'tcx> { has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>, needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>, needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>, diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs index 7eb3c181d69..1847847d9d2 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs @@ -284,6 +284,7 @@ where if Q::in_adt_inherently(cx, def, args) { return true; } + // Don't do any value-based reasoning for unions. 
if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) { return true; } diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 2802f44cda7..7b1070f309b 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -37,7 +37,7 @@ use rustc_session::config::{nightly_options, CG_OPTIONS, Z_OPTIONS}; use rustc_session::config::{ErrorOutputType, Input, OutFileName, OutputType}; use rustc_session::getopts::{self, Matches}; use rustc_session::lint::{Lint, LintId}; -use rustc_session::{config, EarlyDiagCtxt, Session}; +use rustc_session::{config, filesearch, EarlyDiagCtxt, Session}; use rustc_span::def_id::LOCAL_CRATE; use rustc_span::source_map::FileLoader; use rustc_span::symbol::sym; @@ -887,7 +887,11 @@ pub fn version_at_macro_invocation( let debug_flags = matches.opt_strs("Z"); let backend_name = debug_flags.iter().find_map(|x| x.strip_prefix("codegen-backend=")); - get_codegen_backend(early_dcx, &None, backend_name).print_version(); + let opts = config::Options::default(); + let sysroot = filesearch::materialize_sysroot(opts.maybe_sysroot.clone()); + let target = config::build_target_config(early_dcx, &opts, None, &sysroot); + + get_codegen_backend(early_dcx, &sysroot, backend_name, &target).print_version(); } } @@ -1092,7 +1096,12 @@ pub fn describe_flag_categories(early_dcx: &EarlyDiagCtxt, matches: &Matches) -> if cg_flags.iter().any(|x| *x == "passes=list") { let backend_name = debug_flags.iter().find_map(|x| x.strip_prefix("codegen-backend=")); - get_codegen_backend(early_dcx, &None, backend_name).print_passes(); + + let opts = config::Options::default(); + let sysroot = filesearch::materialize_sysroot(opts.maybe_sysroot.clone()); + let target = config::build_target_config(early_dcx, &opts, None, &sysroot); + + get_codegen_backend(early_dcx, &sysroot, backend_name, &target).print_passes(); return true; } diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index bedb677c1e9..d4f884d49ea 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -442,8 +442,8 @@ struct DiagCtxtInner { emitter: Box<DynEmitter>, /// Must we produce a diagnostic to justify the use of the expensive - /// `trimmed_def_paths` function? - must_produce_diag: bool, + /// `trimmed_def_paths` function? Backtrace is the location of the call. + must_produce_diag: Option<Backtrace>, /// Has this diagnostic context printed any diagnostics? (I.e. has /// `self.emitter.emit_diagnostic()` been called? @@ -572,10 +572,11 @@ impl Drop for DiagCtxtInner { } if !self.has_printed && !self.suppressed_expected_diag && !std::thread::panicking() { - if self.must_produce_diag { + if let Some(backtrace) = &self.must_produce_diag { panic!( - "must_produce_diag: trimmed_def_paths called but no diagnostics emitted; \ - use `DelayDm` for lints or `with_no_trimmed_paths` for debugging" + "must_produce_diag: `trimmed_def_paths` called but no diagnostics emitted; \ + use `DelayDm` for lints or `with_no_trimmed_paths` for debugging. \ + called at: {backtrace}" ); } } @@ -721,7 +722,7 @@ impl DiagCtxt { *delayed_bugs = Default::default(); *deduplicated_err_count = 0; *deduplicated_warn_count = 0; - *must_produce_diag = false; + *must_produce_diag = None; *has_printed = false; *suppressed_expected_diag = false; *taught_diagnostics = Default::default(); @@ -1091,8 +1092,13 @@ impl DiagCtxt { /// Used when trimmed_def_paths is called and we must produce a diagnostic /// to justify its cost. 
+ #[track_caller] pub fn set_must_produce_diag(&self) { - self.inner.borrow_mut().must_produce_diag = true; + assert!( + self.inner.borrow().must_produce_diag.is_none(), + "should only need to collect a backtrace once" + ); + self.inner.borrow_mut().must_produce_diag = Some(Backtrace::capture()); } } @@ -1384,7 +1390,7 @@ impl DiagCtxtInner { deduplicated_err_count: 0, deduplicated_warn_count: 0, emitter, - must_produce_diag: false, + must_produce_diag: None, has_printed: false, suppressed_expected_diag: false, taught_diagnostics: Default::default(), diff --git a/compiler/rustc_hir_analysis/src/astconv/bounds.rs b/compiler/rustc_hir_analysis/src/astconv/bounds.rs index 6d8a5bc0e90..17e0aeb044c 100644 --- a/compiler/rustc_hir_analysis/src/astconv/bounds.rs +++ b/compiler/rustc_hir_analysis/src/astconv/bounds.rs @@ -408,7 +408,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ { // Create the generic arguments for the associated type or constant by joining the // parent arguments (the arguments of the trait) and the own arguments (the ones of // the associated item itself) and construct an alias type using them. - candidate.map_bound(|trait_ref| { + let alias_ty = candidate.map_bound(|trait_ref| { let ident = Ident::new(assoc_item.name, binding.ident.span); let item_segment = hir::PathSegment { ident, @@ -430,7 +430,25 @@ impl<'tcx> dyn AstConv<'tcx> + '_ { // *constants* to represent *const projections*. Alias *term* would be a more // appropriate name but alas. ty::AliasTy::new(tcx, assoc_item.def_id, alias_args) - }) + }); + + // Provide the resolved type of the associated constant to `type_of(AnonConst)`. + if !speculative + && let hir::TypeBindingKind::Equality { term: hir::Term::Const(anon_const) } = + binding.kind + { + let ty = alias_ty.map_bound(|ty| tcx.type_of(ty.def_id).instantiate(tcx, ty.args)); + // Since the arguments passed to the alias type above may contain early-bound + // generic parameters, the instantiated type may contain some as well. + // Therefore wrap it in `EarlyBinder`. + // FIXME(fmease): Reject escaping late-bound vars. + tcx.feed_anon_const_type( + anon_const.def_id, + ty::EarlyBinder::bind(ty.skip_binder()), + ); + } + + alias_ty }; match binding.kind { diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs index a1345c2d81b..598dba922d0 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs @@ -79,37 +79,6 @@ fn anon_const_type_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx> { .expect("const parameter types cannot be generic"); } - Node::TypeBinding(binding @ &TypeBinding { hir_id: binding_id, .. }) - if let Node::TraitRef(trait_ref) = tcx.parent_hir_node(binding_id) => - { - let Some(trait_def_id) = trait_ref.trait_def_id() else { - return Ty::new_error_with_message( - tcx, - tcx.def_span(def_id), - "Could not find trait", - ); - }; - let assoc_items = tcx.associated_items(trait_def_id); - let assoc_item = assoc_items.find_by_name_and_kind( - tcx, - binding.ident, - ty::AssocKind::Const, - def_id.to_def_id(), - ); - return if let Some(assoc_item) = assoc_item { - tcx.type_of(assoc_item.def_id) - .no_bound_vars() - .expect("const parameter types cannot be generic") - } else { - // FIXME(associated_const_equality): add a useful error message here. 
- Ty::new_error_with_message( - tcx, - tcx.def_span(def_id), - "Could not find associated const on trait", - ) - }; - } - // This match arm is for when the def_id appears in a GAT whose // path can't be resolved without typechecking e.g. // diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs index 284d965979e..1a82e6c6910 100644 --- a/compiler/rustc_interface/src/interface.rs +++ b/compiler/rustc_interface/src/interface.rs @@ -16,7 +16,7 @@ use rustc_parse::maybe_new_parser_from_source_str; use rustc_query_impl::QueryCtxt; use rustc_query_system::query::print_query_stack; use rustc_session::config::{self, Cfg, CheckCfg, ExpectedValues, Input, OutFileName}; -use rustc_session::filesearch::sysroot_candidates; +use rustc_session::filesearch::{self, sysroot_candidates}; use rustc_session::parse::ParseSess; use rustc_session::{lint, CompilerIO, EarlyDiagCtxt, Session}; use rustc_span::source_map::FileLoader; @@ -339,16 +339,53 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se let early_dcx = EarlyDiagCtxt::new(config.opts.error_format); - let codegen_backend = if let Some(make_codegen_backend) = config.make_codegen_backend { - make_codegen_backend(&config.opts) - } else { - util::get_codegen_backend( - &early_dcx, - &config.opts.maybe_sysroot, - config.opts.unstable_opts.codegen_backend.as_deref(), - ) + let sysroot = filesearch::materialize_sysroot(config.opts.maybe_sysroot.clone()); + + let (codegen_backend, target_override) = match config.make_codegen_backend { + None => { + // Build a target without override, so that it can override the backend if needed + let target = + config::build_target_config(&early_dcx, &config.opts, None, &sysroot); + + let backend = util::get_codegen_backend( + &early_dcx, + &sysroot, + config.opts.unstable_opts.codegen_backend.as_deref(), + &target, + ); + + // target_override is documented to be called before init(), so this is okay + let target_override = backend.target_override(&config.opts); + + // Assert that we don't use target's override of the backend and + // backend's override of the target at the same time + if config.opts.unstable_opts.codegen_backend.is_none() + && target.default_codegen_backend.is_some() + && target_override.is_some() + { + rustc_middle::bug!( + "Codegen backend requested target override even though the target requested the backend" + ); + } + + (backend, target_override) + } + Some(make_codegen_backend) => { + // N.B. `make_codegen_backend` takes precedence over `target.default_codegen_backend`, + // which is ignored in this case. 
+ let backend = make_codegen_backend(&config.opts); + + // target_override is documented to be called before init(), so this is okay + let target_override = backend.target_override(&config.opts); + + (backend, target_override) + } }; + // Re-build target with the (potential) override + let target_cfg = + config::build_target_config(&early_dcx, &config.opts, target_override, &sysroot); + let temps_dir = config.opts.unstable_opts.temps_dir.as_deref().map(PathBuf::from); let bundle = match rustc_errors::fluent_bundle( @@ -367,9 +404,6 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se let mut locale_resources = Vec::from(config.locale_resources); locale_resources.push(codegen_backend.locale_resource()); - // target_override is documented to be called before init(), so this is okay - let target_override = codegen_backend.target_override(&config.opts); - let mut sess = rustc_session::build_session( early_dcx, config.opts, @@ -384,7 +418,8 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se locale_resources, config.lint_caps, config.file_loader, - target_override, + target_cfg, + sysroot, util::rustc_version_str().unwrap_or("unknown"), config.ice_file, config.using_internal_features, diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index 4f15d1c1d3a..42fff01c11c 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -13,7 +13,7 @@ use rustc_session::config::{ use rustc_session::lint::Level; use rustc_session::search_paths::SearchPath; use rustc_session::utils::{CanonicalizedPath, NativeLib, NativeLibKind}; -use rustc_session::{build_session, getopts, CompilerIO, EarlyDiagCtxt, Session}; +use rustc_session::{build_session, filesearch, getopts, CompilerIO, EarlyDiagCtxt, Session}; use rustc_span::edition::{Edition, DEFAULT_EDITION}; use rustc_span::symbol::sym; use rustc_span::{FileName, SourceFileHashAlgorithm}; @@ -37,6 +37,12 @@ fn mk_session(matches: getopts::Matches) -> (Session, Cfg) { output_file: None, temps_dir, }; + + let sysroot = filesearch::materialize_sysroot(sessopts.maybe_sysroot.clone()); + + let target_cfg = + rustc_session::config::build_target_config(&early_dcx, &sessopts, None, &sysroot); + let sess = build_session( early_dcx, sessopts, @@ -46,7 +52,8 @@ fn mk_session(matches: getopts::Matches) -> (Session, Cfg) { vec![], Default::default(), None, - None, + target_cfg, + sysroot, "", None, Arc::default(), @@ -685,7 +692,7 @@ fn test_unstable_options_tracking_hash() { untracked!(nll_facts, true); untracked!(no_analysis, true); untracked!(no_leak_check, true); - untracked!(no_parallel_llvm, true); + untracked!(no_parallel_backend, true); untracked!(parse_only, true); // `pre_link_arg` is omitted because it just forwards to `pre_link_args`. 
untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]); diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs index 23bd2dac57e..829b00aabc1 100644 --- a/compiler/rustc_interface/src/util.rs +++ b/compiler/rustc_interface/src/util.rs @@ -14,13 +14,14 @@ use rustc_session::{filesearch, output, Session}; use rustc_span::edit_distance::find_best_match_for_name; use rustc_span::edition::Edition; use rustc_span::symbol::{sym, Symbol}; +use rustc_target::spec::Target; use session::EarlyDiagCtxt; -use std::env; use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::OnceLock; use std::thread; +use std::{env, iter}; /// Function pointer type that constructs a new CodegenBackend. pub type MakeBackendFn = fn() -> Box<dyn CodegenBackend>; @@ -195,21 +196,25 @@ fn load_backend_from_dylib(early_dcx: &EarlyDiagCtxt, path: &Path) -> MakeBacken /// A name of `None` indicates that the default backend should be used. pub fn get_codegen_backend( early_dcx: &EarlyDiagCtxt, - maybe_sysroot: &Option<PathBuf>, + sysroot: &Path, backend_name: Option<&str>, + target: &Target, ) -> Box<dyn CodegenBackend> { static LOAD: OnceLock<unsafe fn() -> Box<dyn CodegenBackend>> = OnceLock::new(); let load = LOAD.get_or_init(|| { - let default_codegen_backend = option_env!("CFG_DEFAULT_CODEGEN_BACKEND").unwrap_or("llvm"); + let backend = backend_name + .or(target.default_codegen_backend.as_deref()) + .or(option_env!("CFG_DEFAULT_CODEGEN_BACKEND")) + .unwrap_or("llvm"); - match backend_name.unwrap_or(default_codegen_backend) { + match backend { filename if filename.contains('.') => { load_backend_from_dylib(early_dcx, filename.as_ref()) } #[cfg(feature = "llvm")] "llvm" => rustc_codegen_llvm::LlvmCodegenBackend::new, - backend_name => get_codegen_sysroot(early_dcx, maybe_sysroot, backend_name), + backend_name => get_codegen_sysroot(early_dcx, sysroot, backend_name), } }); @@ -244,7 +249,7 @@ fn get_rustc_path_inner(bin_path: &str) -> Option<PathBuf> { #[allow(rustc::untranslatable_diagnostic)] // FIXME: make this translatable fn get_codegen_sysroot( early_dcx: &EarlyDiagCtxt, - maybe_sysroot: &Option<PathBuf>, + sysroot: &Path, backend_name: &str, ) -> MakeBackendFn { // For now we only allow this function to be called once as it'll dlopen a @@ -261,28 +266,28 @@ fn get_codegen_sysroot( let target = session::config::host_triple(); let sysroot_candidates = sysroot_candidates(); - let sysroot = maybe_sysroot - .iter() - .chain(sysroot_candidates.iter()) + let sysroot = iter::once(sysroot) + .chain(sysroot_candidates.iter().map(<_>::as_ref)) .map(|sysroot| { filesearch::make_target_lib_path(sysroot, target).with_file_name("codegen-backends") }) .find(|f| { info!("codegen backend candidate: {}", f.display()); f.exists() - }); - let sysroot = sysroot.unwrap_or_else(|| { - let candidates = sysroot_candidates - .iter() - .map(|p| p.display().to_string()) - .collect::<Vec<_>>() - .join("\n* "); - let err = format!( - "failed to find a `codegen-backends` folder \ + }) + .unwrap_or_else(|| { + let candidates = sysroot_candidates + .iter() + .map(|p| p.display().to_string()) + .collect::<Vec<_>>() + .join("\n* "); + let err = format!( + "failed to find a `codegen-backends` folder \ in the sysroot candidates:\n* {candidates}" - ); - early_dcx.early_fatal(err); - }); + ); + early_dcx.early_fatal(err); + }); + info!("probing {} for a codegen backend", sysroot.display()); let d = 
sysroot.read_dir().unwrap_or_else(|e| { diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs index 961502566ba..88ac05cabb6 100644 --- a/compiler/rustc_mir_build/src/build/scope.rs +++ b/compiler/rustc_mir_build/src/build/scope.rs @@ -203,16 +203,31 @@ const ROOT_NODE: DropIdx = DropIdx::from_u32(0); /// in `build_mir`. #[derive(Debug)] struct DropTree { - /// Drops in the tree. - drops: IndexVec<DropIdx, (DropData, DropIdx)>, - /// Map for finding the inverse of the `next_drop` relation: - /// - /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i` - previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>, + /// Nodes in the drop tree, containing drop data and a link to the next node. + drops: IndexVec<DropIdx, DropNode>, + /// Map for finding the index of an existing node, given its contents. + existing_drops_map: FxHashMap<DropNodeKey, DropIdx>, /// Edges into the `DropTree` that need to be added once it's lowered. entry_points: Vec<(DropIdx, BasicBlock)>, } +/// A single node in the drop tree. +#[derive(Debug)] +struct DropNode { + /// Info about the drop to be performed at this node in the drop tree. + data: DropData, + /// Index of the "next" drop to perform (in drop order, not declaration order). + next: DropIdx, +} + +/// Subset of [`DropNode`] used for reverse lookup in a hash table. +#[derive(Debug, PartialEq, Eq, Hash)] +struct DropNodeKey { + next: DropIdx, + local: Local, + kind: DropKind, +} + impl Scope { /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. This includes destructors, @@ -247,7 +262,7 @@ trait DropTreeBuilder<'tcx> { /// Links a block outside the drop tree, `from`, to the block `to` inside /// the drop tree. - fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock); + fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock); } impl DropTree { @@ -258,20 +273,29 @@ impl DropTree { let fake_source_info = SourceInfo::outermost(DUMMY_SP); let fake_data = DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage }; - let drop_idx = DropIdx::MAX; - let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1); - Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() } + let drops = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]); + Self { drops, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() } } - fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx { + /// Adds a node to the drop tree, consisting of drop data and the index of + /// the "next" drop (in drop order), which could be the sentinel [`ROOT_NODE`]. + /// + /// If there is already an equivalent node in the tree, nothing is added, and + /// that node's index is returned. Otherwise, the new node's index is returned. + fn add_drop(&mut self, data: DropData, next: DropIdx) -> DropIdx { let drops = &mut self.drops; *self - .previous_drops - .entry((next, drop.local, drop.kind)) - .or_insert_with(|| drops.push((drop, next))) + .existing_drops_map + .entry(DropNodeKey { next, local: data.local, kind: data.kind }) + // Create a new node, and also add its index to the map. + .or_insert_with(|| drops.push(DropNode { data, next })) } - fn add_entry(&mut self, from: BasicBlock, to: DropIdx) { + /// Registers `from` as an entry point to this drop tree, at `to`. 
+ /// + /// During [`Self::build_mir`], `from` will be linked to the corresponding + /// block within the drop tree. + fn add_entry_point(&mut self, from: BasicBlock, to: DropIdx) { debug_assert!(to < self.drops.next_index()); self.entry_points.push((to, from)); } @@ -326,13 +350,13 @@ impl DropTree { let entry_points = &mut self.entry_points; entry_points.sort(); - for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { + for (drop_idx, drop_node) in self.drops.iter_enumerated().rev() { if entry_points.last().is_some_and(|entry_point| entry_point.0 == drop_idx) { let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); needs_block[drop_idx] = Block::Own; while entry_points.last().is_some_and(|entry_point| entry_point.0 == drop_idx) { let entry_block = entry_points.pop().unwrap().1; - T::add_entry(cfg, entry_block, block); + T::link_entry_point(cfg, entry_block, block); } } match needs_block[drop_idx] { @@ -344,10 +368,10 @@ impl DropTree { blocks[drop_idx] = blocks[pred]; } } - if let DropKind::Value = drop_data.0.kind { - needs_block[drop_data.1] = Block::Own; + if let DropKind::Value = drop_node.data.kind { + needs_block[drop_node.next] = Block::Own; } else if drop_idx != ROOT_NODE { - match &mut needs_block[drop_data.1] { + match &mut needs_block[drop_node.next] { pred @ Block::None => *pred = Block::Shares(drop_idx), pred @ Block::Shares(_) => *pred = Block::Own, Block::Own => (), @@ -364,34 +388,35 @@ impl DropTree { cfg: &mut CFG<'tcx>, blocks: &IndexSlice<DropIdx, Option<BasicBlock>>, ) { - for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { + for (drop_idx, drop_node) in self.drops.iter_enumerated().rev() { let Some(block) = blocks[drop_idx] else { continue }; - match drop_data.0.kind { + match drop_node.data.kind { DropKind::Value => { let terminator = TerminatorKind::Drop { - target: blocks[drop_data.1].unwrap(), + target: blocks[drop_node.next].unwrap(), // The caller will handle this if needed. unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup), - place: drop_data.0.local.into(), + place: drop_node.data.local.into(), replace: false, }; - cfg.terminate(block, drop_data.0.source_info, terminator); + cfg.terminate(block, drop_node.data.source_info, terminator); } // Root nodes don't correspond to a drop. DropKind::Storage if drop_idx == ROOT_NODE => {} DropKind::Storage => { let stmt = Statement { - source_info: drop_data.0.source_info, - kind: StatementKind::StorageDead(drop_data.0.local), + source_info: drop_node.data.source_info, + kind: StatementKind::StorageDead(drop_node.data.local), }; cfg.push(block, stmt); - let target = blocks[drop_data.1].unwrap(); + let target = blocks[drop_node.next].unwrap(); if target != block { // Diagnostics don't use this `Span` but debuginfo // might. Since we don't want breakpoints to be placed // here, especially when this is on an unwind path, we // use `DUMMY_SP`. - let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info }; + let source_info = + SourceInfo { span: DUMMY_SP, ..drop_node.data.source_info }; let terminator = TerminatorKind::Goto { target }; cfg.terminate(block, source_info, terminator); } @@ -673,11 +698,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .flat_map(|scope| &scope.drops) .fold(ROOT_NODE, |drop_idx, &drop| drops.add_drop(drop, drop_idx)); - drops.add_entry(block, drop_idx); + drops.add_entry_point(block, drop_idx); // `build_drop_trees` doesn't have access to our source_info, so we // create a dummy terminator now. 
`TerminatorKind::UnwindResume` is used // because MIR type checking will panic if it hasn't been overwritten. + // (See `<ExitScopes as DropTreeBuilder>::link_entry_point`.) self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume); self.cfg.start_new_block().unit() @@ -708,11 +734,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { drop_idx = drops.add_drop(*drop, drop_idx); } } - drops.add_entry(block, drop_idx); + drops.add_entry_point(block, drop_idx); // `build_drop_trees` doesn't have access to our source_info, so we // create a dummy terminator now. `TerminatorKind::UnwindResume` is used // because MIR type checking will panic if it hasn't been overwritten. + // (See `<ExitScopes as DropTreeBuilder>::link_entry_point`.) self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume); } @@ -1119,7 +1146,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); let next_drop = self.diverge_cleanup(); - self.scopes.unwind_drops.add_entry(start, next_drop); + self.scopes.unwind_drops.add_entry_point(start, next_drop); } /// Sets up a path that performs all required cleanup for dropping a @@ -1153,7 +1180,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { scope.cached_coroutine_drop_block = Some(cached_drop); } - self.scopes.coroutine_drops.add_entry(yield_block, cached_drop); + self.scopes.coroutine_drops.add_entry_point(yield_block, cached_drop); } /// Utility function for *non*-scope code to build their own drops @@ -1274,9 +1301,9 @@ fn build_scope_drops<'tcx>( // `unwind_to` should drop the value that we're about to // schedule. If dropping this value panics, then we continue // with the *next* value on the unwind path. - debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local); - debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind); - unwind_to = unwind_drops.drops[unwind_to].1; + debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local); + debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind); + unwind_to = unwind_drops.drops[unwind_to].next; // If the operand has been moved, and we are not on an unwind // path, then don't generate the drop. (We only take this into @@ -1286,7 +1313,7 @@ fn build_scope_drops<'tcx>( continue; } - unwind_drops.add_entry(block, unwind_to); + unwind_drops.add_entry_point(block, unwind_to); let next = cfg.start_new_block(); cfg.terminate( @@ -1303,9 +1330,9 @@ fn build_scope_drops<'tcx>( } DropKind::Storage => { if storage_dead_on_unwind { - debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local); - debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind); - unwind_to = unwind_drops.drops[unwind_to].1; + debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local); + debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind); + unwind_to = unwind_drops.drops[unwind_to].next; } // Only temps and vars need their storage dead. assert!(local.index() > arg_count); @@ -1335,30 +1362,31 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { let is_coroutine = self.coroutine.is_some(); // Link the exit drop tree to unwind drop tree. 
- if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) { + if drops.drops.iter().any(|drop_node| drop_node.data.kind == DropKind::Value) { let unwind_target = self.diverge_cleanup_target(else_scope, span); let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1); - for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) { - match drop_data.0.kind { + for (drop_idx, drop_node) in drops.drops.iter_enumerated().skip(1) { + match drop_node.data.kind { DropKind::Storage => { if is_coroutine { let unwind_drop = self .scopes .unwind_drops - .add_drop(drop_data.0, unwind_indices[drop_data.1]); + .add_drop(drop_node.data, unwind_indices[drop_node.next]); unwind_indices.push(unwind_drop); } else { - unwind_indices.push(unwind_indices[drop_data.1]); + unwind_indices.push(unwind_indices[drop_node.next]); } } DropKind::Value => { let unwind_drop = self .scopes .unwind_drops - .add_drop(drop_data.0, unwind_indices[drop_data.1]); - self.scopes - .unwind_drops - .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]); + .add_drop(drop_node.data, unwind_indices[drop_node.next]); + self.scopes.unwind_drops.add_entry_point( + blocks[drop_idx].unwrap(), + unwind_indices[drop_node.next], + ); unwind_indices.push(unwind_drop); } } @@ -1408,10 +1436,10 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { // prevent drop elaboration from creating drop flags that would have // to be captured by the coroutine. I'm not sure how important this // optimization is, but it is here. - for (drop_idx, drop_data) in drops.drops.iter_enumerated() { - if let DropKind::Value = drop_data.0.kind { - debug_assert!(drop_data.1 < drops.drops.next_index()); - drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap())); + for (drop_idx, drop_node) in drops.drops.iter_enumerated() { + if let DropKind::Value = drop_node.data.kind { + debug_assert!(drop_node.next < drops.drops.next_index()); + drops.entry_points.push((drop_node.next, blocks[drop_idx].unwrap())); } } Self::build_unwind_tree(cfg, drops, fn_span, resume_block); @@ -1442,8 +1470,16 @@ impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes { fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { cfg.start_new_block() } - fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { - cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to }; + fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + // There should be an existing terminator with real source info and a + // dummy TerminatorKind. Replace it with a proper goto. + // (The dummy is added by `break_scope` and `break_for_else`.) + let term = cfg.block_data_mut(from).terminator_mut(); + if let TerminatorKind::UnwindResume = term.kind { + term.kind = TerminatorKind::Goto { target: to }; + } else { + span_bug!(term.source_info.span, "unexpected dummy terminator kind: {:?}", term.kind); + } } } @@ -1453,7 +1489,7 @@ impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop { fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { cfg.start_new_block() } - fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { let term = cfg.block_data_mut(from).terminator_mut(); if let TerminatorKind::Yield { ref mut drop, .. 
} = term.kind { *drop = Some(to); @@ -1473,7 +1509,7 @@ impl<'tcx> DropTreeBuilder<'tcx> for Unwind { fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { cfg.start_new_cleanup_block() } - fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { let term = &mut cfg.block_data_mut(from).terminator_mut(); match &mut term.kind { TerminatorKind::Drop { unwind, .. } => { diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs index 545db5138a3..2edf2111de7 100644 --- a/compiler/rustc_parse/src/parser/path.rs +++ b/compiler/rustc_parse/src/parser/path.rs @@ -1,6 +1,7 @@ use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign}; use super::{Parser, Restrictions, TokenType}; use crate::errors::PathSingleColon; +use crate::parser::{CommaRecoveryMode, RecoverColon, RecoverComma}; use crate::{errors, maybe_whole}; use ast::token::IdentIsRaw; use rustc_ast::ptr::P; @@ -10,7 +11,7 @@ use rustc_ast::{ AssocConstraintKind, BlockCheckMode, GenericArg, GenericArgs, Generics, ParenthesizedArgs, Path, PathSegment, QSelf, }; -use rustc_errors::{Applicability, PResult}; +use rustc_errors::{Applicability, Diag, PResult}; use rustc_span::symbol::{kw, sym, Ident}; use rustc_span::{BytePos, Span}; use std::mem; @@ -373,7 +374,38 @@ impl<'a> Parser<'a> { .into() } else { // `(T, U) -> R` - let (inputs, _) = self.parse_paren_comma_seq(|p| p.parse_ty())?; + + let prev_token_before_parsing = self.prev_token.clone(); + let token_before_parsing = self.token.clone(); + let mut snapshot = None; + if self.may_recover() + && prev_token_before_parsing.kind == token::ModSep + && (style == PathStyle::Expr && self.token.can_begin_expr() + || style == PathStyle::Pat && self.token.can_begin_pattern()) + { + snapshot = Some(self.create_snapshot_for_diagnostic()); + } + + let (inputs, _) = match self.parse_paren_comma_seq(|p| p.parse_ty()) { + Ok(output) => output, + Err(mut error) if prev_token_before_parsing.kind == token::ModSep => { + error.span_label( + prev_token_before_parsing.span.to(token_before_parsing.span), + "while parsing this parenthesized list of type arguments starting here", + ); + + if let Some(mut snapshot) = snapshot { + snapshot.recover_fn_call_leading_path_sep( + style, + prev_token_before_parsing, + &mut error, + ) + } + + return Err(error); + } + Err(error) => return Err(error), + }; let inputs_span = lo.to(self.prev_token.span); let output = self.parse_ret_ty(AllowPlus::No, RecoverQPath::No, RecoverReturnSign::No)?; @@ -399,6 +431,56 @@ impl<'a> Parser<'a> { } } + /// Recover `$path::(...)` as `$path(...)`. + /// + /// ```ignore (diagnostics) + /// foo::(420, "bar") + /// ^^ remove extra separator to make the function call + /// // or + /// match x { + /// Foo::(420, "bar") => { ... }, + /// ^^ remove extra separator to turn this into tuple struct pattern + /// _ => { ... 
}, + /// } + /// ``` + fn recover_fn_call_leading_path_sep( + &mut self, + style: PathStyle, + prev_token_before_parsing: Token, + error: &mut Diag<'_>, + ) { + if ((style == PathStyle::Expr && self.parse_paren_comma_seq(|p| p.parse_expr()).is_ok()) + || (style == PathStyle::Pat + && self + .parse_paren_comma_seq(|p| { + p.parse_pat_allow_top_alt( + None, + RecoverComma::No, + RecoverColon::No, + CommaRecoveryMode::LikelyTuple, + ) + }) + .is_ok())) + && !matches!(self.token.kind, token::ModSep | token::RArrow) + { + error.span_suggestion_verbose( + prev_token_before_parsing.span, + format!( + "consider removing the `::` here to {}", + match style { + PathStyle::Expr => "call the expression", + PathStyle::Pat => "turn this into a tuple struct pattern", + _ => { + return; + } + } + ), + "", + Applicability::MaybeIncorrect, + ); + } + } + /// Parses generic args (within a path segment) with recovery for extra leading angle brackets. /// For the purposes of understanding the parsing logic of generic arguments, this function /// can be thought of being the same as just calling `self.parse_angle_args()` if the source diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 1573920c8c0..b947ac81805 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -8,7 +8,7 @@ pub use crate::options::*; use crate::errors::FileWriteFail; use crate::search_paths::SearchPath; use crate::utils::{CanonicalizedPath, NativeLib, NativeLibKind}; -use crate::{lint, HashStableContext}; +use crate::{filesearch, lint, HashStableContext}; use crate::{EarlyDiagCtxt, Session}; use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; use rustc_data_structures::stable_hasher::{StableOrd, ToStableHashKey}; @@ -1564,7 +1564,7 @@ pub fn build_configuration(sess: &Session, mut user_cfg: Cfg) -> Cfg { user_cfg } -pub(super) fn build_target_config( +pub fn build_target_config( early_dcx: &EarlyDiagCtxt, opts: &Options, target_override: Option<Target>, @@ -2863,16 +2863,8 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M let logical_env = parse_logical_env(early_dcx, matches); - // Try to find a directory containing the Rust `src`, for more details see - // the doc comment on the `real_rust_source_base_dir` field. - let tmp_buf; - let sysroot = match &sysroot_opt { - Some(s) => s, - None => { - tmp_buf = crate::filesearch::get_or_default_sysroot().expect("Failed finding sysroot"); - &tmp_buf - } - }; + let sysroot = filesearch::materialize_sysroot(sysroot_opt); + let real_rust_source_base_dir = { // This is the location used by the `rust-src` `rustup` component. let mut candidate = sysroot.join("lib/rustlib/src/rust"); @@ -2916,7 +2908,7 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M describe_lints, output_types, search_paths, - maybe_sysroot: sysroot_opt, + maybe_sysroot: Some(sysroot), target_triple, test, incremental, diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs index 2456b4b3a15..4f0e3354680 100644 --- a/compiler/rustc_session/src/filesearch.rs +++ b/compiler/rustc_session/src/filesearch.rs @@ -193,6 +193,12 @@ pub fn sysroot_candidates() -> SmallVec<[PathBuf; 2]> { return sysroot_candidates; } +/// Returns the provided sysroot or calls [`get_or_default_sysroot`] if it's none. +/// Panics if [`get_or_default_sysroot`] returns an error. 
+pub fn materialize_sysroot(maybe_sysroot: Option<PathBuf>) -> PathBuf { + maybe_sysroot.unwrap_or_else(|| get_or_default_sysroot().expect("Failed finding sysroot")) +} + /// This function checks if sysroot is found using env::args().next(), and if it /// is not found, finds sysroot from current rustc_driver dll. pub fn get_or_default_sysroot() -> Result<PathBuf, String> { diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 93bef82e4ba..ea4b8f2463e 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -1772,7 +1772,7 @@ options! { "disable the 'leak check' for subtyping; unsound, but useful for tests"), no_link: bool = (false, parse_no_flag, [TRACKED], "compile without linking"), - no_parallel_llvm: bool = (false, parse_no_flag, [UNTRACKED], + no_parallel_backend: bool = (false, parse_no_flag, [UNTRACKED], "run LLVM in non-parallel mode (while keeping codegen-units and ThinLTO)"), no_profiler_runtime: bool = (false, parse_no_flag, [TRACKED], "prevent automatic injection of the profiler_builtins crate"), diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index e9b8a300939..ab636b14d19 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -1016,7 +1016,8 @@ pub fn build_session( fluent_resources: Vec<&'static str>, driver_lint_caps: FxHashMap<lint::LintId, lint::Level>, file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>, - target_override: Option<Target>, + target_cfg: Target, + sysroot: PathBuf, cfg_version: &'static str, ice_file: Option<PathBuf>, using_internal_features: Arc<AtomicBool>, @@ -1033,12 +1034,6 @@ pub fn build_session( let cap_lints_allow = sopts.lint_cap.is_some_and(|cap| cap == lint::Allow); let can_emit_warnings = !(warnings_allow || cap_lints_allow); - let sysroot = match &sopts.maybe_sysroot { - Some(sysroot) => sysroot.clone(), - None => filesearch::get_or_default_sysroot().expect("Failed finding sysroot"), - }; - - let target_cfg = config::build_target_config(&early_dcx, &sopts, target_override, &sysroot); let host_triple = TargetTriple::from_triple(config::host_triple()); let (host, target_warnings) = Target::search(&host_triple, &sysroot).unwrap_or_else(|e| { early_dcx.early_fatal(format!("Error loading host specification: {e}")) diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 6552810d113..6bca86af30c 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -123,6 +123,8 @@ pub enum LinkerFlavor { Bpf, /// Linker tool for Nvidia PTX. Ptx, + /// LLVM bitcode linker that can be used as a `self-contained` linker + Llbc, } /// Linker flavors available externally through command line (`-Clinker-flavor`) @@ -141,6 +143,7 @@ pub enum LinkerFlavorCli { EmCc, Bpf, Ptx, + Llbc, // Legacy stable values Gcc, @@ -160,6 +163,7 @@ impl LinkerFlavorCli { | LinkerFlavorCli::Msvc(Lld::Yes) | LinkerFlavorCli::EmCc | LinkerFlavorCli::Bpf + | LinkerFlavorCli::Llbc | LinkerFlavorCli::Ptx => true, LinkerFlavorCli::Gcc | LinkerFlavorCli::Ld @@ -219,6 +223,7 @@ impl LinkerFlavor { LinkerFlavorCli::Msvc(lld) => LinkerFlavor::Msvc(lld), LinkerFlavorCli::EmCc => LinkerFlavor::EmCc, LinkerFlavorCli::Bpf => LinkerFlavor::Bpf, + LinkerFlavorCli::Llbc => LinkerFlavor::Llbc, LinkerFlavorCli::Ptx => LinkerFlavor::Ptx, // Below: legacy stable values @@ -258,6 +263,7 @@ impl LinkerFlavor { LinkerFlavor::Msvc(..) 
=> LinkerFlavorCli::Msvc(Lld::No), LinkerFlavor::EmCc => LinkerFlavorCli::Em, LinkerFlavor::Bpf => LinkerFlavorCli::Bpf, + LinkerFlavor::Llbc => LinkerFlavorCli::Llbc, LinkerFlavor::Ptx => LinkerFlavorCli::Ptx, } } @@ -272,6 +278,7 @@ impl LinkerFlavor { LinkerFlavor::Msvc(lld) => LinkerFlavorCli::Msvc(lld), LinkerFlavor::EmCc => LinkerFlavorCli::EmCc, LinkerFlavor::Bpf => LinkerFlavorCli::Bpf, + LinkerFlavor::Llbc => LinkerFlavorCli::Llbc, LinkerFlavor::Ptx => LinkerFlavorCli::Ptx, } } @@ -286,6 +293,7 @@ impl LinkerFlavor { LinkerFlavorCli::Msvc(lld) => (Some(Cc::No), Some(lld)), LinkerFlavorCli::EmCc => (Some(Cc::Yes), Some(Lld::Yes)), LinkerFlavorCli::Bpf | LinkerFlavorCli::Ptx => (None, None), + LinkerFlavorCli::Llbc => (None, None), // Below: legacy stable values LinkerFlavorCli::Gcc => (Some(Cc::Yes), None), @@ -340,7 +348,7 @@ impl LinkerFlavor { LinkerFlavor::WasmLld(cc) => LinkerFlavor::WasmLld(cc_hint.unwrap_or(cc)), LinkerFlavor::Unix(cc) => LinkerFlavor::Unix(cc_hint.unwrap_or(cc)), LinkerFlavor::Msvc(lld) => LinkerFlavor::Msvc(lld_hint.unwrap_or(lld)), - LinkerFlavor::EmCc | LinkerFlavor::Bpf | LinkerFlavor::Ptx => self, + LinkerFlavor::EmCc | LinkerFlavor::Bpf | LinkerFlavor::Llbc | LinkerFlavor::Ptx => self, } } @@ -355,8 +363,8 @@ impl LinkerFlavor { pub fn check_compatibility(self, cli: LinkerFlavorCli) -> Option<String> { let compatible = |cli| { // The CLI flavor should be compatible with the target if: - // 1. they are counterparts: they have the same principal flavor. match (self, cli) { + // 1. they are counterparts: they have the same principal flavor. (LinkerFlavor::Gnu(..), LinkerFlavorCli::Gnu(..)) | (LinkerFlavor::Darwin(..), LinkerFlavorCli::Darwin(..)) | (LinkerFlavor::WasmLld(..), LinkerFlavorCli::WasmLld(..)) @@ -364,11 +372,14 @@ impl LinkerFlavor { | (LinkerFlavor::Msvc(..), LinkerFlavorCli::Msvc(..)) | (LinkerFlavor::EmCc, LinkerFlavorCli::EmCc) | (LinkerFlavor::Bpf, LinkerFlavorCli::Bpf) + | (LinkerFlavor::Llbc, LinkerFlavorCli::Llbc) | (LinkerFlavor::Ptx, LinkerFlavorCli::Ptx) => return true, + // 2. The linker flavor is independent of target and compatible + (LinkerFlavor::Ptx, LinkerFlavorCli::Llbc) => return true, _ => {} } - // 2. or, the flavor is legacy and survives this roundtrip. + // 3. or, the flavor is legacy and survives this roundtrip. cli == self.with_cli_hints(cli).to_cli() }; (!compatible(cli)).then(|| { @@ -387,6 +398,7 @@ impl LinkerFlavor { | LinkerFlavor::Unix(..) | LinkerFlavor::EmCc | LinkerFlavor::Bpf + | LinkerFlavor::Llbc | LinkerFlavor::Ptx => LldFlavor::Ld, LinkerFlavor::Darwin(..) => LldFlavor::Ld64, LinkerFlavor::WasmLld(..) => LldFlavor::Wasm, @@ -412,6 +424,7 @@ impl LinkerFlavor { | LinkerFlavor::Msvc(_) | LinkerFlavor::Unix(_) | LinkerFlavor::Bpf + | LinkerFlavor::Llbc | LinkerFlavor::Ptx => false, } } @@ -431,6 +444,7 @@ impl LinkerFlavor { | LinkerFlavor::Msvc(_) | LinkerFlavor::Unix(_) | LinkerFlavor::Bpf + | LinkerFlavor::Llbc | LinkerFlavor::Ptx => false, } } @@ -480,6 +494,7 @@ linker_flavor_cli_impls! { (LinkerFlavorCli::Msvc(Lld::No)) "msvc" (LinkerFlavorCli::EmCc) "em-cc" (LinkerFlavorCli::Bpf) "bpf" + (LinkerFlavorCli::Llbc) "llbc" (LinkerFlavorCli::Ptx) "ptx" // Legacy stable flavors @@ -2070,6 +2085,14 @@ pub struct TargetOptions { /// Default number of codegen units to use in debug mode pub default_codegen_units: Option<u64>, + /// Default codegen backend used for this target. Defaults to `None`. 
+ /// + /// If `None`, then `CFG_DEFAULT_CODEGEN_BACKEND` environmental variable captured when + /// compiling `rustc` will be used instead (or llvm if it is not set). + /// + /// N.B. when *using* the compiler, backend can always be overriden with `-Zcodegen-backend`. + pub default_codegen_backend: Option<StaticCow<str>>, + /// Whether to generate trap instructions in places where optimization would /// otherwise produce control flow that falls through into unrelated memory. pub trap_unreachable: bool, @@ -2220,6 +2243,7 @@ fn add_link_args_iter( | LinkerFlavor::Unix(..) | LinkerFlavor::EmCc | LinkerFlavor::Bpf + | LinkerFlavor::Llbc | LinkerFlavor::Ptx => {} } } @@ -2376,6 +2400,7 @@ impl Default for TargetOptions { stack_probes: StackProbeType::None, min_global_align: None, default_codegen_units: None, + default_codegen_backend: None, trap_unreachable: true, requires_lto: false, singlethread: false, diff --git a/compiler/rustc_target/src/spec/targets/nvptx64_nvidia_cuda.rs b/compiler/rustc_target/src/spec/targets/nvptx64_nvidia_cuda.rs index e54661b29d7..0b0b31b503b 100644 --- a/compiler/rustc_target/src/spec/targets/nvptx64_nvidia_cuda.rs +++ b/compiler/rustc_target/src/spec/targets/nvptx64_nvidia_cuda.rs @@ -1,3 +1,4 @@ +use crate::spec::LinkSelfContainedDefault; use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, Target, TargetOptions}; pub fn target() -> Target { @@ -52,6 +53,9 @@ pub fn target() -> Target { // The LLVM backend does not support stack canaries for this target supports_stack_protector: false, + // Support using `self-contained` linkers like the llvm-bitcode-linker + link_self_contained: LinkSelfContainedDefault::True, + ..Default::default() }, } diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs index 3fefd60f7cd..3be18ef3127 100644 --- a/compiler/rustc_target/src/spec/tests/tests_impl.rs +++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs @@ -56,7 +56,10 @@ impl Target { LinkerFlavor::Msvc(..) => { assert_matches!(flavor, LinkerFlavor::Msvc(..)) } - LinkerFlavor::EmCc | LinkerFlavor::Bpf | LinkerFlavor::Ptx => { + LinkerFlavor::EmCc + | LinkerFlavor::Bpf + | LinkerFlavor::Ptx + | LinkerFlavor::Llbc => { assert_eq!(flavor, self.linker_flavor) } } |
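
Editor's note (not part of the commit): the `DropTree` cleanup in `rustc_mir_build` above mostly replaces positional tuple accesses (`.0`/`.1`) with named fields (`.data`/`.next`) and renames `add_entry` to `add_entry_point`. The following is a minimal, self-contained sketch of that bookkeeping shape, using simplified stand-in types (`DropData`, `DropIdx`, `BasicBlock` here are not the real compiler definitions):

```rust
// Simplified model of the drop-tree bookkeeping touched by this diff:
// each drop is a node with named `data`/`next` fields instead of a
// `(DropData, DropIdx)` tuple, and entry points are registered via
// `add_entry_point`.

#[derive(Clone, Copy, Debug, PartialEq)]
enum DropKind { Value, Storage }

#[derive(Clone, Copy, Debug)]
struct DropData { local: u32, kind: DropKind }

type DropIdx = usize;
type BasicBlock = usize;

struct DropNode {
    data: DropData, // what to drop (local + kind)
    next: DropIdx,  // parent node in the drop tree
}

struct DropTree {
    drops: Vec<DropNode>,
    entry_points: Vec<(DropIdx, BasicBlock)>,
}

impl DropTree {
    fn new() -> Self {
        // Index 0 acts as the root of the tree.
        let root = DropNode { data: DropData { local: 0, kind: DropKind::Storage }, next: 0 };
        DropTree { drops: vec![root], entry_points: Vec::new() }
    }

    fn add_drop(&mut self, data: DropData, next: DropIdx) -> DropIdx {
        self.drops.push(DropNode { data, next });
        self.drops.len() - 1
    }

    // Renamed from `add_entry` in the diff.
    fn add_entry_point(&mut self, from: BasicBlock, to: DropIdx) {
        self.entry_points.push((to, from));
    }
}

fn main() {
    let mut tree = DropTree::new();
    let idx = tree.add_drop(DropData { local: 3, kind: DropKind::Value }, 0);
    tree.add_entry_point(7, idx);
    // Named fields replace the old tuple accesses `.0` / `.1`.
    assert_eq!(tree.drops[idx].next, 0);
    assert_eq!(tree.drops[idx].data.kind, DropKind::Value);
    println!("drop node {idx} -> parent {}", tree.drops[idx].next);
}
```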

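Editor's note (not part of the commit): the new `default_codegen_backend` field documented above implies a lookup order of explicit `-Zcodegen-backend`, then the target's default, then the `CFG_DEFAULT_CODEGEN_BACKEND` value captured when rustc was built, then LLVM. The sketch below only illustrates that precedence; `pick_backend` and its parameters are made-up names, not rustc APIs.

```rust
// Illustrative precedence check, mirroring the doc comment on
// `default_codegen_backend`: CLI override > target default > baked-in
// default > "llvm".
fn pick_backend(
    cli_override: Option<&str>,
    target_default: Option<&str>,
    baked_in_default: Option<&str>,
) -> String {
    cli_override
        .or(target_default)
        .or(baked_in_default)
        .unwrap_or("llvm")
        .to_string()
}

fn main() {
    assert_eq!(pick_backend(None, None, None), "llvm");
    assert_eq!(pick_backend(None, Some("cranelift"), None), "cranelift");
    assert_eq!(pick_backend(Some("llvm"), Some("cranelift"), None), "llvm");
}
```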