Diffstat (limited to 'compiler')
34 files changed, 555 insertions, 241 deletions
diff --git a/compiler/rustc_attr_parsing/messages.ftl b/compiler/rustc_attr_parsing/messages.ftl index 40e9d597530..8f24b51f1d9 100644 --- a/compiler/rustc_attr_parsing/messages.ftl +++ b/compiler/rustc_attr_parsing/messages.ftl @@ -86,6 +86,12 @@ attr_parsing_invalid_repr_hint_no_value = attr_parsing_invalid_since = 'since' must be a Rust version number, such as "1.31.0" +attr_parsing_invalid_style = {$is_used_as_inner -> + [false] crate-level attribute should be an inner attribute: add an exclamation mark: `#![{$name}]` + *[other] the `#![{$name}]` attribute can only be used at the crate root + } + .note = This attribute does not have an `!`, which means it is applied to this {$target} + attr_parsing_link_ordinal_out_of_range = ordinal value in `link_ordinal` is too large: `{$ordinal}` .note = the value may not exceed `u16::MAX` diff --git a/compiler/rustc_attr_parsing/src/attributes/crate_level.rs b/compiler/rustc_attr_parsing/src/attributes/crate_level.rs index 9175d7479e1..2fed09b85e8 100644 --- a/compiler/rustc_attr_parsing/src/attributes/crate_level.rs +++ b/compiler/rustc_attr_parsing/src/attributes/crate_level.rs @@ -1,3 +1,5 @@ +use rustc_feature::AttributeType; + use super::prelude::*; pub(crate) struct CrateNameParser; @@ -7,6 +9,7 @@ impl<S: Stage> SingleAttributeParser<S> for CrateNameParser { const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError; const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name"); + const TYPE: AttributeType = AttributeType::CrateLevel; // FIXME: crate name is allowed on all targets and ignored, // even though it should only be valid on crates of course diff --git a/compiler/rustc_attr_parsing/src/attributes/mod.rs b/compiler/rustc_attr_parsing/src/attributes/mod.rs index 9dad9c893f0..043bc925eac 100644 --- a/compiler/rustc_attr_parsing/src/attributes/mod.rs +++ b/compiler/rustc_attr_parsing/src/attributes/mod.rs @@ -12,11 +12,15 @@ //! - [`CombineAttributeParser`](crate::attributes::CombineAttributeParser): makes it easy to implement an attribute which should combine the //! contents of attributes, if an attribute appear multiple times in a list //! +//! By default, attributes are allowed anywhere. When adding an attribute that should only be used +//! at the crate root, consider setting the `TYPE` in the parser trait to +//! [`AttributeType::CrateLevel`](rustc_feature::AttributeType::CrateLevel). +//! //! Attributes should be added to `crate::context::ATTRIBUTE_PARSERS` to be parsed. use std::marker::PhantomData; -use rustc_feature::{AttributeTemplate, template}; +use rustc_feature::{AttributeTemplate, AttributeType, template}; use rustc_hir::attrs::AttributeKind; use rustc_span::{Span, Symbol}; use thin_vec::ThinVec; @@ -88,6 +92,8 @@ pub(crate) trait AttributeParser<S: Stage>: Default + 'static { const ALLOWED_TARGETS: AllowedTargets; + const TYPE: AttributeType = AttributeType::Normal; + /// The parser has gotten a chance to accept the attributes on an item, /// here it can produce an attribute. /// @@ -129,6 +135,8 @@ pub(crate) trait SingleAttributeParser<S: Stage>: 'static { /// The template this attribute parser should implement. Used for diagnostics. 
const TEMPLATE: AttributeTemplate; + const TYPE: AttributeType = AttributeType::Normal; + /// Converts a single syntactical attribute to a single semantic attribute, or [`AttributeKind`] fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind>; } @@ -175,6 +183,8 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S> )]; const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS; + const TYPE: AttributeType = T::TYPE; + fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> { Some(self.1?.0) } @@ -259,6 +269,7 @@ pub(crate) trait NoArgsAttributeParser<S: Stage>: 'static { const PATH: &[Symbol]; const ON_DUPLICATE: OnDuplicate<S>; const ALLOWED_TARGETS: AllowedTargets; + const TYPE: AttributeType = AttributeType::Normal; /// Create the [`AttributeKind`] given attribute's [`Span`]. const CREATE: fn(Span) -> AttributeKind; @@ -278,6 +289,7 @@ impl<T: NoArgsAttributeParser<S>, S: Stage> SingleAttributeParser<S> for Without const ON_DUPLICATE: OnDuplicate<S> = T::ON_DUPLICATE; const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS; const TEMPLATE: AttributeTemplate = template!(Word); + const TYPE: AttributeType = T::TYPE; fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> { if let Err(span) = args.no_args() { @@ -311,6 +323,8 @@ pub(crate) trait CombineAttributeParser<S: Stage>: 'static { /// The template this attribute parser should implement. Used for diagnostics. const TEMPLATE: AttributeTemplate; + const TYPE: AttributeType = AttributeType::Normal; + /// Converts a single syntactical attribute to a number of elements of the semantic attribute, or [`AttributeKind`] fn extend<'c>( cx: &'c mut AcceptContext<'_, '_, S>, @@ -346,6 +360,7 @@ impl<T: CombineAttributeParser<S>, S: Stage> AttributeParser<S> for Combine<T, S group.items.extend(T::extend(cx, args)) })]; const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS; + const TYPE: AttributeType = T::TYPE; fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> { if let Some(first_span) = self.first_span { diff --git a/compiler/rustc_attr_parsing/src/attributes/traits.rs b/compiler/rustc_attr_parsing/src/attributes/traits.rs index 89ac1b07d16..fbd9a480fbb 100644 --- a/compiler/rustc_attr_parsing/src/attributes/traits.rs +++ b/compiler/rustc_attr_parsing/src/attributes/traits.rs @@ -1,5 +1,7 @@ use std::mem; +use rustc_feature::AttributeType; + use super::prelude::*; use crate::attributes::{ AttributeOrder, NoArgsAttributeParser, OnDuplicate, SingleAttributeParser, @@ -154,6 +156,7 @@ impl<S: Stage> NoArgsAttributeParser<S> for CoherenceIsCoreParser { const PATH: &[Symbol] = &[sym::rustc_coherence_is_core]; const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]); + const TYPE: AttributeType = AttributeType::CrateLevel; const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::CoherenceIsCore; } diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index d4b9cfe00ad..b16ef7edd64 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -4,12 +4,12 @@ use std::ops::{Deref, DerefMut}; use std::sync::LazyLock; use private::Sealed; -use rustc_ast::{AttrStyle, MetaItemLit, NodeId}; +use rustc_ast::{AttrStyle, CRATE_NODE_ID, MetaItemLit, NodeId}; use rustc_errors::{Diag, Diagnostic, Level}; -use 
rustc_feature::AttributeTemplate; +use rustc_feature::{AttributeTemplate, AttributeType}; use rustc_hir::attrs::AttributeKind; use rustc_hir::lints::{AttributeLint, AttributeLintKind}; -use rustc_hir::{AttrPath, HirId}; +use rustc_hir::{AttrPath, CRATE_HIR_ID, HirId}; use rustc_session::Session; use rustc_span::{ErrorGuaranteed, Span, Symbol}; @@ -80,6 +80,7 @@ pub(super) struct GroupTypeInnerAccept<S: Stage> { pub(super) template: AttributeTemplate, pub(super) accept_fn: AcceptFn<S>, pub(super) allowed_targets: AllowedTargets, + pub(super) attribute_type: AttributeType, } type AcceptFn<S> = @@ -129,6 +130,7 @@ macro_rules! attribute_parsers { }) }), allowed_targets: <$names as crate::attributes::AttributeParser<$stage>>::ALLOWED_TARGETS, + attribute_type: <$names as crate::attributes::AttributeParser<$stage>>::TYPE, }); } @@ -250,6 +252,8 @@ pub trait Stage: Sized + 'static + Sealed { ) -> ErrorGuaranteed; fn should_emit(&self) -> ShouldEmit; + + fn id_is_crate_root(id: Self::Id) -> bool; } // allow because it's a sealed trait @@ -271,6 +275,10 @@ impl Stage for Early { fn should_emit(&self) -> ShouldEmit { self.emit_errors } + + fn id_is_crate_root(id: Self::Id) -> bool { + id == CRATE_NODE_ID + } } // allow because it's a sealed trait @@ -292,6 +300,10 @@ impl Stage for Late { fn should_emit(&self) -> ShouldEmit { ShouldEmit::ErrorsAndLints } + + fn id_is_crate_root(id: Self::Id) -> bool { + id == CRATE_HIR_ID + } } /// used when parsing attributes for miscellaneous things *before* ast lowering diff --git a/compiler/rustc_attr_parsing/src/interface.rs b/compiler/rustc_attr_parsing/src/interface.rs index 60523c2877c..a3558850ef3 100644 --- a/compiler/rustc_attr_parsing/src/interface.rs +++ b/compiler/rustc_attr_parsing/src/interface.rs @@ -271,8 +271,8 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> { }; (accept.accept_fn)(&mut cx, args); - - if !matches!(self.stage.should_emit(), ShouldEmit::Nothing) { + if !matches!(cx.stage.should_emit(), ShouldEmit::Nothing) { + Self::check_type(accept.attribute_type, target, &mut cx); self.check_target( path.get_attribute_path(), attr.span, diff --git a/compiler/rustc_attr_parsing/src/lints.rs b/compiler/rustc_attr_parsing/src/lints.rs index 84ae19c4fc6..b1a971eec32 100644 --- a/compiler/rustc_attr_parsing/src/lints.rs +++ b/compiler/rustc_attr_parsing/src/lints.rs @@ -66,5 +66,19 @@ pub fn emit_attribute_lint<L: LintEmitter>(lint: &AttributeLint<L::Id>, lint_emi attr_span: *span, }, ), + + &AttributeLintKind::InvalidStyle { ref name, is_used_as_inner, target, target_span } => { + lint_emitter.emit_node_span_lint( + rustc_session::lint::builtin::UNUSED_ATTRIBUTES, + *id, + *span, + session_diagnostics::InvalidAttrStyle { + name: name.clone(), + is_used_as_inner, + target_span: (!is_used_as_inner).then_some(target_span), + target, + }, + ) + } } } diff --git a/compiler/rustc_attr_parsing/src/session_diagnostics.rs b/compiler/rustc_attr_parsing/src/session_diagnostics.rs index 72bee0ddfbf..a639b55e81f 100644 --- a/compiler/rustc_attr_parsing/src/session_diagnostics.rs +++ b/compiler/rustc_attr_parsing/src/session_diagnostics.rs @@ -6,7 +6,7 @@ use rustc_errors::{ Applicability, Diag, DiagArgValue, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, }; use rustc_feature::AttributeTemplate; -use rustc_hir::AttrPath; +use rustc_hir::{AttrPath, Target}; use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; use rustc_span::{Span, Symbol}; @@ -826,3 +826,13 @@ pub(crate) struct SuffixedLiteralInAttribute { #[primary_span] pub span: Span, } + 
+#[derive(LintDiagnostic)] +#[diag(attr_parsing_invalid_style)] +pub(crate) struct InvalidAttrStyle { + pub name: AttrPath, + pub is_used_as_inner: bool, + #[note] + pub target_span: Option<Span>, + pub target: Target, +} diff --git a/compiler/rustc_attr_parsing/src/target_checking.rs b/compiler/rustc_attr_parsing/src/target_checking.rs index 9568b791b3f..6f4dd76fa81 100644 --- a/compiler/rustc_attr_parsing/src/target_checking.rs +++ b/compiler/rustc_attr_parsing/src/target_checking.rs @@ -1,13 +1,14 @@ use std::borrow::Cow; +use rustc_ast::AttrStyle; use rustc_errors::DiagArgValue; -use rustc_feature::Features; +use rustc_feature::{AttributeType, Features}; use rustc_hir::lints::{AttributeLint, AttributeLintKind}; use rustc_hir::{AttrPath, MethodKind, Target}; use rustc_span::Span; use crate::AttributeParser; -use crate::context::Stage; +use crate::context::{AcceptContext, Stage}; use crate::session_diagnostics::InvalidTarget; #[derive(Debug)] @@ -68,7 +69,7 @@ pub(crate) enum Policy { Error(Target), } -impl<S: Stage> AttributeParser<'_, S> { +impl<'sess, S: Stage> AttributeParser<'sess, S> { pub(crate) fn check_target( &self, attr_name: AttrPath, @@ -111,6 +112,32 @@ impl<S: Stage> AttributeParser<'_, S> { } } } + + pub(crate) fn check_type( + attribute_type: AttributeType, + target: Target, + cx: &mut AcceptContext<'_, 'sess, S>, + ) { + let is_crate_root = S::id_is_crate_root(cx.target_id); + + if is_crate_root { + return; + } + + if attribute_type != AttributeType::CrateLevel { + return; + } + + let lint = AttributeLintKind::InvalidStyle { + name: cx.attr_path.clone(), + is_used_as_inner: cx.attr_style == AttrStyle::Inner, + target, + target_span: cx.target_span, + }; + let attr_span = cx.attr_span; + + cx.emit_lint(lint, attr_span); + } } /// Takes a list of `allowed_targets` for an attribute, and the `target` the attribute was applied to. 
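For illustration, a minimal sketch of how an attribute parser opts into the new crate-root check, mirroring the `CoherenceIsCoreParser` change above; the attribute name and `AttributeKind` variant here are hypothetical and not part of this patch:

    impl<S: Stage> NoArgsAttributeParser<S> for MyCrateOnlyParser {
        const PATH: &[Symbol] = &[sym::my_crate_only]; // hypothetical attribute
        const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
        const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]);
        // Opting in: any value other than `AttributeType::CrateLevel` keeps the default behaviour.
        const TYPE: AttributeType = AttributeType::CrateLevel;
        const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::MyCrateOnly; // hypothetical variant
    }

With `TYPE` set, `check_type` fires the `UNUSED_ATTRIBUTES` lint (message `attr_parsing_invalid_style`) whenever the attribute lands on anything other than the crate root, distinguishing the outer-attribute case (suggesting `#![...]`) from a misplaced inner attribute.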
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index d558dfbc1c4..fcee6b6df62 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -29,7 +29,7 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::memmap::Mmap; -use rustc_errors::{DiagCtxtHandle, FatalError}; +use rustc_errors::DiagCtxtHandle; use rustc_middle::bug; use rustc_middle::dep_graph::WorkProduct; use rustc_session::config::Lto; @@ -51,12 +51,11 @@ fn prepare_lto( cgcx: &CodegenContext<GccCodegenBackend>, each_linked_rlib_for_lto: &[PathBuf], dcx: DiagCtxtHandle<'_>, -) -> Result<LtoData, FatalError> { +) -> LtoData { let tmp_path = match tempdir() { Ok(tmp_path) => tmp_path, Err(error) => { - eprintln!("Cannot create temporary directory: {}", error); - return Err(FatalError); + dcx.fatal(format!("Cannot create temporary directory: {}", error)); } }; @@ -91,15 +90,14 @@ fn prepare_lto( upstream_modules.push((module, CString::new(name).unwrap())); } Err(e) => { - dcx.emit_err(e); - return Err(FatalError); + dcx.emit_fatal(e); } } } } } - Ok(LtoData { upstream_modules, tmp_path }) + LtoData { upstream_modules, tmp_path } } fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> { @@ -114,10 +112,10 @@ pub(crate) fn run_fat( cgcx: &CodegenContext<GccCodegenBackend>, each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<GccCodegenBackend>>, -) -> Result<ModuleCodegen<GccContext>, FatalError> { +) -> ModuleCodegen<GccContext> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); - let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx)?; + let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx); /*let symbols_below_threshold = lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/ fat_lto( @@ -137,7 +135,7 @@ fn fat_lto( mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir, //symbols_below_threshold: &[String], -) -> Result<ModuleCodegen<GccContext>, FatalError> { +) -> ModuleCodegen<GccContext> { let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module"); info!("going for a fat lto"); @@ -261,7 +259,7 @@ fn fat_lto( // of now. 
module.module_llvm.temp_dir = Some(tmp_path); - Ok(module) + module } pub struct ModuleBuffer(PathBuf); @@ -286,10 +284,10 @@ pub(crate) fn run_thin( each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, ThinBuffer)>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, -) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> { +) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); - let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx)?; + let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx); if cgcx.opts.cg.linker_plugin_lto.enabled() { unreachable!( "We should never reach this case if the LTO step \ @@ -355,7 +353,7 @@ fn thin_lto( tmp_path: TempDir, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, //_symbols_below_threshold: &[String], -) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> { +) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) { let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis"); info!("going for that thin, thin LTO"); @@ -518,13 +516,13 @@ fn thin_lto( // TODO: save the directory so that it gets deleted later. std::mem::forget(tmp_path); - Ok((opt_jobs, copy_jobs)) + (opt_jobs, copy_jobs) } pub fn optimize_thin_module( thin_module: ThinModule<GccCodegenBackend>, _cgcx: &CodegenContext<GccCodegenBackend>, -) -> Result<ModuleCodegen<GccContext>, FatalError> { +) -> ModuleCodegen<GccContext> { //let dcx = cgcx.create_dcx(); //let module_name = &thin_module.shared.module_names[thin_module.idx]; @@ -634,7 +632,8 @@ pub fn optimize_thin_module( save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); } }*/ - Ok(module) + #[allow(clippy::let_and_return)] + module } pub struct ThinBuffer { diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs index c1231142c65..84bc7016271 100644 --- a/compiler/rustc_codegen_gcc/src/back/write.rs +++ b/compiler/rustc_codegen_gcc/src/back/write.rs @@ -6,7 +6,6 @@ use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, Mo use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; use rustc_fs_util::link_or_copy; use rustc_session::config::OutputType; -use rustc_span::fatal_error::FatalError; use rustc_target::spec::SplitDebuginfo; use crate::base::add_pic_option; @@ -17,7 +16,7 @@ pub(crate) fn codegen( cgcx: &CodegenContext<GccCodegenBackend>, module: ModuleCodegen<GccContext>, config: &ModuleConfig, -) -> Result<CompiledModule, FatalError> { +) -> CompiledModule { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); @@ -246,7 +245,7 @@ pub(crate) fn codegen( } } - Ok(module.into_compiled_module( + module.into_compiled_module( config.emit_obj != EmitObj::None, cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked, config.emit_bc, @@ -254,7 +253,7 @@ pub(crate) fn codegen( config.emit_ir, &cgcx.output_filenames, cgcx.invocation_temp.as_deref(), - )) + ) } pub(crate) fn save_temp_bitcode( diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 4025aba82da..2d7df79ba95 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -110,7 +110,6 @@ use rustc_middle::util::Providers; use rustc_session::Session; use rustc_session::config::{OptLevel, OutputFilenames}; use rustc_span::Symbol; -use rustc_span::fatal_error::FatalError; use rustc_target::spec::RelocModel; use 
tempfile::TempDir; @@ -362,7 +361,7 @@ impl WriteBackendMethods for GccCodegenBackend { _exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<Self>>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError> { + ) -> ModuleCodegen<Self::Module> { back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules) } @@ -373,7 +372,7 @@ impl WriteBackendMethods for GccCodegenBackend { each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, - ) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> { + ) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) { back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules) } @@ -390,15 +389,14 @@ impl WriteBackendMethods for GccCodegenBackend { _dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<(), FatalError> { + ) { module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); - Ok(()) } fn optimize_thin( cgcx: &CodegenContext<Self>, thin: ThinModule<Self>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError> { + ) -> ModuleCodegen<Self::Module> { back::lto::optimize_thin_module(thin, cgcx) } @@ -406,7 +404,7 @@ impl WriteBackendMethods for GccCodegenBackend { cgcx: &CodegenContext<Self>, module: ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<CompiledModule, FatalError> { + ) -> CompiledModule { back::write::codegen(cgcx, module, config) } diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 853d0295238..d85f432702c 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -14,7 +14,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::memmap::Mmap; -use rustc_errors::{DiagCtxtHandle, FatalError}; +use rustc_errors::DiagCtxtHandle; use rustc_middle::bug; use rustc_middle::dep_graph::WorkProduct; use rustc_session::config::{self, Lto}; @@ -36,7 +36,7 @@ fn prepare_lto( exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], dcx: DiagCtxtHandle<'_>, -) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> { +) -> (Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>) { let mut symbols_below_threshold = exported_symbols_for_lto .iter() .map(|symbol| CString::new(symbol.to_owned()).unwrap()) @@ -79,16 +79,13 @@ fn prepare_lto( let module = SerializedModule::FromRlib(data.to_vec()); upstream_modules.push((module, CString::new(name).unwrap())); } - Err(e) => { - dcx.emit_err(e); - return Err(FatalError); - } + Err(e) => dcx.emit_fatal(e), } } } } - Ok((symbols_below_threshold, upstream_modules)) + (symbols_below_threshold, upstream_modules) } fn get_bitcode_slice_from_object_data<'a>( @@ -123,11 +120,11 @@ pub(crate) fn run_fat( exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<LlvmCodegenBackend>>, -) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { +) -> ModuleCodegen<ModuleLlvm> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); let (symbols_below_threshold, upstream_modules) = - prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?; + prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx); let 
symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>(); fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold) @@ -142,11 +139,11 @@ pub(crate) fn run_thin( each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, ThinBuffer)>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, -) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> { +) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); let (symbols_below_threshold, upstream_modules) = - prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?; + prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx); let symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>(); if cgcx.opts.cg.linker_plugin_lto.enabled() { @@ -173,7 +170,7 @@ fn fat_lto( modules: Vec<FatLtoInput<LlvmCodegenBackend>>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, symbols_below_threshold: &[*const libc::c_char], -) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { +) -> ModuleCodegen<ModuleLlvm> { let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module"); info!("going for a fat lto"); @@ -224,7 +221,7 @@ fn fat_lto( assert!(!serialized_modules.is_empty(), "must have at least one serialized module"); let (buffer, name) = serialized_modules.remove(0); info!("no in-memory regular modules to choose from, parsing {:?}", name); - let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?; + let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx); ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module) } }; @@ -265,7 +262,9 @@ fn fat_lto( }); info!("linking {:?}", name); let data = bc_decoded.data(); - linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?; + linker + .add(data) + .unwrap_or_else(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name })); } drop(linker); save_temp_bitcode(cgcx, &module, "lto.input"); @@ -282,7 +281,7 @@ fn fat_lto( save_temp_bitcode(cgcx, &module, "lto.after-restriction"); } - Ok(module) + module } pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>); @@ -352,7 +351,7 @@ fn thin_lto( serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, symbols_below_threshold: &[*const libc::c_char], -) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> { +) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) { let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis"); unsafe { info!("going for that thin, thin LTO"); @@ -422,7 +421,7 @@ fn thin_lto( symbols_below_threshold.as_ptr(), symbols_below_threshold.len(), ) - .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?; + .unwrap_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext)); let data = ThinData(data); @@ -492,10 +491,10 @@ fn thin_lto( if let Some(path) = key_map_path && let Err(err) = curr_key_map.save_to_file(&path) { - return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err })); + write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }); } - Ok((opt_jobs, copy_jobs)) + (opt_jobs, copy_jobs) } } @@ -550,7 +549,7 @@ pub(crate) fn run_pass_manager( dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<ModuleLlvm>, thin: bool, -) -> Result<(), FatalError> { +) { let _timer = 
cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name); let config = cgcx.config(module.kind); @@ -582,7 +581,7 @@ pub(crate) fn run_pass_manager( } unsafe { - write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage); } if enable_gpu && !thin { @@ -596,7 +595,7 @@ pub(crate) fn run_pass_manager( let stage = write::AutodiffStage::PostAD; if !config.autodiff.contains(&config::AutoDiff::NoPostopt) { unsafe { - write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage); } } @@ -608,7 +607,6 @@ pub(crate) fn run_pass_manager( } debug!("lto done"); - Ok(()) } pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); @@ -701,7 +699,7 @@ impl Drop for ThinBuffer { pub(crate) fn optimize_thin_module( thin_module: ThinModule<LlvmCodegenBackend>, cgcx: &CodegenContext<LlvmCodegenBackend>, -) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { +) -> ModuleCodegen<ModuleLlvm> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); @@ -712,7 +710,7 @@ pub(crate) fn optimize_thin_module( // into that context. One day, however, we may do this for upstream // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. - let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?; + let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx); let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm); // Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here. if cgcx.config(ModuleKind::Regular).embed_bitcode() { @@ -746,7 +744,7 @@ pub(crate) fn optimize_thin_module( .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); + write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); } save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); } @@ -757,7 +755,7 @@ pub(crate) fn optimize_thin_module( .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); + write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); } save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); } @@ -768,7 +766,7 @@ pub(crate) fn optimize_thin_module( if unsafe { !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target.raw()) } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); + write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); } save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); } @@ -780,11 +778,11 @@ pub(crate) fn optimize_thin_module( // little differently. 
{ info!("running thin lto passes over {}", module.name); - run_pass_manager(cgcx, dcx, &mut module, true)?; + run_pass_manager(cgcx, dcx, &mut module, true); save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); } } - Ok(module) + module } /// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys @@ -850,9 +848,9 @@ pub(crate) fn parse_module<'a>( name: &CStr, data: &[u8], dcx: DiagCtxtHandle<'_>, -) -> Result<&'a llvm::Module, FatalError> { +) -> &'a llvm::Module { unsafe { llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()) - .ok_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode)) + .unwrap_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode)) } } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 7c65d667bda..7ea2ae6673b 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -20,7 +20,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_errors::{DiagCtxtHandle, FatalError, Level}; +use rustc_errors::{DiagCtxtHandle, Level}; use rustc_fs_util::{link_or_copy, path_to_c_string}; use rustc_middle::ty::TyCtxt; use rustc_session::Session; @@ -46,10 +46,10 @@ use crate::llvm::{self, DiagnosticInfo}; use crate::type_::Type; use crate::{LlvmCodegenBackend, ModuleLlvm, base, common, llvm_util}; -pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> FatalError { +pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> ! { match llvm::last_error() { - Some(llvm_err) => dcx.emit_almost_fatal(WithLlvmError(err, llvm_err)), - None => dcx.emit_almost_fatal(err), + Some(llvm_err) => dcx.emit_fatal(WithLlvmError(err, llvm_err)), + None => dcx.emit_fatal(err), } } @@ -63,7 +63,7 @@ fn write_output_file<'ll>( file_type: llvm::FileType, self_profiler_ref: &SelfProfilerRef, verify_llvm_ir: bool, -) -> Result<(), FatalError> { +) { debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output); let output_c = path_to_c_string(output); let dwo_output_c; @@ -100,7 +100,7 @@ fn write_output_file<'ll>( } } - result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output })) + result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output })) } pub(crate) fn create_informational_target_machine( @@ -112,7 +112,7 @@ pub(crate) fn create_informational_target_machine( // system/tcx is set up. 
let features = llvm_util::global_llvm_features(sess, false, only_base_features); target_machine_factory(sess, config::OptLevel::No, &features)(config) - .unwrap_or_else(|err| llvm_err(sess.dcx(), err).raise()) + .unwrap_or_else(|err| llvm_err(sess.dcx(), err)) } pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine { @@ -139,7 +139,7 @@ pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTar tcx.backend_optimization_level(()), tcx.global_backend_features(()), )(config) - .unwrap_or_else(|err| llvm_err(tcx.dcx(), err).raise()) + .unwrap_or_else(|err| llvm_err(tcx.dcx(), err)) } fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) { @@ -565,7 +565,7 @@ pub(crate) unsafe fn llvm_optimize( opt_level: config::OptLevel, opt_stage: llvm::OptStage, autodiff_stage: AutodiffStage, -) -> Result<(), FatalError> { +) { // Enzyme: // The whole point of compiler based AD is to differentiate optimized IR instead of unoptimized // source code. However, benchmarks show that optimizations increasing the code size @@ -704,7 +704,7 @@ pub(crate) unsafe fn llvm_optimize( llvm_plugins.len(), ) }; - result.into_result().map_err(|()| llvm_err(dcx, LlvmError::RunLlvmPasses)) + result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::RunLlvmPasses)) } // Unsafe due to LLVM calls. @@ -713,7 +713,7 @@ pub(crate) fn optimize( dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<ModuleLlvm>, config: &ModuleConfig, -) -> Result<(), FatalError> { +) { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name); let llcx = &*module.module_llvm.llcx; @@ -765,7 +765,7 @@ pub(crate) fn optimize( opt_stage, autodiff_stage, ) - }?; + }; if let Some(thin_lto_buffer) = thin_lto_buffer { let thin_lto_buffer = unsafe { ThinBuffer::from_raw_ptr(thin_lto_buffer) }; module.thin_lto_buffer = Some(thin_lto_buffer.data().to_vec()); @@ -793,14 +793,13 @@ pub(crate) fn optimize( } } } - Ok(()) } pub(crate) fn codegen( cgcx: &CodegenContext<LlvmCodegenBackend>, module: ModuleCodegen<ModuleLlvm>, config: &ModuleConfig, -) -> Result<CompiledModule, FatalError> { +) -> CompiledModule { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); @@ -909,7 +908,9 @@ pub(crate) fn codegen( record_artifact_size(&cgcx.prof, "llvm_ir", &out); } - result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }))?; + result + .into_result() + .unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out })); } if config.emit_asm { @@ -940,7 +941,7 @@ pub(crate) fn codegen( llvm::FileType::AssemblyFile, &cgcx.prof, config.verify_llvm_ir, - )?; + ); } match config.emit_obj { @@ -976,7 +977,7 @@ pub(crate) fn codegen( llvm::FileType::ObjectFile, &cgcx.prof, config.verify_llvm_ir, - )?; + ); } EmitObj::Bitcode => { @@ -1009,7 +1010,7 @@ pub(crate) fn codegen( && cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo != SplitDebuginfo::Off && cgcx.split_dwarf_kind == SplitDwarfKind::Split; - Ok(module.into_compiled_module( + module.into_compiled_module( config.emit_obj != EmitObj::None, dwarf_object_emitted, config.emit_bc, @@ -1017,7 +1018,7 @@ pub(crate) fn codegen( config.emit_ir, &cgcx.output_filenames, cgcx.invocation_temp.as_deref(), - )) + ) } fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> { diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 0fcf31d7993..628cb34fd9e 100644 --- 
a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -37,7 +37,7 @@ use rustc_codegen_ssa::back::write::{ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig}; use rustc_data_structures::fx::FxIndexMap; -use rustc_errors::{DiagCtxtHandle, FatalError}; +use rustc_errors::DiagCtxtHandle; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; @@ -165,15 +165,15 @@ impl WriteBackendMethods for LlvmCodegenBackend { exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<Self>>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError> { + ) -> ModuleCodegen<Self::Module> { let mut module = - back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules)?; + back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules); let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); - back::lto::run_pass_manager(cgcx, dcx, &mut module, false)?; + back::lto::run_pass_manager(cgcx, dcx, &mut module, false); - Ok(module) + module } fn run_thin_lto( cgcx: &CodegenContext<Self>, @@ -181,7 +181,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, - ) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> { + ) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) { back::lto::run_thin( cgcx, exported_symbols_for_lto, @@ -195,20 +195,20 @@ impl WriteBackendMethods for LlvmCodegenBackend { dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<(), FatalError> { + ) { back::write::optimize(cgcx, dcx, module, config) } fn optimize_thin( cgcx: &CodegenContext<Self>, thin: ThinModule<Self>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError> { + ) -> ModuleCodegen<Self::Module> { back::lto::optimize_thin_module(thin, cgcx) } fn codegen( cgcx: &CodegenContext<Self>, module: ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<CompiledModule, FatalError> { + ) -> CompiledModule { back::write::codegen(cgcx, module, config) } fn prepare_thin( @@ -407,12 +407,12 @@ impl ModuleLlvm { cgcx: &CodegenContext<LlvmCodegenBackend>, name: &str, dcx: DiagCtxtHandle<'_>, - ) -> Result<OwnedTargetMachine, FatalError> { + ) -> OwnedTargetMachine { let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name); match (cgcx.tm_factory)(tm_factory_config) { - Ok(m) => Ok(m), + Ok(m) => m, Err(e) => { - return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e))); + dcx.emit_fatal(ParseTargetMachineConfig(e)); } } } @@ -422,13 +422,13 @@ impl ModuleLlvm { name: &CStr, buffer: &[u8], dcx: DiagCtxtHandle<'_>, - ) -> Result<Self, FatalError> { + ) -> Self { unsafe { let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?; - let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx)?; + let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx); + let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx); - Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) }) + ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) } } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index fa2802a891f..7c79cba2273 
100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1929,11 +1929,17 @@ unsafe extern "C" { C: &Context, effects: MemoryEffects, ) -> &Attribute; + /// ## Safety + /// - Each of `LowerWords` and `UpperWords` must point to an array that is + /// long enough to fully define an integer of size `NumBits`, i.e. each + /// pointer must point to `NumBits.div_ceil(64)` elements or more. + /// - The implementation will make its own copy of the pointed-to `u64` + /// values, so the pointers only need to outlive this function call. pub(crate) fn LLVMRustCreateRangeAttribute( C: &Context, - num_bits: c_uint, - lower_words: *const u64, - upper_words: *const u64, + NumBits: c_uint, + LowerWords: *const u64, + UpperWords: *const u64, ) -> &Attribute; // Operations on functions diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 6adabe53129..d6974e22c85 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -112,16 +112,26 @@ pub(crate) fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> & pub(crate) fn CreateRangeAttr(llcx: &Context, size: Size, range: WrappingRange) -> &Attribute { let lower = range.start; + // LLVM treats the upper bound as exclusive, but allows wrapping. let upper = range.end.wrapping_add(1); - let lower_words = [lower as u64, (lower >> 64) as u64]; - let upper_words = [upper as u64, (upper >> 64) as u64]; + + // Pass each `u128` endpoint value as a `[u64; 2]` array, least-significant part first. + let as_u64_array = |x: u128| [x as u64, (x >> 64) as u64]; + let lower_words: [u64; 2] = as_u64_array(lower); + let upper_words: [u64; 2] = as_u64_array(upper); + + // To ensure that LLVM doesn't try to read beyond the `[u64; 2]` arrays, + // we must explicitly check that `size_bits` does not exceed 128. + let size_bits = size.bits(); + assert!(size_bits <= 128); + // More robust assertions that are redundant with `size_bits <= 128` and + // should be optimized away. 
+ assert!(size_bits.div_ceil(64) <= u64::try_from(lower_words.len()).unwrap()); + assert!(size_bits.div_ceil(64) <= u64::try_from(upper_words.len()).unwrap()); + let size_bits = c_uint::try_from(size_bits).unwrap(); + unsafe { - LLVMRustCreateRangeAttribute( - llcx, - size.bits().try_into().unwrap(), - lower_words.as_ptr(), - upper_words.as_ptr(), - ) + LLVMRustCreateRangeAttribute(llcx, size_bits, lower_words.as_ptr(), upper_words.as_ptr()) } } diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index d5025bb405c..d927ffd78c2 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -279,7 +279,7 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea } ("loongarch32" | "loongarch64", "32s") if get_version().0 < 21 => None, // Filter out features that are not supported by the current LLVM version - ("riscv32" | "riscv64", "zacas") if get_version().0 < 20 => None, + ("riscv32" | "riscv64", "zacas" | "rva23u64" | "supm") if get_version().0 < 20 => None, ( "s390x", "message-security-assist-extension12" diff --git a/compiler/rustc_codegen_ssa/src/back/apple.rs b/compiler/rustc_codegen_ssa/src/back/apple.rs index 2274450e20e..b1d646d9265 100644 --- a/compiler/rustc_codegen_ssa/src/back/apple.rs +++ b/compiler/rustc_codegen_ssa/src/back/apple.rs @@ -164,7 +164,7 @@ pub(super) fn get_sdk_root(sess: &Session) -> Option<PathBuf> { // // Note that when cross-compiling from e.g. Linux, the `xcrun` binary may sometimes be provided // as a shim by a cross-compilation helper tool. It usually isn't, but we still try nonetheless. - match xcrun_show_sdk_path(sdk_name, sess.verbose_internals()) { + match xcrun_show_sdk_path(sdk_name, false) { Ok((path, stderr)) => { // Emit extra stderr, such as if `-verbose` was passed, or if `xcrun` emitted a warning. 
if !stderr.is_empty() { diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 26d089a1171..9f22859ba81 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -1,5 +1,6 @@ use std::assert_matches::assert_matches; use std::marker::PhantomData; +use std::panic::AssertUnwindSafe; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::sync::mpsc::{Receiver, Sender, channel}; @@ -14,7 +15,7 @@ use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard}; use rustc_errors::emitter::Emitter; use rustc_errors::translation::Translator; use rustc_errors::{ - Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, Level, MultiSpan, Style, + Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalErrorMarker, Level, MultiSpan, Style, Suggestions, }; use rustc_fs_util::link_or_copy; @@ -395,8 +396,7 @@ fn generate_thin_lto_work<B: ExtraBackendMethods>( each_linked_rlib_for_lto, needs_thin_lto, import_only_modules, - ) - .unwrap_or_else(|e| e.raise()); + ); lto_modules .into_iter() .map(|module| { @@ -844,11 +844,11 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, mut module: ModuleCodegen<B::Module>, module_config: &ModuleConfig, -) -> Result<WorkItemResult<B>, FatalError> { +) -> WorkItemResult<B> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); - B::optimize(cgcx, dcx, &mut module, module_config)?; + B::optimize(cgcx, dcx, &mut module, module_config); // After we've done the initial round of optimizations we need to // decide whether to synchronously codegen this module or ship it @@ -868,8 +868,8 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( match lto_type { ComputedLtoType::No => { - let module = B::codegen(cgcx, module, module_config)?; - Ok(WorkItemResult::Finished(module)) + let module = B::codegen(cgcx, module, module_config); + WorkItemResult::Finished(module) } ComputedLtoType::Thin => { let (name, thin_buffer) = B::prepare_thin(module, false); @@ -878,7 +878,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e); }); } - Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) + WorkItemResult::NeedsThinLto(name, thin_buffer) } ComputedLtoType::Fat => match bitcode { Some(path) => { @@ -886,12 +886,12 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( fs::write(&path, buffer.data()).unwrap_or_else(|e| { panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e); }); - Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { + WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer: SerializedModule::Local(buffer), - })) + }) } - None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))), + None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)), }, } } @@ -987,7 +987,7 @@ fn execute_fat_lto_work_item<B: ExtraBackendMethods>( mut needs_fat_lto: Vec<FatLtoInput<B>>, import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>, module_config: &ModuleConfig, -) -> Result<WorkItemResult<B>, FatalError> { +) -> WorkItemResult<B> { for (module, wp) in import_only_modules { needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module }) } @@ -997,19 +997,19 @@ fn execute_fat_lto_work_item<B: ExtraBackendMethods>( exported_symbols_for_lto, each_linked_rlib_for_lto, needs_fat_lto, - )?; - let module = B::codegen(cgcx, module, module_config)?; - 
Ok(WorkItemResult::Finished(module)) + ); + let module = B::codegen(cgcx, module, module_config); + WorkItemResult::Finished(module) } fn execute_thin_lto_work_item<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, module: lto::ThinModule<B>, module_config: &ModuleConfig, -) -> Result<WorkItemResult<B>, FatalError> { - let module = B::optimize_thin(cgcx, module)?; - let module = B::codegen(cgcx, module, module_config)?; - Ok(WorkItemResult::Finished(module)) +) -> WorkItemResult<B> { + let module = B::optimize_thin(cgcx, module); + let module = B::codegen(cgcx, module, module_config); + WorkItemResult::Finished(module) } /// Messages sent to the coordinator. @@ -1722,37 +1722,10 @@ fn spawn_work<'a, B: ExtraBackendMethods>( let cgcx = cgcx.clone(); B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || { - // Set up a destructor which will fire off a message that we're done as - // we exit. - struct Bomb<B: ExtraBackendMethods> { - coordinator_send: Sender<Message<B>>, - result: Option<Result<WorkItemResult<B>, FatalError>>, - } - impl<B: ExtraBackendMethods> Drop for Bomb<B> { - fn drop(&mut self) { - let msg = match self.result.take() { - Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result) }, - Some(Err(FatalError)) => { - Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) } - } - None => Message::WorkItem::<B> { result: Err(None) }, - }; - drop(self.coordinator_send.send(msg)); - } - } - - let mut bomb = Bomb::<B> { coordinator_send, result: None }; - - // Execute the work itself, and if it finishes successfully then flag - // ourselves as a success as well. - // - // Note that we ignore any `FatalError` coming out of `execute_work_item`, - // as a diagnostic was already sent off to the main thread - just - // surface that there was an error in this worker. - bomb.result = { + let result = std::panic::catch_unwind(AssertUnwindSafe(|| { let module_config = cgcx.config(work.module_kind()); - Some(match work { + match work { WorkItem::Optimize(m) => { let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name); @@ -1763,7 +1736,7 @@ fn spawn_work<'a, B: ExtraBackendMethods>( "codegen_copy_artifacts_from_incr_cache", &*m.name, ); - Ok(execute_copy_from_cache_work_item(&cgcx, m, module_config)) + execute_copy_from_cache_work_item(&cgcx, m, module_config) } WorkItem::FatLto { exported_symbols_for_lto, @@ -1788,8 +1761,22 @@ fn spawn_work<'a, B: ExtraBackendMethods>( cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name()); execute_thin_lto_work_item(&cgcx, m, module_config) } - }) + } + })); + + let msg = match result { + Ok(result) => Message::WorkItem::<B> { result: Ok(result) }, + + // We ignore any `FatalError` coming out of `execute_work_item`, as a + // diagnostic was already sent off to the main thread - just surface + // that there was an error in this worker. 
+ Err(err) if err.is::<FatalErrorMarker>() => { + Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) } + } + + Err(_) => Message::WorkItem::<B> { result: Err(None) }, }; + drop(coordinator_send.send(msg)); }) .expect("failed to spawn work thread"); } diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs index c29ad90735b..cc7c4e46d7b 100644 --- a/compiler/rustc_codegen_ssa/src/traits/write.rs +++ b/compiler/rustc_codegen_ssa/src/traits/write.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use rustc_errors::{DiagCtxtHandle, FatalError}; +use rustc_errors::DiagCtxtHandle; use rustc_middle::dep_graph::WorkProduct; use crate::back::lto::{SerializedModule, ThinModule}; @@ -22,7 +22,7 @@ pub trait WriteBackendMethods: Clone + 'static { exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<Self>>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError>; + ) -> ModuleCodegen<Self::Module>; /// Performs thin LTO by performing necessary global analysis and returning two /// lists, one of the modules that need optimization and another for modules that /// can simply be copied over from the incr. comp. cache. @@ -32,7 +32,7 @@ pub trait WriteBackendMethods: Clone + 'static { each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, - ) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError>; + ) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>); fn print_pass_timings(&self); fn print_statistics(&self); fn optimize( @@ -40,16 +40,16 @@ pub trait WriteBackendMethods: Clone + 'static { dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<(), FatalError>; + ); fn optimize_thin( cgcx: &CodegenContext<Self>, thin: ThinModule<Self>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError>; + ) -> ModuleCodegen<Self::Module>; fn codegen( cgcx: &CodegenContext<Self>, module: ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<CompiledModule, FatalError>; + ) -> CompiledModule; fn prepare_thin( module: ModuleCodegen<Self::Module>, want_summary: bool, diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index da954cf4ed7..fccb6b171b1 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -280,22 +280,110 @@ impl<'tcx> CompileTimeInterpCx<'tcx> { interp_ok(match (a, b) { // Comparisons between integers are always known. (Scalar::Int(a), Scalar::Int(b)) => (a == b) as u8, - // Comparisons of null with an arbitrary scalar can be known if `scalar_may_be_null` - // indicates that the scalar can definitely *not* be null. - (Scalar::Int(int), ptr) | (ptr, Scalar::Int(int)) - if int.is_null() && !self.scalar_may_be_null(ptr)? => - { - 0 + // Comparing a pointer `ptr` with an integer `int` is equivalent to comparing + // `ptr-int` with null, so we can reduce this case to a `scalar_may_be_null` test. + (Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => { + let int = int.to_target_usize(*self.tcx); + // The `wrapping_neg` here may produce a value that is not + // a valid target usize any more... but `wrapping_offset` handles that correctly. 
+ let offset_ptr = ptr.wrapping_offset(Size::from_bytes(int.wrapping_neg()), self); + if !self.scalar_may_be_null(Scalar::from_pointer(offset_ptr, self))? { + // `ptr.wrapping_sub(int)` is definitely not equal to `0`, so `ptr != int` + 0 + } else { + // `ptr.wrapping_sub(int)` could be equal to `0`, but might not be, + // so we cannot know for sure if `ptr == int` or not + 2 + } + } + (Scalar::Ptr(a, _), Scalar::Ptr(b, _)) => { + let (a_prov, a_offset) = a.prov_and_relative_offset(); + let (b_prov, b_offset) = b.prov_and_relative_offset(); + let a_allocid = a_prov.alloc_id(); + let b_allocid = b_prov.alloc_id(); + let a_info = self.get_alloc_info(a_allocid); + let b_info = self.get_alloc_info(b_allocid); + + // Check if the pointers cannot be equal due to alignment + if a_info.align > Align::ONE && b_info.align > Align::ONE { + let min_align = Ord::min(a_info.align.bytes(), b_info.align.bytes()); + let a_residue = a_offset.bytes() % min_align; + let b_residue = b_offset.bytes() % min_align; + if a_residue != b_residue { + // If the two pointers have a different residue modulo their + // common alignment, they cannot be equal. + return interp_ok(0); + } + // The pointers have the same residue modulo their common alignment, + // so they could be equal. Try the other checks. + } + + if let (Some(GlobalAlloc::Static(a_did)), Some(GlobalAlloc::Static(b_did))) = ( + self.tcx.try_get_global_alloc(a_allocid), + self.tcx.try_get_global_alloc(b_allocid), + ) { + if a_allocid == b_allocid { + debug_assert_eq!( + a_did, b_did, + "different static item DefIds had same AllocId? {a_allocid:?} == {b_allocid:?}, {a_did:?} != {b_did:?}" + ); + // Comparing two pointers into the same static. As per + // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro + // a static cannot be duplicated, so if two pointers are into the same + // static, they are equal if and only if their offsets are equal. + (a_offset == b_offset) as u8 + } else { + debug_assert_ne!( + a_did, b_did, + "same static item DefId had two different AllocIds? {a_allocid:?} != {b_allocid:?}, {a_did:?} == {b_did:?}" + ); + // Comparing two pointers into the different statics. + // We can never determine for sure that two pointers into different statics + // are *equal*, but we can know that they are *inequal* if they are both + // strictly in-bounds (i.e. in-bounds and not one-past-the-end) of + // their respective static, as different non-zero-sized statics cannot + // overlap or be deduplicated as per + // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro + // (non-deduplication), and + // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness + // (non-overlapping). + if a_offset < a_info.size && b_offset < b_info.size { + 0 + } else { + // Otherwise, conservatively say we don't know. + // There are some cases we could still return `0` for, e.g. + // if the pointers being equal would require their statics to overlap + // one or more bytes, but for simplicity we currently only check + // strictly in-bounds pointers. + 2 + } + } + } else { + // All other cases we conservatively say we don't know. + // + // For comparing statics to non-statics, as per https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness + // immutable statics can overlap with other kinds of allocations sometimes. 
+ // + // FIXME: We could be more decisive for (non-zero-sized) mutable statics, + // which cannot overlap with other kinds of allocations. + // + // Functions and vtables can be duplicated and deduplicated, so we + // cannot be sure of runtime equality of pointers to the same one, or the + // runtime inequality of pointers to different ones (see e.g. #73722), + // so comparing those should return 2, whether they are the same allocation + // or not. + // + // `GlobalAlloc::TypeId` exists mostly to prevent consteval from comparing + // `TypeId`s, so comparing those should always return 2, whether they are the + // same allocation or not. + // + // FIXME: We could revisit comparing pointers into the same + // `GlobalAlloc::Memory` once https://github.com/rust-lang/rust/issues/128775 + // is fixed (but they can be deduplicated, so comparing pointers into different + // ones should return 2). + 2 + } } - // Other ways of comparing integers and pointers can never be known for sure. - (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2, - // FIXME: return a `1` for when both sides are the same pointer, *except* that - // some things (like functions and vtables) do not have stable addresses - // so we need to be careful around them (see e.g. #73722). - // FIXME: return `0` for at least some comparisons where we can reliably - // determine the result of runtime inequality tests at compile-time. - // Examples include comparison of addresses in different static items. - (Scalar::Ptr(..), Scalar::Ptr(..)) => 2, }) } } diff --git a/compiler/rustc_hir/src/lints.rs b/compiler/rustc_hir/src/lints.rs index 061ec786dc8..0b24052b453 100644 --- a/compiler/rustc_hir/src/lints.rs +++ b/compiler/rustc_hir/src/lints.rs @@ -35,4 +35,5 @@ pub enum AttributeLintKind { IllFormedAttributeInput { suggestions: Vec<String> }, EmptyAttribute { first_span: Span }, InvalidTarget { name: AttrPath, target: Target, applied: Vec<String>, only: &'static str }, + InvalidStyle { name: AttrPath, is_used_as_inner: bool, target: Target, target_span: Span }, } diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index e699e4b9c13..cce40da354d 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -488,6 +488,9 @@ extern "C" LLVMAttributeRef LLVMRustCreateRangeAttribute(LLVMContextRef C, unsigned NumBits, const uint64_t LowerWords[], const uint64_t UpperWords[]) { + // FIXME(Zalathar): There appears to be no stable guarantee that C++ + // `AttrKind` values correspond directly to the `unsigned KindID` values + // accepted by LLVM-C API functions, though in practice they currently do. 
return LLVMCreateConstantRangeAttribute(C, Attribute::Range, NumBits, LowerWords, UpperWords); } diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl index 00a41e31a02..3be06a3704c 100644 --- a/compiler/rustc_passes/messages.ftl +++ b/compiler/rustc_passes/messages.ftl @@ -462,8 +462,10 @@ passes_object_lifetime_err = {$repr} passes_outer_crate_level_attr = - crate-level attribute should be an inner attribute: add an exclamation mark: `#![foo]` + crate-level attribute should be an inner attribute +passes_outer_crate_level_attr_suggestion = + add a `!` passes_panic_unwind_without_std = unwinding panics are not supported without std diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 7aef60b7b91..bbe11ee78bb 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -369,24 +369,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { if hir_id != CRATE_HIR_ID { match attr { - // FIXME(jdonszelmann) move to attribute parsing when validation gets better there - &Attribute::Parsed(AttributeKind::CrateName { - attr_span: span, style, .. - }) => match style { - ast::AttrStyle::Outer => self.tcx.emit_node_span_lint( - UNUSED_ATTRIBUTES, - hir_id, - span, - errors::OuterCrateLevelAttr, - ), - ast::AttrStyle::Inner => self.tcx.emit_node_span_lint( - UNUSED_ATTRIBUTES, - hir_id, - span, - errors::InnerCrateLevelAttr, - ), - }, - Attribute::Parsed(_) => { /* not crate-level */ } + Attribute::Parsed(_) => { /* Already validated. */ } Attribute::Unparsed(attr) => { // FIXME(jdonszelmann): remove once all crate-level attrs are parsed and caught by // the above @@ -397,12 +380,26 @@ impl<'tcx> CheckAttrVisitor<'tcx> { .and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name)) { match attr.style { - ast::AttrStyle::Outer => self.tcx.emit_node_span_lint( - UNUSED_ATTRIBUTES, - hir_id, - attr.span, - errors::OuterCrateLevelAttr, - ), + ast::AttrStyle::Outer => { + let attr_span = attr.span; + let bang_position = self + .tcx + .sess + .source_map() + .span_until_char(attr_span, '[') + .shrink_to_hi(); + + self.tcx.emit_node_span_lint( + UNUSED_ATTRIBUTES, + hir_id, + attr.span, + errors::OuterCrateLevelAttr { + suggestion: errors::OuterCrateLevelAttrSuggestion { + bang_position, + }, + }, + ) + } ast::AttrStyle::Inner => self.tcx.emit_node_span_lint( UNUSED_ATTRIBUTES, hir_id, @@ -1851,12 +1848,24 @@ impl<'tcx> CheckAttrVisitor<'tcx> { { if hir_id != CRATE_HIR_ID { match style { - Some(ast::AttrStyle::Outer) => self.tcx.emit_node_span_lint( - UNUSED_ATTRIBUTES, - hir_id, - attr.span(), - errors::OuterCrateLevelAttr, - ), + Some(ast::AttrStyle::Outer) => { + let attr_span = attr.span(); + let bang_position = self + .tcx + .sess + .source_map() + .span_until_char(attr_span, '[') + .shrink_to_hi(); + + self.tcx.emit_node_span_lint( + UNUSED_ATTRIBUTES, + hir_id, + attr_span, + errors::OuterCrateLevelAttr { + suggestion: errors::OuterCrateLevelAttrSuggestion { bang_position }, + }, + ) + } Some(ast::AttrStyle::Inner) | None => self.tcx.emit_node_span_lint( UNUSED_ATTRIBUTES, hir_id, diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs index 4fec6b0798a..01972067978 100644 --- a/compiler/rustc_passes/src/errors.rs +++ b/compiler/rustc_passes/src/errors.rs @@ -64,7 +64,17 @@ pub(crate) struct MixedExportNameAndNoMangle { #[derive(LintDiagnostic)] #[diag(passes_outer_crate_level_attr)] -pub(crate) struct OuterCrateLevelAttr; +pub(crate) struct OuterCrateLevelAttr { + 
#[subdiagnostic] + pub suggestion: OuterCrateLevelAttrSuggestion, +} + +#[derive(Subdiagnostic)] +#[multipart_suggestion(passes_outer_crate_level_attr_suggestion, style = "verbose")] +pub(crate) struct OuterCrateLevelAttrSuggestion { + #[suggestion_part(code = "!")] + pub bang_position: Span, +} #[derive(LintDiagnostic)] #[diag(passes_inner_crate_level_attr)] diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs index 8270de1e917..166842e374b 100644 --- a/compiler/rustc_span/src/source_map.rs +++ b/compiler/rustc_span/src/source_map.rs @@ -9,6 +9,7 @@ //! within the `SourceMap`, which upon request can be converted to line and column //! information, source code snippets, etc. +use std::fs::File; use std::io::{self, BorrowedBuf, Read}; use std::{fs, path}; @@ -115,13 +116,18 @@ impl FileLoader for RealFileLoader { } fn read_file(&self, path: &Path) -> io::Result<String> { - if path.metadata().is_ok_and(|metadata| metadata.len() > SourceFile::MAX_FILE_SIZE.into()) { + let mut file = File::open(path)?; + let size = file.metadata().map(|metadata| metadata.len()).ok().unwrap_or(0); + + if size > SourceFile::MAX_FILE_SIZE.into() { return Err(io::Error::other(format!( "text files larger than {} bytes are unsupported", SourceFile::MAX_FILE_SIZE ))); } - fs::read_to_string(path) + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + Ok(contents) } fn read_binary_file(&self, path: &Path) -> io::Result<Arc<[u8]>> { diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index a67795e3e93..c53d92bee9d 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -1954,6 +1954,7 @@ supported_targets! { ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf), ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu), ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl), + ("aarch64_be-unknown-linux-musl", aarch64_be_unknown_linux_musl), ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl), ("i686-unknown-linux-musl", i686_unknown_linux_musl), ("i586-unknown-linux-musl", i586_unknown_linux_musl), @@ -2149,6 +2150,7 @@ supported_targets! 
{ ("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf), ("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu), ("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl), + ("riscv64a23-unknown-linux-gnu", riscv64a23_unknown_linux_gnu), ("sparc-unknown-none-elf", sparc_unknown_none_elf), diff --git a/compiler/rustc_target/src/spec/targets/aarch64_be_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/targets/aarch64_be_unknown_linux_musl.rs new file mode 100644 index 00000000000..be5ac4a843b --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/aarch64_be_unknown_linux_musl.rs @@ -0,0 +1,40 @@ +use rustc_abi::Endian; + +use crate::spec::{ + FramePointer, SanitizerSet, StackProbeType, Target, TargetMetadata, TargetOptions, base, +}; + +pub(crate) fn target() -> Target { + let mut base = base::linux_musl::opts(); + base.max_atomic_width = Some(128); + base.supports_xray = true; + base.features = "+v8a,+outline-atomics".into(); + base.stack_probes = StackProbeType::Inline; + base.supported_sanitizers = SanitizerSet::ADDRESS + | SanitizerSet::CFI + | SanitizerSet::LEAK + | SanitizerSet::MEMORY + | SanitizerSet::THREAD; + + Target { + llvm_target: "aarch64_be-unknown-linux-musl".into(), + metadata: TargetMetadata { + description: Some("ARM64 Linux (big-endian) with musl-libc 1.2.5".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 64, + data_layout: "E-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32".into(), + arch: "aarch64".into(), + options: TargetOptions { + // the AAPCS64 expects use of non-leaf frame pointers per + // https://github.com/ARM-software/abi-aa/blob/4492d1570eb70c8fd146623e0db65b2d241f12e7/aapcs64/aapcs64.rst#the-frame-pointer + // and we tend to encounter interesting bugs in AArch64 unwinding code if we do not + frame_pointer: FramePointer::NonLeaf, + mcount: "\u{1}_mcount".into(), + endian: Endian::Big, + ..base + }, + } +} diff --git a/compiler/rustc_target/src/spec/targets/riscv64a23_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/targets/riscv64a23_unknown_linux_gnu.rs new file mode 100644 index 00000000000..60f2e7da042 --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/riscv64a23_unknown_linux_gnu.rs @@ -0,0 +1,27 @@ +use std::borrow::Cow; + +use crate::spec::{CodeModel, SplitDebuginfo, Target, TargetMetadata, TargetOptions, base}; + +pub(crate) fn target() -> Target { + Target { + llvm_target: "riscv64-unknown-linux-gnu".into(), + metadata: TargetMetadata { + description: Some("RISC-V Linux (kernel 6.8.0, glibc 2.39)".into()), + tier: Some(3), + host_tools: Some(true), + std: Some(true), + }, + pointer_width: 64, + data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(), + arch: "riscv64".into(), + options: TargetOptions { + code_model: Some(CodeModel::Medium), + cpu: "generic-rv64".into(), + features: "+rva23u64".into(), + llvm_abiname: "lp64d".into(), + max_atomic_width: Some(64), + supported_split_debuginfo: Cow::Borrowed(&[SplitDebuginfo::Off]), + ..base::linux_gnu::opts() + }, + } +} diff --git a/compiler/rustc_target/src/target_features.rs b/compiler/rustc_target/src/target_features.rs index e45300b59cc..4c1b8c99426 100644 --- a/compiler/rustc_target/src/target_features.rs +++ b/compiler/rustc_target/src/target_features.rs @@ -601,6 +601,49 @@ static RISCV_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[ ), ("m", Stable, &[]), ("relax", Unstable(sym::riscv_target_feature), &[]), + ( + "rva23u64", + 
Unstable(sym::riscv_target_feature), + &[ + "m", + "a", + "f", + "d", + "c", + "b", + "v", + "zicsr", + "zicntr", + "zihpm", + "ziccif", + "ziccrse", + "ziccamoa", + "zicclsm", + "zic64b", + "za64rs", + "zihintpause", + "zba", + "zbb", + "zbs", + "zicbom", + "zicbop", + "zicboz", + "zfhmin", + "zkt", + "zvfhmin", + "zvbb", + "zvkt", + "zihintntl", + "zicond", + "zimop", + "zcmop", + "zcb", + "zfa", + "zawrs", + "supm", + ], + ), + ("supm", Unstable(sym::riscv_target_feature), &[]), ("unaligned-scalar-mem", Unstable(sym::riscv_target_feature), &[]), ("unaligned-vector-mem", Unstable(sym::riscv_target_feature), &[]), ("v", Unstable(sym::riscv_target_feature), &["zvl128b", "zve64d"]), diff --git a/compiler/rustc_thread_pool/src/lib.rs b/compiler/rustc_thread_pool/src/lib.rs index 34252d919e3..7ce7fbc27ea 100644 --- a/compiler/rustc_thread_pool/src/lib.rs +++ b/compiler/rustc_thread_pool/src/lib.rs @@ -787,18 +787,7 @@ impl ThreadPoolBuildError { } } -const GLOBAL_POOL_ALREADY_INITIALIZED: &str = - "The global thread pool has already been initialized."; - impl Error for ThreadPoolBuildError { - #[allow(deprecated)] - fn description(&self) -> &str { - match self.kind { - ErrorKind::GlobalPoolAlreadyInitialized => GLOBAL_POOL_ALREADY_INITIALIZED, - ErrorKind::IOError(ref e) => e.description(), - } - } - fn source(&self) -> Option<&(dyn Error + 'static)> { match &self.kind { ErrorKind::GlobalPoolAlreadyInitialized => None, @@ -810,7 +799,9 @@ impl Error for ThreadPoolBuildError { impl fmt::Display for ThreadPoolBuildError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.kind { - ErrorKind::GlobalPoolAlreadyInitialized => GLOBAL_POOL_ALREADY_INITIALIZED.fmt(f), + ErrorKind::GlobalPoolAlreadyInitialized => { + "The global thread pool has already been initialized.".fmt(f) + } ErrorKind::IOError(e) => e.fmt(f), } } diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs index e31ff8b8729..aa153d3607b 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs @@ -820,16 +820,20 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if matches!(obligation.cause.code(), ObligationCauseCode::FunctionArg { .. }) && obligation.cause.span.can_be_used_for_suggestions() { + let (span, sugg) = if let Some(snippet) = + self.tcx.sess.source_map().span_to_snippet(obligation.cause.span).ok() + && snippet.starts_with("|") + { + (obligation.cause.span, format!("({snippet})({args})")) + } else { + (obligation.cause.span.shrink_to_hi(), format!("({args})")) + }; + // When the obligation error has been ensured to have been caused by // an argument, the `obligation.cause.span` points at the expression // of the argument, so we can provide a suggestion. Otherwise, we give // a more general note. - err.span_suggestion_verbose( - obligation.cause.span.shrink_to_hi(), - msg, - format!("({args})"), - Applicability::HasPlaceholders, - ); + err.span_suggestion_verbose(span, msg, sugg, Applicability::HasPlaceholders); } else if let DefIdOrName::DefId(def_id) = def_id_or_name { let name = match self.tcx.hir_get_if_local(def_id) { Some(hir::Node::Expr(hir::Expr { |
