Diffstat (limited to 'compiler')
286 files changed, 5202 insertions, 4382 deletions
diff --git a/compiler/rustc/Cargo.toml b/compiler/rustc/Cargo.toml index 3ca75235446..e3214d1ab9c 100644 --- a/compiler/rustc/Cargo.toml +++ b/compiler/rustc/Cargo.toml @@ -33,3 +33,8 @@ llvm = ['rustc_driver_impl/llvm'] max_level_info = ['rustc_driver_impl/max_level_info'] rustc_randomized_layouts = ['rustc_driver_impl/rustc_randomized_layouts'] # tidy-alphabetical-end + +[build-dependencies] +# tidy-alphabetical-start +rustc_windows_rc = { path = "../rustc_windows_rc" } +# tidy-alphabetical-end diff --git a/compiler/rustc/build.rs b/compiler/rustc/build.rs index 8b7d28d2b8a..9b5def53e3c 100644 --- a/compiler/rustc/build.rs +++ b/compiler/rustc/build.rs @@ -1,4 +1,6 @@ -use std::env; +use std::{env, path}; + +use rustc_windows_rc::{VersionInfoFileType, compile_windows_resource_file}; fn main() { let target_os = env::var("CARGO_CFG_TARGET_OS"); @@ -13,6 +15,18 @@ fn main() { // Add a manifest file to rustc.exe. fn set_windows_exe_options() { + set_windows_resource(); + set_windows_manifest(); +} + +fn set_windows_resource() { + let stem = path::PathBuf::from("rustc_main_resource"); + let file_description = "rustc"; + let res_file = compile_windows_resource_file(&stem, file_description, VersionInfoFileType::App); + println!("cargo:rustc-link-arg={}", res_file.display()); +} + +fn set_windows_manifest() { static WINDOWS_MANIFEST_FILE: &str = "Windows Manifest.xml"; let mut manifest = env::current_dir().unwrap(); diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs index 802a6fa3249..3e8fddd9954 100644 --- a/compiler/rustc_ast/src/ast.rs +++ b/compiler/rustc_ast/src/ast.rs @@ -2284,6 +2284,54 @@ pub struct FnSig { pub span: Span, } +impl FnSig { + /// Return a span encompassing the header, or where to insert it if empty. + pub fn header_span(&self) -> Span { + match self.header.ext { + Extern::Implicit(span) | Extern::Explicit(_, span) => { + return self.span.with_hi(span.hi()); + } + Extern::None => {} + } + + match self.header.safety { + Safety::Unsafe(span) | Safety::Safe(span) => return self.span.with_hi(span.hi()), + Safety::Default => {} + }; + + if let Some(coroutine_kind) = self.header.coroutine_kind { + return self.span.with_hi(coroutine_kind.span().hi()); + } + + if let Const::Yes(span) = self.header.constness { + return self.span.with_hi(span.hi()); + } + + self.span.shrink_to_lo() + } + + /// The span of the header's safety, or where to insert it if empty. + pub fn safety_span(&self) -> Span { + match self.header.safety { + Safety::Unsafe(span) | Safety::Safe(span) => span, + Safety::Default => { + // Insert after the `coroutine_kind` if available. + if let Some(extern_span) = self.header.ext.span() { + return extern_span.shrink_to_lo(); + } + + // Insert right at the front of the signature. + self.header_span().shrink_to_hi() + } + } + } + + /// The span of the header's extern, or where to insert it if empty. + pub fn extern_span(&self) -> Span { + self.header.ext.span().unwrap_or(self.safety_span().shrink_to_hi()) + } +} + /// A constraint on an associated item. /// /// ### Examples @@ -3526,6 +3574,13 @@ impl Extern { None => Extern::Implicit(span), } } + + pub fn span(self) -> Option<Span> { + match self { + Extern::None => None, + Extern::Implicit(span) | Extern::Explicit(_, span) => Some(span), + } + } } /// A function header. @@ -3534,12 +3589,12 @@ impl Extern { /// included in this struct (e.g., `async unsafe fn` or `const extern "C" fn`). 
#[derive(Clone, Copy, Encodable, Decodable, Debug, Walkable)] pub struct FnHeader { - /// Whether this is `unsafe`, or has a default safety. - pub safety: Safety, - /// Whether this is `async`, `gen`, or nothing. - pub coroutine_kind: Option<CoroutineKind>, /// The `const` keyword, if any pub constness: Const, + /// Whether this is `async`, `gen`, or nothing. + pub coroutine_kind: Option<CoroutineKind>, + /// Whether this is `unsafe`, or has a default safety. + pub safety: Safety, /// The `extern` keyword and corresponding ABI string, if any. pub ext: Extern, } @@ -3553,38 +3608,6 @@ impl FnHeader { || matches!(constness, Const::Yes(_)) || !matches!(ext, Extern::None) } - - /// Return a span encompassing the header, or none if all options are default. - pub fn span(&self) -> Option<Span> { - fn append(a: &mut Option<Span>, b: Span) { - *a = match a { - None => Some(b), - Some(x) => Some(x.to(b)), - } - } - - let mut full_span = None; - - match self.safety { - Safety::Unsafe(span) | Safety::Safe(span) => append(&mut full_span, span), - Safety::Default => {} - }; - - if let Some(coroutine_kind) = self.coroutine_kind { - append(&mut full_span, coroutine_kind.span()); - } - - if let Const::Yes(span) = self.constness { - append(&mut full_span, span); - } - - match self.ext { - Extern::Implicit(span) | Extern::Explicit(_, span) => append(&mut full_span, span), - Extern::None => {} - } - - full_span - } } impl Default for FnHeader { diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index 3674814b796..bb6b25baf01 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -1536,7 +1536,13 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::LangItem::Range } } - (None, Some(..), Closed) => hir::LangItem::RangeToInclusive, + (None, Some(..), Closed) => { + if self.tcx.features().new_range() { + hir::LangItem::RangeToInclusiveCopy + } else { + hir::LangItem::RangeToInclusive + } + } (Some(e1), Some(e2), Closed) => { if self.tcx.features().new_range() { hir::LangItem::RangeInclusiveCopy @@ -1560,13 +1566,26 @@ impl<'hir> LoweringContext<'_, 'hir> { }; let fields = self.arena.alloc_from_iter( - e1.iter().map(|e| (sym::start, e)).chain(e2.iter().map(|e| (sym::end, e))).map( - |(s, e)| { + e1.iter() + .map(|e| (sym::start, e)) + .chain(e2.iter().map(|e| { + ( + if matches!( + lang_item, + hir::LangItem::RangeInclusiveCopy | hir::LangItem::RangeToInclusiveCopy + ) { + sym::last + } else { + sym::end + }, + e, + ) + })) + .map(|(s, e)| { let expr = self.lower_expr(e); let ident = Ident::new(s, self.lower_span(e.span)); self.expr_field(ident, expr, e.span) - }, - ), + }), ); hir::ExprKind::Struct( diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index bb559bd8921..53351f91c46 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -5,7 +5,9 @@ use rustc_errors::{E0570, ErrorGuaranteed, struct_span_code_err}; use rustc_hir::attrs::AttributeKind; use rustc_hir::def::{DefKind, PerNS, Res}; use rustc_hir::def_id::{CRATE_DEF_ID, LocalDefId}; -use rustc_hir::{self as hir, HirId, LifetimeSource, PredicateOrigin, Target, find_attr}; +use rustc_hir::{ + self as hir, HirId, ImplItemImplKind, LifetimeSource, PredicateOrigin, Target, find_attr, +}; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::span_bug; use rustc_middle::ty::{ResolverAstLowering, TyCtxt}; @@ -1117,20 +1119,31 @@ impl<'hir> LoweringContext<'_, 'hir> { } }; + let span = 
self.lower_span(i.span); let item = hir::ImplItem { owner_id: hir_id.expect_owner(), ident: self.lower_ident(ident), generics, + impl_kind: if is_in_trait_impl { + ImplItemImplKind::Trait { + defaultness, + trait_item_def_id: self + .resolver + .get_partial_res(i.id) + .and_then(|r| r.expect_full_res().opt_def_id()) + .ok_or_else(|| { + self.dcx().span_delayed_bug( + span, + "could not resolve trait item being implemented", + ) + }), + } + } else { + ImplItemImplKind::Inherent { vis_span: self.lower_span(i.vis.span) } + }, kind, - vis_span: self.lower_span(i.vis.span), - span: self.lower_span(i.span), - defaultness, + span, has_delayed_lints: !self.delayed_lints.is_empty(), - trait_item_def_id: self - .resolver - .get_partial_res(i.id) - .map(|r| r.expect_full_res().opt_def_id()) - .unwrap_or(None), }; self.arena.alloc(item) } diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 72f20a95ff0..4e2243e8787 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -2101,17 +2101,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { { return; } - if self.tcx.features().more_maybe_bounds() { - return; - } } RelaxedBoundPolicy::Forbidden(reason) => { - if self.tcx.features().more_maybe_bounds() { - return; - } - match reason { RelaxedBoundForbiddenReason::TraitObjectTy => { + if self.tcx.features().more_maybe_bounds() { + return; + } + self.dcx().span_err( span, "relaxed bounds are not permitted in trait object types", @@ -2119,6 +2116,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { return; } RelaxedBoundForbiddenReason::SuperTrait => { + if self.tcx.features().more_maybe_bounds() { + return; + } + let mut diag = self.dcx().struct_span_err( span, "relaxed bounds are not permitted in supertrait bounds", diff --git a/compiler/rustc_ast_passes/messages.ftl b/compiler/rustc_ast_passes/messages.ftl index e5f1fcdc4b4..8dcf3e3aa38 100644 --- a/compiler/rustc_ast_passes/messages.ftl +++ b/compiler/rustc_ast_passes/messages.ftl @@ -57,8 +57,6 @@ ast_passes_auto_super_lifetime = auto traits cannot have super traits or lifetim .label = {ast_passes_auto_super_lifetime} .suggestion = remove the super traits or lifetime bounds -ast_passes_bad_c_variadic = defining functions with C-variadic arguments is only allowed for free functions with the "C" or "C-unwind" calling convention - ast_passes_body_in_extern = incorrect `{$kind}` inside `extern` block .cannot_have = cannot have a body .invalid = the invalid body @@ -66,6 +64,19 @@ ast_passes_body_in_extern = incorrect `{$kind}` inside `extern` block ast_passes_bound_in_context = bounds on `type`s in {$ctx} have no effect +ast_passes_c_variadic_associated_function = associated functions cannot have a C variable argument list + +ast_passes_c_variadic_bad_extern = `...` is not supported for `extern "{$abi}"` functions + .label = `extern "{$abi}"` because of this + .help = only `extern "C"` and `extern "C-unwind"` functions may have a C variable argument list + +ast_passes_c_variadic_must_be_unsafe = + functions with a C variable argument list must be unsafe + .suggestion = add the `unsafe` keyword to this definition + +ast_passes_c_variadic_no_extern = `...` is not supported for non-extern functions + .help = only `extern "C"` and `extern "C-unwind"` functions may have a C variable argument list + ast_passes_const_and_c_variadic = functions cannot be both `const` and C-variadic .const = `const` because of this .variadic = C-variadic because of this @@ -84,6 +95,10 @@ 
ast_passes_const_without_body = ast_passes_constraint_on_negative_bound = associated type constraints not allowed on negative bounds +ast_passes_coroutine_and_c_variadic = functions cannot be both `{$coroutine_kind}` and C-variadic + .const = `{$coroutine_kind}` because of this + .variadic = C-variadic because of this + ast_passes_equality_in_where = equality constraints are not yet supported in `where` clauses .label = not supported .suggestion = if `{$ident}` is an associated type you're trying to set, use the associated type binding syntax diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs index 6133cc3548b..a6ef89b553d 100644 --- a/compiler/rustc_ast_passes/src/ast_validation.rs +++ b/compiler/rustc_ast_passes/src/ast_validation.rs @@ -492,7 +492,7 @@ impl<'a> AstValidator<'a> { } if !spans.is_empty() { - let header_span = sig.header.span().unwrap_or(sig.span.shrink_to_lo()); + let header_span = sig.header_span(); let suggestion_span = header_span.shrink_to_hi().to(sig.decl.output.span()); let padding = if header_span.is_empty() { "" } else { " " }; @@ -665,46 +665,73 @@ impl<'a> AstValidator<'a> { /// - Non-const /// - Either foreign, or free and `unsafe extern "C"` semantically fn check_c_variadic_type(&self, fk: FnKind<'a>) { - let variadic_spans: Vec<_> = fk - .decl() - .inputs - .iter() - .filter(|arg| matches!(arg.ty.kind, TyKind::CVarArgs)) - .map(|arg| arg.span) - .collect(); + // `...` is already rejected when it is not the final parameter. + let variadic_param = match fk.decl().inputs.last() { + Some(param) if matches!(param.ty.kind, TyKind::CVarArgs) => param, + _ => return, + }; - if variadic_spans.is_empty() { - return; - } + let FnKind::Fn(fn_ctxt, _, Fn { sig, .. }) = fk else { + // Unreachable because the parser already rejects `...` in closures. + unreachable!("C variable argument list cannot be used in closures") + }; - if let Some(header) = fk.header() - && let Const::Yes(const_span) = header.constness - { - let mut spans = variadic_spans.clone(); - spans.push(const_span); + // C-variadics are not yet implemented in const evaluation. + if let Const::Yes(const_span) = sig.header.constness { self.dcx().emit_err(errors::ConstAndCVariadic { - spans, + spans: vec![const_span, variadic_param.span], const_span, - variadic_spans: variadic_spans.clone(), + variadic_span: variadic_param.span, }); } - match (fk.ctxt(), fk.header()) { - (Some(FnCtxt::Foreign), _) => return, - (Some(FnCtxt::Free), Some(header)) => match header.ext { - Extern::Explicit(StrLit { symbol_unescaped: sym::C, .. }, _) - | Extern::Explicit(StrLit { symbol_unescaped: sym::C_dash_unwind, .. }, _) - | Extern::Implicit(_) - if matches!(header.safety, Safety::Unsafe(_)) => - { - return; + if let Some(coroutine_kind) = sig.header.coroutine_kind { + self.dcx().emit_err(errors::CoroutineAndCVariadic { + spans: vec![coroutine_kind.span(), variadic_param.span], + coroutine_kind: coroutine_kind.as_str(), + coroutine_span: coroutine_kind.span(), + variadic_span: variadic_param.span, + }); + } + + match fn_ctxt { + FnCtxt::Foreign => return, + FnCtxt::Free => match sig.header.ext { + Extern::Implicit(_) => { + if !matches!(sig.header.safety, Safety::Unsafe(_)) { + self.dcx().emit_err(errors::CVariadicMustBeUnsafe { + span: variadic_param.span, + unsafe_span: sig.safety_span(), + }); + } } - _ => {} - }, - _ => {} - }; + Extern::Explicit(StrLit { symbol_unescaped, .. 
}, _) => { + if !matches!(symbol_unescaped, sym::C | sym::C_dash_unwind) { + self.dcx().emit_err(errors::CVariadicBadExtern { + span: variadic_param.span, + abi: symbol_unescaped, + extern_span: sig.extern_span(), + }); + } - self.dcx().emit_err(errors::BadCVariadic { span: variadic_spans }); + if !matches!(sig.header.safety, Safety::Unsafe(_)) { + self.dcx().emit_err(errors::CVariadicMustBeUnsafe { + span: variadic_param.span, + unsafe_span: sig.safety_span(), + }); + } + } + Extern::None => { + let err = errors::CVariadicNoExtern { span: variadic_param.span }; + self.dcx().emit_err(err); + } + }, + FnCtxt::Assoc(_) => { + // For now, C variable argument lists are unsupported in associated functions. + let err = errors::CVariadicAssociatedFunction { span: variadic_param.span }; + self.dcx().emit_err(err); + } + } } fn check_item_named(&self, ident: Ident, kind: &str) { diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs index ae8f056cb4e..ae805042c54 100644 --- a/compiler/rustc_ast_passes/src/errors.rs +++ b/compiler/rustc_ast_passes/src/errors.rs @@ -319,10 +319,44 @@ pub(crate) struct ExternItemAscii { } #[derive(Diagnostic)] -#[diag(ast_passes_bad_c_variadic)] -pub(crate) struct BadCVariadic { +#[diag(ast_passes_c_variadic_associated_function)] +pub(crate) struct CVariadicAssociatedFunction { #[primary_span] - pub span: Vec<Span>, + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(ast_passes_c_variadic_no_extern)] +#[help] +pub(crate) struct CVariadicNoExtern { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(ast_passes_c_variadic_must_be_unsafe)] +pub(crate) struct CVariadicMustBeUnsafe { + #[primary_span] + pub span: Span, + + #[suggestion( + ast_passes_suggestion, + applicability = "maybe-incorrect", + code = "unsafe ", + style = "verbose" + )] + pub unsafe_span: Span, +} + +#[derive(Diagnostic)] +#[diag(ast_passes_c_variadic_bad_extern)] +#[help] +pub(crate) struct CVariadicBadExtern { + #[primary_span] + pub span: Span, + pub abi: Symbol, + #[label] + pub extern_span: Span, } #[derive(Diagnostic)] @@ -656,7 +690,19 @@ pub(crate) struct ConstAndCVariadic { #[label(ast_passes_const)] pub const_span: Span, #[label(ast_passes_variadic)] - pub variadic_spans: Vec<Span>, + pub variadic_span: Span, +} + +#[derive(Diagnostic)] +#[diag(ast_passes_coroutine_and_c_variadic)] +pub(crate) struct CoroutineAndCVariadic { + #[primary_span] + pub spans: Vec<Span>, + pub coroutine_kind: &'static str, + #[label(ast_passes_const)] + pub coroutine_span: Span, + #[label(ast_passes_variadic)] + pub variadic_span: Span, } #[derive(Diagnostic)] diff --git a/compiler/rustc_attr_parsing/messages.ftl b/compiler/rustc_attr_parsing/messages.ftl index 7fa1293463c..839a5d23c3b 100644 --- a/compiler/rustc_attr_parsing/messages.ftl +++ b/compiler/rustc_attr_parsing/messages.ftl @@ -247,3 +247,7 @@ attr_parsing_raw_dylib_only_windows = attr_parsing_whole_archive_needs_static = linking modifier `whole-archive` is only compatible with `static` linking kind + +attr_parsing_limit_invalid = + `limit` must be a non-negative integer + .label = {$error_str} diff --git a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs index ffdacff7152..d5d51f2e79a 100644 --- a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs +++ b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs @@ -218,6 +218,7 @@ impl<S: Stage> AttributeParser<S> for NakedParser { 
sym::rustc_std_internal_symbol, // FIXME(#82232, #143834): temporarily renamed to mitigate `#[align]` nameres ambiguity sym::rustc_align, + sym::rustc_align_static, // obviously compatible with self sym::naked, // documentation diff --git a/compiler/rustc_attr_parsing/src/attributes/crate_level.rs b/compiler/rustc_attr_parsing/src/attributes/crate_level.rs index 2fed09b85e8..0b2c05482bf 100644 --- a/compiler/rustc_attr_parsing/src/attributes/crate_level.rs +++ b/compiler/rustc_attr_parsing/src/attributes/crate_level.rs @@ -1,6 +1,40 @@ -use rustc_feature::AttributeType; +use std::num::IntErrorKind; + +use rustc_hir::limit::Limit; use super::prelude::*; +use crate::session_diagnostics::LimitInvalid; + +impl<S: Stage> AcceptContext<'_, '_, S> { + fn parse_limit_int(&self, nv: &NameValueParser) -> Option<Limit> { + let Some(limit) = nv.value_as_str() else { + self.expected_string_literal(nv.value_span, Some(nv.value_as_lit())); + return None; + }; + + let error_str = match limit.as_str().parse() { + Ok(i) => return Some(Limit::new(i)), + Err(e) => match e.kind() { + IntErrorKind::PosOverflow => "`limit` is too large", + IntErrorKind::Empty => "`limit` must be a non-negative integer", + IntErrorKind::InvalidDigit => "not a valid integer", + IntErrorKind::NegOverflow => { + panic!( + "`limit` should never negatively overflow since we're parsing into a usize and we'd get Empty instead" + ) + } + IntErrorKind::Zero => { + panic!("zero is a valid `limit` so should have returned Ok() when parsing") + } + kind => panic!("unimplemented IntErrorKind variant: {:?}", kind), + }, + }; + + self.emit_err(LimitInvalid { span: self.attr_span, value_span: nv.value_span, error_str }); + + None + } +} pub(crate) struct CrateNameParser; @@ -11,8 +45,8 @@ impl<S: Stage> SingleAttributeParser<S> for CrateNameParser { const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name"); const TYPE: AttributeType = AttributeType::CrateLevel; - // FIXME: crate name is allowed on all targets and ignored, - // even though it should only be valid on crates of course + // because it's a crate-level attribute, we already warn about it. + // Putting target limitations here would give duplicate warnings const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> { @@ -34,3 +68,135 @@ impl<S: Stage> SingleAttributeParser<S> for CrateNameParser { }) } } + +pub(crate) struct RecursionLimitParser; + +impl<S: Stage> SingleAttributeParser<S> for RecursionLimitParser { + const PATH: &[Symbol] = &[sym::recursion_limit]; + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError; + const TEMPLATE: AttributeTemplate = template!(NameValueStr: "N", "https://doc.rust-lang.org/reference/attributes/limits.html#the-recursion_limit-attribute"); + const TYPE: AttributeType = AttributeType::CrateLevel; + + // because it's a crate-level attribute, we already warn about it. 
+ // Putting target limitations here would give duplicate warnings + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); + + fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> { + let ArgParser::NameValue(nv) = args else { + cx.expected_name_value(cx.attr_span, None); + return None; + }; + + Some(AttributeKind::RecursionLimit { + limit: cx.parse_limit_int(nv)?, + attr_span: cx.attr_span, + limit_span: nv.value_span, + }) + } +} + +pub(crate) struct MoveSizeLimitParser; + +impl<S: Stage> SingleAttributeParser<S> for MoveSizeLimitParser { + const PATH: &[Symbol] = &[sym::move_size_limit]; + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error; + const TEMPLATE: AttributeTemplate = template!(NameValueStr: "N"); + const TYPE: AttributeType = AttributeType::CrateLevel; + + // because it's a crate-level attribute, we already warn about it. + // Putting target limitations here would give duplicate warnings + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); + + fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> { + let ArgParser::NameValue(nv) = args else { + cx.expected_name_value(cx.attr_span, None); + return None; + }; + + Some(AttributeKind::MoveSizeLimit { + limit: cx.parse_limit_int(nv)?, + attr_span: cx.attr_span, + limit_span: nv.value_span, + }) + } +} + +pub(crate) struct TypeLengthLimitParser; + +impl<S: Stage> SingleAttributeParser<S> for TypeLengthLimitParser { + const PATH: &[Symbol] = &[sym::type_length_limit]; + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError; + const TEMPLATE: AttributeTemplate = template!(NameValueStr: "N"); + const TYPE: AttributeType = AttributeType::CrateLevel; + + // because it's a crate-level attribute, we already warn about it. + // Putting target limitations here would give duplicate warnings + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); + + fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> { + let ArgParser::NameValue(nv) = args else { + cx.expected_name_value(cx.attr_span, None); + return None; + }; + + Some(AttributeKind::TypeLengthLimit { + limit: cx.parse_limit_int(nv)?, + attr_span: cx.attr_span, + limit_span: nv.value_span, + }) + } +} + +pub(crate) struct PatternComplexityLimitParser; + +impl<S: Stage> SingleAttributeParser<S> for PatternComplexityLimitParser { + const PATH: &[Symbol] = &[sym::pattern_complexity_limit]; + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error; + const TEMPLATE: AttributeTemplate = template!(NameValueStr: "N"); + const TYPE: AttributeType = AttributeType::CrateLevel; + + // because it's a crate-level attribute, we already warn about it. 
+ // Putting target limitations here would give duplicate warnings + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); + + fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> { + let ArgParser::NameValue(nv) = args else { + cx.expected_name_value(cx.attr_span, None); + return None; + }; + + Some(AttributeKind::PatternComplexityLimit { + limit: cx.parse_limit_int(nv)?, + attr_span: cx.attr_span, + limit_span: nv.value_span, + }) + } +} + +pub(crate) struct NoCoreParser; + +impl<S: Stage> NoArgsAttributeParser<S> for NoCoreParser { + const PATH: &[Symbol] = &[sym::no_core]; + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn; + // because it's a crate-level attribute, we already warn about it. + // Putting target limitations here would give duplicate warnings + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); + const CREATE: fn(Span) -> AttributeKind = AttributeKind::NoCore; + const TYPE: AttributeType = AttributeType::CrateLevel; +} + +pub(crate) struct NoStdParser; + +impl<S: Stage> NoArgsAttributeParser<S> for NoStdParser { + const PATH: &[Symbol] = &[sym::no_std]; + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn; + // because it's a crate-level attribute, we already warn about it. + // Putting target limitations here would give duplicate warnings + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(ALL_TARGETS); + const CREATE: fn(Span) -> AttributeKind = AttributeKind::NoStd; + const TYPE: AttributeType = AttributeType::CrateLevel; +} diff --git a/compiler/rustc_attr_parsing/src/attributes/prelude.rs b/compiler/rustc_attr_parsing/src/attributes/prelude.rs index 6aef7e7a67b..8f040ffb9d4 100644 --- a/compiler/rustc_attr_parsing/src/attributes/prelude.rs +++ b/compiler/rustc_attr_parsing/src/attributes/prelude.rs @@ -1,8 +1,7 @@ -// templates -#[doc(hidden)] -pub(super) use rustc_feature::{AttributeTemplate, template}; // data structures #[doc(hidden)] +pub(super) use rustc_feature::{AttributeTemplate, AttributeType, template}; +#[doc(hidden)] pub(super) use rustc_hir::attrs::AttributeKind; #[doc(hidden)] pub(super) use rustc_hir::lints::AttributeLintKind; diff --git a/compiler/rustc_attr_parsing/src/attributes/repr.rs b/compiler/rustc_attr_parsing/src/attributes/repr.rs index 23aabd15597..0330e2515c7 100644 --- a/compiler/rustc_attr_parsing/src/attributes/repr.rs +++ b/compiler/rustc_attr_parsing/src/attributes/repr.rs @@ -331,3 +331,30 @@ impl<S: Stage> AttributeParser<S> for AlignParser { Some(AttributeKind::Align { align, span }) } } + +#[derive(Default)] +pub(crate) struct AlignStaticParser(AlignParser); + +impl AlignStaticParser { + const PATH: &'static [Symbol] = &[sym::rustc_align_static]; + const TEMPLATE: AttributeTemplate = AlignParser::TEMPLATE; + + fn parse<'c, S: Stage>( + &mut self, + cx: &'c mut AcceptContext<'_, '_, S>, + args: &'c ArgParser<'_>, + ) { + self.0.parse(cx, args) + } +} + +impl<S: Stage> AttributeParser<S> for AlignStaticParser { + const ATTRIBUTES: AcceptMapping<Self, S> = &[(Self::PATH, Self::TEMPLATE, Self::parse)]; + const ALLOWED_TARGETS: AllowedTargets = + AllowedTargets::AllowList(&[Allow(Target::Static), Allow(Target::ForeignStatic)]); + + fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> { + let (align, span) = self.0.0?; + Some(AttributeKind::Align { align, span }) + } +} diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index 
7f5b810f244..b3ab1d3edd6 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -24,7 +24,10 @@ use crate::attributes::codegen_attrs::{ UsedParser, }; use crate::attributes::confusables::ConfusablesParser; -use crate::attributes::crate_level::CrateNameParser; +use crate::attributes::crate_level::{ + CrateNameParser, MoveSizeLimitParser, NoCoreParser, NoStdParser, PatternComplexityLimitParser, + RecursionLimitParser, TypeLengthLimitParser, +}; use crate::attributes::deprecation::DeprecationParser; use crate::attributes::dummy::DummyParser; use crate::attributes::inline::{InlineParser, RustcForceInlineParser}; @@ -47,7 +50,7 @@ use crate::attributes::proc_macro_attrs::{ ProcMacroAttributeParser, ProcMacroDeriveParser, ProcMacroParser, RustcBuiltinMacroParser, }; use crate::attributes::prototype::CustomMirParser; -use crate::attributes::repr::{AlignParser, ReprParser}; +use crate::attributes::repr::{AlignParser, AlignStaticParser, ReprParser}; use crate::attributes::rustc_internal::{ RustcLayoutScalarValidRangeEnd, RustcLayoutScalarValidRangeStart, RustcObjectLifetimeDefaultParser, @@ -149,6 +152,7 @@ attribute_parsers!( pub(crate) static ATTRIBUTE_PARSERS = [ // tidy-alphabetical-start AlignParser, + AlignStaticParser, BodyStabilityParser, ConfusablesParser, ConstStabilityParser, @@ -181,10 +185,13 @@ attribute_parsers!( Single<LinkOrdinalParser>, Single<LinkSectionParser>, Single<LinkageParser>, + Single<MoveSizeLimitParser>, Single<MustUseParser>, Single<OptimizeParser>, Single<PathAttributeParser>, + Single<PatternComplexityLimitParser>, Single<ProcMacroDeriveParser>, + Single<RecursionLimitParser>, Single<RustcBuiltinMacroParser>, Single<RustcForceInlineParser>, Single<RustcLayoutScalarValidRangeEnd>, @@ -194,6 +201,7 @@ attribute_parsers!( Single<ShouldPanicParser>, Single<SkipDuringMethodDispatchParser>, Single<TransparencyParser>, + Single<TypeLengthLimitParser>, Single<WithoutArgs<AllowIncoherentImplParser>>, Single<WithoutArgs<AllowInternalUnsafeParser>>, Single<WithoutArgs<AsPtrParser>>, @@ -215,8 +223,10 @@ attribute_parsers!( Single<WithoutArgs<MacroEscapeParser>>, Single<WithoutArgs<MarkerParser>>, Single<WithoutArgs<MayDangleParser>>, + Single<WithoutArgs<NoCoreParser>>, Single<WithoutArgs<NoImplicitPreludeParser>>, Single<WithoutArgs<NoMangleParser>>, + Single<WithoutArgs<NoStdParser>>, Single<WithoutArgs<NonExhaustiveParser>>, Single<WithoutArgs<ParenSugarParser>>, Single<WithoutArgs<PassByValueParser>>, @@ -346,7 +356,10 @@ impl<'f, 'sess: 'f, S: Stage> SharedContext<'f, 'sess, S> { /// must be delayed until after HIR is built. This method will take care of the details of /// that. pub(crate) fn emit_lint(&mut self, lint: AttributeLintKind, span: Span) { - if matches!(self.stage.should_emit(), ShouldEmit::Nothing) { + if !matches!( + self.stage.should_emit(), + ShouldEmit::ErrorsAndLints | ShouldEmit::EarlyFatal { also_emit_lints: true } + ) { return; } let id = self.target_id; @@ -670,20 +683,20 @@ pub enum ShouldEmit { /// /// Only relevant when early parsing, in late parsing equivalent to `ErrorsAndLints`. /// Late parsing is never fatal, and instead tries to emit as many diagnostics as possible. - EarlyFatal, + EarlyFatal { also_emit_lints: bool }, /// The operation will emit errors and lints. /// This is usually what you need. ErrorsAndLints, /// The operation will emit *not* errors and lints. - /// Use this if you are *sure* that this operation will be called at a different time with `ShouldEmit::Emit`. 
+ /// Use this if you are *sure* that this operation will be called at a different time with `ShouldEmit::ErrorsAndLints`. Nothing, } impl ShouldEmit { pub(crate) fn emit_err(&self, diag: Diag<'_>) -> ErrorGuaranteed { match self { - ShouldEmit::EarlyFatal if diag.level() == Level::DelayedBug => diag.emit(), - ShouldEmit::EarlyFatal => diag.upgrade_to_fatal().emit(), + ShouldEmit::EarlyFatal { .. } if diag.level() == Level::DelayedBug => diag.emit(), + ShouldEmit::EarlyFatal { .. } => diag.upgrade_to_fatal().emit(), ShouldEmit::ErrorsAndLints => diag.emit(), ShouldEmit::Nothing => diag.delay_as_bug(), } diff --git a/compiler/rustc_attr_parsing/src/session_diagnostics.rs b/compiler/rustc_attr_parsing/src/session_diagnostics.rs index a9dee23bf6a..32ea9005a97 100644 --- a/compiler/rustc_attr_parsing/src/session_diagnostics.rs +++ b/compiler/rustc_attr_parsing/src/session_diagnostics.rs @@ -930,3 +930,13 @@ pub(crate) struct ImportNameTypeRaw { #[primary_span] pub span: Span, } + +#[derive(Diagnostic)] +#[diag(attr_parsing_limit_invalid)] +pub(crate) struct LimitInvalid<'a> { + #[primary_span] + pub span: Span, + #[label] + pub value_span: Span, + pub error_str: &'a str, +} diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs index ea264c8064a..6d69040c711 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs @@ -716,7 +716,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { return (false, false, None); }; - let implemented_trait_item = self.infcx.tcx.associated_item(my_def).trait_item_def_id; + let implemented_trait_item = self.infcx.tcx.trait_item_of(my_def); ( true, diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index 9990f4cb3f2..f57456949bb 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -615,7 +615,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } // Type-test failed. Report the error. - let erased_generic_kind = infcx.tcx.erase_regions(type_test.generic_kind); + let erased_generic_kind = infcx.tcx.erase_and_anonymize_regions(type_test.generic_kind); // Skip duplicate-ish errors. if deduplicate_errors.insert(( diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types/mod.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types/mod.rs index 72615cb33b3..0af636aa734 100644 --- a/compiler/rustc_borrowck/src/region_infer/opaque_types/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/opaque_types/mod.rs @@ -341,7 +341,7 @@ fn compute_concrete_types_from_defining_uses<'tcx>( // // FIXME(-Znext-solver): This isn't necessary after all. We can remove this check again. 
if let Some((prev_decl_key, prev_span)) = decls_modulo_regions.insert( - rcx.infcx.tcx.erase_regions(opaque_type_key), + rcx.infcx.tcx.erase_and_anonymize_regions(opaque_type_key), (opaque_type_key, hidden_type.span), ) && let Some((arg1, arg2)) = std::iter::zip( prev_decl_key.iter_captured_args(infcx.tcx).map(|(_, arg)| arg), @@ -503,7 +503,16 @@ pub(crate) fn apply_computed_concrete_opaque_types<'tcx>( let mut errors = Vec::new(); for &(key, hidden_type) in opaque_types { let Some(expected) = get_concrete_opaque_type(concrete_opaque_types, key.def_id) else { - assert!(tcx.use_typing_mode_borrowck(), "non-defining use in defining scope"); + if !tcx.use_typing_mode_borrowck() { + if let ty::Alias(ty::Opaque, alias_ty) = hidden_type.ty.kind() + && alias_ty.def_id == key.def_id.to_def_id() + && alias_ty.args == key.args + { + continue; + } else { + unreachable!("non-defining use in defining scope"); + } + } errors.push(DeferredOpaqueTypeError::NonDefiningUseInDefiningScope { span: hidden_type.span, opaque_type_key: key, diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index 02be78f90b0..8c5447fe1e9 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -1892,7 +1892,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { if is_diverging { // The signature in this call can reference region variables, // so erase them before calling a query. - let output_ty = self.tcx().erase_regions(sig.output()); + let output_ty = self.tcx().erase_and_anonymize_regions(sig.output()); if !output_ty .is_privately_uninhabited(self.tcx(), self.infcx.typing_env(self.infcx.param_env)) { @@ -1986,7 +1986,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { let op_arg_ty = self.normalize(op_arg_ty, term_location); let category = if call_source.from_hir_call() { - ConstraintCategory::CallArgument(Some(self.infcx.tcx.erase_regions(func_ty))) + ConstraintCategory::CallArgument(Some( + self.infcx.tcx.erase_and_anonymize_regions(func_ty), + )) } else { ConstraintCategory::Boring }; @@ -2120,7 +2122,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // Erase the regions from `ty` to get a global type. The // `Sized` bound in no way depends on precise regions, so this // shouldn't affect `is_sized`. - let erased_ty = tcx.erase_regions(ty); + let erased_ty = tcx.erase_and_anonymize_regions(ty); // FIXME(#132279): Using `Ty::is_sized` causes us to incorrectly handle opaques here. if !erased_ty.is_sized(tcx, self.infcx.typing_env(self.infcx.param_env)) { // in current MIR construction, all non-control-flow rvalue diff --git a/compiler/rustc_builtin_macros/src/assert.rs b/compiler/rustc_builtin_macros/src/assert.rs index 013258a1b4e..855da5caa31 100644 --- a/compiler/rustc_builtin_macros/src/assert.rs +++ b/compiler/rustc_builtin_macros/src/assert.rs @@ -1,8 +1,8 @@ mod context; -use rustc_ast::token::{self, Delimiter}; +use rustc_ast::token::Delimiter; use rustc_ast::tokenstream::{DelimSpan, TokenStream}; -use rustc_ast::{DelimArgs, Expr, ExprKind, MacCall, Path, PathSegment}; +use rustc_ast::{DelimArgs, Expr, ExprKind, MacCall, Path, PathSegment, UnOp, token}; use rustc_ast_pretty::pprust; use rustc_errors::PResult; use rustc_expand::base::{DummyResult, ExpandResult, ExtCtxt, MacEager, MacroExpanderResult}; @@ -29,7 +29,7 @@ pub(crate) fn expand_assert<'cx>( // `core::panic` and `std::panic` are different macros, so we use call-site // context to pick up whichever is currently in scope. 
- let call_site_span = cx.with_call_site_ctxt(cond_expr.span); + let call_site_span = cx.with_call_site_ctxt(span); let panic_path = || { if use_panic_2021(span) { @@ -63,7 +63,7 @@ pub(crate) fn expand_assert<'cx>( }), })), ); - assert_cond_check(cx, call_site_span, cond_expr, then) + expr_if_not(cx, call_site_span, cond_expr, then, None) } // If `generic_assert` is enabled, generates rich captured outputs // @@ -88,33 +88,26 @@ pub(crate) fn expand_assert<'cx>( )), )], ); - assert_cond_check(cx, call_site_span, cond_expr, then) + expr_if_not(cx, call_site_span, cond_expr, then, None) }; ExpandResult::Ready(MacEager::expr(expr)) } -/// `assert!($cond_expr, $custom_message)` struct Assert { cond_expr: Box<Expr>, custom_message: Option<TokenStream>, } -/// `match <cond> { true => {} _ => <then> }` -fn assert_cond_check(cx: &ExtCtxt<'_>, span: Span, cond: Box<Expr>, then: Box<Expr>) -> Box<Expr> { - // Instead of expanding to `if !<cond> { <then> }`, we expand to - // `match <cond> { true => {} _ => <then> }`. - // This allows us to always complain about mismatched types instead of "cannot apply unary - // operator `!` to type `X`" when passing an invalid `<cond>`, while also allowing `<cond>` to - // be `&true`. - let els = cx.expr_block(cx.block(span, thin_vec![])); - let mut arms = thin_vec![]; - arms.push(cx.arm(span, cx.pat_lit(span, cx.expr_bool(span, true)), els)); - arms.push(cx.arm(span, cx.pat_wild(span), then)); - - // We wrap the `match` in a statement to limit the length of any borrows introduced in the - // condition. - cx.expr_block(cx.block(span, [cx.stmt_expr(cx.expr_match(span, cond, arms))].into())) +// if !{ ... } { ... } else { ... } +fn expr_if_not( + cx: &ExtCtxt<'_>, + span: Span, + cond: Box<Expr>, + then: Box<Expr>, + els: Option<Box<Expr>>, +) -> Box<Expr> { + cx.expr_if(span, cx.expr(span, ExprKind::Unary(UnOp::Not, cond)), then, els) } fn parse_assert<'a>(cx: &ExtCtxt<'a>, sp: Span, stream: TokenStream) -> PResult<'a, Assert> { diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs index 6415e55e0b0..d70888205a5 100644 --- a/compiler/rustc_builtin_macros/src/format.rs +++ b/compiler/rustc_builtin_macros/src/format.rs @@ -565,9 +565,11 @@ fn make_format_args( &used, &args, &pieces, + &invalid_refs, detect_foreign_fmt, str_style, fmt_str, + uncooked_fmt_str.1.as_str(), fmt_span, ); } @@ -645,9 +647,11 @@ fn report_missing_placeholders( used: &[bool], args: &FormatArguments, pieces: &[parse::Piece<'_>], + invalid_refs: &[(usize, Option<Span>, PositionUsedAs, FormatArgPositionKind)], detect_foreign_fmt: bool, str_style: Option<usize>, fmt_str: &str, + uncooked_fmt_str: &str, fmt_span: Span, ) { let mut diag = if let &[(span, named)] = &unused[..] 
{ @@ -762,6 +766,35 @@ fn report_missing_placeholders( diag.span_label(fmt_span, "formatting specifier missing"); } + if !found_foreign && invalid_refs.is_empty() { + // Show example if user didn't use any format specifiers + let show_example = used.iter().all(|used| !used); + + if !show_example { + if unused.len() > 1 { + diag.note(format!("consider adding {} format specifiers", unused.len())); + } + } else { + let msg = if unused.len() == 1 { + "a format specifier".to_string() + } else { + format!("{} format specifiers", unused.len()) + }; + + let sugg = match str_style { + None => format!("\"{}{}\"", uncooked_fmt_str, "{}".repeat(unused.len())), + Some(n_hashes) => format!( + "r{hashes}\"{uncooked_fmt_str}{fmt_specifiers}\"{hashes}", + hashes = "#".repeat(n_hashes), + fmt_specifiers = "{}".repeat(unused.len()) + ), + }; + let msg = format!("format specifiers use curly braces, consider adding {msg}"); + + diag.span_suggestion_verbose(fmt_span, msg, sugg, Applicability::MaybeIncorrect); + } + } + diag.emit(); } diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs index 37bab5be542..11b868f81a9 100644 --- a/compiler/rustc_builtin_macros/src/source_util.rs +++ b/compiler/rustc_builtin_macros/src/source_util.rs @@ -1,3 +1,5 @@ +//! The implementation of built-in macros which relate to the file system. + use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::Arc; @@ -11,9 +13,11 @@ use rustc_expand::base::{ }; use rustc_expand::module::DirOwnership; use rustc_lint_defs::BuiltinLintDiag; -use rustc_parse::parser::{ForceCollect, Parser}; +use rustc_parse::lexer::StripTokens; +use rustc_parse::parser::ForceCollect; use rustc_parse::{new_parser_from_file, unwrap_or_emit_fatal, utf8_error}; use rustc_session::lint::builtin::INCOMPLETE_INCLUDE; +use rustc_session::parse::ParseSess; use rustc_span::source_map::SourceMap; use rustc_span::{ByteSymbol, Pos, Span, Symbol}; use smallvec::SmallVec; @@ -23,11 +27,7 @@ use crate::util::{ check_zero_tts, get_single_str_from_tts, get_single_str_spanned_from_tts, parse_expr, }; -// These macros all relate to the file system; they either return -// the column/row/filename of the expression, or they include -// a given file into the current one. - -/// line!(): expands to the current line number +/// Expand `line!()` to the current line number. pub(crate) fn expand_line( cx: &mut ExtCtxt<'_>, sp: Span, @@ -42,7 +42,7 @@ pub(crate) fn expand_line( ExpandResult::Ready(MacEager::expr(cx.expr_u32(topmost, loc.line as u32))) } -/* column!(): expands to the current column number */ +/// Expand `column!()` to the current column number. pub(crate) fn expand_column( cx: &mut ExtCtxt<'_>, sp: Span, @@ -57,9 +57,7 @@ pub(crate) fn expand_column( ExpandResult::Ready(MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32 + 1))) } -/// file!(): expands to the current filename */ -/// The source_file (`loc.file`) contains a bunch more information we could spit -/// out if we wanted. +/// Expand `file!()` to the current filename. pub(crate) fn expand_file( cx: &mut ExtCtxt<'_>, sp: Span, @@ -81,6 +79,7 @@ pub(crate) fn expand_file( ))) } +/// Expand `stringify!($input)`. pub(crate) fn expand_stringify( cx: &mut ExtCtxt<'_>, sp: Span, @@ -91,6 +90,7 @@ pub(crate) fn expand_stringify( ExpandResult::Ready(MacEager::expr(cx.expr_str(sp, Symbol::intern(&s)))) } +/// Expand `module_path!()` to (a textual representation of) the current module path. 
pub(crate) fn expand_mod( cx: &mut ExtCtxt<'_>, sp: Span, @@ -104,9 +104,9 @@ pub(crate) fn expand_mod( ExpandResult::Ready(MacEager::expr(cx.expr_str(sp, Symbol::intern(&string)))) } -/// include! : parse the given file as an expr -/// This is generally a bad idea because it's going to behave -/// unhygienically. +/// Expand `include!($input)`. +/// +/// This works in item and expression position. Notably, it doesn't work in pattern position. pub(crate) fn expand_include<'cx>( cx: &'cx mut ExtCtxt<'_>, sp: Span, @@ -116,39 +116,48 @@ pub(crate) fn expand_include<'cx>( let ExpandResult::Ready(mac) = get_single_str_from_tts(cx, sp, tts, "include!") else { return ExpandResult::Retry(()); }; - let file = match mac { - Ok(file) => file, + let path = match mac { + Ok(path) => path, Err(guar) => return ExpandResult::Ready(DummyResult::any(sp, guar)), }; // The file will be added to the code map by the parser - let file = match resolve_path(&cx.sess, file.as_str(), sp) { - Ok(f) => f, + let path = match resolve_path(&cx.sess, path.as_str(), sp) { + Ok(path) => path, Err(err) => { let guar = err.emit(); return ExpandResult::Ready(DummyResult::any(sp, guar)); } }; - let p = unwrap_or_emit_fatal(new_parser_from_file(cx.psess(), &file, Some(sp))); // If in the included file we have e.g., `mod bar;`, - // then the path of `bar.rs` should be relative to the directory of `file`. + // then the path of `bar.rs` should be relative to the directory of `path`. // See https://github.com/rust-lang/rust/pull/69838/files#r395217057 for a discussion. // `MacroExpander::fully_expand_fragment` later restores, so "stack discipline" is maintained. - let dir_path = file.parent().unwrap_or(&file).to_owned(); + let dir_path = path.parent().unwrap_or(&path).to_owned(); cx.current_expansion.module = Rc::new(cx.current_expansion.module.with_dir_path(dir_path)); cx.current_expansion.dir_ownership = DirOwnership::Owned { relative: None }; struct ExpandInclude<'a> { - p: Parser<'a>, + psess: &'a ParseSess, + path: PathBuf, node_id: ast::NodeId, + span: Span, } impl<'a> MacResult for ExpandInclude<'a> { - fn make_expr(mut self: Box<ExpandInclude<'a>>) -> Option<Box<ast::Expr>> { - let expr = parse_expr(&mut self.p).ok()?; - if self.p.token != token::Eof { - self.p.psess.buffer_lint( + fn make_expr(self: Box<ExpandInclude<'a>>) -> Option<Box<ast::Expr>> { + let mut p = unwrap_or_emit_fatal(new_parser_from_file( + self.psess, + &self.path, + // Don't strip frontmatter for backward compatibility, `---` may be the start of a + // manifold negation. FIXME: Ideally, we wouldn't strip shebangs here either. 
+ StripTokens::Shebang, + Some(self.span), + )); + let expr = parse_expr(&mut p).ok()?; + if p.token != token::Eof { + p.psess.buffer_lint( INCOMPLETE_INCLUDE, - self.p.token.span, + p.token.span, self.node_id, BuiltinLintDiag::IncompleteInclude, ); @@ -156,24 +165,27 @@ pub(crate) fn expand_include<'cx>( Some(expr) } - fn make_items(mut self: Box<ExpandInclude<'a>>) -> Option<SmallVec<[Box<ast::Item>; 1]>> { + fn make_items(self: Box<ExpandInclude<'a>>) -> Option<SmallVec<[Box<ast::Item>; 1]>> { + let mut p = unwrap_or_emit_fatal(new_parser_from_file( + self.psess, + &self.path, + StripTokens::ShebangAndFrontmatter, + Some(self.span), + )); let mut ret = SmallVec::new(); loop { - match self.p.parse_item(ForceCollect::No) { + match p.parse_item(ForceCollect::No) { Err(err) => { err.emit(); break; } Ok(Some(item)) => ret.push(item), Ok(None) => { - if self.p.token != token::Eof { - self.p - .dcx() - .create_err(errors::ExpectedItem { - span: self.p.token.span, - token: &pprust::token_to_string(&self.p.token), - }) - .emit(); + if p.token != token::Eof { + p.dcx().emit_err(errors::ExpectedItem { + span: p.token.span, + token: &pprust::token_to_string(&p.token), + }); } break; @@ -184,10 +196,17 @@ pub(crate) fn expand_include<'cx>( } } - ExpandResult::Ready(Box::new(ExpandInclude { p, node_id: cx.current_expansion.lint_node_id })) + ExpandResult::Ready(Box::new(ExpandInclude { + psess: cx.psess(), + path, + node_id: cx.current_expansion.lint_node_id, + span: sp, + })) } -/// `include_str!`: read the given file, insert it as a literal string expr +/// Expand `include_str!($input)` to the content of the UTF-8-encoded file given by path `$input` as a string literal. +/// +/// This works in expression, pattern and statement position. pub(crate) fn expand_include_str( cx: &mut ExtCtxt<'_>, sp: Span, @@ -206,6 +225,7 @@ pub(crate) fn expand_include_str( Ok((bytes, bsp)) => match std::str::from_utf8(&bytes) { Ok(src) => { let interned_src = Symbol::intern(src); + // MacEager converts the expr into a pat if need be. MacEager::expr(cx.expr_str(cx.with_def_site_ctxt(bsp), interned_src)) } Err(utf8err) => { @@ -218,6 +238,9 @@ pub(crate) fn expand_include_str( }) } +/// Expand `include_bytes!($input)` to the content of the file given by path `$input`. +/// +/// This works in expression, pattern and statement position. pub(crate) fn expand_include_bytes( cx: &mut ExtCtxt<'_>, sp: Span, @@ -237,6 +260,7 @@ pub(crate) fn expand_include_bytes( // Don't care about getting the span for the raw bytes, // because the console can't really show them anyway. let expr = cx.expr(sp, ast::ExprKind::IncludedBytes(ByteSymbol::intern(&bytes))); + // MacEager converts the expr into a pat if need be. 
MacEager::expr(expr) } Err(dummy) => dummy, diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh index 52e02c857c7..62f1cc6a893 100755 --- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh +++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh @@ -121,7 +121,7 @@ rm tests/ui/abi/large-byval-align.rs # exceeds implementation limit of Cranelift # ============================================================ rm -r tests/run-make/remap-path-prefix-dwarf # requires llvm-dwarfdump rm -r tests/run-make/strip # same -rm -r tests/run-make/compiler-builtins # Expects lib/rustlib/src/rust to contains the standard library source +rm -r tests/run-make-cargo/compiler-builtins # Expects lib/rustlib/src/rust to contains the standard library source rm -r tests/run-make/translation # same rm -r tests/run-make/missing-unstable-trait-bound # This disables support for unstable features, but running cg_clif needs some unstable features rm -r tests/run-make/const-trait-stable-toolchain # same @@ -166,5 +166,5 @@ index 073116933bd..c3e4578204d 100644 EOF echo "[TEST] rustc test suite" -./x.py test --stage 0 --test-args=--no-capture tests/{codegen-units,run-make,ui,incremental} +./x.py test --stage 0 --test-args=--no-capture tests/{codegen-units,run-make,run-make-cargo,ui,incremental} popd diff --git a/compiler/rustc_codegen_gcc/build_system/src/test.rs b/compiler/rustc_codegen_gcc/build_system/src/test.rs index 3dd3fce2eec..1823aa71f40 100644 --- a/compiler/rustc_codegen_gcc/build_system/src/test.rs +++ b/compiler/rustc_codegen_gcc/build_system/src/test.rs @@ -1083,11 +1083,12 @@ where fn test_rustc(env: &Env, args: &TestArg) -> Result<(), String> { test_rustc_inner(env, args, |_| Ok(false), false, "run-make")?; + test_rustc_inner(env, args, |_| Ok(false), false, "run-make-cargo")?; test_rustc_inner(env, args, |_| Ok(false), false, "ui") } fn test_failing_rustc(env: &Env, args: &TestArg) -> Result<(), String> { - let result1 = test_rustc_inner( + let run_make_result = test_rustc_inner( env, args, retain_files_callback("tests/failing-run-make-tests.txt", "run-make"), @@ -1095,7 +1096,15 @@ fn test_failing_rustc(env: &Env, args: &TestArg) -> Result<(), String> { "run-make", ); - let result2 = test_rustc_inner( + let run_make_cargo_result = test_rustc_inner( + env, + args, + retain_files_callback("tests/failing-run-make-tests.txt", "run-make-cargo"), + false, + "run-make", + ); + + let ui_result = test_rustc_inner( env, args, retain_files_callback("tests/failing-ui-tests.txt", "ui"), @@ -1103,7 +1112,7 @@ fn test_failing_rustc(env: &Env, args: &TestArg) -> Result<(), String> { "ui", ); - result1.and(result2) + run_make_result.and(run_make_cargo_result).and(ui_result) } fn test_successful_rustc(env: &Env, args: &TestArg) -> Result<(), String> { @@ -1120,6 +1129,13 @@ fn test_successful_rustc(env: &Env, args: &TestArg) -> Result<(), String> { remove_files_callback("tests/failing-run-make-tests.txt", "run-make"), false, "run-make", + )?; + test_rustc_inner( + env, + args, + remove_files_callback("tests/failing-run-make-tests.txt", "run-make-cargo"), + false, + "run-make-cargo", ) } diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index fcee6b6df62..d29bba2570f 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -204,7 +204,7 @@ fn fat_lto( let path = tmp_path.path().to_path_buf().join(&module.name); let 
path = path.to_str().expect("path"); let context = &module.module_llvm.context; - let config = cgcx.config(module.kind); + let config = &cgcx.module_config; // NOTE: we need to set the optimization level here in order for LTO to do its job. context.set_optimization_level(to_gcc_opt_level(config.opt_level)); context.add_command_line_option("-flto=auto"); @@ -305,12 +305,9 @@ pub(crate) fn run_thin( ) } -pub(crate) fn prepare_thin( - module: ModuleCodegen<GccContext>, - _emit_summary: bool, -) -> (String, ThinBuffer) { +pub(crate) fn prepare_thin(module: ModuleCodegen<GccContext>) -> (String, ThinBuffer) { let name = module.name; - //let buffer = ThinBuffer::new(module.module_llvm.context, true, emit_summary); + //let buffer = ThinBuffer::new(module.module_llvm.context, true); let buffer = ThinBuffer::new(&module.module_llvm.context); (name, buffer) } @@ -650,10 +647,6 @@ impl ThinBufferMethods for ThinBuffer { fn data(&self) -> &[u8] { &[] } - - fn thin_link_data(&self) -> &[u8] { - unimplemented!(); - } } pub struct ThinData; //(Arc<TempDir>); diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index 619277eba8b..7fe8fc122b3 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -81,6 +81,8 @@ impl<'gcc, 'tcx> StaticCodegenMethods for CodegenCx<'gcc, 'tcx> { if global.to_rvalue().get_type() != val_llty { global.to_rvalue().set_type(val_llty); } + + // NOTE: Alignment from attributes has already been applied to the allocation. set_global_alignment(self, global, alloc.align); global.global_set_initializer_rvalue(value); diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 2d7df79ba95..f76f933cad4 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -408,11 +408,8 @@ impl WriteBackendMethods for GccCodegenBackend { back::write::codegen(cgcx, module, config) } - fn prepare_thin( - module: ModuleCodegen<Self::Module>, - emit_summary: bool, - ) -> (String, Self::ThinBuffer) { - back::lto::prepare_thin(module, emit_summary) + fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) { + back::lto::prepare_thin(module) } fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) { diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 093f902bc3d..93202483eed 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -240,7 +240,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // Make sure lifetimes are erased, to avoid generating distinct LLVM // types for Rust types that only differ in the choice of lifetimes. - let normal_ty = cx.tcx.erase_regions(self.ty); + let normal_ty = cx.tcx.erase_and_anonymize_regions(self.ty); let mut defer = None; let ty = if self.ty != normal_ty { diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 9f2d37d39d8..573c51a9539 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -296,6 +296,19 @@ pub(crate) fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribu .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu)) } +/// Get the `target-features` LLVM attribute. 
+pub(crate) fn target_features_attr<'ll>( + cx: &CodegenCx<'ll, '_>, + function_features: Vec<String>, +) -> Option<&'ll Attribute> { + let global_features = cx.tcx.global_backend_features(()).iter().map(String::as_str); + let function_features = function_features.iter().map(String::as_str); + let target_features = + global_features.chain(function_features).intersperse(",").collect::<String>(); + (!target_features.is_empty()) + .then(|| llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features)) +} + /// Get the `NonLazyBind` LLVM attribute, /// if the codegen options allow skipping the PLT. pub(crate) fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { @@ -523,14 +536,7 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( } } - let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str()); - let function_features = function_features.iter().map(|s| s.as_str()); - let target_features: String = - global_features.chain(function_features).intersperse(",").collect(); - - if !target_features.is_empty() { - to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features)); - } + to_add.extend(target_features_attr(cx, function_features)); attributes::apply_to_llfn(llfn, Function, &to_add); } diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index fc38c4f3e51..f571716d9dd 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -43,9 +43,7 @@ fn prepare_lto( .map(|symbol| CString::new(symbol.to_owned()).unwrap()) .collect::<Vec<CString>>(); - if cgcx.regular_module_config.instrument_coverage - || cgcx.regular_module_config.pgo_gen.enabled() - { + if cgcx.module_config.instrument_coverage || cgcx.module_config.pgo_gen.enabled() { // These are weak symbols that point to the profile version and the // profile name, which need to be treated as exported so LTO doesn't nix // them. @@ -55,15 +53,15 @@ fn prepare_lto( symbols_below_threshold.extend(PROFILER_WEAK_SYMBOLS.iter().map(|&sym| sym.to_owned())); } - if cgcx.regular_module_config.sanitizer.contains(SanitizerSet::MEMORY) { + if cgcx.module_config.sanitizer.contains(SanitizerSet::MEMORY) { let mut msan_weak_symbols = Vec::new(); // Similar to profiling, preserve weak msan symbol during LTO. 
- if cgcx.regular_module_config.sanitizer_recover.contains(SanitizerSet::MEMORY) { + if cgcx.module_config.sanitizer_recover.contains(SanitizerSet::MEMORY) { msan_weak_symbols.push(c"__msan_keep_going"); } - if cgcx.regular_module_config.sanitizer_memory_track_origins != 0 { + if cgcx.module_config.sanitizer_memory_track_origins != 0 { msan_weak_symbols.push(c"__msan_track_origins"); } @@ -187,12 +185,9 @@ pub(crate) fn run_thin( thin_lto(cgcx, dcx, modules, upstream_modules, cached_modules, &symbols_below_threshold) } -pub(crate) fn prepare_thin( - module: ModuleCodegen<ModuleLlvm>, - emit_summary: bool, -) -> (String, ThinBuffer) { +pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) { let name = module.name; - let buffer = ThinBuffer::new(module.module_llvm.llmod(), true, emit_summary); + let buffer = ThinBuffer::new(module.module_llvm.llmod(), true); (name, buffer) } @@ -583,7 +578,7 @@ pub(crate) fn run_pass_manager( thin: bool, ) { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name); - let config = cgcx.config(module.kind); + let config = &cgcx.module_config; // Now we have one massive module inside of llmod. Time to run the // LTO-specific optimization passes that LLVM provides. @@ -689,9 +684,9 @@ unsafe impl Send for ThinBuffer {} unsafe impl Sync for ThinBuffer {} impl ThinBuffer { - pub(crate) fn new(m: &llvm::Module, is_thin: bool, emit_summary: bool) -> ThinBuffer { + pub(crate) fn new(m: &llvm::Module, is_thin: bool) -> ThinBuffer { unsafe { - let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin, emit_summary); + let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin); ThinBuffer(buffer) } } @@ -700,21 +695,21 @@ impl ThinBuffer { let mut ptr = NonNull::new(ptr).unwrap(); ThinBuffer(unsafe { ptr.as_mut() }) } -} -impl ThinBufferMethods for ThinBuffer { - fn data(&self) -> &[u8] { + pub(crate) fn thin_link_data(&self) -> &[u8] { unsafe { - let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; - let len = llvm::LLVMRustThinLTOBufferLen(self.0); + let ptr = llvm::LLVMRustThinLTOBufferThinLinkDataPtr(self.0) as *const _; + let len = llvm::LLVMRustThinLTOBufferThinLinkDataLen(self.0); slice::from_raw_parts(ptr, len) } } +} - fn thin_link_data(&self) -> &[u8] { +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { unsafe { - let ptr = llvm::LLVMRustThinLTOBufferThinLinkDataPtr(self.0) as *const _; - let len = llvm::LLVMRustThinLTOBufferThinLinkDataLen(self.0); + let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; + let len = llvm::LLVMRustThinLTOBufferLen(self.0); slice::from_raw_parts(ptr, len) } } @@ -745,7 +740,7 @@ pub(crate) fn optimize_thin_module( let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx); let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm); // Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here. 
- if cgcx.config(ModuleKind::Regular).embed_bitcode() { + if cgcx.module_config.embed_bitcode() { module.thin_lto_buffer = Some(thin_module.data().to_vec()); } { diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 7ea2ae6673b..423f0da4878 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -837,7 +837,7 @@ pub(crate) fn codegen( "LLVM_module_codegen_make_bitcode", &*module.name, ); - ThinBuffer::new(llmod, config.emit_thin_lto, false) + ThinBuffer::new(llmod, config.emit_thin_lto) }; let data = thin.data(); let _timer = cgcx diff --git a/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs b/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs index 1280ab1442a..0737a18384b 100644 --- a/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs +++ b/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs @@ -193,7 +193,7 @@ fn gen_define_handling<'ll>( // reference) types. let num_ptr_types = types .iter() - .map(|&x| matches!(cx.type_kind(x), rustc_codegen_ssa::common::TypeKind::Pointer)) + .filter(|&x| matches!(cx.type_kind(x), rustc_codegen_ssa::common::TypeKind::Pointer)) .count(); // We do not know their size anymore at this level, so hardcode a placeholder. diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 9ec7b0f80ae..dc9bb743560 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -452,6 +452,8 @@ impl<'ll> CodegenCx<'ll, '_> { self.statics_to_rauw.borrow_mut().push((g, new_g)); new_g }; + + // NOTE: Alignment from attributes has already been applied to the allocation. set_global_alignment(self, g, alloc.align); llvm::set_initializer(g, v); diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 257c7b95666..a69fa54a54a 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -853,7 +853,7 @@ impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { let entry_name = self.sess().target.entry_name.as_ref(); if self.get_declared_value(entry_name).is_none() { - Some(self.declare_entry_fn( + let llfn = self.declare_entry_fn( entry_name, llvm::CallConv::from_conv( self.sess().target.entry_abi, @@ -861,7 +861,13 @@ impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { ), llvm::UnnamedAddr::Global, fn_type, - )) + ); + attributes::apply_to_llfn( + llfn, + llvm::AttributePlace::Function, + attributes::target_features_attr(self, vec![]).as_slice(), + ); + Some(llfn) } else { // If the symbol already exists, it is an error: for example, the user wrote // #[no_mangle] extern "C" fn main(..) 
{..} diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index caa3369f413..dc3a84b6a15 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -103,16 +103,17 @@ fn build_fixed_size_array_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, unique_type_id: UniqueTypeId<'tcx>, array_type: Ty<'tcx>, + span: Span, ) -> DINodeCreationResult<'ll> { let ty::Array(element_type, len) = array_type.kind() else { bug!("build_fixed_size_array_di_node() called with non-ty::Array type `{:?}`", array_type) }; - let element_type_di_node = type_di_node(cx, *element_type); + let element_type_di_node = spanned_type_di_node(cx, *element_type, span); return_if_di_node_created_in_meantime!(cx, unique_type_id); - let (size, align) = cx.size_and_align_of(array_type); + let (size, align) = cx.spanned_size_and_align_of(array_type, span); let upper_bound = len .try_to_target_usize(cx.tcx) @@ -447,7 +448,7 @@ pub(crate) fn spanned_type_di_node<'ll, 'tcx>( build_basic_type_di_node(cx, t) } ty::Tuple(elements) if elements.is_empty() => build_basic_type_di_node(cx, t), - ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t), + ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t, span), ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id), ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id), ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id), @@ -1435,7 +1436,7 @@ fn build_vtable_type_di_node<'ll, 'tcx>( let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref { let trait_ref = poly_trait_ref.with_self_ty(tcx, ty); - let trait_ref = tcx.erase_regions(trait_ref); + let trait_ref = tcx.erase_and_anonymize_regions(trait_ref); tcx.vtable_entries(trait_ref) } else { @@ -1562,7 +1563,7 @@ pub(crate) fn apply_vcall_visibility_metadata<'ll, 'tcx>( // Unwrap potential addrspacecast let vtable = find_vtable_behind_cast(vtable); let trait_ref_self = trait_ref.with_self_ty(cx.tcx, ty); - let trait_ref_self = cx.tcx.erase_regions(trait_ref_self); + let trait_ref_self = cx.tcx.erase_and_anonymize_regions(trait_ref_self); let trait_def_id = trait_ref_self.def_id; let trait_vis = cx.tcx.visibility(trait_def_id); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 49d3dedbeab..85f71f331a4 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -383,7 +383,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { | sym::rotate_left | sym::rotate_right | sym::saturating_add - | sym::saturating_sub => { + | sym::saturating_sub + | sym::unchecked_funnel_shl + | sym::unchecked_funnel_shr => { let ty = args[0].layout.ty; if !ty.is_integral() { tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { @@ -424,18 +426,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::bitreverse => { self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()]) } - sym::rotate_left | sym::rotate_right => { - let is_left = name == sym::rotate_left; - let val = args[0].immediate(); - let raw_shift = args[1].immediate(); - // rotate = funnel shift with first two args the same + sym::rotate_left + | sym::rotate_right + | sym::unchecked_funnel_shl + | sym::unchecked_funnel_shr => { + let is_left = name == sym::rotate_left || name == 
sym::unchecked_funnel_shl; + let lhs = args[0].immediate(); + let (rhs, raw_shift) = + if name == sym::rotate_left || name == sym::rotate_right { + // rotate = funnel shift with first two args the same + (lhs, args[1].immediate()) + } else { + (args[1].immediate(), args[2].immediate()) + }; let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' }); // llvm expects shift to be the same type as the values, but rust // always uses `u32`. - let raw_shift = self.intcast(raw_shift, self.val_ty(val), false); + let raw_shift = self.intcast(raw_shift, self.val_ty(lhs), false); - self.call_intrinsic(llvm_name, &[llty], &[val, val, raw_shift]) + self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs, raw_shift]) } sym::saturating_add | sym::saturating_sub => { let is_add = name == sym::saturating_add; diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 628cb34fd9e..6fb23d09843 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -211,11 +211,8 @@ impl WriteBackendMethods for LlvmCodegenBackend { ) -> CompiledModule { back::write::codegen(cgcx, module, config) } - fn prepare_thin( - module: ModuleCodegen<Self::Module>, - emit_summary: bool, - ) -> (String, Self::ThinBuffer) { - back::lto::prepare_thin(module, emit_summary) + fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) { + back::lto::prepare_thin(module) } fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) { (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod())) diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index b66fc157b3c..0679f55ab7f 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -2602,7 +2602,6 @@ unsafe extern "C" { pub(crate) fn LLVMRustThinLTOBufferCreate( M: &Module, is_thin: bool, - emit_summary: bool, ) -> &'static mut ThinLTOBuffer; pub(crate) fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer); pub(crate) fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char; diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 4e7096da502..84998b5499b 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -7,6 +7,7 @@ use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt}; +use rustc_span::{DUMMY_SP, Span}; use tracing::debug; use crate::common::*; @@ -149,7 +150,11 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> { } pub(crate) fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { - let layout = self.layout_of(ty); + self.spanned_size_and_align_of(ty, DUMMY_SP) + } + + pub(crate) fn spanned_size_and_align_of(&self, ty: Ty<'tcx>, span: Span) -> (Size, Align) { + let layout = self.spanned_layout_of(ty, span); (layout.size, layout.align.abi) } } @@ -226,7 +231,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { // Make sure lifetimes are erased, to avoid generating distinct LLVM // types for Rust types that only differ in the choice of lifetimes. 
- let normal_ty = cx.tcx.erase_regions(self.ty); + let normal_ty = cx.tcx.erase_and_anonymize_regions(self.ty); let mut defer = None; let llty = if self.ty != normal_ty { diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 7eb5d302058..ab08125217f 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -908,6 +908,21 @@ pub(super) fn emit_va_arg<'ll, 'tcx>( ) } "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty), + "arm" => { + // Types wider than 16 bytes are not currently supported. Clang has special logic for + // such types, but `VaArgSafe` is not implemented for any type that is this large. + assert!(bx.cx.size_of(target_ty).bytes() <= 16); + + emit_ptr_va_arg( + bx, + addr, + target_ty, + PassMode::Direct, + SlotSize::Bytes4, + AllowHigherAlign::Yes, + ForceRightAdjust::No, + ) + } "s390x" => emit_s390x_va_arg(bx, addr, target_ty), "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty), "powerpc64" | "powerpc64le" => emit_ptr_va_arg( diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml index 2dfbc581643..5462163f4ff 100644 --- a/compiler/rustc_codegen_ssa/Cargo.toml +++ b/compiler/rustc_codegen_ssa/Cargo.toml @@ -9,7 +9,7 @@ ar_archive_writer = "0.5" bitflags = "2.4.1" bstr = "1.11.3" # `cc` updates often break things, so we pin it here. Cargo enforces "max 1 semver-compat version -# per crate", so if you change this, you need to also change it in `rustc_llvm`. +# per crate", so if you change this, you need to also change it in `rustc_llvm` and `rustc_windows_rc`. cc = "=1.2.16" itertools = "0.12" pathdiff = "0.2.0" diff --git a/compiler/rustc_codegen_ssa/src/back/command.rs b/compiler/rustc_codegen_ssa/src/back/command.rs index 05351bd6ca3..7420f18aacb 100644 --- a/compiler/rustc_codegen_ssa/src/back/command.rs +++ b/compiler/rustc_codegen_ssa/src/back/command.rs @@ -109,7 +109,7 @@ impl Command { } Program::Lld(ref p, flavor) => { let mut c = process::Command::new(p); - c.arg("-flavor").arg(flavor.as_str()); + c.arg("-flavor").arg(flavor.desc()); c } }; diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 8586615f7c7..cbaf67d7345 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -333,8 +333,8 @@ pub struct CodegenContext<B: WriteBackendMethods> { pub crate_types: Vec<CrateType>, pub output_filenames: Arc<OutputFilenames>, pub invocation_temp: Option<String>, - pub regular_module_config: Arc<ModuleConfig>, - pub allocator_module_config: Arc<ModuleConfig>, + pub module_config: Arc<ModuleConfig>, + pub allocator_config: Arc<ModuleConfig>, pub tm_factory: TargetMachineFactoryFn<B>, pub msvc_imps_needed: bool, pub is_pe_coff: bool, @@ -372,13 +372,6 @@ impl<B: WriteBackendMethods> CodegenContext<B> { pub fn create_dcx(&self) -> DiagCtxt { DiagCtxt::new(Box::new(self.diag_emitter.clone())) } - - pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { - match kind { - ModuleKind::Regular => &self.regular_module_config, - ModuleKind::Allocator => &self.allocator_module_config, - } - } } fn generate_thin_lto_work<B: ExtraBackendMethods>( @@ -442,6 +435,7 @@ pub(crate) fn start_async_codegen<B: ExtraBackendMethods>( backend: B, tcx: TyCtxt<'_>, target_cpu: String, + allocator_module: Option<ModuleCodegen<B::Module>>, ) -> OngoingCodegen<B> { let (coordinator_send, coordinator_receive) = channel(); @@ -465,6 +459,7 @@ pub(crate) fn 
start_async_codegen<B: ExtraBackendMethods>( coordinator_receive, Arc::new(regular_config), Arc::new(allocator_config), + allocator_module, coordinator_send.clone(), ); @@ -720,15 +715,6 @@ pub(crate) enum WorkItem<B: WriteBackendMethods> { } impl<B: WriteBackendMethods> WorkItem<B> { - fn module_kind(&self) -> ModuleKind { - match *self { - WorkItem::Optimize(ref m) => m.kind, - WorkItem::CopyPostLtoArtifacts(_) | WorkItem::FatLto { .. } | WorkItem::ThinLto(_) => { - ModuleKind::Regular - } - } - } - /// Generate a short description of this work item suitable for use as a thread name. fn short_description(&self) -> String { // `pthread_setname()` on *nix ignores anything beyond the first 15 @@ -817,7 +803,7 @@ pub(crate) fn compute_per_cgu_lto_type( let linker_does_lto = opts.cg.linker_plugin_lto.enabled(); // When we're automatically doing ThinLTO for multi-codegen-unit - // builds we don't actually want to LTO the allocator modules if + // builds we don't actually want to LTO the allocator module if // it shows up. This is due to various linker shenanigans that // we'll encounter later. let is_allocator = module_kind == ModuleKind::Allocator; @@ -843,11 +829,17 @@ pub(crate) fn compute_per_cgu_lto_type( fn execute_optimize_work_item<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, mut module: ModuleCodegen<B::Module>, - module_config: &ModuleConfig, ) -> WorkItemResult<B> { + let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name); + let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); + let module_config = match module.kind { + ModuleKind::Regular => &cgcx.module_config, + ModuleKind::Allocator => &cgcx.allocator_config, + }; + B::optimize(cgcx, dcx, &mut module, module_config); // After we've done the initial round of optimizations we need to @@ -859,7 +851,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( // If we're doing some form of incremental LTO then we need to be sure to // save our module to disk first. 
- let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc { + let bitcode = if module_config.emit_pre_lto_bc { let filename = pre_lto_bitcode_filename(&module.name); cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename)) } else { @@ -872,7 +864,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( WorkItemResult::Finished(module) } ComputedLtoType::Thin => { - let (name, thin_buffer) = B::prepare_thin(module, false); + let (name, thin_buffer) = B::prepare_thin(module); if let Some(path) = bitcode { fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| { panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e); @@ -899,8 +891,11 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>( fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, module: CachedModuleCodegen, - module_config: &ModuleConfig, ) -> WorkItemResult<B> { + let _timer = cgcx + .prof + .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name); + let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap(); let mut links_from_incr_cache = Vec::new(); @@ -959,6 +954,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>( } }; + let module_config = &cgcx.module_config; let should_emit_obj = module_config.emit_obj != EmitObj::None; let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly); let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly); @@ -970,8 +966,8 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>( WorkItemResult::Finished(CompiledModule { links_from_incr_cache, - name: module.name, kind: ModuleKind::Regular, + name: module.name, object, dwarf_object, bytecode, @@ -986,8 +982,9 @@ fn execute_fat_lto_work_item<B: ExtraBackendMethods>( each_linked_rlib_for_lto: &[PathBuf], mut needs_fat_lto: Vec<FatLtoInput<B>>, import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>, - module_config: &ModuleConfig, ) -> WorkItemResult<B> { + let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", "everything"); + for (module, wp) in import_only_modules { needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module }) } @@ -998,17 +995,18 @@ fn execute_fat_lto_work_item<B: ExtraBackendMethods>( each_linked_rlib_for_lto, needs_fat_lto, ); - let module = B::codegen(cgcx, module, module_config); + let module = B::codegen(cgcx, module, &cgcx.module_config); WorkItemResult::Finished(module) } fn execute_thin_lto_work_item<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, module: lto::ThinModule<B>, - module_config: &ModuleConfig, ) -> WorkItemResult<B> { + let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name()); + let module = B::optimize_thin(cgcx, module); - let module = B::codegen(cgcx, module, module_config); + let module = B::codegen(cgcx, module, &cgcx.module_config); WorkItemResult::Finished(module) } @@ -1093,6 +1091,7 @@ fn start_executing_work<B: ExtraBackendMethods>( coordinator_receive: Receiver<Message<B>>, regular_config: Arc<ModuleConfig>, allocator_config: Arc<ModuleConfig>, + allocator_module: Option<ModuleCodegen<B::Module>>, tx_to_llvm_workers: Sender<Message<B>>, ) -> thread::JoinHandle<Result<CompiledModules, ()>> { let coordinator_send = tx_to_llvm_workers; @@ -1157,8 +1156,8 @@ fn start_executing_work<B: ExtraBackendMethods>( expanded_args: tcx.sess.expanded_args.clone(), diag_emitter: shared_emitter.clone(), output_filenames: 
Arc::clone(tcx.output_filenames(())), - regular_module_config: regular_config, - allocator_module_config: allocator_config, + module_config: regular_config, + allocator_config, tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features), msvc_imps_needed: msvc_imps_needed(tcx), is_pe_coff: tcx.sess.target.is_like_windows, @@ -1312,7 +1311,6 @@ fn start_executing_work<B: ExtraBackendMethods>( // This is where we collect codegen units that have gone all the way // through codegen and LLVM. let mut compiled_modules = vec![]; - let mut compiled_allocator_module = None; let mut needs_fat_lto = Vec::new(); let mut needs_thin_lto = Vec::new(); let mut lto_import_only_modules = Vec::new(); @@ -1353,6 +1351,17 @@ fn start_executing_work<B: ExtraBackendMethods>( let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None; + let compiled_allocator_module = allocator_module.and_then(|allocator_module| { + match execute_optimize_work_item(&cgcx, allocator_module) { + WorkItemResult::Finished(compiled_module) => return Some(compiled_module), + WorkItemResult::NeedsFatLto(fat_lto_input) => needs_fat_lto.push(fat_lto_input), + WorkItemResult::NeedsThinLto(name, thin_buffer) => { + needs_thin_lto.push((name, thin_buffer)) + } + } + None + }); + // Run the message loop while there's still anything that needs message // processing. Note that as soon as codegen is aborted we simply want to // wait for all existing work to finish, so many of the conditions here @@ -1586,15 +1595,7 @@ fn start_executing_work<B: ExtraBackendMethods>( match result { Ok(WorkItemResult::Finished(compiled_module)) => { - match compiled_module.kind { - ModuleKind::Regular => { - compiled_modules.push(compiled_module); - } - ModuleKind::Allocator => { - assert!(compiled_allocator_module.is_none()); - compiled_allocator_module = Some(compiled_module); - } - } + compiled_modules.push(compiled_module); } Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => { assert!(!started_lto); @@ -1722,46 +1723,22 @@ fn spawn_work<'a, B: ExtraBackendMethods>( let cgcx = cgcx.clone(); B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || { - let result = std::panic::catch_unwind(AssertUnwindSafe(|| { - let module_config = cgcx.config(work.module_kind()); - - match work { - WorkItem::Optimize(m) => { - let _timer = - cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name); - execute_optimize_work_item(&cgcx, m, module_config) - } - WorkItem::CopyPostLtoArtifacts(m) => { - let _timer = cgcx.prof.generic_activity_with_arg( - "codegen_copy_artifacts_from_incr_cache", - &*m.name, - ); - execute_copy_from_cache_work_item(&cgcx, m, module_config) - } - WorkItem::FatLto { - exported_symbols_for_lto, - each_linked_rlib_for_lto, - needs_fat_lto, - import_only_modules, - } => { - let _timer = cgcx - .prof - .generic_activity_with_arg("codegen_module_perform_lto", "everything"); - execute_fat_lto_work_item( - &cgcx, - &exported_symbols_for_lto, - &each_linked_rlib_for_lto, - needs_fat_lto, - import_only_modules, - module_config, - ) - } - WorkItem::ThinLto(m) => { - let _timer = - cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name()); - execute_thin_lto_work_item(&cgcx, m, module_config) - } - } + let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work { + WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, m), + WorkItem::CopyPostLtoArtifacts(m) => execute_copy_from_cache_work_item(&cgcx, m), + WorkItem::FatLto { + exported_symbols_for_lto, + each_linked_rlib_for_lto, + 
needs_fat_lto, + import_only_modules, + } => execute_fat_lto_work_item( + &cgcx, + &exported_symbols_for_lto, + &each_linked_rlib_for_lto, + needs_fat_lto, + import_only_modules, + ), + WorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, m), })); let msg = match result { diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 97cdf8b6973..45b028aa8ef 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -664,7 +664,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>( ) -> OngoingCodegen<B> { // Skip crate items and just output metadata in -Z no-codegen mode. if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu); + let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, None); ongoing_codegen.codegen_finished(tcx); @@ -695,7 +695,27 @@ pub fn codegen_crate<B: ExtraBackendMethods>( } } - let ongoing_codegen = start_async_codegen(backend.clone(), tcx, target_cpu); + // Codegen an allocator shim, if necessary. + let allocator_module = if let Some(kind) = allocator_kind_for_codegen(tcx) { + let llmod_id = + cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string(); + + tcx.sess.time("write_allocator_module", || { + let module = backend.codegen_allocator( + tcx, + &llmod_id, + kind, + // If allocator_kind is Some then alloc_error_handler_kind must + // also be Some. + tcx.alloc_error_handler_kind(()).unwrap(), + ); + Some(ModuleCodegen::new_allocator(llmod_id, module)) + }) + } else { + None + }; + + let ongoing_codegen = start_async_codegen(backend.clone(), tcx, target_cpu, allocator_module); // For better throughput during parallel processing by LLVM, we used to sort // CGUs largest to smallest. This would lead to better thread utilization @@ -812,35 +832,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>( } } - // Codegen an allocator shim, if necessary. - // Do this last to ensure the LLVM_passes timer doesn't start while no CGUs have been codegened - // yet for the backend to optimize. - if let Some(kind) = allocator_kind_for_codegen(tcx) { - let llmod_id = - cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string(); - let module_llvm = tcx.sess.time("write_allocator_module", || { - backend.codegen_allocator( - tcx, - &llmod_id, - kind, - // If allocator_kind is Some then alloc_error_handler_kind must - // also be Some. - tcx.alloc_error_handler_kind(()).unwrap(), - ) - }); - - ongoing_codegen.wait_for_signal_to_codegen_item(); - ongoing_codegen.check_for_errors(tcx.sess); - - // These modules are generally cheap and won't throw off scheduling. - let cost = 0; - submit_codegened_module_to_llvm( - &ongoing_codegen.coordinator, - ModuleCodegen::new_allocator(llmod_id, module_llvm), - cost, - ); - } - ongoing_codegen.codegen_finished(tcx); // Since the main thread is sometimes blocked during codegen, we keep track diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs index 008340e614d..dc500c363f4 100644 --- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs +++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs @@ -562,15 +562,6 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { codegen_fn_attrs } -/// If the provided DefId is a method in a trait impl, return the DefId of the method prototype. 
-fn opt_trait_item(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> { - let impl_item = tcx.opt_associated_item(def_id)?; - match impl_item.container { - ty::AssocItemContainer::Impl => impl_item.trait_item_def_id, - _ => None, - } -} - fn disabled_sanitizers_for(tcx: TyCtxt<'_>, did: LocalDefId) -> SanitizerSet { // Backtrack to the crate root. let mut disabled = match tcx.opt_local_parent(did) { @@ -600,14 +591,15 @@ fn disabled_sanitizers_for(tcx: TyCtxt<'_>, did: LocalDefId) -> SanitizerSet { /// Checks if the provided DefId is a method in a trait impl for a trait which has track_caller /// applied to the method prototype. fn should_inherit_track_caller(tcx: TyCtxt<'_>, def_id: DefId) -> bool { - let Some(trait_item) = opt_trait_item(tcx, def_id) else { return false }; - tcx.codegen_fn_attrs(trait_item).flags.intersects(CodegenFnAttrFlags::TRACK_CALLER) + tcx.trait_item_of(def_id).is_some_and(|id| { + tcx.codegen_fn_attrs(id).flags.intersects(CodegenFnAttrFlags::TRACK_CALLER) + }) } /// If the provided DefId is a method in a trait impl, return the value of the `#[align]` /// attribute on the method prototype (if any). fn inherited_align<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Option<Align> { - tcx.codegen_fn_attrs(opt_trait_item(tcx, def_id)?).alignment + tcx.codegen_fn_attrs(tcx.trait_item_of(def_id)?).alignment } /// We now check the #\[rustc_autodiff\] attributes which we generated from the #[autodiff(...)] diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs index fe0500a5d4c..baba8f9ca3e 100644 --- a/compiler/rustc_codegen_ssa/src/lib.rs +++ b/compiler/rustc_codegen_ssa/src/lib.rs @@ -119,7 +119,7 @@ impl<M> ModuleCodegen<M> { }); CompiledModule { - name: self.name.clone(), + name: self.name, kind: self.kind, object, dwarf_object, diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 5f6976f5d00..6492ef73956 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -520,7 +520,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { LocalRef::Place(va_list) => { bx.va_end(va_list.val.llval); - // Explicitly end the lifetime of the `va_list`, this matters for LLVM. + // Explicitly end the lifetime of the `va_list`, improves LLVM codegen. bx.lifetime_end(va_list.val.llval, va_list.layout.size); } _ => bug!("C-variadic function must have a `VaList` place"), diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 06873313e2e..6b109e8b8e2 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -438,6 +438,10 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() { let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); + + // Explicitly start the lifetime of the `va_list`, improves LLVM codegen. 
+ bx.lifetime_start(va_list.val.llval, va_list.layout.size); + bx.va_start(va_list.val.llval); return LocalRef::Place(va_list); diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 8a67b8d6e5f..f6f2e3f2a3a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -1,3 +1,4 @@ +use itertools::Itertools as _; use rustc_abi::{self as abi, FIRST_VARIANT}; use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout}; @@ -111,14 +112,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let size = bx.const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all same byte arrays - if let Some(int) = bx.cx().const_to_opt_u128(v, false) { - let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]; - let first = bytes[0]; - if bytes[1..].iter().all(|&b| b == first) { - let fill = bx.cx().const_u8(first); - bx.memset(start, fill, size, dest.val.align, MemFlags::empty()); - return true; - } + if let Some(int) = bx.cx().const_to_opt_u128(v, false) + && let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()] + && let Ok(&byte) = bytes.iter().all_equal_value() + { + let fill = bx.cx().const_u8(byte); + bx.memset(start, fill, size, dest.val.align, MemFlags::empty()); + return true; } // Use llvm.memset.p0i8.* to initialize byte arrays @@ -130,13 +130,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { false }; - match cg_elem.val { - OperandValue::Immediate(v) => { - if try_init_all_same(bx, v) { - return; - } - } - _ => (), + if let OperandValue::Immediate(v) = cg_elem.val + && try_init_all_same(bx, v) + { + return; } let count = self diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs index cc7c4e46d7b..1ac1d7ef2e2 100644 --- a/compiler/rustc_codegen_ssa/src/traits/write.rs +++ b/compiler/rustc_codegen_ssa/src/traits/write.rs @@ -50,16 +50,12 @@ pub trait WriteBackendMethods: Clone + 'static { module: ModuleCodegen<Self::Module>, config: &ModuleConfig, ) -> CompiledModule; - fn prepare_thin( - module: ModuleCodegen<Self::Module>, - want_summary: bool, - ) -> (String, Self::ThinBuffer); + fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer); fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer); } pub trait ThinBufferMethods: Send + Sync { fn data(&self) -> &[u8]; - fn thin_link_data(&self) -> &[u8]; } pub trait ModuleBufferMethods: Send + Sync { diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 9681d89ce35..91ed71ac3e5 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -4,6 +4,7 @@ use either::{Left, Right}; use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout}; use rustc_errors::DiagCtxtHandle; use rustc_hir::def_id::DefId; +use rustc_hir::limit::Limit; use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo}; use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::layout::{ @@ -12,7 +13,6 @@ use rustc_middle::ty::layout::{ }; use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeFoldable, TypingEnv, Variance}; use rustc_middle::{mir, span_bug}; -use rustc_session::Limit; use rustc_span::Span; use 
rustc_target::callconv::FnAbi; use tracing::{debug, trace}; diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 2c1e5087e1c..ebcdb9461d0 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -953,6 +953,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // # Global allocations if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) { + // NOTE: `static` alignment from attributes has already been applied to the allocation. let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env); let mutbl = global_alloc.mutability(*self.tcx, self.typing_env); let kind = match global_alloc { @@ -1501,8 +1502,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // `get_bytes_mut` will clear the provenance, which is correct, // since we don't want to keep any provenance at the target. // This will also error if copying partial provenance is not supported. - let provenance = - src_alloc.provenance().prepare_copy(src_range, dest_offset, num_copies, self); + let provenance = src_alloc + .provenance() + .prepare_copy(src_range, dest_offset, num_copies, self) + .map_err(|e| e.to_interp_error(src_alloc_id))?; // Prepare a copy of the initialization mask. let init = src_alloc.init_mask().prepare_copy(src_range); diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs index e4b5c82853a..870f9a396ae 100644 --- a/compiler/rustc_const_eval/src/interpret/traits.rs +++ b/compiler/rustc_const_eval/src/interpret/traits.rs @@ -23,7 +23,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> { trace!("get_vtable(ty={ty:?}, dyn_ty={dyn_ty:?})"); - let (ty, dyn_ty) = self.tcx.erase_regions((ty, dyn_ty)); + let (ty, dyn_ty) = self.tcx.erase_and_anonymize_regions((ty, dyn_ty)); // All vtables must be monomorphic, bail out otherwise. ensure_monomorphic_enough(*self.tcx, ty)?; @@ -53,8 +53,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { ) -> &'tcx [VtblEntry<'tcx>] { if let Some(trait_) = trait_ { let trait_ref = trait_.with_self_ty(*self.tcx, dyn_ty); - let trait_ref = - self.tcx.erase_regions(self.tcx.instantiate_bound_regions_with_erased(trait_ref)); + let trait_ref = self.tcx.erase_and_anonymize_regions( + self.tcx.instantiate_bound_regions_with_erased(trait_ref), + ); self.tcx.vtable_entries(trait_ref) } else { TyCtxt::COMMON_VTABLE_ENTRIES diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs index 72bee345406..f667823723c 100644 --- a/compiler/rustc_const_eval/src/interpret/util.rs +++ b/compiler/rustc_const_eval/src/interpret/util.rs @@ -1,6 +1,6 @@ use rustc_hir::def_id::LocalDefId; use rustc_middle::mir; -use rustc_middle::mir::interpret::{AllocInit, Allocation, InterpResult, Pointer}; +use rustc_middle::mir::interpret::{AllocInit, Allocation, GlobalAlloc, InterpResult, Pointer}; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{TyCtxt, TypeVisitable, TypeVisitableExt}; use tracing::debug; @@ -38,7 +38,14 @@ pub(crate) fn create_static_alloc<'tcx>( static_def_id: LocalDefId, layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>> { - let alloc = Allocation::try_new(layout.size, layout.align.abi, AllocInit::Uninit, ())?; + // Inherit size and align from the `GlobalAlloc::Static` so we can avoid duplicating + // the alignment attribute logic. 
+ let (size, align) = + GlobalAlloc::Static(static_def_id.into()).size_and_align(*ecx.tcx, ecx.typing_env); + assert_eq!(size, layout.size); + assert!(align >= layout.align.abi); + + let alloc = Allocation::try_new(size, align, AllocInit::Uninit, ())?; let alloc_id = ecx.tcx.reserve_and_set_static_alloc(static_def_id.into()); assert_eq!(ecx.machine.static_root_ids, None); ecx.machine.static_root_ids = Some((alloc_id, static_def_id)); diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs index 2ae6655901b..5bcf96abd8c 100644 --- a/compiler/rustc_const_eval/src/util/type_name.rs +++ b/compiler/rustc_const_eval/src/util/type_name.rs @@ -168,10 +168,11 @@ impl<'tcx> PrettyPrinter<'tcx> for TypeNamePrinter<'tcx> { // Bound regions are always printed (as `'_`), which gives some idea that they are special, // even though the `for` is omitted by the pretty printer. // E.g. `for<'a, 'b> fn(&'a u32, &'b u32)` is printed as "fn(&'_ u32, &'_ u32)". + let kind = region.kind(); match region.kind() { - ty::ReErased | ty::ReEarlyParam(_) => false, + ty::ReErased | ty::ReEarlyParam(_) | ty::ReStatic => false, ty::ReBound(..) => true, - _ => unreachable!(), + _ => panic!("type_name unhandled region: {kind:?}"), } } diff --git a/compiler/rustc_driver/Cargo.toml b/compiler/rustc_driver/Cargo.toml index e3ee8351295..51909719827 100644 --- a/compiler/rustc_driver/Cargo.toml +++ b/compiler/rustc_driver/Cargo.toml @@ -10,3 +10,8 @@ crate-type = ["dylib"] # tidy-alphabetical-start rustc_driver_impl = { path = "../rustc_driver_impl" } # tidy-alphabetical-end + +[build-dependencies] +# tidy-alphabetical-start +rustc_windows_rc = { path = "../rustc_windows_rc" } +# tidy-alphabetical-end diff --git a/compiler/rustc_driver/build.rs b/compiler/rustc_driver/build.rs new file mode 100644 index 00000000000..ba44fe7a86e --- /dev/null +++ b/compiler/rustc_driver/build.rs @@ -0,0 +1,21 @@ +use std::{env, path}; + +use rustc_windows_rc::{VersionInfoFileType, compile_windows_resource_file}; + +fn main() { + let target_os = env::var("CARGO_CFG_TARGET_OS"); + let target_env = env::var("CARGO_CFG_TARGET_ENV"); + if Ok("windows") == target_os.as_deref() && Ok("msvc") == target_env.as_deref() { + set_windows_dll_options(); + } else { + // Avoid rerunning the build script every time. 
+ println!("cargo:rerun-if-changed=build.rs"); + } +} + +fn set_windows_dll_options() { + let stem = path::PathBuf::from("rustc_driver_resource"); + let file_description = "rustc_driver"; + let res_file = compile_windows_resource_file(&stem, file_description, VersionInfoFileType::Dll); + println!("cargo:rustc-link-arg={}", res_file.display()); +} diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index f3ed6042105..4f875cf99ec 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -51,6 +51,7 @@ use rustc_lint::unerased_lint_store; use rustc_metadata::creader::MetadataLoader; use rustc_metadata::locator; use rustc_middle::ty::TyCtxt; +use rustc_parse::lexer::StripTokens; use rustc_parse::{new_parser_from_file, new_parser_from_source_str, unwrap_or_emit_fatal}; use rustc_session::config::{ CG_OPTIONS, CrateType, ErrorOutputType, Input, OptionDesc, OutFileName, OutputType, Sysroot, @@ -667,6 +668,10 @@ fn print_crate_info( TargetSpecJson => { println_info!("{}", serde_json::to_string_pretty(&sess.target.to_json()).unwrap()); } + TargetSpecJsonSchema => { + let schema = rustc_target::spec::json_schema(); + println_info!("{}", serde_json::to_string_pretty(&schema).unwrap()); + } AllTargetSpecsJson => { let mut targets = BTreeMap::new(); for name in rustc_target::spec::TARGETS { @@ -1288,10 +1293,15 @@ fn warn_on_confusing_output_filename_flag( fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> { let mut parser = unwrap_or_emit_fatal(match &sess.io.input { - Input::File(file) => new_parser_from_file(&sess.psess, file, None), - Input::Str { name, input } => { - new_parser_from_source_str(&sess.psess, name.clone(), input.clone()) + Input::File(file) => { + new_parser_from_file(&sess.psess, file, StripTokens::ShebangAndFrontmatter, None) } + Input::Str { name, input } => new_parser_from_source_str( + &sess.psess, + name.clone(), + input.clone(), + StripTokens::ShebangAndFrontmatter, + ), }); parser.parse_inner_attributes() } diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 96a4ed3218f..ae23ef1e255 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -945,11 +945,6 @@ impl<'a, G: EmissionGuarantee> Diag<'a, G> { None, "Span must not be empty and have no suggestion", ); - debug_assert_eq!( - parts.array_windows().find(|[a, b]| a.span.overlaps(b.span)), - None, - "suggestion must not have overlapping parts", - ); self.push_suggestion(CodeSuggestion { substitutions: vec![Substitution { parts }], diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index b94370e8e9b..93b1e6b7615 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -2354,7 +2354,6 @@ impl HumanEmitter { .sum(); let underline_start = (span_start_pos + start) as isize + offset; let underline_end = (span_start_pos + start + sub_len) as isize + offset; - assert!(underline_start >= 0 && underline_end >= 0); let padding: usize = max_line_num_len + 3; for p in underline_start..underline_end { if let DisplaySuggestion::Underline = show_code_change diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index a56e0f3fae1..8869799ce90 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -381,6 +381,17 @@ impl CodeSuggestion { // Assumption: all spans are in the same file, and all spans // are disjoint. 
Sort in ascending order. substitution.parts.sort_by_key(|part| part.span.lo()); + // Verify the assumption that all spans are disjoint + assert_eq!( + substitution.parts.array_windows().find(|[a, b]| a.span.overlaps(b.span)), + None, + "all spans must be disjoint", + ); + + // Account for cases where we are suggesting the same code that's already + // there. This shouldn't happen often, but in some cases for multipart + // suggestions it's much easier to handle it here than in the origin. + substitution.parts.retain(|p| is_different(sm, &p.snippet, p.span)); // Find the bounding span. let lo = substitution.parts.iter().map(|part| part.span.lo()).min()?; @@ -470,16 +481,12 @@ impl CodeSuggestion { _ => 1, }) .sum(); - if !is_different(sm, &part.snippet, part.span) { - // Account for cases where we are suggesting the same code that's already - // there. This shouldn't happen often, but in some cases for multipart - // suggestions it's much easier to handle it here than in the origin. - } else { - line_highlight.push(SubstitutionHighlight { - start: (cur_lo.col.0 as isize + acc) as usize, - end: (cur_lo.col.0 as isize + acc + len) as usize, - }); - } + + line_highlight.push(SubstitutionHighlight { + start: (cur_lo.col.0 as isize + acc) as usize, + end: (cur_lo.col.0 as isize + acc + len) as usize, + }); + buf.push_str(&part.snippet); let cur_hi = sm.lookup_char_pos(part.span.hi()); // Account for the difference between the width of the current code and the diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index 8ff21509f4a..88f88f30a8c 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -18,13 +18,14 @@ use rustc_feature::Features; use rustc_hir as hir; use rustc_hir::attrs::{AttributeKind, CfgEntry, Deprecation}; use rustc_hir::def::MacroKinds; +use rustc_hir::limit::Limit; use rustc_hir::{Stability, find_attr}; use rustc_lint_defs::RegisteredTools; use rustc_parse::MACRO_ARGUMENTS; use rustc_parse::parser::{ForceCollect, Parser}; +use rustc_session::Session; use rustc_session::config::CollapseMacroDebuginfo; use rustc_session::parse::ParseSess; -use rustc_session::{Limit, Session}; use rustc_span::def_id::{CrateNum, DefId, LocalDefId}; use rustc_span::edition::Edition; use rustc_span::hygiene::{AstPass, ExpnData, ExpnKind, LocalExpnId, MacroKind}; diff --git a/compiler/rustc_expand/src/errors.rs b/compiler/rustc_expand/src/errors.rs index ba9d76970f0..6eb5cd65846 100644 --- a/compiler/rustc_expand/src/errors.rs +++ b/compiler/rustc_expand/src/errors.rs @@ -2,8 +2,8 @@ use std::borrow::Cow; use rustc_ast::ast; use rustc_errors::codes::*; +use rustc_hir::limit::Limit; use rustc_macros::{Diagnostic, Subdiagnostic}; -use rustc_session::Limit; use rustc_span::{Ident, MacroRulesNormalizedIdent, Span, Symbol}; #[derive(Diagnostic)] diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs index dbb4a9de9e0..38e057d2776 100644 --- a/compiler/rustc_expand/src/expand.rs +++ b/compiler/rustc_expand/src/expand.rs @@ -19,14 +19,15 @@ use rustc_errors::PResult; use rustc_feature::Features; use rustc_hir::Target; use rustc_hir::def::MacroKinds; +use rustc_hir::limit::Limit; use rustc_parse::parser::{ AttemptLocalParseRecovery, CommaRecoveryMode, ForceCollect, Parser, RecoverColon, RecoverComma, token_descr, }; +use rustc_session::Session; use rustc_session::lint::BuiltinLintDiag; use rustc_session::lint::builtin::{UNUSED_ATTRIBUTES, UNUSED_DOC_COMMENTS}; use rustc_session::parse::feature_err; -use 
rustc_session::{Limit, Session}; use rustc_span::hygiene::SyntaxContext; use rustc_span::{ErrorGuaranteed, FileName, Ident, LocalExpnId, Span, Symbol, sym}; use smallvec::SmallVec; @@ -2529,6 +2530,7 @@ impl ExpansionConfig<'_> { ExpansionConfig { crate_name, features, + // FIXME should this limit be configurable? recursion_limit: Limit::new(1024), trace_mac: false, should_test: false, diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs index e4e4866b64c..ed8aa71d59d 100644 --- a/compiler/rustc_expand/src/mbe/transcribe.rs +++ b/compiler/rustc_expand/src/mbe/transcribe.rs @@ -940,11 +940,27 @@ fn extract_symbol_from_pnr<'a>( { Ok(*symbol) } + ParseNtResult::Literal(expr) + if let ExprKind::Lit(lit @ Lit { kind: LitKind::Integer, symbol, suffix }) = + &expr.kind => + { + if lit.is_semantic_float() { + Err(dcx + .struct_err("floats are not supported as metavariables of `${concat(..)}`") + .with_span(span_err)) + } else if suffix.is_none() { + Ok(*symbol) + } else { + Err(dcx + .struct_err("integer metavariables of `${concat(..)}` must not be suffixed") + .with_span(span_err)) + } + } _ => Err(dcx .struct_err( "metavariables of `${concat(..)}` must be of type `ident`, `literal` or `tt`", ) - .with_note("currently only string literals are supported") + .with_note("currently only string and integer literals are supported") .with_span(span_err)), } } diff --git a/compiler/rustc_expand/src/module.rs b/compiler/rustc_expand/src/module.rs index 19f3cdbc549..79ab3cab22c 100644 --- a/compiler/rustc_expand/src/module.rs +++ b/compiler/rustc_expand/src/module.rs @@ -4,6 +4,7 @@ use std::path::{self, Path, PathBuf}; use rustc_ast::{AttrVec, Attribute, Inline, Item, ModSpans}; use rustc_attr_parsing::validate_attr; use rustc_errors::{Diag, ErrorGuaranteed}; +use rustc_parse::lexer::StripTokens; use rustc_parse::{exp, new_parser_from_file, unwrap_or_emit_fatal}; use rustc_session::Session; use rustc_session::parse::ParseSess; @@ -67,8 +68,12 @@ pub(crate) fn parse_external_mod( } // Actually parse the external file as a module. 
- let mut parser = - unwrap_or_emit_fatal(new_parser_from_file(&sess.psess, &mp.file_path, Some(span))); + let mut parser = unwrap_or_emit_fatal(new_parser_from_file( + &sess.psess, + &mp.file_path, + StripTokens::ShebangAndFrontmatter, + Some(span), + )); let (inner_attrs, items, inner_span) = parser.parse_mod(exp!(Eof)).map_err(|err| ModError::ParserError(err))?; attrs.extend(inner_attrs); diff --git a/compiler/rustc_expand/src/proc_macro_server.rs b/compiler/rustc_expand/src/proc_macro_server.rs index 5b1d3d6d35b..295573f4492 100644 --- a/compiler/rustc_expand/src/proc_macro_server.rs +++ b/compiler/rustc_expand/src/proc_macro_server.rs @@ -8,7 +8,7 @@ use rustc_ast::util::literal::escape_byte_str_symbol; use rustc_ast_pretty::pprust; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{Diag, ErrorGuaranteed, MultiSpan, PResult}; -use rustc_parse::lexer::nfc_normalize; +use rustc_parse::lexer::{StripTokens, nfc_normalize}; use rustc_parse::parser::Parser; use rustc_parse::{exp, new_parser_from_source_str, source_str_to_stream, unwrap_or_emit_fatal}; use rustc_proc_macro::bridge::{ @@ -485,8 +485,13 @@ impl server::FreeFunctions for Rustc<'_, '_> { fn literal_from_str(&mut self, s: &str) -> Result<Literal<Self::Span, Self::Symbol>, ()> { let name = FileName::proc_macro_source_code(s); - let mut parser = - unwrap_or_emit_fatal(new_parser_from_source_str(self.psess(), name, s.to_owned())); + + let mut parser = unwrap_or_emit_fatal(new_parser_from_source_str( + self.psess(), + name, + s.to_owned(), + StripTokens::Nothing, + )); let first_span = parser.token.span.data(); let minus_present = parser.eat(exp!(Minus)); diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index e81003b1897..129ab7eccb5 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -621,6 +621,7 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ ), // FIXME(#82232, #143834): temporarily renamed to mitigate `#[align]` nameres ambiguity gated!(rustc_align, Normal, template!(List: &["alignment"]), DuplicatesOk, EncodeCrossCrate::No, fn_align, experimental!(rustc_align)), + gated!(rustc_align_static, Normal, template!(List: &["alignment"]), DuplicatesOk, EncodeCrossCrate::No, static_align, experimental!(rustc_align_static)), ungated!( unsafe(Edition2024) export_name, Normal, template!(NameValueStr: "name", "https://doc.rust-lang.org/reference/abi.html#the-export_name-attribute"), diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs index 4f35bf63a1a..93e5588146e 100644 --- a/compiler/rustc_feature/src/unstable.rs +++ b/compiler/rustc_feature/src/unstable.rs @@ -632,6 +632,8 @@ declare_features! ( (unstable, simd_ffi, "1.0.0", Some(27731)), /// Allows specialization of implementations (RFC 1210). (incomplete, specialization, "1.7.0", Some(31844)), + /// Allows using `#[rustc_align_static(...)]` on static items. + (unstable, static_align, "CURRENT_RUSTC_VERSION", Some(146177)), /// Allows attributes on expressions and non-item statements. (unstable, stmt_expr_attributes, "1.6.0", Some(15701)), /// Allows lints part of the strict provenance effort. 
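As a rough illustration of the `static_align` feature gate and the `rustc_align_static` builtin attribute added in the two hunks above (this sketch is not part of the patch): the argument form is an assumption taken from the `template!(List: &["alignment"])` declaration, mirroring how `#[rustc_align(..)]` is written on functions, and it would only compile on a compiler carrying these changes.

    // Hypothetical sketch of the unstable `static_align` feature (nightly only);
    // the attribute name and the literal-alignment argument are assumptions
    // based on the builtin-attribute template above, not confirmed usage.
    #![feature(static_align)]

    // Ask codegen to place this static's allocation on a 4096-byte boundary.
    #[rustc_align_static(4096)]
    static PAGE_BUFFER: [u8; 4096] = [0; 4096];
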
diff --git a/compiler/rustc_hir/src/attrs/data_structures.rs b/compiler/rustc_hir/src/attrs/data_structures.rs index dd5565d6f90..ea11a99efbc 100644 --- a/compiler/rustc_hir/src/attrs/data_structures.rs +++ b/compiler/rustc_hir/src/attrs/data_structures.rs @@ -14,6 +14,7 @@ pub use rustc_target::spec::SanitizerSet; use thin_vec::ThinVec; use crate::attrs::pretty_printing::PrintAttribute; +use crate::limit::Limit; use crate::{DefaultBodyStability, PartialConstStability, RustcVersion, Stability}; #[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, PrintAttribute)] @@ -565,6 +566,9 @@ pub enum AttributeKind { /// Represents [`#[may_dangle]`](https://std-dev-guide.rust-lang.org/tricky/may-dangle.html). MayDangle(Span), + /// Represents `#[move_size_limit]` + MoveSizeLimit { attr_span: Span, limit_span: Span, limit: Limit }, + /// Represents `#[must_use]`. MustUse { span: Span, @@ -575,12 +579,18 @@ pub enum AttributeKind { /// Represents `#[naked]` Naked(Span), + /// Represents `#[no_core]` + NoCore(Span), + /// Represents `#[no_implicit_prelude]` NoImplicitPrelude(Span), /// Represents `#[no_mangle]` NoMangle(Span), + /// Represents `#[no_std]` + NoStd(Span), + /// Represents `#[non_exhaustive]` NonExhaustive(Span), @@ -596,6 +606,9 @@ pub enum AttributeKind { /// Represents `#[path]` Path(Symbol, Span), + /// Represents `#[pattern_complexity_limit]` + PatternComplexityLimit { attr_span: Span, limit_span: Span, limit: Limit }, + /// Represents `#[pointee]` Pointee(Span), @@ -611,6 +624,9 @@ pub enum AttributeKind { /// Represents `#[rustc_pub_transparent]` (used by the `repr_transparent_external_private_fields` lint). PubTransparent(Span), + /// Represents [`#[recursion_limit]`](https://doc.rust-lang.org/reference/attributes/limits.html#the-recursion_limit-attribute) + RecursionLimit { attr_span: Span, limit_span: Span, limit: Limit }, + /// Represents [`#[repr]`](https://doc.rust-lang.org/stable/reference/type-layout.html#representations). Repr { reprs: ThinVec<(ReprAttr, Span)>, first_span: Span }, @@ -661,6 +677,9 @@ pub enum AttributeKind { /// Represents `#[type_const]`. TypeConst(Span), + /// Represents `#[type_length_limit]` + TypeLengthLimit { attr_span: Span, limit_span: Span, limit: Limit }, + /// Represents `#[rustc_unsafe_specialization_marker]`. UnsafeSpecializationMarker(Span), diff --git a/compiler/rustc_hir/src/attrs/encode_cross_crate.rs b/compiler/rustc_hir/src/attrs/encode_cross_crate.rs index 3810bb6d003..55521c15854 100644 --- a/compiler/rustc_hir/src/attrs/encode_cross_crate.rs +++ b/compiler/rustc_hir/src/attrs/encode_cross_crate.rs @@ -61,20 +61,25 @@ impl AttributeKind { MacroUse { .. } => No, Marker(..) => No, MayDangle(..) => No, + MoveSizeLimit { .. } => No, MustUse { .. } => Yes, Naked(..) => No, + NoCore(..) => No, NoImplicitPrelude(..) => No, - NoMangle(..) => Yes, // Needed for rustdoc + NoMangle(..) => Yes, // Needed for rustdoc + NoStd(..) => No, NonExhaustive(..) => Yes, // Needed for rustdoc Optimize(..) => No, ParenSugar(..) => No, PassByValue(..) => Yes, Path(..) => No, + PatternComplexityLimit { .. } => No, Pointee(..) => No, ProcMacro(..) => No, ProcMacroAttribute(..) => No, ProcMacroDerive { .. } => No, PubTransparent(..) => Yes, + RecursionLimit { .. } => No, Repr { .. } => No, RustcBuiltinMacro { .. } => Yes, RustcLayoutScalarValidRangeEnd(..) => Yes, @@ -89,6 +94,7 @@ impl AttributeKind { TargetFeature { .. } => No, TrackCaller(..) => Yes, TypeConst(..) => Yes, + TypeLengthLimit { .. 
} => No, UnsafeSpecializationMarker(..) => No, UnstableFeatureBound(..) => No, Used { .. } => No, diff --git a/compiler/rustc_hir/src/attrs/pretty_printing.rs b/compiler/rustc_hir/src/attrs/pretty_printing.rs index e65de25b451..d13cd3fd1da 100644 --- a/compiler/rustc_hir/src/attrs/pretty_printing.rs +++ b/compiler/rustc_hir/src/attrs/pretty_printing.rs @@ -9,6 +9,8 @@ use rustc_span::{ErrorGuaranteed, Ident, Span, Symbol}; use rustc_target::spec::SanitizerSet; use thin_vec::ThinVec; +use crate::limit::Limit; + /// This trait is used to print attributes in `rustc_hir_pretty`. /// /// For structs and enums it can be derived using [`rustc_macros::PrintAttribute`]. @@ -146,7 +148,7 @@ macro_rules! print_tup { print_tup!(A B C D E F G H); print_skip!(Span, (), ErrorGuaranteed); -print_disp!(u16, bool, NonZero<u32>); +print_disp!(u16, bool, NonZero<u32>, Limit); print_debug!( Symbol, Ident, diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs index 8af4740f376..95abe5c40dd 100644 --- a/compiler/rustc_hir/src/def.rs +++ b/compiler/rustc_hir/src/def.rs @@ -440,6 +440,43 @@ impl DefKind { | DefKind::ExternCrate => false, } } + + /// Returns `true` if `self` is a kind of definition that does not have its own + /// type-checking context, i.e. closure, coroutine or inline const. + #[inline] + pub fn is_typeck_child(self) -> bool { + match self { + DefKind::Closure | DefKind::InlineConst | DefKind::SyntheticCoroutineBody => true, + DefKind::Mod + | DefKind::Struct + | DefKind::Union + | DefKind::Enum + | DefKind::Variant + | DefKind::Trait + | DefKind::TyAlias + | DefKind::ForeignTy + | DefKind::TraitAlias + | DefKind::AssocTy + | DefKind::TyParam + | DefKind::Fn + | DefKind::Const + | DefKind::ConstParam + | DefKind::Static { .. } + | DefKind::Ctor(_, _) + | DefKind::AssocFn + | DefKind::AssocConst + | DefKind::Macro(_) + | DefKind::ExternCrate + | DefKind::Use + | DefKind::ForeignMod + | DefKind::AnonConst + | DefKind::OpaqueTy + | DefKind::Field + | DefKind::LifetimeParam + | DefKind::GlobalAsm + | DefKind::Impl { .. } => false, + } + } } /// The resolution of a path or export. diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index ae03121e5f7..493236718a8 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -2616,6 +2616,18 @@ impl Expr<'_> { ) | ( ExprKind::Struct( + QPath::LangItem(LangItem::RangeToInclusiveCopy, _), + [val1], + StructTailExpr::None, + ), + ExprKind::Struct( + QPath::LangItem(LangItem::RangeToInclusiveCopy, _), + [val2], + StructTailExpr::None, + ), + ) + | ( + ExprKind::Struct( QPath::LangItem(LangItem::RangeFrom, _), [val1], StructTailExpr::None, @@ -2705,7 +2717,8 @@ pub fn is_range_literal(expr: &Expr<'_>) -> bool { | LangItem::RangeToInclusive | LangItem::RangeCopy | LangItem::RangeFromCopy - | LangItem::RangeInclusiveCopy, + | LangItem::RangeInclusiveCopy + | LangItem::RangeToInclusiveCopy, .. ) ), @@ -3207,12 +3220,21 @@ pub struct ImplItem<'hir> { pub owner_id: OwnerId, pub generics: &'hir Generics<'hir>, pub kind: ImplItemKind<'hir>, - pub defaultness: Defaultness, + pub impl_kind: ImplItemImplKind, pub span: Span, - pub vis_span: Span, pub has_delayed_lints: bool, - /// When we are in a trait impl, link to the trait-item's id. 
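// A sketch of the range type behind the new `RangeToInclusiveCopy` lang item
// used in the hunks above. It assumes the matching library-side change (not in
// this compiler-only diff) gives `core::range::RangeToInclusive` a `last`
// field, as the `rustc_index` hunk further below already relies on; treat the
// exact field name and feature gate as assumptions.
#![feature(new_range_api)]

fn demo() {
    let r = core::range::RangeToInclusive { last: 5_usize };
    let copied = r; // the "Copy" in the lang item name: no move out of `r`
    assert_eq!(r.last, copied.last);
}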
- pub trait_item_def_id: Option<DefId>, +} + +#[derive(Debug, Clone, Copy, HashStable_Generic)] +pub enum ImplItemImplKind { + Inherent { + vis_span: Span, + }, + Trait { + defaultness: Defaultness, + /// Item in the trait that this item implements + trait_item_def_id: Result<DefId, ErrorGuaranteed>, + }, } impl<'hir> ImplItem<'hir> { @@ -3226,6 +3248,13 @@ impl<'hir> ImplItem<'hir> { ImplItemId { owner_id: self.owner_id } } + pub fn vis_span(&self) -> Option<Span> { + match self.impl_kind { + ImplItemImplKind::Trait { .. } => None, + ImplItemImplKind::Inherent { vis_span, .. } => Some(vis_span), + } + } + expect_methods_self_kind! { expect_const, (&'hir Ty<'hir>, BodyId), ImplItemKind::Const(ty, body), (ty, *body); expect_fn, (&FnSig<'hir>, BodyId), ImplItemKind::Fn(ty, body), (ty, *body); @@ -4972,7 +5001,7 @@ mod size_asserts { static_assert_size!(GenericBound<'_>, 64); static_assert_size!(Generics<'_>, 56); static_assert_size!(Impl<'_>, 40); - static_assert_size!(ImplItem<'_>, 96); + static_assert_size!(ImplItem<'_>, 88); static_assert_size!(ImplItemKind<'_>, 40); static_assert_size!(Item<'_>, 88); static_assert_size!(ItemKind<'_>, 64); diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs index 25a7ae239f3..eb682f32111 100644 --- a/compiler/rustc_hir/src/intravisit.rs +++ b/compiler/rustc_hir/src/intravisit.rs @@ -1257,18 +1257,21 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>( owner_id: _, ident, ref generics, + ref impl_kind, ref kind, - ref defaultness, span: _, - vis_span: _, has_delayed_lints: _, - trait_item_def_id: _, } = *impl_item; try_visit!(visitor.visit_ident(ident)); try_visit!(visitor.visit_generics(generics)); - try_visit!(visitor.visit_defaultness(defaultness)); try_visit!(visitor.visit_id(impl_item.hir_id())); + match impl_kind { + ImplItemImplKind::Inherent { vis_span: _ } => {} + ImplItemImplKind::Trait { defaultness, trait_item_def_id: _ } => { + try_visit!(visitor.visit_defaultness(defaultness)); + } + } match *kind { ImplItemKind::Const(ref ty, body) => { try_visit!(visitor.visit_ty_unambig(ty)); diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs index 0464665b41f..67d2f15d414 100644 --- a/compiler/rustc_hir/src/lang_items.rs +++ b/compiler/rustc_hir/src/lang_items.rs @@ -422,6 +422,7 @@ language_item_table! 
{ RangeFromCopy, sym::RangeFromCopy, range_from_copy_struct, Target::Struct, GenericRequirement::None; RangeCopy, sym::RangeCopy, range_copy_struct, Target::Struct, GenericRequirement::None; RangeInclusiveCopy, sym::RangeInclusiveCopy, range_inclusive_copy_struct, Target::Struct, GenericRequirement::None; + RangeToInclusiveCopy, sym::RangeToInclusiveCopy, range_to_inclusive_copy_struct, Target::Struct, GenericRequirement::None; String, sym::String, string, Target::Struct, GenericRequirement::None; CStr, sym::CStr, c_str, Target::Struct, GenericRequirement::None; diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs index 78fc63753a2..eb630fc8018 100644 --- a/compiler/rustc_hir/src/lib.rs +++ b/compiler/rustc_hir/src/lib.rs @@ -25,6 +25,7 @@ mod hir; pub use rustc_hir_id::{self as hir_id, *}; pub mod intravisit; pub mod lang_items; +pub mod limit; pub mod lints; pub mod pat_util; mod stability; diff --git a/compiler/rustc_hir/src/limit.rs b/compiler/rustc_hir/src/limit.rs new file mode 100644 index 00000000000..7e6d23f51bd --- /dev/null +++ b/compiler/rustc_hir/src/limit.rs @@ -0,0 +1,63 @@ +use std::fmt; +use std::ops::{Div, Mul}; + +use rustc_error_messages::{DiagArgValue, IntoDiagArg}; +use rustc_macros::{Decodable, Encodable, HashStable_Generic}; + +/// New-type wrapper around `usize` for representing limits. Ensures that comparisons against +/// limits are consistent throughout the compiler. +#[derive(Clone, Copy, Debug, HashStable_Generic, Encodable, Decodable)] +pub struct Limit(pub usize); + +impl Limit { + /// Create a new limit from a `usize`. + pub fn new(value: usize) -> Self { + Limit(value) + } + + /// Create a new unlimited limit. + pub fn unlimited() -> Self { + Limit(usize::MAX) + } + + /// Check that `value` is within the limit. Ensures that the same comparisons are used + /// throughout the compiler, as mismatches can cause ICEs, see #72540. 
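// Usage sketch for the `Limit` new-type defined in this file (illustrative
// only): comparisons are inclusive, and the `Div`/`Mul` impls keep results in
// the same convention.
use rustc_hir::limit::Limit;

fn demo() {
    let recursion_limit = Limit::new(128);
    assert!(recursion_limit.value_within_limit(128)); // the limit itself still passes
    assert!(!recursion_limit.value_within_limit(129));
    assert!(Limit::unlimited().value_within_limit(usize::MAX));
    assert!((recursion_limit / 2).value_within_limit(64));
}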
+ #[inline] + pub fn value_within_limit(&self, value: usize) -> bool { + value <= self.0 + } +} + +impl From<usize> for Limit { + fn from(value: usize) -> Self { + Self::new(value) + } +} + +impl fmt::Display for Limit { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl Div<usize> for Limit { + type Output = Limit; + + fn div(self, rhs: usize) -> Self::Output { + Limit::new(self.0 / rhs) + } +} + +impl Mul<usize> for Limit { + type Output = Limit; + + fn mul(self, rhs: usize) -> Self::Output { + Limit::new(self.0 * rhs) + } +} + +impl IntoDiagArg for Limit { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(&mut None) + } +} diff --git a/compiler/rustc_hir_analysis/src/autoderef.rs b/compiler/rustc_hir_analysis/src/autoderef.rs index e27e68d3662..e8237471e1b 100644 --- a/compiler/rustc_hir_analysis/src/autoderef.rs +++ b/compiler/rustc_hir_analysis/src/autoderef.rs @@ -1,7 +1,7 @@ +use rustc_hir::limit::Limit; use rustc_infer::infer::InferCtxt; use rustc_infer::traits::PredicateObligations; use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt}; -use rustc_session::Limit; use rustc_span::def_id::{LOCAL_CRATE, LocalDefId}; use rustc_span::{ErrorGuaranteed, Span}; use rustc_trait_selection::traits::ObligationCtxt; diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index 08b344638dd..886ebddc75c 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -1009,8 +1009,8 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), res = res.and(check_associated_item(tcx, def_id)); let assoc_item = tcx.associated_item(def_id); match assoc_item.container { - ty::AssocItemContainer::Impl => {} - ty::AssocItemContainer::Trait => { + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => {} + ty::AssocContainer::Trait => { res = res.and(check_trait_item(tcx, def_id)); } } @@ -1026,8 +1026,8 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), res = res.and(check_associated_item(tcx, def_id)); let assoc_item = tcx.associated_item(def_id); match assoc_item.container { - ty::AssocItemContainer::Impl => {} - ty::AssocItemContainer::Trait => { + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => {} + ty::AssocContainer::Trait => { res = res.and(check_trait_item(tcx, def_id)); } } @@ -1043,8 +1043,8 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), let assoc_item = tcx.associated_item(def_id); let has_type = match assoc_item.container { - ty::AssocItemContainer::Impl => true, - ty::AssocItemContainer::Trait => { + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => true, + ty::AssocContainer::Trait => { tcx.ensure_ok().explicit_item_bounds(def_id); tcx.ensure_ok().explicit_item_self_bounds(def_id); if tcx.is_conditionally_const(def_id) { @@ -1177,12 +1177,9 @@ fn check_impl_items_against_trait<'tcx>( for &impl_item in impl_item_refs { let ty_impl_item = tcx.associated_item(impl_item); - let ty_trait_item = if let Some(trait_item_id) = ty_impl_item.trait_item_def_id { - tcx.associated_item(trait_item_id) - } else { - // Checked in `associated_item`. 
- tcx.dcx().span_delayed_bug(tcx.def_span(impl_item), "missing associated item in trait"); - continue; + let ty_trait_item = match ty_impl_item.expect_trait_impl() { + Ok(trait_item_id) => tcx.associated_item(trait_item_id), + Err(ErrorGuaranteed { .. }) => continue, }; let res = tcx.ensure_ok().compare_impl_item(impl_item.expect_local()); diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs index e4827256193..84fb09b7390 100644 --- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs +++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs @@ -37,7 +37,7 @@ pub(super) fn compare_impl_item( impl_item_def_id: LocalDefId, ) -> Result<(), ErrorGuaranteed> { let impl_item = tcx.associated_item(impl_item_def_id); - let trait_item = tcx.associated_item(impl_item.trait_item_def_id.unwrap()); + let trait_item = tcx.associated_item(impl_item.expect_trait_impl()?); let impl_trait_ref = tcx.impl_trait_ref(impl_item.container_id(tcx)).unwrap().instantiate_identity(); debug!(?impl_trait_ref); @@ -446,7 +446,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>( impl_m_def_id: LocalDefId, ) -> Result<&'tcx DefIdMap<ty::EarlyBinder<'tcx, Ty<'tcx>>>, ErrorGuaranteed> { let impl_m = tcx.associated_item(impl_m_def_id.to_def_id()); - let trait_m = tcx.associated_item(impl_m.trait_item_def_id.unwrap()); + let trait_m = tcx.associated_item(impl_m.expect_trait_impl()?); let impl_trait_ref = tcx.impl_trait_ref(tcx.parent(impl_m_def_id.to_def_id())).unwrap().instantiate_identity(); // First, check a few of the same things as `compare_impl_method`, @@ -1449,8 +1449,10 @@ fn compare_self_type<'tcx>( let self_string = |method: ty::AssocItem| { let untransformed_self_ty = match method.container { - ty::AssocItemContainer::Impl => impl_trait_ref.self_ty(), - ty::AssocItemContainer::Trait => tcx.types.self_param, + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => { + impl_trait_ref.self_ty() + } + ty::AssocContainer::Trait => tcx.types.self_param, }; let self_arg_ty = tcx.fn_sig(method.def_id).instantiate_identity().input(0); let (infcx, param_env) = tcx @@ -2458,8 +2460,12 @@ fn param_env_with_gat_bounds<'tcx>( for impl_ty in impl_tys_to_install { let trait_ty = match impl_ty.container { - ty::AssocItemContainer::Trait => impl_ty, - ty::AssocItemContainer::Impl => tcx.associated_item(impl_ty.trait_item_def_id.unwrap()), + ty::AssocContainer::InherentImpl => bug!(), + ty::AssocContainer::Trait => impl_ty, + ty::AssocContainer::TraitImpl(Err(_)) => continue, + ty::AssocContainer::TraitImpl(Ok(trait_item_def_id)) => { + tcx.associated_item(trait_item_def_id) + } }; let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> = diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index cfc6bc2f3a0..aa2d27ab809 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -449,6 +449,9 @@ pub(crate) fn check_intrinsic_type( } sym::unchecked_shl | sym::unchecked_shr => (2, 0, vec![param(0), param(1)], param(0)), sym::rotate_left | sym::rotate_right => (1, 0, vec![param(0), tcx.types.u32], param(0)), + sym::unchecked_funnel_shl | sym::unchecked_funnel_shr => { + (1, 0, vec![param(0), param(0), tcx.types.u32], param(0)) + } sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => { (1, 0, vec![param(0), param(0)], param(0)) } diff --git 
a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs index b5c0ca4727c..63d0f400aef 100644 --- a/compiler/rustc_hir_analysis/src/check/mod.rs +++ b/compiler/rustc_hir_analysis/src/check/mod.rs @@ -85,7 +85,9 @@ use rustc_infer::traits::ObligationCause; use rustc_middle::query::Providers; use rustc_middle::ty::error::{ExpectedFound, TypeError}; use rustc_middle::ty::print::with_types_for_signature; -use rustc_middle::ty::{self, GenericArgs, GenericArgsRef, Ty, TyCtxt, TypingMode}; +use rustc_middle::ty::{ + self, GenericArgs, GenericArgsRef, GenericParamDefKind, Ty, TyCtxt, TypingMode, +}; use rustc_middle::{bug, span_bug}; use rustc_session::parse::feature_err; use rustc_span::def_id::CRATE_DEF_ID; @@ -233,8 +235,7 @@ fn missing_items_err( }; // Obtain the level of indentation ending in `sugg_sp`. - let padding = - tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new()); + let padding = tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(String::new); let (mut missing_trait_item, mut missing_trait_item_none, mut missing_trait_item_label) = (Vec::new(), Vec::new(), Vec::new()); @@ -331,6 +332,7 @@ fn default_body_is_unstable( fn bounds_from_generic_predicates<'tcx>( tcx: TyCtxt<'tcx>, predicates: impl IntoIterator<Item = (ty::Clause<'tcx>, Span)>, + assoc: ty::AssocItem, ) -> (String, String) { let mut types: FxIndexMap<Ty<'tcx>, Vec<DefId>> = FxIndexMap::default(); let mut projections = vec![]; @@ -354,34 +356,50 @@ fn bounds_from_generic_predicates<'tcx>( } let mut where_clauses = vec![]; - let mut types_str = vec![]; - for (ty, bounds) in types { - if let ty::Param(_) = ty.kind() { - let mut bounds_str = vec![]; - for bound in bounds { - let mut projections_str = vec![]; - for projection in &projections { - let p = projection.skip_binder(); - if bound == tcx.parent(p.projection_term.def_id) - && p.projection_term.self_ty() == ty - { - let name = tcx.item_name(p.projection_term.def_id); - projections_str.push(format!("{} = {}", name, p.term)); + let generics = tcx.generics_of(assoc.def_id); + let types_str = generics + .own_params + .iter() + .filter(|p| matches!(p.kind, GenericParamDefKind::Type { synthetic: false, .. 
})) + .map(|p| { + // we just checked that it's a type, so the unwrap can't fail + let ty = tcx.mk_param_from_def(p).as_type().unwrap(); + if let Some(bounds) = types.get(&ty) { + let mut bounds_str = vec![]; + for bound in bounds.iter().copied() { + let mut projections_str = vec![]; + for projection in &projections { + let p = projection.skip_binder(); + if bound == tcx.parent(p.projection_term.def_id) + && p.projection_term.self_ty() == ty + { + let name = tcx.item_name(p.projection_term.def_id); + projections_str.push(format!("{} = {}", name, p.term)); + } + } + let bound_def_path = tcx.def_path_str(bound); + if projections_str.is_empty() { + where_clauses.push(format!("{}: {}", ty, bound_def_path)); + } else { + bounds_str.push(format!( + "{}<{}>", + bound_def_path, + projections_str.join(", ") + )); } } - let bound_def_path = tcx.def_path_str(bound); - if projections_str.is_empty() { - where_clauses.push(format!("{}: {}", ty, bound_def_path)); + if bounds_str.is_empty() { + ty.to_string() } else { - bounds_str.push(format!("{}<{}>", bound_def_path, projections_str.join(", "))); + format!("{}: {}", ty, bounds_str.join(" + ")) } - } - if bounds_str.is_empty() { - types_str.push(ty.to_string()); } else { - types_str.push(format!("{}: {}", ty, bounds_str.join(" + "))); + ty.to_string() } - } else { + }) + .collect::<Vec<_>>(); + for (ty, bounds) in types.into_iter() { + if !matches!(ty.kind(), ty::Param(_)) { // Avoid suggesting the following: // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {} where_clauses.extend( @@ -473,10 +491,10 @@ fn fn_sig_suggestion<'tcx>( let output = if !output.is_unit() { format!(" -> {output}") } else { String::new() }; let safety = sig.safety.prefix_str(); - let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates); + let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates, assoc); // FIXME: this is not entirely correct, as the lifetimes from borrowed params will - // not be present in the `fn` definition, not will we account for renamed + // not be present in the `fn` definition, nor will we account for renamed // lifetimes between the `impl` and the `trait`, but this should be good enough to // fill in a significant portion of the missing code, and other subsequent // suggestions can help the user fix the code. @@ -512,6 +530,7 @@ fn suggestion_signature<'tcx>( let (generics, where_clauses) = bounds_from_generic_predicates( tcx, tcx.predicates_of(assoc.def_id).instantiate_own(tcx, args), + assoc, ); format!("type {}{generics} = /* Type */{where_clauses};", assoc.name()) } diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index e6a1f6d8d8b..d33f1f3e12a 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -944,12 +944,11 @@ pub(crate) fn check_associated_item( // Avoid bogus "type annotations needed `Foo: Bar`" errors on `impl Bar for Foo` in case // other `Foo` impls are incoherent. 
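// A minimal sketch, inferred from the matches in the surrounding hunks, of the
// reworked associated-item container; the real `rustc_middle::ty` definition
// may differ in derives and helper methods.
use rustc_span::ErrorGuaranteed;
use rustc_span::def_id::DefId;

enum AssocContainer {
    /// Declared directly in a trait.
    Trait,
    /// Defined in an inherent `impl` block.
    InherentImpl,
    /// Defined in a trait `impl`; `Ok` carries the trait item being
    /// implemented, `Err` records that resolution already failed.
    TraitImpl(Result<DefId, ErrorGuaranteed>),
}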
- tcx.ensure_ok() - .coherent_trait(tcx.parent(item.trait_item_def_id.unwrap_or(item_id.into())))?; + tcx.ensure_ok().coherent_trait(tcx.parent(item.trait_item_or_self()?))?; let self_ty = match item.container { - ty::AssocItemContainer::Trait => tcx.types.self_param, - ty::AssocItemContainer::Impl => { + ty::AssocContainer::Trait => tcx.types.self_param, + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => { tcx.type_of(item.container_id(tcx)).instantiate_identity() } }; @@ -978,7 +977,7 @@ pub(crate) fn check_associated_item( check_method_receiver(wfcx, hir_sig, item, self_ty) } ty::AssocKind::Type { .. } => { - if let ty::AssocItemContainer::Trait = item.container { + if let ty::AssocContainer::Trait = item.container { check_associated_type_bounds(wfcx, item, span) } if item.defaultness(tcx).has_value() { @@ -1047,7 +1046,7 @@ fn check_type_defn<'tcx>( let needs_drop_copy = || { packed && { let ty = tcx.type_of(variant.tail().did).instantiate_identity(); - let ty = tcx.erase_regions(ty); + let ty = tcx.erase_and_anonymize_regions(ty); assert!(!ty.has_infer()); ty.needs_drop(tcx, wfcx.infcx.typing_env(wfcx.param_env)) } diff --git a/compiler/rustc_hir_analysis/src/coherence/orphan.rs b/compiler/rustc_hir_analysis/src/coherence/orphan.rs index f707196c816..621431ae234 100644 --- a/compiler/rustc_hir_analysis/src/coherence/orphan.rs +++ b/compiler/rustc_hir_analysis/src/coherence/orphan.rs @@ -404,7 +404,7 @@ fn emit_orphan_check_error<'tcx>( of_trait.trait_ref.path.span }; - ty = tcx.erase_regions(ty); + ty = tcx.erase_and_anonymize_regions(ty); let is_foreign = !trait_ref.def_id.is_local() && matches!(is_target_ty, IsFirstInputType::No); diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs index b59dc4bd132..dd3590f9ac5 100644 --- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs @@ -111,9 +111,8 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen } Some(ImplTraitInTraitData::Impl { fn_def_id }) => { - let assoc_item = tcx.associated_item(def_id); - let trait_assoc_predicates = - tcx.explicit_predicates_of(assoc_item.trait_item_def_id.unwrap()); + let trait_item_def_id = tcx.trait_item_of(def_id).unwrap(); + let trait_assoc_predicates = tcx.explicit_predicates_of(trait_item_def_id); let impl_assoc_identity_args = ty::GenericArgs::identity_for_item(tcx, def_id); let impl_def_id = tcx.parent(fn_def_id); @@ -174,12 +173,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen } }; - if let Node::TraitItem(item) = node { - let mut bounds = Vec::new(); - icx.lowerer().add_default_trait_item_bounds(item, &mut bounds); - predicates.extend(bounds); - } - let generics = tcx.generics_of(def_id); // Below we'll consider the bounds on the type parameters (including `Self`) diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs index 62125c99d80..8cbf17162e3 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs @@ -125,8 +125,8 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_ Some(ty::ImplTraitInTraitData::Impl { fn_def_id }) => { match tcx.collect_return_position_impl_trait_in_trait_tys(fn_def_id) { Ok(map) => { - let assoc_item = tcx.associated_item(def_id); - return map[&assoc_item.trait_item_def_id.unwrap()]; + 
let trait_item_def_id = tcx.trait_item_of(def_id).unwrap(); + return map[&trait_item_def_id]; } Err(_) => { return ty::EarlyBinder::bind(Ty::new_error_with_message( @@ -198,7 +198,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_ } } ImplItemKind::Type(ty) => { - if tcx.impl_trait_ref(tcx.hir_get_parent_item(hir_id)).is_none() { + if let ImplItemImplKind::Inherent { .. } = item.impl_kind { check_feature_inherent_assoc_ty(tcx, item.span); } diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs index 6565ad7cf53..3b6367219b7 100644 --- a/compiler/rustc_hir_analysis/src/errors.rs +++ b/compiler/rustc_hir_analysis/src/errors.rs @@ -6,6 +6,7 @@ use rustc_errors::{ Applicability, Diag, DiagCtxtHandle, DiagSymbolList, Diagnostic, EmissionGuarantee, Level, MultiSpan, }; +use rustc_hir::limit::Limit; use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; use rustc_middle::ty::Ty; use rustc_span::{Ident, Span, Symbol}; @@ -557,7 +558,7 @@ pub(crate) struct AutoDerefReachedRecursionLimit<'a> { #[label] pub span: Span, pub ty: Ty<'a>, - pub suggested_limit: rustc_session::Limit, + pub suggested_limit: Limit, pub crate_name: Symbol, } diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs index d14aef8ace4..99dc8e6e522 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs @@ -1,12 +1,13 @@ +use std::assert_matches::assert_matches; use std::ops::ControlFlow; use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; use rustc_errors::codes::*; use rustc_errors::struct_span_code_err; use rustc_hir as hir; +use rustc_hir::PolyTraitRef; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{CRATE_DEF_ID, DefId, LocalDefId}; -use rustc_hir::{AmbigArg, PolyTraitRef}; use rustc_middle::bug; use rustc_middle::ty::{ self as ty, IsSuggestable, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, @@ -230,122 +231,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } } - /// Checks whether `Self: DefaultAutoTrait` bounds should be added on trait super bounds - /// or associated items. - /// - /// To keep backward compatibility with existing code, `experimental_default_bounds` bounds - /// should be added everywhere, including super bounds. However this causes a huge performance - /// costs. 
For optimization purposes instead of adding default supertraits, bounds - /// are added to the associated items: - /// - /// ```ignore(illustrative) - /// // Default bounds are generated in the following way: - /// trait Trait { - /// fn foo(&self) where Self: Leak {} - /// } - /// - /// // instead of this: - /// trait Trait: Leak { - /// fn foo(&self) {} - /// } - /// ``` - /// It is not always possible to do this because of backward compatibility: - /// - /// ```ignore(illustrative) - /// pub trait Trait<Rhs = Self> {} - /// pub trait Trait1 : Trait {} - /// //~^ ERROR: `Rhs` requires `DefaultAutoTrait`, but `Self` is not `DefaultAutoTrait` - /// ``` - /// - /// or: - /// - /// ```ignore(illustrative) - /// trait Trait { - /// type Type where Self: Sized; - /// } - /// trait Trait2<T> : Trait<Type = T> {} - /// //~^ ERROR: `DefaultAutoTrait` required for `Trait2`, by implicit `Self: DefaultAutoTrait` in `Trait::Type` - /// ``` - /// - /// Therefore, `experimental_default_bounds` are still being added to supertraits if - /// the `SelfTyParam` or `AssocItemConstraint` were found in a trait header. - fn requires_default_supertraits( - &self, - hir_bounds: &'tcx [hir::GenericBound<'tcx>], - hir_generics: &'tcx hir::Generics<'tcx>, - ) -> bool { - struct TraitInfoCollector; - - impl<'tcx> hir::intravisit::Visitor<'tcx> for TraitInfoCollector { - type Result = ControlFlow<()>; - - fn visit_assoc_item_constraint( - &mut self, - _constraint: &'tcx hir::AssocItemConstraint<'tcx>, - ) -> Self::Result { - ControlFlow::Break(()) - } - - fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx, AmbigArg>) -> Self::Result { - if matches!( - &t.kind, - hir::TyKind::Path(hir::QPath::Resolved( - _, - hir::Path { res: hir::def::Res::SelfTyParam { .. }, .. }, - )) - ) { - return ControlFlow::Break(()); - } - hir::intravisit::walk_ty(self, t) - } - } - - let mut found = false; - for bound in hir_bounds { - found |= hir::intravisit::walk_param_bound(&mut TraitInfoCollector, bound).is_break(); - } - found |= hir::intravisit::walk_generics(&mut TraitInfoCollector, hir_generics).is_break(); - found - } - - /// Implicitly add `Self: DefaultAutoTrait` clauses on trait associated items if - /// they are not added as super trait bounds to the trait itself. See - /// `requires_default_supertraits` for more information. - pub(crate) fn add_default_trait_item_bounds( - &self, - trait_item: &hir::TraitItem<'tcx>, - bounds: &mut Vec<(ty::Clause<'tcx>, Span)>, - ) { - let tcx = self.tcx(); - if !tcx.sess.opts.unstable_opts.experimental_default_bounds { - return; - } - - let parent = tcx.local_parent(trait_item.hir_id().owner.def_id); - let hir::Node::Item(parent_trait) = tcx.hir_node_by_def_id(parent) else { - unreachable!(); - }; - - let (trait_generics, trait_bounds) = match parent_trait.kind { - hir::ItemKind::Trait(_, _, _, _, generics, supertraits, _) => (generics, supertraits), - hir::ItemKind::TraitAlias(_, generics, supertraits) => (generics, supertraits), - _ => unreachable!(), - }; - - if !self.requires_default_supertraits(trait_bounds, trait_generics) { - let self_ty_where_predicates = (parent, trait_item.generics.predicates); - self.add_default_traits( - bounds, - tcx.types.self_param, - &[], - Some(self_ty_where_predicates), - trait_item.span, - ); - } - } - - /// Lazily sets `experimental_default_bounds` to true on trait super bounds. - /// See `requires_default_supertraits` for more information. + /// Adds `experimental_default_bounds` bounds to the supertrait bounds. 
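// Not part of the patch: a surface-level sketch of what the consolidated
// behavior means, assuming `-Zexperimental-default-bounds` with a default auto
// trait named `Leak` (the name used by the doc comment removed above).
#![feature(auto_traits)]
auto trait Leak {}

// With the flag, `Speak` now behaves as if it were written `trait Speak: Leak`,
// instead of each item receiving its own implicit `where Self: Leak` bound.
trait Speak {
    fn speak(&self);
}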
pub(crate) fn add_default_super_traits( &self, trait_def_id: LocalDefId, @@ -354,21 +240,21 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { hir_generics: &'tcx hir::Generics<'tcx>, span: Span, ) { - if !self.tcx().sess.opts.unstable_opts.experimental_default_bounds { + assert_matches!(self.tcx().def_kind(trait_def_id), DefKind::Trait | DefKind::TraitAlias); + + // Supertraits for auto trait are unsound according to the unstable book: + // https://doc.rust-lang.org/beta/unstable-book/language-features/auto-traits.html#supertraits + if self.tcx().trait_is_auto(trait_def_id.to_def_id()) { return; } - assert!(matches!(self.tcx().def_kind(trait_def_id), DefKind::Trait | DefKind::TraitAlias)); - if self.requires_default_supertraits(hir_bounds, hir_generics) { - let self_ty_where_predicates = (trait_def_id, hir_generics.predicates); - self.add_default_traits( - bounds, - self.tcx().types.self_param, - hir_bounds, - Some(self_ty_where_predicates), - span, - ); - } + self.add_default_traits( + bounds, + self.tcx().types.self_param, + hir_bounds, + Some((trait_def_id, hir_generics.predicates)), + span, + ); } pub(crate) fn add_default_traits( diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs index 93b82acf621..0cf9cb7193f 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs @@ -482,7 +482,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { .map(|header| header.trait_ref.instantiate_identity().self_ty()) // We don't care about blanket impls. .filter(|self_ty| !self_ty.has_non_region_param()) - .map(|self_ty| tcx.erase_regions(self_ty).to_string()) + .map(|self_ty| tcx.erase_and_anonymize_regions(self_ty).to_string()) .collect() }; // FIXME: also look at `tcx.generics_of(self.item_def_id()).params` any that diff --git a/compiler/rustc_hir_analysis/src/lib.rs b/compiler/rustc_hir_analysis/src/lib.rs index 2562ab7542a..6659aff7111 100644 --- a/compiler/rustc_hir_analysis/src/lib.rs +++ b/compiler/rustc_hir_analysis/src/lib.rs @@ -238,7 +238,8 @@ pub fn check_crate(tcx: TyCtxt<'_>) { _ => (), } // Skip `AnonConst`s because we feed their `type_of`. - if !matches!(def_kind, DefKind::AnonConst) { + // Also skip items for which typeck forwards to parent typeck. + if !(matches!(def_kind, DefKind::AnonConst) || def_kind.is_typeck_child()) { tcx.ensure_ok().typeck(item_def_id); } // Ensure we generate the new `DefId` before finishing `check_crate`. diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs index 3eeb0eac023..27540fd1a43 100644 --- a/compiler/rustc_hir_typeck/src/cast.rs +++ b/compiler/rustc_hir_typeck/src/cast.rs @@ -851,8 +851,8 @@ impl<'a, 'tcx> CastCheck<'tcx> { debug!("check_ptr_ptr_cast m_src={m_src:?} m_dst={m_dst:?}"); // ptr-ptr cast. metadata must match. 
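// (Context for the rename applied here and in the other hunks of this patch:
// `erase_regions` becomes `erase_and_anonymize_regions`, and per the writeback
// comment later in this diff the longer name spells out that bound regions are
// anonymized in addition to free regions being erased.)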
- let src_kind = fcx.tcx.erase_regions(fcx.pointer_kind(m_src.ty, self.span)?); - let dst_kind = fcx.tcx.erase_regions(fcx.pointer_kind(m_dst.ty, self.span)?); + let src_kind = fcx.tcx.erase_and_anonymize_regions(fcx.pointer_kind(m_src.ty, self.span)?); + let dst_kind = fcx.tcx.erase_and_anonymize_regions(fcx.pointer_kind(m_dst.ty, self.span)?); // We can't cast if target pointer kind is unknown let Some(dst_kind) = dst_kind else { diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs index e66601631fc..b99f811db1a 100644 --- a/compiler/rustc_hir_typeck/src/coercion.rs +++ b/compiler/rustc_hir_typeck/src/coercion.rs @@ -1897,7 +1897,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> { fcx.suggest_semicolon_at_end(cond_expr.span, &mut err); } } - }; + } // If this is due to an explicit `return`, suggest adding a return type. if let Some((fn_id, fn_decl)) = fcx.get_fn_decl(block_or_return_id) diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs index a652e08905a..7adbee7ff28 100644 --- a/compiler/rustc_hir_typeck/src/expr.rs +++ b/compiler/rustc_hir_typeck/src/expr.rs @@ -2585,12 +2585,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .filter(|item| item.is_fn() && !item.is_method()) .filter_map(|item| { // Only assoc fns that return `Self` - let fn_sig = self.tcx.fn_sig(item.def_id).skip_binder(); - let ret_ty = fn_sig.output(); - let ret_ty = self.tcx.normalize_erasing_late_bound_regions( - self.typing_env(self.param_env), - ret_ty, - ); + let fn_sig = self + .tcx + .fn_sig(item.def_id) + .instantiate(self.tcx, self.fresh_args_for_item(span, item.def_id)); + let ret_ty = self.tcx.instantiate_bound_regions_with_erased(fn_sig.output()); if !self.can_eq(self.param_env, ret_ty, adt_ty) { return None; } diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs index 0b3d50ff219..7370124e800 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs @@ -1021,7 +1021,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let container_id = assoc_item.container_id(tcx); debug!(?def_id, ?container, ?container_id); match container { - ty::AssocItemContainer::Trait => { + ty::AssocContainer::Trait => { if let Err(e) = callee::check_legal_trait_for_method_call( tcx, path_span, @@ -1033,7 +1033,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.set_tainted_by_errors(e); } } - ty::AssocItemContainer::Impl => { + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => { if segments.len() == 1 { // `<T>::assoc` will end up here, and so // can `T::assoc`. If this came from an diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs index 5aec50c8b53..44a6084ebd5 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs @@ -1912,7 +1912,22 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { hir::StmtKind::Expr(ref expr) => { // Check with expected type of `()`. self.check_expr_has_type_or_error(expr, self.tcx.types.unit, |err| { - if expr.can_have_side_effects() { + if self.is_next_stmt_expr_continuation(stmt.hir_id) + && let hir::ExprKind::Match(..) | hir::ExprKind::If(..) = expr.kind + { + // We have something like `match () { _ => true } && true`. Suggest + // wrapping in parentheses. 
We find the statement or expression + // following the `match` (`&& true`) and see if it is something that + // can reasonably be interpreted as a binop following an expression. + err.multipart_suggestion( + "parentheses are required to parse this as an expression", + vec![ + (expr.span.shrink_to_lo(), "(".to_string()), + (expr.span.shrink_to_hi(), ")".to_string()), + ], + Applicability::MachineApplicable, + ); + } else if expr.can_have_side_effects() { self.suggest_semicolon_at_end(expr.span, err); } }); @@ -2339,7 +2354,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // We want it to always point to the trait item. // If we're pointing at an inherent function, we don't need to do anything, // so we fetch the parent and verify if it's a trait item. - && let maybe_trait_item_def_id = assoc_item.trait_item_def_id.unwrap_or(def_id) + && let Ok(maybe_trait_item_def_id) = assoc_item.trait_item_or_self() && let maybe_trait_def_id = self.tcx.parent(maybe_trait_item_def_id) // Just an easy way to check "trait_def_id == Fn/FnMut/FnOnce" && let Some(call_kind) = self.tcx.fn_trait_kind_from_def_id(maybe_trait_def_id) diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs index 74f27e85cba..7a060cafeab 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs @@ -21,7 +21,6 @@ use rustc_middle::ty::{self, Const, Ty, TyCtxt, TypeVisitableExt}; use rustc_session::Session; use rustc_span::{self, DUMMY_SP, ErrorGuaranteed, Ident, Span, sym}; use rustc_trait_selection::error_reporting::TypeErrCtxt; -use rustc_trait_selection::error_reporting::infer::sub_relations::SubRelations; use rustc_trait_selection::traits::{ self, FulfillmentError, ObligationCause, ObligationCauseCode, ObligationCtxt, }; @@ -188,14 +187,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// /// [`InferCtxtErrorExt::err_ctxt`]: rustc_trait_selection::error_reporting::InferCtxtErrorExt::err_ctxt pub(crate) fn err_ctxt(&'a self) -> TypeErrCtxt<'a, 'tcx> { - let mut sub_relations = SubRelations::default(); - sub_relations.add_constraints( - self, - self.fulfillment_cx.borrow_mut().pending_obligations().iter().map(|o| o.predicate), - ); TypeErrCtxt { infcx: &self.infcx, - sub_relations: RefCell::new(sub_relations), typeck_results: Some(self.typeck_results.borrow()), fallback_has_occurred: self.fallback_has_occurred.get(), normalize_fn_sig: Box::new(|fn_sig| { diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs index aca3840712e..1998a1884b7 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs @@ -1,3 +1,4 @@ +// ignore-tidy-filelength use core::cmp::min; use core::iter; @@ -766,56 +767,121 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { needs_block: bool, parent_is_closure: bool, ) { - if expected.is_unit() { - // `BlockTailExpression` only relevant if the tail expr would be - // useful on its own. - match expression.kind { - ExprKind::Call(..) - | ExprKind::MethodCall(..) - | ExprKind::Loop(..) - | ExprKind::If(..) - | ExprKind::Match(..) - | ExprKind::Block(..) - if expression.can_have_side_effects() - // If the expression is from an external macro, then do not suggest - // adding a semicolon, because there's nowhere to put it. - // See issue #81943. 
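// A minimal illustration of the case the new "parentheses are required to
// parse this as an expression" suggestion targets, as I read these hunks
// (illustrative, not a test from the patch):
fn main() {
    // Without parentheses, `match () { _ => true } && true;` fails: the
    // block-like `match` ends the statement, is checked against `()`, and the
    // trailing `&& true` becomes a separate expression statement.
    // The suggested parenthesized form is one expression statement and compiles:
    let _ = (match () { _ => true }) && true;
}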
- && !expression.span.in_external_macro(self.tcx.sess.source_map()) => + if !expected.is_unit() { + return; + } + // `BlockTailExpression` only relevant if the tail expr would be + // useful on its own. + match expression.kind { + ExprKind::Call(..) + | ExprKind::MethodCall(..) + | ExprKind::Loop(..) + | ExprKind::If(..) + | ExprKind::Match(..) + | ExprKind::Block(..) + if expression.can_have_side_effects() + // If the expression is from an external macro, then do not suggest + // adding a semicolon, because there's nowhere to put it. + // See issue #81943. + && !expression.span.in_external_macro(self.tcx.sess.source_map()) => + { + if needs_block { + err.multipart_suggestion( + "consider using a semicolon here", + vec![ + (expression.span.shrink_to_lo(), "{ ".to_owned()), + (expression.span.shrink_to_hi(), "; }".to_owned()), + ], + Applicability::MachineApplicable, + ); + } else if let hir::Node::Block(block) = self.tcx.parent_hir_node(expression.hir_id) + && let hir::Node::Expr(expr) = self.tcx.parent_hir_node(block.hir_id) + && let hir::Node::Expr(if_expr) = self.tcx.parent_hir_node(expr.hir_id) + && let hir::ExprKind::If(_cond, _then, Some(_else)) = if_expr.kind + && let hir::Node::Stmt(stmt) = self.tcx.parent_hir_node(if_expr.hir_id) + && let hir::StmtKind::Expr(_) = stmt.kind + && self.is_next_stmt_expr_continuation(stmt.hir_id) { - if needs_block { - err.multipart_suggestion( - "consider using a semicolon here", - vec![ - (expression.span.shrink_to_lo(), "{ ".to_owned()), - (expression.span.shrink_to_hi(), "; }".to_owned()), - ], - Applicability::MachineApplicable, - ); - } else { - err.span_suggestion( - expression.span.shrink_to_hi(), - "consider using a semicolon here", - ";", - Applicability::MachineApplicable, - ); - } + err.multipart_suggestion( + "parentheses are required to parse this as an expression", + vec![ + (stmt.span.shrink_to_lo(), "(".to_string()), + (stmt.span.shrink_to_hi(), ")".to_string()), + ], + Applicability::MachineApplicable, + ); + } else { + err.span_suggestion( + expression.span.shrink_to_hi(), + "consider using a semicolon here", + ";", + Applicability::MachineApplicable, + ); } - ExprKind::Path(..) | ExprKind::Lit(_) - if parent_is_closure - && !expression.span.in_external_macro(self.tcx.sess.source_map()) => + } + ExprKind::Path(..) | ExprKind::Lit(_) + if parent_is_closure + && !expression.span.in_external_macro(self.tcx.sess.source_map()) => + { + err.span_suggestion_verbose( + expression.span.shrink_to_lo(), + "consider ignoring the value", + "_ = ", + Applicability::MachineApplicable, + ); + } + _ => { + if let hir::Node::Block(block) = self.tcx.parent_hir_node(expression.hir_id) + && let hir::Node::Expr(expr) = self.tcx.parent_hir_node(block.hir_id) + && let hir::Node::Expr(if_expr) = self.tcx.parent_hir_node(expr.hir_id) + && let hir::ExprKind::If(_cond, _then, Some(_else)) = if_expr.kind + && let hir::Node::Stmt(stmt) = self.tcx.parent_hir_node(if_expr.hir_id) + && let hir::StmtKind::Expr(_) = stmt.kind + && self.is_next_stmt_expr_continuation(stmt.hir_id) { - err.span_suggestion_verbose( - expression.span.shrink_to_lo(), - "consider ignoring the value", - "_ = ", + // The error is pointing at an arm of an if-expression, and we want to get the + // `Span` of the whole if-expression for the suggestion. This only works for a + // single level of nesting, which is fine. + // We have something like `if true { false } else { true } && true`. Suggest + // wrapping in parentheses. 
We find the statement or expression following the + // `if` (`&& true`) and see if it is something that can reasonably be + // interpreted as a binop following an expression. + err.multipart_suggestion( + "parentheses are required to parse this as an expression", + vec![ + (stmt.span.shrink_to_lo(), "(".to_string()), + (stmt.span.shrink_to_hi(), ")".to_string()), + ], Applicability::MachineApplicable, ); } - _ => (), } } } + pub(crate) fn is_next_stmt_expr_continuation(&self, hir_id: HirId) -> bool { + if let hir::Node::Block(b) = self.tcx.parent_hir_node(hir_id) + && let mut stmts = b.stmts.iter().skip_while(|s| s.hir_id != hir_id) + && let Some(_) = stmts.next() // The statement the statement that was passed in + && let Some(next) = match (stmts.next(), b.expr) { // The following statement + (Some(next), _) => match next.kind { + hir::StmtKind::Expr(next) | hir::StmtKind::Semi(next) => Some(next), + _ => None, + }, + (None, Some(next)) => Some(next), + _ => None, + } + && let hir::ExprKind::AddrOf(..) // prev_stmt && next + | hir::ExprKind::Unary(..) // prev_stmt * next + | hir::ExprKind::Err(_) = next.kind + // prev_stmt + next + { + true + } else { + false + } + } + /// A possible error is to forget to add a return type that is needed: /// /// ```compile_fail,E0308 @@ -1292,7 +1358,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .infcx .type_implements_trait( clone_trait_def, - [self.tcx.erase_regions(expected_ty)], + [self.tcx.erase_and_anonymize_regions(expected_ty)], self.param_env, ) .must_apply_modulo_regions() @@ -1816,7 +1882,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if segment.ident.name == sym::clone && results.type_dependent_def_id(expr.hir_id).is_some_and(|did| { let assoc_item = self.tcx.associated_item(did); - assoc_item.container == ty::AssocItemContainer::Trait + assoc_item.container == ty::AssocContainer::Trait && assoc_item.container_id(self.tcx) == clone_trait_did }) // If that clone call hasn't already dereferenced the self type (i.e. don't give this diff --git a/compiler/rustc_hir_typeck/src/inline_asm.rs b/compiler/rustc_hir_typeck/src/inline_asm.rs index b59c1752c25..c0cd23be690 100644 --- a/compiler/rustc_hir_typeck/src/inline_asm.rs +++ b/compiler/rustc_hir_typeck/src/inline_asm.rs @@ -44,7 +44,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { if ty.has_non_region_infer() { Ty::new_misc_error(self.tcx()) } else { - self.tcx().erase_regions(ty) + self.tcx().erase_and_anonymize_regions(ty) } } diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs index 129de32fd4a..7f5397a7926 100644 --- a/compiler/rustc_hir_typeck/src/lib.rs +++ b/compiler/rustc_hir_typeck/src/lib.rs @@ -278,8 +278,7 @@ fn infer_type_if_missing<'tcx>(fcx: &FnCtxt<'_, 'tcx>, node: Node<'tcx>) -> Opti { if let Some(item) = tcx.opt_associated_item(def_id.into()) && let ty::AssocKind::Const { .. 
} = item.kind - && let ty::AssocItemContainer::Impl = item.container - && let Some(trait_item_def_id) = item.trait_item_def_id + && let ty::AssocContainer::TraitImpl(Ok(trait_item_def_id)) = item.container { let impl_def_id = item.container_id(tcx); let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity(); diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs index ab584eb7c90..4185f7f6996 100644 --- a/compiler/rustc_hir_typeck/src/method/probe.rs +++ b/compiler/rustc_hir_typeck/src/method/probe.rs @@ -18,8 +18,8 @@ use rustc_middle::middle::stability; use rustc_middle::ty::elaborate::supertrait_def_ids; use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams, simplify_type}; use rustc_middle::ty::{ - self, AssocItem, AssocItemContainer, GenericArgs, GenericArgsRef, GenericParamDefKind, - ParamEnvAnd, Ty, TyCtxt, TypeVisitableExt, Upcast, + self, AssocContainer, AssocItem, GenericArgs, GenericArgsRef, GenericParamDefKind, ParamEnvAnd, + Ty, TyCtxt, TypeVisitableExt, Upcast, }; use rustc_middle::{bug, span_bug}; use rustc_session::lint; @@ -403,15 +403,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // special handling for this "trivial case" is a good idea. let infcx = &self.infcx; - let (ParamEnvAnd { param_env: _, value: self_ty }, canonical_inference_vars) = + let (ParamEnvAnd { param_env: _, value: self_ty }, var_values) = infcx.instantiate_canonical(span, &query_input.canonical); debug!(?self_ty, ?query_input, "probe_op: Mode::Path"); MethodAutoderefStepsResult { steps: infcx.tcx.arena.alloc_from_iter([CandidateStep { - self_ty: self.make_query_response_ignoring_pending_obligations( - canonical_inference_vars, - self_ty, - ), + self_ty: self + .make_query_response_ignoring_pending_obligations(var_values, self_ty), autoderefs: 0, from_unsafe_deref: false, unsize: false, @@ -530,7 +528,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ProbeScope::Single(def_id) => { let item = self.tcx.associated_item(def_id); // FIXME(fn_delegation): Delegation to inherent methods is not yet supported. - assert_eq!(item.container, AssocItemContainer::Trait); + assert_eq!(item.container, AssocContainer::Trait); let trait_def_id = self.tcx.parent(def_id); let trait_span = self.tcx.def_span(trait_def_id); @@ -1661,7 +1659,7 @@ impl<'tcx> Pick<'tcx> { /// Do not use for type checking. 
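// (Presumably why only the `trait_item_def_id: _` binding had to be deleted
// here: the exhaustive destructuring in `differs_from` means any change to the
// fields of `AssocItem` or `Pick` fails to compile until this comparison is
// revisited.)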
pub(crate) fn differs_from(&self, other: &Self) -> bool { let Self { - item: AssocItem { def_id, kind: _, container: _, trait_item_def_id: _ }, + item: AssocItem { def_id, kind: _, container: _ }, kind: _, import_ids: _, autoderefs: _, @@ -1704,7 +1702,7 @@ impl<'tcx> Pick<'tcx> { tcx.def_path_str(self.item.def_id), )); } - (ty::AssocKind::Const { name }, ty::AssocItemContainer::Trait) => { + (ty::AssocKind::Const { name }, ty::AssocContainer::Trait) => { let def_id = self.item.container_id(tcx); lint.span_suggestion( span, diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs index d75bc9edab2..90301d1b391 100644 --- a/compiler/rustc_hir_typeck/src/writeback.rs +++ b/compiler/rustc_hir_typeck/src/writeback.rs @@ -802,7 +802,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { format!("unexpected inference variable after writeback: {predicate:?}"), ); } else { - let predicate = self.tcx().erase_regions(predicate); + let predicate = self.tcx().erase_and_anonymize_regions(predicate); if cause.has_infer() || cause.has_placeholders() { // We can't use the the obligation cause as it references // information local to this query. @@ -984,8 +984,8 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> { // borrowck, and specifically region constraints will be populated during // MIR typeck which is run on the new body. // - // We're not using `tcx.erase_regions` as that also anonymizes bound variables, - // regressing borrowck diagnostics. + // We're not using `tcx.erase_and_anonymize_regions` as that also + // anonymizes bound variables, regressing borrowck diagnostics. value = fold_regions(tcx, value, |_, _| tcx.lifetimes.re_erased); // Normalize consts in writeback, because GCE doesn't normalize eagerly. diff --git a/compiler/rustc_index/src/idx.rs b/compiler/rustc_index/src/idx.rs index 33f406e2113..9cd7134659c 100644 --- a/compiler/rustc_index/src/idx.rs +++ b/compiler/rustc_index/src/idx.rs @@ -130,7 +130,22 @@ impl<I: Idx, T> IntoSliceIdx<I, [T]> for core::range::RangeFrom<I> { impl<I: Idx, T> IntoSliceIdx<I, [T]> for core::range::RangeInclusive<I> { type Output = core::range::RangeInclusive<usize>; #[inline] + #[cfg(bootstrap)] fn into_slice_idx(self) -> Self::Output { core::range::RangeInclusive { start: self.start.index(), end: self.end.index() } } + #[inline] + #[cfg(not(bootstrap))] + fn into_slice_idx(self) -> Self::Output { + core::range::RangeInclusive { start: self.start.index(), last: self.last.index() } + } +} + +#[cfg(all(feature = "nightly", not(bootstrap)))] +impl<I: Idx, T> IntoSliceIdx<I, [T]> for core::range::RangeToInclusive<I> { + type Output = core::range::RangeToInclusive<usize>; + #[inline] + fn into_slice_idx(self) -> Self::Output { + core::range::RangeToInclusive { last: self.last.index() } + } } diff --git a/compiler/rustc_index/src/interval.rs b/compiler/rustc_index/src/interval.rs index 0225c5c4f32..dda5253e7c5 100644 --- a/compiler/rustc_index/src/interval.rs +++ b/compiler/rustc_index/src/interval.rs @@ -140,6 +140,30 @@ impl<I: Idx> IntervalSet<I> { result } + /// Specialized version of `insert` when we know that the inserted point is *after* any + /// contained. + pub fn append(&mut self, point: I) { + let point = point.index() as u32; + + if let Some((_, last_end)) = self.map.last_mut() { + assert!(*last_end <= point); + if point == *last_end { + // The point is already in the set. 
+ } else if point == *last_end + 1 { + *last_end = point; + } else { + self.map.push((point, point)); + } + } else { + self.map.push((point, point)); + } + + debug_assert!( + self.check_invariants(), + "wrong intervals after append {point:?} to {self:?}" + ); + } + pub fn contains(&self, needle: I) -> bool { let needle = needle.index() as u32; let Some(last) = self.map.partition_point(|r| r.0 <= needle).checked_sub(1) else { @@ -176,6 +200,32 @@ impl<I: Idx> IntervalSet<I> { }) } + pub fn disjoint(&self, other: &IntervalSet<I>) -> bool + where + I: Step, + { + let helper = move || { + let mut self_iter = self.iter_intervals(); + let mut other_iter = other.iter_intervals(); + + let mut self_current = self_iter.next()?; + let mut other_current = other_iter.next()?; + + loop { + if self_current.end <= other_current.start { + self_current = self_iter.next()?; + continue; + } + if other_current.end <= self_current.start { + other_current = other_iter.next()?; + continue; + } + return Some(false); + } + }; + helper().unwrap_or(true) + } + pub fn is_empty(&self) -> bool { self.map.is_empty() } @@ -325,6 +375,10 @@ impl<R: Idx, C: Step + Idx> SparseIntervalMatrix<R, C> { self.ensure_row(row).insert(point) } + pub fn append(&mut self, row: R, point: C) { + self.ensure_row(row).append(point) + } + pub fn contains(&self, row: R, point: C) -> bool { self.row(row).is_some_and(|r| r.contains(point)) } diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs index 3ad14dc79d5..3c5e4a91c98 100644 --- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs +++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs @@ -6,6 +6,7 @@ //! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sso::SsoHashMap; use rustc_index::Idx; use rustc_middle::bug; use rustc_middle::ty::{ @@ -17,7 +18,7 @@ use tracing::debug; use crate::infer::InferCtxt; use crate::infer::canonical::{ - Canonical, CanonicalQueryInput, CanonicalTyVarKind, CanonicalVarKind, OriginalQueryValues, + Canonical, CanonicalQueryInput, CanonicalVarKind, OriginalQueryValues, }; impl<'tcx> InferCtxt<'tcx> { @@ -293,6 +294,13 @@ struct Canonicalizer<'cx, 'tcx> { // Note that indices is only used once `var_values` is big enough to be // heap-allocated. indices: FxHashMap<GenericArg<'tcx>, BoundVar>, + /// Maps each `sub_unification_table_root_var` to the index of the first + /// variable which used it. + /// + /// This means in case two type variables have the same sub relations root, + /// we set the `sub_root` of the second variable to the position of the first. + /// Otherwise the `sub_root` of each type variable is just its own position. + sub_root_lookup_table: SsoHashMap<ty::TyVid, usize>, canonicalize_mode: &'cx dyn CanonicalizeMode, needs_canonical_flags: TypeFlags, @@ -361,10 +369,8 @@ impl<'cx, 'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'cx, 'tcx> { // FIXME: perf problem described in #55921. 
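// Usage sketch for the `IntervalSet::append`/`disjoint` methods added in the
// `rustc_index` hunks above (illustrative; assumes the crate-local API shown
// there): `append` expects points in nondecreasing order and either extends
// the last interval or starts a new one, and `disjoint` reports whether two
// sets never overlap.
use rustc_index::interval::IntervalSet;

fn demo() {
    let mut a: IntervalSet<usize> = IntervalSet::new(10);
    a.append(1);
    a.append(2); // extends [1, 1] to [1, 2]
    a.append(5); // gap, so a new interval [5, 5] is started

    let mut b: IntervalSet<usize> = IntervalSet::new(10);
    b.insert_range(3..5); // {3, 4}

    assert!(a.disjoint(&b));
    assert!(b.disjoint(&a));
}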
ui = ty::UniverseIndex::ROOT; } - self.canonicalize_ty_var( - CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)), - t, - ) + let sub_root = self.get_or_insert_sub_root(vid); + self.canonicalize_ty_var(CanonicalVarKind::Ty { ui, sub_root }, t) } } } @@ -374,7 +380,7 @@ impl<'cx, 'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'cx, 'tcx> { if nt != t { return self.fold_ty(nt); } else { - self.canonicalize_ty_var(CanonicalVarKind::Ty(CanonicalTyVarKind::Int), t) + self.canonicalize_ty_var(CanonicalVarKind::Int, t) } } ty::Infer(ty::FloatVar(vid)) => { @@ -382,7 +388,7 @@ impl<'cx, 'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'cx, 'tcx> { if nt != t { return self.fold_ty(nt); } else { - self.canonicalize_ty_var(CanonicalVarKind::Ty(CanonicalTyVarKind::Float), t) + self.canonicalize_ty_var(CanonicalVarKind::Float, t) } } @@ -562,6 +568,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { variables: SmallVec::from_slice(base.variables), query_state, indices: FxHashMap::default(), + sub_root_lookup_table: Default::default(), binder_index: ty::INNERMOST, }; if canonicalizer.query_state.var_values.spilled() { @@ -660,6 +667,13 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { } } + fn get_or_insert_sub_root(&mut self, vid: ty::TyVid) -> ty::BoundVar { + let root_vid = self.infcx.unwrap().sub_unification_table_root_var(vid); + let idx = + *self.sub_root_lookup_table.entry(root_vid).or_insert_with(|| self.variables.len()); + ty::BoundVar::from(idx) + } + /// Replaces the universe indexes used in `var_values` with their index in /// `query_state.universe_map`. This minimizes the maximum universe used in /// the canonicalized value. @@ -679,11 +693,11 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { self.variables .iter() .map(|&kind| match kind { - CanonicalVarKind::Ty(CanonicalTyVarKind::Int | CanonicalTyVarKind::Float) => { + CanonicalVarKind::Int | CanonicalVarKind::Float => { return kind; } - CanonicalVarKind::Ty(CanonicalTyVarKind::General(u)) => { - CanonicalVarKind::Ty(CanonicalTyVarKind::General(reverse_universe_map[&u])) + CanonicalVarKind::Ty { ui, sub_root } => { + CanonicalVarKind::Ty { ui: reverse_universe_map[&ui], sub_root } } CanonicalVarKind::Region(u) => CanonicalVarKind::Region(reverse_universe_map[&u]), CanonicalVarKind::Const(u) => CanonicalVarKind::Const(reverse_universe_map[&u]), diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs index 79a2aa54ef8..f99f228e19d 100644 --- a/compiler/rustc_infer/src/infer/canonical/mod.rs +++ b/compiler/rustc_infer/src/infer/canonical/mod.rs @@ -24,7 +24,7 @@ pub use instantiate::CanonicalExt; use rustc_index::IndexVec; pub use rustc_middle::infer::canonical::*; -use rustc_middle::ty::{self, GenericArg, List, Ty, TyCtxt, TypeFoldable}; +use rustc_middle::ty::{self, GenericArg, Ty, TyCtxt, TypeFoldable}; use rustc_span::Span; use crate::infer::{InferCtxt, RegionVariableOrigin}; @@ -67,30 +67,12 @@ impl<'tcx> InferCtxt<'tcx> { .chain((1..=canonical.max_universe.as_u32()).map(|_| self.create_next_universe())) .collect(); - let canonical_inference_vars = - self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]); - let result = canonical.instantiate(self.tcx, &canonical_inference_vars); - (result, canonical_inference_vars) - } - - /// Given the "infos" about the canonical variables from some - /// canonical, creates fresh variables with the same - /// characteristics (see `instantiate_canonical_var` for - /// details). 
You can then use `instantiate` to instantiate the - /// canonical variable with these inference variables. - fn instantiate_canonical_vars( - &self, - span: Span, - variables: &List<CanonicalVarKind<'tcx>>, - universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, - ) -> CanonicalVarValues<'tcx> { - CanonicalVarValues { - var_values: self.tcx.mk_args_from_iter( - variables - .iter() - .map(|kind| self.instantiate_canonical_var(span, kind, &universe_map)), - ), - } + let var_values = + CanonicalVarValues::instantiate(self.tcx, &canonical.variables, |var_values, info| { + self.instantiate_canonical_var(span, info, &var_values, |ui| universes[ui]) + }); + let result = canonical.instantiate(self.tcx, &var_values); + (result, var_values) } /// Given the "info" about a canonical variable, creates a fresh @@ -105,21 +87,27 @@ impl<'tcx> InferCtxt<'tcx> { &self, span: Span, kind: CanonicalVarKind<'tcx>, + previous_var_values: &[GenericArg<'tcx>], universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> GenericArg<'tcx> { match kind { - CanonicalVarKind::Ty(ty_kind) => { - let ty = match ty_kind { - CanonicalTyVarKind::General(ui) => { - self.next_ty_var_in_universe(span, universe_map(ui)) + CanonicalVarKind::Ty { ui, sub_root } => { + let vid = self.next_ty_vid_in_universe(span, universe_map(ui)); + // If this inference variable is related to an earlier variable + // via subtyping, we need to add that info to the inference context. + if let Some(prev) = previous_var_values.get(sub_root.as_usize()) { + if let &ty::Infer(ty::TyVar(sub_root)) = prev.expect_ty().kind() { + self.sub_unify_ty_vids_raw(vid, sub_root); + } else { + unreachable!() } + } + Ty::new_var(self.tcx, vid).into() + } - CanonicalTyVarKind::Int => self.next_int_var(), + CanonicalVarKind::Int => self.next_int_var().into(), - CanonicalTyVarKind::Float => self.next_float_var(), - }; - ty.into() - } + CanonicalVarKind::Float => self.next_float_var().into(), CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, bound }) => { let universe_mapped = universe_map(universe); diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs index 09578598114..5d1b4be9e57 100644 --- a/compiler/rustc_infer/src/infer/canonical/query_response.rs +++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs @@ -13,6 +13,7 @@ use std::iter; use rustc_index::{Idx, IndexVec}; use rustc_middle::arena::ArenaAllocatable; use rustc_middle::bug; +use rustc_middle::infer::canonical::CanonicalVarKind; use rustc_middle::ty::{self, BoundVar, GenericArg, GenericArgKind, Ty, TyCtxt, TypeFoldable}; use tracing::{debug, instrument}; @@ -413,26 +414,27 @@ impl<'tcx> InferCtxt<'tcx> { let mut opt_values: IndexVec<BoundVar, Option<GenericArg<'tcx>>> = IndexVec::from_elem_n(None, query_response.variables.len()); - // In terms of our example above, we are iterating over pairs like: - // [(?A, Vec<?0>), ('static, '?1), (?B, ?0)] for (original_value, result_value) in iter::zip(&original_values.var_values, result_values) { match result_value.kind() { GenericArgKind::Type(result_value) => { - // e.g., here `result_value` might be `?0` in the example above... - if let ty::Bound(debruijn, b) = *result_value.kind() { - // ...in which case we would set `canonical_vars[0]` to `Some(?U)`. - + // We disable the instantiation guess for inference variables + // and only use it for placeholders. 
We need to handle the + // `sub_root` of type inference variables which would make this + // more involved. They are also a lot rarer than region variables. + if let ty::Bound(debruijn, b) = *result_value.kind() + && !matches!( + query_response.variables[b.var.as_usize()], + CanonicalVarKind::Ty { .. } + ) + { // We only allow a `ty::INNERMOST` index in generic parameters. assert_eq!(debruijn, ty::INNERMOST); opt_values[b.var] = Some(*original_value); } } GenericArgKind::Lifetime(result_value) => { - // e.g., here `result_value` might be `'?1` in the example above... if let ty::ReBound(debruijn, b) = result_value.kind() { - // ... in which case we would set `canonical_vars[0]` to `Some('static)`. - // We only allow a `ty::INNERMOST` index in generic parameters. assert_eq!(debruijn, ty::INNERMOST); opt_values[b.var] = Some(*original_value); @@ -440,8 +442,6 @@ impl<'tcx> InferCtxt<'tcx> { } GenericArgKind::Const(result_value) => { if let ty::ConstKind::Bound(debruijn, b) = result_value.kind() { - // ...in which case we would set `canonical_vars[0]` to `Some(const X)`. - // We only allow a `ty::INNERMOST` index in generic parameters. assert_eq!(debruijn, ty::INNERMOST); opt_values[b.var] = Some(*original_value); @@ -453,39 +453,36 @@ impl<'tcx> InferCtxt<'tcx> { // Create result arguments: if we found a value for a // given variable in the loop above, use that. Otherwise, use // a fresh inference variable. - let result_args = CanonicalVarValues { - var_values: self.tcx.mk_args_from_iter( - query_response.variables.iter().enumerate().map(|(index, var_kind)| { - if var_kind.universe() != ty::UniverseIndex::ROOT { - // A variable from inside a binder of the query. While ideally these shouldn't - // exist at all, we have to deal with them for now. - self.instantiate_canonical_var(cause.span, var_kind, |u| { - universe_map[u.as_usize()] - }) - } else if var_kind.is_existential() { - match opt_values[BoundVar::new(index)] { - Some(k) => k, - None => self.instantiate_canonical_var(cause.span, var_kind, |u| { - universe_map[u.as_usize()] - }), - } - } else { - // For placeholders which were already part of the input, we simply map this - // universal bound variable back the placeholder of the input. - opt_values[BoundVar::new(index)].expect( - "expected placeholder to be unified with itself during response", - ) - } - }), - ), - }; + let tcx = self.tcx; + let variables = query_response.variables; + let var_values = CanonicalVarValues::instantiate(tcx, variables, |var_values, kind| { + if kind.universe() != ty::UniverseIndex::ROOT { + // A variable from inside a binder of the query. While ideally these shouldn't + // exist at all, we have to deal with them for now. + self.instantiate_canonical_var(cause.span, kind, &var_values, |u| { + universe_map[u.as_usize()] + }) + } else if kind.is_existential() { + match opt_values[BoundVar::new(var_values.len())] { + Some(k) => k, + None => self.instantiate_canonical_var(cause.span, kind, &var_values, |u| { + universe_map[u.as_usize()] + }), + } + } else { + // For placeholders which were already part of the input, we simply map this + // universal bound variable back the placeholder of the input. 
+ opt_values[BoundVar::new(var_values.len())] + .expect("expected placeholder to be unified with itself during response") + } + }); let mut obligations = PredicateObligations::new(); // Carry all newly resolved opaque types to the caller's scope for &(a, b) in &query_response.value.opaque_types { - let a = instantiate_value(self.tcx, &result_args, a); - let b = instantiate_value(self.tcx, &result_args, b); + let a = instantiate_value(self.tcx, &var_values, a); + let b = instantiate_value(self.tcx, &var_values, b); debug!(?a, ?b, "constrain opaque type"); // We use equate here instead of, for example, just registering the // opaque type's hidden value directly, because the hidden type may have been an inference @@ -502,7 +499,7 @@ impl<'tcx> InferCtxt<'tcx> { ); } - Ok(InferOk { value: result_args, obligations }) + Ok(InferOk { value: var_values, obligations }) } /// Given a "guess" at the values for the canonical variables in diff --git a/compiler/rustc_infer/src/infer/context.rs b/compiler/rustc_infer/src/infer/context.rs index bb9c8850093..14cc590720a 100644 --- a/compiler/rustc_infer/src/infer/context.rs +++ b/compiler/rustc_infer/src/infer/context.rs @@ -1,4 +1,4 @@ -///! Definition of `InferCtxtLike` from the librarified type layer. +//! Definition of `InferCtxtLike` from the librarified type layer. use rustc_hir::def_id::DefId; use rustc_middle::traits::ObligationCause; use rustc_middle::ty::relate::RelateResult; @@ -59,6 +59,10 @@ impl<'tcx> rustc_type_ir::InferCtxtLike for InferCtxt<'tcx> { self.root_var(var) } + fn sub_unification_table_root_var(&self, var: ty::TyVid) -> ty::TyVid { + self.sub_unification_table_root_var(var) + } + fn root_const_var(&self, var: ty::ConstVid) -> ty::ConstVid { self.root_const_var(var) } @@ -179,6 +183,10 @@ impl<'tcx> rustc_type_ir::InferCtxtLike for InferCtxt<'tcx> { self.inner.borrow_mut().type_variables().equate(a, b); } + fn sub_unify_ty_vids_raw(&self, a: ty::TyVid, b: ty::TyVid) { + self.sub_unify_ty_vids_raw(a, b); + } + fn equate_int_vids_raw(&self, a: ty::IntVid, b: ty::IntVid) { self.inner.borrow_mut().int_unification_table().union(a, b); } diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs index d105d24bed7..9d3886aff1c 100644 --- a/compiler/rustc_infer/src/infer/mod.rs +++ b/compiler/rustc_infer/src/infer/mod.rs @@ -764,6 +764,7 @@ impl<'tcx> InferCtxt<'tcx> { let r_b = self.shallow_resolve(predicate.skip_binder().b); match (r_a.kind(), r_b.kind()) { (&ty::Infer(ty::TyVar(a_vid)), &ty::Infer(ty::TyVar(b_vid))) => { + self.sub_unify_ty_vids_raw(a_vid, b_vid); return Err((a_vid, b_vid)); } _ => {} @@ -1128,6 +1129,14 @@ impl<'tcx> InferCtxt<'tcx> { self.inner.borrow_mut().type_variables().root_var(var) } + pub fn sub_unify_ty_vids_raw(&self, a: ty::TyVid, b: ty::TyVid) { + self.inner.borrow_mut().type_variables().sub_unify(a, b); + } + + pub fn sub_unification_table_root_var(&self, var: ty::TyVid) -> ty::TyVid { + self.inner.borrow_mut().type_variables().sub_unification_table_root_var(var) + } + pub fn root_const_var(&self, var: ty::ConstVid) -> ty::ConstVid { self.inner.borrow_mut().const_unification_table().find(var).vid } diff --git a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs index bfdd282d7e1..f06eb58a371 100644 --- a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs +++ b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs @@ -76,7 +76,7 @@ pub(super) fn can_match_erased_ty<'tcx>( erased_ty: Ty<'tcx>, ) 
-> bool { assert!(!outlives_predicate.has_escaping_bound_vars()); - let erased_outlives_predicate = tcx.erase_regions(outlives_predicate); + let erased_outlives_predicate = tcx.erase_and_anonymize_regions(outlives_predicate); let outlives_ty = erased_outlives_predicate.skip_binder().0; if outlives_ty == erased_ty { // pointless micro-optimization diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs index 69feecfe30a..f67b99cb3f8 100644 --- a/compiler/rustc_infer/src/infer/outlives/verify.rs +++ b/compiler/rustc_infer/src/infer/outlives/verify.rs @@ -96,7 +96,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> { &self, alias_ty: ty::AliasTy<'tcx>, ) -> Vec<ty::PolyTypeOutlivesPredicate<'tcx>> { - let erased_alias_ty = self.tcx.erase_regions(alias_ty.to_ty(self.tcx)); + let erased_alias_ty = self.tcx.erase_and_anonymize_regions(alias_ty.to_ty(self.tcx)); self.declared_generic_bounds_from_env_for_erased_ty(erased_alias_ty) } @@ -241,7 +241,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> { } let p_ty = p.to_ty(tcx); - let erased_p_ty = self.tcx.erase_regions(p_ty); + let erased_p_ty = self.tcx.erase_and_anonymize_regions(p_ty); (erased_p_ty == erased_ty).then_some(ty::Binder::dummy(ty::OutlivesPredicate(p_ty, r))) })); diff --git a/compiler/rustc_infer/src/infer/relate/generalize.rs b/compiler/rustc_infer/src/infer/relate/generalize.rs index a000bb1123c..cc41957c110 100644 --- a/compiler/rustc_infer/src/infer/relate/generalize.rs +++ b/compiler/rustc_infer/src/infer/relate/generalize.rs @@ -6,8 +6,8 @@ use rustc_hir::def_id::DefId; use rustc_middle::bug; use rustc_middle::ty::error::TypeError; use rustc_middle::ty::{ - self, AliasRelationDirection, InferConst, MaxUniverse, Term, Ty, TyCtxt, TypeVisitable, - TypeVisitableExt, TypingMode, + self, AliasRelationDirection, InferConst, Term, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, + TypeVisitableExt, TypeVisitor, TypingMode, }; use rustc_span::Span; use tracing::{debug, instrument, warn}; @@ -290,6 +290,45 @@ impl<'tcx> InferCtxt<'tcx> { } } +/// Finds the max universe present +struct MaxUniverse { + max_universe: ty::UniverseIndex, +} + +impl MaxUniverse { + fn new() -> Self { + MaxUniverse { max_universe: ty::UniverseIndex::ROOT } + } + + fn max_universe(self) -> ty::UniverseIndex { + self.max_universe + } +} + +impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for MaxUniverse { + fn visit_ty(&mut self, t: Ty<'tcx>) { + if let ty::Placeholder(placeholder) = t.kind() { + self.max_universe = self.max_universe.max(placeholder.universe); + } + + t.super_visit_with(self) + } + + fn visit_const(&mut self, c: ty::Const<'tcx>) { + if let ty::ConstKind::Placeholder(placeholder) = c.kind() { + self.max_universe = self.max_universe.max(placeholder.universe); + } + + c.super_visit_with(self) + } + + fn visit_region(&mut self, r: ty::Region<'tcx>) { + if let ty::RePlaceholder(placeholder) = r.kind() { + self.max_universe = self.max_universe.max(placeholder.universe); + } + } +} + /// The "generalizer" is used when handling inference variables. /// /// The basic strategy for handling a constraint like `?A <: B` is to @@ -519,6 +558,10 @@ impl<'tcx> TypeRelation<TyCtxt<'tcx>> for Generalizer<'_, 'tcx> { let origin = inner.type_variables().var_origin(vid); let new_var_id = inner.type_variables().new_var(self.for_universe, origin); + // Record that `vid` and `new_var_id` have to be subtypes + // of each other. This is currently only used for diagnostics. + // To see why, see the docs in the `type_variables` module. 
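// --- Illustrative sketch of what the inlined `MaxUniverse` visitor above computes:
// walk a type and keep the highest placeholder universe encountered. The `ToyTy`
// enum below is a hypothetical stand-in used only for demonstration, not the
// compiler's `Ty`.
enum ToyTy {
    Placeholder { universe: u32 },
    Ref(Box<ToyTy>),
    Tuple(Vec<ToyTy>),
    Int,
}

fn max_universe(ty: &ToyTy) -> u32 {
    match ty {
        ToyTy::Placeholder { universe } => *universe,
        ToyTy::Ref(inner) => max_universe(inner),
        ToyTy::Tuple(tys) => tys.iter().map(max_universe).max().unwrap_or(0),
        ToyTy::Int => 0, // no placeholder: stays at the root universe
    }
}

fn main() {
    let ty = ToyTy::Tuple(vec![
        ToyTy::Int,
        ToyTy::Ref(Box::new(ToyTy::Placeholder { universe: 3 })),
        ToyTy::Placeholder { universe: 1 },
    ]);
    assert_eq!(max_universe(&ty), 3);
}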
+ inner.type_variables().sub_unify(vid, new_var_id); // If we're in the new solver and create a new inference // variable inside of an alias we eagerly constrain that // inference variable to prevent unexpected ambiguity errors. diff --git a/compiler/rustc_infer/src/infer/snapshot/undo_log.rs b/compiler/rustc_infer/src/infer/snapshot/undo_log.rs index fcc0ab3af41..22c815fb87c 100644 --- a/compiler/rustc_infer/src/infer/snapshot/undo_log.rs +++ b/compiler/rustc_infer/src/infer/snapshot/undo_log.rs @@ -20,7 +20,7 @@ pub struct Snapshot<'tcx> { pub(crate) enum UndoLog<'tcx> { DuplicateOpaqueType, OpaqueTypes(OpaqueTypeKey<'tcx>, Option<OpaqueHiddenType<'tcx>>), - TypeVariables(sv::UndoLog<ut::Delegate<type_variable::TyVidEqKey<'tcx>>>), + TypeVariables(type_variable::UndoLog<'tcx>), ConstUnificationTable(sv::UndoLog<ut::Delegate<ConstVidKey<'tcx>>>), IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>), FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>), @@ -49,6 +49,8 @@ impl_from! { RegionConstraintCollector(region_constraints::UndoLog<'tcx>), TypeVariables(sv::UndoLog<ut::Delegate<type_variable::TyVidEqKey<'tcx>>>), + TypeVariables(sv::UndoLog<ut::Delegate<type_variable::TyVidSubKey>>), + TypeVariables(type_variable::UndoLog<'tcx>), IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>), FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>), diff --git a/compiler/rustc_infer/src/infer/type_variable.rs b/compiler/rustc_infer/src/infer/type_variable.rs index 6f6791804d3..65f77fe8e25 100644 --- a/compiler/rustc_infer/src/infer/type_variable.rs +++ b/compiler/rustc_infer/src/infer/type_variable.rs @@ -13,12 +13,48 @@ use tracing::debug; use crate::infer::InferCtxtUndoLogs; +/// Represents a single undo-able action that affects a type inference variable. +#[derive(Clone)] +pub(crate) enum UndoLog<'tcx> { + EqRelation(sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>), + SubRelation(sv::UndoLog<ut::Delegate<TyVidSubKey>>), +} + +/// Convert from a specific kind of undo to the more general UndoLog +impl<'tcx> From<sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>> for UndoLog<'tcx> { + fn from(l: sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>) -> Self { + UndoLog::EqRelation(l) + } +} + +/// Convert from a specific kind of undo to the more general UndoLog +impl<'tcx> From<sv::UndoLog<ut::Delegate<TyVidSubKey>>> for UndoLog<'tcx> { + fn from(l: sv::UndoLog<ut::Delegate<TyVidSubKey>>) -> Self { + UndoLog::SubRelation(l) + } +} + impl<'tcx> Rollback<sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>> for TypeVariableStorage<'tcx> { fn reverse(&mut self, undo: sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>) { self.eq_relations.reverse(undo) } } +impl<'tcx> Rollback<sv::UndoLog<ut::Delegate<TyVidSubKey>>> for TypeVariableStorage<'tcx> { + fn reverse(&mut self, undo: sv::UndoLog<ut::Delegate<TyVidSubKey>>) { + self.sub_unification_table.reverse(undo) + } +} + +impl<'tcx> Rollback<UndoLog<'tcx>> for TypeVariableStorage<'tcx> { + fn reverse(&mut self, undo: UndoLog<'tcx>) { + match undo { + UndoLog::EqRelation(undo) => self.eq_relations.reverse(undo), + UndoLog::SubRelation(undo) => self.sub_unification_table.reverse(undo), + } + } +} + #[derive(Clone, Default)] pub(crate) struct TypeVariableStorage<'tcx> { /// The origins of each type variable. @@ -27,6 +63,25 @@ pub(crate) struct TypeVariableStorage<'tcx> { /// constraint `?X == ?Y`. This table also stores, for each key, /// the known value. 
eq_relations: ut::UnificationTableStorage<TyVidEqKey<'tcx>>, + /// Only used by `-Znext-solver` and for diagnostics. Tracks whether + /// type variables are related via subtyping at all, ignoring which of + /// the two is the subtype. + /// + /// When reporting ambiguity errors, we sometimes want to + /// treat all inference vars which are subtypes of each + /// others as if they are equal. For this case we compute + /// the transitive closure of our subtype obligations here. + /// + /// E.g. when encountering ambiguity errors, we want to suggest + /// specifying some method argument or to add a type annotation + /// to a local variable. Because subtyping cannot change the + /// shape of a type, it's fine if the cause of the ambiguity error + /// is only related to the suggested variable via subtyping. + /// + /// Even for something like `let x = returns_arg(); x.method();` the + /// type of `x` is only a supertype of the argument of `returns_arg`. We + /// still want to suggest specifying the type of the argument. + sub_unification_table: ut::UnificationTableStorage<TyVidSubKey>, } pub(crate) struct TypeVariableTable<'a, 'tcx> { @@ -102,13 +157,24 @@ impl<'tcx> TypeVariableTable<'_, 'tcx> { self.storage.values[vid].origin } - /// Records that `a == b`, depending on `dir`. + /// Records that `a == b`. /// /// Precondition: neither `a` nor `b` are known. pub(crate) fn equate(&mut self, a: ty::TyVid, b: ty::TyVid) { debug_assert!(self.probe(a).is_unknown()); debug_assert!(self.probe(b).is_unknown()); self.eq_relations().union(a, b); + self.sub_unification_table().union(a, b); + } + + /// Records that `a` and `b` are related via subtyping. We don't track + /// which of the two is the subtype. + /// + /// Precondition: neither `a` nor `b` are known. + pub(crate) fn sub_unify(&mut self, a: ty::TyVid, b: ty::TyVid) { + debug_assert!(self.probe(a).is_unknown()); + debug_assert!(self.probe(b).is_unknown()); + self.sub_unification_table().union(a, b); } /// Instantiates `vid` with the type `ty`. @@ -142,6 +208,10 @@ impl<'tcx> TypeVariableTable<'_, 'tcx> { origin: TypeVariableOrigin, ) -> ty::TyVid { let eq_key = self.eq_relations().new_key(TypeVariableValue::Unknown { universe }); + + let sub_key = self.sub_unification_table().new_key(()); + debug_assert_eq!(eq_key.vid, sub_key.vid); + let index = self.storage.values.push(TypeVariableData { origin }); debug_assert_eq!(eq_key.vid, index); @@ -164,6 +234,18 @@ impl<'tcx> TypeVariableTable<'_, 'tcx> { self.eq_relations().find(vid).vid } + /// Returns the "root" variable of `vid` in the `sub_unification_table` + /// equivalence table. All type variables that have been are related via + /// equality or subtyping will yield the same root variable (per the + /// union-find algorithm), so `sub_unification_table_root_var(a) + /// == sub_unification_table_root_var(b)` implies that: + /// ```text + /// exists X. (a <: X || X <: a) && (b <: X || X <: b) + /// ``` + pub(crate) fn sub_unification_table_root_var(&mut self, vid: ty::TyVid) -> ty::TyVid { + self.sub_unification_table().find(vid).vid + } + /// Retrieves the type to which `vid` has been instantiated, if /// any. 
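// --- Illustrative sketch of the `sub_unification_table` behaviour documented above:
// a direction-ignoring union-find over type-variable indices, so variables related
// through any chain of subtyping end up sharing a root. Simplified stand-in, not the
// ena-based table the compiler actually uses.
struct SubUnify {
    parent: Vec<usize>,
}

impl SubUnify {
    fn new_var(&mut self) -> usize {
        self.parent.push(self.parent.len());
        self.parent.len() - 1
    }

    fn root(&mut self, v: usize) -> usize {
        let p = self.parent[v];
        if p == v {
            v
        } else {
            let r = self.root(p);
            self.parent[v] = r; // path compression
            r
        }
    }

    // Record "a <: b or b <: a" without tracking which of the two is the subtype.
    fn sub_unify(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.root(a), self.root(b));
        if ra != rb {
            self.parent[ra] = rb;
        }
    }
}

fn main() {
    let mut table = SubUnify { parent: Vec::new() };
    let (a, b, c, d) = (table.new_var(), table.new_var(), table.new_var(), table.new_var());
    table.sub_unify(a, b); // e.g. from `?a <: ?b`
    table.sub_unify(c, b); // e.g. from `?b <: ?c`
    // `a` and `c` now share a root even though neither is a subtype of the other,
    // which is exactly the transitive closure the diagnostics code wants.
    assert_eq!(table.root(a), table.root(c));
    // `d` was never related to the others and keeps its own root.
    assert_ne!(table.root(a), table.root(d));
}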
pub(crate) fn probe(&mut self, vid: ty::TyVid) -> TypeVariableValue<'tcx> { @@ -181,6 +263,11 @@ impl<'tcx> TypeVariableTable<'_, 'tcx> { self.storage.eq_relations.with_log(self.undo_log) } + #[inline] + fn sub_unification_table(&mut self) -> super::UnificationTable<'_, 'tcx, TyVidSubKey> { + self.storage.sub_unification_table.with_log(self.undo_log) + } + /// Returns a range of the type variables created during the snapshot. pub(crate) fn vars_since_snapshot( &mut self, @@ -243,6 +330,33 @@ impl<'tcx> ut::UnifyKey for TyVidEqKey<'tcx> { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub(crate) struct TyVidSubKey { + vid: ty::TyVid, +} + +impl From<ty::TyVid> for TyVidSubKey { + #[inline] // make this function eligible for inlining - it is quite hot. + fn from(vid: ty::TyVid) -> Self { + TyVidSubKey { vid } + } +} + +impl ut::UnifyKey for TyVidSubKey { + type Value = (); + #[inline] + fn index(&self) -> u32 { + self.vid.as_u32() + } + #[inline] + fn from_index(i: u32) -> TyVidSubKey { + TyVidSubKey { vid: ty::TyVid::from_u32(i) } + } + fn tag() -> &'static str { + "TyVidSubKey" + } +} + impl<'tcx> ut::UnifyValue for TypeVariableValue<'tcx> { type Error = ut::NoError; diff --git a/compiler/rustc_interface/messages.ftl b/compiler/rustc_interface/messages.ftl index 4b9c71d7172..d83b18cbc2d 100644 --- a/compiler/rustc_interface/messages.ftl +++ b/compiler/rustc_interface/messages.ftl @@ -30,10 +30,6 @@ interface_ignoring_out_dir = ignoring --out-dir flag due to -o flag interface_input_file_would_be_overwritten = the input file "{$path}" would be overwritten by the generated executable -interface_limit_invalid = - `limit` must be a non-negative integer - .label = {$error_str} - interface_mixed_bin_crate = cannot mix `bin` crate type with others diff --git a/compiler/rustc_interface/src/errors.rs b/compiler/rustc_interface/src/errors.rs index 6b39b4f1891..d1082eaf617 100644 --- a/compiler/rustc_interface/src/errors.rs +++ b/compiler/rustc_interface/src/errors.rs @@ -108,13 +108,3 @@ pub(crate) struct AbiRequiredTargetFeature<'a> { pub feature: &'a str, pub enabled: &'a str, } - -#[derive(Diagnostic)] -#[diag(interface_limit_invalid)] -pub(crate) struct LimitInvalid<'a> { - #[primary_span] - pub span: Span, - #[label] - pub value_span: Span, - pub error_str: &'a str, -} diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs index 4c820b8877b..b52c5b4cd66 100644 --- a/compiler/rustc_interface/src/interface.rs +++ b/compiler/rustc_interface/src/interface.rs @@ -13,7 +13,8 @@ use rustc_lint::LintStore; use rustc_middle::ty; use rustc_middle::ty::CurrentGcx; use rustc_middle::util::Providers; -use rustc_parse::new_parser_from_simple_source_str; +use rustc_parse::lexer::StripTokens; +use rustc_parse::new_parser_from_source_str; use rustc_parse::parser::attr::AllowLeadingUnsafe; use rustc_query_impl::QueryCtxt; use rustc_query_system::query::print_query_stack; @@ -68,7 +69,8 @@ pub(crate) fn parse_cfg(dcx: DiagCtxtHandle<'_>, cfgs: Vec<String>) -> Cfg { }; } - match new_parser_from_simple_source_str(&psess, filename, s.to_string()) { + match new_parser_from_source_str(&psess, filename, s.to_string(), StripTokens::Nothing) + { Ok(mut parser) => match parser.parse_meta_item(AllowLeadingUnsafe::No) { Ok(meta_item) if parser.token == token::Eof => { if meta_item.path.segments.len() != 1 { @@ -166,13 +168,15 @@ pub(crate) fn parse_check_cfg(dcx: DiagCtxtHandle<'_>, specs: Vec<String>) -> Ch error!("expected `cfg(name, values(\"value1\", \"value2\", ... 
\"valueN\"))`") }; - let mut parser = match new_parser_from_simple_source_str(&psess, filename, s.to_string()) { - Ok(parser) => parser, - Err(errs) => { - errs.into_iter().for_each(|err| err.cancel()); - expected_error(); - } - }; + let mut parser = + match new_parser_from_source_str(&psess, filename, s.to_string(), StripTokens::Nothing) + { + Ok(parser) => parser, + Err(errs) => { + errs.into_iter().for_each(|err| err.cancel()); + expected_error(); + } + }; let meta_item = match parser.parse_meta_item(AllowLeadingUnsafe::No) { Ok(meta_item) if parser.token == token::Eof => meta_item, diff --git a/compiler/rustc_interface/src/limits.rs b/compiler/rustc_interface/src/limits.rs index 8f01edec09f..10e58f32256 100644 --- a/compiler/rustc_interface/src/limits.rs +++ b/compiler/rustc_interface/src/limits.rs @@ -8,78 +8,32 @@ //! Users can override these limits via an attribute on the crate like //! `#![recursion_limit="22"]`. This pass just looks for those attributes. -use std::num::IntErrorKind; - -use rustc_ast::attr::AttributeExt; -use rustc_middle::bug; +use rustc_hir::attrs::AttributeKind; +use rustc_hir::limit::Limit; +use rustc_hir::{Attribute, find_attr}; use rustc_middle::query::Providers; -use rustc_session::{Limit, Limits, Session}; -use rustc_span::{Symbol, sym}; - -use crate::errors::LimitInvalid; +use rustc_session::Limits; pub(crate) fn provide(providers: &mut Providers) { - providers.limits = |tcx, ()| Limits { - recursion_limit: get_recursion_limit(tcx.hir_krate_attrs(), tcx.sess), - move_size_limit: get_limit( - tcx.hir_krate_attrs(), - tcx.sess, - sym::move_size_limit, - Limit::new(tcx.sess.opts.unstable_opts.move_size_limit.unwrap_or(0)), - ), - type_length_limit: get_limit( - tcx.hir_krate_attrs(), - tcx.sess, - sym::type_length_limit, - Limit::new(2usize.pow(24)), - ), - pattern_complexity_limit: get_limit( - tcx.hir_krate_attrs(), - tcx.sess, - sym::pattern_complexity_limit, - Limit::unlimited(), - ), + providers.limits = |tcx, ()| { + let attrs = tcx.hir_krate_attrs(); + Limits { + recursion_limit: get_recursion_limit(tcx.hir_krate_attrs()), + move_size_limit: + find_attr!(attrs, AttributeKind::MoveSizeLimit { limit, .. } => *limit) + .unwrap_or(Limit::new(tcx.sess.opts.unstable_opts.move_size_limit.unwrap_or(0))), + type_length_limit: + find_attr!(attrs, AttributeKind::TypeLengthLimit { limit, .. } => *limit) + .unwrap_or(Limit::new(2usize.pow(24))), + pattern_complexity_limit: + find_attr!(attrs, AttributeKind::PatternComplexityLimit { limit, .. } => *limit) + .unwrap_or(Limit::unlimited()), + } } } // This one is separate because it must be read prior to macro expansion. 
-pub(crate) fn get_recursion_limit(krate_attrs: &[impl AttributeExt], sess: &Session) -> Limit { - get_limit(krate_attrs, sess, sym::recursion_limit, Limit::new(128)) -} - -fn get_limit( - krate_attrs: &[impl AttributeExt], - sess: &Session, - name: Symbol, - default: Limit, -) -> Limit { - for attr in krate_attrs { - if !attr.has_name(name) { - continue; - } - - if let Some(sym) = attr.value_str() { - match sym.as_str().parse() { - Ok(n) => return Limit::new(n), - Err(e) => { - let error_str = match e.kind() { - IntErrorKind::PosOverflow => "`limit` is too large", - IntErrorKind::Empty => "`limit` must be a non-negative integer", - IntErrorKind::InvalidDigit => "not a valid integer", - IntErrorKind::NegOverflow => { - bug!("`limit` should never negatively overflow") - } - IntErrorKind::Zero => bug!("zero is a valid `limit`"), - kind => bug!("unimplemented IntErrorKind variant: {:?}", kind), - }; - sess.dcx().emit_err(LimitInvalid { - span: attr.span(), - value_span: attr.value_span().unwrap(), - error_str, - }); - } - } - } - } - default +pub(crate) fn get_recursion_limit(attrs: &[Attribute]) -> Limit { + find_attr!(attrs, AttributeKind::RecursionLimit { limit, .. } => *limit) + .unwrap_or(Limit::new(128)) } diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index ca8c10311fb..d39219bfd66 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -6,7 +6,7 @@ use std::sync::{Arc, LazyLock, OnceLock}; use std::{env, fs, iter}; use rustc_ast as ast; -use rustc_attr_parsing::{AttributeParser, ShouldEmit, validate_attr}; +use rustc_attr_parsing::{AttributeParser, ShouldEmit}; use rustc_codegen_ssa::traits::CodegenBackend; use rustc_data_structures::jobserver::Proxy; use rustc_data_structures::steal::Steal; @@ -19,6 +19,7 @@ use rustc_fs_util::try_canonicalize; use rustc_hir::attrs::AttributeKind; use rustc_hir::def_id::{LOCAL_CRATE, StableCrateId, StableCrateIdMap}; use rustc_hir::definitions::Definitions; +use rustc_hir::limit::Limit; use rustc_incremental::setup_dep_graph; use rustc_lint::{BufferedEarlyLint, EarlyCheckNode, LintStore, unerased_lint_store}; use rustc_metadata::EncodedMetadata; @@ -27,15 +28,16 @@ use rustc_middle::arena::Arena; use rustc_middle::dep_graph::DepsType; use rustc_middle::ty::{self, CurrentGcx, GlobalCtxt, RegisteredTools, TyCtxt}; use rustc_middle::util::Providers; +use rustc_parse::lexer::StripTokens; use rustc_parse::{new_parser_from_file, new_parser_from_source_str, unwrap_or_emit_fatal}; use rustc_passes::{abi_test, input_stats, layout_test}; use rustc_resolve::{Resolver, ResolverOutputs}; +use rustc_session::Session; use rustc_session::config::{CrateType, Input, OutFileName, OutputFilenames, OutputType}; use rustc_session::cstore::Untracked; use rustc_session::output::{collect_crate_types, filename_for_input}; use rustc_session::parse::feature_err; use rustc_session::search_paths::PathKind; -use rustc_session::{Limit, Session}; use rustc_span::{ DUMMY_SP, ErrorGuaranteed, ExpnKind, FileName, SourceFileHash, SourceFileHashAlgorithm, Span, Symbol, sym, @@ -51,10 +53,18 @@ pub fn parse<'a>(sess: &'a Session) -> ast::Crate { let mut krate = sess .time("parse_crate", || { let mut parser = unwrap_or_emit_fatal(match &sess.io.input { - Input::File(file) => new_parser_from_file(&sess.psess, file, None), - Input::Str { input, name } => { - new_parser_from_source_str(&sess.psess, name.clone(), input.clone()) - } + Input::File(file) => new_parser_from_file( + &sess.psess, + file, + 
StripTokens::ShebangAndFrontmatter, + None, + ), + Input::Str { input, name } => new_parser_from_source_str( + &sess.psess, + name.clone(), + input.clone(), + StripTokens::ShebangAndFrontmatter, + ), }); parser.parse_crate_mod() }) @@ -1246,7 +1256,8 @@ pub fn get_crate_name(sess: &Session, krate_attrs: &[ast::Attribute]) -> Symbol // in all code paths that require the crate name very early on, namely before // macro expansion. - let attr_crate_name = parse_crate_name(sess, krate_attrs, ShouldEmit::EarlyFatal); + let attr_crate_name = + parse_crate_name(sess, krate_attrs, ShouldEmit::EarlyFatal { also_emit_lints: true }); let validate = |name, span| { rustc_session::output::validate_crate_name(sess, name, span); @@ -1307,36 +1318,18 @@ pub(crate) fn parse_crate_name( } fn get_recursion_limit(krate_attrs: &[ast::Attribute], sess: &Session) -> Limit { - // We don't permit macro calls inside of the attribute (e.g., #![recursion_limit = `expand!()`]) - // because that would require expanding this while in the middle of expansion, which needs to - // know the limit before expanding. - let _ = validate_and_find_value_str_builtin_attr(sym::recursion_limit, sess, krate_attrs); - crate::limits::get_recursion_limit(krate_attrs, sess) -} - -/// Validate *all* occurrences of the given "[value-str]" built-in attribute and return the first find. -/// -/// This validator is intended for built-in attributes whose value needs to be known very early -/// during compilation (namely, before macro expansion) and it mainly exists to reject macro calls -/// inside of the attributes, such as in `#![name = expand!()]`. Normal attribute validation happens -/// during semantic analysis via [`TyCtxt::check_mod_attrs`] which happens *after* macro expansion -/// when such macro calls (here: `expand`) have already been expanded and we can no longer check for -/// their presence. -/// -/// [value-str]: ast::Attribute::value_str -fn validate_and_find_value_str_builtin_attr( - name: Symbol, - sess: &Session, - krate_attrs: &[ast::Attribute], -) -> Option<(Symbol, Span)> { - let mut result = None; - // Validate *all* relevant attributes, not just the first occurrence. - for attr in ast::attr::filter_by_name(krate_attrs, name) { - let Some(value) = attr.value_str() else { - validate_attr::emit_fatal_malformed_builtin_attribute(&sess.psess, attr, name) - }; - // Choose the first occurrence as our result. - result.get_or_insert((value, attr.span)); - } - result + let attr = AttributeParser::parse_limited_should_emit( + sess, + &krate_attrs, + sym::recursion_limit, + DUMMY_SP, + rustc_ast::node_id::CRATE_NODE_ID, + None, + // errors are fatal here, but lints aren't. + // If things aren't fatal we continue, and will parse this again. + // That makes the same lint trigger again. + // So, no lints here to avoid duplicates. 
+ ShouldEmit::EarlyFatal { also_emit_lints: false }, + ); + crate::limits::get_recursion_limit(attr.as_slice()) } diff --git a/compiler/rustc_lint/Cargo.toml b/compiler/rustc_lint/Cargo.toml index 7718f16984d..3a50aac50cb 100644 --- a/compiler/rustc_lint/Cargo.toml +++ b/compiler/rustc_lint/Cargo.toml @@ -5,6 +5,7 @@ edition = "2024" [dependencies] # tidy-alphabetical-start +bitflags = "2.4.1" rustc_abi = { path = "../rustc_abi" } rustc_ast = { path = "../rustc_ast" } rustc_ast_pretty = { path = "../rustc_ast_pretty" } diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index c3c0a34df71..75a0f89321b 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -29,12 +29,12 @@ use rustc_hir::attrs::AttributeKind; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{CRATE_DEF_ID, DefId, LocalDefId}; use rustc_hir::intravisit::FnKind as HirFnKind; -use rustc_hir::{Body, FnDecl, PatKind, PredicateOrigin, find_attr}; +use rustc_hir::{Body, FnDecl, ImplItemImplKind, PatKind, PredicateOrigin, find_attr}; use rustc_middle::bug; use rustc_middle::lint::LevelAndSource; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt, Upcast, VariantDef}; +use rustc_middle::ty::{self, AssocContainer, Ty, TyCtxt, TypeVisitableExt, Upcast, VariantDef}; use rustc_session::lint::FutureIncompatibilityReason; // hardwired lints from rustc_lint_defs pub use rustc_session::lint::builtin::*; @@ -61,7 +61,6 @@ use crate::lints::{ BuiltinUnreachablePub, BuiltinUnsafe, BuiltinUnstableFeatures, BuiltinUnusedDocComment, BuiltinUnusedDocCommentSub, BuiltinWhileTrue, InvalidAsmLabel, }; -use crate::nonstandard_style::{MethodLateContext, method_context}; use crate::{ EarlyContext, EarlyLintPass, LateContext, LateLintPass, Level, LintContext, fluent_generated as fluent, @@ -469,14 +468,14 @@ impl<'tcx> LateLintPass<'tcx> for MissingDoc { } fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) { - let context = method_context(cx, impl_item.owner_id.def_id); + let container = cx.tcx.associated_item(impl_item.owner_id.def_id).container; - match context { + match container { // If the method is an impl for a trait, don't doc. - MethodLateContext::TraitImpl => return, - MethodLateContext::TraitAutoImpl => {} + AssocContainer::TraitImpl(_) => return, + AssocContainer::Trait => {} // If the method is an impl for an item with docs_hidden, don't doc. - MethodLateContext::PlainImpl => { + AssocContainer::InherentImpl => { let parent = cx.tcx.hir_get_parent_item(impl_item.hir_id()); let impl_ty = cx.tcx.type_of(parent).instantiate_identity(); let outerdef = match impl_ty.kind() { @@ -1321,9 +1320,8 @@ impl<'tcx> LateLintPass<'tcx> for UnreachablePub { } fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) { - // Only lint inherent impl items. 
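// --- Illustrative sketch of the shape of the `UnreachablePub` change above: instead
// of asking whether an associated item has a corresponding trait item, the lint now
// matches an `Inherent { vis_span }`-style variant that carries the visibility span
// directly. Toy stand-ins below, not the rustc_hir types.
enum ImplKind {
    Inherent { vis_span: (u32, u32) },
    Trait,
}

fn lint_span(impl_kind: &ImplKind) -> Option<(u32, u32)> {
    // Only inherent impl items are linted; trait impl items follow the trait's rules.
    match impl_kind {
        ImplKind::Inherent { vis_span } => Some(*vis_span),
        ImplKind::Trait => None,
    }
}

fn main() {
    assert_eq!(lint_span(&ImplKind::Inherent { vis_span: (0, 3) }), Some((0, 3)));
    assert_eq!(lint_span(&ImplKind::Trait), None);
}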
- if cx.tcx.associated_item(impl_item.owner_id).trait_item_def_id.is_none() { - self.perform_lint(cx, "item", impl_item.owner_id.def_id, impl_item.vis_span, false); + if let ImplItemImplKind::Inherent { vis_span } = impl_item.impl_kind { + self.perform_lint(cx, "item", impl_item.owner_id.def_id, vis_span, false); } } } diff --git a/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs b/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs index 943fcc0801b..e1c51ff8299 100644 --- a/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs +++ b/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs @@ -78,7 +78,7 @@ impl<'tcx> LateLintPass<'tcx> for DerefIntoDynSupertrait { { // erase regions in self type for better diagnostic presentation let (self_ty, target_principal, supertrait_principal) = - tcx.erase_regions((self_ty, target_principal, supertrait_principal)); + tcx.erase_and_anonymize_regions((self_ty, target_principal, supertrait_principal)); let label2 = tcx .associated_items(item.owner_id) .find_by_ident_and_kind( diff --git a/compiler/rustc_lint/src/for_loops_over_fallibles.rs b/compiler/rustc_lint/src/for_loops_over_fallibles.rs index a56b753bda7..9e1fc598171 100644 --- a/compiler/rustc_lint/src/for_loops_over_fallibles.rs +++ b/compiler/rustc_lint/src/for_loops_over_fallibles.rs @@ -176,7 +176,7 @@ fn suggest_question_mark<'tcx>( cause, param_env, // Erase any region vids from the type, which may not be resolved - infcx.tcx.erase_regions(ty), + infcx.tcx.erase_and_anonymize_regions(ty), into_iterator_did, ); diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs index 3267e70f1de..ad73e15e31f 100644 --- a/compiler/rustc_lint/src/foreign_modules.rs +++ b/compiler/rustc_lint/src/foreign_modules.rs @@ -136,7 +136,6 @@ impl ClashingExternDeclarations { ty::TypingEnv::non_body_analysis(tcx, this_fi.owner_id), existing_decl_ty, this_decl_ty, - types::CItemKind::Declaration, ) { let orig = name_of_extern_decl(tcx, existing_did); @@ -214,10 +213,9 @@ fn structurally_same_type<'tcx>( typing_env: ty::TypingEnv<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>, - ckind: types::CItemKind, ) -> bool { let mut seen_types = UnordSet::default(); - let result = structurally_same_type_impl(&mut seen_types, tcx, typing_env, a, b, ckind); + let result = structurally_same_type_impl(&mut seen_types, tcx, typing_env, a, b); if cfg!(debug_assertions) && result { // Sanity-check: must have same ABI, size and alignment. // `extern` blocks cannot be generic, so we'll always get a layout here. @@ -236,7 +234,6 @@ fn structurally_same_type_impl<'tcx>( typing_env: ty::TypingEnv<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>, - ckind: types::CItemKind, ) -> bool { debug!("structurally_same_type_impl(tcx, a = {:?}, b = {:?})", a, b); @@ -307,7 +304,6 @@ fn structurally_same_type_impl<'tcx>( typing_env, tcx.type_of(a_did).instantiate(tcx, a_gen_args), tcx.type_of(b_did).instantiate(tcx, b_gen_args), - ckind, ) }, ) @@ -315,25 +311,19 @@ fn structurally_same_type_impl<'tcx>( (ty::Array(a_ty, a_len), ty::Array(b_ty, b_len)) => { // For arrays, we also check the length. 
a_len == b_len - && structurally_same_type_impl( - seen_types, tcx, typing_env, *a_ty, *b_ty, ckind, - ) + && structurally_same_type_impl(seen_types, tcx, typing_env, *a_ty, *b_ty) } (ty::Slice(a_ty), ty::Slice(b_ty)) => { - structurally_same_type_impl(seen_types, tcx, typing_env, *a_ty, *b_ty, ckind) + structurally_same_type_impl(seen_types, tcx, typing_env, *a_ty, *b_ty) } (ty::RawPtr(a_ty, a_mutbl), ty::RawPtr(b_ty, b_mutbl)) => { a_mutbl == b_mutbl - && structurally_same_type_impl( - seen_types, tcx, typing_env, *a_ty, *b_ty, ckind, - ) + && structurally_same_type_impl(seen_types, tcx, typing_env, *a_ty, *b_ty) } (ty::Ref(_a_region, a_ty, a_mut), ty::Ref(_b_region, b_ty, b_mut)) => { // For structural sameness, we don't need the region to be same. a_mut == b_mut - && structurally_same_type_impl( - seen_types, tcx, typing_env, *a_ty, *b_ty, ckind, - ) + && structurally_same_type_impl(seen_types, tcx, typing_env, *a_ty, *b_ty) } (ty::FnDef(..), ty::FnDef(..)) => { let a_poly_sig = a.fn_sig(tcx); @@ -347,7 +337,7 @@ fn structurally_same_type_impl<'tcx>( (a_sig.abi, a_sig.safety, a_sig.c_variadic) == (b_sig.abi, b_sig.safety, b_sig.c_variadic) && a_sig.inputs().iter().eq_by(b_sig.inputs().iter(), |a, b| { - structurally_same_type_impl(seen_types, tcx, typing_env, *a, *b, ckind) + structurally_same_type_impl(seen_types, tcx, typing_env, *a, *b) }) && structurally_same_type_impl( seen_types, @@ -355,7 +345,6 @@ fn structurally_same_type_impl<'tcx>( typing_env, a_sig.output(), b_sig.output(), - ckind, ) } (ty::Tuple(..), ty::Tuple(..)) => { @@ -383,14 +372,14 @@ fn structurally_same_type_impl<'tcx>( // An Adt and a primitive or pointer type. This can be FFI-safe if non-null // enum layout optimisation is being applied. (ty::Adt(..) | ty::Pat(..), _) if is_primitive_or_pointer(b) => { - if let Some(a_inner) = types::repr_nullable_ptr(tcx, typing_env, a, ckind) { + if let Some(a_inner) = types::repr_nullable_ptr(tcx, typing_env, a) { a_inner == b } else { false } } (_, ty::Adt(..) 
| ty::Pat(..)) if is_primitive_or_pointer(a) => { - if let Some(b_inner) = types::repr_nullable_ptr(tcx, typing_env, b, ckind) { + if let Some(b_inner) = types::repr_nullable_ptr(tcx, typing_env, b) { b_inner == a } else { false diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index bdbac7fc4d1..9bb53fea54a 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -194,8 +194,7 @@ late_lint_methods!( DefaultCouldBeDerived: DefaultCouldBeDerived::default(), DerefIntoDynSupertrait: DerefIntoDynSupertrait, DropForgetUseless: DropForgetUseless, - ImproperCTypesDeclarations: ImproperCTypesDeclarations, - ImproperCTypesDefinitions: ImproperCTypesDefinitions, + ImproperCTypesLint: ImproperCTypesLint, InvalidFromUtf8: InvalidFromUtf8, VariantSizeDifferences: VariantSizeDifferences, PathStatements: PathStatements, diff --git a/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs b/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs index 5513c703f1d..93f067d0983 100644 --- a/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs +++ b/compiler/rustc_lint/src/multiple_supertrait_upcastable.rs @@ -47,7 +47,8 @@ impl<'tcx> LateLintPass<'tcx> for MultipleSupertraitUpcastable { .explicit_super_predicates_of(def_id) .iter_identity_copied() .filter_map(|(pred, _)| pred.as_trait_clause()) - .filter(|pred| !cx.tcx.is_lang_item(pred.def_id(), hir::LangItem::MetaSized)); + .filter(|pred| !cx.tcx.is_lang_item(pred.def_id(), hir::LangItem::MetaSized)) + .filter(|pred| !cx.tcx.is_default_trait(pred.def_id())); if direct_super_traits_iter.count() > 1 { cx.emit_span_lint( MULTIPLE_SUPERTRAIT_UPCASTABLE, diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index 65075cfecfa..7f643a551bb 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -7,7 +7,7 @@ use rustc_hir::def_id::DefId; use rustc_hir::intravisit::{FnKind, Visitor}; use rustc_hir::{Attribute, GenericParamKind, PatExprKind, PatKind, find_attr}; use rustc_middle::hir::nested_filter::All; -use rustc_middle::ty; +use rustc_middle::ty::AssocContainer; use rustc_session::config::CrateType; use rustc_session::{declare_lint, declare_lint_pass}; use rustc_span::def_id::LocalDefId; @@ -20,29 +20,6 @@ use crate::lints::{ }; use crate::{EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext}; -#[derive(PartialEq)] -pub(crate) enum MethodLateContext { - TraitAutoImpl, - TraitImpl, - PlainImpl, -} - -pub(crate) fn method_context(cx: &LateContext<'_>, id: LocalDefId) -> MethodLateContext { - let item = cx.tcx.associated_item(id); - match item.container { - ty::AssocItemContainer::Trait => MethodLateContext::TraitAutoImpl, - ty::AssocItemContainer::Impl => match cx.tcx.impl_trait_ref(item.container_id(cx.tcx)) { - Some(_) => MethodLateContext::TraitImpl, - None => MethodLateContext::PlainImpl, - }, - } -} - -fn assoc_item_in_trait_impl(cx: &LateContext<'_>, ii: &hir::ImplItem<'_>) -> bool { - let item = cx.tcx.associated_item(ii.owner_id); - item.trait_item_def_id.is_some() -} - declare_lint! { /// The `non_camel_case_types` lint detects types, variants, traits and /// type parameters that don't have camel case names. @@ -389,8 +366,8 @@ impl<'tcx> LateLintPass<'tcx> for NonSnakeCase { id: LocalDefId, ) { match &fk { - FnKind::Method(ident, sig, ..) => match method_context(cx, id) { - MethodLateContext::PlainImpl => { + FnKind::Method(ident, sig, ..) 
=> match cx.tcx.associated_item(id).container { + AssocContainer::InherentImpl => { if sig.header.abi != ExternAbi::Rust && find_attr!(cx.tcx.get_all_attrs(id), AttributeKind::NoMangle(..)) { @@ -398,10 +375,10 @@ impl<'tcx> LateLintPass<'tcx> for NonSnakeCase { } self.check_snake_case(cx, "method", ident); } - MethodLateContext::TraitAutoImpl => { + AssocContainer::Trait => { self.check_snake_case(cx, "trait method", ident); } - _ => (), + AssocContainer::TraitImpl(_) => {} }, FnKind::ItemFn(ident, _, header) => { // Skip foreign-ABI #[no_mangle] functions (Issue #31924) @@ -602,7 +579,7 @@ impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals { fn check_impl_item(&mut self, cx: &LateContext<'_>, ii: &hir::ImplItem<'_>) { if let hir::ImplItemKind::Const(..) = ii.kind - && !assoc_item_in_trait_impl(cx, ii) + && let hir::ImplItemImplKind::Inherent { .. } = ii.impl_kind { NonUpperCaseGlobals::check_upper_case(cx, "associated constant", None, &ii.ident); } diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs index f8a692313f0..88527fa2e6e 100644 --- a/compiler/rustc_lint/src/types.rs +++ b/compiler/rustc_lint/src/types.rs @@ -1,35 +1,28 @@ use std::iter; -use std::ops::ControlFlow; -use rustc_abi::{BackendRepr, TagEncoding, VariantIdx, Variants, WrappingRange}; -use rustc_data_structures::fx::FxHashSet; -use rustc_errors::DiagMessage; -use rustc_hir::intravisit::VisitorExt; -use rustc_hir::{AmbigArg, Expr, ExprKind, HirId, LangItem}; +use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange}; +use rustc_hir::{Expr, ExprKind, HirId, LangItem}; use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, SizeSkeleton}; -use rustc_middle::ty::{ - self, Adt, AdtKind, GenericArgsRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, - TypeVisitableExt, -}; +use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt}; use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass}; -use rustc_span::def_id::LocalDefId; use rustc_span::{Span, Symbol, sym}; use tracing::debug; use {rustc_ast as ast, rustc_hir as hir}; -mod improper_ctypes; +mod improper_ctypes; // these filed do the implementation for ImproperCTypesDefinitions,ImproperCTypesDeclarations +pub(crate) use improper_ctypes::ImproperCTypesLint; use crate::lints::{ AmbiguousWidePointerComparisons, AmbiguousWidePointerComparisonsAddrMetadataSuggestion, AmbiguousWidePointerComparisonsAddrSuggestion, AmbiguousWidePointerComparisonsCastSuggestion, AmbiguousWidePointerComparisonsExpectSuggestion, AtomicOrderingFence, AtomicOrderingLoad, - AtomicOrderingStore, ImproperCTypes, InvalidAtomicOrderingDiag, InvalidNanComparisons, + AtomicOrderingStore, InvalidAtomicOrderingDiag, InvalidNanComparisons, InvalidNanComparisonsSuggestion, UnpredictableFunctionPointerComparisons, - UnpredictableFunctionPointerComparisonsSuggestion, UnusedComparisons, UsesPowerAlignment, + UnpredictableFunctionPointerComparisonsSuggestion, UnusedComparisons, VariantSizeDifferencesDiag, }; -use crate::{LateContext, LateLintPass, LintContext, fluent_generated as fluent}; +use crate::{LateContext, LateLintPass, LintContext}; mod literal; @@ -690,144 +683,6 @@ impl<'tcx> LateLintPass<'tcx> for TypeLimits { } } -declare_lint! { - /// The `improper_ctypes` lint detects incorrect use of types in foreign - /// modules. 
- /// - /// ### Example - /// - /// ```rust - /// unsafe extern "C" { - /// static STATIC: String; - /// } - /// ``` - /// - /// {{produces}} - /// - /// ### Explanation - /// - /// The compiler has several checks to verify that types used in `extern` - /// blocks are safe and follow certain rules to ensure proper - /// compatibility with the foreign interfaces. This lint is issued when it - /// detects a probable mistake in a definition. The lint usually should - /// provide a description of the issue, along with possibly a hint on how - /// to resolve it. - IMPROPER_CTYPES, - Warn, - "proper use of libc types in foreign modules" -} - -declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]); - -declare_lint! { - /// The `improper_ctypes_definitions` lint detects incorrect use of - /// [`extern` function] definitions. - /// - /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier - /// - /// ### Example - /// - /// ```rust - /// # #![allow(unused)] - /// pub extern "C" fn str_type(p: &str) { } - /// ``` - /// - /// {{produces}} - /// - /// ### Explanation - /// - /// There are many parameter and return types that may be specified in an - /// `extern` function that are not compatible with the given ABI. This - /// lint is an alert that these types should not be used. The lint usually - /// should provide a description of the issue, along with possibly a hint - /// on how to resolve it. - IMPROPER_CTYPES_DEFINITIONS, - Warn, - "proper use of libc types in foreign item definitions" -} - -declare_lint! { - /// The `uses_power_alignment` lint detects specific `repr(C)` - /// aggregates on AIX. - /// In its platform C ABI, AIX uses the "power" (as in PowerPC) alignment - /// rule (detailed in https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=data-using-alignment-modes#alignment), - /// which can also be set for XLC by `#pragma align(power)` or - /// `-qalign=power`. Aggregates with a floating-point type as the - /// recursively first field (as in "at offset 0") modify the layout of - /// *subsequent* fields of the associated structs to use an alignment value - /// where the floating-point type is aligned on a 4-byte boundary. - /// - /// Effectively, subsequent floating-point fields act as-if they are `repr(packed(4))`. This - /// would be unsound to do in a `repr(C)` type without all the restrictions that come with - /// `repr(packed)`. Rust instead chooses a layout that maintains soundness of Rust code, at the - /// expense of incompatibility with C code. - /// - /// ### Example - /// - /// ```rust,ignore (fails on non-powerpc64-ibm-aix) - /// #[repr(C)] - /// pub struct Floats { - /// a: f64, - /// b: u8, - /// c: f64, - /// } - /// ``` - /// - /// This will produce: - /// - /// ```text - /// warning: repr(C) does not follow the power alignment rule. This may affect platform C ABI compatibility for this type - /// --> <source>:5:3 - /// | - /// 5 | c: f64, - /// | ^^^^^^ - /// | - /// = note: `#[warn(uses_power_alignment)]` on by default - /// ``` - /// - /// ### Explanation - /// - /// The power alignment rule specifies that the above struct has the - /// following alignment: - /// - offset_of!(Floats, a) == 0 - /// - offset_of!(Floats, b) == 8 - /// - offset_of!(Floats, c) == 12 - /// - /// However, Rust currently aligns `c` at `offset_of!(Floats, c) == 16`. - /// Using offset 12 would be unsound since `f64` generally must be 8-aligned on this target. - /// Thus, a warning is produced for the above struct. 
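// --- Pure-arithmetic sketch of the layout difference described in the
// `uses_power_alignment` documentation above, for `struct Floats { a: f64, b: u8, c: f64 }`:
// the AIX "power" rule effectively caps the alignment of subsequent floating-point
// fields at 4 bytes, while Rust keeps the natural 8-byte alignment. The helpers below
// are illustrative only; they simply round each field's offset up to its alignment.
fn round_up(offset: usize, align: usize) -> usize {
    (offset + align - 1) / align * align
}

// Lay out fields given `(size, align)` pairs, returning each field's offset.
fn layout(fields: &[(usize, usize)]) -> Vec<usize> {
    let mut offset = 0;
    fields
        .iter()
        .map(|&(size, align)| {
            let field_offset = round_up(offset, align);
            offset = field_offset + size;
            field_offset
        })
        .collect()
}

fn main() {
    // Natural alignment (what Rust emits): a = 0, b = 8, c = 16.
    assert_eq!(layout(&[(8, 8), (1, 1), (8, 8)]), vec![0, 8, 16]);
    // Power alignment rule (subsequent f64 treated as 4-aligned): a = 0, b = 8, c = 12.
    assert_eq!(layout(&[(8, 8), (1, 1), (8, 4)]), vec![0, 8, 12]);
}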
- USES_POWER_ALIGNMENT, - Warn, - "Structs do not follow the power alignment rule under repr(C)" -} - -declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS, USES_POWER_ALIGNMENT]); - -#[derive(Clone, Copy)] -pub(crate) enum CItemKind { - Declaration, - Definition, -} - -struct ImproperCTypesVisitor<'a, 'tcx> { - cx: &'a LateContext<'tcx>, - mode: CItemKind, -} - -/// Accumulator for recursive ffi type checking -struct CTypesVisitorState<'tcx> { - cache: FxHashSet<Ty<'tcx>>, - /// The original type being checked, before we recursed - /// to any other types it contains. - base_ty: Ty<'tcx>, -} - -enum FfiResult<'tcx> { - FfiSafe, - FfiPhantom(Ty<'tcx>), - FfiUnsafe { ty: Ty<'tcx>, reason: DiagMessage, help: Option<DiagMessage> }, -} - pub(crate) fn nonnull_optimization_guaranteed<'tcx>( tcx: TyCtxt<'tcx>, def: ty::AdtDef<'tcx>, @@ -855,14 +710,13 @@ fn ty_is_known_nonnull<'tcx>( tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, ty: Ty<'tcx>, - mode: CItemKind, ) -> bool { let ty = tcx.try_normalize_erasing_regions(typing_env, ty).unwrap_or(ty); match ty.kind() { ty::FnPtr(..) => true, ty::Ref(..) => true, - ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true, + ty::Adt(def, _) if def.is_box() => true, ty::Adt(def, args) if def.repr().transparent() && !def.is_union() => { let marked_non_null = nonnull_optimization_guaranteed(tcx, *def); @@ -878,10 +732,10 @@ fn ty_is_known_nonnull<'tcx>( def.variants() .iter() .filter_map(|variant| transparent_newtype_field(tcx, variant)) - .any(|field| ty_is_known_nonnull(tcx, typing_env, field.ty(tcx, args), mode)) + .any(|field| ty_is_known_nonnull(tcx, typing_env, field.ty(tcx, args))) } ty::Pat(base, pat) => { - ty_is_known_nonnull(tcx, typing_env, *base, mode) + ty_is_known_nonnull(tcx, typing_env, *base) || pat_ty_is_known_nonnull(tcx, typing_env, *pat) } _ => false, @@ -992,7 +846,6 @@ pub(crate) fn repr_nullable_ptr<'tcx>( tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, ty: Ty<'tcx>, - ckind: CItemKind, ) -> Option<Ty<'tcx>> { debug!("is_repr_nullable_ptr(tcx, ty = {:?})", ty); match ty.kind() { @@ -1017,7 +870,7 @@ pub(crate) fn repr_nullable_ptr<'tcx>( _ => return None, }; - if !ty_is_known_nonnull(tcx, typing_env, field_ty, ckind) { + if !ty_is_known_nonnull(tcx, typing_env, field_ty) { return None; } @@ -1076,717 +929,13 @@ fn get_nullable_type_from_pat<'tcx>( } } -impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { - /// Check if the type is array and emit an unsafe type lint. - fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool { - if let ty::Array(..) = ty.kind() { - self.emit_ffi_unsafe_type_lint( - ty, - sp, - fluent::lint_improper_ctypes_array_reason, - Some(fluent::lint_improper_ctypes_array_help), - ); - true - } else { - false - } - } - - /// Checks if the given field's type is "ffi-safe". - fn check_field_type_for_ffi( - &self, - acc: &mut CTypesVisitorState<'tcx>, - field: &ty::FieldDef, - args: GenericArgsRef<'tcx>, - ) -> FfiResult<'tcx> { - let field_ty = field.ty(self.cx.tcx, args); - let field_ty = self - .cx - .tcx - .try_normalize_erasing_regions(self.cx.typing_env(), field_ty) - .unwrap_or(field_ty); - self.check_type_for_ffi(acc, field_ty) - } - - /// Checks if the given `VariantDef`'s field types are "ffi-safe". 
- fn check_variant_for_ffi( - &self, - acc: &mut CTypesVisitorState<'tcx>, - ty: Ty<'tcx>, - def: ty::AdtDef<'tcx>, - variant: &ty::VariantDef, - args: GenericArgsRef<'tcx>, - ) -> FfiResult<'tcx> { - use FfiResult::*; - let transparent_with_all_zst_fields = if def.repr().transparent() { - if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) { - // Transparent newtypes have at most one non-ZST field which needs to be checked.. - match self.check_field_type_for_ffi(acc, field, args) { - FfiUnsafe { ty, .. } if ty.is_unit() => (), - r => return r, - } - - false - } else { - // ..or have only ZST fields, which is FFI-unsafe (unless those fields are all - // `PhantomData`). - true - } - } else { - false - }; - - // We can't completely trust `repr(C)` markings, so make sure the fields are actually safe. - let mut all_phantom = !variant.fields.is_empty(); - for field in &variant.fields { - all_phantom &= match self.check_field_type_for_ffi(acc, field, args) { - FfiSafe => false, - // `()` fields are FFI-safe! - FfiUnsafe { ty, .. } if ty.is_unit() => false, - FfiPhantom(..) => true, - r @ FfiUnsafe { .. } => return r, - } - } - - if all_phantom { - FfiPhantom(ty) - } else if transparent_with_all_zst_fields { - FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None } - } else { - FfiSafe - } - } - - /// Checks if the given type is "ffi-safe" (has a stable, well-defined - /// representation which can be exported to C code). - fn check_type_for_ffi( - &self, - acc: &mut CTypesVisitorState<'tcx>, - ty: Ty<'tcx>, - ) -> FfiResult<'tcx> { - use FfiResult::*; - - let tcx = self.cx.tcx; - - // Protect against infinite recursion, for example - // `struct S(*mut S);`. - // FIXME: A recursion limit is necessary as well, for irregular - // recursive types. 
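// --- Illustrative sketch of the visited-set trick used above to survive recursive
// types such as `struct S(*mut S);`: once a type has been entered, re-encountering it
// is treated as safe rather than recursing forever. The toy type graph and names below
// are stand-ins for demonstration, not the compiler's `Ty`.
use std::collections::HashSet;

enum ToyTy {
    Int,
    Ptr(usize),         // index of the pointee in the arena below
    Struct(Vec<usize>), // field type indices
}

fn is_ffi_safe(arena: &[ToyTy], ty: usize, seen: &mut HashSet<usize>) -> bool {
    if !seen.insert(ty) {
        // Already being checked further up the stack: assume safe to break the cycle.
        return true;
    }
    match &arena[ty] {
        ToyTy::Int => true,
        ToyTy::Ptr(pointee) => is_ffi_safe(arena, *pointee, seen),
        ToyTy::Struct(fields) => fields.iter().all(|&f| is_ffi_safe(arena, f, seen)),
    }
}

fn main() {
    // arena[1] models `struct S(*mut S)`: a struct whose single field points back at it.
    let arena = vec![ToyTy::Int, ToyTy::Struct(vec![2]), ToyTy::Ptr(1)];
    let mut seen = HashSet::new();
    assert!(is_ffi_safe(&arena, 1, &mut seen));
}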
- if !acc.cache.insert(ty) { - return FfiSafe; - } - - match *ty.kind() { - ty::Adt(def, args) => { - if let Some(boxed) = ty.boxed_ty() - && matches!(self.mode, CItemKind::Definition) - { - if boxed.is_sized(tcx, self.cx.typing_env()) { - return FfiSafe; - } else { - return FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_box, - help: None, - }; - } - } - if def.is_phantom_data() { - return FfiPhantom(ty); - } - match def.adt_kind() { - AdtKind::Struct | AdtKind::Union => { - if let Some(sym::cstring_type | sym::cstr_type) = - tcx.get_diagnostic_name(def.did()) - && !acc.base_ty.is_mutable_ptr() - { - return FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_cstr_reason, - help: Some(fluent::lint_improper_ctypes_cstr_help), - }; - } - - if !def.repr().c() && !def.repr().transparent() { - return FfiUnsafe { - ty, - reason: if def.is_struct() { - fluent::lint_improper_ctypes_struct_layout_reason - } else { - fluent::lint_improper_ctypes_union_layout_reason - }, - help: if def.is_struct() { - Some(fluent::lint_improper_ctypes_struct_layout_help) - } else { - Some(fluent::lint_improper_ctypes_union_layout_help) - }, - }; - } - - if def.non_enum_variant().field_list_has_applicable_non_exhaustive() { - return FfiUnsafe { - ty, - reason: if def.is_struct() { - fluent::lint_improper_ctypes_struct_non_exhaustive - } else { - fluent::lint_improper_ctypes_union_non_exhaustive - }, - help: None, - }; - } - - if def.non_enum_variant().fields.is_empty() { - return FfiUnsafe { - ty, - reason: if def.is_struct() { - fluent::lint_improper_ctypes_struct_fieldless_reason - } else { - fluent::lint_improper_ctypes_union_fieldless_reason - }, - help: if def.is_struct() { - Some(fluent::lint_improper_ctypes_struct_fieldless_help) - } else { - Some(fluent::lint_improper_ctypes_union_fieldless_help) - }, - }; - } - - self.check_variant_for_ffi(acc, ty, def, def.non_enum_variant(), args) - } - AdtKind::Enum => { - if def.variants().is_empty() { - // Empty enums are okay... although sort of useless. - return FfiSafe; - } - // Check for a repr() attribute to specify the size of the - // discriminant. - if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none() - { - // Special-case types like `Option<extern fn()>` and `Result<extern fn(), ()>` - if let Some(ty) = - repr_nullable_ptr(self.cx.tcx, self.cx.typing_env(), ty, self.mode) - { - return self.check_type_for_ffi(acc, ty); - } - - return FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_enum_repr_reason, - help: Some(fluent::lint_improper_ctypes_enum_repr_help), - }; - } - - use improper_ctypes::check_non_exhaustive_variant; - - let non_exhaustive = def.variant_list_has_applicable_non_exhaustive(); - // Check the contained variants. - let ret = def.variants().iter().try_for_each(|variant| { - check_non_exhaustive_variant(non_exhaustive, variant) - .map_break(|reason| FfiUnsafe { ty, reason, help: None })?; - - match self.check_variant_for_ffi(acc, ty, def, variant, args) { - FfiSafe => ControlFlow::Continue(()), - r => ControlFlow::Break(r), - } - }); - if let ControlFlow::Break(result) = ret { - return result; - } - - FfiSafe - } - } - } - - ty::Char => FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_char_reason, - help: Some(fluent::lint_improper_ctypes_char_help), - }, - - // It's just extra invariants on the type that you need to uphold, - // but only the base type is relevant for being representable in FFI. - ty::Pat(base, ..) 
=> self.check_type_for_ffi(acc, base), - - // Primitive types with a stable representation. - ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe, - - ty::Slice(_) => FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_slice_reason, - help: Some(fluent::lint_improper_ctypes_slice_help), - }, - - ty::Dynamic(..) => { - FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None } - } - - ty::Str => FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_str_reason, - help: Some(fluent::lint_improper_ctypes_str_help), - }, - - ty::Tuple(..) => FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_tuple_reason, - help: Some(fluent::lint_improper_ctypes_tuple_help), - }, - - ty::RawPtr(ty, _) | ty::Ref(_, ty, _) - if { - matches!(self.mode, CItemKind::Definition) - && ty.is_sized(self.cx.tcx, self.cx.typing_env()) - } => - { - FfiSafe - } - - ty::RawPtr(ty, _) - if match ty.kind() { - ty::Tuple(tuple) => tuple.is_empty(), - _ => false, - } => - { - FfiSafe - } - - ty::RawPtr(ty, _) | ty::Ref(_, ty, _) => self.check_type_for_ffi(acc, ty), - - ty::Array(inner_ty, _) => self.check_type_for_ffi(acc, inner_ty), - - ty::FnPtr(sig_tys, hdr) => { - let sig = sig_tys.with(hdr); - if sig.abi().is_rustic_abi() { - return FfiUnsafe { - ty, - reason: fluent::lint_improper_ctypes_fnptr_reason, - help: Some(fluent::lint_improper_ctypes_fnptr_help), - }; - } - - let sig = tcx.instantiate_bound_regions_with_erased(sig); - for arg in sig.inputs() { - match self.check_type_for_ffi(acc, *arg) { - FfiSafe => {} - r => return r, - } - } - - let ret_ty = sig.output(); - if ret_ty.is_unit() { - return FfiSafe; - } - - self.check_type_for_ffi(acc, ret_ty) - } - - ty::Foreign(..) => FfiSafe, - - // While opaque types are checked for earlier, if a projection in a struct field - // normalizes to an opaque type, then it will reach this branch. - ty::Alias(ty::Opaque, ..) => { - FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None } - } - - // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe, - // so they are currently ignored for the purposes of this lint. - ty::Param(..) | ty::Alias(ty::Projection | ty::Inherent, ..) - if matches!(self.mode, CItemKind::Definition) => - { - FfiSafe - } - - ty::UnsafeBinder(_) => todo!("FIXME(unsafe_binder)"), - - ty::Param(..) - | ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..) - | ty::Infer(..) - | ty::Bound(..) - | ty::Error(_) - | ty::Closure(..) - | ty::CoroutineClosure(..) - | ty::Coroutine(..) - | ty::CoroutineWitness(..) - | ty::Placeholder(..) - | ty::FnDef(..) 
=> bug!("unexpected type in foreign function: {:?}", ty), - } - } - - fn emit_ffi_unsafe_type_lint( - &mut self, - ty: Ty<'tcx>, - sp: Span, - note: DiagMessage, - help: Option<DiagMessage>, - ) { - let lint = match self.mode { - CItemKind::Declaration => IMPROPER_CTYPES, - CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS, - }; - let desc = match self.mode { - CItemKind::Declaration => "block", - CItemKind::Definition => "fn", - }; - let span_note = if let ty::Adt(def, _) = ty.kind() - && let Some(sp) = self.cx.tcx.hir_span_if_local(def.did()) - { - Some(sp) - } else { - None - }; - self.cx.emit_span_lint( - lint, - sp, - ImproperCTypes { ty, desc, label: sp, help, note, span_note }, - ); - } - - fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool { - struct ProhibitOpaqueTypes; - impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for ProhibitOpaqueTypes { - type Result = ControlFlow<Ty<'tcx>>; - - fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result { - if !ty.has_opaque_types() { - return ControlFlow::Continue(()); - } - - if let ty::Alias(ty::Opaque, ..) = ty.kind() { - ControlFlow::Break(ty) - } else { - ty.super_visit_with(self) - } - } - } - - if let Some(ty) = self - .cx - .tcx - .try_normalize_erasing_regions(self.cx.typing_env(), ty) - .unwrap_or(ty) - .visit_with(&mut ProhibitOpaqueTypes) - .break_value() - { - self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint_improper_ctypes_opaque, None); - true - } else { - false - } - } - - fn check_type_for_ffi_and_report_errors( - &mut self, - sp: Span, - ty: Ty<'tcx>, - is_static: bool, - is_return_type: bool, - ) { - if self.check_for_opaque_ty(sp, ty) { - // We've already emitted an error due to an opaque type. - return; - } - - let ty = self.cx.tcx.try_normalize_erasing_regions(self.cx.typing_env(), ty).unwrap_or(ty); - - // C doesn't really support passing arrays by value - the only way to pass an array by value - // is through a struct. So, first test that the top level isn't an array, and then - // recursively check the types inside. - if !is_static && self.check_for_array_ty(sp, ty) { - return; - } - - // Don't report FFI errors for unit return types. This check exists here, and not in - // the caller (where it would make more sense) so that normalization has definitely - // happened. - if is_return_type && ty.is_unit() { - return; - } - - let mut acc = CTypesVisitorState { cache: FxHashSet::default(), base_ty: ty }; - match self.check_type_for_ffi(&mut acc, ty) { - FfiResult::FfiSafe => {} - FfiResult::FfiPhantom(ty) => { - self.emit_ffi_unsafe_type_lint( - ty, - sp, - fluent::lint_improper_ctypes_only_phantomdata, - None, - ); - } - FfiResult::FfiUnsafe { ty, reason, help } => { - self.emit_ffi_unsafe_type_lint(ty, sp, reason, help); - } - } - } - - /// Check if a function's argument types and result type are "ffi-safe". - /// - /// For a external ABI function, argument types and the result type are walked to find fn-ptr - /// types that have external ABIs, as these still need checked. 
- fn check_fn(&mut self, def_id: LocalDefId, decl: &'tcx hir::FnDecl<'_>) { - let sig = self.cx.tcx.fn_sig(def_id).instantiate_identity(); - let sig = self.cx.tcx.instantiate_bound_regions_with_erased(sig); - - for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) { - for (fn_ptr_ty, span) in self.find_fn_ptr_ty_with_external_abi(input_hir, *input_ty) { - self.check_type_for_ffi_and_report_errors(span, fn_ptr_ty, false, false); - } - } - - if let hir::FnRetTy::Return(ret_hir) = decl.output { - for (fn_ptr_ty, span) in self.find_fn_ptr_ty_with_external_abi(ret_hir, sig.output()) { - self.check_type_for_ffi_and_report_errors(span, fn_ptr_ty, false, true); - } - } - } - - /// Check if a function's argument types and result type are "ffi-safe". - fn check_foreign_fn(&mut self, def_id: LocalDefId, decl: &'tcx hir::FnDecl<'_>) { - let sig = self.cx.tcx.fn_sig(def_id).instantiate_identity(); - let sig = self.cx.tcx.instantiate_bound_regions_with_erased(sig); - - for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) { - self.check_type_for_ffi_and_report_errors(input_hir.span, *input_ty, false, false); - } - - if let hir::FnRetTy::Return(ret_hir) = decl.output { - self.check_type_for_ffi_and_report_errors(ret_hir.span, sig.output(), false, true); - } - } - - fn check_foreign_static(&mut self, id: hir::OwnerId, span: Span) { - let ty = self.cx.tcx.type_of(id).instantiate_identity(); - self.check_type_for_ffi_and_report_errors(span, ty, true, false); - } - - /// Find any fn-ptr types with external ABIs in `ty`. - /// - /// For example, `Option<extern "C" fn()>` returns `extern "C" fn()` - fn find_fn_ptr_ty_with_external_abi( - &self, - hir_ty: &hir::Ty<'tcx>, - ty: Ty<'tcx>, - ) -> Vec<(Ty<'tcx>, Span)> { - struct FnPtrFinder<'tcx> { - spans: Vec<Span>, - tys: Vec<Ty<'tcx>>, - } - - impl<'tcx> hir::intravisit::Visitor<'_> for FnPtrFinder<'tcx> { - fn visit_ty(&mut self, ty: &'_ hir::Ty<'_, AmbigArg>) { - debug!(?ty); - if let hir::TyKind::FnPtr(hir::FnPtrTy { abi, .. }) = ty.kind - && !abi.is_rustic_abi() - { - self.spans.push(ty.span); - } - - hir::intravisit::walk_ty(self, ty) - } - } - - impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for FnPtrFinder<'tcx> { - type Result = (); - - fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result { - if let ty::FnPtr(_, hdr) = ty.kind() - && !hdr.abi.is_rustic_abi() - { - self.tys.push(ty); - } - - ty.super_visit_with(self) - } - } - - let mut visitor = FnPtrFinder { spans: Vec::new(), tys: Vec::new() }; - ty.visit_with(&mut visitor); - visitor.visit_ty_unambig(hir_ty); - - iter::zip(visitor.tys.drain(..), visitor.spans.drain(..)).collect() - } -} - -impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations { - fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, it: &hir::ForeignItem<'tcx>) { - let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration }; - let abi = cx.tcx.hir_get_foreign_abi(it.hir_id()); - - match it.kind { - hir::ForeignItemKind::Fn(sig, _, _) => { - if abi.is_rustic_abi() { - vis.check_fn(it.owner_id.def_id, sig.decl) - } else { - vis.check_foreign_fn(it.owner_id.def_id, sig.decl); - } - } - hir::ForeignItemKind::Static(ty, _, _) if !abi.is_rustic_abi() => { - vis.check_foreign_static(it.owner_id, ty.span); - } - hir::ForeignItemKind::Static(..) 
| hir::ForeignItemKind::Type => (), - } - } -} - -impl ImproperCTypesDefinitions { - fn check_ty_maybe_containing_foreign_fnptr<'tcx>( - &mut self, - cx: &LateContext<'tcx>, - hir_ty: &'tcx hir::Ty<'_>, - ty: Ty<'tcx>, - ) { - let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition }; - for (fn_ptr_ty, span) in vis.find_fn_ptr_ty_with_external_abi(hir_ty, ty) { - vis.check_type_for_ffi_and_report_errors(span, fn_ptr_ty, true, false); - } - } - - fn check_arg_for_power_alignment<'tcx>( - &mut self, - cx: &LateContext<'tcx>, - ty: Ty<'tcx>, - ) -> bool { - assert!(cx.tcx.sess.target.os == "aix"); - // Structs (under repr(C)) follow the power alignment rule if: - // - the first field of the struct is a floating-point type that - // is greater than 4-bytes, or - // - the first field of the struct is an aggregate whose - // recursively first field is a floating-point type greater than - // 4 bytes. - if ty.is_floating_point() && ty.primitive_size(cx.tcx).bytes() > 4 { - return true; - } else if let Adt(adt_def, _) = ty.kind() - && adt_def.is_struct() - && adt_def.repr().c() - && !adt_def.repr().packed() - && adt_def.repr().align.is_none() - { - let struct_variant = adt_def.variant(VariantIdx::ZERO); - // Within a nested struct, all fields are examined to correctly - // report if any fields after the nested struct within the - // original struct are misaligned. - for struct_field in &struct_variant.fields { - let field_ty = cx.tcx.type_of(struct_field.did).instantiate_identity(); - if self.check_arg_for_power_alignment(cx, field_ty) { - return true; - } - } - } - return false; - } - - fn check_struct_for_power_alignment<'tcx>( - &mut self, - cx: &LateContext<'tcx>, - item: &'tcx hir::Item<'tcx>, - ) { - let adt_def = cx.tcx.adt_def(item.owner_id.to_def_id()); - // repr(C) structs also with packed or aligned representation - // should be ignored. - if adt_def.repr().c() - && !adt_def.repr().packed() - && adt_def.repr().align.is_none() - && cx.tcx.sess.target.os == "aix" - && !adt_def.all_fields().next().is_none() - { - let struct_variant_data = item.expect_struct().2; - for field_def in struct_variant_data.fields().iter().skip(1) { - // Struct fields (after the first field) are checked for the - // power alignment rule, as fields after the first are likely - // to be the fields that are misaligned. - let def_id = field_def.def_id; - let ty = cx.tcx.type_of(def_id).instantiate_identity(); - if self.check_arg_for_power_alignment(cx, ty) { - cx.emit_span_lint(USES_POWER_ALIGNMENT, field_def.span, UsesPowerAlignment); - } - } - } - } -} - -/// `ImproperCTypesDefinitions` checks items outside of foreign items (e.g. stuff that isn't in -/// `extern "C" { }` blocks): -/// -/// - `extern "<abi>" fn` definitions are checked in the same way as the -/// `ImproperCtypesDeclarations` visitor checks functions if `<abi>` is external (e.g. "C"). -/// - All other items which contain types (e.g. other functions, struct definitions, etc) are -/// checked for extern fn-ptrs with external ABIs. -impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions { - fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) { - match item.kind { - hir::ItemKind::Static(_, _, ty, _) - | hir::ItemKind::Const(_, _, ty, _) - | hir::ItemKind::TyAlias(_, _, ty) => { - self.check_ty_maybe_containing_foreign_fnptr( - cx, - ty, - cx.tcx.type_of(item.owner_id).instantiate_identity(), - ); - } - // See `check_fn`.. - hir::ItemKind::Fn { .. 
} => {} - // Structs are checked based on if they follow the power alignment - // rule (under repr(C)). - hir::ItemKind::Struct(..) => { - self.check_struct_for_power_alignment(cx, item); - } - // See `check_field_def`.. - hir::ItemKind::Union(..) | hir::ItemKind::Enum(..) => {} - // Doesn't define something that can contain a external type to be checked. - hir::ItemKind::Impl(..) - | hir::ItemKind::TraitAlias(..) - | hir::ItemKind::Trait(..) - | hir::ItemKind::GlobalAsm { .. } - | hir::ItemKind::ForeignMod { .. } - | hir::ItemKind::Mod(..) - | hir::ItemKind::Macro(..) - | hir::ItemKind::Use(..) - | hir::ItemKind::ExternCrate(..) => {} - } - } - - fn check_field_def(&mut self, cx: &LateContext<'tcx>, field: &'tcx hir::FieldDef<'tcx>) { - self.check_ty_maybe_containing_foreign_fnptr( - cx, - field.ty, - cx.tcx.type_of(field.def_id).instantiate_identity(), - ); - } - - fn check_fn( - &mut self, - cx: &LateContext<'tcx>, - kind: hir::intravisit::FnKind<'tcx>, - decl: &'tcx hir::FnDecl<'_>, - _: &'tcx hir::Body<'_>, - _: Span, - id: LocalDefId, - ) { - use hir::intravisit::FnKind; - - let abi = match kind { - FnKind::ItemFn(_, _, header, ..) => header.abi, - FnKind::Method(_, sig, ..) => sig.header.abi, - _ => return, - }; - - let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition }; - if abi.is_rustic_abi() { - vis.check_fn(id, decl); - } else { - vis.check_foreign_fn(id, decl); - } - } -} - declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]); impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences { fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { if let hir::ItemKind::Enum(_, _, ref enum_definition) = it.kind { let t = cx.tcx.type_of(it.owner_id).instantiate_identity(); - let ty = cx.tcx.erase_regions(t); + let ty = cx.tcx.erase_and_anonymize_regions(t); let Ok(layout) = cx.layout_of(ty) else { return }; let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, variants, .. } = &layout.variants diff --git a/compiler/rustc_lint/src/types/improper_ctypes.rs b/compiler/rustc_lint/src/types/improper_ctypes.rs index 13afa540afc..7ca57b0094e 100644 --- a/compiler/rustc_lint/src/types/improper_ctypes.rs +++ b/compiler/rustc_lint/src/types/improper_ctypes.rs @@ -1,10 +1,141 @@ +use std::iter; use std::ops::ControlFlow; +use bitflags::bitflags; +use rustc_abi::VariantIdx; +use rustc_data_structures::fx::FxHashSet; use rustc_errors::DiagMessage; use rustc_hir::def::CtorKind; -use rustc_middle::ty; +use rustc_hir::intravisit::VisitorExt; +use rustc_hir::{self as hir, AmbigArg}; +use rustc_middle::bug; +use rustc_middle::ty::{ + self, Adt, AdtDef, AdtKind, GenericArgsRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, + TypeVisitableExt, +}; +use rustc_session::{declare_lint, declare_lint_pass}; +use rustc_span::def_id::LocalDefId; +use rustc_span::{Span, sym}; +use tracing::debug; -use crate::fluent_generated as fluent; +use super::repr_nullable_ptr; +use crate::lints::{ImproperCTypes, UsesPowerAlignment}; +use crate::{LateContext, LateLintPass, LintContext, fluent_generated as fluent}; + +declare_lint! { + /// The `improper_ctypes` lint detects incorrect use of types in foreign + /// modules. 
+ /// + /// ### Example + /// + /// ```rust + /// unsafe extern "C" { + /// static STATIC: String; + /// } + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// The compiler has several checks to verify that types used in `extern` + /// blocks are safe and follow certain rules to ensure proper + /// compatibility with the foreign interfaces. This lint is issued when it + /// detects a probable mistake in a definition. The lint usually should + /// provide a description of the issue, along with possibly a hint on how + /// to resolve it. + IMPROPER_CTYPES, + Warn, + "proper use of libc types in foreign modules" +} + +declare_lint! { + /// The `improper_ctypes_definitions` lint detects incorrect use of + /// [`extern` function] definitions. + /// + /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier + /// + /// ### Example + /// + /// ```rust + /// # #![allow(unused)] + /// pub extern "C" fn str_type(p: &str) { } + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// There are many parameter and return types that may be specified in an + /// `extern` function that are not compatible with the given ABI. This + /// lint is an alert that these types should not be used. The lint usually + /// should provide a description of the issue, along with possibly a hint + /// on how to resolve it. + IMPROPER_CTYPES_DEFINITIONS, + Warn, + "proper use of libc types in foreign item definitions" +} + +declare_lint! { + /// The `uses_power_alignment` lint detects specific `repr(C)` + /// aggregates on AIX. + /// In its platform C ABI, AIX uses the "power" (as in PowerPC) alignment + /// rule (detailed in https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=data-using-alignment-modes#alignment), + /// which can also be set for XLC by `#pragma align(power)` or + /// `-qalign=power`. Aggregates with a floating-point type as the + /// recursively first field (as in "at offset 0") modify the layout of + /// *subsequent* fields of the associated structs to use an alignment value + /// where the floating-point type is aligned on a 4-byte boundary. + /// + /// Effectively, subsequent floating-point fields act as-if they are `repr(packed(4))`. This + /// would be unsound to do in a `repr(C)` type without all the restrictions that come with + /// `repr(packed)`. Rust instead chooses a layout that maintains soundness of Rust code, at the + /// expense of incompatibility with C code. + /// + /// ### Example + /// + /// ```rust,ignore (fails on non-powerpc64-ibm-aix) + /// #[repr(C)] + /// pub struct Floats { + /// a: f64, + /// b: u8, + /// c: f64, + /// } + /// ``` + /// + /// This will produce: + /// + /// ```text + /// warning: repr(C) does not follow the power alignment rule. This may affect platform C ABI compatibility for this type + /// --> <source>:5:3 + /// | + /// 5 | c: f64, + /// | ^^^^^^ + /// | + /// = note: `#[warn(uses_power_alignment)]` on by default + /// ``` + /// + /// ### Explanation + /// + /// The power alignment rule specifies that the above struct has the + /// following alignment: + /// - offset_of!(Floats, a) == 0 + /// - offset_of!(Floats, b) == 8 + /// - offset_of!(Floats, c) == 12 + /// + /// However, Rust currently aligns `c` at `offset_of!(Floats, c) == 16`. + /// Using offset 12 would be unsound since `f64` generally must be 8-aligned on this target. + /// Thus, a warning is produced for the above struct. 
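The offsets spelled out above are easy to verify directly. A standalone sketch (not part of the patch; assumes a typical 64-bit, non-AIX host where Rust's `repr(C)` layout applies) that checks the layout Rust actually picks for the `Floats` example:

```rust
use core::mem::offset_of;

#[repr(C)]
pub struct Floats {
    a: f64,
    b: u8,
    c: f64,
}

fn main() {
    // Rust keeps `f64` 8-aligned, so `c` lands at offset 16; under AIX's power
    // alignment rule a C compiler would place it at offset 12 instead, which is
    // exactly the ABI mismatch this lint warns about.
    assert_eq!(offset_of!(Floats, a), 0);
    assert_eq!(offset_of!(Floats, b), 8);
    assert_eq!(offset_of!(Floats, c), 16);
}
```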
+ USES_POWER_ALIGNMENT, + Warn, + "Structs do not follow the power alignment rule under repr(C)" +} + +declare_lint_pass!(ImproperCTypesLint => [ + IMPROPER_CTYPES, + IMPROPER_CTYPES_DEFINITIONS, + USES_POWER_ALIGNMENT +]); /// Check a variant of a non-exhaustive enum for improper ctypes /// @@ -41,3 +172,884 @@ fn variant_has_complex_ctor(variant: &ty::VariantDef) -> bool { // CtorKind::Const means a "unit" ctor !matches!(variant.ctor_kind(), Some(CtorKind::Const)) } + +/// Per-struct-field function that checks if a struct definition follows +/// the Power alignment Rule (see the `check_struct_for_power_alignment` function). +fn check_arg_for_power_alignment<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool { + let tcx = cx.tcx; + assert!(tcx.sess.target.os == "aix"); + // Structs (under repr(C)) follow the power alignment rule if: + // - the first field of the struct is a floating-point type that + // is greater than 4-bytes, or + // - the first field of the struct is an aggregate whose + // recursively first field is a floating-point type greater than + // 4 bytes. + if ty.is_floating_point() && ty.primitive_size(tcx).bytes() > 4 { + return true; + } else if let Adt(adt_def, _) = ty.kind() + && adt_def.is_struct() + && adt_def.repr().c() + && !adt_def.repr().packed() + && adt_def.repr().align.is_none() + { + let struct_variant = adt_def.variant(VariantIdx::ZERO); + // Within a nested struct, all fields are examined to correctly + // report if any fields after the nested struct within the + // original struct are misaligned. + for struct_field in &struct_variant.fields { + let field_ty = tcx.type_of(struct_field.did).instantiate_identity(); + if check_arg_for_power_alignment(cx, field_ty) { + return true; + } + } + } + return false; +} + +/// Check a struct definition for respect of the Power alignment Rule (as in PowerPC), +/// which should be respected in the "aix" target OS. +/// To do so, we must follow one of the two following conditions: +/// - The first field of the struct must be floating-point type that +/// is greater than 4-bytes. +/// - The first field of the struct must be an aggregate whose +/// recursively first field is a floating-point type greater than +/// 4 bytes. +fn check_struct_for_power_alignment<'tcx>( + cx: &LateContext<'tcx>, + item: &'tcx hir::Item<'tcx>, + adt_def: AdtDef<'tcx>, +) { + let tcx = cx.tcx; + // repr(C) structs also with packed or aligned representation + // should be ignored. + if adt_def.repr().c() + && !adt_def.repr().packed() + && adt_def.repr().align.is_none() + && tcx.sess.target.os == "aix" + && !adt_def.all_fields().next().is_none() + { + let struct_variant_data = item.expect_struct().2; + for field_def in struct_variant_data.fields().iter().skip(1) { + // Struct fields (after the first field) are checked for the + // power alignment rule, as fields after the first are likely + // to be the fields that are misaligned. + let def_id = field_def.def_id; + let ty = tcx.type_of(def_id).instantiate_identity(); + if check_arg_for_power_alignment(cx, ty) { + cx.emit_span_lint(USES_POWER_ALIGNMENT, field_def.span, UsesPowerAlignment); + } + } + } +} + +#[derive(Clone, Copy)] +enum CItemKind { + Declaration, + Definition, +} + +enum FfiResult<'tcx> { + FfiSafe, + FfiPhantom(Ty<'tcx>), + FfiUnsafe { ty: Ty<'tcx>, reason: DiagMessage, help: Option<DiagMessage> }, +} + +/// The result when a type has been checked but perhaps not completely. 
`None` indicates that +/// FFI safety/unsafety has not yet been determined, `Some(res)` indicates that the safety/unsafety +/// in the `FfiResult` is final. +type PartialFfiResult<'tcx> = Option<FfiResult<'tcx>>; + +bitflags! { + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + struct VisitorState: u8 { + /// For use in (externally-linked) static variables. + const STATIC = 0b000001; + /// For use in functions in general. + const FUNC = 0b000010; + /// For variables in function returns (implicitly: not for static variables). + const FN_RETURN = 0b000100; + /// For variables in functions/variables which are defined in rust. + const DEFINED = 0b001000; + /// For times where we are only defining the type of something + /// (struct/enum/union definitions, FnPtrs). + const THEORETICAL = 0b010000; + } +} + +impl VisitorState { + // The values that can be set. + const STATIC_TY: Self = Self::STATIC; + const ARGUMENT_TY_IN_DEFINITION: Self = + Self::from_bits(Self::FUNC.bits() | Self::DEFINED.bits()).unwrap(); + const RETURN_TY_IN_DEFINITION: Self = + Self::from_bits(Self::FUNC.bits() | Self::FN_RETURN.bits() | Self::DEFINED.bits()).unwrap(); + const ARGUMENT_TY_IN_DECLARATION: Self = Self::FUNC; + const RETURN_TY_IN_DECLARATION: Self = + Self::from_bits(Self::FUNC.bits() | Self::FN_RETURN.bits()).unwrap(); + const ARGUMENT_TY_IN_FNPTR: Self = + Self::from_bits(Self::FUNC.bits() | Self::THEORETICAL.bits()).unwrap(); + const RETURN_TY_IN_FNPTR: Self = + Self::from_bits(Self::FUNC.bits() | Self::THEORETICAL.bits() | Self::FN_RETURN.bits()) + .unwrap(); + + /// Get the proper visitor state for a given function's arguments. + fn argument_from_fnmode(fn_mode: CItemKind) -> Self { + match fn_mode { + CItemKind::Definition => VisitorState::ARGUMENT_TY_IN_DEFINITION, + CItemKind::Declaration => VisitorState::ARGUMENT_TY_IN_DECLARATION, + } + } + + /// Get the proper visitor state for a given function's return type. + fn return_from_fnmode(fn_mode: CItemKind) -> Self { + match fn_mode { + CItemKind::Definition => VisitorState::RETURN_TY_IN_DEFINITION, + CItemKind::Declaration => VisitorState::RETURN_TY_IN_DECLARATION, + } + } + + /// Whether the type is used in a function. + fn is_in_function(self) -> bool { + let ret = self.contains(Self::FUNC); + if ret { + debug_assert!(!self.contains(Self::STATIC)); + } + ret + } + /// Whether the type is used (directly or not) in a function, in return position. + fn is_in_function_return(self) -> bool { + let ret = self.contains(Self::FN_RETURN); + if ret { + debug_assert!(self.is_in_function()); + } + ret + } + /// Whether the type is used (directly or not) in a defined function. + /// In other words, whether or not we allow non-FFI-safe types behind a C pointer, + /// to be treated as an opaque type on the other side of the FFI boundary. + fn is_in_defined_function(self) -> bool { + self.contains(Self::DEFINED) && self.is_in_function() + } + + /// Whether the type is used (directly or not) in a function pointer type. + /// Here, we also allow non-FFI-safe types behind a C pointer, + /// to be treated as an opaque type on the other side of the FFI boundary. + fn is_in_fnptr(self) -> bool { + self.contains(Self::THEORETICAL) && self.is_in_function() + } + + /// Whether we can expect type parameters and co in a given type. + fn can_expect_ty_params(self) -> bool { + // rust-defined functions, as well as FnPtrs + self.contains(Self::THEORETICAL) || self.is_in_defined_function() + } +} + +/// Visitor used to recursively traverse MIR types and evaluate FFI-safety. 
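The composite `VisitorState` constants defined above are plain bitflag unions. A minimal standalone sketch of the same pattern (assumes the `bitflags` 2.x crate; names shortened for illustration), showing how a composed state reduces to `contains` checks:

```rust
use bitflags::bitflags;

bitflags! {
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct State: u8 {
        const FUNC      = 0b001;
        const FN_RETURN = 0b010;
        const DEFINED   = 0b100;
    }
}

impl State {
    // The patch builds its composites via `from_bits(..).unwrap()`; `union` is
    // an equivalent const-friendly way to express the same composition.
    const RETURN_TY_IN_DEFINITION: Self =
        Self::FUNC.union(Self::FN_RETURN).union(Self::DEFINED);

    fn is_in_function_return(self) -> bool {
        self.contains(Self::FN_RETURN)
    }
}

fn main() {
    let state = State::RETURN_TY_IN_DEFINITION;
    assert!(state.is_in_function_return());
    assert!(state.contains(State::FUNC | State::DEFINED));
}
```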
+/// It uses ``check_*`` methods as entrypoints to be called elsewhere, +/// and ``visit_*`` methods to recurse. +struct ImproperCTypesVisitor<'a, 'tcx> { + cx: &'a LateContext<'tcx>, + /// To prevent problems with recursive types, + /// add a types-in-check cache. + cache: FxHashSet<Ty<'tcx>>, + /// The original type being checked, before we recursed + /// to any other types it contains. + base_ty: Ty<'tcx>, + base_fn_mode: CItemKind, +} + +impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { + fn new(cx: &'a LateContext<'tcx>, base_ty: Ty<'tcx>, base_fn_mode: CItemKind) -> Self { + Self { cx, base_ty, base_fn_mode, cache: FxHashSet::default() } + } + + /// Checks if the given field's type is "ffi-safe". + fn check_field_type_for_ffi( + &mut self, + state: VisitorState, + field: &ty::FieldDef, + args: GenericArgsRef<'tcx>, + ) -> FfiResult<'tcx> { + let field_ty = field.ty(self.cx.tcx, args); + let field_ty = self + .cx + .tcx + .try_normalize_erasing_regions(self.cx.typing_env(), field_ty) + .unwrap_or(field_ty); + self.visit_type(state, field_ty) + } + + /// Checks if the given `VariantDef`'s field types are "ffi-safe". + fn check_variant_for_ffi( + &mut self, + state: VisitorState, + ty: Ty<'tcx>, + def: ty::AdtDef<'tcx>, + variant: &ty::VariantDef, + args: GenericArgsRef<'tcx>, + ) -> FfiResult<'tcx> { + use FfiResult::*; + let transparent_with_all_zst_fields = if def.repr().transparent() { + if let Some(field) = super::transparent_newtype_field(self.cx.tcx, variant) { + // Transparent newtypes have at most one non-ZST field which needs to be checked.. + match self.check_field_type_for_ffi(state, field, args) { + FfiUnsafe { ty, .. } if ty.is_unit() => (), + r => return r, + } + + false + } else { + // ..or have only ZST fields, which is FFI-unsafe (unless those fields are all + // `PhantomData`). + true + } + } else { + false + }; + + // We can't completely trust `repr(C)` markings, so make sure the fields are actually safe. + let mut all_phantom = !variant.fields.is_empty(); + for field in &variant.fields { + all_phantom &= match self.check_field_type_for_ffi(state, field, args) { + FfiSafe => false, + // `()` fields are FFI-safe! + FfiUnsafe { ty, .. } if ty.is_unit() => false, + FfiPhantom(..) => true, + r @ FfiUnsafe { .. } => return r, + } + } + + if all_phantom { + FfiPhantom(ty) + } else if transparent_with_all_zst_fields { + FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None } + } else { + FfiSafe + } + } + + /// Checks if the given type is "ffi-safe" (has a stable, well-defined + /// representation which can be exported to C code). + fn visit_type(&mut self, state: VisitorState, ty: Ty<'tcx>) -> FfiResult<'tcx> { + use FfiResult::*; + + let tcx = self.cx.tcx; + + // Protect against infinite recursion, for example + // `struct S(*mut S);`. + // FIXME: A recursion limit is necessary as well, for irregular + // recursive types. 
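As a rough illustration (not part of the patch) of the two `repr(transparent)` cases distinguished by `check_variant_for_ffi` above: a newtype with a single non-ZST field is judged by that field alone, while one made only of ZST fields gets reported when it crosses an FFI boundary.

```rust
use std::marker::PhantomData;

#[repr(transparent)]
pub struct Handle(u32, PhantomData<*mut ()>); // checked like a bare `u32`: FFI-safe

#[repr(transparent)]
pub struct ZstOnly(()); // no non-ZST field: reported as an FFI-unsafe ZST struct

unsafe extern "C" {
    pub fn use_handle(h: Handle); // accepted
    pub fn use_zst(z: ZstOnly); // warns via `improper_ctypes`
}
```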
+ if !self.cache.insert(ty) { + return FfiSafe; + } + + match *ty.kind() { + ty::Adt(def, args) => { + if let Some(boxed) = ty.boxed_ty() + && ( + // FIXME(ctypes): this logic is broken, but it still fits the current tests + state.is_in_defined_function() + || (state.is_in_fnptr() + && matches!(self.base_fn_mode, CItemKind::Definition)) + ) + { + if boxed.is_sized(tcx, self.cx.typing_env()) { + return FfiSafe; + } else { + return FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_box, + help: None, + }; + } + } + if def.is_phantom_data() { + return FfiPhantom(ty); + } + match def.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + if let Some(sym::cstring_type | sym::cstr_type) = + tcx.get_diagnostic_name(def.did()) + && !self.base_ty.is_mutable_ptr() + { + return FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_cstr_reason, + help: Some(fluent::lint_improper_ctypes_cstr_help), + }; + } + + if !def.repr().c() && !def.repr().transparent() { + return FfiUnsafe { + ty, + reason: if def.is_struct() { + fluent::lint_improper_ctypes_struct_layout_reason + } else { + fluent::lint_improper_ctypes_union_layout_reason + }, + help: if def.is_struct() { + Some(fluent::lint_improper_ctypes_struct_layout_help) + } else { + Some(fluent::lint_improper_ctypes_union_layout_help) + }, + }; + } + + if def.non_enum_variant().field_list_has_applicable_non_exhaustive() { + return FfiUnsafe { + ty, + reason: if def.is_struct() { + fluent::lint_improper_ctypes_struct_non_exhaustive + } else { + fluent::lint_improper_ctypes_union_non_exhaustive + }, + help: None, + }; + } + + if def.non_enum_variant().fields.is_empty() { + return FfiUnsafe { + ty, + reason: if def.is_struct() { + fluent::lint_improper_ctypes_struct_fieldless_reason + } else { + fluent::lint_improper_ctypes_union_fieldless_reason + }, + help: if def.is_struct() { + Some(fluent::lint_improper_ctypes_struct_fieldless_help) + } else { + Some(fluent::lint_improper_ctypes_union_fieldless_help) + }, + }; + } + + self.check_variant_for_ffi(state, ty, def, def.non_enum_variant(), args) + } + AdtKind::Enum => { + if def.variants().is_empty() { + // Empty enums are okay... although sort of useless. + return FfiSafe; + } + // Check for a repr() attribute to specify the size of the + // discriminant. + if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none() + { + // Special-case types like `Option<extern fn()>` and `Result<extern fn(), ()>` + if let Some(ty) = + repr_nullable_ptr(self.cx.tcx, self.cx.typing_env(), ty) + { + return self.visit_type(state, ty); + } + + return FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_enum_repr_reason, + help: Some(fluent::lint_improper_ctypes_enum_repr_help), + }; + } + + let non_exhaustive = def.variant_list_has_applicable_non_exhaustive(); + // Check the contained variants. + let ret = def.variants().iter().try_for_each(|variant| { + check_non_exhaustive_variant(non_exhaustive, variant) + .map_break(|reason| FfiUnsafe { ty, reason, help: None })?; + + match self.check_variant_for_ffi(state, ty, def, variant, args) { + FfiSafe => ControlFlow::Continue(()), + r => ControlFlow::Break(r), + } + }); + if let ControlFlow::Break(result) = ret { + return result; + } + + FfiSafe + } + } + } + + ty::Char => FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_char_reason, + help: Some(fluent::lint_improper_ctypes_char_help), + }, + + // It's just extra invariants on the type that you need to uphold, + // but only the base type is relevant for being representable in FFI. 
+ ty::Pat(base, ..) => self.visit_type(state, base), + + // Primitive types with a stable representation. + ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe, + + ty::Slice(_) => FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_slice_reason, + help: Some(fluent::lint_improper_ctypes_slice_help), + }, + + ty::Dynamic(..) => { + FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None } + } + + ty::Str => FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_str_reason, + help: Some(fluent::lint_improper_ctypes_str_help), + }, + + ty::Tuple(..) => FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_tuple_reason, + help: Some(fluent::lint_improper_ctypes_tuple_help), + }, + + ty::RawPtr(ty, _) | ty::Ref(_, ty, _) + if { + (state.is_in_defined_function() || state.is_in_fnptr()) + && ty.is_sized(self.cx.tcx, self.cx.typing_env()) + } => + { + FfiSafe + } + + ty::RawPtr(ty, _) + if match ty.kind() { + ty::Tuple(tuple) => tuple.is_empty(), + _ => false, + } => + { + FfiSafe + } + + ty::RawPtr(ty, _) | ty::Ref(_, ty, _) => self.visit_type(state, ty), + + ty::Array(inner_ty, _) => self.visit_type(state, inner_ty), + + ty::FnPtr(sig_tys, hdr) => { + let sig = sig_tys.with(hdr); + if sig.abi().is_rustic_abi() { + return FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_fnptr_reason, + help: Some(fluent::lint_improper_ctypes_fnptr_help), + }; + } + + let sig = tcx.instantiate_bound_regions_with_erased(sig); + for arg in sig.inputs() { + match self.visit_type(VisitorState::ARGUMENT_TY_IN_FNPTR, *arg) { + FfiSafe => {} + r => return r, + } + } + + let ret_ty = sig.output(); + if ret_ty.is_unit() { + return FfiSafe; + } + + self.visit_type(VisitorState::RETURN_TY_IN_FNPTR, ret_ty) + } + + ty::Foreign(..) => FfiSafe, + + // While opaque types are checked for earlier, if a projection in a struct field + // normalizes to an opaque type, then it will reach this branch. + ty::Alias(ty::Opaque, ..) => { + FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None } + } + + // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe, + // so they are currently ignored for the purposes of this lint. + ty::Param(..) | ty::Alias(ty::Projection | ty::Inherent, ..) + if state.can_expect_ty_params() => + { + FfiSafe + } + + ty::UnsafeBinder(_) => todo!("FIXME(unsafe_binder)"), + + ty::Param(..) + | ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..) + | ty::Infer(..) + | ty::Bound(..) + | ty::Error(_) + | ty::Closure(..) + | ty::CoroutineClosure(..) + | ty::Coroutine(..) + | ty::CoroutineWitness(..) + | ty::Placeholder(..) + | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty), + } + } + + fn visit_for_opaque_ty(&mut self, ty: Ty<'tcx>) -> PartialFfiResult<'tcx> { + struct ProhibitOpaqueTypes; + impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for ProhibitOpaqueTypes { + type Result = ControlFlow<Ty<'tcx>>; + + fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result { + if !ty.has_opaque_types() { + return ControlFlow::Continue(()); + } + + if let ty::Alias(ty::Opaque, ..) 
= ty.kind() { + ControlFlow::Break(ty) + } else { + ty.super_visit_with(self) + } + } + } + + if let Some(ty) = self + .cx + .tcx + .try_normalize_erasing_regions(self.cx.typing_env(), ty) + .unwrap_or(ty) + .visit_with(&mut ProhibitOpaqueTypes) + .break_value() + { + Some(FfiResult::FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_opaque, + help: None, + }) + } else { + None + } + } + + /// Check if the type is array and emit an unsafe type lint. + fn check_for_array_ty(&mut self, ty: Ty<'tcx>) -> PartialFfiResult<'tcx> { + if let ty::Array(..) = ty.kind() { + Some(FfiResult::FfiUnsafe { + ty, + reason: fluent::lint_improper_ctypes_array_reason, + help: Some(fluent::lint_improper_ctypes_array_help), + }) + } else { + None + } + } + + /// Determine the FFI-safety of a single (MIR) type, given the context of how it is used. + fn check_type(&mut self, state: VisitorState, ty: Ty<'tcx>) -> FfiResult<'tcx> { + if let Some(res) = self.visit_for_opaque_ty(ty) { + return res; + } + + let ty = self.cx.tcx.try_normalize_erasing_regions(self.cx.typing_env(), ty).unwrap_or(ty); + + // C doesn't really support passing arrays by value - the only way to pass an array by value + // is through a struct. So, first test that the top level isn't an array, and then + // recursively check the types inside. + if state.is_in_function() { + if let Some(res) = self.check_for_array_ty(ty) { + return res; + } + } + + // Don't report FFI errors for unit return types. This check exists here, and not in + // the caller (where it would make more sense) so that normalization has definitely + // happened. + if state.is_in_function_return() && ty.is_unit() { + return FfiResult::FfiSafe; + } + + self.visit_type(state, ty) + } +} + +impl<'tcx> ImproperCTypesLint { + /// Find any fn-ptr types with external ABIs in `ty`, and FFI-checks them. + /// For example, `Option<extern "C" fn()>` FFI-checks `extern "C" fn()`. + fn check_type_for_external_abi_fnptr( + &mut self, + cx: &LateContext<'tcx>, + state: VisitorState, + hir_ty: &hir::Ty<'tcx>, + ty: Ty<'tcx>, + fn_mode: CItemKind, + ) { + struct FnPtrFinder<'tcx> { + spans: Vec<Span>, + tys: Vec<Ty<'tcx>>, + } + + impl<'tcx> hir::intravisit::Visitor<'_> for FnPtrFinder<'tcx> { + fn visit_ty(&mut self, ty: &'_ hir::Ty<'_, AmbigArg>) { + debug!(?ty); + if let hir::TyKind::FnPtr(hir::FnPtrTy { abi, .. }) = ty.kind + && !abi.is_rustic_abi() + { + self.spans.push(ty.span); + } + + hir::intravisit::walk_ty(self, ty) + } + } + + impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for FnPtrFinder<'tcx> { + type Result = (); + + fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result { + if let ty::FnPtr(_, hdr) = ty.kind() + && !hdr.abi.is_rustic_abi() + { + self.tys.push(ty); + } + + ty.super_visit_with(self) + } + } + + let mut visitor = FnPtrFinder { spans: Vec::new(), tys: Vec::new() }; + ty.visit_with(&mut visitor); + visitor.visit_ty_unambig(hir_ty); + + let all_types = iter::zip(visitor.tys.drain(..), visitor.spans.drain(..)); + for (fn_ptr_ty, span) in all_types { + let mut visitor = ImproperCTypesVisitor::new(cx, fn_ptr_ty, fn_mode); + // FIXME(ctypes): make a check_for_fnptr + let ffi_res = visitor.check_type(state, fn_ptr_ty); + + self.process_ffi_result(cx, span, ffi_res, fn_mode); + } + } + + /// Regardless of a function's need to be "ffi-safe", look for fn-ptr argument/return types + /// that need to be checked for ffi-safety. 
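To make the doc comment above concrete, a hedged example (hypothetical user code) of the signatures this pass is after: the function itself has the default Rust ABI, but the `extern "C"` fn-pointer in its parameter list is still FFI-checked, and the `&str` inside it is what gets flagged.

```rust
// The fn-pointer type carries an external ABI, so its `&str` parameter is
// reported as not FFI-safe even though `install_callback` itself is plain Rust.
pub fn install_callback(cb: extern "C" fn(&str)) {
    let _ = cb;
}
```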
+ fn check_fn_for_external_abi_fnptr( + &mut self, + cx: &LateContext<'tcx>, + fn_mode: CItemKind, + def_id: LocalDefId, + decl: &'tcx hir::FnDecl<'_>, + ) { + let sig = cx.tcx.fn_sig(def_id).instantiate_identity(); + let sig = cx.tcx.instantiate_bound_regions_with_erased(sig); + + for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) { + let state = VisitorState::argument_from_fnmode(fn_mode); + self.check_type_for_external_abi_fnptr(cx, state, input_hir, *input_ty, fn_mode); + } + + if let hir::FnRetTy::Return(ret_hir) = decl.output { + let state = VisitorState::return_from_fnmode(fn_mode); + self.check_type_for_external_abi_fnptr(cx, state, ret_hir, sig.output(), fn_mode); + } + } + + /// For a local definition of a #[repr(C)] struct/enum/union, check that it is indeed FFI-safe. + fn check_reprc_adt( + &mut self, + cx: &LateContext<'tcx>, + item: &'tcx hir::Item<'tcx>, + adt_def: AdtDef<'tcx>, + ) { + debug_assert!( + adt_def.repr().c() && !adt_def.repr().packed() && adt_def.repr().align.is_none() + ); + + // FIXME(ctypes): this following call is awkward. + // is there a way to perform its logic in MIR space rather than HIR space? + // (so that its logic can be absorbed into visitor.visit_struct_or_union) + check_struct_for_power_alignment(cx, item, adt_def); + } + + fn check_foreign_static(&mut self, cx: &LateContext<'tcx>, id: hir::OwnerId, span: Span) { + let ty = cx.tcx.type_of(id).instantiate_identity(); + let mut visitor = ImproperCTypesVisitor::new(cx, ty, CItemKind::Declaration); + let ffi_res = visitor.check_type(VisitorState::STATIC_TY, ty); + self.process_ffi_result(cx, span, ffi_res, CItemKind::Declaration); + } + + /// Check if a function's argument types and result type are "ffi-safe". + fn check_foreign_fn( + &mut self, + cx: &LateContext<'tcx>, + fn_mode: CItemKind, + def_id: LocalDefId, + decl: &'tcx hir::FnDecl<'_>, + ) { + let sig = cx.tcx.fn_sig(def_id).instantiate_identity(); + let sig = cx.tcx.instantiate_bound_regions_with_erased(sig); + + for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) { + let state = VisitorState::argument_from_fnmode(fn_mode); + let mut visitor = ImproperCTypesVisitor::new(cx, *input_ty, fn_mode); + let ffi_res = visitor.check_type(state, *input_ty); + self.process_ffi_result(cx, input_hir.span, ffi_res, fn_mode); + } + + if let hir::FnRetTy::Return(ret_hir) = decl.output { + let state = VisitorState::return_from_fnmode(fn_mode); + let mut visitor = ImproperCTypesVisitor::new(cx, sig.output(), fn_mode); + let ffi_res = visitor.check_type(state, sig.output()); + self.process_ffi_result(cx, ret_hir.span, ffi_res, fn_mode); + } + } + + fn process_ffi_result( + &self, + cx: &LateContext<'tcx>, + sp: Span, + res: FfiResult<'tcx>, + fn_mode: CItemKind, + ) { + match res { + FfiResult::FfiSafe => {} + FfiResult::FfiPhantom(ty) => { + self.emit_ffi_unsafe_type_lint( + cx, + ty, + sp, + fluent::lint_improper_ctypes_only_phantomdata, + None, + fn_mode, + ); + } + FfiResult::FfiUnsafe { ty, reason, help } => { + self.emit_ffi_unsafe_type_lint(cx, ty, sp, reason, help, fn_mode); + } + } + } + + fn emit_ffi_unsafe_type_lint( + &self, + cx: &LateContext<'tcx>, + ty: Ty<'tcx>, + sp: Span, + note: DiagMessage, + help: Option<DiagMessage>, + fn_mode: CItemKind, + ) { + let lint = match fn_mode { + CItemKind::Declaration => IMPROPER_CTYPES, + CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS, + }; + let desc = match fn_mode { + CItemKind::Declaration => "block", + CItemKind::Definition => "fn", + }; + let span_note = if let 
ty::Adt(def, _) = ty.kind() + && let Some(sp) = cx.tcx.hir_span_if_local(def.did()) + { + Some(sp) + } else { + None + }; + cx.emit_span_lint(lint, sp, ImproperCTypes { ty, desc, label: sp, help, note, span_note }); + } +} + +/// `ImproperCTypesDefinitions` checks items outside of foreign items (e.g. stuff that isn't in +/// `extern "C" { }` blocks): +/// +/// - `extern "<abi>" fn` definitions are checked in the same way as the +/// `ImproperCtypesDeclarations` visitor checks functions if `<abi>` is external (e.g. "C"). +/// - All other items which contain types (e.g. other functions, struct definitions, etc) are +/// checked for extern fn-ptrs with external ABIs. +impl<'tcx> LateLintPass<'tcx> for ImproperCTypesLint { + fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, it: &hir::ForeignItem<'tcx>) { + let abi = cx.tcx.hir_get_foreign_abi(it.hir_id()); + + match it.kind { + hir::ForeignItemKind::Fn(sig, _, _) => { + // fnptrs are a special case, they always need to be treated as + // "the element rendered unsafe" because their unsafety doesn't affect + // their surroundings, and their type is often declared inline + if !abi.is_rustic_abi() { + self.check_foreign_fn(cx, CItemKind::Declaration, it.owner_id.def_id, sig.decl); + } else { + self.check_fn_for_external_abi_fnptr( + cx, + CItemKind::Declaration, + it.owner_id.def_id, + sig.decl, + ); + } + } + hir::ForeignItemKind::Static(ty, _, _) if !abi.is_rustic_abi() => { + self.check_foreign_static(cx, it.owner_id, ty.span); + } + hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Type => (), + } + } + + fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) { + match item.kind { + hir::ItemKind::Static(_, _, ty, _) + | hir::ItemKind::Const(_, _, ty, _) + | hir::ItemKind::TyAlias(_, _, ty) => { + self.check_type_for_external_abi_fnptr( + cx, + VisitorState::STATIC_TY, + ty, + cx.tcx.type_of(item.owner_id).instantiate_identity(), + CItemKind::Definition, + ); + } + // See `check_fn` for declarations, `check_foreign_items` for definitions in extern blocks + hir::ItemKind::Fn { .. } => {} + hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) | hir::ItemKind::Enum(..) => { + // looking for extern FnPtr:s is delegated to `check_field_def`. + let adt_def: AdtDef<'tcx> = cx.tcx.adt_def(item.owner_id.to_def_id()); + + if adt_def.repr().c() && !adt_def.repr().packed() && adt_def.repr().align.is_none() + { + self.check_reprc_adt(cx, item, adt_def); + } + } + + // Doesn't define something that can contain a external type to be checked. + hir::ItemKind::Impl(..) + | hir::ItemKind::TraitAlias(..) + | hir::ItemKind::Trait(..) + | hir::ItemKind::GlobalAsm { .. } + | hir::ItemKind::ForeignMod { .. } + | hir::ItemKind::Mod(..) + | hir::ItemKind::Macro(..) + | hir::ItemKind::Use(..) + | hir::ItemKind::ExternCrate(..) => {} + } + } + + fn check_field_def(&mut self, cx: &LateContext<'tcx>, field: &'tcx hir::FieldDef<'tcx>) { + self.check_type_for_external_abi_fnptr( + cx, + VisitorState::STATIC_TY, + field.ty, + cx.tcx.type_of(field.def_id).instantiate_identity(), + CItemKind::Definition, + ); + } + + fn check_fn( + &mut self, + cx: &LateContext<'tcx>, + kind: hir::intravisit::FnKind<'tcx>, + decl: &'tcx hir::FnDecl<'_>, + _: &'tcx hir::Body<'_>, + _: Span, + id: LocalDefId, + ) { + use hir::intravisit::FnKind; + + let abi = match kind { + FnKind::ItemFn(_, _, header, ..) => header.abi, + FnKind::Method(_, sig, ..) 
=> sig.header.abi, + _ => return, + }; + + // fnptrs are a special case, they always need to be treated as + // "the element rendered unsafe" because their unsafety doesn't affect + // their surroundings, and their type is often declared inline + if !abi.is_rustic_abi() { + self.check_foreign_fn(cx, CItemKind::Definition, id, decl); + } else { + self.check_fn_for_external_abi_fnptr(cx, CItemKind::Definition, id, decl); + } + } +} diff --git a/compiler/rustc_llvm/Cargo.toml b/compiler/rustc_llvm/Cargo.toml index cd352ce3d0f..0dfd1b13df5 100644 --- a/compiler/rustc_llvm/Cargo.toml +++ b/compiler/rustc_llvm/Cargo.toml @@ -11,7 +11,7 @@ libc = "0.2.73" [build-dependencies] # tidy-alphabetical-start # `cc` updates often break things, so we pin it here. Cargo enforces "max 1 semver-compat version -# per crate", so if you change this, you need to also change it in `rustc_codegen_ssa`. +# per crate", so if you change this, you need to also change it in `rustc_codegen_ssa` and `rustc_windows_rc`. cc = "=1.2.16" # tidy-alphabetical-end diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs index d01f79dcade..9d21d0d22e3 100644 --- a/compiler/rustc_llvm/build.rs +++ b/compiler/rustc_llvm/build.rs @@ -174,10 +174,10 @@ fn main() { // Prevent critical warnings when we're compiling from rust-lang/rust CI, // except on MSVC, as the compiler throws warnings that are only reported - // for this platform. See https://github.com/rust-lang/rust/pull/145031#issuecomment-3162677202 - // FIXME(llvm22): It looks like the specific problem code has been removed - // in https://github.com/llvm/llvm-project/commit/e8fc808bf8e78a3c80d1f8e293a92677b92366dd, - // retry msvc once we bump our LLVM version. + // for this platform. See https://github.com/rust-lang/rust/pull/145031#issuecomment-3162677202. + // Moreover, LLVM generally guarantees warning-freedom only when building with Clang, as other + // compilers have too many false positives. This is typically the case for MSVC, which throws + // many false-positive warnings. We keep it excluded, for these reasons. 
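For context, the gating described in the comment above looks like this in a self-contained `cc`-based build script (a sketch only; the source file name is hypothetical and not part of the patch):

```rust
// build.rs
fn main() {
    let target = std::env::var("TARGET").unwrap_or_default();
    let mut cfg = cc::Build::new();
    cfg.file("native/shim.c"); // hypothetical C source
    // Promote warnings to errors only on CI, and never for MSVC-hosted builds,
    // matching the policy explained above.
    if std::env::var_os("CI").is_some() && !target.contains("msvc") {
        cfg.warnings_into_errors(true);
    }
    cfg.compile("shim");
}
```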
if std::env::var_os("CI").is_some() && !target.contains("msvc") { cfg.warnings_into_errors(true); } @@ -254,7 +254,10 @@ fn main() { println!("cargo:rustc-link-lib=kstat"); } - if (target.starts_with("arm") && !target.contains("freebsd") && !target.contains("ohos")) + if (target.starts_with("arm") + && !target.starts_with("arm64") + && !target.contains("freebsd") + && !target.contains("ohos")) || target.starts_with("mips-") || target.starts_with("mipsel-") || target.starts_with("powerpc-") diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index dd492325814..3bb1533c2fe 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -1568,12 +1568,11 @@ extern "C" bool LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, return true; } -extern "C" LLVMRustThinLTOBuffer * -LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin, bool emit_summary) { +extern "C" LLVMRustThinLTOBuffer *LLVMRustThinLTOBufferCreate(LLVMModuleRef M, + bool is_thin) { auto Ret = std::make_unique<LLVMRustThinLTOBuffer>(); { auto OS = raw_string_ostream(Ret->data); - auto ThinLinkOS = raw_string_ostream(Ret->thin_link_data); { if (is_thin) { PassBuilder PB; @@ -1587,11 +1586,7 @@ LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin, bool emit_summary) { PB.registerLoopAnalyses(LAM); PB.crossRegisterProxies(LAM, FAM, CGAM, MAM); ModulePassManager MPM; - // We only pass ThinLinkOS to be filled in if we want the summary, - // because otherwise LLVM does extra work and may double-emit some - // errors or warnings. - MPM.addPass( - ThinLTOBitcodeWriterPass(OS, emit_summary ? &ThinLinkOS : nullptr)); + MPM.addPass(ThinLTOBitcodeWriterPass(OS, nullptr)); MPM.run(*unwrap(M), MAM); } else { WriteBitcodeToFile(*unwrap(M), OS); diff --git a/compiler/rustc_log/Cargo.toml b/compiler/rustc_log/Cargo.toml index c673d51a1d4..72f68d685cd 100644 --- a/compiler/rustc_log/Cargo.toml +++ b/compiler/rustc_log/Cargo.toml @@ -5,13 +5,12 @@ edition = "2024" [dependencies] # tidy-alphabetical-start -tracing = "0.1.28" -tracing-core = "=0.1.30" # FIXME(Nilstrieb) tracing has a deadlock: https://github.com/tokio-rs/tracing/issues/2635 +tracing = "0.1.41" tracing-subscriber = { version = "0.3.3", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] } tracing-tree = "0.3.1" # tidy-alphabetical-end [features] # tidy-alphabetical-start -max_level_info = ['tracing/max_level_info'] +max_level_info = ['tracing/max_level_info', 'tracing/release_max_level_info'] # tidy-alphabetical-end diff --git a/compiler/rustc_log/src/lib.rs b/compiler/rustc_log/src/lib.rs index df648bbd489..26475eec1c1 100644 --- a/compiler/rustc_log/src/lib.rs +++ b/compiler/rustc_log/src/lib.rs @@ -38,7 +38,7 @@ use std::fmt::{self, Display}; use std::io::{self, IsTerminal}; use tracing::dispatcher::SetGlobalDefaultError; -use tracing_core::{Event, Subscriber}; +use tracing::{Event, Subscriber}; use tracing_subscriber::filter::{Directive, EnvFilter, LevelFilter}; use tracing_subscriber::fmt::FmtContext; use tracing_subscriber::fmt::format::{self, FormatEvent, FormatFields}; diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs index 5d776ea581d..9e23da88f5e 100644 --- a/compiler/rustc_metadata/src/creader.rs +++ b/compiler/rustc_metadata/src/creader.rs @@ -412,7 +412,7 @@ impl CStore { match (&left_name_val, &right_name_val) { (Some(l), Some(r)) => match 
l.1.opt.cmp(&r.1.opt) { cmp::Ordering::Equal => { - if l.0.tech_value != r.0.tech_value { + if !l.1.consistent(&tcx.sess.opts, Some(&r.1)) { report_diff( &l.0.prefix, &l.0.name, @@ -424,20 +424,28 @@ impl CStore { right_name_val = None; } cmp::Ordering::Greater => { - report_diff(&r.0.prefix, &r.0.name, None, Some(&r.1.value_name)); + if !r.1.consistent(&tcx.sess.opts, None) { + report_diff(&r.0.prefix, &r.0.name, None, Some(&r.1.value_name)); + } right_name_val = None; } cmp::Ordering::Less => { - report_diff(&l.0.prefix, &l.0.name, Some(&l.1.value_name), None); + if !l.1.consistent(&tcx.sess.opts, None) { + report_diff(&l.0.prefix, &l.0.name, Some(&l.1.value_name), None); + } left_name_val = None; } }, (Some(l), None) => { - report_diff(&l.0.prefix, &l.0.name, Some(&l.1.value_name), None); + if !l.1.consistent(&tcx.sess.opts, None) { + report_diff(&l.0.prefix, &l.0.name, Some(&l.1.value_name), None); + } left_name_val = None; } (None, Some(r)) => { - report_diff(&r.0.prefix, &r.0.name, None, Some(&r.1.value_name)); + if !r.1.consistent(&tcx.sess.opts, None) { + report_diff(&r.0.prefix, &r.0.name, None, Some(&r.1.value_name)); + } right_name_val = None; } (None, None) => break, diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index 110b26c62ef..0c8d1f32e99 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -1194,10 +1194,6 @@ impl<'a> CrateMetadataRef<'a> { self.root.tables.default_fields.get(self, id).map(|d| d.decode(self)) } - fn get_trait_item_def_id(self, id: DefIndex) -> Option<DefId> { - self.root.tables.trait_item_def_id.get(self, id).map(|d| d.decode_from_cdata(self)) - } - fn get_expn_that_defined(self, id: DefIndex, sess: &Session) -> ExpnId { self.root .tables @@ -1359,14 +1355,9 @@ impl<'a> CrateMetadataRef<'a> { } _ => bug!("cannot get associated-item of `{:?}`", self.def_key(id)), }; - let container = self.root.tables.assoc_container.get(self, id).unwrap(); + let container = self.root.tables.assoc_container.get(self, id).unwrap().decode(self); - ty::AssocItem { - kind, - def_id: self.local_def_id(id), - trait_item_def_id: self.get_trait_item_def_id(id), - container, - } + ty::AssocItem { kind, def_id: self.local_def_id(id), container } } fn get_ctor(self, node_id: DefIndex) -> Option<(CtorKind, DefId)> { diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index a7e7e9985f4..db66938457f 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -22,7 +22,7 @@ use rustc_middle::middle::dependency_format::Linkage; use rustc_middle::mir::interpret; use rustc_middle::query::Providers; use rustc_middle::traits::specialization_graph; -use rustc_middle::ty::AssocItemContainer; +use rustc_middle::ty::AssocContainer; use rustc_middle::ty::codec::TyEncoder; use rustc_middle::ty::fast_reject::{self, TreatParams}; use rustc_middle::{bug, span_bug}; @@ -1254,8 +1254,8 @@ fn should_encode_type(tcx: TyCtxt<'_>, def_id: LocalDefId, def_kind: DefKind) -> DefKind::AssocTy => { let assoc_item = tcx.associated_item(def_id); match assoc_item.container { - ty::AssocItemContainer::Impl => true, - ty::AssocItemContainer::Trait => assoc_item.defaultness(tcx).has_value(), + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => true, + ty::AssocContainer::Trait => assoc_item.defaultness(tcx).has_value(), } } DefKind::TyParam => { @@ -1725,24 +1725,20 @@ impl<'a, 
'tcx> EncodeContext<'a, 'tcx> { let tcx = self.tcx; let item = tcx.associated_item(def_id); - self.tables.defaultness.set_some(def_id.index, item.defaultness(tcx)); - self.tables.assoc_container.set_some(def_id.index, item.container); - - match item.container { - AssocItemContainer::Trait => { - if item.is_type() { - self.encode_explicit_item_bounds(def_id); - self.encode_explicit_item_self_bounds(def_id); - if tcx.is_conditionally_const(def_id) { - record_defaulted_array!(self.tables.explicit_implied_const_bounds[def_id] - <- self.tcx.explicit_implied_const_bounds(def_id).skip_binder()); - } - } - } - AssocItemContainer::Impl => { - if let Some(trait_item_def_id) = item.trait_item_def_id { - self.tables.trait_item_def_id.set_some(def_id.index, trait_item_def_id.into()); - } + if matches!(item.container, AssocContainer::Trait | AssocContainer::TraitImpl(_)) { + self.tables.defaultness.set_some(def_id.index, item.defaultness(tcx)); + } + + record!(self.tables.assoc_container[def_id] <- item.container); + + if let AssocContainer::Trait = item.container + && item.is_type() + { + self.encode_explicit_item_bounds(def_id); + self.encode_explicit_item_self_bounds(def_id); + if tcx.is_conditionally_const(def_id) { + record_defaulted_array!(self.tables.explicit_implied_const_bounds[def_id] + <- self.tcx.explicit_implied_const_bounds(def_id).skip_binder()); } } if let ty::AssocKind::Type { data: ty::AssocTypeData::Rpitit(rpitit_info) } = item.kind { diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs index 1f7d142d330..720970bbaf9 100644 --- a/compiler/rustc_metadata/src/rmeta/mod.rs +++ b/compiler/rustc_metadata/src/rmeta/mod.rs @@ -447,7 +447,6 @@ define_tables! { coroutine_by_move_body_def_id: Table<DefIndex, RawDefId>, eval_static_initializer: Table<DefIndex, LazyValue<mir::interpret::ConstAllocation<'static>>>, trait_def: Table<DefIndex, LazyValue<ty::TraitDef>>, - trait_item_def_id: Table<DefIndex, RawDefId>, expn_that_defined: Table<DefIndex, LazyValue<ExpnId>>, default_fields: Table<DefIndex, LazyValue<DefId>>, params_in_repr: Table<DefIndex, LazyValue<DenseBitSet<u32>>>, @@ -459,7 +458,7 @@ define_tables! { def_keys: Table<DefIndex, LazyValue<DefKey>>, proc_macro_quoted_spans: Table<usize, LazyValue<Span>>, variant_data: Table<DefIndex, LazyValue<VariantData>>, - assoc_container: Table<DefIndex, ty::AssocItemContainer>, + assoc_container: Table<DefIndex, LazyValue<ty::AssocContainer>>, macro_definition: Table<DefIndex, LazyValue<ast::DelimArgs>>, proc_macro: Table<DefIndex, MacroKind>, deduced_param_attrs: Table<DefIndex, LazyArray<DeducedParamAttrs>>, diff --git a/compiler/rustc_metadata/src/rmeta/parameterized.rs b/compiler/rustc_metadata/src/rmeta/parameterized.rs index 34180001f80..4b2dc2c814e 100644 --- a/compiler/rustc_metadata/src/rmeta/parameterized.rs +++ b/compiler/rustc_metadata/src/rmeta/parameterized.rs @@ -102,7 +102,7 @@ trivially_parameterized_over_tcx! { rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault, rustc_middle::mir::ConstQualifs, rustc_middle::ty::AnonConstKind, - rustc_middle::ty::AssocItemContainer, + rustc_middle::ty::AssocContainer, rustc_middle::ty::AsyncDestructor, rustc_middle::ty::Asyncness, rustc_middle::ty::DeducedParamAttrs, diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs index 2cb07a28a8a..a882ee4f2b9 100644 --- a/compiler/rustc_metadata/src/rmeta/table.rs +++ b/compiler/rustc_metadata/src/rmeta/table.rs @@ -222,13 +222,6 @@ fixed_size_enum! 
{ } fixed_size_enum! { - ty::AssocItemContainer { - ( Trait ) - ( Impl ) - } -} - -fixed_size_enum! { MacroKind { ( Attr ) ( Bang ) diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs index 7520bc262c6..dad402ec696 100644 --- a/compiler/rustc_middle/src/error.rs +++ b/compiler/rustc_middle/src/error.rs @@ -72,7 +72,7 @@ pub enum TypeMismatchReason { #[help] pub(crate) struct RecursionLimitReached<'tcx> { pub ty: Ty<'tcx>, - pub suggested_limit: rustc_session::Limit, + pub suggested_limit: rustc_hir::limit::Limit, } #[derive(Diagnostic)] diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs index 4fe4c2dadee..153605ee7f8 100644 --- a/compiler/rustc_middle/src/infer/canonical.rs +++ b/compiler/rustc_middle/src/infer/canonical.rs @@ -27,7 +27,6 @@ use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::Lock; use rustc_macros::{HashStable, TypeFoldable, TypeVisitable}; pub use rustc_type_ir as ir; -pub use rustc_type_ir::CanonicalTyVarKind; use smallvec::SmallVec; use crate::mir::ConstraintCategory; diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index 164433aede2..5764a9c84ee 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -579,7 +579,7 @@ impl<'tcx> Display for Const<'tcx> { } /////////////////////////////////////////////////////////////////////////// -/// Const-related utilities +// Const-related utilities impl<'tcx> TyCtxt<'tcx> { pub fn span_as_caller_location(self, span: Span) -> ConstValue { diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index 2ea92a39d48..67962813ae4 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -724,6 +724,11 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> } // If we get here, we have to check per-byte provenance, and join them together. let prov = 'prov: { + if !Prov::OFFSET_IS_ADDR { + // FIXME(#146291): We need to ensure that we don't mix different pointers with + // the same provenance. + return Err(AllocError::ReadPartialPointer(range.start)); + } // Initialize with first fragment. Must have index 0. let Some((mut joint_prov, 0)) = self.provenance.get_byte(range.start, cx) else { break 'prov None; diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs index dbbd95408c8..720e58d7aa0 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs @@ -11,6 +11,7 @@ use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use tracing::trace; use super::{AllocRange, CtfeProvenance, Provenance, alloc_range}; +use crate::mir::interpret::{AllocError, AllocResult}; /// Stores the provenance information of pointers stored in memory. #[derive(Clone, PartialEq, Eq, Hash, Debug)] @@ -137,6 +138,11 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { let Some(bytes) = self.bytes.as_deref_mut() else { return true; }; + if !Prov::OFFSET_IS_ADDR { + // FIXME(#146291): We need to ensure that we don't mix different pointers with + // the same provenance. 
+ return false; + } let ptr_size = cx.data_layout().pointer_size(); while let Some((offset, (prov, _))) = bytes.iter().next().copied() { // Check if this fragment starts a pointer. @@ -285,7 +291,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { dest: Size, count: u64, cx: &impl HasDataLayout, - ) -> ProvenanceCopy<Prov> { + ) -> AllocResult<ProvenanceCopy<Prov>> { let shift_offset = move |idx, offset| { // compute offset for current repetition let dest_offset = dest + src.size * idx; // `Size` operations @@ -363,6 +369,12 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } trace!("byte provenances: {bytes:?}"); + if !bytes.is_empty() && !Prov::OFFSET_IS_ADDR { + // FIXME(#146291): We need to ensure that we don't mix different pointers with + // the same provenance. + return Err(AllocError::ReadPartialPointer(src.start)); + } + // And again a buffer for the new list on the target side. let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize)); for i in 0..count { @@ -373,7 +385,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { dest_bytes_box = Some(dest_bytes.into_boxed_slice()); } - ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box } + Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box }) } /// Applies a provenance copy. diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index bed99a4ff2a..9762e0f21da 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -386,7 +386,16 @@ impl<'tcx> GlobalAlloc<'tcx> { .expect("statics should not have generic parameters"); let layout = tcx.layout_of(typing_env.as_query_input(ty)).unwrap(); assert!(layout.is_sized()); - (layout.size, layout.align.abi) + + // Take over-alignment from attributes into account. + let align = match tcx.codegen_fn_attrs(def_id).alignment { + Some(align_from_attribute) => { + Ord::max(align_from_attribute, layout.align.abi) + } + None => layout.align.abi, + }; + + (layout.size, align) } } GlobalAlloc::Memory(alloc) => { diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs index e25f35c59c2..ecf35d9dd6d 100644 --- a/compiler/rustc_middle/src/mir/interpret/queries.rs +++ b/compiler/rustc_middle/src/mir/interpret/queries.rs @@ -41,7 +41,7 @@ impl<'tcx> TyCtxt<'tcx> { let instance = ty::Instance::new_raw(def_id, args); let cid = GlobalId { instance, promoted: None }; let typing_env = ty::TypingEnv::post_analysis(self, def_id); - let inputs = self.erase_regions(typing_env.as_query_input(cid)); + let inputs = self.erase_and_anonymize_regions(typing_env.as_query_input(cid)); self.eval_to_allocation_raw(inputs) } @@ -172,8 +172,9 @@ impl<'tcx> TyCtxt<'tcx> { ) -> EvalToConstValueResult<'tcx> { // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should // improve caching of queries. - let inputs = - self.erase_regions(typing_env.with_post_analysis_normalized(self).as_query_input(cid)); + let inputs = self.erase_and_anonymize_regions( + typing_env.with_post_analysis_normalized(self).as_query_input(cid), + ); if !span.is_dummy() { // The query doesn't know where it is being invoked, so we need to fix the span. self.at(span).eval_to_const_value_raw(inputs).map_err(|e| e.with_span(span)) @@ -192,8 +193,9 @@ impl<'tcx> TyCtxt<'tcx> { ) -> ConstToValTreeResult<'tcx> { // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should // improve caching of queries. 
- let inputs = - self.erase_regions(typing_env.with_post_analysis_normalized(self).as_query_input(cid)); + let inputs = self.erase_and_anonymize_regions( + typing_env.with_post_analysis_normalized(self).as_query_input(cid), + ); debug!(?inputs); let res = if !span.is_dummy() { // The query doesn't know where it is being invoked, so we need to fix the span. diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index a7d99f513a1..f9d0a5f0a3b 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -78,8 +78,9 @@ impl<'dis, 'de, 'tcx> MirDumper<'dis, 'de, 'tcx> { pub fn new(tcx: TyCtxt<'tcx>, pass_name: &'static str, body: &Body<'tcx>) -> Option<Self> { let dump_enabled = if let Some(ref filters) = tcx.sess.opts.unstable_opts.dump_mir { // see notes on #41697 below - let node_path = - ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id())); + let node_path = ty::print::with_no_trimmed_paths!( + ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id())) + ); filters.split('|').any(|or_filter| { or_filter.split('&').all(|and_filter| { let and_filter_trimmed = and_filter.trim(); @@ -173,9 +174,10 @@ impl<'dis, 'de, 'tcx> MirDumper<'dis, 'de, 'tcx> { // trigger `type_of`, and this can run while we are already attempting to evaluate `type_of`. pub fn dump_mir_to_writer(&self, body: &Body<'tcx>, w: &mut dyn io::Write) -> io::Result<()> { // see notes on #41697 above - let def_path = ty::print::with_forced_impl_filename_line!( - self.tcx().def_path_str(body.source.def_id()) - ); + let def_path = + ty::print::with_no_trimmed_paths!(ty::print::with_forced_impl_filename_line!( + self.tcx().def_path_str(body.source.def_id()) + )); // ignore-tidy-odd-backticks the literal below is fine write!(w, "// MIR for `{def_path}")?; match body.source.promoted { diff --git a/compiler/rustc_middle/src/mir/statement.rs b/compiler/rustc_middle/src/mir/statement.rs index 6e52bc775ef..ec2a8e86077 100644 --- a/compiler/rustc_middle/src/mir/statement.rs +++ b/compiler/rustc_middle/src/mir/statement.rs @@ -408,14 +408,14 @@ impl<'tcx> Place<'tcx> { self.as_ref().project_deeper(more_projections, tcx) } - pub fn ty_from<D: ?Sized>( + pub fn ty_from<D>( local: Local, projection: &[PlaceElem<'tcx>], local_decls: &D, tcx: TyCtxt<'tcx>, ) -> PlaceTy<'tcx> where - D: HasLocalDecls<'tcx>, + D: ?Sized + HasLocalDecls<'tcx>, { PlaceTy::from_ty(local_decls.local_decls()[local].ty).multi_projection_ty(tcx, projection) } @@ -529,9 +529,9 @@ impl<'tcx> PlaceRef<'tcx> { Place { local: self.local, projection: tcx.mk_place_elems(new_projections) } } - pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx> + pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx> where - D: HasLocalDecls<'tcx>, + D: ?Sized + HasLocalDecls<'tcx>, { Place::ty_from(self.local, self.projection, local_decls, tcx) } @@ -630,9 +630,9 @@ impl<'tcx> Operand<'tcx> { if let ty::FnDef(def_id, args) = *const_ty.kind() { Some((def_id, args)) } else { None } } - pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx> + pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx> where - D: HasLocalDecls<'tcx>, + D: ?Sized + HasLocalDecls<'tcx>, { match self { &Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty, @@ -640,9 +640,9 @@ impl<'tcx> Operand<'tcx> { } } - pub fn span<D: ?Sized>(&self, local_decls: &D) -> Span + pub fn span<D>(&self, 
local_decls: &D) -> Span where - D: HasLocalDecls<'tcx>, + D: ?Sized + HasLocalDecls<'tcx>, { match self { &Operand::Copy(ref l) | &Operand::Move(ref l) => { @@ -674,7 +674,7 @@ impl<'tcx> ConstOperand<'tcx> { } /////////////////////////////////////////////////////////////////////////// -/// Rvalues +// Rvalues pub enum RvalueInitializationState { Shallow, @@ -721,9 +721,9 @@ impl<'tcx> Rvalue<'tcx> { } } - pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx> + pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx> where - D: HasLocalDecls<'tcx>, + D: ?Sized + HasLocalDecls<'tcx>, { match *self { Rvalue::Use(ref operand) => operand.ty(local_decls, tcx), diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 904d78d69b6..b498b7b8912 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -1415,6 +1415,24 @@ impl PlaceContext { ) } + /// Returns `true` if this place context may be used to know the address of the given place. + #[inline] + pub fn may_observe_address(self) -> bool { + matches!( + self, + PlaceContext::NonMutatingUse( + NonMutatingUseContext::SharedBorrow + | NonMutatingUseContext::RawBorrow + | NonMutatingUseContext::FakeBorrow + ) | PlaceContext::MutatingUse( + MutatingUseContext::Drop + | MutatingUseContext::Borrow + | MutatingUseContext::RawBorrow + | MutatingUseContext::AsmOutput + ) + ) + } + /// Returns `true` if this place context represents a storage live or storage dead marker. #[inline] pub fn is_storage_marker(self) -> bool { diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index bea2191c560..4c00b769237 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -318,7 +318,7 @@ trivial! { rustc_middle::traits::WellFormedLoc, rustc_middle::ty::adjustment::CoerceUnsizedInfo, rustc_middle::ty::AssocItem, - rustc_middle::ty::AssocItemContainer, + rustc_middle::ty::AssocContainer, rustc_middle::ty::Asyncness, rustc_middle::ty::AsyncDestructor, rustc_middle::ty::BoundVariableKind, diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 874cee54c7c..0e645a3aae4 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -135,7 +135,7 @@ use crate::traits::{ }; use crate::ty::fast_reject::SimplifiedType; use crate::ty::layout::ValidityRequirement; -use crate::ty::print::{PrintTraitRefExt, describe_as_module}; +use crate::ty::print::PrintTraitRefExt; use crate::ty::util::AlwaysRequiresDrop; use crate::ty::{ self, CrateInherentImpls, GenericArg, GenericArgsRef, PseudoCanonicalInput, SizedTraitKind, Ty, @@ -761,9 +761,9 @@ rustc_queries! { } /// Erases regions from `ty` to yield a new type. - /// Normally you would just use `tcx.erase_regions(value)`, + /// Normally you would just use `tcx.erase_and_anonymize_regions(value)`, /// however, which uses this query as a kind of cache. - query erase_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> { + query erase_and_anonymize_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> { // This query is not expected to have input -- as a result, it // is not a good candidates for "replay" because it is essentially a // pure function of its input (and hence the expectation is that @@ -1881,6 +1881,7 @@ rustc_queries! { } /// Returns whether the impl or associated function has the `default` keyword. + /// Note: This will ICE on inherent impl items. 
Consider using `AssocItem::defaultness`. query defaultness(def_id: DefId) -> hir::Defaultness { desc { |tcx| "looking up whether `{}` has `default`", tcx.def_path_str(def_id) } separate_provide_extern @@ -2731,3 +2732,12 @@ rustc_queries! { rustc_with_all_queries! { define_callbacks! } rustc_feedable_queries! { define_feedable! } + +fn describe_as_module(def_id: impl Into<LocalDefId>, tcx: TyCtxt<'_>) -> String { + let def_id = def_id.into(); + if def_id.is_top_level_module() { + "top-level module".to_string() + } else { + format!("module `{}`", tcx.def_path_str(def_id)) + } +} diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs index c9b9ec771b3..463e4c58805 100644 --- a/compiler/rustc_middle/src/ty/abstract_const.rs +++ b/compiler/rustc_middle/src/ty/abstract_const.rs @@ -55,7 +55,7 @@ impl<'tcx> TyCtxt<'tcx> { ty::ConstKind::Unevaluated(uv) => match self.tcx.thir_abstract_const(uv.def) { Err(e) => ty::Const::new_error(self.tcx, e), Ok(Some(bac)) => { - let args = self.tcx.erase_regions(uv.args); + let args = self.tcx.erase_and_anonymize_regions(uv.args); let bac = bac.instantiate(self.tcx, args); return bac.fold_with(self); } diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs index a902a8a61e5..768646c7630 100644 --- a/compiler/rustc_middle/src/ty/assoc.rs +++ b/compiler/rustc_middle/src/ty/assoc.rs @@ -5,15 +5,17 @@ use rustc_hir::def::{DefKind, Namespace}; use rustc_hir::def_id::DefId; use rustc_hir::find_attr; use rustc_macros::{Decodable, Encodable, HashStable}; -use rustc_span::{Ident, Symbol}; +use rustc_span::{ErrorGuaranteed, Ident, Symbol}; use super::{TyCtxt, Visibility}; use crate::ty; #[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable, Hash, Encodable, Decodable)] -pub enum AssocItemContainer { +pub enum AssocContainer { Trait, - Impl, + InherentImpl, + /// The `DefId` points to the trait item being implemented. + TraitImpl(Result<DefId, ErrorGuaranteed>), } /// Information about an associated item @@ -21,11 +23,7 @@ pub enum AssocItemContainer { pub struct AssocItem { pub def_id: DefId, pub kind: AssocKind, - pub container: AssocItemContainer, - - /// If this is an item in an impl of a trait then this is the `DefId` of - /// the associated item on the trait that this implements. - pub trait_item_def_id: Option<DefId>, + pub container: AssocContainer, } impl AssocItem { @@ -55,7 +53,34 @@ impl AssocItem { /// /// [`type_of`]: crate::ty::TyCtxt::type_of pub fn defaultness(&self, tcx: TyCtxt<'_>) -> hir::Defaultness { - tcx.defaultness(self.def_id) + match self.container { + AssocContainer::InherentImpl => hir::Defaultness::Final, + AssocContainer::Trait | AssocContainer::TraitImpl(_) => tcx.defaultness(self.def_id), + } + } + + pub fn expect_trait_impl(&self) -> Result<DefId, ErrorGuaranteed> { + let AssocContainer::TraitImpl(trait_item_id) = self.container else { + bug!("expected item to be in a trait impl: {:?}", self.def_id); + }; + trait_item_id + } + + /// If this is a trait impl item, returns the `DefId` of the trait item this implements. + /// Otherwise, returns `DefId` for self. Returns an Err in case the trait item was not + /// resolved successfully. 
+ pub fn trait_item_or_self(&self) -> Result<DefId, ErrorGuaranteed> { + match self.container { + AssocContainer::TraitImpl(id) => id, + AssocContainer::Trait | AssocContainer::InherentImpl => Ok(self.def_id), + } + } + + pub fn trait_item_def_id(&self) -> Option<DefId> { + match self.container { + AssocContainer::TraitImpl(Ok(id)) => Some(id), + _ => None, + } } #[inline] @@ -71,16 +96,18 @@ impl AssocItem { #[inline] pub fn trait_container(&self, tcx: TyCtxt<'_>) -> Option<DefId> { match self.container { - AssocItemContainer::Impl => None, - AssocItemContainer::Trait => Some(tcx.parent(self.def_id)), + AssocContainer::InherentImpl | AssocContainer::TraitImpl(_) => None, + AssocContainer::Trait => Some(tcx.parent(self.def_id)), } } #[inline] pub fn impl_container(&self, tcx: TyCtxt<'_>) -> Option<DefId> { match self.container { - AssocItemContainer::Impl => Some(tcx.parent(self.def_id)), - AssocItemContainer::Trait => None, + AssocContainer::InherentImpl | AssocContainer::TraitImpl(_) => { + Some(tcx.parent(self.def_id)) + } + AssocContainer::Trait => None, } } @@ -156,11 +183,11 @@ impl AssocItem { return false; } - let def_id = match (self.container, self.trait_item_def_id) { - (AssocItemContainer::Trait, _) => self.def_id, - (AssocItemContainer::Impl, Some(trait_item_did)) => trait_item_did, - // Inherent impl but this attr is only applied to trait assoc items. - (AssocItemContainer::Impl, None) => return true, + let def_id = match self.container { + AssocContainer::Trait => self.def_id, + AssocContainer::TraitImpl(Ok(trait_item_did)) => trait_item_did, + AssocContainer::TraitImpl(Err(_)) => return false, + AssocContainer::InherentImpl => return true, }; find_attr!(tcx.get_all_attrs(def_id), AttributeKind::TypeConst(_)) } diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index 95759d1f31a..3f37595d0ee 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -216,10 +216,7 @@ impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for ty::ParamEnv<'tcx> { #[inline] fn decode_arena_allocable<'tcx, D: TyDecoder<'tcx>, T: ArenaAllocatable<'tcx> + Decodable<D>>( decoder: &mut D, -) -> &'tcx T -where - D: TyDecoder<'tcx>, -{ +) -> &'tcx T { decoder.interner().arena.alloc(Decodable::decode(decoder)) } @@ -230,10 +227,7 @@ fn decode_arena_allocable_slice< T: ArenaAllocatable<'tcx> + Decodable<D>, >( decoder: &mut D, -) -> &'tcx [T] -where - D: TyDecoder<'tcx>, -{ +) -> &'tcx [T] { decoder.interner().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder)) } diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 72ab6ac612c..79700d485c4 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -38,6 +38,7 @@ use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE, LocalDefId}; use rustc_hir::definitions::{DefPathData, Definitions, DisambiguatorState}; use rustc_hir::intravisit::VisitorExt; use rustc_hir::lang_items::LangItem; +use rustc_hir::limit::Limit; use rustc_hir::{self as hir, Attribute, HirId, Node, TraitCandidate, find_attr}; use rustc_index::IndexVec; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; @@ -45,14 +46,14 @@ use rustc_query_system::cache::WithDepNode; use rustc_query_system::dep_graph::DepNodeIndex; use rustc_query_system::ich::StableHashingContext; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; +use rustc_session::Session; use rustc_session::config::CrateType; use 
rustc_session::cstore::{CrateStoreDyn, Untracked}; use rustc_session::lint::Lint; -use rustc_session::{Limit, Session}; use rustc_span::def_id::{CRATE_DEF_ID, DefPathHash, StableCrateId}; use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym}; use rustc_type_ir::TyKind::*; -use rustc_type_ir::lang_items::{SolverLangItem, SolverTraitLangItem}; +use rustc_type_ir::lang_items::{SolverAdtLangItem, SolverLangItem, SolverTraitLangItem}; pub use rustc_type_ir::lift::Lift; use rustc_type_ir::{ CollectAndApply, Interner, TypeFlags, TypeFoldable, WithCachedTypeInfo, elaborate, search_graph, @@ -94,6 +95,13 @@ impl<'tcx> Interner for TyCtxt<'tcx> { type DefId = DefId; type LocalDefId = LocalDefId; type TraitId = DefId; + type ForeignId = DefId; + type FunctionId = DefId; + type ClosureId = DefId; + type CoroutineClosureId = DefId; + type CoroutineId = DefId; + type AdtId = DefId; + type ImplId = DefId; type Span = Span; type GenericArgs = ty::GenericArgsRef<'tcx>; @@ -492,6 +500,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.require_lang_item(solver_trait_lang_item_to_lang_item(lang_item), DUMMY_SP) } + fn require_adt_lang_item(self, lang_item: SolverAdtLangItem) -> DefId { + self.require_lang_item(solver_adt_lang_item_to_lang_item(lang_item), DUMMY_SP) + } + fn is_lang_item(self, def_id: DefId, lang_item: SolverLangItem) -> bool { self.is_lang_item(def_id, solver_lang_item_to_lang_item(lang_item)) } @@ -500,6 +512,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.is_lang_item(def_id, solver_trait_lang_item_to_lang_item(lang_item)) } + fn is_adt_lang_item(self, def_id: DefId, lang_item: SolverAdtLangItem) -> bool { + self.is_lang_item(def_id, solver_adt_lang_item_to_lang_item(lang_item)) + } + fn is_default_trait(self, def_id: DefId) -> bool { self.is_default_trait(def_id) } @@ -512,6 +528,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> { lang_item_to_solver_trait_lang_item(self.lang_items().from_def_id(def_id)?) } + fn as_adt_lang_item(self, def_id: DefId) -> Option<SolverAdtLangItem> { + lang_item_to_solver_adt_lang_item(self.lang_items().from_def_id(def_id)?) + } + fn associated_type_def_ids(self, def_id: DefId) -> impl IntoIterator<Item = DefId> { self.associated_items(def_id) .in_definition_order() @@ -631,7 +651,11 @@ impl<'tcx> Interner for TyCtxt<'tcx> { | ty::Bound(_, _) => bug!("unexpected self type: {self_ty}"), } - let trait_impls = tcx.trait_impls_of(trait_def_id); + #[allow(rustc::usage_of_type_ir_traits)] + self.for_each_blanket_impl(trait_def_id, f) + } + fn for_each_blanket_impl(self, trait_def_id: DefId, mut f: impl FnMut(DefId)) { + let trait_impls = self.trait_impls_of(trait_def_id); for &impl_def_id in trait_impls.blanket_impls() { f(impl_def_id); } @@ -738,8 +762,8 @@ impl<'tcx> Interner for TyCtxt<'tcx> { ) } - type ProbeRef = &'tcx inspect::Probe<TyCtxt<'tcx>>; - fn mk_probe_ref(self, probe: inspect::Probe<Self>) -> &'tcx inspect::Probe<TyCtxt<'tcx>> { + type Probe = &'tcx inspect::Probe<TyCtxt<'tcx>>; + fn mk_probe(self, probe: inspect::Probe<Self>) -> &'tcx inspect::Probe<TyCtxt<'tcx>> { self.arena.alloc(probe) } fn evaluate_root_goal_for_proof_tree_raw( @@ -783,6 +807,13 @@ bidirectional_lang_item_map! { DynMetadata, FutureOutput, Metadata, +// tidy-alphabetical-end +} + +bidirectional_lang_item_map! 
{ + SolverAdtLangItem, lang_item_to_solver_adt_lang_item, solver_adt_lang_item_to_lang_item; + +// tidy-alphabetical-start Option, Poll, // tidy-alphabetical-end @@ -2245,7 +2276,16 @@ impl<'tcx> TyCtxt<'tcx> { let is_impl_item = match self.hir_node_by_def_id(suitable_region_binding_scope) { Node::Item(..) | Node::TraitItem(..) => false, - Node::ImplItem(..) => self.is_bound_region_in_impl_item(suitable_region_binding_scope), + Node::ImplItem(impl_item) => match impl_item.impl_kind { + // For now, we do not try to target impls of traits. This is + // because this message is going to suggest that the user + // change the fn signature, but they may not be free to do so, + // since the signature must match the trait. + // + // FIXME(#42706) -- in some cases, we could do better here. + hir::ImplItemImplKind::Trait { .. } => true, + _ => false, + }, _ => false, }; @@ -2299,21 +2339,6 @@ impl<'tcx> TyCtxt<'tcx> { None } - /// Checks if the bound region is in Impl Item. - pub fn is_bound_region_in_impl_item(self, suitable_region_binding_scope: LocalDefId) -> bool { - let container_id = self.parent(suitable_region_binding_scope.to_def_id()); - if self.impl_trait_ref(container_id).is_some() { - // For now, we do not try to target impls of traits. This is - // because this message is going to suggest that the user - // change the fn signature, but they may not be free to do so, - // since the signature must match the trait. - // - // FIXME(#42706) -- in some cases, we could do better here. - return true; - } - false - } - /// Determines whether identifiers in the assembly have strict naming rules. /// Currently, only NVPTX* targets need it. pub fn has_strict_asm_symbol_naming(self) -> bool { diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs index f4fead7e952..74b4adda7fd 100644 --- a/compiler/rustc_middle/src/ty/erase_regions.rs +++ b/compiler/rustc_middle/src/ty/erase_regions.rs @@ -6,20 +6,20 @@ use crate::ty::{ }; pub(super) fn provide(providers: &mut Providers) { - *providers = Providers { erase_regions_ty, ..*providers }; + *providers = Providers { erase_and_anonymize_regions_ty, ..*providers }; } -fn erase_regions_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { +fn erase_and_anonymize_regions_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { // N.B., use `super_fold_with` here. If we used `fold_with`, it - // could invoke the `erase_regions_ty` query recursively. - ty.super_fold_with(&mut RegionEraserVisitor { tcx }) + // could invoke the `erase_and_anonymize_regions_ty` query recursively. + ty.super_fold_with(&mut RegionEraserAndAnonymizerVisitor { tcx }) } impl<'tcx> TyCtxt<'tcx> { - /// Returns an equivalent value with all free regions removed (note - /// that late-bound regions remain, because they are important for - /// subtyping, but they are anonymized and normalized as well).. - pub fn erase_regions<T>(self, value: T) -> T + /// Returns an equivalent value with all free regions removed and + /// bound regions anonymized. 
(note that bound regions are important + /// for subtyping and generally type equality so *cannot* be removed) + pub fn erase_and_anonymize_regions<T>(self, value: T) -> T where T: TypeFoldable<TyCtxt<'tcx>>, { @@ -27,18 +27,18 @@ impl<'tcx> TyCtxt<'tcx> { if !value.has_type_flags(TypeFlags::HAS_BINDER_VARS | TypeFlags::HAS_FREE_REGIONS) { return value; } - debug!("erase_regions({:?})", value); - let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self }); - debug!("erase_regions = {:?}", value1); + debug!("erase_and_anonymize_regions({:?})", value); + let value1 = value.fold_with(&mut RegionEraserAndAnonymizerVisitor { tcx: self }); + debug!("erase_and_anonymize_regions = {:?}", value1); value1 } } -struct RegionEraserVisitor<'tcx> { +struct RegionEraserAndAnonymizerVisitor<'tcx> { tcx: TyCtxt<'tcx>, } -impl<'tcx> TypeFolder<TyCtxt<'tcx>> for RegionEraserVisitor<'tcx> { +impl<'tcx> TypeFolder<TyCtxt<'tcx>> for RegionEraserAndAnonymizerVisitor<'tcx> { fn cx(&self) -> TyCtxt<'tcx> { self.tcx } @@ -49,7 +49,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for RegionEraserVisitor<'tcx> { } else if ty.has_infer() { ty.super_fold_with(self) } else { - self.tcx.erase_regions_ty(ty) + self.tcx.erase_and_anonymize_regions_ty(ty) } } diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 3f854038651..66542525d28 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -7,6 +7,7 @@ use std::path::PathBuf; use rustc_errors::pluralize; use rustc_hir as hir; use rustc_hir::def::{CtorOf, DefKind}; +use rustc_hir::limit::Limit; use rustc_macros::extension; pub use rustc_type_ir::error::ExpectedFound; @@ -233,7 +234,7 @@ impl<'tcx> TyCtxt<'tcx> { loop { // Look for the longest properly trimmed path that still fits in length_limit. short = with_forced_trimmed_paths!({ - let mut p = FmtPrinter::new_with_limit(self, ns, rustc_session::Limit(type_limit)); + let mut p = FmtPrinter::new_with_limit(self, ns, Limit(type_limit)); self.lift(t) .expect("could not lift for printing") .print(&mut p) diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs index 3a51f79f121..34ead91b4f6 100644 --- a/compiler/rustc_middle/src/ty/instance.rs +++ b/compiler/rustc_middle/src/ty/instance.rs @@ -18,8 +18,8 @@ use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags; use crate::ty::normalize_erasing_regions::NormalizationError; use crate::ty::print::{FmtPrinter, Print}; use crate::ty::{ - self, EarlyBinder, GenericArgs, GenericArgsRef, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable, - TypeVisitable, TypeVisitableExt, TypeVisitor, + self, AssocContainer, EarlyBinder, GenericArgs, GenericArgsRef, Ty, TyCtxt, TypeFoldable, + TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor, }; /// An `InstanceKind` along with the args that are needed to substitute the instance. @@ -544,7 +544,9 @@ impl<'tcx> Instance<'tcx> { // All regions in the result of this query are erased, so it's // fine to erase all of the input regions. 
- tcx.resolve_instance_raw(tcx.erase_regions(typing_env.as_query_input((def_id, args)))) + tcx.resolve_instance_raw( + tcx.erase_and_anonymize_regions(typing_env.as_query_input((def_id, args))), + ) } pub fn expect_resolve( @@ -609,26 +611,23 @@ impl<'tcx> Instance<'tcx> { debug!(" => fn pointer created for virtual call"); resolved.def = InstanceKind::ReifyShim(def_id, reason); } - // Reify `Trait::method` implementations if KCFI is enabled - // FIXME(maurer) only reify it if it is a vtable-safe function - _ if tcx.sess.is_sanitizer_kcfi_enabled() - && tcx - .opt_associated_item(def_id) - .and_then(|assoc| assoc.trait_item_def_id) - .is_some() => - { - // If this function could also go in a vtable, we need to `ReifyShim` it with - // KCFI because it can only attach one type per function. - resolved.def = InstanceKind::ReifyShim(resolved.def_id(), reason) - } - // Reify `::call`-like method implementations if KCFI is enabled - _ if tcx.sess.is_sanitizer_kcfi_enabled() - && tcx.is_closure_like(resolved.def_id()) => - { - // Reroute through a reify via the *unresolved* instance. The resolved one can't - // be directly reified because it's closure-like. The reify can handle the - // unresolved instance. - resolved = Instance { def: InstanceKind::ReifyShim(def_id, reason), args } + _ if tcx.sess.is_sanitizer_kcfi_enabled() => { + // Reify `::call`-like method implementations + if tcx.is_closure_like(resolved.def_id()) { + // Reroute through a reify via the *unresolved* instance. The resolved one can't + // be directly reified because it's closure-like. The reify can handle the + // unresolved instance. + resolved = Instance { def: InstanceKind::ReifyShim(def_id, reason), args } + // Reify `Trait::method` implementations + // FIXME(maurer) only reify it if it is a vtable-safe function + } else if let Some(assoc) = tcx.opt_associated_item(def_id) + && let AssocContainer::Trait | AssocContainer::TraitImpl(Ok(_)) = + assoc.container + { + // If this function could also go in a vtable, we need to `ReifyShim` it with + // KCFI because it can only attach one type per function. + resolved.def = InstanceKind::ReifyShim(resolved.def_id(), reason) + } } _ => {} } @@ -681,7 +680,7 @@ impl<'tcx> Instance<'tcx> { && !matches!( tcx.opt_associated_item(def), Some(ty::AssocItem { - container: ty::AssocItemContainer::Trait, + container: ty::AssocContainer::Trait, .. 
}) ); diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index aed94f9aa04..2114d080dfa 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -401,7 +401,10 @@ impl<'tcx> SizeSkeleton<'tcx> { match tail.kind() { ty::Param(_) | ty::Alias(ty::Projection | ty::Inherent, _) => { debug_assert!(tail.has_non_region_param()); - Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) }) + Ok(SizeSkeleton::Pointer { + non_zero, + tail: tcx.erase_and_anonymize_regions(tail), + }) } ty::Error(guar) => { // Fixes ICE #124031 diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index e567ba05f61..d4c001f625e 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -109,7 +109,6 @@ pub use self::typeck_results::{ CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, IsIdentity, Rust2024IncompatiblePatInfo, TypeckResults, UserType, UserTypeAnnotationIndex, UserTypeKind, }; -pub use self::visit::*; use crate::error::{OpaqueHiddenTypeMismatch, TypeMismatchReason}; use crate::metadata::ModChild; use crate::middle::privacy::EffectiveVisibilities; @@ -1935,6 +1934,11 @@ impl<'tcx> TyCtxt<'tcx> { Some((parent, def_kind)) } + /// Returns the trait item that is implemented by the given item `DefId`. + pub fn trait_item_of(self, def_id: impl IntoQueryParam<DefId>) -> Option<DefId> { + self.opt_associated_item(def_id.into_query_param())?.trait_item_def_id() + } + /// If the given `DefId` is an associated item of a trait, /// returns the `DefId` of the trait; otherwise, returns `None`. pub fn trait_of_assoc(self, def_id: DefId) -> Option<DefId> { @@ -2150,17 +2154,12 @@ impl<'tcx> TyCtxt<'tcx> { let Some(item) = self.opt_associated_item(def_id) else { return false; }; - if item.container != ty::AssocItemContainer::Impl { - return false; - } - let Some(trait_item_def_id) = item.trait_item_def_id else { + let AssocContainer::TraitImpl(Ok(trait_item_def_id)) = item.container else { return false; }; - return !self - .associated_types_for_impl_traits_in_associated_fn(trait_item_def_id) - .is_empty(); + !self.associated_types_for_impl_traits_in_associated_fn(trait_item_def_id).is_empty() } } diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs index f2a4a5a4ecf..69c1eb9b345 100644 --- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs +++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs @@ -51,7 +51,7 @@ impl<'tcx> TyCtxt<'tcx> { // Erase first before we do the real query -- this keeps the // cache from being too polluted. - let value = self.erase_regions(value); + let value = self.erase_and_anonymize_regions(value); debug!(?value); if !value.has_aliases() { @@ -83,7 +83,7 @@ impl<'tcx> TyCtxt<'tcx> { // Erase first before we do the real query -- this keeps the // cache from being too polluted. 
- let value = self.erase_regions(value); + let value = self.erase_and_anonymize_regions(value); debug!(?value); if !value.has_aliases() { diff --git a/compiler/rustc_middle/src/ty/predicate.rs b/compiler/rustc_middle/src/ty/predicate.rs index 59e00f85957..69dcca54f46 100644 --- a/compiler/rustc_middle/src/ty/predicate.rs +++ b/compiler/rustc_middle/src/ty/predicate.rs @@ -638,21 +638,7 @@ impl<'tcx> Predicate<'tcx> { let predicate = self.kind(); match predicate.skip_binder() { PredicateKind::Clause(ClauseKind::Trait(t)) => Some(predicate.rebind(t)), - PredicateKind::Clause(ClauseKind::Projection(..)) - | PredicateKind::Clause(ClauseKind::HostEffect(..)) - | PredicateKind::Clause(ClauseKind::ConstArgHasType(..)) - | PredicateKind::Clause(ClauseKind::UnstableFeature(_)) - | PredicateKind::NormalizesTo(..) - | PredicateKind::AliasRelate(..) - | PredicateKind::Subtype(..) - | PredicateKind::Coerce(..) - | PredicateKind::Clause(ClauseKind::RegionOutlives(..)) - | PredicateKind::Clause(ClauseKind::WellFormed(..)) - | PredicateKind::DynCompatible(..) - | PredicateKind::Clause(ClauseKind::TypeOutlives(..)) - | PredicateKind::Clause(ClauseKind::ConstEvaluatable(..)) - | PredicateKind::ConstEquate(..) - | PredicateKind::Ambiguous => None, + _ => None, } } @@ -660,21 +646,7 @@ impl<'tcx> Predicate<'tcx> { let predicate = self.kind(); match predicate.skip_binder() { PredicateKind::Clause(ClauseKind::Projection(t)) => Some(predicate.rebind(t)), - PredicateKind::Clause(ClauseKind::Trait(..)) - | PredicateKind::Clause(ClauseKind::HostEffect(..)) - | PredicateKind::Clause(ClauseKind::ConstArgHasType(..)) - | PredicateKind::Clause(ClauseKind::UnstableFeature(_)) - | PredicateKind::NormalizesTo(..) - | PredicateKind::AliasRelate(..) - | PredicateKind::Subtype(..) - | PredicateKind::Coerce(..) - | PredicateKind::Clause(ClauseKind::RegionOutlives(..)) - | PredicateKind::Clause(ClauseKind::WellFormed(..)) - | PredicateKind::DynCompatible(..) - | PredicateKind::Clause(ClauseKind::TypeOutlives(..)) - | PredicateKind::Clause(ClauseKind::ConstEvaluatable(..)) - | PredicateKind::ConstEquate(..) 
- | PredicateKind::Ambiguous => None, + _ => None, } } diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index 9e6f277ef77..d636e8ef31f 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -2,7 +2,7 @@ use hir::def::Namespace; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::sso::SsoHashSet; use rustc_hir as hir; -use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; +use rustc_hir::def_id::{CrateNum, DefId}; use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData}; use tracing::{debug, instrument, trace}; @@ -396,16 +396,6 @@ impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> { } } -// This is only used by query descriptions -pub fn describe_as_module(def_id: impl Into<LocalDefId>, tcx: TyCtxt<'_>) -> String { - let def_id = def_id.into(); - if def_id.is_top_level_module() { - "top-level module".to_string() - } else { - format!("module `{}`", tcx.def_path_str(def_id)) - } -} - impl<T> rustc_type_ir::ir_print::IrPrint<T> for TyCtxt<'_> where T: Copy + for<'a, 'tcx> Lift<TyCtxt<'tcx>, Lifted: Print<'tcx, FmtPrinter<'a, 'tcx>>>, diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 74caee7336a..fc821ffdaa6 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -13,8 +13,8 @@ use rustc_hir::LangItem; use rustc_hir::def::{self, CtorKind, DefKind, Namespace}; use rustc_hir::def_id::{DefIdMap, DefIdSet, LOCAL_CRATE, ModDefId}; use rustc_hir::definitions::{DefKey, DefPathDataName}; +use rustc_hir::limit::Limit; use rustc_macros::{Lift, extension}; -use rustc_session::Limit; use rustc_session::cstore::{ExternCrate, ExternCrateSource}; use rustc_span::{FileNameDisplayPreference, Ident, Symbol, kw, sym}; use rustc_type_ir::{Upcast as _, elaborate}; @@ -1436,8 +1436,8 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { // anonymized regions, but the super projections can still // contain named regions. So we erase and anonymize everything // here to compare the types modulo regions below. - let proj = p.tcx().erase_regions(proj); - let super_proj = p.tcx().erase_regions(super_proj); + let proj = p.tcx().erase_and_anonymize_regions(proj); + let super_proj = p.tcx().erase_and_anonymize_regions(super_proj); proj == super_proj }); diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index a7d07adf78f..029586a9c55 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -12,9 +12,9 @@ use rustc_hashes::Hash128; use rustc_hir as hir; use rustc_hir::def::{CtorOf, DefKind, Res}; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; +use rustc_hir::limit::Limit; use rustc_index::bit_set::GrowableBitSet; use rustc_macros::{HashStable, TyDecodable, TyEncodable, extension}; -use rustc_session::Limit; use rustc_span::sym; use rustc_type_ir::solve::SizedTraitKind; use smallvec::{SmallVec, smallvec}; @@ -131,10 +131,9 @@ impl<'tcx> TyCtxt<'tcx> { /// Creates a hash of the type `Ty` which will be the same no matter what crate /// context it's calculated within. This is used by the `type_id` intrinsic. pub fn type_id_hash(self, ty: Ty<'tcx>) -> Hash128 { - // We want the type_id be independent of the types free regions, so we - // erase them. The erase_regions() call will also anonymize bound - // regions, which is desirable too. 
- let ty = self.erase_regions(ty); + // We don't have region information, so we erase all free regions. Equal types + // must have the same `TypeId`, so we must anonymize all bound regions as well. + let ty = self.erase_and_anonymize_regions(ty); self.with_stable_hashing_context(|mut hcx| { let mut hasher = StableHasher::new(); @@ -609,10 +608,7 @@ impl<'tcx> TyCtxt<'tcx> { /// Returns `true` if `def_id` refers to a definition that does not have its own /// type-checking context, i.e. closure, coroutine or inline const. pub fn is_typeck_child(self, def_id: DefId) -> bool { - matches!( - self.def_kind(def_id), - DefKind::Closure | DefKind::InlineConst | DefKind::SyntheticCoroutineBody - ) + self.def_kind(def_id).is_typeck_child() } /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`). @@ -1309,7 +1305,7 @@ impl<'tcx> Ty<'tcx> { debug_assert!(!typing_env.param_env.has_infer()); let query_ty = tcx .try_normalize_erasing_regions(typing_env, query_ty) - .unwrap_or_else(|_| tcx.erase_regions(query_ty)); + .unwrap_or_else(|_| tcx.erase_and_anonymize_regions(query_ty)); tcx.needs_drop_raw(typing_env.as_query_input(query_ty)) } @@ -1346,7 +1342,7 @@ impl<'tcx> Ty<'tcx> { debug_assert!(!typing_env.has_infer()); let query_ty = tcx .try_normalize_erasing_regions(typing_env, query_ty) - .unwrap_or_else(|_| tcx.erase_regions(query_ty)); + .unwrap_or_else(|_| tcx.erase_and_anonymize_regions(query_ty)); tcx.needs_async_drop_raw(typing_env.as_query_input(query_ty)) } diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs index 3853a804a92..f0c47f257cc 100644 --- a/compiler/rustc_middle/src/ty/visit.rs +++ b/compiler/rustc_middle/src/ty/visit.rs @@ -212,42 +212,3 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for LateBoundRegionsCollector { } } } - -/// Finds the max universe present -pub struct MaxUniverse { - max_universe: ty::UniverseIndex, -} - -impl MaxUniverse { - pub fn new() -> Self { - MaxUniverse { max_universe: ty::UniverseIndex::ROOT } - } - - pub fn max_universe(self) -> ty::UniverseIndex { - self.max_universe - } -} - -impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for MaxUniverse { - fn visit_ty(&mut self, t: Ty<'tcx>) { - if let ty::Placeholder(placeholder) = t.kind() { - self.max_universe = self.max_universe.max(placeholder.universe); - } - - t.super_visit_with(self) - } - - fn visit_const(&mut self, c: ty::consts::Const<'tcx>) { - if let ty::ConstKind::Placeholder(placeholder) = c.kind() { - self.max_universe = self.max_universe.max(placeholder.universe); - } - - c.super_visit_with(self) - } - - fn visit_region(&mut self, r: ty::Region<'tcx>) { - if let ty::RePlaceholder(placeholder) = r.kind() { - self.max_universe = self.max_universe.max(placeholder.universe); - } - } -} diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs index 6fc19c82342..e2f09fdcb4b 100644 --- a/compiler/rustc_middle/src/ty/vtable.rs +++ b/compiler/rustc_middle/src/ty/vtable.rs @@ -89,7 +89,7 @@ pub(super) fn vtable_allocation_provider<'tcx>( let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref { let trait_ref = poly_trait_ref.with_self_ty(tcx, ty); - let trait_ref = tcx.erase_regions(trait_ref); + let trait_ref = tcx.erase_and_anonymize_regions(trait_ref); tcx.vtable_entries(trait_ref) } else { diff --git a/compiler/rustc_mir_build/src/check_tail_calls.rs b/compiler/rustc_mir_build/src/check_tail_calls.rs index e0cbe8519ed..d40c77d145f 100644 --- a/compiler/rustc_mir_build/src/check_tail_calls.rs +++ 
b/compiler/rustc_mir_build/src/check_tail_calls.rs @@ -64,10 +64,10 @@ impl<'tcx> TailCallCkVisitor<'_, 'tcx> { "`become` outside of functions should have been disallowed by hir_typeck" ) }; - // While the `caller_sig` does have its regions erased, it does not have its - // binders anonymized. We call `erase_regions` once again to anonymize any binders + // While the `caller_sig` does have its free regions erased, it does not have its + // binders anonymized. We call `erase_and_anonymize_regions` once again to anonymize any binders // within the signature, such as in function pointer or `dyn Trait` args. - let caller_sig = self.tcx.erase_regions(caller_sig); + let caller_sig = self.tcx.erase_and_anonymize_regions(caller_sig); let ExprKind::Scope { value, .. } = call.kind else { span_bug!(call.span, "expected scope, found: {call:?}") diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs index 81b0e21a5f5..0b9bc018a09 100644 --- a/compiler/rustc_mir_build/src/thir/cx/expr.rs +++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs @@ -792,10 +792,9 @@ impl<'tcx> ThirBuildCx<'tcx> { let ty = self.typeck_results.node_type(anon_const.hir_id); let did = anon_const.def_id.to_def_id(); let typeck_root_def_id = tcx.typeck_root_def_id(did); - let parent_args = tcx.erase_regions(GenericArgs::identity_for_item( - tcx, - typeck_root_def_id, - )); + let parent_args = tcx.erase_and_anonymize_regions( + GenericArgs::identity_for_item(tcx, typeck_root_def_id), + ); let args = InlineConstArgs::new(tcx, InlineConstArgsParts { parent_args, ty }) .args; @@ -831,8 +830,10 @@ impl<'tcx> ThirBuildCx<'tcx> { let ty = self.typeck_results.node_type(anon_const.hir_id); let did = anon_const.def_id.to_def_id(); let typeck_root_def_id = tcx.typeck_root_def_id(did); - let parent_args = - tcx.erase_regions(GenericArgs::identity_for_item(tcx, typeck_root_def_id)); + let parent_args = tcx.erase_and_anonymize_regions(GenericArgs::identity_for_item( + tcx, + typeck_root_def_id, + )); let args = InlineConstArgs::new(tcx, InlineConstArgsParts { parent_args, ty }).args; ExprKind::ConstBlock { did, args } diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs index d46c4678bcf..6316ccf1b8c 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs @@ -105,9 +105,11 @@ impl<'tcx> ConstToPat<'tcx> { // // FIXME: `const_eval_resolve_for_typeck` should probably just modify the env itself // instead of having this logic here - let typing_env = - self.tcx.erase_regions(self.typing_env).with_post_analysis_normalized(self.tcx); - let uv = self.tcx.erase_regions(uv); + let typing_env = self + .tcx + .erase_and_anonymize_regions(self.typing_env) + .with_post_analysis_normalized(self.tcx); + let uv = self.tcx.erase_and_anonymize_regions(uv); // try to resolve e.g. associated constants to their definition on an impl, and then // evaluate the const. 
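A note before the next file: the recurring theme in several hunks above (rustc_middle/src/ty/assoc.rs, instance.rs, and the rmeta encoder/decoder) is that the old pair of `container: AssocItemContainer` plus `trait_item_def_id: Option<DefId>` collapses into a single `AssocContainer` enum whose `TraitImpl` variant carries the resolved trait item, or an error if resolution failed. The sketch below is not rustc's actual code; it uses plain integer and unit stand-ins for `DefId` and `ErrorGuaranteed` purely to illustrate how the accessors introduced in the assoc.rs hunk fall out of the new shape.

// Toy stand-ins; the real types are rustc_hir::def_id::DefId and rustc_span::ErrorGuaranteed.
type DefId = u32;
type ErrorGuaranteed = ();

// Simplified mirror of the new container enum: the trait-impl case records which
// trait item is being implemented, or an error if that item failed to resolve.
#[derive(Clone, Copy, Debug)]
enum AssocContainer {
    Trait,
    InherentImpl,
    TraitImpl(Result<DefId, ErrorGuaranteed>),
}

struct AssocItem {
    def_id: DefId,
    container: AssocContainer,
}

impl AssocItem {
    // Counterpart of `trait_item_or_self`: the implemented trait item for a trait
    // impl, otherwise the item's own DefId.
    fn trait_item_or_self(&self) -> Result<DefId, ErrorGuaranteed> {
        match self.container {
            AssocContainer::TraitImpl(id) => id,
            AssocContainer::Trait | AssocContainer::InherentImpl => Ok(self.def_id),
        }
    }

    // Counterpart of `trait_item_def_id`: only a successfully resolved trait impl
    // yields the trait item's DefId.
    fn trait_item_def_id(&self) -> Option<DefId> {
        match self.container {
            AssocContainer::TraitImpl(Ok(id)) => Some(id),
            _ => None,
        }
    }
}

fn main() {
    let on_trait = AssocItem { def_id: 3, container: AssocContainer::Trait };
    let in_trait_impl = AssocItem { def_id: 7, container: AssocContainer::TraitImpl(Ok(3)) };
    let inherent = AssocItem { def_id: 9, container: AssocContainer::InherentImpl };

    assert_eq!(on_trait.trait_item_or_self(), Ok(3));
    assert_eq!(in_trait_impl.trait_item_def_id(), Some(3));
    assert_eq!(inherent.trait_item_or_self(), Ok(9));
}
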
diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs index 6ec1b03a34e..5eba474a60c 100644 --- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs +++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs @@ -92,7 +92,7 @@ impl<'tcx> Visitor<'tcx> for TransferFunction<'_> { } match DefUse::for_place(*place, context) { - Some(DefUse::Def) => { + DefUse::Def => { if let PlaceContext::MutatingUse( MutatingUseContext::Call | MutatingUseContext::AsmOutput, ) = context @@ -105,8 +105,8 @@ impl<'tcx> Visitor<'tcx> for TransferFunction<'_> { self.0.kill(place.local); } } - Some(DefUse::Use) => self.0.gen_(place.local), - None => {} + DefUse::Use => self.0.gen_(place.local), + DefUse::PartialWrite | DefUse::NonUse => {} } self.visit_projection(place.as_ref(), context, location); @@ -131,23 +131,29 @@ impl<'tcx> Visitor<'tcx> for YieldResumeEffect<'_> { } #[derive(Eq, PartialEq, Clone)] -enum DefUse { +pub enum DefUse { + /// Full write to the local. Def, + /// Read of any part of the local. Use, + /// Partial write to the local. + PartialWrite, + /// Non-use, like debuginfo. + NonUse, } impl DefUse { fn apply(state: &mut DenseBitSet<Local>, place: Place<'_>, context: PlaceContext) { match DefUse::for_place(place, context) { - Some(DefUse::Def) => state.kill(place.local), - Some(DefUse::Use) => state.gen_(place.local), - None => {} + DefUse::Def => state.kill(place.local), + DefUse::Use => state.gen_(place.local), + DefUse::PartialWrite | DefUse::NonUse => {} } } - fn for_place(place: Place<'_>, context: PlaceContext) -> Option<DefUse> { + pub fn for_place(place: Place<'_>, context: PlaceContext) -> DefUse { match context { - PlaceContext::NonUse(_) => None, + PlaceContext::NonUse(_) => DefUse::NonUse, PlaceContext::MutatingUse( MutatingUseContext::Call @@ -156,21 +162,20 @@ impl DefUse { | MutatingUseContext::Store | MutatingUseContext::Deinit, ) => { + // Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a use. if place.is_indirect() { - // Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a - // use. - Some(DefUse::Use) + DefUse::Use } else if place.projection.is_empty() { - Some(DefUse::Def) + DefUse::Def } else { - None + DefUse::PartialWrite } } // Setting the discriminant is not a use because it does no reading, but it is also not // a def because it does not overwrite the whole place PlaceContext::MutatingUse(MutatingUseContext::SetDiscriminant) => { - place.is_indirect().then_some(DefUse::Use) + if place.is_indirect() { DefUse::Use } else { DefUse::PartialWrite } } // All other contexts are uses... 
@@ -188,7 +193,7 @@ impl DefUse { | NonMutatingUseContext::PlaceMention | NonMutatingUseContext::FakeBorrow | NonMutatingUseContext::SharedBorrow, - ) => Some(DefUse::Use), + ) => DefUse::Use, PlaceContext::MutatingUse(MutatingUseContext::Projection) | PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => { diff --git a/compiler/rustc_mir_dataflow/src/impls/mod.rs b/compiler/rustc_mir_dataflow/src/impls/mod.rs index 3f29b819a6d..6d573e1c00e 100644 --- a/compiler/rustc_mir_dataflow/src/impls/mod.rs +++ b/compiler/rustc_mir_dataflow/src/impls/mod.rs @@ -9,7 +9,8 @@ pub use self::initialized::{ MaybeUninitializedPlaces, MaybeUninitializedPlacesDomain, }; pub use self::liveness::{ - MaybeLiveLocals, MaybeTransitiveLiveLocals, TransferFunction as LivenessTransferFunction, + DefUse, MaybeLiveLocals, MaybeTransitiveLiveLocals, + TransferFunction as LivenessTransferFunction, }; pub use self::storage_liveness::{ MaybeRequiresStorage, MaybeStorageDead, MaybeStorageLive, always_storage_live_locals, diff --git a/compiler/rustc_mir_dataflow/src/points.rs b/compiler/rustc_mir_dataflow/src/points.rs index 70d1a34b5fb..e3d1e04a319 100644 --- a/compiler/rustc_mir_dataflow/src/points.rs +++ b/compiler/rustc_mir_dataflow/src/points.rs @@ -1,9 +1,5 @@ -use rustc_index::bit_set::DenseBitSet; -use rustc_index::interval::SparseIntervalMatrix; use rustc_index::{Idx, IndexVec}; -use rustc_middle::mir::{self, BasicBlock, Body, Location}; - -use crate::framework::{Analysis, Results, ResultsVisitor, visit_results}; +use rustc_middle::mir::{BasicBlock, Body, Location}; /// Maps between a `Location` and a `PointIndex` (and vice versa). pub struct DenseLocationMap { @@ -93,65 +89,3 @@ rustc_index::newtype_index! { #[debug_format = "PointIndex({})"] pub struct PointIndex {} } - -/// Add points depending on the result of the given dataflow analysis. -pub fn save_as_intervals<'tcx, N, A>( - elements: &DenseLocationMap, - body: &mir::Body<'tcx>, - mut analysis: A, - results: Results<A::Domain>, -) -> SparseIntervalMatrix<N, PointIndex> -where - N: Idx, - A: Analysis<'tcx, Domain = DenseBitSet<N>>, -{ - let values = SparseIntervalMatrix::new(elements.num_points()); - let mut visitor = Visitor { elements, values }; - visit_results( - body, - body.basic_blocks.reverse_postorder().iter().copied(), - &mut analysis, - &results, - &mut visitor, - ); - visitor.values -} - -struct Visitor<'a, N: Idx> { - elements: &'a DenseLocationMap, - values: SparseIntervalMatrix<N, PointIndex>, -} - -impl<'tcx, A, N> ResultsVisitor<'tcx, A> for Visitor<'_, N> -where - A: Analysis<'tcx, Domain = DenseBitSet<N>>, - N: Idx, -{ - fn visit_after_primary_statement_effect<'mir>( - &mut self, - _analysis: &mut A, - state: &A::Domain, - _statement: &'mir mir::Statement<'tcx>, - location: Location, - ) { - let point = self.elements.point_from_location(location); - // Use internal iterator manually as it is much more efficient. - state.iter().for_each(|node| { - self.values.insert(node, point); - }); - } - - fn visit_after_primary_terminator_effect<'mir>( - &mut self, - _analysis: &mut A, - state: &A::Domain, - _terminator: &'mir mir::Terminator<'tcx>, - location: Location, - ) { - let point = self.elements.point_from_location(location); - // Use internal iterator manually as it is much more efficient. 
- state.iter().for_each(|node| { - self.values.insert(node, point); - }); - } -} diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs index 005e7973130..bf5ec8f459e 100644 --- a/compiler/rustc_mir_dataflow/src/value_analysis.rs +++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs @@ -6,7 +6,7 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet, StdEntry}; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_index::IndexVec; use rustc_index::bit_set::DenseBitSet; -use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor}; +use rustc_middle::mir::visit::{PlaceContext, Visitor}; use rustc_middle::mir::*; use rustc_middle::ty::{self, Ty, TyCtxt}; use tracing::debug; @@ -891,7 +891,7 @@ pub fn iter_fields<'tcx>( let field_ty = f_def.ty(tcx, args); let field_ty = tcx .try_normalize_erasing_regions(typing_env, field_ty) - .unwrap_or_else(|_| tcx.erase_regions(field_ty)); + .unwrap_or_else(|_| tcx.erase_and_anonymize_regions(field_ty)); f(variant, f_index.into(), field_ty); } } @@ -917,12 +917,7 @@ pub fn excluded_locals(body: &Body<'_>) -> DenseBitSet<Local> { impl<'tcx> Visitor<'tcx> for Collector { fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) { - if (context.is_borrow() - || context.is_address_of() - || context.is_drop() - || context == PlaceContext::MutatingUse(MutatingUseContext::AsmOutput)) - && !place.is_indirect() - { + if context.may_observe_address() && !place.is_indirect() { // A pointer to a place could be used to access other places with the same local, // hence we have to exclude the local completely. self.result.insert(place.local); diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs index 92ee80eaa35..be4f84d64d0 100644 --- a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs +++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs @@ -32,8 +32,8 @@ impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> { let mut rval_ty = rvalue.ty(self.local_decls, self.tcx); // Not erasing this causes `Free Regions` errors in validator, // when rval is `ReStatic`. - rval_ty = self.tcx.erase_regions(rval_ty); - place_ty = self.tcx.erase_regions(place_ty); + rval_ty = self.tcx.erase_and_anonymize_regions(rval_ty); + place_ty = self.tcx.erase_and_anonymize_regions(place_ty); if place_ty != rval_ty { let temp = self .patcher diff --git a/compiler/rustc_mir_transform/src/coverage/expansion.rs b/compiler/rustc_mir_transform/src/coverage/expansion.rs index 91e0528f52f..851bbaeed48 100644 --- a/compiler/rustc_mir_transform/src/coverage/expansion.rs +++ b/compiler/rustc_mir_transform/src/coverage/expansion.rs @@ -82,7 +82,7 @@ impl ExpnNode { Self { expn_id, - expn_kind: expn_data.kind.clone(), + expn_kind: expn_data.kind, call_site, call_site_expn_id, diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs index b186c2bd775..df98c07f549 100644 --- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs +++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs @@ -135,7 +135,16 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> { } } } - TerminatorKind::Call { unwind, .. } => { + TerminatorKind::Call { ref func, unwind, .. 
} => {
+                // We track calls because they make our function not a leaf (and in theory, the
+                // number of calls indicates how likely this function is to perturb other CGUs).
+                // But intrinsics don't have a body that gets assigned to a CGU, so they are
+                // ignored.
+                if let Some((fn_def_id, _)) = func.const_fn_def()
+                    && self.tcx.has_attr(fn_def_id, sym::rustc_intrinsic)
+                {
+                    return;
+                }
                 self.calls += 1;
                 if let UnwindAction::Cleanup(_) = unwind {
                     self.landing_pads += 1;
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index cf7425251e8..9ba2d274691 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -59,6 +59,12 @@
 //! The first two conditions are simple structural requirements on the `Assign` statements that can
 //! be trivially checked. The third requirement however is more difficult and costly to check.
 //!
+//! ## Current implementation
+//!
+//! The current implementation relies on live range computation to check for conflicts. We only
+//! allow merging locals that have disjoint live ranges. Live ranges are defined with
+//! half-statement granularity, so that every write is live for at least half a statement.
+//!
 //! ## Future Improvements
 //!
 //! There are a number of ways in which this pass could be improved in the future:
@@ -117,9 +123,8 @@
 //! - Layout optimizations for coroutines have been added to improve code generation for
 //!   async/await, which are very similar in spirit to what this optimization does.
 //!
-//! Also, rustc now has a simple NRVO pass (see `nrvo.rs`), which handles a subset of the cases that
-//! this destination propagation pass handles, proving that similar optimizations can be performed
-//! on MIR.
+//! [The next approach][attempt 4] computes a conflict matrix between locals, forbidding the merge
+//! of locals that have competing writes, or where one is written while the other is live.
 //!
 //! ## Pre/Post Optimization
 //!
@@ -130,20 +135,18 @@
 //! [attempt 1]: https://github.com/rust-lang/rust/pull/47954
 //! [attempt 2]: https://github.com/rust-lang/rust/pull/71003
 //! [attempt 3]: https://github.com/rust-lang/rust/pull/72632
+//!
[attempt 4]: https://github.com/rust-lang/rust/pull/96451 -use rustc_data_structures::fx::{FxIndexMap, IndexEntry, IndexOccupiedEntry}; +use rustc_data_structures::union_find::UnionFind; use rustc_index::bit_set::DenseBitSet; use rustc_index::interval::SparseIntervalMatrix; -use rustc_middle::bug; +use rustc_index::{IndexVec, newtype_index}; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; -use rustc_middle::mir::{ - Body, HasLocalDecls, InlineAsmOperand, Local, LocalKind, Location, MirDumper, Operand, - PassWhere, Place, Rvalue, Statement, StatementKind, TerminatorKind, traversal, -}; +use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; -use rustc_mir_dataflow::Analysis; -use rustc_mir_dataflow::impls::MaybeLiveLocals; -use rustc_mir_dataflow::points::{DenseLocationMap, PointIndex, save_as_intervals}; +use rustc_mir_dataflow::impls::{DefUse, MaybeLiveLocals}; +use rustc_mir_dataflow::points::DenseLocationMap; +use rustc_mir_dataflow::{Analysis, Results}; use tracing::{debug, trace}; pub(super) struct DestinationPropagation; @@ -161,84 +164,81 @@ impl<'tcx> crate::MirPass<'tcx> for DestinationPropagation { sess.mir_opt_level() >= 3 } + #[tracing::instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let def_id = body.source.def_id(); - let mut candidates = Candidates::default(); - let mut write_info = WriteInfo::default(); - trace!(func = ?tcx.def_path_str(def_id)); + trace!(?def_id); let borrowed = rustc_mir_dataflow::impls::borrowed_locals(body); + let candidates = Candidates::find(body, &borrowed); + trace!(?candidates); + if candidates.c.is_empty() { + return; + } + let live = MaybeLiveLocals.iterate_to_fixpoint(tcx, body, Some("MaybeLiveLocals-DestProp")); + let points = DenseLocationMap::new(body); - let mut live = save_as_intervals(&points, body, live.analysis, live.results); - - // In order to avoid having to collect data for every single pair of locals in the body, we - // do not allow doing more than one merge for places that are derived from the same local at - // once. To avoid missed opportunities, we instead iterate to a fixed point - we'll refer to - // each of these iterations as a "round." - // - // Reaching a fixed point could in theory take up to `min(l, s)` rounds - however, we do not - // expect to see MIR like that. To verify this, a test was run against `[rust-lang/regex]` - - // the average MIR body saw 1.32 full iterations of this loop. The most that was hit were 30 - // for a single function. Only 80/2801 (2.9%) of functions saw at least 5. - // - // [rust-lang/regex]: - // https://github.com/rust-lang/regex/tree/b5372864e2df6a2f5e543a556a62197f50ca3650 - let mut round_count = 0; - loop { - // PERF: Can we do something smarter than recalculating the candidates and liveness - // results? - candidates.reset_and_find(body, &borrowed); - trace!(?candidates); - dest_prop_mir_dump(tcx, body, &points, &live, round_count); - - FilterInformation::filter_liveness( - &mut candidates, - &points, - &live, - &mut write_info, - body, - ); - - // Because we only filter once per round, it is unsound to use a local for more than - // one merge operation within a single round of optimizations. We store here which ones - // we have already used. - let mut merged_locals: DenseBitSet<Local> = - DenseBitSet::new_empty(body.local_decls.len()); - - // This is the set of merges we will apply this round. It is a subset of the candidates. 
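// Editorial sketch, not part of the original diff: an illustration of the "disjoint
// live ranges" criterion described in the dest_prop module docs above, using
// hypothetical locals `_2` and `_3`:
//
//     _2 = f();        // `_2` becomes live here ...
//     _3 = move _2;    // ... and is last read in the first half of this statement;
//                      // the write to `_3` is recorded in the second half.
//     g(move _3);      // `_3` stays live until this last use.
//
// With half-statement granularity the two ranges do not overlap, so `_2` and `_3`
// can be unified, turning the middle assignment into a removable self-assignment.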
- let mut merges = FxIndexMap::default(); - - for (src, candidates) in candidates.c.iter() { - if merged_locals.contains(*src) { - continue; - } - let Some(dest) = candidates.iter().find(|dest| !merged_locals.contains(**dest)) - else { - continue; - }; - - // Replace `src` by `dest` everywhere. - merges.insert(*src, *dest); - merged_locals.insert(*src); - merged_locals.insert(*dest); - - // Update liveness information based on the merge we just performed. - // Every location where `src` was live, `dest` will be live. - live.union_rows(*src, *dest); + let mut relevant = RelevantLocals::compute(&candidates, body.local_decls.len()); + let mut live = save_as_intervals(&points, body, &relevant, live.results); + + dest_prop_mir_dump(tcx, body, &points, &live, &relevant); + + let mut merged_locals = DenseBitSet::new_empty(body.local_decls.len()); + + for (src, dst) in candidates.c.into_iter() { + trace!(?src, ?dst); + + let Some(mut src) = relevant.find(src) else { continue }; + let Some(mut dst) = relevant.find(dst) else { continue }; + if src == dst { + continue; } - trace!(merging = ?merges); - if merges.is_empty() { - break; + let Some(src_live_ranges) = live.row(src) else { continue }; + let Some(dst_live_ranges) = live.row(dst) else { continue }; + trace!(?src, ?src_live_ranges); + trace!(?dst, ?dst_live_ranges); + + if src_live_ranges.disjoint(dst_live_ranges) { + // We want to replace `src` by `dst`. + let mut orig_src = relevant.original[src]; + let mut orig_dst = relevant.original[dst]; + + // The return place and function arguments are required and cannot be renamed. + // This check cannot be made during candidate collection, as we may want to + // unify the same non-required local with several required locals. + match (is_local_required(orig_src, body), is_local_required(orig_dst, body)) { + // Renaming `src` is ok. + (false, _) => {} + // Renaming `src` is wrong, but renaming `dst` is ok. + (true, false) => { + std::mem::swap(&mut src, &mut dst); + std::mem::swap(&mut orig_src, &mut orig_dst); + } + // Neither local can be renamed, so skip this case. + (true, true) => continue, + } + + trace!(?src, ?dst, "merge"); + merged_locals.insert(orig_src); + merged_locals.insert(orig_dst); + + // Replace `src` by `dst`. + let head = relevant.union(src, dst); + live.union_rows(/* read */ src, /* write */ head); + live.union_rows(/* read */ dst, /* write */ head); } - round_count += 1; + } + trace!(?merged_locals); + trace!(?relevant.renames); - apply_merges(body, tcx, merges, merged_locals); + if merged_locals.is_empty() { + return; } - trace!(round_count); + apply_merges(body, tcx, relevant, merged_locals); } fn is_required(&self) -> bool { @@ -246,30 +246,6 @@ impl<'tcx> crate::MirPass<'tcx> for DestinationPropagation { } } -#[derive(Debug, Default)] -struct Candidates { - /// The set of candidates we are considering in this optimization. - /// - /// We will always merge the key into at most one of its values. - /// - /// Whether a place ends up in the key or the value does not correspond to whether it appears as - /// the lhs or rhs of any assignment. As a matter of fact, the places in here might never appear - /// in an assignment at all. This happens because if we see an assignment like this: - /// - /// ```ignore (syntax-highlighting-only) - /// _1.0 = _2.0 - /// ``` - /// - /// We will still report that we would like to merge `_1` and `_2` in an attempt to allow us to - /// remove that assignment. 
- c: FxIndexMap<Local, Vec<Local>>, - - /// A reverse index of the `c` set; if the `c` set contains `a => Place { local: b, proj }`, - /// then this contains `b => a`. - // PERF: Possibly these should be `SmallVec`s? - reverse: FxIndexMap<Local, Vec<Local>>, -} - ////////////////////////////////////////////////////////// // Merging // @@ -278,16 +254,16 @@ struct Candidates { fn apply_merges<'tcx>( body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>, - merges: FxIndexMap<Local, Local>, + relevant: RelevantLocals, merged_locals: DenseBitSet<Local>, ) { - let mut merger = Merger { tcx, merges, merged_locals }; + let mut merger = Merger { tcx, relevant, merged_locals }; merger.visit_body_preserves_cfg(body); } struct Merger<'tcx> { tcx: TyCtxt<'tcx>, - merges: FxIndexMap<Local, Local>, + relevant: RelevantLocals, merged_locals: DenseBitSet<Local>, } @@ -297,8 +273,8 @@ impl<'tcx> MutVisitor<'tcx> for Merger<'tcx> { } fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _location: Location) { - if let Some(dest) = self.merges.get(local) { - *local = *dest; + if let Some(relevant) = self.relevant.find(*local) { + *local = self.relevant.original[relevant]; } } @@ -336,414 +312,95 @@ impl<'tcx> MutVisitor<'tcx> for Merger<'tcx> { } ////////////////////////////////////////////////////////// -// Liveness filtering +// Relevant locals // -// This section enforces bullet point 2 +// Small utility to reduce size of the conflict matrix by only considering locals that appear in +// the candidates -struct FilterInformation<'a, 'tcx> { - body: &'a Body<'tcx>, - points: &'a DenseLocationMap, - live: &'a SparseIntervalMatrix<Local, PointIndex>, - candidates: &'a mut Candidates, - write_info: &'a mut WriteInfo, - at: Location, +newtype_index! { + /// Represent a subset of locals which appear in candidates. + struct RelevantLocal {} } -// We first implement some utility functions which we will expose removing candidates according to -// different needs. Throughout the liveness filtering, the `candidates` are only ever accessed -// through these methods, and not directly. -impl Candidates { - /// Collects the candidates for merging. - /// - /// This is responsible for enforcing the first and third bullet point. - fn reset_and_find<'tcx>(&mut self, body: &Body<'tcx>, borrowed: &DenseBitSet<Local>) { - self.c.clear(); - self.reverse.clear(); - let mut visitor = FindAssignments { body, candidates: &mut self.c, borrowed }; - visitor.visit_body(body); - // Deduplicate candidates. - for (_, cands) in self.c.iter_mut() { - cands.sort(); - cands.dedup(); - } - // Generate the reverse map. - for (src, cands) in self.c.iter() { - for dest in cands.iter().copied() { - self.reverse.entry(dest).or_default().push(*src); - } - } - } - - /// Just `Vec::retain`, but the condition is inverted and we add debugging output - fn vec_filter_candidates( - src: Local, - v: &mut Vec<Local>, - mut f: impl FnMut(Local) -> CandidateFilter, - at: Location, - ) { - v.retain(|dest| { - let remove = f(*dest); - if remove == CandidateFilter::Remove { - trace!("eliminating {:?} => {:?} due to conflict at {:?}", src, dest, at); - } - remove == CandidateFilter::Keep - }); - } - - /// `vec_filter_candidates` but for an `Entry` - fn entry_filter_candidates( - mut entry: IndexOccupiedEntry<'_, Local, Vec<Local>>, - p: Local, - f: impl FnMut(Local) -> CandidateFilter, - at: Location, - ) { - let candidates = entry.get_mut(); - Self::vec_filter_candidates(p, candidates, f, at); - if candidates.len() == 0 { - // FIXME(#120456) - is `swap_remove` correct? 
- entry.swap_remove(); - } - } - - /// For all candidates `(p, q)` or `(q, p)` removes the candidate if `f(q)` says to do so - fn filter_candidates_by( - &mut self, - p: Local, - mut f: impl FnMut(Local) -> CandidateFilter, - at: Location, - ) { - // Cover the cases where `p` appears as a `src` - if let IndexEntry::Occupied(entry) = self.c.entry(p) { - Self::entry_filter_candidates(entry, p, &mut f, at); - } - // And the cases where `p` appears as a `dest` - let Some(srcs) = self.reverse.get_mut(&p) else { - return; - }; - // We use `retain` here to remove the elements from the reverse set if we've removed the - // matching candidate in the forward set. - srcs.retain(|src| { - if f(*src) == CandidateFilter::Keep { - return true; - } - let IndexEntry::Occupied(entry) = self.c.entry(*src) else { - return false; - }; - Self::entry_filter_candidates( - entry, - *src, - |dest| { - if dest == p { CandidateFilter::Remove } else { CandidateFilter::Keep } - }, - at, - ); - false - }); - } +#[derive(Debug)] +struct RelevantLocals { + original: IndexVec<RelevantLocal, Local>, + shrink: IndexVec<Local, Option<RelevantLocal>>, + renames: UnionFind<RelevantLocal>, } -#[derive(Copy, Clone, PartialEq, Eq)] -enum CandidateFilter { - Keep, - Remove, -} +impl RelevantLocals { + #[tracing::instrument(level = "trace", skip(candidates, num_locals), ret)] + fn compute(candidates: &Candidates, num_locals: usize) -> RelevantLocals { + let mut original = IndexVec::with_capacity(candidates.c.len()); + let mut shrink = IndexVec::from_elem_n(None, num_locals); -impl<'a, 'tcx> FilterInformation<'a, 'tcx> { - /// Filters the set of candidates to remove those that conflict. - /// - /// The steps we take are exactly those that are outlined at the top of the file. For each - /// statement/terminator, we collect the set of locals that are written to in that - /// statement/terminator, and then we remove all pairs of candidates that contain one such local - /// and another one that is live. - /// - /// We need to be careful about the ordering of operations within each statement/terminator - /// here. Many statements might write and read from more than one place, and we need to consider - /// them all. The strategy for doing this is as follows: We first gather all the places that are - /// written to within the statement/terminator via `WriteInfo`. Then, we use the liveness - /// analysis from *before* the statement/terminator (in the control flow sense) to eliminate - /// candidates - this is because we want to conservatively treat a pair of locals that is both - /// read and written in the statement/terminator to be conflicting, and the liveness analysis - /// before the statement/terminator will correctly report locals that are read in the - /// statement/terminator to be live. We are additionally conservative by treating all written to - /// locals as also being read from. - fn filter_liveness( - candidates: &mut Candidates, - points: &DenseLocationMap, - live: &SparseIntervalMatrix<Local, PointIndex>, - write_info: &mut WriteInfo, - body: &Body<'tcx>, - ) { - let mut this = FilterInformation { - body, - points, - live, - candidates, - // We don't actually store anything at this scope, we just keep things here to be able - // to reuse the allocation. - write_info, - // Doesn't matter what we put here, will be overwritten before being used - at: Location::START, + // Mark a local as relevant and record it into the maps. 
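// Editorial note, not part of the original diff: `renames` is a
// `rustc_data_structures::union_find::UnionFind` keyed by `RelevantLocal`. Based
// only on the calls visible in this file, its use is roughly the following
// (indices `r0` and `r1` are hypothetical):
//
//     let mut renames = UnionFind::new(2);            // r0 and r1 start as their own roots
//     let head = renames.unify(r0, r1);               // merging returns the representative
//     assert_eq!(renames.find(r0), renames.find(r1));
//
// `RelevantLocals::find` below resolves a MIR `Local` through `shrink` and then
// through this union-find, so each merge class has a single representative.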
+ let mut declare = |local| { + shrink.get_or_insert_with(local, || original.push(local)); }; - this.internal_filter_liveness(); - } - fn internal_filter_liveness(&mut self) { - for (block, data) in traversal::preorder(self.body) { - self.at = Location { block, statement_index: data.statements.len() }; - self.write_info.for_terminator(&data.terminator().kind); - self.apply_conflicts(); - - for (i, statement) in data.statements.iter().enumerate().rev() { - self.at = Location { block, statement_index: i }; - self.write_info.for_statement(&statement.kind, self.body); - self.apply_conflicts(); - } + for &(src, dest) in candidates.c.iter() { + declare(src); + declare(dest) } - } - fn apply_conflicts(&mut self) { - let writes = &self.write_info.writes; - for p in writes { - let other_skip = self.write_info.skip_pair.and_then(|(a, b)| { - if a == *p { - Some(b) - } else if b == *p { - Some(a) - } else { - None - } - }); - let at = self.points.point_from_location(self.at); - self.candidates.filter_candidates_by( - *p, - |q| { - if Some(q) == other_skip { - return CandidateFilter::Keep; - } - // It is possible that a local may be live for less than the - // duration of a statement This happens in the case of function - // calls or inline asm. Because of this, we also mark locals as - // conflicting when both of them are written to in the same - // statement. - if self.live.contains(q, at) || writes.contains(&q) { - CandidateFilter::Remove - } else { - CandidateFilter::Keep - } - }, - self.at, - ); - } + let renames = UnionFind::new(original.len()); + RelevantLocals { original, shrink, renames } } -} -/// Describes where a statement/terminator writes to -#[derive(Default, Debug)] -struct WriteInfo { - writes: Vec<Local>, - /// If this pair of locals is a candidate pair, completely skip processing it during this - /// statement. All other candidates are unaffected. - skip_pair: Option<(Local, Local)>, -} - -impl WriteInfo { - fn for_statement<'tcx>(&mut self, statement: &StatementKind<'tcx>, body: &Body<'tcx>) { - self.reset(); - match statement { - StatementKind::Assign(box (lhs, rhs)) => { - self.add_place(*lhs); - match rhs { - Rvalue::Use(op) => { - self.add_operand(op); - self.consider_skipping_for_assign_use(*lhs, op, body); - } - Rvalue::Repeat(op, _) => { - self.add_operand(op); - } - Rvalue::Cast(_, op, _) - | Rvalue::UnaryOp(_, op) - | Rvalue::ShallowInitBox(op, _) => { - self.add_operand(op); - } - Rvalue::BinaryOp(_, ops) => { - for op in [&ops.0, &ops.1] { - self.add_operand(op); - } - } - Rvalue::Aggregate(_, ops) => { - for op in ops { - self.add_operand(op); - } - } - Rvalue::WrapUnsafeBinder(op, _) => { - self.add_operand(op); - } - Rvalue::ThreadLocalRef(_) - | Rvalue::NullaryOp(_, _) - | Rvalue::Ref(_, _, _) - | Rvalue::RawPtr(_, _) - | Rvalue::Len(_) - | Rvalue::Discriminant(_) - | Rvalue::CopyForDeref(_) => {} - } - } - // Retags are technically also reads, but reporting them as a write suffices - StatementKind::SetDiscriminant { place, .. } - | StatementKind::Deinit(place) - | StatementKind::Retag(_, place) => { - self.add_place(**place); - } - StatementKind::Intrinsic(_) - | StatementKind::ConstEvalCounter - | StatementKind::Nop - | StatementKind::Coverage(_) - | StatementKind::StorageLive(_) - | StatementKind::StorageDead(_) - | StatementKind::BackwardIncompatibleDropHint { .. 
} - | StatementKind::PlaceMention(_) => {} - StatementKind::FakeRead(_) | StatementKind::AscribeUserType(_, _) => { - bug!("{:?} not found in this MIR phase", statement) - } - } + fn find(&mut self, src: Local) -> Option<RelevantLocal> { + let src = self.shrink[src]?; + let src = self.renames.find(src); + Some(src) } - fn consider_skipping_for_assign_use<'tcx>( - &mut self, - lhs: Place<'tcx>, - rhs: &Operand<'tcx>, - body: &Body<'tcx>, - ) { - let Some(rhs) = rhs.place() else { return }; - if let Some(pair) = places_to_candidate_pair(lhs, rhs, body) { - self.skip_pair = Some(pair); - } - } - - fn for_terminator<'tcx>(&mut self, terminator: &TerminatorKind<'tcx>) { - self.reset(); - match terminator { - TerminatorKind::SwitchInt { discr: op, .. } - | TerminatorKind::Assert { cond: op, .. } => { - self.add_operand(op); - } - TerminatorKind::Call { destination, func, args, .. } => { - self.add_place(*destination); - self.add_operand(func); - for arg in args { - self.add_operand(&arg.node); - } - } - TerminatorKind::TailCall { func, args, .. } => { - self.add_operand(func); - for arg in args { - self.add_operand(&arg.node); - } - } - TerminatorKind::InlineAsm { operands, .. } => { - for asm_operand in operands { - match asm_operand { - InlineAsmOperand::In { value, .. } => { - self.add_operand(value); - } - InlineAsmOperand::Out { place, .. } => { - if let Some(place) = place { - self.add_place(*place); - } - } - // Note that the `late` field in `InOut` is about whether the registers used - // for these things overlap, and is of absolutely no interest to us. - InlineAsmOperand::InOut { in_value, out_place, .. } => { - if let Some(place) = out_place { - self.add_place(*place); - } - self.add_operand(in_value); - } - InlineAsmOperand::Const { .. } - | InlineAsmOperand::SymFn { .. } - | InlineAsmOperand::SymStatic { .. } - | InlineAsmOperand::Label { .. } => {} - } - } - } - TerminatorKind::Goto { .. } - | TerminatorKind::UnwindResume - | TerminatorKind::UnwindTerminate(_) - | TerminatorKind::Return - | TerminatorKind::Unreachable { .. } => (), - TerminatorKind::Drop { .. } => { - // `Drop`s create a `&mut` and so are not considered - } - TerminatorKind::Yield { .. } - | TerminatorKind::CoroutineDrop - | TerminatorKind::FalseEdge { .. } - | TerminatorKind::FalseUnwind { .. } => { - bug!("{:?} not found in this MIR phase", terminator) - } - } - } - - fn add_place(&mut self, place: Place<'_>) { - self.writes.push(place.local); - } - - fn add_operand<'tcx>(&mut self, op: &Operand<'tcx>) { - match op { - // FIXME(JakobDegen): In a previous version, the `Move` case was incorrectly treated as - // being a read only. This was unsound, however we cannot add a regression test because - // it is not possible to set this off with current MIR. Once we have that ability, a - // regression test should be added. - Operand::Move(p) => self.add_place(*p), - Operand::Copy(_) | Operand::Constant(_) => (), - } - } - - fn reset(&mut self) { - self.writes.clear(); - self.skip_pair = None; + fn union(&mut self, lhs: RelevantLocal, rhs: RelevantLocal) -> RelevantLocal { + let head = self.renames.unify(lhs, rhs); + // We need to ensure we keep the original local of the RHS, as it may be a required local. + self.original[head] = self.original[rhs]; + head } } ///////////////////////////////////////////////////// // Candidate accumulation -/// If the pair of places is being considered for merging, returns the candidate which would be -/// merged in order to accomplish this. 
-/// -/// The contract here is in one direction - there is a guarantee that merging the locals that are -/// outputted by this function would result in an assignment between the inputs becoming a -/// self-assignment. However, there is no guarantee that the returned pair is actually suitable for -/// merging - candidate collection must still check this independently. -/// -/// This output is unique for each unordered pair of input places. -fn places_to_candidate_pair<'tcx>( - a: Place<'tcx>, - b: Place<'tcx>, - body: &Body<'tcx>, -) -> Option<(Local, Local)> { - let (mut a, mut b) = if a.projection.len() == 0 && b.projection.len() == 0 { - (a.local, b.local) - } else { - return None; - }; +#[derive(Debug, Default)] +struct Candidates { + /// The set of candidates we are considering in this optimization. + /// + /// Whether a place ends up in the key or the value does not correspond to whether it appears as + /// the lhs or rhs of any assignment. As a matter of fact, the places in here might never appear + /// in an assignment at all. This happens because if we see an assignment like this: + /// + /// ```ignore (syntax-highlighting-only) + /// _1.0 = _2.0 + /// ``` + /// + /// We will still report that we would like to merge `_1` and `_2` in an attempt to allow us to + /// remove that assignment. + c: Vec<(Local, Local)>, +} - // By sorting, we make sure we're input order independent - if a > b { - std::mem::swap(&mut a, &mut b); - } +// We first implement some utility functions which we will expose removing candidates according to +// different needs. Throughout the liveness filtering, the `candidates` are only ever accessed +// through these methods, and not directly. +impl Candidates { + /// Collects the candidates for merging. + /// + /// This is responsible for enforcing the first and third bullet point. + fn find(body: &Body<'_>, borrowed: &DenseBitSet<Local>) -> Candidates { + let mut visitor = FindAssignments { body, candidates: Default::default(), borrowed }; + visitor.visit_body(body); - // We could now return `(a, b)`, but then we miss some candidates in the case where `a` can't be - // used as a `src`. - if is_local_required(a, body) { - std::mem::swap(&mut a, &mut b); + Candidates { c: visitor.candidates } } - // We could check `is_local_required` again here, but there's no need - after all, we make no - // promise that the candidate pair is actually valid - Some((a, b)) } struct FindAssignments<'a, 'tcx> { body: &'a Body<'tcx>, - candidates: &'a mut FxIndexMap<Local, Vec<Local>>, + candidates: Vec<(Local, Local)>, borrowed: &'a DenseBitSet<Local>, } @@ -753,11 +410,9 @@ impl<'tcx> Visitor<'tcx> for FindAssignments<'_, 'tcx> { lhs, Rvalue::CopyForDeref(rhs) | Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)), )) = &statement.kind + && let Some(src) = lhs.as_local() + && let Some(dest) = rhs.as_local() { - let Some((src, dest)) = places_to_candidate_pair(*lhs, *rhs, self.body) else { - return; - }; - // As described at the top of the file, we do not go near things that have // their address taken. 
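// Editorial illustration, not part of the original diff: why address-taken locals
// are rejected as candidates. In hypothetical MIR such as
//
//     _3 = &mut _1;
//     _2 = copy _1;     // candidate assignment suggesting `_1` <-> `_2`
//     *_3 = const 5;    // writes `_1` through the pointer
//     use(_2);
//
// merging `_1` and `_2` would let the indirect write clobber the value observed
// through `_2`, so any local in `borrowed` is rejected by the check that follows.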
if self.borrowed.contains(src) || self.borrowed.contains(dest) { @@ -774,13 +429,8 @@ impl<'tcx> Visitor<'tcx> for FindAssignments<'_, 'tcx> { return; } - // Also, we need to make sure that MIR actually allows the `src` to be removed - if is_local_required(src, self.body) { - return; - } - // We may insert duplicates here, but that's fine - self.candidates.entry(src).or_default().push(dest); + self.candidates.push((src, dest)); } } } @@ -803,22 +453,162 @@ fn dest_prop_mir_dump<'tcx>( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, points: &DenseLocationMap, - live: &SparseIntervalMatrix<Local, PointIndex>, - round: usize, + live: &SparseIntervalMatrix<RelevantLocal, TwoStepIndex>, + relevant: &RelevantLocals, ) { let locals_live_at = |location| { - let location = points.point_from_location(location); - live.rows().filter(|&r| live.contains(r, location)).collect::<Vec<_>>() + live.rows() + .filter(|&r| live.contains(r, location)) + .map(|rl| relevant.original[rl]) + .collect::<Vec<_>>() }; if let Some(dumper) = MirDumper::new(tcx, "DestinationPropagation-dataflow", body) { let extra_data = &|pass_where, w: &mut dyn std::io::Write| { if let PassWhere::BeforeLocation(loc) = pass_where { - writeln!(w, " // live: {:?}", locals_live_at(loc))?; + let location = TwoStepIndex::new(points, loc, Effect::Before); + let live = locals_live_at(location); + writeln!(w, " // before: {:?} => {:?}", location, live)?; + } + if let PassWhere::AfterLocation(loc) = pass_where { + let location = TwoStepIndex::new(points, loc, Effect::After); + let live = locals_live_at(location); + writeln!(w, " // after: {:?} => {:?}", location, live)?; } Ok(()) }; - dumper.set_disambiguator(&round).set_extra_data(extra_data).dump_mir(body) + dumper.set_extra_data(extra_data).dump_mir(body) } } + +#[derive(Copy, Clone, Debug)] +enum Effect { + Before, + After, +} + +rustc_index::newtype_index! { + /// A reversed `PointIndex` but with the lower bit encoding early/late inside the statement. + /// The reversed order allows to use the more efficient `IntervalSet::append` method while we + /// iterate on the statements in reverse order. + #[orderable] + #[debug_format = "TwoStepIndex({})"] + struct TwoStepIndex {} +} + +impl TwoStepIndex { + fn new(elements: &DenseLocationMap, location: Location, effect: Effect) -> TwoStepIndex { + let point = elements.point_from_location(location); + let effect = match effect { + Effect::Before => 0, + Effect::After => 1, + }; + let max_index = 2 * elements.num_points() as u32 - 1; + let index = 2 * point.as_u32() + (effect as u32); + // Reverse the indexing to use more efficient `IntervalSet::append`. + TwoStepIndex::from_u32(max_index - index) + } +} + +struct VisitPlacesWith<F>(F); + +impl<'tcx, F> Visitor<'tcx> for VisitPlacesWith<F> +where + F: FnMut(Place<'tcx>, PlaceContext), +{ + fn visit_local(&mut self, local: Local, ctxt: PlaceContext, _: Location) { + (self.0)(local.into(), ctxt); + } + + fn visit_place(&mut self, place: &Place<'tcx>, ctxt: PlaceContext, location: Location) { + (self.0)(*place, ctxt); + self.visit_projection(place.as_ref(), ctxt, location); + } +} + +/// Add points depending on the result of the given dataflow analysis. 
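// Editorial worked example, not part of the original diff: for a body with
// `num_points() == 3`, `max_index` is `5` and `TwoStepIndex::new` maps locations as
//
//     point 0: Before -> 5, After -> 4
//     point 1: Before -> 3, After -> 2
//     point 2: Before -> 1, After -> 0
//
// i.e. later program points get smaller indices, so walking blocks and statements
// in reverse program order (as `save_as_intervals` below does) produces strictly
// increasing `TwoStepIndex` values, which is what the cheap interval `append`
// relies on.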
+fn save_as_intervals<'tcx>(
+    elements: &DenseLocationMap,
+    body: &Body<'tcx>,
+    relevant: &RelevantLocals,
+    results: Results<DenseBitSet<Local>>,
+) -> SparseIntervalMatrix<RelevantLocal, TwoStepIndex> {
+    let mut values = SparseIntervalMatrix::new(2 * elements.num_points());
+    let mut state = MaybeLiveLocals.bottom_value(body);
+    let reachable_blocks = traversal::reachable_as_bitset(body);
+
+    let two_step_loc = |location, effect| TwoStepIndex::new(elements, location, effect);
+    let append_at =
+        |values: &mut SparseIntervalMatrix<_, _>, state: &DenseBitSet<Local>, twostep| {
+            for (relevant, &original) in relevant.original.iter_enumerated() {
+                if state.contains(original) {
+                    values.append(relevant, twostep);
+                }
+            }
+        };
+
+    // Iterate blocks in decreasing order, to visit locations in decreasing order. This
+    // allows using the more efficient `append` method on interval sets.
+    for block in body.basic_blocks.indices().rev() {
+        if !reachable_blocks.contains(block) {
+            continue;
+        }
+
+        state.clone_from(&results[block]);
+
+        let block_data = &body.basic_blocks[block];
+        let loc = Location { block, statement_index: block_data.statements.len() };
+
+        let term = block_data.terminator();
+        let mut twostep = two_step_loc(loc, Effect::After);
+        append_at(&mut values, &state, twostep);
+        // Ensure we have a non-zero live range even for dead stores. This is done by marking all
+        // the written-to locals as live in the second half of the statement.
+        // We also ensure that operands read by terminators conflict with writes by that terminator.
+        // For instance, a function call may read args after having written to the destination.
+        VisitPlacesWith(|place, ctxt| match DefUse::for_place(place, ctxt) {
+            DefUse::Def | DefUse::Use | DefUse::PartialWrite => {
+                if let Some(relevant) = relevant.shrink[place.local] {
+                    values.insert(relevant, twostep);
+                }
+            }
+            DefUse::NonUse => {}
+        })
+        .visit_terminator(term, loc);
+
+        twostep = TwoStepIndex::from_u32(twostep.as_u32() + 1);
+        debug_assert_eq!(twostep, two_step_loc(loc, Effect::Before));
+        MaybeLiveLocals.apply_early_terminator_effect(&mut state, term, loc);
+        MaybeLiveLocals.apply_primary_terminator_effect(&mut state, term, loc);
+        append_at(&mut values, &state, twostep);
+
+        for (statement_index, stmt) in block_data.statements.iter().enumerate().rev() {
+            let loc = Location { block, statement_index };
+            twostep = TwoStepIndex::from_u32(twostep.as_u32() + 1);
+            debug_assert_eq!(twostep, two_step_loc(loc, Effect::After));
+            append_at(&mut values, &state, twostep);
+            // Ensure we have a non-zero live range even for dead stores. This is done by marking
+            // all the written-to locals as live in the second half of the statement.
+            VisitPlacesWith(|place, ctxt| match DefUse::for_place(place, ctxt) {
+                DefUse::Def | DefUse::PartialWrite => {
+                    if let Some(relevant) = relevant.shrink[place.local] {
+                        values.insert(relevant, twostep);
+                    }
+                }
+                DefUse::Use | DefUse::NonUse => {}
+            })
+            .visit_statement(stmt, loc);
+
+            twostep = TwoStepIndex::from_u32(twostep.as_u32() + 1);
+            debug_assert_eq!(twostep, two_step_loc(loc, Effect::Before));
+            MaybeLiveLocals.apply_early_statement_effect(&mut state, stmt, loc);
+            MaybeLiveLocals.apply_primary_statement_effect(&mut state, stmt, loc);
+            // ... but reads from operands are marked as live here so they do not conflict with
+            // all the writes we manually marked as live in the second half of the statement.
+ append_at(&mut values, &state, twostep); + } + } + + values +} diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 5a13394543b..bf6aa800d20 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -87,6 +87,7 @@ use std::borrow::Cow; use either::Either; +use itertools::Itertools as _; use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx}; use rustc_const_eval::const_eval::DummyMachine; use rustc_const_eval::interpret::{ @@ -895,18 +896,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { fn simplify_aggregate_to_copy( &mut self, - lhs: &Place<'tcx>, - rvalue: &mut Rvalue<'tcx>, - location: Location, - fields: &[VnIndex], + ty: Ty<'tcx>, variant_index: VariantIdx, + fields: &[VnIndex], ) -> Option<VnIndex> { - let Some(&first_field) = fields.first() else { - return None; - }; - let Value::Projection(copy_from_value, _) = *self.get(first_field) else { - return None; - }; + let Some(&first_field) = fields.first() else { return None }; + let Value::Projection(copy_from_value, _) = *self.get(first_field) else { return None }; + // All fields must correspond one-to-one and come from the same aggregate value. if fields.iter().enumerate().any(|(index, &v)| { if let Value::Projection(pointer, ProjectionElem::Field(from_index, _)) = *self.get(v) @@ -933,21 +929,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } } - // Allow introducing places with non-constant offsets, as those are still better than - // reconstructing an aggregate. - if self.ty(copy_from_local_value) == rvalue.ty(self.local_decls, self.tcx) - && let Some(place) = self.try_as_place(copy_from_local_value, location, true) - { - // Avoid creating `*a = copy (*b)`, as they might be aliases resulting in overlapping assignments. - // FIXME: This also avoids any kind of projection, not just derefs. We can add allowed projections. - if lhs.as_local().is_some() { - self.reused_locals.insert(place.local); - *rvalue = Rvalue::Use(Operand::Copy(place)); - } - return Some(copy_from_local_value); - } - - None + // Both must be variants of the same type. + if self.ty(copy_from_local_value) == ty { Some(copy_from_local_value) } else { None } } fn simplify_aggregate( @@ -1023,20 +1006,27 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } }; - if ty.is_array() && fields.len() > 4 { - let first = fields[0]; - if fields.iter().all(|&v| v == first) { - let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap()); - if let Some(op) = self.try_as_operand(first, location) { - *rvalue = Rvalue::Repeat(op, len); - } - return Some(self.insert(ty, Value::Repeat(first, len))); + if ty.is_array() + && fields.len() > 4 + && let Ok(&first) = fields.iter().all_equal_value() + { + let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap()); + if let Some(op) = self.try_as_operand(first, location) { + *rvalue = Rvalue::Repeat(op, len); } + return Some(self.insert(ty, Value::Repeat(first, len))); } - if let Some(value) = - self.simplify_aggregate_to_copy(lhs, rvalue, location, &fields, variant_index) - { + if let Some(value) = self.simplify_aggregate_to_copy(ty, variant_index, &fields) { + // Allow introducing places with non-constant offsets, as those are still better than + // reconstructing an aggregate. But avoid creating `*a = copy (*b)`, as they might be + // aliases resulting in overlapping assignments. 
+ let allow_complex_projection = + lhs.projection[..].iter().all(PlaceElem::is_stable_offset); + if let Some(place) = self.try_as_place(value, location, allow_complex_projection) { + self.reused_locals.insert(place.local); + *rvalue = Rvalue::Use(Operand::Copy(place)); + } return Some(value); } @@ -1654,6 +1644,11 @@ impl<'tcx> VnState<'_, 'tcx> { let place = Place { local, projection: self.tcx.mk_place_elems(projection.as_slice()) }; return Some(place); + } else if projection.last() == Some(&PlaceElem::Deref) { + // `Deref` can only be the first projection in a place. + // If we are here, we failed to find a local, and we already have a `Deref`. + // Trying to add projections will only result in an ill-formed place. + return None; } else if let Value::Projection(pointer, proj) = *self.get(index) && (allow_complex_projection || proj.is_stable_offset()) && let Some(proj) = self.try_as_place_elem(self.ty(index), proj, loc) diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs index 7f9234d1dc8..25a9baffe58 100644 --- a/compiler/rustc_mir_transform/src/inline/cycle.rs +++ b/compiler/rustc_mir_transform/src/inline/cycle.rs @@ -2,9 +2,9 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet}; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::unord::UnordSet; use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::limit::Limit; use rustc_middle::mir::TerminatorKind; use rustc_middle::ty::{self, GenericArgsRef, InstanceKind, TyCtxt, TypeVisitableExt}; -use rustc_session::Limit; use rustc_span::sym; use tracing::{instrument, trace}; diff --git a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs index 794984d2f3e..61c9bbe3123 100644 --- a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs +++ b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs @@ -2,6 +2,7 @@ use std::cell::RefCell; use std::collections::hash_map; use std::rc::Rc; +use itertools::Itertools as _; use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_errors::Subdiagnostic; @@ -339,9 +340,9 @@ pub(crate) fn run_lint<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &Body< // Suppose that all BIDs point into the same local, // we can remove the this local from the observed drops, // so that we can focus our diagnosis more on the others. 
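// Editorial sketch, not part of the original diff: this hunk, like the GVN and
// simplify hunks elsewhere in this change, replaces a hand-rolled "all elements
// equal" check with `Itertools::all_equal_value`. A minimal, self-contained
// illustration of the behaviour relied on (the function name is hypothetical):
fn all_equal_value_demo() {
    use itertools::Itertools as _;
    // Non-empty iterator whose items all compare equal: the shared value is returned.
    assert_eq!([1, 1, 1].iter().all_equal_value(), Ok(&1));
    // Mismatching items or an empty iterator both yield `Err`.
    assert!([1, 2].iter().all_equal_value().is_err());
    assert!([0u32; 0].iter().all_equal_value().is_err());
}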
- if candidates.iter().all(|&(_, place)| candidates[0].1.local == place.local) { + if let Ok(local) = candidates.iter().map(|&(_, place)| place.local).all_equal_value() { for path_idx in all_locals_dropped.iter() { - if move_data.move_paths[path_idx].place.local == candidates[0].1.local { + if move_data.move_paths[path_idx].place.local == local { to_exclude.insert(path_idx); } } diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 462ddfa3dd3..9ea2eb4f25d 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -875,7 +875,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let mut promoted_operand = |ty, span| { promoted.span = span; promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span); - let args = tcx.erase_regions(GenericArgs::identity_for_item(tcx, def)); + let args = + tcx.erase_and_anonymize_regions(GenericArgs::identity_for_item(tcx, def)); let uneval = mir::UnevaluatedConst { def, args, promoted: Some(next_promoted_index) }; diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs index 468ef742dfb..75917d23883 100644 --- a/compiler/rustc_mir_transform/src/simplify.rs +++ b/compiler/rustc_mir_transform/src/simplify.rs @@ -34,6 +34,7 @@ //! The normal logic that a program with UB can be changed to do anything does not apply to //! pre-"runtime" MIR! +use itertools::Itertools as _; use rustc_index::{Idx, IndexSlice, IndexVec}; use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -288,20 +289,13 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> { return false; }; - let first_succ = { - if let Some(first_succ) = terminator.successors().next() { - if terminator.successors().all(|s| s == first_succ) { - let count = terminator.successors().count(); - self.pred_count[first_succ] -= (count - 1) as u32; - first_succ - } else { - return false; - } - } else { - return false; - } + let Ok(first_succ) = terminator.successors().all_equal_value() else { + return false; }; + let count = terminator.successors().count(); + self.pred_count[first_succ] -= (count - 1) as u32; + debug!("simplifying branch {:?}", terminator); terminator.kind = TerminatorKind::Goto { target: first_succ }; true diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs index cd9a7f4a39d..73c249a3c8c 100644 --- a/compiler/rustc_mir_transform/src/ssa.rs +++ b/compiler/rustc_mir_transform/src/ssa.rs @@ -225,6 +225,9 @@ impl SsaVisitor<'_, '_> { impl<'tcx> Visitor<'tcx> for SsaVisitor<'_, 'tcx> { fn visit_local(&mut self, local: Local, ctxt: PlaceContext, loc: Location) { + if ctxt.may_observe_address() { + self.borrowed_locals.insert(local); + } match ctxt { PlaceContext::MutatingUse(MutatingUseContext::Projection) | PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => bug!(), @@ -237,7 +240,6 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_, 'tcx> { PlaceContext::NonMutatingUse( NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::FakeBorrow, ) => { - self.borrowed_locals.insert(local); self.check_dominates(local, loc); self.direct_uses[local] += 1; } diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index cffeb6f9807..378d71cb1fc 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -217,6 +217,7 @@ use 
rustc_hir::attrs::InlineAttr; use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId}; use rustc_hir::lang_items::LangItem; +use rustc_hir::limit::Limit; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::interpret::{AllocId, ErrorHandled, GlobalAlloc, Scalar}; use rustc_middle::mir::mono::{CollectionMode, InstantiationMode, MonoItem}; @@ -231,7 +232,6 @@ use rustc_middle::ty::{ }; use rustc_middle::util::Providers; use rustc_middle::{bug, span_bug}; -use rustc_session::Limit; use rustc_session::config::{DebugInfo, EntryFnType}; use rustc_span::source_map::{Spanned, dummy_spanned, respan}; use rustc_span::{DUMMY_SP, Span}; @@ -1558,7 +1558,7 @@ impl<'v> RootCollector<'_, 'v> { ty::Closure(def_id, args) | ty::Coroutine(def_id, args) | ty::CoroutineClosure(def_id, args) => { - Instance::new_raw(def_id, self.tcx.erase_regions(args)) + Instance::new_raw(def_id, self.tcx.erase_and_anonymize_regions(args)) } _ => unreachable!(), }; diff --git a/compiler/rustc_monomorphize/src/mono_checks/move_check.rs b/compiler/rustc_monomorphize/src/mono_checks/move_check.rs index 7251ef478c6..0adf1b089b5 100644 --- a/compiler/rustc_monomorphize/src/mono_checks/move_check.rs +++ b/compiler/rustc_monomorphize/src/mono_checks/move_check.rs @@ -1,10 +1,10 @@ use rustc_abi::Size; use rustc_data_structures::fx::FxIndexSet; use rustc_hir::def_id::DefId; +use rustc_hir::limit::Limit; use rustc_middle::mir::visit::Visitor as MirVisitor; use rustc_middle::mir::{self, Location, traversal}; use rustc_middle::ty::{self, AssocTag, Instance, Ty, TyCtxt, TypeFoldable}; -use rustc_session::Limit; use rustc_session::lint::builtin::LARGE_ASSIGNMENTS; use rustc_span::source_map::Spanned; use rustc_span::{Ident, Span, sym}; diff --git a/compiler/rustc_next_trait_solver/src/canonicalizer.rs b/compiler/rustc_next_trait_solver/src/canonicalizer.rs index da05c49756f..a8f2b4e8db6 100644 --- a/compiler/rustc_next_trait_solver/src/canonicalizer.rs +++ b/compiler/rustc_next_trait_solver/src/canonicalizer.rs @@ -2,9 +2,8 @@ use rustc_type_ir::data_structures::{HashMap, ensure_sufficient_stack}; use rustc_type_ir::inherent::*; use rustc_type_ir::solve::{Goal, QueryInput}; use rustc_type_ir::{ - self as ty, Canonical, CanonicalParamEnvCacheEntry, CanonicalTyVarKind, CanonicalVarKind, - Flags, InferCtxtLike, Interner, TypeFlags, TypeFoldable, TypeFolder, TypeSuperFoldable, - TypeVisitableExt, + self as ty, Canonical, CanonicalParamEnvCacheEntry, CanonicalVarKind, Flags, InferCtxtLike, + Interner, TypeFlags, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt, }; use crate::delegate::SolverDelegate; @@ -68,6 +67,13 @@ pub struct Canonicalizer<'a, D: SolverDelegate<Interner = I>, I: Interner> { variables: &'a mut Vec<I::GenericArg>, var_kinds: Vec<CanonicalVarKind<I>>, variable_lookup_table: HashMap<I::GenericArg, usize>, + /// Maps each `sub_unification_table_root_var` to the index of the first + /// variable which used it. + /// + /// This means in case two type variables have the same sub relations root, + /// we set the `sub_root` of the second variable to the position of the first. + /// Otherwise the `sub_root` of each type variable is just its own position. + sub_root_lookup_table: HashMap<ty::TyVid, usize>, binder_index: ty::DebruijnIndex, /// We only use the debruijn index during lookup. 
We don't need to @@ -89,6 +95,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { variables, variable_lookup_table: Default::default(), + sub_root_lookup_table: Default::default(), var_kinds: Vec::new(), binder_index: ty::INNERMOST, @@ -133,6 +140,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { variables: &mut variables, variable_lookup_table: Default::default(), + sub_root_lookup_table: Default::default(), var_kinds: Vec::new(), binder_index: ty::INNERMOST, @@ -140,6 +148,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { }; let param_env = param_env.fold_with(&mut env_canonicalizer); debug_assert_eq!(env_canonicalizer.binder_index, ty::INNERMOST); + debug_assert!(env_canonicalizer.sub_root_lookup_table.is_empty()); CanonicalParamEnvCacheEntry { param_env, variable_lookup_table: env_canonicalizer.variable_lookup_table, @@ -165,6 +174,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { variables, variable_lookup_table: Default::default(), + sub_root_lookup_table: Default::default(), var_kinds: Vec::new(), binder_index: ty::INNERMOST, @@ -172,6 +182,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { }; let param_env = param_env.fold_with(&mut env_canonicalizer); debug_assert_eq!(env_canonicalizer.binder_index, ty::INNERMOST); + debug_assert!(env_canonicalizer.sub_root_lookup_table.is_empty()); (param_env, env_canonicalizer.variable_lookup_table, env_canonicalizer.var_kinds) } } @@ -200,6 +211,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { variables, variable_lookup_table, + sub_root_lookup_table: Default::default(), var_kinds, binder_index: ty::INNERMOST, @@ -266,6 +278,13 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { ty::BoundVar::from(idx) } + fn get_or_insert_sub_root(&mut self, vid: ty::TyVid) -> ty::BoundVar { + let root_vid = self.delegate.sub_unification_table_root_var(vid); + let idx = + *self.sub_root_lookup_table.entry(root_vid).or_insert_with(|| self.variables.len()); + ty::BoundVar::from(idx) + } + fn finalize(self) -> (ty::UniverseIndex, I::CanonicalVarKinds) { let mut var_kinds = self.var_kinds; // See the rustc-dev-guide section about how we deal with universes @@ -313,18 +332,15 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { "ty vid should have been resolved fully before canonicalization" ); - match self.canonicalize_mode { - CanonicalizeMode::Input { .. } => CanonicalVarKind::Ty( - CanonicalTyVarKind::General(ty::UniverseIndex::ROOT), - ), - CanonicalizeMode::Response { .. } => { - CanonicalVarKind::Ty(CanonicalTyVarKind::General( - self.delegate.universe_of_ty(vid).unwrap_or_else(|| { - panic!("ty var should have been resolved: {t:?}") - }), - )) - } - } + let sub_root = self.get_or_insert_sub_root(vid); + let ui = match self.canonicalize_mode { + CanonicalizeMode::Input { .. } => ty::UniverseIndex::ROOT, + CanonicalizeMode::Response { .. 
} => self + .delegate + .universe_of_ty(vid) + .unwrap_or_else(|| panic!("ty var should have been resolved: {t:?}")), + }; + CanonicalVarKind::Ty { ui, sub_root } } ty::IntVar(vid) => { debug_assert_eq!( @@ -332,7 +348,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { t, "ty vid should have been resolved fully before canonicalization" ); - CanonicalVarKind::Ty(CanonicalTyVarKind::Int) + CanonicalVarKind::Int } ty::FloatVar(vid) => { debug_assert_eq!( @@ -340,7 +356,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> { t, "ty vid should have been resolved fully before canonicalization" ); - CanonicalVarKind::Ty(CanonicalTyVarKind::Float) + CanonicalVarKind::Float } ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) => { panic!("fresh vars not expected in canonicalization") diff --git a/compiler/rustc_next_trait_solver/src/coherence.rs b/compiler/rustc_next_trait_solver/src/coherence.rs index 79219b34e29..b949deb1192 100644 --- a/compiler/rustc_next_trait_solver/src/coherence.rs +++ b/compiler/rustc_next_trait_solver/src/coherence.rs @@ -435,7 +435,21 @@ where } } ty::Error(_) => ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)), - ty::Closure(did, ..) | ty::CoroutineClosure(did, ..) | ty::Coroutine(did, ..) => { + ty::Closure(did, ..) => { + if self.def_id_is_local(did) { + ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)) + } else { + self.found_non_local_ty(ty) + } + } + ty::CoroutineClosure(did, ..) => { + if self.def_id_is_local(did) { + ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)) + } else { + self.found_non_local_ty(ty) + } + } + ty::Coroutine(did, ..) => { if self.def_id_is_local(did) { ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)) } else { diff --git a/compiler/rustc_next_trait_solver/src/delegate.rs b/compiler/rustc_next_trait_solver/src/delegate.rs index 7b932010d49..41c2e5f60ec 100644 --- a/compiler/rustc_next_trait_solver/src/delegate.rs +++ b/compiler/rustc_next_trait_solver/src/delegate.rs @@ -57,12 +57,14 @@ pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized { where V: TypeFoldable<Self::Interner>; - fn instantiate_canonical_var_with_infer( + fn instantiate_canonical_var( &self, kind: ty::CanonicalVarKind<Self::Interner>, span: <Self::Interner as Interner>::Span, + var_values: &[<Self::Interner as Interner>::GenericArg], universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> <Self::Interner as Interner>::GenericArg; + fn add_item_bounds_for_hidden_type( &self, def_id: <Self::Interner as Interner>::DefId, @@ -76,7 +78,7 @@ pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized { &self, goal_trait_ref: ty::TraitRef<Self::Interner>, trait_assoc_def_id: <Self::Interner as Interner>::DefId, - impl_def_id: <Self::Interner as Interner>::DefId, + impl_def_id: <Self::Interner as Interner>::ImplId, ) -> Result< Option<<Self::Interner as Interner>::DefId>, <Self::Interner as Interner>::ErrorGuaranteed, diff --git a/compiler/rustc_next_trait_solver/src/solve/assembly/mod.rs b/compiler/rustc_next_trait_solver/src/solve/assembly/mod.rs index be7e4dd4cda..fb777496e31 100644 --- a/compiler/rustc_next_trait_solver/src/solve/assembly/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/assembly/mod.rs @@ -11,8 +11,9 @@ use rustc_type_ir::lang_items::SolverTraitLangItem; use rustc_type_ir::search_graph::CandidateHeadUsages; use rustc_type_ir::solve::SizedTraitKind; use rustc_type_ir::{ - self as ty, Interner, TypeFlags, TypeFoldable, TypeSuperVisitable, 
TypeVisitable, - TypeVisitableExt as _, TypeVisitor, TypingMode, Upcast as _, elaborate, + self as ty, Interner, TypeFlags, TypeFoldable, TypeFolder, TypeSuperFoldable, + TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor, TypingMode, Upcast, + elaborate, }; use tracing::{debug, instrument}; @@ -186,7 +187,8 @@ where fn consider_impl_candidate( ecx: &mut EvalCtxt<'_, D>, goal: Goal<I, Self>, - impl_def_id: I::DefId, + impl_def_id: I::ImplId, + then: impl FnOnce(&mut EvalCtxt<'_, D>, Certainty) -> QueryResult<I>, ) -> Result<Candidate<I>, NoSolution>; /// If the predicate contained an error, we want to avoid emitting unnecessary trait @@ -365,6 +367,15 @@ pub(super) enum AssembleCandidatesFrom { EnvAndBounds, } +impl AssembleCandidatesFrom { + fn should_assemble_impl_candidates(&self) -> bool { + match self { + AssembleCandidatesFrom::All => true, + AssembleCandidatesFrom::EnvAndBounds => false, + } + } +} + /// This is currently used to track the [CandidateHeadUsages] of all failed `ParamEnv` /// candidates. This is then used to ignore their head usages in case there's another /// always applicable `ParamEnv` candidate. Look at how `param_env_head_usages` is @@ -397,14 +408,15 @@ where return (candidates, failed_candidate_info); }; + let goal: Goal<I, G> = goal + .with(self.cx(), goal.predicate.with_replaced_self_ty(self.cx(), normalized_self_ty)); + if normalized_self_ty.is_ty_var() { debug!("self type has been normalized to infer"); - candidates.extend(self.forced_ambiguity(MaybeCause::Ambiguity)); + self.try_assemble_bounds_via_registered_opaques(goal, assemble_from, &mut candidates); return (candidates, failed_candidate_info); } - let goal: Goal<I, G> = goal - .with(self.cx(), goal.predicate.with_replaced_self_ty(self.cx(), normalized_self_ty)); // Vars that show up in the rest of the goal substs may have been constrained by // normalizing the self type as well, since type variables are not uniquified. let goal = self.resolve_vars_if_possible(goal); @@ -484,8 +496,9 @@ where if cx.impl_is_default(impl_def_id) { return; } - - match G::consider_impl_candidate(self, goal, impl_def_id) { + match G::consider_impl_candidate(self, goal, impl_def_id, |ecx, certainty| { + ecx.evaluate_added_goals_and_make_canonical_response(certainty) + }) { Ok(candidate) => candidates.push(candidate), Err(NoSolution) => (), } @@ -943,6 +956,116 @@ where } } + /// If the self type is the hidden type of an opaque, try to assemble + /// candidates for it by consider its item bounds and by using blanket + /// impls. This is used to incompletely guide type inference when handling + /// non-defining uses in the defining scope. + /// + /// We otherwise just fail fail with ambiguity. Even if we're using an + /// opaque type item bound or a blank impls, we still force its certainty + /// to be `Maybe` so that we properly prove this goal later. + /// + /// See <https://github.com/rust-lang/trait-system-refactor-initiative/issues/182> + /// for why this is necessary. + fn try_assemble_bounds_via_registered_opaques<G: GoalKind<D>>( + &mut self, + goal: Goal<I, G>, + assemble_from: AssembleCandidatesFrom, + candidates: &mut Vec<Candidate<I>>, + ) { + let self_ty = goal.predicate.self_ty(); + // If the self type is sub unified with any opaque type, we + // also look at blanket impls for it. 
+ let mut assemble_blanket_impls = false; + for alias_ty in self.opaques_with_sub_unified_hidden_type(self_ty) { + assemble_blanket_impls = true; + debug!("self ty is sub unified with {alias_ty:?}"); + + struct ReplaceOpaque<I: Interner> { + cx: I, + alias_ty: ty::AliasTy<I>, + self_ty: I::Ty, + } + impl<I: Interner> TypeFolder<I> for ReplaceOpaque<I> { + fn cx(&self) -> I { + self.cx + } + fn fold_ty(&mut self, ty: I::Ty) -> I::Ty { + if let ty::Alias(ty::Opaque, alias_ty) = ty.kind() { + if alias_ty == self.alias_ty { + return self.self_ty; + } + } + ty.super_fold_with(self) + } + } + + // We look at all item-bounds of the opaque, replacing the + // opaque with the current self type before considering + // them as a candidate. Imagine e've got `?x: Trait<?y>` + // and `?x` has been sub-unified with the hidden type of + // `impl Trait<u32>`, We take the item bound `opaque: Trait<u32>` + // and replace all occurrences of `opaque` with `?x`. This results + // in a `?x: Trait<u32>` alias-bound candidate. + for item_bound in self + .cx() + .item_self_bounds(alias_ty.def_id) + .iter_instantiated(self.cx(), alias_ty.args) + { + let assumption = + item_bound.fold_with(&mut ReplaceOpaque { cx: self.cx(), alias_ty, self_ty }); + candidates.extend(G::probe_and_match_goal_against_assumption( + self, + CandidateSource::AliasBound, + goal, + assumption, + |ecx| { + // We want to reprove this goal once we've inferred the + // hidden type, so we force the certainty to `Maybe`. + ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS) + }, + )); + } + } + + // We also need to consider blanket impls for not-yet-defined opaque types. + // + // See tests/ui/impl-trait/non-defining-uses/use-blanket-impl.rs for an example. + if assemble_blanket_impls && assemble_from.should_assemble_impl_candidates() { + let cx = self.cx(); + cx.for_each_blanket_impl(goal.predicate.trait_def_id(cx), |impl_def_id| { + // For every `default impl`, there's always a non-default `impl` + // that will *also* apply. There's no reason to register a candidate + // for this impl, since it is *not* proof that the trait goal holds. + if cx.impl_is_default(impl_def_id) { + return; + } + + match G::consider_impl_candidate(self, goal, impl_def_id, |ecx, certainty| { + if ecx.shallow_resolve(self_ty).is_ty_var() { + // We force the certainty of impl candidates to be `Maybe`. + let certainty = certainty.and(Certainty::AMBIGUOUS); + ecx.evaluate_added_goals_and_make_canonical_response(certainty) + } else { + // We don't want to use impls if they constrain the opaque. + // + // FIXME(trait-system-refactor-initiative#229): This isn't + // perfect yet as it still allows us to incorrectly constrain + // other inference variables. + Err(NoSolution) + } + }) { + Ok(candidate) => candidates.push(candidate), + Err(NoSolution) => (), + } + }); + } + + if candidates.is_empty() { + candidates.extend(self.forced_ambiguity(MaybeCause::Ambiguity)); + } + } + /// Assemble and merge candidates for goals which are related to an underlying trait /// goal. Right now, this is normalizes-to and host effect goals. 
/// diff --git a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs index 7c5940828da..f6eab286ba7 100644 --- a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs +++ b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs @@ -605,7 +605,7 @@ fn coroutine_closure_to_certain_coroutine<I: Interner>( cx: I, goal_kind: ty::ClosureKind, goal_region: I::Region, - def_id: I::DefId, + def_id: I::CoroutineClosureId, args: ty::CoroutineClosureArgs<I>, sig: ty::CoroutineClosureSignature<I>, ) -> I::Ty { @@ -629,7 +629,7 @@ fn coroutine_closure_to_ambiguous_coroutine<I: Interner>( cx: I, goal_kind: ty::ClosureKind, goal_region: I::Region, - def_id: I::DefId, + def_id: I::CoroutineClosureId, args: ty::CoroutineClosureArgs<I>, sig: ty::CoroutineClosureSignature<I>, ) -> I::Ty { @@ -664,7 +664,7 @@ fn coroutine_closure_to_ambiguous_coroutine<I: Interner>( pub(in crate::solve) fn extract_fn_def_from_const_callable<I: Interner>( cx: I, self_ty: I::Ty, -) -> Result<(ty::Binder<I, (I::FnInputTys, I::Ty)>, I::DefId, I::GenericArgs), NoSolution> { +) -> Result<(ty::Binder<I, (I::FnInputTys, I::Ty)>, I::FunctionId, I::GenericArgs), NoSolution> { match self_ty.kind() { ty::FnDef(def_id, args) => { let sig = cx.fn_sig(def_id); diff --git a/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs b/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs index 229345065b1..cb72c1cd92b 100644 --- a/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs +++ b/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs @@ -123,7 +123,8 @@ where fn consider_impl_candidate( ecx: &mut EvalCtxt<'_, D>, goal: Goal<I, Self>, - impl_def_id: I::DefId, + impl_def_id: I::ImplId, + then: impl FnOnce(&mut EvalCtxt<'_, D>, Certainty) -> QueryResult<I>, ) -> Result<Candidate<I>, NoSolution> { let cx = ecx.cx(); @@ -152,20 +153,20 @@ where } ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| { - let impl_args = ecx.fresh_args_for_item(impl_def_id); + let impl_args = ecx.fresh_args_for_item(impl_def_id.into()); ecx.record_impl_args(impl_args); let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args); ecx.eq(goal.param_env, goal.predicate.trait_ref, impl_trait_ref)?; let where_clause_bounds = cx - .predicates_of(impl_def_id) + .predicates_of(impl_def_id.into()) .iter_instantiated(cx, impl_args) .map(|pred| goal.with(cx, pred)); ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds); // For this impl to be `const`, we need to check its `[const]` bounds too. 
let const_conditions = cx - .const_conditions(impl_def_id) + .const_conditions(impl_def_id.into()) .iter_instantiated(cx, impl_args) .map(|bound_trait_ref| { goal.with( @@ -175,7 +176,7 @@ where }); ecx.add_goals(GoalSource::ImplWhereBound, const_conditions); - ecx.evaluate_added_goals_and_make_canonical_response(certainty) + then(ecx, certainty) }) } @@ -240,7 +241,7 @@ where ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output]) }); let requirements = cx - .const_conditions(def_id) + .const_conditions(def_id.into()) .iter_instantiated(cx, args) .map(|trait_ref| { ( diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs index 6f9f4067384..169832ca5fb 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs @@ -16,7 +16,8 @@ use rustc_type_ir::data_structures::HashSet; use rustc_type_ir::inherent::*; use rustc_type_ir::relate::solver_relating::RelateExt; use rustc_type_ir::{ - self as ty, Canonical, CanonicalVarValues, InferCtxtLike, Interner, TypeFoldable, + self as ty, Canonical, CanonicalVarKind, CanonicalVarValues, InferCtxtLike, Interner, + TypeFoldable, }; use tracing::{debug, instrument, trace}; @@ -336,7 +337,16 @@ where { match result_value.kind() { ty::GenericArgKind::Type(t) => { - if let ty::Bound(debruijn, b) = t.kind() { + // We disable the instantiation guess for inference variables + // and only use it for placeholders. We need to handle the + // `sub_root` of type inference variables which would make this + // more involved. They are also a lot rarer than region variables. + if let ty::Bound(debruijn, b) = t.kind() + && !matches!( + response.variables.get(b.var().as_usize()).unwrap(), + CanonicalVarKind::Ty { .. } + ) + { assert_eq!(debruijn, ty::INNERMOST); opt_values[b.var()] = Some(*original_value); } @@ -355,39 +365,33 @@ where } } } - - let var_values = delegate.cx().mk_args_from_iter( - response.variables.iter().enumerate().map(|(index, var_kind)| { - if var_kind.universe() != ty::UniverseIndex::ROOT { - // A variable from inside a binder of the query. While ideally these shouldn't - // exist at all (see the FIXME at the start of this method), we have to deal with - // them for now. - delegate.instantiate_canonical_var_with_infer(var_kind, span, |idx| { - prev_universe + idx.index() - }) - } else if var_kind.is_existential() { - // As an optimization we sometimes avoid creating a new inference variable here. - // - // All new inference variables we create start out in the current universe of the caller. - // This is conceptually wrong as these inference variables would be able to name - // more placeholders then they should be able to. However the inference variables have - // to "come from somewhere", so by equating them with the original values of the caller - // later on, we pull them down into their correct universe again. - if let Some(v) = opt_values[ty::BoundVar::from_usize(index)] { - v - } else { - delegate - .instantiate_canonical_var_with_infer(var_kind, span, |_| prev_universe) - } + CanonicalVarValues::instantiate(delegate.cx(), response.variables, |var_values, kind| { + if kind.universe() != ty::UniverseIndex::ROOT { + // A variable from inside a binder of the query. While ideally these shouldn't + // exist at all (see the FIXME at the start of this method), we have to deal with + // them for now. 
+ delegate.instantiate_canonical_var(kind, span, &var_values, |idx| { + prev_universe + idx.index() + }) + } else if kind.is_existential() { + // As an optimization we sometimes avoid creating a new inference variable here. + // + // All new inference variables we create start out in the current universe of the caller. + // This is conceptually wrong as these inference variables would be able to name + // more placeholders then they should be able to. However the inference variables have + // to "come from somewhere", so by equating them with the original values of the caller + // later on, we pull them down into their correct universe again. + if let Some(v) = opt_values[ty::BoundVar::from_usize(var_values.len())] { + v } else { - // For placeholders which were already part of the input, we simply map this - // universal bound variable back the placeholder of the input. - original_values[var_kind.expect_placeholder_index()] + delegate.instantiate_canonical_var(kind, span, &var_values, |_| prev_universe) } - }), - ); - - CanonicalVarValues { var_values } + } else { + // For placeholders which were already part of the input, we simply map this + // universal bound variable back the placeholder of the input. + original_values[kind.expect_placeholder_index()] + } + }) } /// Unify the `original_values` with the `var_values` returned by the canonical query.. diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs index 31106a74527..3e3a5246f3d 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs @@ -407,20 +407,21 @@ where // If we have run this goal before, and it was stalled, check that any of the goal's // args have changed. Otherwise, we don't need to re-run the goal because it'll remain // stalled, since it'll canonicalize the same way and evaluation is pure. - if let Some(stalled_on) = stalled_on - && !stalled_on.stalled_vars.iter().any(|value| self.delegate.is_changed_arg(*value)) - && !self - .delegate - .opaque_types_storage_num_entries() - .needs_reevaluation(stalled_on.num_opaques) + if let Some(GoalStalledOn { num_opaques, ref stalled_vars, ref sub_roots, stalled_cause }) = + stalled_on + && !stalled_vars.iter().any(|value| self.delegate.is_changed_arg(*value)) + && !sub_roots + .iter() + .any(|&vid| self.delegate.sub_unification_table_root_var(vid) != vid) + && !self.delegate.opaque_types_storage_num_entries().needs_reevaluation(num_opaques) { return Ok(( NestedNormalizationGoals::empty(), GoalEvaluation { goal, - certainty: Certainty::Maybe(stalled_on.stalled_cause), + certainty: Certainty::Maybe(stalled_cause), has_changed: HasChanged::No, - stalled_on: Some(stalled_on), + stalled_on, }, )); } @@ -476,16 +477,6 @@ where HasChanged::No => { let mut stalled_vars = orig_values; - // Remove the canonicalized universal vars, since we only care about stalled existentials. - stalled_vars.retain(|arg| match arg.kind() { - ty::GenericArgKind::Type(ty) => matches!(ty.kind(), ty::Infer(_)), - ty::GenericArgKind::Const(ct) => { - matches!(ct.kind(), ty::ConstKind::Infer(_)) - } - // Lifetimes can never stall goals. - ty::GenericArgKind::Lifetime(_) => false, - }); - // Remove the unconstrained RHS arg, which is expected to have changed. 
if let Some(normalizes_to) = goal.predicate.as_normalizes_to() { let normalizes_to = normalizes_to.skip_binder(); @@ -497,6 +488,27 @@ where stalled_vars.swap_remove(idx); } + // Remove the canonicalized universal vars, since we only care about stalled existentials. + let mut sub_roots = Vec::new(); + stalled_vars.retain(|arg| match arg.kind() { + // Lifetimes can never stall goals. + ty::GenericArgKind::Lifetime(_) => false, + ty::GenericArgKind::Type(ty) => match ty.kind() { + ty::Infer(ty::TyVar(vid)) => { + sub_roots.push(self.delegate.sub_unification_table_root_var(vid)); + true + } + ty::Infer(_) => true, + ty::Param(_) | ty::Placeholder(_) => false, + _ => unreachable!("unexpected orig_value: {ty:?}"), + }, + ty::GenericArgKind::Const(ct) => match ct.kind() { + ty::ConstKind::Infer(_) => true, + ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(_) => false, + _ => unreachable!("unexpected orig_value: {ct:?}"), + }, + }); + Some(GoalStalledOn { num_opaques: canonical_goal .canonical @@ -505,6 +517,7 @@ where .opaque_types .len(), stalled_vars, + sub_roots, stalled_cause, }) } @@ -900,6 +913,10 @@ where && goal.param_env.visit_with(&mut visitor).is_continue() } + pub(super) fn sub_unify_ty_vids_raw(&self, a: ty::TyVid, b: ty::TyVid) { + self.delegate.sub_unify_ty_vids_raw(a, b) + } + #[instrument(level = "trace", skip(self, param_env), ret)] pub(super) fn eq<T: Relate<I>>( &mut self, @@ -1043,6 +1060,10 @@ where self.delegate.resolve_vars_if_possible(value) } + pub(super) fn shallow_resolve(&self, ty: I::Ty) -> I::Ty { + self.delegate.shallow_resolve(ty) + } + pub(super) fn eager_resolve_region(&self, r: I::Region) -> I::Region { if let ty::ReVar(vid) = r.kind() { self.delegate.opportunistic_resolve_lt_var(vid) @@ -1092,7 +1113,7 @@ where &self, goal_trait_ref: ty::TraitRef<I>, trait_assoc_def_id: I::DefId, - impl_def_id: I::DefId, + impl_def_id: I::ImplId, ) -> Result<Option<I::DefId>, I::ErrorGuaranteed> { self.delegate.fetch_eligible_assoc_item(goal_trait_ref, trait_assoc_def_id, impl_def_id) } @@ -1159,6 +1180,33 @@ where ) -> bool { may_use_unstable_feature(&**self.delegate, param_env, symbol) } + + pub(crate) fn opaques_with_sub_unified_hidden_type( + &self, + self_ty: I::Ty, + ) -> impl Iterator<Item = ty::AliasTy<I>> + use<'a, D, I> { + let delegate = self.delegate; + delegate + .clone_opaque_types_lookup_table() + .into_iter() + .chain(delegate.clone_duplicate_opaque_types()) + .filter_map(move |(key, hidden_ty)| { + if let ty::Infer(ty::TyVar(self_vid)) = self_ty.kind() { + if let ty::Infer(ty::TyVar(hidden_vid)) = hidden_ty.kind() { + if delegate.sub_unification_table_root_var(self_vid) + == delegate.sub_unification_table_root_var(hidden_vid) + { + return Some(ty::AliasTy::new_from_args( + delegate.cx(), + key.def_id.into(), + key.args, + )); + } + } + } + None + }) + } } /// Eagerly replace aliases with inference variables, emitting `AliasRelate` @@ -1275,7 +1323,7 @@ pub fn evaluate_root_goal_for_proof_tree_raw_provider< >( cx: I, canonical_goal: CanonicalInput<I>, -) -> (QueryResult<I>, I::ProbeRef) { +) -> (QueryResult<I>, I::Probe) { let mut inspect = inspect::ProofTreeBuilder::new(); let canonical_result = SearchGraph::<D>::evaluate_root_goal_for_proof_tree( cx, @@ -1284,7 +1332,7 @@ pub fn evaluate_root_goal_for_proof_tree_raw_provider< &mut inspect, ); let final_revision = inspect.unwrap(); - (canonical_result, cx.mk_probe_ref(final_revision)) + (canonical_result, cx.mk_probe(final_revision)) } /// Evaluate a goal to build a proof tree. 
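[Illustrative note, not part of the patch.] The new `try_assemble_bounds_via_registered_opaques` path and the sub-unification plumbing above are easiest to picture from the user side. Below is a hedged sketch of the kind of code they are meant to guide, with all names hypothetical: a non-defining use of an opaque type inside its own defining scope, where the opaque's item bound (or, failing that, a blanket impl) guides inference with `Maybe` certainty instead of constraining the hidden type.

trait Trait<T> {
    fn get(&self) -> T;
}

// A blanket impl of the kind `for_each_blanket_impl` considers.
impl<T: Copy> Trait<T> for T {
    fn get(&self) -> T {
        *self
    }
}

fn opaque(recurse: bool) -> impl Trait<u32> {
    if recurse {
        // Non-defining use inside the defining scope: roughly speaking, in the
        // new solver the type of `x` is an inference variable sub-unified with
        // the hidden type of `impl Trait<u32>`, so the item bound
        // `impl Trait<u32>: Trait<u32>` becomes an alias-bound candidate that
        // lets `x.get()` be typed as `u32` without constraining the hidden
        // type itself.
        let x = opaque(false);
        let _: u32 = x.get();
    }
    0u32
}

fn main() {
    let _ = opaque(true).get();
}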
diff --git a/compiler/rustc_next_trait_solver/src/solve/mod.rs b/compiler/rustc_next_trait_solver/src/solve/mod.rs index 85f9d852d95..cd27c9c26c1 100644 --- a/compiler/rustc_next_trait_solver/src/solve/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/mod.rs @@ -24,7 +24,7 @@ mod trait_goals; use derive_where::derive_where; use rustc_type_ir::inherent::*; pub use rustc_type_ir::solve::*; -use rustc_type_ir::{self as ty, Interner, TypingMode}; +use rustc_type_ir::{self as ty, Interner, TyVid, TypingMode}; use tracing::instrument; pub use self::eval_ctxt::{ @@ -119,11 +119,15 @@ where #[instrument(level = "trace", skip(self))] fn compute_subtype_goal(&mut self, goal: Goal<I, ty::SubtypePredicate<I>>) -> QueryResult<I> { - if goal.predicate.a.is_ty_var() && goal.predicate.b.is_ty_var() { - self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS) - } else { - self.sub(goal.param_env, goal.predicate.a, goal.predicate.b)?; - self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) + match (goal.predicate.a.kind(), goal.predicate.b.kind()) { + (ty::Infer(ty::TyVar(a_vid)), ty::Infer(ty::TyVar(b_vid))) => { + self.sub_unify_ty_vids_raw(a_vid, b_vid); + self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS) + } + _ => { + self.sub(goal.param_env, goal.predicate.a, goal.predicate.b)?; + self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) + } } } @@ -422,6 +426,7 @@ pub struct GoalEvaluation<I: Interner> { pub struct GoalStalledOn<I: Interner> { pub num_opaques: usize, pub stalled_vars: Vec<I::GenericArg>, + pub sub_roots: Vec<TyVid>, /// The cause that will be returned on subsequent evaluations if this goal remains stalled. pub stalled_cause: MaybeCause, } diff --git a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs index cfdf2007391..54b92ebac1d 100644 --- a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs @@ -5,7 +5,7 @@ mod opaque_types; use rustc_type_ir::fast_reject::DeepRejectCtxt; use rustc_type_ir::inherent::*; -use rustc_type_ir::lang_items::{SolverLangItem, SolverTraitLangItem}; +use rustc_type_ir::lang_items::{SolverAdtLangItem, SolverLangItem, SolverTraitLangItem}; use rustc_type_ir::solve::SizedTraitKind; use rustc_type_ir::{self as ty, Interner, NormalizesTo, PredicateKind, Upcast as _}; use tracing::instrument; @@ -193,7 +193,8 @@ where fn consider_impl_candidate( ecx: &mut EvalCtxt<'_, D>, goal: Goal<I, NormalizesTo<I>>, - impl_def_id: I::DefId, + impl_def_id: I::ImplId, + then: impl FnOnce(&mut EvalCtxt<'_, D>, Certainty) -> QueryResult<I>, ) -> Result<Candidate<I>, NoSolution> { let cx = ecx.cx(); @@ -217,13 +218,13 @@ where }; ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| { - let impl_args = ecx.fresh_args_for_item(impl_def_id); + let impl_args = ecx.fresh_args_for_item(impl_def_id.into()); let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args); ecx.eq(goal.param_env, goal_trait_ref, impl_trait_ref)?; let where_clause_bounds = cx - .predicates_of(impl_def_id) + .predicates_of(impl_def_id.into()) .iter_instantiated(cx, impl_args) .map(|pred| goal.with(cx, pred)); ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds); @@ -314,8 +315,7 @@ where // nested goal for consistency. 
ty::TypingMode::Coherence => { ecx.add_goal(GoalSource::Misc, goal.with(cx, PredicateKind::Ambiguous)); - return ecx - .evaluate_added_goals_and_make_canonical_response(Certainty::Yes); + return then(ecx, Certainty::Yes); } ty::TypingMode::Analysis { .. } | ty::TypingMode::Borrowck { .. } @@ -325,8 +325,7 @@ where goal, goal.predicate.alias, ); - return ecx - .evaluate_added_goals_and_make_canonical_response(Certainty::Yes); + return then(ecx, Certainty::Yes); } } } else { @@ -824,10 +823,10 @@ where // coroutine yield ty `Poll<Option<I>>`. let wrapped_expected_ty = Ty::new_adt( cx, - cx.adt_def(cx.require_lang_item(SolverLangItem::Poll)), + cx.adt_def(cx.require_adt_lang_item(SolverAdtLangItem::Poll)), cx.mk_args(&[Ty::new_adt( cx, - cx.adt_def(cx.require_lang_item(SolverLangItem::Option)), + cx.adt_def(cx.require_adt_lang_item(SolverAdtLangItem::Option)), cx.mk_args(&[expected_ty.into()]), ) .into()]), @@ -979,7 +978,7 @@ where fn translate_args( &mut self, goal: Goal<I, ty::NormalizesTo<I>>, - impl_def_id: I::DefId, + impl_def_id: I::ImplId, impl_args: I::GenericArgs, impl_trait_ref: rustc_type_ir::TraitRef<I>, target_container_def_id: I::DefId, @@ -988,14 +987,15 @@ where Ok(if target_container_def_id == impl_trait_ref.def_id.into() { // Default value from the trait definition. No need to rebase. goal.predicate.alias.args - } else if target_container_def_id == impl_def_id { + } else if target_container_def_id == impl_def_id.into() { // Same impl, no need to fully translate, just a rebase from // the trait is sufficient. goal.predicate.alias.args.rebase_onto(cx, impl_trait_ref.def_id.into(), impl_args) } else { let target_args = self.fresh_args_for_item(target_container_def_id); - let target_trait_ref = - cx.impl_trait_ref(target_container_def_id).instantiate(cx, target_args); + let target_trait_ref = cx + .impl_trait_ref(target_container_def_id.try_into().unwrap()) + .instantiate(cx, target_args); // Relate source impl to target impl by equating trait refs. 
self.eq(goal.param_env, impl_trait_ref, target_trait_ref)?; // Also add predicates since they may be needed to constrain the diff --git a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs index cdcfebf2909..a69e867289c 100644 --- a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs +++ b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs @@ -54,7 +54,8 @@ where fn consider_impl_candidate( ecx: &mut EvalCtxt<'_, D>, goal: Goal<I, TraitPredicate<I>>, - impl_def_id: I::DefId, + impl_def_id: I::ImplId, + then: impl FnOnce(&mut EvalCtxt<'_, D>, Certainty) -> QueryResult<I>, ) -> Result<Candidate<I>, NoSolution> { let cx = ecx.cx(); @@ -91,13 +92,13 @@ where }; ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| { - let impl_args = ecx.fresh_args_for_item(impl_def_id); + let impl_args = ecx.fresh_args_for_item(impl_def_id.into()); ecx.record_impl_args(impl_args); let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args); ecx.eq(goal.param_env, goal.predicate.trait_ref, impl_trait_ref)?; let where_clause_bounds = cx - .predicates_of(impl_def_id) + .predicates_of(impl_def_id.into()) .iter_instantiated(cx, impl_args) .map(|pred| goal.with(cx, pred)); ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds); @@ -112,7 +113,7 @@ where .map(|pred| goal.with(cx, pred)), ); - ecx.evaluate_added_goals_and_make_canonical_response(maximal_certainty) + then(ecx, maximal_certainty) }) } diff --git a/compiler/rustc_parse/messages.ftl b/compiler/rustc_parse/messages.ftl index 77dd313d9b8..72cd75f6d89 100644 --- a/compiler/rustc_parse/messages.ftl +++ b/compiler/rustc_parse/messages.ftl @@ -479,9 +479,6 @@ parse_invalid_identifier_with_leading_number = identifiers cannot start with a n parse_invalid_literal_suffix_on_tuple_index = suffixes on a tuple index are invalid .label = invalid suffix `{$suffix}` - .tuple_exception_line_1 = `{$suffix}` is *temporarily* accepted on tuple index fields as it was incorrectly accepted on stable for a few releases - .tuple_exception_line_2 = on proc macros, you'll want to use `syn::Index::from` or `proc_macro::Literal::*_unsuffixed` for code that will desugar to tuple field access - .tuple_exception_line_3 = see issue #60210 <https://github.com/rust-lang/rust/issues/60210> for more information parse_invalid_logical_operator = `{$incorrect}` is not a logical operator .note = unlike in e.g., Python and PHP, `&&` and `||` are used for logical operators diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs index 797d4830c2f..00ca5acd84d 100644 --- a/compiler/rustc_parse/src/errors.rs +++ b/compiler/rustc_parse/src/errors.rs @@ -1017,10 +1017,6 @@ pub(crate) struct InvalidLiteralSuffixOnTupleIndex { #[label] pub span: Span, pub suffix: Symbol, - #[help(parse_tuple_exception_line_1)] - #[help(parse_tuple_exception_line_2)] - #[help(parse_tuple_exception_line_3)] - pub exception: bool, } #[derive(Diagnostic)] diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs index e3bd6a9a327..51019db7c00 100644 --- a/compiler/rustc_parse/src/lexer/mod.rs +++ b/compiler/rustc_parse/src/lexer/mod.rs @@ -44,19 +44,44 @@ pub(crate) struct UnmatchedDelim { pub candidate_span: Option<Span>, } +/// Which tokens should be stripped before lexing the tokens. +pub enum StripTokens { + /// Strip both shebang and frontmatter. + ShebangAndFrontmatter, + /// Strip the shebang but not frontmatter. 
+ /// + /// That means that char sequences looking like frontmatter are simply + /// interpreted as regular Rust lexemes. + Shebang, + /// Strip nothing. + /// + /// In other words, char sequences looking like a shebang or frontmatter + /// are simply interpreted as regular Rust lexemes. + Nothing, +} + pub(crate) fn lex_token_trees<'psess, 'src>( psess: &'psess ParseSess, mut src: &'src str, mut start_pos: BytePos, override_span: Option<Span>, - frontmatter_allowed: FrontmatterAllowed, + strip_tokens: StripTokens, ) -> Result<TokenStream, Vec<Diag<'psess>>> { - // Skip `#!`, if present. - if let Some(shebang_len) = rustc_lexer::strip_shebang(src) { - src = &src[shebang_len..]; - start_pos = start_pos + BytePos::from_usize(shebang_len); + match strip_tokens { + StripTokens::Shebang | StripTokens::ShebangAndFrontmatter => { + if let Some(shebang_len) = rustc_lexer::strip_shebang(src) { + src = &src[shebang_len..]; + start_pos = start_pos + BytePos::from_usize(shebang_len); + } + } + StripTokens::Nothing => {} } + let frontmatter_allowed = match strip_tokens { + StripTokens::ShebangAndFrontmatter => FrontmatterAllowed::Yes, + StripTokens::Shebang | StripTokens::Nothing => FrontmatterAllowed::No, + }; + let cursor = Cursor::new(src, frontmatter_allowed); let mut lexer = Lexer { psess, diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs index b790966acfd..88b67d792de 100644 --- a/compiler/rustc_parse/src/lib.rs +++ b/compiler/rustc_parse/src/lib.rs @@ -21,7 +21,6 @@ use rustc_ast::tokenstream::{DelimSpan, TokenStream}; use rustc_ast::{AttrItem, Attribute, MetaItemInner, token}; use rustc_ast_pretty::pprust; use rustc_errors::{Diag, EmissionGuarantee, FatalError, PResult, pluralize}; -use rustc_lexer::FrontmatterAllowed; use rustc_session::parse::ParseSess; use rustc_span::source_map::SourceMap; use rustc_span::{FileName, SourceFile, Span}; @@ -34,6 +33,8 @@ pub mod parser; use parser::Parser; use rustc_ast::token::Delimiter; +use crate::lexer::StripTokens; + pub mod lexer; mod errors; @@ -53,29 +54,18 @@ pub fn unwrap_or_emit_fatal<T>(expr: Result<T, Vec<Diag<'_>>>) -> T { } } -/// Creates a new parser from a source string. On failure, the errors must be consumed via -/// `unwrap_or_emit_fatal`, `emit`, `cancel`, etc., otherwise a panic will occur when they are -/// dropped. -pub fn new_parser_from_source_str( - psess: &ParseSess, - name: FileName, - source: String, -) -> Result<Parser<'_>, Vec<Diag<'_>>> { - let source_file = psess.source_map().new_source_file(name, source); - new_parser_from_source_file(psess, source_file, FrontmatterAllowed::Yes) -} - -/// Creates a new parser from a simple (no frontmatter) source string. +/// Creates a new parser from a source string. /// /// On failure, the errors must be consumed via `unwrap_or_emit_fatal`, `emit`, `cancel`, /// etc., otherwise a panic will occur when they are dropped. -pub fn new_parser_from_simple_source_str( +pub fn new_parser_from_source_str( psess: &ParseSess, name: FileName, source: String, + strip_tokens: StripTokens, ) -> Result<Parser<'_>, Vec<Diag<'_>>> { let source_file = psess.source_map().new_source_file(name, source); - new_parser_from_source_file(psess, source_file, FrontmatterAllowed::No) + new_parser_from_source_file(psess, source_file, strip_tokens) } /// Creates a new parser from a filename. 
On failure, the errors must be consumed via @@ -86,6 +76,7 @@ pub fn new_parser_from_simple_source_str( pub fn new_parser_from_file<'a>( psess: &'a ParseSess, path: &Path, + strip_tokens: StripTokens, sp: Option<Span>, ) -> Result<Parser<'a>, Vec<Diag<'a>>> { let sm = psess.source_map(); @@ -109,7 +100,7 @@ pub fn new_parser_from_file<'a>( } err.emit(); }); - new_parser_from_source_file(psess, source_file, FrontmatterAllowed::Yes) + new_parser_from_source_file(psess, source_file, strip_tokens) } pub fn utf8_error<E: EmissionGuarantee>( @@ -160,10 +151,10 @@ pub fn utf8_error<E: EmissionGuarantee>( fn new_parser_from_source_file( psess: &ParseSess, source_file: Arc<SourceFile>, - frontmatter_allowed: FrontmatterAllowed, + strip_tokens: StripTokens, ) -> Result<Parser<'_>, Vec<Diag<'_>>> { let end_pos = source_file.end_position(); - let stream = source_file_to_stream(psess, source_file, None, frontmatter_allowed)?; + let stream = source_file_to_stream(psess, source_file, None, strip_tokens)?; let mut parser = Parser::new(psess, stream, None); if parser.token == token::Eof { parser.token.span = Span::new(end_pos, end_pos, parser.token.span.ctxt(), None); @@ -171,6 +162,9 @@ fn new_parser_from_source_file( Ok(parser) } +/// Given a source string, produces a sequence of token trees. +/// +/// NOTE: This only strips shebangs, not frontmatter! pub fn source_str_to_stream( psess: &ParseSess, name: FileName, @@ -178,18 +172,21 @@ pub fn source_str_to_stream( override_span: Option<Span>, ) -> Result<TokenStream, Vec<Diag<'_>>> { let source_file = psess.source_map().new_source_file(name, source); - // used mainly for `proc_macro` and the likes, not for our parsing purposes, so don't parse - // frontmatters as frontmatters. - source_file_to_stream(psess, source_file, override_span, FrontmatterAllowed::No) + // FIXME(frontmatter): Consider stripping frontmatter in a future edition. We can't strip them + // in the current edition since that would be breaking. + // See also <https://github.com/rust-lang/rust/issues/145520>. + // Alternatively, stop stripping shebangs here, too, if T-lang and crater approve. + source_file_to_stream(psess, source_file, override_span, StripTokens::Shebang) } -/// Given a source file, produces a sequence of token trees. Returns any buffered errors from -/// parsing the token stream. +/// Given a source file, produces a sequence of token trees. +/// +/// Returns any buffered errors from parsing the token stream. fn source_file_to_stream<'psess>( psess: &'psess ParseSess, source_file: Arc<SourceFile>, override_span: Option<Span>, - frontmatter_allowed: FrontmatterAllowed, + strip_tokens: StripTokens, ) -> Result<TokenStream, Vec<Diag<'psess>>> { let src = source_file.src.as_ref().unwrap_or_else(|| { psess.dcx().bug(format!( @@ -198,13 +195,7 @@ fn source_file_to_stream<'psess>( )); }); - lexer::lex_token_trees( - psess, - src.as_str(), - source_file.start_pos, - override_span, - frontmatter_allowed, - ) + lexer::lex_token_trees(psess, src.as_str(), source_file.start_pos, override_span, strip_tokens) } /// Runs the given subparser `f` on the tokens of the given `attr`'s item. 
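[Illustrative note, not part of the patch.] The `StripTokens` argument now threaded through `new_parser_from_source_str`, `new_parser_from_file`, and `source_file_to_stream` controls whether a leading shebang and frontmatter block are skipped before lexing. A hedged sketch with a purely hypothetical cargo-script style input:

#!/usr/bin/env cargo
---
[dependencies]
anyhow = "1"
---

fn main() { println!("hello"); }

`StripTokens::ShebangAndFrontmatter` skips both the `#!` line and the `---` block, `StripTokens::Shebang` skips only the `#!` line and lexes the `---` block as ordinary tokens, and `StripTokens::Nothing` lexes the whole input as-is (the mode the parser tests now request). A hypothetical call site, assuming a `ParseSess` value `psess` and a `String` named `src` are in scope:

let parser = unwrap_or_emit_fatal(new_parser_from_source_str(
    &psess,
    FileName::anon_source_code(&src),
    src.clone(),
    StripTokens::ShebangAndFrontmatter,
));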
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 9b09cbba7af..81a5d48d94e 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -1163,7 +1163,10 @@ impl<'a> Parser<'a> { suffix, }) => { if let Some(suffix) = suffix { - self.expect_no_tuple_index_suffix(current.span, suffix); + self.dcx().emit_err(errors::InvalidLiteralSuffixOnTupleIndex { + span: current.span, + suffix, + }); } match self.break_up_float(symbol, current.span) { // 1e2 @@ -1239,7 +1242,8 @@ impl<'a> Parser<'a> { suffix: Option<Symbol>, ) -> Box<Expr> { if let Some(suffix) = suffix { - self.expect_no_tuple_index_suffix(ident_span, suffix); + self.dcx() + .emit_err(errors::InvalidLiteralSuffixOnTupleIndex { span: ident_span, suffix }); } self.mk_expr(lo.to(ident_span), ExprKind::Field(base, Ident::new(field, ident_span))) } @@ -2225,24 +2229,6 @@ impl<'a> Parser<'a> { }) } - pub(super) fn expect_no_tuple_index_suffix(&self, span: Span, suffix: Symbol) { - if [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suffix) { - // #59553: warn instead of reject out of hand to allow the fix to percolate - // through the ecosystem when people fix their macros - self.dcx().emit_warn(errors::InvalidLiteralSuffixOnTupleIndex { - span, - suffix, - exception: true, - }); - } else { - self.dcx().emit_err(errors::InvalidLiteralSuffixOnTupleIndex { - span, - suffix, - exception: false, - }); - } - } - /// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`). /// Keep this in sync with `Token::can_begin_literal_maybe_minus`. pub fn parse_literal_maybe_minus(&mut self) -> PResult<'a, Box<Expr>> { @@ -2742,7 +2728,7 @@ impl<'a> Parser<'a> { /// The specified `edition` in `let_chains_policy` should be that of the whole `if` construct, /// i.e. the same span we use to later decide whether the drop behaviour should be that of /// edition `..=2021` or that of `2024..`. - // Public because it is used in rustfmt forks such as https://github.com/tucant/rustfmt/blob/30c83df9e1db10007bdd16dafce8a86b404329b2/src/parse/macros/html.rs#L57 for custom if expressions. 
+ // Public to use it for custom `if` expressions in rustfmt forks like https://github.com/tucant/rustfmt pub fn parse_expr_cond( &mut self, let_chains_policy: LetChainsPolicy, diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index 15598f32429..ed4069dae93 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -22,6 +22,8 @@ use std::{fmt, mem, slice}; use attr_wrapper::{AttrWrapper, UsePreAttrPos}; pub use diagnostics::AttemptLocalParseRecovery; pub(crate) use expr::ForbiddenLetReason; +// Public to use it for custom `if` expressions in rustfmt forks like https://github.com/tucant/rustfmt +pub use expr::LetChainsPolicy; pub(crate) use item::{FnContext, FnParseMode}; pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma}; pub use path::PathStyle; @@ -1333,7 +1335,10 @@ impl<'a> Parser<'a> { if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = self.token.kind { if let Some(suffix) = suffix { - self.expect_no_tuple_index_suffix(self.token.span, suffix); + self.dcx().emit_err(errors::InvalidLiteralSuffixOnTupleIndex { + span: self.token.span, + suffix, + }); } self.bump(); Ok(Ident::new(symbol, self.prev_token.span)) diff --git a/compiler/rustc_parse/src/parser/tests.rs b/compiler/rustc_parse/src/parser/tests.rs index a6e7266e71b..e645fb47b9e 100644 --- a/compiler/rustc_parse/src/parser/tests.rs +++ b/compiler/rustc_parse/src/parser/tests.rs @@ -22,6 +22,7 @@ use rustc_span::{ }; use termcolor::WriteColor; +use crate::lexer::StripTokens; use crate::parser::{ForceCollect, Parser}; use crate::{new_parser_from_source_str, source_str_to_stream, unwrap_or_emit_fatal}; @@ -35,6 +36,7 @@ fn string_to_parser(psess: &ParseSess, source_str: String) -> Parser<'_> { psess, PathBuf::from("bogofile").into(), source_str, + StripTokens::Nothing, )) } @@ -2240,7 +2242,7 @@ fn parse_item_from_source_str( source: String, psess: &ParseSess, ) -> PResult<'_, Option<Box<ast::Item>>> { - unwrap_or_emit_fatal(new_parser_from_source_str(psess, name, source)) + unwrap_or_emit_fatal(new_parser_from_source_str(psess, name, source, StripTokens::Nothing)) .parse_item(ForceCollect::No) } @@ -2520,7 +2522,8 @@ fn ttdelim_span() { source: String, psess: &ParseSess, ) -> PResult<'_, Box<ast::Expr>> { - unwrap_or_emit_fatal(new_parser_from_source_str(psess, name, source)).parse_expr() + unwrap_or_emit_fatal(new_parser_from_source_str(psess, name, source, StripTokens::Nothing)) + .parse_expr() } create_default_session_globals_then(|| { diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs index 6168647183f..23aaafac934 100644 --- a/compiler/rustc_parse/src/parser/ty.rs +++ b/compiler/rustc_parse/src/parser/ty.rs @@ -92,10 +92,10 @@ fn can_continue_type_after_non_fn_ident(t: &Token) -> bool { } fn can_begin_dyn_bound_in_edition_2015(t: &Token) -> bool { - // `Not`, `Tilde` & `Const` are deliberately not part of this list to + // `!`, `const`, `[`, `async` are deliberately not part of this list to // contain the number of potential regressions esp. in MBE code. - // `Const` would regress `rfc-2632-const-trait-impl/mbe-dyn-const-2015.rs`. - // `Not` would regress `dyn!(...)` macro calls in Rust 2015. + // `const` and `[` would regress UI test `macro-dyn-const-2015.rs`. + // `!` would regress `dyn!(...)` macro calls in Rust 2015. 
t.is_path_start() || t.is_lifetime() || t == &TokenKind::Question @@ -1015,12 +1015,18 @@ impl<'a> Parser<'a> { || self.check(exp!(Tilde)) || self.check_keyword(exp!(For)) || self.check(exp!(OpenParen)) - || self.check(exp!(OpenBracket)) + || self.can_begin_maybe_const_bound() || self.check_keyword(exp!(Const)) || self.check_keyword(exp!(Async)) || self.check_keyword(exp!(Use)) } + fn can_begin_maybe_const_bound(&mut self) -> bool { + self.check(exp!(OpenBracket)) + && self.look_ahead(1, |t| t.is_keyword(kw::Const)) + && self.look_ahead(2, |t| *t == token::CloseBracket) + } + /// Parse a bound. /// /// ```ebnf @@ -1199,10 +1205,7 @@ impl<'a> Parser<'a> { let span = tilde.to(self.prev_token.span); self.psess.gated_spans.gate(sym::const_trait_impl, span); BoundConstness::Maybe(span) - } else if self.check(exp!(OpenBracket)) - && self.look_ahead(1, |t| t.is_keyword(kw::Const)) - && self.look_ahead(2, |t| *t == token::CloseBracket) - { + } else if self.can_begin_maybe_const_bound() { let start = self.token.span; self.bump(); self.expect_keyword(exp!(Const)).unwrap(); diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl index 1dd96240f0a..75537caa894 100644 --- a/compiler/rustc_passes/messages.ftl +++ b/compiler/rustc_passes/messages.ftl @@ -501,6 +501,10 @@ passes_repr_align_should_be_align = `#[repr(align(...))]` is not supported on {$item} .help = use `#[rustc_align(...)]` instead +passes_repr_align_should_be_align_static = + `#[repr(align(...))]` is not supported on {$item} + .help = use `#[rustc_align_static(...)]` instead + passes_repr_conflicting = conflicting representation hints diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 2cd4830b5d9..2562d2e0b83 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -270,6 +270,12 @@ impl<'tcx> CheckAttrVisitor<'tcx> { | AttributeKind::Linkage(..) | AttributeKind::MustUse { .. } | AttributeKind::CrateName { .. } + | AttributeKind::RecursionLimit { .. } + | AttributeKind::MoveSizeLimit { .. } + | AttributeKind::TypeLengthLimit { .. } + | AttributeKind::PatternComplexityLimit { .. } + | AttributeKind::NoCore { .. } + | AttributeKind::NoStd { .. } ) => { /* do nothing */ } Attribute::Unparsed(attr_item) => { style = Some(attr_item.style); @@ -1602,12 +1608,18 @@ impl<'tcx> CheckAttrVisitor<'tcx> { ReprAttr::ReprAlign(align) => { match target { Target::Struct | Target::Union | Target::Enum => {} - Target::Fn | Target::Method(_) => { + Target::Fn | Target::Method(_) if self.tcx.features().fn_align() => { self.dcx().emit_err(errors::ReprAlignShouldBeAlign { span: *repr_span, item: target.plural_name(), }); } + Target::Static if self.tcx.features().static_align() => { + self.dcx().emit_err(errors::ReprAlignShouldBeAlignStatic { + span: *repr_span, + item: target.plural_name(), + }); + } _ => { self.dcx().emit_err(errors::AttrApplication::StructEnumUnion { hint_span: *repr_span, diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs index fc33405d455..3c2c9683a4d 100644 --- a/compiler/rustc_passes/src/dead.rs +++ b/compiler/rustc_passes/src/dead.rs @@ -373,31 +373,27 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { /// Automatically generated items marked with `rustc_trivial_field_reads` /// will be ignored for the purposes of dead code analysis (see PR #85200 /// for discussion). 
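    /// For example (hypothetical illustration, not part of this change): assuming
    /// `PartialEq` carries `rustc_trivial_field_reads`,
    ///
    ///     #[derive(PartialEq)]
    ///     struct S { field: u32 }
    ///
    /// the derived `eq` does read `field`, but that read alone is ignored here,
    /// so `field` can still be reported as never read.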
- fn should_ignore_item(&mut self, def_id: DefId) -> bool { - if let Some(impl_of) = self.tcx.trait_impl_of_assoc(def_id) { - if !self.tcx.is_automatically_derived(impl_of) { - return false; - } - - if let Some(trait_of) = self.tcx.trait_id_of_impl(impl_of) - && self.tcx.has_attr(trait_of, sym::rustc_trivial_field_reads) + fn should_ignore_impl_item(&mut self, impl_item: &hir::ImplItem<'_>) -> bool { + if let hir::ImplItemImplKind::Trait { .. } = impl_item.impl_kind + && let impl_of = self.tcx.parent(impl_item.owner_id.to_def_id()) + && self.tcx.is_automatically_derived(impl_of) + && let trait_ref = self.tcx.impl_trait_ref(impl_of).unwrap().instantiate_identity() + && self.tcx.has_attr(trait_ref.def_id, sym::rustc_trivial_field_reads) + { + if let ty::Adt(adt_def, _) = trait_ref.self_ty().kind() + && let Some(adt_def_id) = adt_def.did().as_local() { - let trait_ref = self.tcx.impl_trait_ref(impl_of).unwrap().instantiate_identity(); - if let ty::Adt(adt_def, _) = trait_ref.self_ty().kind() - && let Some(adt_def_id) = adt_def.did().as_local() - { - self.ignored_derived_traits.entry(adt_def_id).or_default().insert(trait_of); - } - return true; + self.ignored_derived_traits.entry(adt_def_id).or_default().insert(trait_ref.def_id); } + return true; } false } fn visit_node(&mut self, node: Node<'tcx>) { - if let Node::ImplItem(hir::ImplItem { owner_id, .. }) = node - && self.should_ignore_item(owner_id.to_def_id()) + if let Node::ImplItem(impl_item) = node + && self.should_ignore_impl_item(impl_item) { return; } @@ -439,7 +435,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { } Node::ImplItem(impl_item) => { let item = self.tcx.local_parent(impl_item.owner_id.def_id); - if self.tcx.impl_trait_ref(item).is_none() { + if let hir::ImplItemImplKind::Inherent { .. } = impl_item.impl_kind { //// If it's a type whose items are live, then it's live, too. 
//// This is done to handle the case where, for example, the static //// method of a private type is used, but the type itself is never @@ -484,13 +480,11 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { fn check_impl_or_impl_item_live(&mut self, local_def_id: LocalDefId) -> bool { let (impl_block_id, trait_def_id) = match self.tcx.def_kind(local_def_id) { // assoc impl items of traits are live if the corresponding trait items are live - DefKind::AssocConst | DefKind::AssocTy | DefKind::AssocFn => ( - self.tcx.local_parent(local_def_id), - self.tcx - .associated_item(local_def_id) - .trait_item_def_id - .and_then(|def_id| def_id.as_local()), - ), + DefKind::AssocConst | DefKind::AssocTy | DefKind::AssocFn => { + let trait_item_id = + self.tcx.trait_item_of(local_def_id).and_then(|def_id| def_id.as_local()); + (self.tcx.local_parent(local_def_id), trait_item_id) + } // impl items are live if the corresponding traits are live DefKind::Impl { of_trait: true } => ( local_def_id, diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs index 23dcabef1a1..2da4b6f52cf 100644 --- a/compiler/rustc_passes/src/errors.rs +++ b/compiler/rustc_passes/src/errors.rs @@ -1610,6 +1610,15 @@ pub(crate) struct ReprAlignShouldBeAlign { } #[derive(Diagnostic)] +#[diag(passes_repr_align_should_be_align_static)] +pub(crate) struct ReprAlignShouldBeAlignStatic { + #[primary_span] + #[help] + pub span: Span, + pub item: &'static str, +} + +#[derive(Diagnostic)] #[diag(passes_custom_mir_phase_requires_dialect)] pub(crate) struct CustomMirPhaseRequiresDialect { #[primary_span] diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs index 71650c6b9b9..2ee1bd0dfd1 100644 --- a/compiler/rustc_passes/src/stability.rs +++ b/compiler/rustc_passes/src/stability.rs @@ -21,8 +21,8 @@ use rustc_middle::middle::lib_features::{FeatureStability, LibFeatures}; use rustc_middle::middle::privacy::EffectiveVisibilities; use rustc_middle::middle::stability::{AllowUnstable, Deprecated, DeprecationEntry, EvalResult}; use rustc_middle::query::{LocalCrate, Providers}; -use rustc_middle::ty::TyCtxt; use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::{AssocContainer, TyCtxt}; use rustc_session::lint; use rustc_session::lint::builtin::{DEPRECATED, INEFFECTIVE_UNSTABLE_TRAIT_IMPL}; use rustc_span::{Span, Symbol, sym}; @@ -486,8 +486,7 @@ impl<'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'tcx> { fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) { self.check_compatible_stability(ii.owner_id.def_id); - let impl_def_id = self.tcx.hir_get_parent_item(ii.hir_id()); - if self.tcx.impl_trait_ref(impl_def_id).is_none() { + if let hir::ImplItemImplKind::Inherent { .. } = ii.impl_kind { self.check_missing_stability(ii.owner_id.def_id); self.check_missing_const_stability(ii.owner_id.def_id); } @@ -711,7 +710,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { for impl_item_ref in items { let impl_item = self.tcx.associated_item(impl_item_ref.owner_id); - if let Some(def_id) = impl_item.trait_item_def_id { + if let AssocContainer::TraitImpl(Ok(def_id)) = impl_item.container { // Pass `None` to skip deprecation warnings. self.tcx.check_stability( def_id, diff --git a/compiler/rustc_public/src/ty.rs b/compiler/rustc_public/src/ty.rs index 1b5f0ed1429..bcc77ff849d 100644 --- a/compiler/rustc_public/src/ty.rs +++ b/compiler/rustc_public/src/ty.rs @@ -1612,11 +1612,7 @@ crate_def! 
{ pub struct AssocItem { pub def_id: AssocDef, pub kind: AssocKind, - pub container: AssocItemContainer, - - /// If this is an item in an impl of a trait then this is the `DefId` of - /// the associated item on the trait that this implements. - pub trait_item_def_id: Option<AssocDef>, + pub container: AssocContainer, } #[derive(Clone, PartialEq, Debug, Eq, Serialize)] @@ -1636,9 +1632,11 @@ pub enum AssocKind { } #[derive(Clone, Debug, Eq, PartialEq, Serialize)] -pub enum AssocItemContainer { +pub enum AssocContainer { + InherentImpl, + /// The `AssocDef` points to the trait item being implemented. + TraitImpl(AssocDef), Trait, - Impl, } #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, Serialize)] diff --git a/compiler/rustc_public/src/unstable/convert/stable/ty.rs b/compiler/rustc_public/src/unstable/convert/stable/ty.rs index 207038db40d..5131611eb02 100644 --- a/compiler/rustc_public/src/unstable/convert/stable/ty.rs +++ b/compiler/rustc_public/src/unstable/convert/stable/ty.rs @@ -1076,14 +1076,21 @@ impl<'tcx> Stable<'tcx> for ty::AssocKind { } } -impl<'tcx> Stable<'tcx> for ty::AssocItemContainer { - type T = crate::ty::AssocItemContainer; +impl<'tcx> Stable<'tcx> for ty::AssocContainer { + type T = crate::ty::AssocContainer; - fn stable(&self, _: &mut Tables<'_, BridgeTys>, _: &CompilerCtxt<'_, BridgeTys>) -> Self::T { - use crate::ty::AssocItemContainer; + fn stable( + &self, + tables: &mut Tables<'_, BridgeTys>, + _: &CompilerCtxt<'_, BridgeTys>, + ) -> Self::T { + use crate::ty::AssocContainer; match self { - ty::AssocItemContainer::Trait => AssocItemContainer::Trait, - ty::AssocItemContainer::Impl => AssocItemContainer::Impl, + ty::AssocContainer::Trait => AssocContainer::Trait, + ty::AssocContainer::InherentImpl => AssocContainer::InherentImpl, + ty::AssocContainer::TraitImpl(trait_item_id) => { + AssocContainer::TraitImpl(tables.assoc_def(trait_item_id.unwrap())) + } } } } @@ -1100,7 +1107,6 @@ impl<'tcx> Stable<'tcx> for ty::AssocItem { def_id: tables.assoc_def(self.def_id), kind: self.kind.stable(tables, cx), container: self.container.stable(tables, cx), - trait_item_def_id: self.trait_item_def_id.map(|did| tables.assoc_def(did)), } } } diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml index e5cceacf15d..257785b10c8 100644 --- a/compiler/rustc_query_impl/Cargo.toml +++ b/compiler/rustc_query_impl/Cargo.toml @@ -13,7 +13,6 @@ rustc_index = { path = "../rustc_index" } rustc_middle = { path = "../rustc_middle" } rustc_query_system = { path = "../rustc_query_system" } rustc_serialize = { path = "../rustc_serialize" } -rustc_session = { path = "../rustc_session" } rustc_span = { path = "../rustc_span" } tracing = "0.1" # tidy-alphabetical-end diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 306d4dbc4b4..e499e08c82b 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -58,7 +58,7 @@ impl<'tcx, C: QueryCache, const ANON: bool, const DEPTH_LIMIT: bool, const FEEDA for DynamicConfig<'tcx, C, ANON, DEPTH_LIMIT, FEEDABLE> { fn clone(&self) -> Self { - DynamicConfig { dynamic: self.dynamic } + *self } } diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 55549cba737..4e601a6c594 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -9,6 +9,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::{DynSend, 
DynSync}; use rustc_data_structures::unord::UnordMap; use rustc_hashes::Hash64; +use rustc_hir::limit::Limit; use rustc_index::Idx; use rustc_middle::bug; use rustc_middle::dep_graph::{ @@ -31,7 +32,6 @@ use rustc_query_system::query::{ }; use rustc_query_system::{QueryOverflow, QueryOverflowNote}; use rustc_serialize::{Decodable, Encodable}; -use rustc_session::Limit; use rustc_span::def_id::LOCAL_CRATE; use crate::QueryConfigRestored; diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs index 5108ecaeea3..2778b24e774 100644 --- a/compiler/rustc_query_system/src/error.rs +++ b/compiler/rustc_query_system/src/error.rs @@ -1,6 +1,6 @@ use rustc_errors::codes::*; +use rustc_hir::limit::Limit; use rustc_macros::{Diagnostic, Subdiagnostic}; -use rustc_session::Limit; use rustc_span::{Span, Symbol}; #[derive(Subdiagnostic)] diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index abbacc70b3e..689e713ef46 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -1,3 +1,4 @@ +use itertools::Itertools as _; use rustc_ast::visit::{self, Visitor}; use rustc_ast::{ self as ast, CRATE_NODE_ID, Crate, ItemKind, ModKind, NodeId, Path, join_path_idents, @@ -1986,7 +1987,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let extern_prelude_ambiguity = || { self.extern_prelude.get(&Macros20NormalizedIdent::new(ident)).is_some_and(|entry| { entry.item_binding.map(|(b, _)| b) == Some(b1) - && entry.flag_binding.as_ref().and_then(|pb| pb.get().binding()) == Some(b2) + && entry.flag_binding.as_ref().and_then(|pb| pb.get().0.binding()) == Some(b2) }) }; let (b1, b2, misc1, misc2, swapped) = if b2.span.is_dummy() && !b1.span.is_dummy() { @@ -3469,16 +3470,11 @@ fn show_candidates( err.note(note.to_string()); } } else { - let (_, descr_first, _, _, _) = &inaccessible_path_strings[0]; - let descr = if inaccessible_path_strings + let descr = inaccessible_path_strings .iter() - .skip(1) - .all(|(_, descr, _, _, _)| descr == descr_first) - { - descr_first - } else { - "item" - }; + .map(|&(_, descr, _, _, _)| descr) + .all_equal_value() + .unwrap_or("item"); let plural_descr = if descr.ends_with('s') { format!("{descr}es") } else { format!("{descr}s") }; diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 80b2095d8cc..9e3c0938836 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -3099,7 +3099,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { |err, _, span, message, suggestion, span_suggs| { err.multipart_suggestion_verbose( message, - std::iter::once((span, suggestion)).chain(span_suggs.clone()).collect(), + std::iter::once((span, suggestion)).chain(span_suggs).collect(), Applicability::MaybeIncorrect, ); true diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index 0dea6ae3327..8b185ce7ef2 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -1031,7 +1031,7 @@ struct ExternPreludeEntry<'ra> { /// `flag_binding` is `None`, or when `extern crate` introducing `item_binding` used renaming. item_binding: Option<(NameBinding<'ra>, /* introduced by item */ bool)>, /// Binding from an `--extern` flag, lazily populated on first use. 
- flag_binding: Option<Cell<PendingBinding<'ra>>>, + flag_binding: Option<Cell<(PendingBinding<'ra>, /* finalized */ bool)>>, } impl ExternPreludeEntry<'_> { @@ -1042,7 +1042,7 @@ impl ExternPreludeEntry<'_> { fn flag() -> Self { ExternPreludeEntry { item_binding: None, - flag_binding: Some(Cell::new(PendingBinding::Pending)), + flag_binding: Some(Cell::new((PendingBinding::Pending, false))), } } } @@ -2245,14 +2245,16 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { fn extern_prelude_get_flag(&self, ident: Ident, finalize: bool) -> Option<NameBinding<'ra>> { let entry = self.extern_prelude.get(&Macros20NormalizedIdent::new(ident)); entry.and_then(|entry| entry.flag_binding.as_ref()).and_then(|flag_binding| { - let binding = match flag_binding.get() { + let (pending_binding, finalized) = flag_binding.get(); + let binding = match pending_binding { PendingBinding::Ready(binding) => { - if finalize { + if finalize && !finalized { self.cstore_mut().process_path_extern(self.tcx, ident.name, ident.span); } binding } PendingBinding::Pending => { + debug_assert!(!finalized); let crate_id = if finalize { self.cstore_mut().process_path_extern(self.tcx, ident.name, ident.span) } else { @@ -2264,7 +2266,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { }) } }; - flag_binding.set(PendingBinding::Ready(binding)); + flag_binding.set((PendingBinding::Ready(binding), finalize || finalized)); binding.or_else(|| finalize.then_some(self.dummy_binding)) }) } diff --git a/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/transform.rs b/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/transform.rs index d81fa062e01..577a16a0d25 100644 --- a/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/transform.rs +++ b/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/transform.rs @@ -469,8 +469,7 @@ fn implemented_method<'tcx>( let ancestor = if let Some(impl_id) = tcx.impl_of_assoc(instance.def_id()) { // Implementation in an `impl` block trait_ref = tcx.impl_trait_ref(impl_id)?; - let impl_method = tcx.associated_item(instance.def_id()); - method_id = impl_method.trait_item_def_id?; + method_id = tcx.trait_item_of(instance.def_id())?; trait_method = tcx.associated_item(method_id); trait_id = trait_ref.skip_binder().def_id; impl_id diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 9793d8091e2..297df7c2c97 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -70,6 +70,7 @@ pub const PRINT_KINDS: &[(&str, PrintKind)] = &[ ("target-libdir", PrintKind::TargetLibdir), ("target-list", PrintKind::TargetList), ("target-spec-json", PrintKind::TargetSpecJson), + ("target-spec-json-schema", PrintKind::TargetSpecJsonSchema), ("tls-models", PrintKind::TlsModels), // tidy-alphabetical-end ]; @@ -1043,6 +1044,7 @@ pub enum PrintKind { TargetLibdir, TargetList, TargetSpecJson, + TargetSpecJsonSchema, TlsModels, // tidy-alphabetical-end } @@ -2323,7 +2325,8 @@ fn is_print_request_stable(print_kind: PrintKind) -> bool { | PrintKind::CheckCfg | PrintKind::CrateRootLintLevels | PrintKind::SupportedCrateTypes - | PrintKind::TargetSpecJson => false, + | PrintKind::TargetSpecJson + | PrintKind::TargetSpecJsonSchema => false, _ => true, } } diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 6d5be2d92cd..69facde6936 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -83,10 +83,77 @@ pub struct TargetModifier { pub value_name: String, } +mod 
target_modifier_consistency_check { + use super::*; + pub(super) fn sanitizer(l: &TargetModifier, r: Option<&TargetModifier>) -> bool { + let mut lparsed: SanitizerSet = Default::default(); + let lval = if l.value_name.is_empty() { None } else { Some(l.value_name.as_str()) }; + parse::parse_sanitizers(&mut lparsed, lval); + + let mut rparsed: SanitizerSet = Default::default(); + let rval = r.filter(|v| !v.value_name.is_empty()).map(|v| v.value_name.as_str()); + parse::parse_sanitizers(&mut rparsed, rval); + + // Some sanitizers need to be target modifiers, and some do not. + // For now, we should mark all sanitizers as target modifiers except for these: + // AddressSanitizer, LeakSanitizer + let tmod_sanitizers = SanitizerSet::MEMORY + | SanitizerSet::THREAD + | SanitizerSet::HWADDRESS + | SanitizerSet::CFI + | SanitizerSet::MEMTAG + | SanitizerSet::SHADOWCALLSTACK + | SanitizerSet::KCFI + | SanitizerSet::KERNELADDRESS + | SanitizerSet::SAFESTACK + | SanitizerSet::DATAFLOW; + + lparsed & tmod_sanitizers == rparsed & tmod_sanitizers + } + pub(super) fn sanitizer_cfi_normalize_integers( + opts: &Options, + l: &TargetModifier, + r: Option<&TargetModifier>, + ) -> bool { + // For kCFI, the helper flag -Zsanitizer-cfi-normalize-integers should also be a target modifier + if opts.unstable_opts.sanitizer.contains(SanitizerSet::KCFI) { + if let Some(r) = r { + return l.extend().tech_value == r.extend().tech_value; + } else { + return false; + } + } + true + } +} + impl TargetModifier { pub fn extend(&self) -> ExtendedTargetModifierInfo { self.opt.reparse(&self.value_name) } + // Custom consistency check for target modifiers (or default `l.tech_value == r.tech_value`) + // When other is None, consistency with default value is checked + pub fn consistent(&self, opts: &Options, other: Option<&TargetModifier>) -> bool { + assert!(other.is_none() || self.opt == other.unwrap().opt); + match self.opt { + OptionsTargetModifiers::UnstableOptions(unstable) => match unstable { + UnstableOptionsTargetModifiers::sanitizer => { + return target_modifier_consistency_check::sanitizer(self, other); + } + UnstableOptionsTargetModifiers::sanitizer_cfi_normalize_integers => { + return target_modifier_consistency_check::sanitizer_cfi_normalize_integers( + opts, self, other, + ); + } + _ => {} + }, + _ => {} + }; + match other { + Some(other) => self.extend().tech_value == other.extend().tech_value, + None => false, + } + } } fn tmod_push_impl( @@ -2504,13 +2571,13 @@ written to standard error output)"), retpoline_external_thunk: bool = (false, parse_bool, [TRACKED TARGET_MODIFIER], "enables retpoline-external-thunk, retpoline-indirect-branches and retpoline-indirect-calls \ target features (default: no)"), - sanitizer: SanitizerSet = (SanitizerSet::empty(), parse_sanitizers, [TRACKED], + sanitizer: SanitizerSet = (SanitizerSet::empty(), parse_sanitizers, [TRACKED TARGET_MODIFIER], "use a sanitizer"), sanitizer_cfi_canonical_jump_tables: Option<bool> = (Some(true), parse_opt_bool, [TRACKED], "enable canonical jump tables (default: yes)"), sanitizer_cfi_generalize_pointers: Option<bool> = (None, parse_opt_bool, [TRACKED], "enable generalizing pointer types (default: no)"), - sanitizer_cfi_normalize_integers: Option<bool> = (None, parse_opt_bool, [TRACKED], + sanitizer_cfi_normalize_integers: Option<bool> = (None, parse_opt_bool, [TRACKED TARGET_MODIFIER], "enable normalizing integer types (default: no)"), sanitizer_dataflow_abilist: Vec<String> = (Vec::new(), parse_comma_list, [TRACKED], "additional ABI list files that 
control how shadow parameters are passed (comma separated)"), diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 96767d05439..3525c7c1d1a 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -1,10 +1,9 @@ use std::any::Any; -use std::ops::{Div, Mul}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use std::sync::atomic::AtomicBool; -use std::{env, fmt, io}; +use std::{env, io}; use rand::{RngCore, rng}; use rustc_ast::NodeId; @@ -25,6 +24,7 @@ use rustc_errors::{ Diag, DiagCtxt, DiagCtxtHandle, DiagMessage, Diagnostic, ErrorGuaranteed, FatalAbort, LintEmitter, TerminalUrl, fallback_fluent_bundle, }; +use rustc_hir::limit::Limit; use rustc_macros::HashStable_Generic; pub use rustc_span::def_id::StableCrateId; use rustc_span::edition::Edition; @@ -62,64 +62,6 @@ pub enum CtfeBacktrace { Immediate, } -/// New-type wrapper around `usize` for representing limits. Ensures that comparisons against -/// limits are consistent throughout the compiler. -#[derive(Clone, Copy, Debug, HashStable_Generic)] -pub struct Limit(pub usize); - -impl Limit { - /// Create a new limit from a `usize`. - pub fn new(value: usize) -> Self { - Limit(value) - } - - /// Create a new unlimited limit. - pub fn unlimited() -> Self { - Limit(usize::MAX) - } - - /// Check that `value` is within the limit. Ensures that the same comparisons are used - /// throughout the compiler, as mismatches can cause ICEs, see #72540. - #[inline] - pub fn value_within_limit(&self, value: usize) -> bool { - value <= self.0 - } -} - -impl From<usize> for Limit { - fn from(value: usize) -> Self { - Self::new(value) - } -} - -impl fmt::Display for Limit { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl Div<usize> for Limit { - type Output = Limit; - - fn div(self, rhs: usize) -> Self::Output { - Limit::new(self.0 / rhs) - } -} - -impl Mul<usize> for Limit { - type Output = Limit; - - fn mul(self, rhs: usize) -> Self::Output { - Limit::new(self.0 * rhs) - } -} - -impl rustc_errors::IntoDiagArg for Limit { - fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { - self.to_string().into_diag_arg(&mut None) - } -} - #[derive(Clone, Copy, Debug, HashStable_Generic)] pub struct Limits { /// The maximum recursion limit for potentially infinitely recursive diff --git a/compiler/rustc_span/src/analyze_source_file.rs b/compiler/rustc_span/src/analyze_source_file.rs index bb2cda77dff..c32593a6d95 100644 --- a/compiler/rustc_span/src/analyze_source_file.rs +++ b/compiler/rustc_span/src/analyze_source_file.rs @@ -81,8 +81,8 @@ cfg_select! { // use `loadu`, which supports unaligned loading. let chunk = unsafe { _mm_loadu_si128(chunk.as_ptr() as *const __m128i) }; - // For each character in the chunk, see if its byte value is < 0, - // which indicates that it's part of a UTF-8 char. + // For character in the chunk, see if its byte value is < 0, which + // indicates that it's part of a UTF-8 char. let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0)); // Create a bit mask from the comparison results. let multibyte_mask = _mm_movemask_epi8(multibyte_test); @@ -132,111 +132,8 @@ cfg_select! 
{ } } } - target_arch = "loongarch64" => { - fn analyze_source_file_dispatch( - src: &str, - lines: &mut Vec<RelativeBytePos>, - multi_byte_chars: &mut Vec<MultiByteChar>, - ) { - use std::arch::is_loongarch_feature_detected; - - if is_loongarch_feature_detected!("lsx") { - unsafe { - analyze_source_file_lsx(src, lines, multi_byte_chars); - } - } else { - analyze_source_file_generic( - src, - src.len(), - RelativeBytePos::from_u32(0), - lines, - multi_byte_chars, - ); - } - } - - /// Checks 16 byte chunks of text at a time. If the chunk contains - /// something other than printable ASCII characters and newlines, the - /// function falls back to the generic implementation. Otherwise it uses - /// LSX intrinsics to quickly find all newlines. - #[target_feature(enable = "lsx")] - unsafe fn analyze_source_file_lsx( - src: &str, - lines: &mut Vec<RelativeBytePos>, - multi_byte_chars: &mut Vec<MultiByteChar>, - ) { - use std::arch::loongarch64::*; - - const CHUNK_SIZE: usize = 16; - - let (chunks, tail) = src.as_bytes().as_chunks::<CHUNK_SIZE>(); - - // This variable keeps track of where we should start decoding a - // chunk. If a multi-byte character spans across chunk boundaries, - // we need to skip that part in the next chunk because we already - // handled it. - let mut intra_chunk_offset = 0; - - for (chunk_index, chunk) in chunks.iter().enumerate() { - // All LSX memory instructions support unaligned access, so using - // vld is fine. - let chunk = unsafe { lsx_vld::<0>(chunk.as_ptr() as *const i8) }; - - // For each character in the chunk, see if its byte value is < 0, - // which indicates that it's part of a UTF-8 char. - let multibyte_mask = lsx_vmskltz_b(chunk); - // Create a bit mask from the comparison results. - let multibyte_mask = lsx_vpickve2gr_w::<0>(multibyte_mask); - - // If the bit mask is all zero, we only have ASCII chars here: - if multibyte_mask == 0 { - assert!(intra_chunk_offset == 0); - - // Check for newlines in the chunk - let newlines_test = lsx_vseqi_b::<{b'\n' as i32}>(chunk); - let newlines_mask = lsx_vmskltz_b(newlines_test); - let mut newlines_mask = lsx_vpickve2gr_w::<0>(newlines_mask); - - let output_offset = RelativeBytePos::from_usize(chunk_index * CHUNK_SIZE + 1); - - while newlines_mask != 0 { - let index = newlines_mask.trailing_zeros(); - - lines.push(RelativeBytePos(index) + output_offset); - - // Clear the bit, so we can find the next one. - newlines_mask &= newlines_mask - 1; - } - } else { - // The slow path. - // There are multibyte chars in here, fallback to generic decoding. - let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset; - intra_chunk_offset = analyze_source_file_generic( - &src[scan_start..], - CHUNK_SIZE - intra_chunk_offset, - RelativeBytePos::from_usize(scan_start), - lines, - multi_byte_chars, - ); - } - } - - // There might still be a tail left to analyze - let tail_start = src.len() - tail.len() + intra_chunk_offset; - if tail_start < src.len() { - analyze_source_file_generic( - &src[tail_start..], - src.len() - tail_start, - RelativeBytePos::from_usize(tail_start), - lines, - multi_byte_chars, - ); - } - } - } _ => { - // The target (or compiler version) does not support vector instructions - // our specialized implementations need (x86 SSE2, loongarch64 LSX)... + // The target (or compiler version) does not support SSE2 ... 
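With the LSX path removed, any target without SSE2 now takes the generic per-character fallback. As a rough, self-contained sketch of what that fallback computes (simplified bookkeeping, not the real RelativeBytePos/MultiByteChar types), the scalar walk looks roughly like this:

    // Simplified stand-in for the generic analysis: record the byte position
    // just past every '\n' (a line start) and the position and width of every
    // multi-byte UTF-8 character.
    fn scan_source(src: &str) -> (Vec<u32>, Vec<(u32, u8)>) {
        let mut line_starts = Vec::new();
        let mut multi_byte_chars = Vec::new();
        for (offset, ch) in src.char_indices() {
            if ch == '\n' {
                line_starts.push(offset as u32 + 1);
            } else if ch.len_utf8() > 1 {
                multi_byte_chars.push((offset as u32, ch.len_utf8() as u8));
            }
        }
        (line_starts, multi_byte_chars)
    }

The SSE2 path only accelerates chunks that are pure ASCII; as soon as a chunk contains a multi-byte character, it falls back to this kind of scalar scan for that chunk.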
fn analyze_source_file_dispatch( src: &str, lines: &mut Vec<RelativeBytePos>, diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs index 19494ffc37e..633cbc3a4ae 100644 --- a/compiler/rustc_span/src/hygiene.rs +++ b/compiler/rustc_span/src/hygiene.rs @@ -46,6 +46,8 @@ use crate::symbol::{Symbol, kw, sym}; use crate::{DUMMY_SP, HashStableContext, Span, SpanDecoder, SpanEncoder, with_session_globals}; /// A `SyntaxContext` represents a chain of pairs `(ExpnId, Transparency)` named "marks". +/// +/// See <https://rustc-dev-guide.rust-lang.org/macro-expansion.html> for more explanation. #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct SyntaxContext(u32); @@ -61,7 +63,10 @@ pub type SyntaxContextKey = (SyntaxContext, ExpnId, Transparency); #[derive(Clone, Copy, Debug)] struct SyntaxContextData { + /// The last macro expansion in the chain. + /// (Here we say the most deeply nested macro expansion is the "outermost" expansion.) outer_expn: ExpnId, + /// Transparency of the last macro expansion outer_transparency: Transparency, parent: SyntaxContext, /// This context, but with all transparent and semi-opaque expansions filtered away. @@ -450,11 +455,13 @@ impl HygieneData { self.syntax_context_data[ctxt.0 as usize].opaque_and_semiopaque } + /// See [`SyntaxContextData::outer_expn`] #[inline] fn outer_expn(&self, ctxt: SyntaxContext) -> ExpnId { self.syntax_context_data[ctxt.0 as usize].outer_expn } + /// The last macro expansion and its Transparency #[inline] fn outer_mark(&self, ctxt: SyntaxContext) -> (ExpnId, Transparency) { let data = &self.syntax_context_data[ctxt.0 as usize]; @@ -900,6 +907,7 @@ impl SyntaxContext { HygieneData::with(|data| data.normalize_to_macro_rules(self)) } + /// See [`SyntaxContextData::outer_expn`] #[inline] pub fn outer_expn(self) -> ExpnId { HygieneData::with(|data| data.outer_expn(self)) @@ -912,6 +920,7 @@ impl SyntaxContext { HygieneData::with(|data| data.expn_data(data.outer_expn(self)).clone()) } + /// See [`HygieneData::outer_mark`] #[inline] fn outer_mark(self) -> (ExpnId, Transparency) { HygieneData::with(|data| data.outer_mark(self)) @@ -982,19 +991,20 @@ impl Span { #[derive(Clone, Debug, Encodable, Decodable, HashStable_Generic)] pub struct ExpnData { // --- The part unique to each expansion. - /// The kind of this expansion - macro or compiler desugaring. pub kind: ExpnKind, - /// The expansion that produced this expansion. + /// The expansion that contains the definition of the macro for this expansion. pub parent: ExpnId, - /// The location of the actual macro invocation or syntax sugar , e.g. - /// `let x = foo!();` or `if let Some(y) = x {}` + /// The span of the macro call which produced this expansion. + /// + /// This span will typically have a different `ExpnData` and `call_site`. + /// This recursively traces back through any macro calls which expanded into further + /// macro calls, until the "source call-site" is reached at the root SyntaxContext. + /// For example, if `food!()` expands to `fruit!()` which then expands to `grape`, + /// then the call-site of `grape` is `fruit!()` and the call-site of `fruit!()` + /// is `food!()`. /// - /// This may recursively refer to other macro invocations, e.g., if - /// `foo!()` invoked `bar!()` internally, and there was an - /// expression inside `bar!`; the call_site of the expression in - /// the expansion would point to the `bar!` invocation; that - /// call_site span would have its own ExpnData, with the call_site - /// pointing to the `foo!` invocation. 
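The replacement documentation below describes call_site as a chain that can be followed back to the code the user actually wrote. A hypothetical compiler-internal helper doing that walk with the existing Span::parent_callsite accessor might look like:

    // Hypothetical sketch: follow each expansion's call site until the span's
    // syntax context is the root, i.e. code the user wrote. This is what
    // Span::source_callsite already does internally.
    fn outermost_call_site(span: rustc_span::Span) -> rustc_span::Span {
        let mut current = span;
        while let Some(parent) = current.parent_callsite() {
            current = parent;
        }
        current
    }

In the food!/fruit!/grape example from the new comment, this walk would step from grape's span to the fruit!() call and then to the food!() call before stopping.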
+ /// For a desugaring expansion, this is the span of the expression or node that was + /// desugared. pub call_site: Span, /// Used to force two `ExpnData`s to have different `Fingerprint`s. /// Due to macro expansion, it's possible to end up with two `ExpnId`s diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index 8907c5e4c4a..e95b743b1ce 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -18,7 +18,6 @@ // tidy-alphabetical-start #![allow(internal_features)] #![cfg_attr(bootstrap, feature(round_char_boundary))] -#![cfg_attr(target_arch = "loongarch64", feature(stdarch_loongarch))] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(array_windows)] @@ -711,8 +710,8 @@ impl Span { if !ctxt.is_root() { ctxt.outer_expn_data().call_site.source_callsite() } else { self } } - /// The `Span` for the tokens in the previous macro expansion from which `self` was generated, - /// if any. + /// Returns the call-site span of the last macro expansion which produced this `Span`. + /// (see [`ExpnData::call_site`]). Returns `None` if this is not an expansion. pub fn parent_callsite(self) -> Option<Span> { let ctxt = self.ctxt(); (!ctxt.is_root()).then(|| ctxt.outer_expn_data().call_site) diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index ff5cf34cf94..cdb0b5b58da 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -7,7 +7,7 @@ use std::ops::Deref; use std::{fmt, str}; use rustc_arena::DroplessArena; -use rustc_data_structures::fx::FxIndexSet; +use rustc_data_structures::fx::{FxHashSet, FxIndexSet}; use rustc_data_structures::stable_hasher::{ HashStable, StableCompare, StableHasher, ToStableHashKey, }; @@ -21,18 +21,17 @@ mod tests; // The proc macro code for this is in `compiler/rustc_macros/src/symbols.rs`. symbols! { - // This list includes things that are definitely keywords (e.g. `if`), - // a few things that are definitely not keywords (e.g. the empty symbol, - // `{{root}}`) and things where there is disagreement between people and/or - // documents (such as the Rust Reference) about whether it is a keyword - // (e.g. `_`). + // This list includes things that are definitely keywords (e.g. `if`), a + // few things that are definitely not keywords (e.g. `{{root}}`) and things + // where there is disagreement between people and/or documents (such as the + // Rust Reference) about whether it is a keyword (e.g. `_`). // // If you modify this list, adjust any relevant `Symbol::{is,can_be}_*` // predicates and `used_keywords`. Also consider adding new keywords to the // `ui/parser/raw/raw-idents.rs` test. Keywords { - // Special reserved identifiers used internally for elided lifetimes, - // unnamed method parameters, crate root module, error recovery etc. + // Special reserved identifiers used internally for unnamed method + // parameters, crate root module, etc. // Matching predicates: `is_special`/`is_reserved` // // tidy-alphabetical-start @@ -326,6 +325,7 @@ symbols! { RangeSub, RangeTo, RangeToInclusive, + RangeToInclusiveCopy, Rc, RcWeak, Ready, @@ -1280,6 +1280,7 @@ symbols! { lang, lang_items, large_assignments, + last, lateout, lazy_normalization_consts, lazy_type_alias, @@ -1846,6 +1847,7 @@ symbols! 
{ rustc_abi, // FIXME(#82232, #143834): temporary name to mitigate `#[align]` nameres ambiguity rustc_align, + rustc_align_static, rustc_allocator, rustc_allocator_zeroed, rustc_allocator_zeroed_variant, @@ -2097,6 +2099,7 @@ symbols! { staged_api, start, state, + static_align, static_in_const, static_nobundle, static_recursion, @@ -2281,6 +2284,8 @@ symbols! { unboxed_closures, unchecked_add, unchecked_div, + unchecked_funnel_shl, + unchecked_funnel_shr, unchecked_mul, unchecked_rem, unchecked_shl, @@ -2866,11 +2871,20 @@ impl Interner { let byte_strs = FxIndexSet::from_iter( init.iter().copied().chain(extra.iter().copied()).map(|str| str.as_bytes()), ); - assert_eq!( - byte_strs.len(), - init.len() + extra.len(), - "duplicate symbols in the rustc symbol list and the extra symbols added by the driver", - ); + + // The order in which duplicates are reported is irrelevant. + #[expect(rustc::potential_query_instability)] + if byte_strs.len() != init.len() + extra.len() { + panic!( + "duplicate symbols in the rustc symbol list and the extra symbols added by the driver: {:?}", + FxHashSet::intersection( + &init.iter().copied().collect(), + &extra.iter().copied().collect(), + ) + .collect::<Vec<_>>() + ) + } + Interner(Lock::new(InternerInner { arena: Default::default(), byte_strs })) } diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs index 4d9741f1d11..95a7ec61868 100644 --- a/compiler/rustc_symbol_mangling/src/legacy.rs +++ b/compiler/rustc_symbol_mangling/src/legacy.rs @@ -54,7 +54,7 @@ pub(super) fn mangle<'tcx>( // Erase regions because they may not be deterministic when hashed // and should not matter anyhow. - let instance_ty = tcx.erase_regions(instance_ty); + let instance_ty = tcx.erase_and_anonymize_regions(instance_ty); let hash = get_symbol_hash(tcx, instance, instance_ty, instantiating_crate); @@ -422,7 +422,10 @@ impl<'tcx> Printer<'tcx> for LegacySymbolMangler<'tcx> { || &args[..generics.count()] == self .tcx - .erase_regions(ty::GenericArgs::identity_for_item(self.tcx, impl_def_id)) + .erase_and_anonymize_regions(ty::GenericArgs::identity_for_item( + self.tcx, + impl_def_id, + )) .as_slice() { ( diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs index 0c6d1495e39..50935e7caf3 100644 --- a/compiler/rustc_symbol_mangling/src/test.rs +++ b/compiler/rustc_symbol_mangling/src/test.rs @@ -58,7 +58,7 @@ impl SymbolNamesTest<'_> { let def_id = def_id.to_def_id(); let instance = Instance::new_raw( def_id, - tcx.erase_regions(GenericArgs::identity_for_item(tcx, def_id)), + tcx.erase_and_anonymize_regions(GenericArgs::identity_for_item(tcx, def_id)), ); let mangled = tcx.symbol_name(instance); tcx.dcx().emit_err(TestOutput { diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 0cbd48ba08c..1605b4958ba 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -82,9 +82,13 @@ pub(super) fn mangle<'tcx>( } pub fn mangle_internal_symbol<'tcx>(tcx: TyCtxt<'tcx>, item_name: &str) -> String { - if item_name == "rust_eh_personality" { + match item_name { // rust_eh_personality must not be renamed as LLVM hard-codes the name - return "rust_eh_personality".to_owned(); + "rust_eh_personality" => return item_name.to_owned(), + // Apple availability symbols need to not be mangled to be usable by + // C/Objective-C code. 
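Stepping back to the Interner change earlier in this hunk: instead of a bare assertion, construction now reports exactly which driver-supplied symbols collide with the built-in list. A minimal, self-contained version of that overlap computation (plain std collections rather than FxHashSet; names are illustrative) might be:

    use std::collections::HashSet;

    // Every name that appears in both lists, i.e. the duplicates the interner
    // now names in its panic message instead of failing a bare assertion.
    fn duplicate_symbols<'a>(init: &[&'a str], extra: &[&'a str]) -> Vec<&'a str> {
        let known: HashSet<&str> = init.iter().copied().collect();
        extra.iter().copied().filter(|name| known.contains(name)).collect()
    }

Collecting one list into a set first keeps the check roughly linear in the number of symbols rather than quadratic.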
+ "__isPlatformVersionAtLeast" | "__isOSVersionAtLeast" => return item_name.to_owned(), + _ => {} } let prefix = "_R"; @@ -325,7 +329,10 @@ impl<'tcx> Printer<'tcx> for V0SymbolMangler<'tcx> { || &args[..generics.count()] == self .tcx - .erase_regions(ty::GenericArgs::identity_for_item(self.tcx, impl_def_id)) + .erase_and_anonymize_regions(ty::GenericArgs::identity_for_item( + self.tcx, + impl_def_id, + )) .as_slice() { ( @@ -335,7 +342,7 @@ impl<'tcx> Printer<'tcx> for V0SymbolMangler<'tcx> { ) } else { assert!( - !args.has_non_region_param(), + !args.has_non_region_param() && !args.has_free_regions(), "should not be mangling partially substituted \ polymorphic instance: {impl_def_id:?} {args:?}" ); diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml index 7fabb227d9a..ecdb6ab5a57 100644 --- a/compiler/rustc_target/Cargo.toml +++ b/compiler/rustc_target/Cargo.toml @@ -14,6 +14,7 @@ rustc_fs_util = { path = "../rustc_fs_util" } rustc_macros = { path = "../rustc_macros" } rustc_serialize = { path = "../rustc_serialize" } rustc_span = { path = "../rustc_span" } +schemars = "1.0.4" serde = "1.0.219" serde_derive = "1.0.219" serde_json = "1.0.59" diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs index d567ad401bb..9213d73e24e 100644 --- a/compiler/rustc_target/src/callconv/loongarch.rs +++ b/compiler/rustc_target/src/callconv/loongarch.rs @@ -8,16 +8,16 @@ use crate::spec::HasTargetSpec; #[derive(Copy, Clone)] enum RegPassKind { - Float(Reg), - Integer(Reg), + Float { offset_from_start: Size, ty: Reg }, + Integer { offset_from_start: Size, ty: Reg }, Unknown, } #[derive(Copy, Clone)] enum FloatConv { - FloatPair(Reg, Reg), + FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg }, Float(Reg), - MixedPair(Reg, Reg), + MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg }, } #[derive(Copy, Clone)] @@ -37,6 +37,7 @@ fn should_use_fp_conv_helper<'a, Ty, C>( flen: u64, field1_kind: &mut RegPassKind, field2_kind: &mut RegPassKind, + offset_from_start: Size, ) -> Result<(), CannotUseFpConv> where Ty: TyAbiInterface<'a, C> + Copy, @@ -49,16 +50,16 @@ where } match (*field1_kind, *field2_kind) { (RegPassKind::Unknown, _) => { - *field1_kind = RegPassKind::Integer(Reg { - kind: RegKind::Integer, - size: arg_layout.size, - }); + *field1_kind = RegPassKind::Integer { + offset_from_start, + ty: Reg { kind: RegKind::Integer, size: arg_layout.size }, + }; } - (RegPassKind::Float(_), RegPassKind::Unknown) => { - *field2_kind = RegPassKind::Integer(Reg { - kind: RegKind::Integer, - size: arg_layout.size, - }); + (RegPassKind::Float { .. 
}, RegPassKind::Unknown) => { + *field2_kind = RegPassKind::Integer { + offset_from_start, + ty: Reg { kind: RegKind::Integer, size: arg_layout.size }, + }; } _ => return Err(CannotUseFpConv), } @@ -69,12 +70,16 @@ where } match (*field1_kind, *field2_kind) { (RegPassKind::Unknown, _) => { - *field1_kind = - RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size }); + *field1_kind = RegPassKind::Float { + offset_from_start, + ty: Reg { kind: RegKind::Float, size: arg_layout.size }, + }; } (_, RegPassKind::Unknown) => { - *field2_kind = - RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size }); + *field2_kind = RegPassKind::Float { + offset_from_start, + ty: Reg { kind: RegKind::Float, size: arg_layout.size }, + }; } _ => return Err(CannotUseFpConv), } @@ -96,13 +101,14 @@ where flen, field1_kind, field2_kind, + offset_from_start, ); } return Err(CannotUseFpConv); } } FieldsShape::Array { count, .. } => { - for _ in 0..count { + for i in 0..count { let elem_layout = arg_layout.field(cx, 0); should_use_fp_conv_helper( cx, @@ -111,6 +117,7 @@ where flen, field1_kind, field2_kind, + offset_from_start + elem_layout.size * i, )?; } } @@ -121,7 +128,15 @@ where } for i in arg_layout.fields.index_by_increasing_offset() { let field = arg_layout.field(cx, i); - should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?; + should_use_fp_conv_helper( + cx, + &field, + xlen, + flen, + field1_kind, + field2_kind, + offset_from_start + arg_layout.fields.offset(i), + )?; } } }, @@ -140,14 +155,52 @@ where { let mut field1_kind = RegPassKind::Unknown; let mut field2_kind = RegPassKind::Unknown; - if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() { + if should_use_fp_conv_helper( + cx, + arg, + xlen, + flen, + &mut field1_kind, + &mut field2_kind, + Size::ZERO, + ) + .is_err() + { return None; } match (field1_kind, field2_kind) { - (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)), - (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)), - (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)), - (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)), + ( + RegPassKind::Integer { offset_from_start, .. } + | RegPassKind::Float { offset_from_start, .. }, + _, + ) if offset_from_start != Size::ZERO => { + panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty) + } + ( + RegPassKind::Integer { ty: first_ty, .. }, + RegPassKind::Float { offset_from_start, ty: second_ty }, + ) => Some(FloatConv::MixedPair { + first_ty, + second_ty_offset_from_start: offset_from_start, + second_ty, + }), + ( + RegPassKind::Float { ty: first_ty, .. }, + RegPassKind::Integer { offset_from_start, ty: second_ty }, + ) => Some(FloatConv::MixedPair { + first_ty, + second_ty_offset_from_start: offset_from_start, + second_ty, + }), + ( + RegPassKind::Float { ty: first_ty, .. }, + RegPassKind::Float { offset_from_start, ty: second_ty }, + ) => Some(FloatConv::FloatPair { + first_ty, + second_ty_offset_from_start: offset_from_start, + second_ty, + }), + (RegPassKind::Float { ty, .. 
}, RegPassKind::Unknown) => Some(FloatConv::Float(ty)), _ => None, } } @@ -165,11 +218,19 @@ where FloatConv::Float(f) => { arg.cast_to(f); } - FloatConv::FloatPair(l, r) => { - arg.cast_to(CastTarget::pair(l, r)); + FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => { + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); } - FloatConv::MixedPair(l, r) => { - arg.cast_to(CastTarget::pair(l, r)); + FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => { + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); } } return false; @@ -233,15 +294,27 @@ fn classify_arg<'a, Ty, C>( arg.cast_to(f); return; } - Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => { + Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty }) + if *avail_fprs >= 2 => + { *avail_fprs -= 2; - arg.cast_to(CastTarget::pair(l, r)); + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); return; } - Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => { + Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty }) + if *avail_fprs >= 1 && *avail_gprs >= 1 => + { *avail_gprs -= 1; *avail_fprs -= 1; - arg.cast_to(CastTarget::pair(l, r)); + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); return; } _ => (), diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs index 91657fef803..b3fe1fffcce 100644 --- a/compiler/rustc_target/src/lib.rs +++ b/compiler/rustc_target/src/lib.rs @@ -72,3 +72,65 @@ fn find_relative_libdir(sysroot: &Path) -> std::borrow::Cow<'static, str> { Some(libdir) => libdir.into(), } } + +macro_rules! target_spec_enum { + ( + $( #[$attr:meta] )* + pub enum $name:ident { + $( + $( #[$variant_attr:meta] )* + $variant:ident = $string:literal, + )* + } + parse_error_type = $parse_error_type:literal; + ) => { + $( #[$attr] )* + #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] + #[derive(schemars::JsonSchema)] + pub enum $name { + $( + $( #[$variant_attr] )* + #[serde(rename = $string)] // for JSON schema generation only + $variant, + )* + } + + impl FromStr for $name { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Ok(match s { + $( $string => Self::$variant, )* + _ => { + let all = [$( concat!("'", $string, "'") ),*].join(", "); + return Err(format!("invalid {}: '{s}'. 
allowed values: {all}", $parse_error_type)); + } + }) + } + } + + impl $name { + pub fn desc(&self) -> &'static str { + match self { + $( Self::$variant => $string, )* + } + } + } + + impl crate::json::ToJson for $name { + fn to_json(&self) -> crate::json::Json { + self.desc().to_json() + } + } + + crate::json::serde_deserialize_from_str!($name); + + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.desc()) + } + } + }; +} +use target_spec_enum; diff --git a/compiler/rustc_target/src/spec/json.rs b/compiler/rustc_target/src/spec/json.rs index e9ae5734d5b..f236be92b3b 100644 --- a/compiler/rustc_target/src/spec/json.rs +++ b/compiler/rustc_target/src/spec/json.rs @@ -408,12 +408,12 @@ impl ToJson for Target { } } -#[derive(serde_derive::Deserialize)] +#[derive(serde_derive::Deserialize, schemars::JsonSchema)] struct LinkSelfContainedComponentsWrapper { components: Vec<LinkSelfContainedComponents>, } -#[derive(serde_derive::Deserialize)] +#[derive(serde_derive::Deserialize, schemars::JsonSchema)] #[serde(untagged)] enum TargetFamiliesJson { Array(StaticCow<[StaticCow<str>]>), @@ -429,6 +429,18 @@ impl FromStr for EndianWrapper { } } crate::json::serde_deserialize_from_str!(EndianWrapper); +impl schemars::JsonSchema for EndianWrapper { + fn schema_name() -> std::borrow::Cow<'static, str> { + "Endian".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + schemars::json_schema! ({ + "type": "string", + "enum": ["big", "little"] + }) + .into() + } +} /// `ExternAbi` is in `rustc_abi`, which doesn't have access to the macro and serde. struct ExternAbiWrapper(rustc_abi::ExternAbi); @@ -441,8 +453,22 @@ impl FromStr for ExternAbiWrapper { } } crate::json::serde_deserialize_from_str!(ExternAbiWrapper); +impl schemars::JsonSchema for ExternAbiWrapper { + fn schema_name() -> std::borrow::Cow<'static, str> { + "ExternAbi".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + let all = + rustc_abi::ExternAbi::ALL_VARIANTS.iter().map(|abi| abi.as_str()).collect::<Vec<_>>(); + schemars::json_schema! ({ + "type": "string", + "enum": all, + }) + .into() + } +} -#[derive(serde_derive::Deserialize)] +#[derive(serde_derive::Deserialize, schemars::JsonSchema)] struct TargetSpecJsonMetadata { description: Option<StaticCow<str>>, tier: Option<u64>, @@ -450,7 +476,7 @@ struct TargetSpecJsonMetadata { std: Option<bool>, } -#[derive(serde_derive::Deserialize)] +#[derive(serde_derive::Deserialize, schemars::JsonSchema)] #[serde(rename_all = "kebab-case")] // Ensure that all unexpected fields get turned into errors. // This helps users stay up to date when the schema changes instead of silently @@ -593,3 +619,7 @@ struct TargetSpecJson { supports_xray: Option<bool>, entry_abi: Option<ExternAbiWrapper>, } + +pub fn json_schema() -> schemars::Schema { + schemars::schema_for!(TargetSpecJson) +} diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 4d9f06c568b..07fb1ce63f7 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -70,6 +70,7 @@ mod json; pub use abi_map::{AbiMap, AbiMapping}; pub use base::apple; pub use base::avr::ef_avr_arch; +pub use json::json_schema; /// Linker is called through a C/C++ compiler. 
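The target_spec_enum! macro introduced above exists so that an enum's variants, their string spellings, FromStr parsing, desc(), Display, ToJson and the schemars::JsonSchema derive all come from one declaration and cannot drift apart. A stripped-down, self-contained sketch of the same pattern (illustrative names only, omitting the JSON and schema pieces) is:

    macro_rules! string_enum {
        ($name:ident { $($variant:ident = $text:literal,)* }) => {
            #[derive(Clone, Copy, Debug, PartialEq, Eq)]
            pub enum $name { $($variant,)* }

            impl $name {
                pub fn desc(&self) -> &'static str {
                    match self { $(Self::$variant => $text,)* }
                }
            }

            impl std::str::FromStr for $name {
                type Err = String;
                fn from_str(s: &str) -> Result<Self, Self::Err> {
                    match s {
                        $($text => Ok(Self::$variant),)*
                        _ => Err(format!("invalid value: '{s}'")),
                    }
                }
            }
        };
    }

    // Loosely mirrors the PanicStrategy conversion later in this file.
    string_enum!(PanicStrategy {
        Unwind = "unwind",
        Abort = "abort",
    });

    fn main() {
        assert_eq!("abort".parse::<PanicStrategy>(), Ok(PanicStrategy::Abort));
        assert_eq!(PanicStrategy::Unwind.desc(), "unwind");
    }

The real macro also emits the ToJson impl, the serde-based string deserializer and the JsonSchema metadata, which the new json_schema() helper relies on when building a schema for the whole target-spec JSON format.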
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] @@ -183,50 +184,15 @@ impl LinkerFlavorCli { } } -#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] -pub enum LldFlavor { - Wasm, - Ld64, - Ld, - Link, -} - -impl LldFlavor { - pub fn as_str(&self) -> &'static str { - match self { - LldFlavor::Wasm => "wasm", - LldFlavor::Ld64 => "darwin", - LldFlavor::Ld => "gnu", - LldFlavor::Link => "link", - } - } -} - -impl FromStr for LldFlavor { - type Err = String; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(match s { - "darwin" => LldFlavor::Ld64, - "gnu" => LldFlavor::Ld, - "link" => LldFlavor::Link, - "wasm" => LldFlavor::Wasm, - _ => { - return Err( - "invalid value for lld flavor: '{s}', expected one of 'darwin', 'gnu', 'link', 'wasm'" - .into(), - ); - } - }) +crate::target_spec_enum! { + pub enum LldFlavor { + Wasm = "wasm", + Ld64 = "darwin", + Ld = "gnu", + Link = "link", } -} -crate::json::serde_deserialize_from_str!(LldFlavor); - -impl ToJson for LldFlavor { - fn to_json(&self) -> Json { - self.as_str().to_json() - } + parse_error_type = "LLD flavor"; } impl LinkerFlavor { @@ -558,6 +524,20 @@ linker_flavor_cli_impls! { } crate::json::serde_deserialize_from_str!(LinkerFlavorCli); +impl schemars::JsonSchema for LinkerFlavorCli { + fn schema_name() -> std::borrow::Cow<'static, str> { + "LinkerFlavor".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + let all: Vec<&'static str> = + Self::all().iter().map(|flavor| flavor.desc()).collect::<Vec<_>>(); + schemars::json_schema! ({ + "type": "string", + "enum": all + }) + .into() + } +} impl ToJson for LinkerFlavorCli { fn to_json(&self) -> Json { @@ -611,6 +591,18 @@ impl FromStr for LinkSelfContainedDefault { } crate::json::serde_deserialize_from_str!(LinkSelfContainedDefault); +impl schemars::JsonSchema for LinkSelfContainedDefault { + fn schema_name() -> std::borrow::Cow<'static, str> { + "LinkSelfContainedDefault".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + schemars::json_schema! ({ + "type": "string", + "enum": ["false", "true", "wasm", "musl", "mingw"] + }) + .into() + } +} impl ToJson for LinkSelfContainedDefault { fn to_json(&self) -> Json { @@ -743,6 +735,20 @@ impl FromStr for LinkSelfContainedComponents { } crate::json::serde_deserialize_from_str!(LinkSelfContainedComponents); +impl schemars::JsonSchema for LinkSelfContainedComponents { + fn schema_name() -> std::borrow::Cow<'static, str> { + "LinkSelfContainedComponents".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + let all = + Self::all_components().iter().map(|component| component.as_str()).collect::<Vec<_>>(); + schemars::json_schema! ({ + "type": "string", + "enum": all, + }) + .into() + } +} impl ToJson for LinkSelfContainedComponents { fn to_json(&self) -> Json { @@ -823,10 +829,14 @@ impl LinkerFeatures { } } -#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)] -pub enum PanicStrategy { - Unwind, - Abort, +crate::target_spec_enum! 
{ + #[derive(Encodable, Decodable, HashStable_Generic)] + pub enum PanicStrategy { + Unwind = "unwind", + Abort = "abort", + } + + parse_error_type = "panic strategy"; } #[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)] @@ -838,13 +848,6 @@ pub enum OnBrokenPipe { } impl PanicStrategy { - pub fn desc(&self) -> &str { - match *self { - PanicStrategy::Unwind => "unwind", - PanicStrategy::Abort => "abort", - } - } - pub const fn desc_symbol(&self) -> Symbol { match *self { PanicStrategy::Unwind => sym::unwind, @@ -857,24 +860,16 @@ impl PanicStrategy { } } -impl FromStr for PanicStrategy { - type Err = String; - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(match s { - "unwind" => PanicStrategy::Unwind, - "abort" => PanicStrategy::Abort, - _ => { - return Err(format!( - "'{}' is not a valid value for \ - panic-strategy. Use 'unwind' or 'abort'.", - s - )); - } - }) +crate::target_spec_enum! { + pub enum RelroLevel { + Full = "full", + Partial = "partial", + Off = "off", + None = "none", } -} -crate::json::serde_deserialize_from_str!(PanicStrategy); + parse_error_type = "relro level"; +} impl IntoDiagArg for PanicStrategy { fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { @@ -882,109 +877,14 @@ impl IntoDiagArg for PanicStrategy { } } -impl ToJson for PanicStrategy { - fn to_json(&self) -> Json { - match *self { - PanicStrategy::Abort => "abort".to_json(), - PanicStrategy::Unwind => "unwind".to_json(), - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Hash)] -pub enum RelroLevel { - Full, - Partial, - Off, - None, -} - -impl RelroLevel { - pub fn desc(&self) -> &str { - match *self { - RelroLevel::Full => "full", - RelroLevel::Partial => "partial", - RelroLevel::Off => "off", - RelroLevel::None => "none", - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Hash)] -pub enum SymbolVisibility { - Hidden, - Protected, - Interposable, -} - -impl SymbolVisibility { - pub fn desc(&self) -> &str { - match *self { - SymbolVisibility::Hidden => "hidden", - SymbolVisibility::Protected => "protected", - SymbolVisibility::Interposable => "interposable", - } - } -} - -impl FromStr for SymbolVisibility { - type Err = String; - - fn from_str(s: &str) -> Result<SymbolVisibility, Self::Err> { - match s { - "hidden" => Ok(SymbolVisibility::Hidden), - "protected" => Ok(SymbolVisibility::Protected), - "interposable" => Ok(SymbolVisibility::Interposable), - _ => Err(format!( - "'{}' is not a valid value for \ - symbol-visibility. Use 'hidden', 'protected, or 'interposable'.", - s - )), - } - } -} - -crate::json::serde_deserialize_from_str!(SymbolVisibility); - -impl ToJson for SymbolVisibility { - fn to_json(&self) -> Json { - match *self { - SymbolVisibility::Hidden => "hidden".to_json(), - SymbolVisibility::Protected => "protected".to_json(), - SymbolVisibility::Interposable => "interposable".to_json(), - } - } -} - -impl FromStr for RelroLevel { - type Err = String; - - fn from_str(s: &str) -> Result<RelroLevel, Self::Err> { - match s { - "full" => Ok(RelroLevel::Full), - "partial" => Ok(RelroLevel::Partial), - "off" => Ok(RelroLevel::Off), - "none" => Ok(RelroLevel::None), - _ => Err(format!( - "'{}' is not a valid value for \ - relro-level. Use 'full', 'partial, 'off', or 'none'.", - s - )), - } +crate::target_spec_enum! 
{ + pub enum SymbolVisibility { + Hidden = "hidden", + Protected = "protected", + Interposable = "interposable", } -} - -crate::json::serde_deserialize_from_str!(RelroLevel); -impl ToJson for RelroLevel { - fn to_json(&self) -> Json { - match *self { - RelroLevel::Full => "full".to_json(), - RelroLevel::Partial => "partial".to_json(), - RelroLevel::Off => "off".to_json(), - RelroLevel::None => "None".to_json(), - } - } + parse_error_type = "symbol visibility"; } #[derive(Clone, Debug, PartialEq, Hash)] @@ -1014,6 +914,18 @@ impl FromStr for SmallDataThresholdSupport { } crate::json::serde_deserialize_from_str!(SmallDataThresholdSupport); +impl schemars::JsonSchema for SmallDataThresholdSupport { + fn schema_name() -> std::borrow::Cow<'static, str> { + "SmallDataThresholdSupport".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + schemars::json_schema! ({ + "type": "string", + "pattern": r#"^none|default-for-arch|llvm-module-flag=.+|llvm-arg=.+$"#, + }) + .into() + } +} impl ToJson for SmallDataThresholdSupport { fn to_json(&self) -> Value { @@ -1026,76 +938,31 @@ impl ToJson for SmallDataThresholdSupport { } } -#[derive(Clone, Copy, Debug, PartialEq, Hash)] -pub enum MergeFunctions { - Disabled, - Trampolines, - Aliases, -} - -impl MergeFunctions { - pub fn desc(&self) -> &str { - match *self { - MergeFunctions::Disabled => "disabled", - MergeFunctions::Trampolines => "trampolines", - MergeFunctions::Aliases => "aliases", - } +crate::target_spec_enum! { + pub enum MergeFunctions { + Disabled = "disabled", + Trampolines = "trampolines", + Aliases = "aliases", } -} - -impl FromStr for MergeFunctions { - type Err = String; - fn from_str(s: &str) -> Result<MergeFunctions, Self::Err> { - match s { - "disabled" => Ok(MergeFunctions::Disabled), - "trampolines" => Ok(MergeFunctions::Trampolines), - "aliases" => Ok(MergeFunctions::Aliases), - _ => Err(format!( - "'{}' is not a valid value for \ - merge-functions. Use 'disabled', \ - 'trampolines', or 'aliases'.", - s - )), - } - } + parse_error_type = "value for merge-functions"; } -crate::json::serde_deserialize_from_str!(MergeFunctions); - -impl ToJson for MergeFunctions { - fn to_json(&self) -> Json { - match *self { - MergeFunctions::Disabled => "disabled".to_json(), - MergeFunctions::Trampolines => "trampolines".to_json(), - MergeFunctions::Aliases => "aliases".to_json(), - } +crate::target_spec_enum! 
{ + pub enum RelocModel { + Static = "static", + Pic = "pic", + Pie = "pie", + DynamicNoPic = "dynamic-no-pic", + Ropi = "ropi", + Rwpi = "rwpi", + RopiRwpi = "ropi-rwpi", } -} -#[derive(Clone, Copy, PartialEq, Hash, Debug)] -pub enum RelocModel { - Static, - Pic, - Pie, - DynamicNoPic, - Ropi, - Rwpi, - RopiRwpi, + parse_error_type = "relocation model"; } impl RelocModel { - pub fn desc(&self) -> &str { - match *self { - RelocModel::Static => "static", - RelocModel::Pic => "pic", - RelocModel::Pie => "pie", - RelocModel::DynamicNoPic => "dynamic-no-pic", - RelocModel::Ropi => "ropi", - RelocModel::Rwpi => "rwpi", - RelocModel::RopiRwpi => "ropi-rwpi", - } - } pub const fn desc_symbol(&self) -> Symbol { match *self { RelocModel::Static => kw::Static, @@ -1121,236 +988,75 @@ impl RelocModel { } } -impl FromStr for RelocModel { - type Err = String; - - fn from_str(s: &str) -> Result<RelocModel, Self::Err> { - Ok(match s { - "static" => RelocModel::Static, - "pic" => RelocModel::Pic, - "pie" => RelocModel::Pie, - "dynamic-no-pic" => RelocModel::DynamicNoPic, - "ropi" => RelocModel::Ropi, - "rwpi" => RelocModel::Rwpi, - "ropi-rwpi" => RelocModel::RopiRwpi, - _ => { - return Err(format!( - "invalid relocation model '{s}'. - Run `rustc --print relocation-models` to \ - see the list of supported values.'" - )); - } - }) +crate::target_spec_enum! { + pub enum CodeModel { + Tiny = "tiny", + Small = "small", + Kernel = "kernel", + Medium = "medium", + Large = "large", } -} -crate::json::serde_deserialize_from_str!(RelocModel); - -impl ToJson for RelocModel { - fn to_json(&self) -> Json { - self.desc().to_json() - } -} - -#[derive(Clone, Copy, PartialEq, Hash, Debug)] -pub enum CodeModel { - Tiny, - Small, - Kernel, - Medium, - Large, + parse_error_type = "code model"; } -impl FromStr for CodeModel { - type Err = String; - - fn from_str(s: &str) -> Result<CodeModel, Self::Err> { - Ok(match s { - "tiny" => CodeModel::Tiny, - "small" => CodeModel::Small, - "kernel" => CodeModel::Kernel, - "medium" => CodeModel::Medium, - "large" => CodeModel::Large, - _ => { - return Err(format!( - "'{s}' is not a valid code model. \ - Run `rustc --print code-models` to \ - see the list of supported values." - )); - } - }) +crate::target_spec_enum! { + /// The float ABI setting to be configured in the LLVM target machine. + pub enum FloatAbi { + Soft = "soft", + Hard = "hard", } -} -crate::json::serde_deserialize_from_str!(CodeModel); - -impl ToJson for CodeModel { - fn to_json(&self) -> Json { - match *self { - CodeModel::Tiny => "tiny", - CodeModel::Small => "small", - CodeModel::Kernel => "kernel", - CodeModel::Medium => "medium", - CodeModel::Large => "large", - } - .to_json() - } + parse_error_type = "float abi"; } -/// The float ABI setting to be configured in the LLVM target machine. -#[derive(Clone, Copy, PartialEq, Hash, Debug)] -pub enum FloatAbi { - Soft, - Hard, -} - -impl FromStr for FloatAbi { - type Err = String; - - fn from_str(s: &str) -> Result<FloatAbi, Self::Err> { - Ok(match s { - "soft" => FloatAbi::Soft, - "hard" => FloatAbi::Hard, - _ => { - return Err(format!( - "'{}' is not a valid value for \ - llvm-floatabi. Use 'soft' or 'hard'.", - s - )); - } - }) +crate::target_spec_enum! { + /// The Rustc-specific variant of the ABI used for this target. + pub enum RustcAbi { + /// On x86-32 only: make use of SSE and SSE2 for ABI purposes. + X86Sse2 = "x86-sse2", + /// On x86-32/64 only: do not use any FPU or SIMD registers for the ABI. 
+ X86Softfloat = "x86-softfloat", } -} - -crate::json::serde_deserialize_from_str!(FloatAbi); -impl ToJson for FloatAbi { - fn to_json(&self) -> Json { - match *self { - FloatAbi::Soft => "soft", - FloatAbi::Hard => "hard", - } - .to_json() - } -} - -/// The Rustc-specific variant of the ABI used for this target. -#[derive(Clone, Copy, PartialEq, Hash, Debug)] -pub enum RustcAbi { - /// On x86-32 only: make use of SSE and SSE2 for ABI purposes. - X86Sse2, - /// On x86-32/64 only: do not use any FPU or SIMD registers for the ABI. - X86Softfloat, + parse_error_type = "rustc abi"; } -impl FromStr for RustcAbi { - type Err = String; - - fn from_str(s: &str) -> Result<RustcAbi, Self::Err> { - Ok(match s { - "x86-sse2" => RustcAbi::X86Sse2, - "x86-softfloat" => RustcAbi::X86Softfloat, - _ => { - return Err(format!( - "'{s}' is not a valid value for rustc-abi. \ - Use 'x86-softfloat' or leave the field unset." - )); - } - }) - } -} - -crate::json::serde_deserialize_from_str!(RustcAbi); - -impl ToJson for RustcAbi { - fn to_json(&self) -> Json { - match *self { - RustcAbi::X86Sse2 => "x86-sse2", - RustcAbi::X86Softfloat => "x86-softfloat", - } - .to_json() +crate::target_spec_enum! { + pub enum TlsModel { + GeneralDynamic = "global-dynamic", + LocalDynamic = "local-dynamic", + InitialExec = "initial-exec", + LocalExec = "local-exec", + Emulated = "emulated", } -} -#[derive(Clone, Copy, PartialEq, Hash, Debug)] -pub enum TlsModel { - GeneralDynamic, - LocalDynamic, - InitialExec, - LocalExec, - Emulated, -} - -impl FromStr for TlsModel { - type Err = String; - - fn from_str(s: &str) -> Result<TlsModel, Self::Err> { - Ok(match s { - // Note the difference "general" vs "global" difference. The model name is "general", - // but the user-facing option name is "global" for consistency with other compilers. - "global-dynamic" => TlsModel::GeneralDynamic, - "local-dynamic" => TlsModel::LocalDynamic, - "initial-exec" => TlsModel::InitialExec, - "local-exec" => TlsModel::LocalExec, - "emulated" => TlsModel::Emulated, - _ => { - return Err(format!( - "'{s}' is not a valid TLS model. \ - Run `rustc --print tls-models` to \ - see the list of supported values." - )); - } - }) - } + parse_error_type = "TLS model"; } -crate::json::serde_deserialize_from_str!(TlsModel); - -impl ToJson for TlsModel { - fn to_json(&self) -> Json { - match *self { - TlsModel::GeneralDynamic => "global-dynamic", - TlsModel::LocalDynamic => "local-dynamic", - TlsModel::InitialExec => "initial-exec", - TlsModel::LocalExec => "local-exec", - TlsModel::Emulated => "emulated", - } - .to_json() +crate::target_spec_enum! { + /// Everything is flattened to a single enum to make the json encoding/decoding less annoying. + pub enum LinkOutputKind { + /// Dynamically linked non position-independent executable. + DynamicNoPicExe = "dynamic-nopic-exe", + /// Dynamically linked position-independent executable. + DynamicPicExe = "dynamic-pic-exe", + /// Statically linked non position-independent executable. + StaticNoPicExe = "static-nopic-exe", + /// Statically linked position-independent executable. + StaticPicExe = "static-pic-exe", + /// Regular dynamic library ("dynamically linked"). + DynamicDylib = "dynamic-dylib", + /// Dynamic library with bundled libc ("statically linked"). + StaticDylib = "static-dylib", + /// WASI module with a lifetime past the _initialize entry point + WasiReactorExe = "wasi-reactor-exe", } -} -/// Everything is flattened to a single enum to make the json encoding/decoding less annoying. 
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub enum LinkOutputKind { - /// Dynamically linked non position-independent executable. - DynamicNoPicExe, - /// Dynamically linked position-independent executable. - DynamicPicExe, - /// Statically linked non position-independent executable. - StaticNoPicExe, - /// Statically linked position-independent executable. - StaticPicExe, - /// Regular dynamic library ("dynamically linked"). - DynamicDylib, - /// Dynamic library with bundled libc ("statically linked"). - StaticDylib, - /// WASI module with a lifetime past the _initialize entry point - WasiReactorExe, + parse_error_type = "CRT object kind"; } impl LinkOutputKind { - fn as_str(&self) -> &'static str { - match self { - LinkOutputKind::DynamicNoPicExe => "dynamic-nopic-exe", - LinkOutputKind::DynamicPicExe => "dynamic-pic-exe", - LinkOutputKind::StaticNoPicExe => "static-nopic-exe", - LinkOutputKind::StaticPicExe => "static-pic-exe", - LinkOutputKind::DynamicDylib => "dynamic-dylib", - LinkOutputKind::StaticDylib => "static-dylib", - LinkOutputKind::WasiReactorExe => "wasi-reactor-exe", - } - } - pub fn can_link_dylib(self) -> bool { match self { LinkOutputKind::StaticNoPicExe | LinkOutputKind::StaticPicExe => false, @@ -1363,171 +1069,64 @@ impl LinkOutputKind { } } -impl FromStr for LinkOutputKind { - type Err = String; - - fn from_str(s: &str) -> Result<LinkOutputKind, Self::Err> { - Ok(match s { - "dynamic-nopic-exe" => LinkOutputKind::DynamicNoPicExe, - "dynamic-pic-exe" => LinkOutputKind::DynamicPicExe, - "static-nopic-exe" => LinkOutputKind::StaticNoPicExe, - "static-pic-exe" => LinkOutputKind::StaticPicExe, - "dynamic-dylib" => LinkOutputKind::DynamicDylib, - "static-dylib" => LinkOutputKind::StaticDylib, - "wasi-reactor-exe" => LinkOutputKind::WasiReactorExe, - _ => { - return Err(format!( - "invalid value for CRT object kind. \ - Use '(dynamic,static)-(nopic,pic)-exe' or \ - '(dynamic,static)-dylib' or 'wasi-reactor-exe'" - )); - } - }) - } -} - -crate::json::serde_deserialize_from_str!(LinkOutputKind); - -impl fmt::Display for LinkOutputKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } -} - pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<StaticCow<str>>>; pub type LinkArgsCli = BTreeMap<LinkerFlavorCli, Vec<StaticCow<str>>>; -/// Which kind of debuginfo does the target use? -/// -/// Useful in determining whether a target supports Split DWARF (a target with -/// `DebuginfoKind::Dwarf` and supporting `SplitDebuginfo::Unpacked` for example). -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -pub enum DebuginfoKind { - /// DWARF debuginfo (such as that used on `x86_64_unknown_linux_gnu`). - #[default] - Dwarf, - /// DWARF debuginfo in dSYM files (such as on Apple platforms). - DwarfDsym, - /// Program database files (such as on Windows). - Pdb, -} - -impl DebuginfoKind { - fn as_str(&self) -> &'static str { - match self { - DebuginfoKind::Dwarf => "dwarf", - DebuginfoKind::DwarfDsym => "dwarf-dsym", - DebuginfoKind::Pdb => "pdb", - } - } -} - -impl FromStr for DebuginfoKind { - type Err = String; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(match s { - "dwarf" => DebuginfoKind::Dwarf, - "dwarf-dsym" => DebuginfoKind::DwarfDsym, - "pdb" => DebuginfoKind::Pdb, - _ => { - return Err(format!( - "'{s}' is not a valid value for debuginfo-kind. Use 'dwarf', \ - 'dwarf-dsym' or 'pdb'." 
- )); - } - }) - } -} - -crate::json::serde_deserialize_from_str!(DebuginfoKind); - -impl ToJson for DebuginfoKind { - fn to_json(&self) -> Json { - self.as_str().to_json() - } -} - -impl fmt::Display for DebuginfoKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } -} - -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -pub enum SplitDebuginfo { - /// Split debug-information is disabled, meaning that on supported platforms - /// you can find all debug information in the executable itself. This is - /// only supported for ELF effectively. - /// - /// * Windows - not supported - /// * macOS - don't run `dsymutil` - /// * ELF - `.debug_*` sections - #[default] - Off, - - /// Split debug-information can be found in a "packed" location separate - /// from the final artifact. This is supported on all platforms. +crate::target_spec_enum! { + /// Which kind of debuginfo does the target use? /// - /// * Windows - `*.pdb` - /// * macOS - `*.dSYM` (run `dsymutil`) - /// * ELF - `*.dwp` (run `thorin`) - Packed, - - /// Split debug-information can be found in individual object files on the - /// filesystem. The main executable may point to the object files. - /// - /// * Windows - not supported - /// * macOS - supported, scattered object files - /// * ELF - supported, scattered `*.dwo` or `*.o` files (see `SplitDwarfKind`) - Unpacked, -} - -impl SplitDebuginfo { - fn as_str(&self) -> &'static str { - match self { - SplitDebuginfo::Off => "off", - SplitDebuginfo::Packed => "packed", - SplitDebuginfo::Unpacked => "unpacked", - } - } -} - -impl FromStr for SplitDebuginfo { - type Err = String; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(match s { - "off" => SplitDebuginfo::Off, - "unpacked" => SplitDebuginfo::Unpacked, - "packed" => SplitDebuginfo::Packed, - _ => { - return Err(format!( - "'{s}' is not a valid value for \ - split-debuginfo. Use 'off', 'unpacked', or 'packed'.", - )); - } - }) - } -} - -crate::json::serde_deserialize_from_str!(SplitDebuginfo); - -impl ToJson for SplitDebuginfo { - fn to_json(&self) -> Json { - self.as_str().to_json() - } -} - -impl fmt::Display for SplitDebuginfo { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } + /// Useful in determining whether a target supports Split DWARF (a target with + /// `DebuginfoKind::Dwarf` and supporting `SplitDebuginfo::Unpacked` for example). + #[derive(Default)] + pub enum DebuginfoKind { + /// DWARF debuginfo (such as that used on `x86_64_unknown_linux_gnu`). + #[default] + Dwarf = "dwarf", + /// DWARF debuginfo in dSYM files (such as on Apple platforms). + DwarfDsym = "dwarf-dsym", + /// Program database files (such as on Windows). + Pdb = "pdb", + } + + parse_error_type = "debuginfo kind"; +} + +crate::target_spec_enum! { + #[derive(Default)] + pub enum SplitDebuginfo { + /// Split debug-information is disabled, meaning that on supported platforms + /// you can find all debug information in the executable itself. This is + /// only supported for ELF effectively. + /// + /// * Windows - not supported + /// * macOS - don't run `dsymutil` + /// * ELF - `.debug_*` sections + #[default] + Off = "off", + + /// Split debug-information can be found in a "packed" location separate + /// from the final artifact. This is supported on all platforms. 
+ /// + /// * Windows - `*.pdb` + /// * macOS - `*.dSYM` (run `dsymutil`) + /// * ELF - `*.dwp` (run `thorin`) + Packed = "packed", + + /// Split debug-information can be found in individual object files on the + /// filesystem. The main executable may point to the object files. + /// + /// * Windows - not supported + /// * macOS - supported, scattered object files + /// * ELF - supported, scattered `*.dwo` or `*.o` files (see `SplitDwarfKind`) + Unpacked = "unpacked", + } + + parse_error_type = "split debuginfo"; } into_diag_arg_using_display!(SplitDebuginfo); -#[derive(Clone, Debug, PartialEq, Eq, serde_derive::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, serde_derive::Deserialize, schemars::JsonSchema)] #[serde(tag = "kind")] #[serde(rename_all = "kebab-case")] pub enum StackProbeType { @@ -1688,6 +1287,19 @@ impl FromStr for SanitizerSet { } crate::json::serde_deserialize_from_str!(SanitizerSet); +impl schemars::JsonSchema for SanitizerSet { + fn schema_name() -> std::borrow::Cow<'static, str> { + "SanitizerSet".into() + } + fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema { + let all = Self::all().iter().map(|sanitizer| sanitizer.as_str()).collect::<Vec<_>>(); + schemars::json_schema! ({ + "type": "string", + "enum": all, + }) + .into() + } +} impl ToJson for SanitizerSet { fn to_json(&self) -> Json { @@ -1699,17 +1311,20 @@ impl ToJson for SanitizerSet { } } -#[derive(Clone, Copy, PartialEq, Hash, Debug)] -pub enum FramePointer { - /// Forces the machine code generator to always preserve the frame pointers. - Always, - /// Forces the machine code generator to preserve the frame pointers except for the leaf - /// functions (i.e. those that don't call other functions). - NonLeaf, - /// Allows the machine code generator to omit the frame pointers. - /// - /// This option does not guarantee that the frame pointers will be omitted. - MayOmit, +crate::target_spec_enum! { + pub enum FramePointer { + /// Forces the machine code generator to always preserve the frame pointers. + Always = "always", + /// Forces the machine code generator to preserve the frame pointers except for the leaf + /// functions (i.e. those that don't call other functions). + NonLeaf = "non-leaf", + /// Allows the machine code generator to omit the frame pointers. + /// + /// This option does not guarantee that the frame pointers will be omitted. + MayOmit = "may-omit", + } + + parse_error_type = "frame pointer"; } impl FramePointer { @@ -1726,93 +1341,43 @@ impl FramePointer { } } -impl FromStr for FramePointer { - type Err = String; - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(match s { - "always" => Self::Always, - "non-leaf" => Self::NonLeaf, - "may-omit" => Self::MayOmit, - _ => return Err(format!("'{s}' is not a valid value for frame-pointer")), - }) - } -} - -crate::json::serde_deserialize_from_str!(FramePointer); - -impl ToJson for FramePointer { - fn to_json(&self) -> Json { - match *self { - Self::Always => "always", - Self::NonLeaf => "non-leaf", - Self::MayOmit => "may-omit", - } - .to_json() - } -} - -/// Controls use of stack canaries. -#[derive(Clone, Copy, Debug, PartialEq, Hash, Eq)] -pub enum StackProtector { - /// Disable stack canary generation. - None, - - /// On LLVM, mark all generated LLVM functions with the `ssp` attribute (see - /// llvm/docs/LangRef.rst). This triggers stack canary generation in - /// functions which contain an array of a byte-sized type with more than - /// eight elements. - Basic, +crate::target_spec_enum! 
{ + /// Controls use of stack canaries. + pub enum StackProtector { + /// Disable stack canary generation. + None = "none", - /// On LLVM, mark all generated LLVM functions with the `sspstrong` - /// attribute (see llvm/docs/LangRef.rst). This triggers stack canary - /// generation in functions which either contain an array, or which take - /// the address of a local variable. - Strong, + /// On LLVM, mark all generated LLVM functions with the `ssp` attribute (see + /// llvm/docs/LangRef.rst). This triggers stack canary generation in + /// functions which contain an array of a byte-sized type with more than + /// eight elements. + Basic = "basic", - /// Generate stack canaries in all functions. - All, -} + /// On LLVM, mark all generated LLVM functions with the `sspstrong` + /// attribute (see llvm/docs/LangRef.rst). This triggers stack canary + /// generation in functions which either contain an array, or which take + /// the address of a local variable. + Strong = "strong", -impl StackProtector { - fn as_str(&self) -> &'static str { - match self { - StackProtector::None => "none", - StackProtector::Basic => "basic", - StackProtector::Strong => "strong", - StackProtector::All => "all", - } + /// Generate stack canaries in all functions. + All = "all", } -} -impl FromStr for StackProtector { - type Err = (); - - fn from_str(s: &str) -> Result<StackProtector, ()> { - Ok(match s { - "none" => StackProtector::None, - "basic" => StackProtector::Basic, - "strong" => StackProtector::Strong, - "all" => StackProtector::All, - _ => return Err(()), - }) - } -} - -impl fmt::Display for StackProtector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } + parse_error_type = "stack protector"; } into_diag_arg_using_display!(StackProtector); -#[derive(PartialEq, Clone, Debug)] -pub enum BinaryFormat { - Coff, - Elf, - MachO, - Wasm, - Xcoff, +crate::target_spec_enum! { + pub enum BinaryFormat { + Coff = "coff", + Elf = "elf", + MachO = "mach-o", + Wasm = "wasm", + Xcoff = "xcoff", + } + + parse_error_type = "binary format"; } impl BinaryFormat { @@ -1828,38 +1393,6 @@ impl BinaryFormat { } } -impl FromStr for BinaryFormat { - type Err = String; - fn from_str(s: &str) -> Result<Self, Self::Err> { - match s { - "coff" => Ok(Self::Coff), - "elf" => Ok(Self::Elf), - "mach-o" => Ok(Self::MachO), - "wasm" => Ok(Self::Wasm), - "xcoff" => Ok(Self::Xcoff), - _ => Err(format!( - "'{s}' is not a valid value for binary_format. 
\ - Use 'coff', 'elf', 'mach-o', 'wasm' or 'xcoff' " - )), - } - } -} - -crate::json::serde_deserialize_from_str!(BinaryFormat); - -impl ToJson for BinaryFormat { - fn to_json(&self) -> Json { - match self { - Self::Coff => "coff", - Self::Elf => "elf", - Self::MachO => "mach-o", - Self::Wasm => "wasm", - Self::Xcoff => "xcoff", - } - .to_json() - } -} - impl ToJson for Align { fn to_json(&self) -> Json { self.bits().to_json() diff --git a/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_msvc.rs index f1b6fa123de..cd55576ef81 100644 --- a/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_msvc.rs +++ b/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_msvc.rs @@ -15,7 +15,7 @@ pub(crate) fn target() -> Target { llvm_target: "aarch64-pc-windows-msvc".into(), metadata: TargetMetadata { description: Some("ARM64 Windows MSVC".into()), - tier: Some(2), + tier: Some(1), host_tools: Some(true), std: Some(true), }, diff --git a/compiler/rustc_target/src/target_features.rs b/compiler/rustc_target/src/target_features.rs index 4c1b8c99426..dc70089c385 100644 --- a/compiler/rustc_target/src/target_features.rs +++ b/compiler/rustc_target/src/target_features.rs @@ -849,6 +849,7 @@ const IBMZ_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[ ("miscellaneous-extensions-3", Unstable(sym::s390x_target_feature), &[]), ("miscellaneous-extensions-4", Unstable(sym::s390x_target_feature), &[]), ("nnp-assist", Unstable(sym::s390x_target_feature), &["vector"]), + ("soft-float", Forbidden { reason: "currently unsupported ABI-configuration feature" }, &[]), ("transactional-execution", Unstable(sym::s390x_target_feature), &[]), ("vector", Unstable(sym::s390x_target_feature), &[]), ("vector-enhancements-1", Unstable(sym::s390x_target_feature), &["vector"]), @@ -1177,6 +1178,13 @@ impl Target { _ => unreachable!(), } } + "s390x" => { + // We don't currently support a softfloat target on this architecture. + // As usual, we have to reject swapping the `soft-float` target feature. + // The "vector" target feature does not affect the ABI for floats + // because the vector and float registers overlap. + FeatureConstraints { required: &[], incompatible: &["soft-float"] } + } _ => NOTHING, } } diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs index 9447612d026..e18e294635b 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs @@ -91,7 +91,6 @@ mod suggest; pub mod need_type_info; pub mod nice_region_error; pub mod region; -pub mod sub_relations; /// Makes a valid string literal from a string by escaping special characters (" and \), /// unless they are already escaped. @@ -318,7 +317,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let (expected, found) = if expected_str == found_str { (join_path_syms(&expected_abs), join_path_syms(&found_abs)) } else { - (expected_str.clone(), found_str.clone()) + (expected_str, found_str) }; // We've displayed "expected `a::b`, found `a::b`". We add context to @@ -1617,20 +1616,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { && let Some((e, f)) = values.ty() && let TypeError::ArgumentSorts(..) 
| TypeError::Sorts(_) = terr { - let e = self.tcx.erase_regions(e); - let f = self.tcx.erase_regions(f); - let mut expected = with_forced_trimmed_paths!(e.sort_string(self.tcx)); - let mut found = with_forced_trimmed_paths!(f.sort_string(self.tcx)); - if let ObligationCauseCode::Pattern { span, .. } = cause.code() - && let Some(span) = span - && !span.from_expansion() - && cause.span.from_expansion() - { - // When the type error comes from a macro like `assert!()`, and we are pointing at - // code the user wrote the cause and effect are reversed as the expected value is - // what the macro expanded to. - (found, expected) = (expected, found); - } + let e = self.tcx.erase_and_anonymize_regions(e); + let f = self.tcx.erase_and_anonymize_regions(f); + let expected = with_forced_trimmed_paths!(e.sort_string(self.tcx)); + let found = with_forced_trimmed_paths!(f.sort_string(self.tcx)); if expected == found { label_or_note(span, terr.to_string(self.tcx)); } else { @@ -2153,9 +2142,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { ) -> Option<(DiagStyledString, DiagStyledString)> { match values { ValuePairs::Regions(exp_found) => self.expected_found_str(exp_found), - ValuePairs::Terms(exp_found) => { - self.expected_found_str_term(cause, exp_found, long_ty_path) - } + ValuePairs::Terms(exp_found) => self.expected_found_str_term(exp_found, long_ty_path), ValuePairs::Aliases(exp_found) => self.expected_found_str(exp_found), ValuePairs::ExistentialTraitRef(exp_found) => self.expected_found_str(exp_found), ValuePairs::ExistentialProjection(exp_found) => self.expected_found_str(exp_found), @@ -2194,7 +2181,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn expected_found_str_term( &self, - cause: &ObligationCause<'tcx>, exp_found: ty::error::ExpectedFound<ty::Term<'tcx>>, long_ty_path: &mut Option<PathBuf>, ) -> Option<(DiagStyledString, DiagStyledString)> { @@ -2202,27 +2188,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if exp_found.references_error() { return None; } - let (mut expected, mut found) = (exp_found.expected, exp_found.found); - - if let ObligationCauseCode::Pattern { span, .. } = cause.code() - && let Some(span) = span - && !span.from_expansion() - && cause.span.from_expansion() - { - // When the type error comes from a macro like `assert!()`, and we are pointing at - // code the user wrote, the cause and effect are reversed as the expected value is - // what the macro expanded to. So if the user provided a `Type` when the macro is - // written in such a way that a `bool` was expected, we want to print: - // = note: expected `bool` - // found `Type`" - // but as far as the compiler is concerned, after expansion what was expected was `Type` - // = note: expected `Type` - // found `bool`" - // so we reverse them here to match user expectation. 
- (expected, found) = (found, expected); - } - Some(match (expected.kind(), found.kind()) { + Some(match (exp_found.expected.kind(), exp_found.found.kind()) { (ty::TermKind::Ty(expected), ty::TermKind::Ty(found)) => { let (mut exp, mut fnd) = self.cmp(expected, found); // Use the terminal width as the basis to determine when to compress the printed diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs index ec2287ed516..edab530590b 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs @@ -894,7 +894,8 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> { use ty::{Infer, TyVar}; match (inner_ty.kind(), target_ty.kind()) { (&Infer(TyVar(a_vid)), &Infer(TyVar(b_vid))) => { - self.tecx.sub_relations.borrow_mut().unified(self.tecx, a_vid, b_vid) + self.tecx.sub_unification_table_root_var(a_vid) + == self.tecx.sub_unification_table_root_var(b_vid) } _ => false, } diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs index 40285e5d0e9..f997842a607 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs @@ -629,7 +629,11 @@ impl<T> Trait<T> for X { let tcx = self.tcx; // Don't suggest constraining a projection to something containing itself - if self.tcx.erase_regions(values.found).contains(self.tcx.erase_regions(values.expected)) { + if self + .tcx + .erase_and_anonymize_regions(values.found) + .contains(self.tcx.erase_and_anonymize_regions(values.expected)) + { return; } @@ -853,11 +857,11 @@ fn foo(&self) -> Self::T { String::new() } && self.infcx.can_eq(param_env, assoc_ty, found) { let msg = match assoc_item.container { - ty::AssocItemContainer::Trait => { + ty::AssocContainer::Trait => { "associated type defaults can't be assumed inside the \ trait defining them" } - ty::AssocItemContainer::Impl => { + ty::AssocContainer::InherentImpl | ty::AssocContainer::TraitImpl(_) => { "associated type is `default` and may be overridden" } }; diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs index 7369134420c..518d4fe17e8 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs @@ -571,13 +571,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // but right now it's not really very smart when it comes to implicit `Sized` // predicates and bounds on the trait itself. 
- let Some(impl_def_id) = self.tcx.associated_item(impl_item_def_id).impl_container(self.tcx) - else { - return; - }; - let Some(trait_ref) = self.tcx.impl_trait_ref(impl_def_id) else { + let Some(impl_def_id) = self.tcx.trait_impl_of_assoc(impl_item_def_id.to_def_id()) else { return; }; + let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap(); let trait_args = trait_ref .instantiate_identity() // Replace the explicit self type with `Self` for better suggestion rendering diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/sub_relations.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/sub_relations.rs deleted file mode 100644 index ef26a8ff7b8..00000000000 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/sub_relations.rs +++ /dev/null @@ -1,81 +0,0 @@ -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::undo_log::NoUndo; -use rustc_data_structures::unify as ut; -use rustc_middle::ty; - -use crate::infer::InferCtxt; - -#[derive(Debug, Copy, Clone, PartialEq)] -struct SubId(u32); -impl ut::UnifyKey for SubId { - type Value = (); - #[inline] - fn index(&self) -> u32 { - self.0 - } - #[inline] - fn from_index(i: u32) -> SubId { - SubId(i) - } - fn tag() -> &'static str { - "SubId" - } -} - -/// When reporting ambiguity errors, we sometimes want to -/// treat all inference vars which are subtypes of each -/// others as if they are equal. For this case we compute -/// the transitive closure of our subtype obligations here. -/// -/// E.g. when encountering ambiguity errors, we want to suggest -/// specifying some method argument or to add a type annotation -/// to a local variable. Because subtyping cannot change the -/// shape of a type, it's fine if the cause of the ambiguity error -/// is only related to the suggested variable via subtyping. -/// -/// Even for something like `let x = returns_arg(); x.method();` the -/// type of `x` is only a supertype of the argument of `returns_arg`. We -/// still want to suggest specifying the type of the argument. 
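// Illustrative sketch, not part of the removed file: with `SubRelations` gone, the
// "are these two inference variables related by subtyping?" question is answered by the
// inference context's sub-unification table (see the `need_type_info.rs` hunk above and the
// `InferCtxtLike` additions later in this diff). Roughly:
//
//     fn sub_unified<'tcx>(infcx: &InferCtxt<'tcx>, a: ty::TyVid, b: ty::TyVid) -> bool {
//         // Two variables count as equal for ambiguity suggestions when they share a
//         // sub-unification root; this free-standing wrapper itself is hypothetical.
//         infcx.sub_unification_table_root_var(a) == infcx.sub_unification_table_root_var(b)
//     }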
-#[derive(Default)] -pub struct SubRelations { - map: FxHashMap<ty::TyVid, SubId>, - table: ut::UnificationTableStorage<SubId>, -} - -impl SubRelations { - fn get_id<'tcx>(&mut self, infcx: &InferCtxt<'tcx>, vid: ty::TyVid) -> SubId { - let root_vid = infcx.root_var(vid); - *self.map.entry(root_vid).or_insert_with(|| self.table.with_log(&mut NoUndo).new_key(())) - } - - pub fn add_constraints<'tcx>( - &mut self, - infcx: &InferCtxt<'tcx>, - obls: impl IntoIterator<Item = ty::Predicate<'tcx>>, - ) { - for p in obls { - let (a, b) = match p.kind().skip_binder() { - ty::PredicateKind::Subtype(ty::SubtypePredicate { a_is_expected: _, a, b }) => { - (a, b) - } - ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) => (a, b), - _ => continue, - }; - - match (a.kind(), b.kind()) { - (&ty::Infer(ty::TyVar(a_vid)), &ty::Infer(ty::TyVar(b_vid))) => { - let a = self.get_id(infcx, a_vid); - let b = self.get_id(infcx, b_vid); - self.table.with_log(&mut NoUndo).unify_var_var(a, b).unwrap(); - } - _ => continue, - } - } - } - - pub fn unified<'tcx>(&mut self, infcx: &InferCtxt<'tcx>, a: ty::TyVid, b: ty::TyVid) -> bool { - let a = self.get_id(infcx, a); - let b = self.get_id(infcx, b); - self.table.with_log(&mut NoUndo).unioned(a, b) - } -} diff --git a/compiler/rustc_trait_selection/src/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/mod.rs index 82695688ae8..cce20b05c79 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/mod.rs @@ -7,8 +7,6 @@ use rustc_macros::extension; use rustc_middle::bug; use rustc_middle::ty::{self, Ty}; -use crate::error_reporting::infer::sub_relations; - pub mod infer; pub mod traits; @@ -21,7 +19,6 @@ pub mod traits; /// methods which should not be used during the happy path. 
pub struct TypeErrCtxt<'a, 'tcx> { pub infcx: &'a InferCtxt<'tcx>, - pub sub_relations: std::cell::RefCell<sub_relations::SubRelations>, pub typeck_results: Option<std::cell::Ref<'a, ty::TypeckResults<'tcx>>>, pub fallback_has_occurred: bool, @@ -38,7 +35,6 @@ impl<'tcx> InferCtxt<'tcx> { fn err_ctxt(&self) -> TypeErrCtxt<'_, 'tcx> { TypeErrCtxt { infcx: self, - sub_relations: Default::default(), typeck_results: None, fallback_has_occurred: false, normalize_fn_sig: Box::new(|fn_sig| fn_sig), diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/call_kind.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/call_kind.rs index 8a67e4ccd45..f54ebd76cab 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/call_kind.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/call_kind.rs @@ -5,7 +5,7 @@ use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_hir::{LangItem, lang_items}; -use rustc_middle::ty::{AssocItemContainer, GenericArgsRef, Instance, Ty, TyCtxt, TypingEnv}; +use rustc_middle::ty::{AssocContainer, GenericArgsRef, Instance, Ty, TyCtxt, TypingEnv}; use rustc_span::{DUMMY_SP, DesugaringKind, Ident, Span, sym}; use tracing::debug; @@ -76,8 +76,9 @@ pub fn call_kind<'tcx>( let parent = tcx.opt_associated_item(method_did).and_then(|assoc| { let container_id = assoc.container_id(tcx); match assoc.container { - AssocItemContainer::Impl => tcx.trait_id_of_impl(container_id), - AssocItemContainer::Trait => Some(container_id), + AssocContainer::InherentImpl => None, + AssocContainer::TraitImpl(_) => tcx.trait_id_of_impl(container_id), + AssocContainer::Trait => Some(container_id), } }); diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs index bc984f30472..149f5e638b1 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs @@ -27,8 +27,8 @@ use rustc_middle::ty::print::{ with_forced_trimmed_paths, }; use rustc_middle::ty::{ - self, TraitRef, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt, - Upcast, + self, GenericArgKind, TraitRef, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, + TypeVisitableExt, Upcast, }; use rustc_middle::{bug, span_bug}; use rustc_span::{BytePos, DUMMY_SP, STDLIB_STABLE_CRATES, Span, Symbol, sym}; @@ -75,7 +75,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { { if let Some(cause) = self .tcx - .diagnostic_hir_wf_check((tcx.erase_regions(obligation.predicate), *wf_loc)) + .diagnostic_hir_wf_check((tcx.erase_and_anonymize_regions(obligation.predicate), *wf_loc)) { obligation.cause = cause.clone(); span = obligation.cause.span; @@ -2316,7 +2316,19 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { cand }) .collect(); - impl_candidates.sort_by_key(|cand| (cand.similarity, cand.trait_ref.to_string())); + impl_candidates.sort_by_key(|cand| { + // When suggesting array types, sort them by the length of the array, not lexicographically (#135098) + let len = if let GenericArgKind::Type(ty) = cand.trait_ref.args[0].kind() + && let ty::Array(_, len) = ty.kind() + { + // Deprioritize suggestions for parameterized arrays. 
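// Worked example for the sort key below (the array types are illustrative, not taken from
// this diff): `try_to_target_usize` succeeds only when the array length is a concrete
// constant, so candidates for `[u8; 2]` and `[u8; 8]` sort by 2 and 8 respectively, while a
// candidate whose length is still generic (`[u8; N]`) falls back to `u64::MAX` and is
// suggested last.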
+ len.try_to_target_usize(self.tcx).unwrap_or(u64::MAX) + } else { + 0 + }; + + (cand.similarity, len, cand.trait_ref.to_string()) + }); let mut impl_candidates: Vec<_> = impl_candidates.into_iter().map(|cand| cand.trait_ref).collect(); impl_candidates.dedup(); @@ -2612,8 +2624,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } // Erase regions because layout code doesn't particularly care about regions. - let trait_pred = - self.tcx.erase_regions(self.tcx.instantiate_bound_regions_with_erased(trait_pred)); + let trait_pred = self.tcx.erase_and_anonymize_regions( + self.tcx.instantiate_bound_regions_with_erased(trait_pred), + ); let src_and_dst = rustc_transmute::Types { dst: trait_pred.trait_ref.args.type_at(0), diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs index c8500b2d9d4..f794ff632c5 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs @@ -139,10 +139,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { &self, mut errors: Vec<FulfillmentError<'tcx>>, ) -> ErrorGuaranteed { - self.sub_relations - .borrow_mut() - .add_constraints(self, errors.iter().map(|e| e.obligation.predicate)); - #[derive(Debug)] struct ErrorDescriptor<'tcx> { goal: Goal<'tcx, ty::Predicate<'tcx>>, diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs index 4f1f5c330e5..a0876d8fe75 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs @@ -3,10 +3,10 @@ use std::fmt; use rustc_errors::{Diag, E0275, EmissionGuarantee, ErrorGuaranteed, struct_span_code_err}; use rustc_hir::def::Namespace; use rustc_hir::def_id::LOCAL_CRATE; +use rustc_hir::limit::Limit; use rustc_infer::traits::{Obligation, PredicateObligation}; use rustc_middle::ty::print::{FmtPrinter, Print}; use rustc_middle::ty::{self, TyCtxt, Upcast}; -use rustc_session::Limit; use rustc_span::Span; use tracing::debug; @@ -67,7 +67,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // We don't need to save the type to a file, we will be talking about this type already // in a separate note when we explain the obligation, so it will be available that way. 
let mut p: FmtPrinter<'_, '_> = - FmtPrinter::new_with_limit(tcx, Namespace::TypeNS, rustc_session::Limit(6)); + FmtPrinter::new_with_limit(tcx, Namespace::TypeNS, Limit(6)); value.print(&mut p).unwrap(); p.into_buffer() } else { diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs index 40a27c9aebe..f2f840581cf 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs @@ -32,8 +32,8 @@ use rustc_middle::ty::print::{ }; use rustc_middle::ty::{ self, AdtKind, GenericArgs, InferTy, IsSuggestable, Ty, TyCtxt, TypeFoldable, TypeFolder, - TypeSuperFoldable, TypeVisitableExt, TypeckResults, Upcast, suggest_arbitrary_trait_bound, - suggest_constraining_type_param, + TypeSuperFoldable, TypeSuperVisitable, TypeVisitableExt, TypeVisitor, TypeckResults, Upcast, + suggest_arbitrary_trait_bound, suggest_constraining_type_param, }; use rustc_middle::{bug, span_bug}; use rustc_span::def_id::LocalDefId; @@ -263,6 +263,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { _ => (false, None), }; + let mut finder = ParamFinder { .. }; + finder.visit_binder(&trait_pred); + // FIXME: Add check for trait bound that is already present, particularly `?Sized` so we // don't suggest `T: Sized + ?Sized`. loop { @@ -411,6 +414,26 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } } + hir::Node::TraitItem(hir::TraitItem { + generics, + kind: hir::TraitItemKind::Fn(..), + .. + }) + | hir::Node::ImplItem(hir::ImplItem { + generics, + impl_kind: hir::ImplItemImplKind::Inherent { .. }, + kind: hir::ImplItemKind::Fn(..), + .. + }) if finder.can_suggest_bound(generics) => { + // Missing generic type parameter bound. + suggest_arbitrary_trait_bound( + self.tcx, + generics, + err, + trait_pred, + associated_ty, + ); + } hir::Node::Item(hir::Item { kind: hir::ItemKind::Struct(_, generics, _) @@ -423,7 +446,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { | hir::ItemKind::Const(_, generics, _, _) | hir::ItemKind::TraitAlias(_, generics, _), .. - }) if !param_ty => { + }) if finder.can_suggest_bound(generics) => { // Missing generic type parameter bound. if suggest_arbitrary_trait_bound( self.tcx, @@ -2419,7 +2442,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // Look for a type inside the coroutine interior that matches the target type to get // a span. - let target_ty_erased = self.tcx.erase_regions(target_ty); + let target_ty_erased = self.tcx.erase_and_anonymize_regions(target_ty); let ty_matches = |ty| -> bool { // Careful: the regions for types that appear in the // coroutine interior are not generally known, so we @@ -2431,10 +2454,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // interior generally contain "bound regions" to // represent regions that are part of the suspended // coroutine frame. Bound regions are preserved by - // `erase_regions` and so we must also call + // `erase_and_anonymize_regions` and so we must also call // `instantiate_bound_regions_with_erased`. let ty_erased = self.tcx.instantiate_bound_regions_with_erased(ty); - let ty_erased = self.tcx.erase_regions(ty_erased); + let ty_erased = self.tcx.erase_and_anonymize_regions(ty_erased); let eq = ty_erased == target_ty_erased; debug!(?ty_erased, ?target_ty_erased, ?eq); eq @@ -5068,8 +5091,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // Suggesting `T: ?Sized` is only valid in an ADT if `T` is only used in a // borrow. 
`struct S<'a, T: ?Sized>(&'a T);` is valid, `struct S<T: ?Sized>(T);` // is not. Look for invalid "bare" parameter uses, and suggest using indirection. - let mut visitor = - FindTypeParam { param: param.name.ident().name, invalid_spans: vec![], nested: false }; + let mut visitor = FindTypeParam { param: param.name.ident().name, .. }; visitor.visit_item(item); if visitor.invalid_spans.is_empty() { return false; } @@ -5228,7 +5250,7 @@ fn hint_missing_borrow<'tcx>( /// Used to suggest replacing associated types with an explicit type in `where` clauses. #[derive(Debug)] pub struct SelfVisitor<'v> { - pub paths: Vec<&'v hir::Ty<'v>>, + pub paths: Vec<&'v hir::Ty<'v>> = Vec::new(), pub name: Option<Symbol>, } @@ -5599,7 +5621,7 @@ fn point_at_assoc_type_restriction<G: EmissionGuarantee>( ); // Search for the associated type `Self::{name}`, get // its type and suggest replacing the bound with it. - let mut visitor = SelfVisitor { paths: vec![], name: Some(name) }; + let mut visitor = SelfVisitor { name: Some(name), .. }; visitor.visit_trait_ref(trait_ref); for path in visitor.paths { err.span_suggestion_verbose( @@ -5610,7 +5632,7 @@ fn point_at_assoc_type_restriction<G: EmissionGuarantee>( ); } } else { - let mut visitor = SelfVisitor { paths: vec![], name: None }; + let mut visitor = SelfVisitor { name: None, .. }; visitor.visit_trait_ref(trait_ref); let span: MultiSpan = visitor.paths.iter().map(|p| p.span).collect::<Vec<Span>>().into(); @@ -5640,8 +5662,8 @@ fn get_deref_type_and_refs(mut ty: Ty<'_>) -> (Ty<'_>, Vec<hir::Mutability>) { /// `param: ?Sized` would be a valid constraint. struct FindTypeParam { param: rustc_span::Symbol, - invalid_spans: Vec<Span>, - nested: bool, + invalid_spans: Vec<Span> = Vec::new(), + nested: bool = false, } impl<'v> Visitor<'v> for FindTypeParam { @@ -5679,3 +5701,38 @@ impl<'v> Visitor<'v> for FindTypeParam { } } } + +/// Look for type parameters in predicates. We use this to identify whether a bound is suitable +/// on a given item. +struct ParamFinder { + params: Vec<Symbol> = Vec::new(), +} + +impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ParamFinder { + fn visit_ty(&mut self, t: Ty<'tcx>) -> Self::Result { + match t.kind() { + ty::Param(p) => self.params.push(p.name), + _ => {} + } + t.super_visit_with(self) + } +} + +impl ParamFinder { + /// Whether the `hir::Generics` of the current item can suggest the evaluated bound because its + /// references to type parameters are present in the generics. + fn can_suggest_bound(&self, generics: &hir::Generics<'_>) -> bool { + if self.params.is_empty() { + // There are no references to type parameters at all, so suggesting the bound + // would be reasonable. + return true; + } + generics.params.iter().any(|p| match p.name { + hir::ParamName::Plain(p_name) => { + // All of the parameters in the bound can be referenced in the current item.
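// Worked example for the check below (item and bound are illustrative, not from this diff):
// for `fn foo<T>(x: T)` and an evaluated bound `T: Clone`, `params` is `[T]` and the item
// declares a plain `T`, so the bound may be suggested here. If the bound only mentions a
// parameter the item does not declare (say `V: Clone` on the same `foo`), nothing matches
// and no suggestion is made on this item. A bound mentioning `Self` matches any plain
// parameter via the `kw::SelfUpper` comparison.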
+ self.params.iter().any(|p| *p == p_name.name || *p == kw::SelfUpper) + } + _ => true, + }) + } +} diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs index 7c6b7b14ecb..4c50c44b841 100644 --- a/compiler/rustc_trait_selection/src/infer.rs +++ b/compiler/rustc_trait_selection/src/infer.rs @@ -184,10 +184,9 @@ impl<'tcx> InferCtxtBuilder<'tcx> { R: Debug + TypeFoldable<TyCtxt<'tcx>>, Canonical<'tcx, QueryResponse<'tcx, R>>: ArenaAllocatable<'tcx>, { - let (infcx, key, canonical_inference_vars) = - self.build_with_canonical(DUMMY_SP, canonical_key); + let (infcx, key, var_values) = self.build_with_canonical(DUMMY_SP, canonical_key); let ocx = ObligationCtxt::new(&infcx); let value = operation(&ocx, key)?; - ocx.make_canonicalized_query_response(canonical_inference_vars, value) + ocx.make_canonicalized_query_response(var_values, value) } } diff --git a/compiler/rustc_trait_selection/src/lib.rs b/compiler/rustc_trait_selection/src/lib.rs index e2b22f7bab7..fc0cf8f140a 100644 --- a/compiler/rustc_trait_selection/src/lib.rs +++ b/compiler/rustc_trait_selection/src/lib.rs @@ -19,6 +19,7 @@ #![feature(assert_matches)] #![feature(associated_type_defaults)] #![feature(box_patterns)] +#![feature(default_field_values)] #![feature(if_let_guard)] #![feature(iter_intersperse)] #![feature(iterator_try_reduce)] diff --git a/compiler/rustc_trait_selection/src/solve/delegate.rs b/compiler/rustc_trait_selection/src/solve/delegate.rs index e6a2761db5a..b6bdf1067a3 100644 --- a/compiler/rustc_trait_selection/src/solve/delegate.rs +++ b/compiler/rustc_trait_selection/src/solve/delegate.rs @@ -126,13 +126,12 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< } ty::PredicateKind::Subtype(ty::SubtypePredicate { a, b, .. }) | ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) => { - if self.shallow_resolve(a).is_ty_var() && self.shallow_resolve(b).is_ty_var() { - // FIXME: We also need to register a subtype relation between these vars - // when those are added, and if they aren't in the same sub root then - // we should mark this goal as `has_changed`. - Some(Certainty::AMBIGUOUS) - } else { - None + match (self.shallow_resolve(a).kind(), self.shallow_resolve(b).kind()) { + (&ty::Infer(ty::TyVar(a_vid)), &ty::Infer(ty::TyVar(b_vid))) => { + self.sub_unify_ty_vids_raw(a_vid, b_vid); + Some(Certainty::AMBIGUOUS) + } + _ => None, } } ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(ct, _)) => { @@ -238,13 +237,14 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< canonical.instantiate(self.tcx, &values) } - fn instantiate_canonical_var_with_infer( + fn instantiate_canonical_var( &self, kind: CanonicalVarKind<'tcx>, span: Span, + var_values: &[ty::GenericArg<'tcx>], universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> ty::GenericArg<'tcx> { - self.0.instantiate_canonical_var(span, kind, universe_map) + self.0.instantiate_canonical_var(span, kind, var_values, universe_map) } fn add_item_bounds_for_hidden_type( @@ -300,7 +300,7 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< ) -> Result<Certainty, NoSolution> { // Erase regions because we compute layouts in `rustc_transmute`, // which will ICE for region vars. 
- let (dst, src) = self.tcx.erase_regions((dst, src)); + let (dst, src) = self.tcx.erase_and_anonymize_regions((dst, src)); let Some(assume) = rustc_transmute::Assume::from_const(self.tcx, assume) else { return Err(NoSolution); diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs index 4b493c95d59..bcd11d6918d 100644 --- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs +++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs @@ -759,7 +759,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for IllegalSelfTypeVisitor<'tcx> { )), ) .map(|trait_ref| { - self.tcx.erase_regions( + self.tcx.erase_and_anonymize_regions( self.tcx.instantiate_bound_regions_with_erased(trait_ref), ) }) diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs index a9fb16b8000..6fefac43699 100644 --- a/compiler/rustc_trait_selection/src/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/mod.rs @@ -576,7 +576,7 @@ pub fn try_evaluate_const<'tcx>( let args = replace_param_and_infer_args_with_placeholder(tcx, uv.args); let typing_env = infcx - .typing_env(tcx.erase_regions(param_env)) + .typing_env(tcx.erase_and_anonymize_regions(param_env)) .with_post_analysis_normalized(tcx); (args, typing_env) } @@ -589,7 +589,7 @@ pub fn try_evaluate_const<'tcx>( } } else { let typing_env = infcx - .typing_env(tcx.erase_regions(param_env)) + .typing_env(tcx.erase_and_anonymize_regions(param_env)) .with_post_analysis_normalized(tcx); (uv.args, typing_env) } @@ -634,14 +634,14 @@ pub fn try_evaluate_const<'tcx>( } let typing_env = infcx - .typing_env(tcx.erase_regions(param_env)) + .typing_env(tcx.erase_and_anonymize_regions(param_env)) .with_post_analysis_normalized(tcx); (uv.args, typing_env) } }; let uv = ty::UnevaluatedConst::new(uv.def, args); - let erased_uv = tcx.erase_regions(uv); + let erased_uv = tcx.erase_and_anonymize_regions(uv); use rustc_middle::mir::interpret::ErrorHandled; // FIXME: `def_span` will point at the definition of this const; ideally, we'd point at diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs index de404532899..945ca7c3775 100644 --- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs @@ -350,7 +350,9 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>( // Note that we don't care about whether the resume type has any drops since this is // redundant; there is no storage for the resume type, so if it is actually stored // in the interior, we'll already detect the need for a drop by checking the interior. - let typing_env = tcx.erase_regions(typing_env); + // + // FIXME(@lcnr): Why do we erase regions in the env here? 
Seems odd + let typing_env = tcx.erase_and_anonymize_regions(typing_env); let needs_drop = tcx.mir_coroutine_witnesses(def_id).is_some_and(|witness| { witness.field_tys.iter().any(|field| field.ty.needs_drop(tcx, typing_env)) }); diff --git a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs index 19eb85506b6..4d0465777dd 100644 --- a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs +++ b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs @@ -387,7 +387,7 @@ pub(crate) fn assoc_def( if let Some(assoc_item) = ancestors.leaf_def(tcx, assoc_def_id) { // Ensure that the impl is constrained, otherwise projection may give us // bad unconstrained infer vars. - if assoc_item.item.container == ty::AssocItemContainer::Impl + if let ty::AssocContainer::TraitImpl(_) = assoc_item.item.container && let Some(impl_def_id) = assoc_item.item.container_id(tcx).as_local() { tcx.ensure_ok().enforce_impl_non_lifetime_params_are_constrained(impl_def_id)?; diff --git a/compiler/rustc_traits/src/codegen.rs b/compiler/rustc_traits/src/codegen.rs index 7dd3c59edd0..4b05e2cc381 100644 --- a/compiler/rustc_traits/src/codegen.rs +++ b/compiler/rustc_traits/src/codegen.rs @@ -73,7 +73,7 @@ pub(crate) fn codegen_select_candidate<'tcx>( } let impl_source = infcx.resolve_vars_if_possible(impl_source); - let impl_source = tcx.erase_regions(impl_source); + let impl_source = tcx.erase_and_anonymize_regions(impl_source); if impl_source.has_non_region_infer() { // Unused generic types or consts on an impl get replaced with inference vars, // but never resolved, causing the return value of a query to contain inference diff --git a/compiler/rustc_traits/src/evaluate_obligation.rs b/compiler/rustc_traits/src/evaluate_obligation.rs index 819b8e3231c..8f72bdf0972 100644 --- a/compiler/rustc_traits/src/evaluate_obligation.rs +++ b/compiler/rustc_traits/src/evaluate_obligation.rs @@ -19,7 +19,7 @@ fn evaluate_obligation<'tcx>( ) -> Result<EvaluationResult, OverflowError> { assert!(!tcx.next_trait_solver_globally()); debug!("evaluate_obligation(canonical_goal={:#?})", canonical_goal); - let (ref infcx, goal, _canonical_inference_vars) = + let (ref infcx, goal, _var_values) = tcx.infer_ctxt().build_with_canonical(DUMMY_SP, &canonical_goal); debug!("evaluate_obligation: goal={:#?}", goal); let ParamEnvAnd { param_env, value: predicate } = goal; diff --git a/compiler/rustc_traits/src/normalize_erasing_regions.rs b/compiler/rustc_traits/src/normalize_erasing_regions.rs index c1b848a2e79..c98b56abe9c 100644 --- a/compiler/rustc_traits/src/normalize_erasing_regions.rs +++ b/compiler/rustc_traits/src/normalize_erasing_regions.rs @@ -41,7 +41,7 @@ fn try_normalize_after_erasing_regions<'tcx, T: TypeFoldable<TyCtxt<'tcx>> + Par // fresh `InferCtxt`. If this assert does trigger, it will give // us a test case. 
debug_assert_eq!(normalized_value, resolved_value); - let erased = infcx.tcx.erase_regions(resolved_value); + let erased = infcx.tcx.erase_and_anonymize_regions(resolved_value); debug_assert!(!erased.has_infer(), "{erased:?}"); Ok(erased) } diff --git a/compiler/rustc_transmute/src/layout/mod.rs b/compiler/rustc_transmute/src/layout/mod.rs index acbce258f39..4e548ff8fe2 100644 --- a/compiler/rustc_transmute/src/layout/mod.rs +++ b/compiler/rustc_transmute/src/layout/mod.rs @@ -163,7 +163,7 @@ pub mod rustc { ty: Ty<'tcx>, ) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> { use rustc_middle::ty::layout::LayoutOf; - let ty = cx.tcx().erase_regions(ty); + let ty = cx.tcx().erase_and_anonymize_regions(ty); cx.layout_of(ty).map(|tl| tl.layout) } } diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index 3f83b4d50aa..d7ea26a2ab1 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -426,24 +426,23 @@ pub(crate) mod rustc { assert!(def.is_enum()); // Computes the layout of a variant. - let layout_of_variant = - |index, encoding: Option<TagEncoding<VariantIdx>>| -> Result<Self, Err> { - let variant_layout = ty_variant(cx, (ty, layout), index); - if variant_layout.is_uninhabited() { - return Ok(Self::uninhabited()); - } - let tag = cx.tcx().tag_for_variant( - cx.typing_env.as_query_input((cx.tcx().erase_regions(ty), index)), - ); - let variant_def = Def::Variant(def.variant(index)); - Self::from_variant( - variant_def, - tag.map(|tag| (tag, index, encoding.unwrap())), - (ty, variant_layout), - layout.size, - cx, - ) - }; + let layout_of_variant = |index, encoding: Option<_>| -> Result<Self, Err> { + let variant_layout = ty_variant(cx, (ty, layout), index); + if variant_layout.is_uninhabited() { + return Ok(Self::uninhabited()); + } + let tag = cx.tcx().tag_for_variant( + cx.typing_env.as_query_input((cx.tcx().erase_and_anonymize_regions(ty), index)), + ); + let variant_def = Def::Variant(def.variant(index)); + Self::from_variant( + variant_def, + tag.map(|tag| (tag, index, encoding.unwrap())), + (ty, variant_layout), + layout.size, + cx, + ) + }; match layout.variants() { Variants::Empty => Ok(Self::uninhabited()), @@ -634,7 +633,7 @@ pub(crate) mod rustc { (ty, layout): (Ty<'tcx>, Layout<'tcx>), i: VariantIdx, ) -> Layout<'tcx> { - let ty = cx.tcx().erase_regions(ty); + let ty = cx.tcx().erase_and_anonymize_regions(ty); TyAndLayout { ty, layout }.for_variant(&cx, i).layout } } diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs index e9629e31482..84f52e7fc9d 100644 --- a/compiler/rustc_ty_utils/src/assoc.rs +++ b/compiler/rustc_ty_utils/src/assoc.rs @@ -2,7 +2,7 @@ use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId}; use rustc_hir::definitions::{DefPathData, DisambiguatorState}; use rustc_hir::intravisit::{self, Visitor}; -use rustc_hir::{self as hir, ItemKind}; +use rustc_hir::{self as hir, ImplItemImplKind, ItemKind}; use rustc_middle::query::Providers; use rustc_middle::ty::{self, ImplTraitInTraitData, TyCtxt}; use rustc_middle::{bug, span_bug}; @@ -63,7 +63,7 @@ fn associated_items(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AssocItems { fn impl_item_implementor_ids(tcx: TyCtxt<'_>, impl_id: DefId) -> DefIdMap<DefId> { tcx.associated_items(impl_id) .in_definition_order() - .filter_map(|item| item.trait_item_def_id.map(|trait_item| (trait_item, item.def_id))) + .filter_map(|item| item.trait_item_def_id().map(|trait_item| 
(trait_item, item.def_id))) .collect() } @@ -97,12 +97,7 @@ fn associated_item_from_trait_item( } }; - ty::AssocItem { - kind, - def_id: owner_id.to_def_id(), - trait_item_def_id: Some(owner_id.to_def_id()), - container: ty::AssocItemContainer::Trait, - } + ty::AssocItem { kind, def_id: owner_id.to_def_id(), container: ty::AssocContainer::Trait } } fn associated_item_from_impl_item(tcx: TyCtxt<'_>, impl_item: &hir::ImplItem<'_>) -> ty::AssocItem { @@ -118,12 +113,13 @@ fn associated_item_from_impl_item(tcx: TyCtxt<'_>, impl_item: &hir::ImplItem<'_> } }; - ty::AssocItem { - kind, - def_id: owner_id.to_def_id(), - trait_item_def_id: impl_item.trait_item_def_id, - container: ty::AssocItemContainer::Impl, - } + let container = match impl_item.impl_kind { + ImplItemImplKind::Inherent { .. } => ty::AssocContainer::InherentImpl, + ImplItemImplKind::Trait { trait_item_def_id, .. } => { + ty::AssocContainer::TraitImpl(trait_item_def_id) + } + }; + ty::AssocItem { kind, def_id: owner_id.to_def_id(), container } } struct RPITVisitor<'a, 'tcx> { tcx: TyCtxt<'tcx>, @@ -190,7 +186,10 @@ fn associated_types_for_impl_traits_in_trait_or_impl<'tcx>( } let did = item.owner_id.def_id.to_def_id(); let item = tcx.hir_impl_item(*item); - let Some(trait_item_def_id) = item.trait_item_def_id else { + let ImplItemImplKind::Trait { + trait_item_def_id: Ok(trait_item_def_id), .. + } = item.impl_kind + else { return Some((did, vec![])); }; let iter = in_trait_def[&trait_item_def_id].iter().map(|&id| { @@ -256,8 +255,7 @@ fn associated_type_for_impl_trait_in_trait( }), }, def_id, - trait_item_def_id: None, - container: ty::AssocItemContainer::Trait, + container: ty::AssocContainer::Trait, }); // Copy visility of the containing function. @@ -322,8 +320,7 @@ fn associated_type_for_impl_trait_in_impl( }), }, def_id, - trait_item_def_id: Some(trait_assoc_def_id), - container: ty::AssocItemContainer::Impl, + container: ty::AssocContainer::TraitImpl(Ok(trait_assoc_def_id)), }); // Copy visility of the containing function. diff --git a/compiler/rustc_ty_utils/src/implied_bounds.rs b/compiler/rustc_ty_utils/src/implied_bounds.rs index cdfb93c4e7d..543f6a3ccf7 100644 --- a/compiler/rustc_ty_utils/src/implied_bounds.rs +++ b/compiler/rustc_ty_utils/src/implied_bounds.rs @@ -107,7 +107,7 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<' // the assumed wf types of the trait's RPITIT GAT. ty::ImplTraitInTraitData::Impl { .. } => { let impl_def_id = tcx.local_parent(def_id); - let rpitit_def_id = tcx.associated_item(def_id).trait_item_def_id.unwrap(); + let rpitit_def_id = tcx.trait_item_of(def_id).unwrap(); let args = ty::GenericArgs::identity_for_item(tcx, def_id).rebase_onto( tcx, impl_def_id.to_def_id(), diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs index 5c3f7d491a5..e28ebaabc0a 100644 --- a/compiler/rustc_ty_utils/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -176,7 +176,7 @@ fn resolve_associated_item<'tcx>( args, leaf_def.defining_node, ); - let args = infcx.tcx.erase_regions(args); + let args = infcx.tcx.erase_and_anonymize_regions(args); // HACK: We may have overlapping `dyn Trait` built-in impls and // user-provided blanket impls. 
Detect that case here, and return @@ -222,7 +222,7 @@ fn resolve_associated_item<'tcx>( return Err(guar); } - let args = tcx.erase_regions(args); + let args = tcx.erase_and_anonymize_regions(args); // We check that the impl item is compatible with the trait item // because otherwise we may ICE in const eval due to type mismatches, @@ -279,7 +279,7 @@ fn resolve_associated_item<'tcx>( assert_eq!(name, sym::clone_from); // Use the default `fn clone_from` from `trait Clone`. - let args = tcx.erase_regions(rcvr_args); + let args = tcx.erase_and_anonymize_regions(rcvr_args); Some(ty::Instance::new_raw(trait_item_id, args)) } } else if tcx.is_lang_item(trait_ref.def_id, LangItem::FnPtrTrait) { @@ -380,7 +380,7 @@ fn resolve_associated_item<'tcx>( } else if tcx.is_lang_item(trait_ref.def_id, LangItem::TransmuteTrait) { let name = tcx.item_name(trait_item_id); assert_eq!(name, sym::transmute); - let args = tcx.erase_regions(rcvr_args); + let args = tcx.erase_and_anonymize_regions(rcvr_args); Some(ty::Instance::new_raw(trait_item_id, args)) } else { Instance::try_resolve_item_for_coroutine(tcx, trait_item_id, trait_id, rcvr_args) diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs index 13d56889bd1..0ef435b1a0e 100644 --- a/compiler/rustc_ty_utils/src/needs_drop.rs +++ b/compiler/rustc_ty_utils/src/needs_drop.rs @@ -2,11 +2,11 @@ use rustc_data_structures::fx::FxHashSet; use rustc_hir::def_id::DefId; +use rustc_hir::limit::Limit; use rustc_middle::bug; use rustc_middle::query::Providers; use rustc_middle::ty::util::{AlwaysRequiresDrop, needs_drop_components}; use rustc_middle::ty::{self, EarlyBinder, GenericArgsRef, Ty, TyCtxt}; -use rustc_session::Limit; use rustc_span::sym; use tracing::{debug, instrument}; diff --git a/compiler/rustc_ty_utils/src/opaque_types.rs b/compiler/rustc_ty_utils/src/opaque_types.rs index 4a7263d0ccd..eb3236d3006 100644 --- a/compiler/rustc_ty_utils/src/opaque_types.rs +++ b/compiler/rustc_ty_utils/src/opaque_types.rs @@ -245,7 +245,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> { for &assoc in self.tcx.associated_items(parent).in_definition_order() { trace!(?assoc); - if assoc.trait_item_def_id != Some(alias_ty.def_id) { + if assoc.expect_trait_impl() != Ok(alias_ty.def_id) { continue; } diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index b22c326b9f2..a5987757dc3 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -88,7 +88,10 @@ fn defaultness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Defaultness { }), .. }) - | hir::Node::ImplItem(hir::ImplItem { defaultness, .. }) + | hir::Node::ImplItem(hir::ImplItem { + impl_kind: hir::ImplItemImplKind::Trait { defaultness, .. }, + .. + }) | hir::Node::TraitItem(hir::TraitItem { defaultness, .. 
}) => *defaultness, node => { bug!("`defaultness` called on {:?}", node); @@ -165,7 +168,7 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> { if tcx.def_kind(def_id) == DefKind::AssocFn && let assoc_item = tcx.associated_item(def_id) - && assoc_item.container == ty::AssocItemContainer::Trait + && assoc_item.container == ty::AssocContainer::Trait && assoc_item.defaultness(tcx).has_value() { let sig = tcx.fn_sig(def_id).instantiate_identity(); diff --git a/compiler/rustc_type_ir/Cargo.toml b/compiler/rustc_type_ir/Cargo.toml index d55e9b3b1be..6ccddb17ff2 100644 --- a/compiler/rustc_type_ir/Cargo.toml +++ b/compiler/rustc_type_ir/Cargo.toml @@ -5,6 +5,7 @@ edition = "2024" [dependencies] # tidy-alphabetical-start +arrayvec = { version = "0.7", default-features = false } bitflags = "2.4.1" derive-where = "1.2.7" ena = "0.14.3" diff --git a/compiler/rustc_type_ir/src/canonical.rs b/compiler/rustc_type_ir/src/canonical.rs index de2a9186e7c..ecf3ae4f8b2 100644 --- a/compiler/rustc_type_ir/src/canonical.rs +++ b/compiler/rustc_type_ir/src/canonical.rs @@ -1,7 +1,7 @@ use std::fmt; -use std::hash::Hash; use std::ops::Index; +use arrayvec::ArrayVec; use derive_where::derive_where; #[cfg(feature = "nightly")] use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext}; @@ -91,8 +91,18 @@ impl<I: Interner, V: fmt::Display> fmt::Display for Canonical<I, V> { derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext) )] pub enum CanonicalVarKind<I: Interner> { - /// Some kind of type inference variable. - Ty(CanonicalTyVarKind), + /// General type variable `?T` that can be unified with arbitrary types. + /// + /// We also store the index of the first type variable which is sub-unified + /// with this one. If there is no inference variable related to this one, + /// its `sub_root` just points to itself. + Ty { ui: UniverseIndex, sub_root: ty::BoundVar }, + + /// Integral type variable `?I` (that can only be unified with integral types). + Int, + + /// Floating-point type variable `?F` (that can only be unified with float types). + Float, /// A "placeholder" that represents "any type". PlaceholderTy(I::PlaceholderTy), @@ -117,15 +127,13 @@ impl<I: Interner> Eq for CanonicalVarKind<I> {} impl<I: Interner> CanonicalVarKind<I> { pub fn universe(self) -> UniverseIndex { match self { - CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) => ui, + CanonicalVarKind::Ty { ui, sub_root: _ } => ui, CanonicalVarKind::Region(ui) => ui, CanonicalVarKind::Const(ui) => ui, CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe(), CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe(), CanonicalVarKind::PlaceholderConst(placeholder) => placeholder.universe(), - CanonicalVarKind::Ty(CanonicalTyVarKind::Float | CanonicalTyVarKind::Int) => { - UniverseIndex::ROOT - } + CanonicalVarKind::Float | CanonicalVarKind::Int => UniverseIndex::ROOT, } } @@ -135,9 +143,7 @@ impl<I: Interner> CanonicalVarKind<I> { /// the updated universe is not the root. 
pub fn with_updated_universe(self, ui: UniverseIndex) -> CanonicalVarKind<I> { match self { - CanonicalVarKind::Ty(CanonicalTyVarKind::General(_)) => { - CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) - } + CanonicalVarKind::Ty { ui: _, sub_root } => CanonicalVarKind::Ty { ui, sub_root }, CanonicalVarKind::Region(_) => CanonicalVarKind::Region(ui), CanonicalVarKind::Const(_) => CanonicalVarKind::Const(ui), @@ -150,7 +156,7 @@ impl<I: Interner> CanonicalVarKind<I> { CanonicalVarKind::PlaceholderConst(placeholder) => { CanonicalVarKind::PlaceholderConst(placeholder.with_updated_universe(ui)) } - CanonicalVarKind::Ty(CanonicalTyVarKind::Int | CanonicalTyVarKind::Float) => { + CanonicalVarKind::Int | CanonicalVarKind::Float => { assert_eq!(ui, UniverseIndex::ROOT); self } @@ -159,19 +165,23 @@ impl<I: Interner> CanonicalVarKind<I> { pub fn is_existential(self) -> bool { match self { - CanonicalVarKind::Ty(_) => true, - CanonicalVarKind::PlaceholderTy(_) => false, - CanonicalVarKind::Region(_) => true, - CanonicalVarKind::PlaceholderRegion(..) => false, - CanonicalVarKind::Const(_) => true, - CanonicalVarKind::PlaceholderConst(_) => false, + CanonicalVarKind::Ty { .. } + | CanonicalVarKind::Int + | CanonicalVarKind::Float + | CanonicalVarKind::Region(_) + | CanonicalVarKind::Const(_) => true, + CanonicalVarKind::PlaceholderTy(_) + | CanonicalVarKind::PlaceholderRegion(..) + | CanonicalVarKind::PlaceholderConst(_) => false, } } pub fn is_region(self) -> bool { match self { CanonicalVarKind::Region(_) | CanonicalVarKind::PlaceholderRegion(_) => true, - CanonicalVarKind::Ty(_) + CanonicalVarKind::Ty { .. } + | CanonicalVarKind::Int + | CanonicalVarKind::Float | CanonicalVarKind::PlaceholderTy(_) | CanonicalVarKind::Const(_) | CanonicalVarKind::PlaceholderConst(_) => false, @@ -180,7 +190,11 @@ impl<I: Interner> CanonicalVarKind<I> { pub fn expect_placeholder_index(self) -> usize { match self { - CanonicalVarKind::Ty(_) | CanonicalVarKind::Region(_) | CanonicalVarKind::Const(_) => { + CanonicalVarKind::Ty { .. } + | CanonicalVarKind::Int + | CanonicalVarKind::Float + | CanonicalVarKind::Region(_) + | CanonicalVarKind::Const(_) => { panic!("expected placeholder: {self:?}") } @@ -191,27 +205,6 @@ impl<I: Interner> CanonicalVarKind<I> { } } -/// Rust actually has more than one category of type variables; -/// notably, the type variables we create for literals (e.g., 22 or -/// 22.) can only be instantiated with integral/float types (e.g., -/// usize or f32). In order to faithfully reproduce a type, we need to -/// know what set of types a given type variable can be unified with. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[cfg_attr( - feature = "nightly", - derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext) -)] -pub enum CanonicalTyVarKind { - /// General type variable `?T` that can be unified with arbitrary types. - General(UniverseIndex), - - /// Integral type variable `?I` (that can only be unified with integral types). - Int, - - /// Floating-point type variable `?F` (that can only be unified with float types). - Float, -} - /// A set of values corresponding to the canonical variables from some /// `Canonical`. 
You can give these values to /// `canonical_value.instantiate` to instantiate them into the canonical @@ -287,7 +280,10 @@ impl<I: Interner> CanonicalVarValues<I> { var_values: cx.mk_args_from_iter(infos.iter().enumerate().map( |(i, kind)| -> I::GenericArg { match kind { - CanonicalVarKind::Ty(_) | CanonicalVarKind::PlaceholderTy(_) => { + CanonicalVarKind::Ty { .. } + | CanonicalVarKind::Int + | CanonicalVarKind::Float + | CanonicalVarKind::PlaceholderTy(_) => { Ty::new_anon_bound(cx, ty::INNERMOST, ty::BoundVar::from_usize(i)) .into() } @@ -311,6 +307,37 @@ impl<I: Interner> CanonicalVarValues<I> { CanonicalVarValues { var_values: Default::default() } } + pub fn instantiate( + cx: I, + variables: I::CanonicalVarKinds, + mut f: impl FnMut(&[I::GenericArg], CanonicalVarKind<I>) -> I::GenericArg, + ) -> CanonicalVarValues<I> { + // Instantiating `CanonicalVarValues` is really hot, but limited to less than + // 4 most of the time. Avoid creating a `Vec` here. + if variables.len() <= 4 { + let mut var_values = ArrayVec::<_, 4>::new(); + for info in variables.iter() { + var_values.push(f(&var_values, info)); + } + CanonicalVarValues { var_values: cx.mk_args(&var_values) } + } else { + CanonicalVarValues::instantiate_cold(cx, variables, f) + } + } + + #[cold] + fn instantiate_cold( + cx: I, + variables: I::CanonicalVarKinds, + mut f: impl FnMut(&[I::GenericArg], CanonicalVarKind<I>) -> I::GenericArg, + ) -> CanonicalVarValues<I> { + let mut var_values = Vec::with_capacity(variables.len()); + for info in variables.iter() { + var_values.push(f(&var_values, info)); + } + CanonicalVarValues { var_values: cx.mk_args(&var_values) } + } + #[inline] pub fn len(&self) -> usize { self.var_values.len() diff --git a/compiler/rustc_type_ir/src/fast_reject.rs b/compiler/rustc_type_ir/src/fast_reject.rs index 5a05630e1bd..ed6416a7f55 100644 --- a/compiler/rustc_type_ir/src/fast_reject.rs +++ b/compiler/rustc_type_ir/src/fast_reject.rs @@ -122,7 +122,7 @@ pub fn simplify_type<I: Interner>( ty::Int(int_type) => Some(SimplifiedType::Int(int_type)), ty::Uint(uint_type) => Some(SimplifiedType::Uint(uint_type)), ty::Float(float_type) => Some(SimplifiedType::Float(float_type)), - ty::Adt(def, _) => Some(SimplifiedType::Adt(def.def_id())), + ty::Adt(def, _) => Some(SimplifiedType::Adt(def.def_id().into())), ty::Str => Some(SimplifiedType::Str), ty::Array(..) => Some(SimplifiedType::Array), ty::Slice(..) 
=> Some(SimplifiedType::Slice), @@ -135,11 +135,11 @@ pub fn simplify_type<I: Interner>( _ => Some(SimplifiedType::MarkerTraitObject), }, ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)), - ty::FnDef(def_id, _) | ty::Closure(def_id, _) | ty::CoroutineClosure(def_id, _) => { - Some(SimplifiedType::Closure(def_id)) - } - ty::Coroutine(def_id, _) => Some(SimplifiedType::Coroutine(def_id)), - ty::CoroutineWitness(def_id, _) => Some(SimplifiedType::CoroutineWitness(def_id)), + ty::FnDef(def_id, _) => Some(SimplifiedType::Closure(def_id.into())), + ty::Closure(def_id, _) => Some(SimplifiedType::Closure(def_id.into())), + ty::CoroutineClosure(def_id, _) => Some(SimplifiedType::Closure(def_id.into())), + ty::Coroutine(def_id, _) => Some(SimplifiedType::Coroutine(def_id.into())), + ty::CoroutineWitness(def_id, _) => Some(SimplifiedType::CoroutineWitness(def_id.into())), ty::Never => Some(SimplifiedType::Never), ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())), ty::FnPtr(sig_tys, _hdr) => { @@ -161,7 +161,7 @@ pub fn simplify_type<I: Interner>( } TreatParams::AsRigid | TreatParams::InstantiateWithInfer => None, }, - ty::Foreign(def_id) => Some(SimplifiedType::Foreign(def_id)), + ty::Foreign(def_id) => Some(SimplifiedType::Foreign(def_id.into())), ty::Error(_) => Some(SimplifiedType::Error), ty::Bound(..) | ty::Infer(_) => None, } diff --git a/compiler/rustc_type_ir/src/infer_ctxt.rs b/compiler/rustc_type_ir/src/infer_ctxt.rs index b4462294700..56962b4597b 100644 --- a/compiler/rustc_type_ir/src/infer_ctxt.rs +++ b/compiler/rustc_type_ir/src/infer_ctxt.rs @@ -158,6 +158,7 @@ pub trait InferCtxtLike: Sized { fn universe_of_ct(&self, ct: ty::ConstVid) -> Option<ty::UniverseIndex>; fn root_ty_var(&self, var: ty::TyVid) -> ty::TyVid; + fn sub_unification_table_root_var(&self, var: ty::TyVid) -> ty::TyVid; fn root_const_var(&self, var: ty::ConstVid) -> ty::ConstVid; fn opportunistic_resolve_ty_var(&self, vid: ty::TyVid) -> <Self::Interner as Interner>::Ty; @@ -197,6 +198,7 @@ pub trait InferCtxtLike: Sized { ) -> U; fn equate_ty_vids_raw(&self, a: ty::TyVid, b: ty::TyVid); + fn sub_unify_ty_vids_raw(&self, a: ty::TyVid, b: ty::TyVid); fn equate_int_vids_raw(&self, a: ty::IntVid, b: ty::IntVid); fn equate_float_vids_raw(&self, a: ty::FloatVid, b: ty::FloatVid); fn equate_const_vids_raw(&self, a: ty::ConstVid, b: ty::ConstVid); diff --git a/compiler/rustc_type_ir/src/inherent.rs b/compiler/rustc_type_ir/src/inherent.rs index 64063b1a365..ecfc05a6e20 100644 --- a/compiler/rustc_type_ir/src/inherent.rs +++ b/compiler/rustc_type_ir/src/inherent.rs @@ -74,7 +74,7 @@ pub trait Ty<I: Interner<Ty = Self>>: fn new_adt(interner: I, adt_def: I::AdtDef, args: I::GenericArgs) -> Self; - fn new_foreign(interner: I, def_id: I::DefId) -> Self; + fn new_foreign(interner: I, def_id: I::ForeignId) -> Self; fn new_dynamic( interner: I, @@ -83,17 +83,21 @@ pub trait Ty<I: Interner<Ty = Self>>: kind: ty::DynKind, ) -> Self; - fn new_coroutine(interner: I, def_id: I::DefId, args: I::GenericArgs) -> Self; + fn new_coroutine(interner: I, def_id: I::CoroutineId, args: I::GenericArgs) -> Self; - fn new_coroutine_closure(interner: I, def_id: I::DefId, args: I::GenericArgs) -> Self; + fn new_coroutine_closure( + interner: I, + def_id: I::CoroutineClosureId, + args: I::GenericArgs, + ) -> Self; - fn new_closure(interner: I, def_id: I::DefId, args: I::GenericArgs) -> Self; + fn new_closure(interner: I, def_id: I::ClosureId, args: I::GenericArgs) -> Self; - fn new_coroutine_witness(interner: I, def_id: I::DefId, args: 
I::GenericArgs) -> Self; + fn new_coroutine_witness(interner: I, def_id: I::CoroutineId, args: I::GenericArgs) -> Self; fn new_coroutine_witness_for_coroutine( interner: I, - def_id: I::DefId, + def_id: I::CoroutineId, coroutine_args: I::GenericArgs, ) -> Self; @@ -112,7 +116,7 @@ pub trait Ty<I: Interner<Ty = Self>>: It: Iterator<Item = T>, T: CollectAndApply<Self, Self>; - fn new_fn_def(interner: I, def_id: I::DefId, args: I::GenericArgs) -> Self; + fn new_fn_def(interner: I, def_id: I::FunctionId, args: I::GenericArgs) -> Self; fn new_fn_ptr(interner: I, sig: ty::Binder<I, ty::FnSig<I>>) -> Self; @@ -599,7 +603,7 @@ pub trait ParamLike: Copy + Debug + Hash + Eq { } pub trait AdtDef<I: Interner>: Copy + Debug + Hash + Eq { - fn def_id(self) -> I::DefId; + fn def_id(self) -> I::AdtId; fn is_struct(self) -> bool; @@ -646,6 +650,16 @@ pub trait DefId<I: Interner>: Copy + Debug + Hash + Eq + TypeFoldable<I> { fn as_local(self) -> Option<I::LocalDefId>; } +pub trait SpecificDefId<I: Interner>: + DefId<I> + Into<I::DefId> + TryFrom<I::DefId, Error: std::fmt::Debug> +{ +} + +impl<I: Interner, T: DefId<I> + Into<I::DefId> + TryFrom<I::DefId, Error: std::fmt::Debug>> + SpecificDefId<I> for T +{ +} + pub trait BoundExistentialPredicates<I: Interner>: Copy + Debug + Hash + Eq + Relate<I> + SliceLike<Item = ty::Binder<I, ty::ExistentialPredicate<I>>> { diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs index f5448baf8c0..cf58aec14a6 100644 --- a/compiler/rustc_type_ir/src/interner.rs +++ b/compiler/rustc_type_ir/src/interner.rs @@ -1,3 +1,4 @@ +use std::borrow::Borrow; use std::fmt::Debug; use std::hash::Hash; use std::ops::Deref; @@ -8,7 +9,7 @@ use rustc_index::bit_set::DenseBitSet; use crate::fold::TypeFoldable; use crate::inherent::*; use crate::ir_print::IrPrint; -use crate::lang_items::{SolverLangItem, SolverTraitLangItem}; +use crate::lang_items::{SolverAdtLangItem, SolverLangItem, SolverTraitLangItem}; use crate::relate::Relate; use crate::solve::{ CanonicalInput, ExternalConstraintsData, PredefinedOpaquesData, QueryResult, inspect, @@ -40,13 +41,20 @@ pub trait Interner: type DefId: DefId<Self>; type LocalDefId: Copy + Debug + Hash + Eq + Into<Self::DefId> + TypeFoldable<Self>; - /// A `DefId` of a trait. - /// - /// In rustc this is just a `DefId`, but rust-analyzer uses different types for different items. - /// - /// Note: The `TryFrom<DefId>` always succeeds (in rustc), so don't use it to check if some `DefId` - /// is a trait! - type TraitId: DefId<Self> + Into<Self::DefId> + TryFrom<Self::DefId, Error: std::fmt::Debug>; + // Various more specific `DefId`s. + // + // rustc just defines them all to be `DefId`, but rust-analyzer uses different types so this is convenient for it. + // + // Note: The `TryFrom<DefId>` always succeeds (in rustc), so don't use it to check if some `DefId` + // is of some specific type! 
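// Hypothetical usage sketch (not part of this trait definition): in rustc each of these
// specific ids is just `DefId`, so both conversions below are cheap, and the fallible
// direction must not be used to classify a `DefId`.
//
//     fn widen_and_narrow<I: Interner>(trait_id: I::TraitId) {
//         let generic: I::DefId = trait_id.into(); // infallible widening
//         // Always `Ok(..)` in rustc, so this is *not* an "is this a trait?" test.
//         let _narrowed = <I::TraitId as TryFrom<I::DefId>>::try_from(generic);
//     }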
+ type TraitId: SpecificDefId<Self>; + type ForeignId: SpecificDefId<Self>; + type FunctionId: SpecificDefId<Self>; + type ClosureId: SpecificDefId<Self>; + type CoroutineClosureId: SpecificDefId<Self>; + type CoroutineId: SpecificDefId<Self>; + type AdtId: SpecificDefId<Self>; + type ImplId: SpecificDefId<Self>; type Span: Span<Self>; type GenericArgs: GenericArgs<Self>; @@ -198,7 +206,7 @@ pub trait Interner: -> ty::EarlyBinder<Self, Self::Ty>; type AdtDef: AdtDef<Self>; - fn adt_def(self, adt_def_id: Self::DefId) -> Self::AdtDef; + fn adt_def(self, adt_def_id: Self::AdtId) -> Self::AdtDef; fn alias_ty_kind(self, alias: ty::AliasTy<Self>) -> ty::AliasTyKind; @@ -239,17 +247,17 @@ pub trait Interner: fn coroutine_hidden_types( self, - def_id: Self::DefId, + def_id: Self::CoroutineId, ) -> ty::EarlyBinder<Self, ty::Binder<Self, ty::CoroutineWitnessTypes<Self>>>; fn fn_sig( self, - def_id: Self::DefId, + def_id: Self::FunctionId, ) -> ty::EarlyBinder<Self, ty::Binder<Self, ty::FnSig<Self>>>; - fn coroutine_movability(self, def_id: Self::DefId) -> Movability; + fn coroutine_movability(self, def_id: Self::CoroutineId) -> Movability; - fn coroutine_for_closure(self, def_id: Self::DefId) -> Self::DefId; + fn coroutine_for_closure(self, def_id: Self::CoroutineClosureId) -> Self::CoroutineId; fn generics_require_sized_self(self, def_id: Self::DefId) -> bool; @@ -292,11 +300,11 @@ pub trait Interner: /// and filtering them to the outlives predicates. This is purely for performance. fn impl_super_outlives( self, - impl_def_id: Self::DefId, + impl_def_id: Self::ImplId, ) -> ty::EarlyBinder<Self, impl IntoIterator<Item = Self::Clause>>; - fn impl_is_const(self, def_id: Self::DefId) -> bool; - fn fn_is_const(self, def_id: Self::DefId) -> bool; + fn impl_is_const(self, def_id: Self::ImplId) -> bool; + fn fn_is_const(self, def_id: Self::FunctionId) -> bool; fn alias_has_const_conditions(self, def_id: Self::DefId) -> bool; fn const_conditions( self, @@ -307,42 +315,50 @@ pub trait Interner: def_id: Self::DefId, ) -> ty::EarlyBinder<Self, impl IntoIterator<Item = ty::Binder<Self, ty::TraitRef<Self>>>>; - fn impl_self_is_guaranteed_unsized(self, def_id: Self::DefId) -> bool; + fn impl_self_is_guaranteed_unsized(self, def_id: Self::ImplId) -> bool; - fn has_target_features(self, def_id: Self::DefId) -> bool; + fn has_target_features(self, def_id: Self::FunctionId) -> bool; fn require_lang_item(self, lang_item: SolverLangItem) -> Self::DefId; fn require_trait_lang_item(self, lang_item: SolverTraitLangItem) -> Self::TraitId; + fn require_adt_lang_item(self, lang_item: SolverAdtLangItem) -> Self::AdtId; + fn is_lang_item(self, def_id: Self::DefId, lang_item: SolverLangItem) -> bool; fn is_trait_lang_item(self, def_id: Self::TraitId, lang_item: SolverTraitLangItem) -> bool; + fn is_adt_lang_item(self, def_id: Self::AdtId, lang_item: SolverAdtLangItem) -> bool; + fn is_default_trait(self, def_id: Self::TraitId) -> bool; fn as_lang_item(self, def_id: Self::DefId) -> Option<SolverLangItem>; fn as_trait_lang_item(self, def_id: Self::TraitId) -> Option<SolverTraitLangItem>; + fn as_adt_lang_item(self, def_id: Self::AdtId) -> Option<SolverAdtLangItem>; + fn associated_type_def_ids(self, def_id: Self::DefId) -> impl IntoIterator<Item = Self::DefId>; fn for_each_relevant_impl( self, trait_def_id: Self::TraitId, self_ty: Self::Ty, - f: impl FnMut(Self::DefId), + f: impl FnMut(Self::ImplId), ); + fn for_each_blanket_impl(self, trait_def_id: Self::TraitId, f: impl FnMut(Self::ImplId)); fn has_item_definition(self, def_id: 
Self::DefId) -> bool; - fn impl_specializes(self, impl_def_id: Self::DefId, victim_def_id: Self::DefId) -> bool; + fn impl_specializes(self, impl_def_id: Self::ImplId, victim_def_id: Self::ImplId) -> bool; - fn impl_is_default(self, impl_def_id: Self::DefId) -> bool; + fn impl_is_default(self, impl_def_id: Self::ImplId) -> bool; - fn impl_trait_ref(self, impl_def_id: Self::DefId) -> ty::EarlyBinder<Self, ty::TraitRef<Self>>; + fn impl_trait_ref(self, impl_def_id: Self::ImplId) + -> ty::EarlyBinder<Self, ty::TraitRef<Self>>; - fn impl_polarity(self, impl_def_id: Self::DefId) -> ty::ImplPolarity; + fn impl_polarity(self, impl_def_id: Self::ImplId) -> ty::ImplPolarity; fn trait_is_auto(self, trait_def_id: Self::TraitId) -> bool; @@ -363,13 +379,13 @@ pub trait Interner: fn delay_bug(self, msg: impl ToString) -> Self::ErrorGuaranteed; - fn is_general_coroutine(self, coroutine_def_id: Self::DefId) -> bool; - fn coroutine_is_async(self, coroutine_def_id: Self::DefId) -> bool; - fn coroutine_is_gen(self, coroutine_def_id: Self::DefId) -> bool; - fn coroutine_is_async_gen(self, coroutine_def_id: Self::DefId) -> bool; + fn is_general_coroutine(self, coroutine_def_id: Self::CoroutineId) -> bool; + fn coroutine_is_async(self, coroutine_def_id: Self::CoroutineId) -> bool; + fn coroutine_is_gen(self, coroutine_def_id: Self::CoroutineId) -> bool; + fn coroutine_is_async_gen(self, coroutine_def_id: Self::CoroutineId) -> bool; type UnsizingParams: Deref<Target = DenseBitSet<u32>>; - fn unsizing_params_for_adt(self, adt_def_id: Self::DefId) -> Self::UnsizingParams; + fn unsizing_params_for_adt(self, adt_def_id: Self::AdtId) -> Self::UnsizingParams; fn anonymize_bound_vars<T: TypeFoldable<Self>>( self, @@ -383,12 +399,12 @@ pub trait Interner: defining_anchor: Self::LocalDefId, ) -> Self::LocalDefIds; - type ProbeRef: Copy + Debug + Hash + Eq + Deref<Target = inspect::Probe<Self>>; - fn mk_probe_ref(self, probe: inspect::Probe<Self>) -> Self::ProbeRef; + type Probe: Debug + Hash + Eq + Borrow<inspect::Probe<Self>>; + fn mk_probe(self, probe: inspect::Probe<Self>) -> Self::Probe; fn evaluate_root_goal_for_proof_tree_raw( self, canonical_goal: CanonicalInput<Self>, - ) -> (QueryResult<Self>, Self::ProbeRef); + ) -> (QueryResult<Self>, Self::Probe); } /// Imagine you have a function `F: FnOnce(&[T]) -> R`, plus an iterator `iter` diff --git a/compiler/rustc_type_ir/src/lang_items.rs b/compiler/rustc_type_ir/src/lang_items.rs index 31f8f5be588..5f503d8b912 100644 --- a/compiler/rustc_type_ir/src/lang_items.rs +++ b/compiler/rustc_type_ir/src/lang_items.rs @@ -11,6 +11,11 @@ pub enum SolverLangItem { DynMetadata, FutureOutput, Metadata, + // tidy-alphabetical-end +} + +pub enum SolverAdtLangItem { + // tidy-alphabetical-start Option, Poll, // tidy-alphabetical-end diff --git a/compiler/rustc_type_ir/src/relate.rs b/compiler/rustc_type_ir/src/relate.rs index 223230fde9e..690a5f65e08 100644 --- a/compiler/rustc_type_ir/src/relate.rs +++ b/compiler/rustc_type_ir/src/relate.rs @@ -405,7 +405,7 @@ pub fn structurally_relate_tys<I: Interner, R: TypeRelation<I>>( Ok(if a_args.is_empty() { a } else { - let args = relation.relate_item_args(a_def.def_id(), a_args, b_args)?; + let args = relation.relate_item_args(a_def.def_id().into(), a_args, b_args)?; if args == a_args { a } else { Ty::new_adt(cx, a_def, args) } }) } @@ -524,7 +524,7 @@ pub fn structurally_relate_tys<I: Interner, R: TypeRelation<I>>( Ok(if a_args.is_empty() { a } else { - let args = relation.relate_item_args(a_def_id, a_args, b_args)?; + let args = 
relation.relate_item_args(a_def_id.into(), a_args, b_args)?; if args == a_args { a } else { Ty::new_fn_def(cx, a_def_id, args) } }) } diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index dbbc0c217d7..8f8f019510f 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -1262,7 +1262,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { encountered_overflow |= stack_entry.encountered_overflow; debug_assert_eq!(stack_entry.input, input); - // If the current goal is not the root of a cycle, we are done. + // If the current goal is not a cycle head, we are done. // // There are no provisional cache entries which depend on this goal. let Some(usages) = stack_entry.usages else { @@ -1278,7 +1278,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // // Check whether we reached a fixpoint, either because the final result // is equal to the provisional result of the previous iteration, or because - // this was only the root of either coinductive or inductive cycles, and the + // this was only the head of either coinductive or inductive cycles, and the // final result is equal to the initial response for that case. if self.reached_fixpoint(cx, &stack_entry, usages, result) { self.rebase_provisional_cache_entries(&stack_entry, |_, result| result); diff --git a/compiler/rustc_type_ir/src/search_graph/stack.rs b/compiler/rustc_type_ir/src/search_graph/stack.rs index 3fd8e2bd16e..8348666be41 100644 --- a/compiler/rustc_type_ir/src/search_graph/stack.rs +++ b/compiler/rustc_type_ir/src/search_graph/stack.rs @@ -13,7 +13,7 @@ rustc_index::newtype_index! { pub(super) struct StackDepth {} } -/// Stack entries of the evaluation stack. Its fields tend to be lazily +/// Stack entries of the evaluation stack. Its fields tend to be lazily updated /// when popping a child goal or completely immutable. #[derive_where(Debug; X: Cx)] pub(super) struct StackEntry<X: Cx> { @@ -42,7 +42,7 @@ pub(super) struct StackEntry<X: Cx> { /// Whether evaluating this goal encountered overflow. Lazily updated. pub encountered_overflow: bool, - /// Whether and how this goal has been used as the root of a cycle. Lazily updated. + /// Whether and how this goal has been used as a cycle head. Lazily updated. pub usages: Option<HeadUsages>, /// We want to be able to ignore head usages if they happen inside of candidates diff --git a/compiler/rustc_type_ir/src/solve/inspect.rs b/compiler/rustc_type_ir/src/solve/inspect.rs index 5452ac44158..3af2f8dbaf2 100644 --- a/compiler/rustc_type_ir/src/solve/inspect.rs +++ b/compiler/rustc_type_ir/src/solve/inspect.rs @@ -49,7 +49,7 @@ pub type CanonicalState<I, T> = Canonical<I, State<I, T>>; pub struct GoalEvaluation<I: Interner> { pub uncanonicalized_goal: Goal<I, I::Predicate>, pub orig_values: Vec<I::GenericArg>, - pub final_revision: I::ProbeRef, + pub final_revision: I::Probe, pub result: QueryResult<I>, } diff --git a/compiler/rustc_type_ir/src/solve/mod.rs b/compiler/rustc_type_ir/src/solve/mod.rs index b6d362d77c4..b48a8f46ebe 100644 --- a/compiler/rustc_type_ir/src/solve/mod.rs +++ b/compiler/rustc_type_ir/src/solve/mod.rs @@ -136,7 +136,7 @@ pub enum CandidateSource<I: Interner> { /// let y = x.clone(); /// } /// ``` - Impl(I::DefId), + Impl(I::ImplId), /// A builtin impl generated by the compiler. When adding a new special /// trait, try to use actual impls whenever possible. 
Builtin impls should /// only be used in cases where the impl cannot be manually be written. diff --git a/compiler/rustc_type_ir/src/ty_kind.rs b/compiler/rustc_type_ir/src/ty_kind.rs index bde506ffd93..225d85f79c3 100644 --- a/compiler/rustc_type_ir/src/ty_kind.rs +++ b/compiler/rustc_type_ir/src/ty_kind.rs @@ -101,7 +101,7 @@ pub enum TyKind<I: Interner> { Adt(I::AdtDef, I::GenericArgs), /// An unsized FFI type that is opaque to Rust. Written as `extern type T`. - Foreign(I::DefId), + Foreign(I::ForeignId), /// The pointee of a string slice. Written as `str`. Str, @@ -137,7 +137,7 @@ pub enum TyKind<I: Interner> { /// fn foo() -> i32 { 1 } /// let bar = foo; // bar: fn() -> i32 {foo} /// ``` - FnDef(I::DefId, I::GenericArgs), + FnDef(I::FunctionId, I::GenericArgs), /// A pointer to a function. Written as `fn() -> i32`. /// @@ -172,21 +172,21 @@ pub enum TyKind<I: Interner> { /// Closure args contain both the - potentially instantiated - generic parameters /// of its parent and some synthetic parameters. See the documentation for /// `ClosureArgs` for more details. - Closure(I::DefId, I::GenericArgs), + Closure(I::ClosureId, I::GenericArgs), /// The anonymous type of a closure. Used to represent the type of `async |a| a`. /// /// Coroutine-closure args contain both the - potentially instantiated - generic /// parameters of its parent and some synthetic parameters. See the documentation /// for `CoroutineClosureArgs` for more details. - CoroutineClosure(I::DefId, I::GenericArgs), + CoroutineClosure(I::CoroutineClosureId, I::GenericArgs), /// The anonymous type of a coroutine. Used to represent the type of /// `|a| yield a`. /// /// For more info about coroutine args, visit the documentation for /// `CoroutineArgs`. - Coroutine(I::DefId, I::GenericArgs), + Coroutine(I::CoroutineId, I::GenericArgs), /// A type representing the types stored inside a coroutine. /// This should only appear as part of the `CoroutineArgs`. @@ -211,7 +211,7 @@ pub enum TyKind<I: Interner> { /// } /// # ; /// ``` - CoroutineWitness(I::DefId, I::GenericArgs), + CoroutineWitness(I::CoroutineId, I::GenericArgs), /// The never type `!`. Never, diff --git a/compiler/rustc_type_ir/src/ty_kind/closure.rs b/compiler/rustc_type_ir/src/ty_kind/closure.rs index a2e16d917a9..3a6d1acfa8d 100644 --- a/compiler/rustc_type_ir/src/ty_kind/closure.rs +++ b/compiler/rustc_type_ir/src/ty_kind/closure.rs @@ -391,7 +391,7 @@ impl<I: Interner> CoroutineClosureSignature<I> { cx: I, parent_args: I::GenericArgsSlice, coroutine_kind_ty: I::Ty, - coroutine_def_id: I::DefId, + coroutine_def_id: I::CoroutineId, tupled_upvars_ty: I::Ty, ) -> I::Ty { let coroutine_args = ty::CoroutineArgs::new( @@ -418,7 +418,7 @@ impl<I: Interner> CoroutineClosureSignature<I> { self, cx: I, parent_args: I::GenericArgsSlice, - coroutine_def_id: I::DefId, + coroutine_def_id: I::CoroutineId, goal_kind: ty::ClosureKind, env_region: I::Region, closure_tupled_upvars_ty: I::Ty, diff --git a/compiler/rustc_type_ir/src/visit.rs b/compiler/rustc_type_ir/src/visit.rs index 5104484e9c4..6e62e1031a9 100644 --- a/compiler/rustc_type_ir/src/visit.rs +++ b/compiler/rustc_type_ir/src/visit.rs @@ -398,9 +398,9 @@ impl std::fmt::Debug for HasTypeFlagsVisitor { // looks, particular for `Ty`/`Predicate` where it's just a field access. // // N.B. 
The only case where this isn't totally true is binders, which also -// add `HAS_{RE,TY,CT}_LATE_BOUND` flag depending on the *bound variables* that +// add `HAS_BINDER_VARS` flag depending on the *bound variables* that // are present, regardless of whether those bound variables are used. This -// is important for anonymization of binders in `TyCtxt::erase_regions`. We +// is important for anonymization of binders in `TyCtxt::erase_and_anonymize_regions`. We // specifically detect this case in `visit_binder`. impl<I: Interner> TypeVisitor<I> for HasTypeFlagsVisitor { type Result = ControlFlow<FoundFlags>; diff --git a/compiler/rustc_windows_rc/Cargo.toml b/compiler/rustc_windows_rc/Cargo.toml new file mode 100644 index 00000000000..080acd35c38 --- /dev/null +++ b/compiler/rustc_windows_rc/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "rustc_windows_rc" +version = "0.0.0" +edition = "2024" + +[dependencies] +#tidy-alphabetical-start +# `cc` updates often break things, so we pin it here. Cargo enforces "max 1 semver-compat version +# per crate", so if you change this, you need to also change it in `rustc_llvm` and `rustc_codegen_ssa`. +cc = "=1.2.16" +#tidy-alphabetical-end diff --git a/compiler/rustc_windows_rc/rustc.rc.in b/compiler/rustc_windows_rc/rustc.rc.in new file mode 100644 index 00000000000..10a00b9bd4e --- /dev/null +++ b/compiler/rustc_windows_rc/rustc.rc.in @@ -0,0 +1,40 @@ +// A template for the rustc_driver and rustc Windows resource files. +// This file is processed by the build script to produce rustc.rc and rustc_driver.rc +// with the appropriate version information filled in. + +// All the strings are in UTF-8 +#pragma code_page(65001) + +#define RUSTC_FILEDESCRIPTION_STR "@RUSTC_FILEDESCRIPTION_STR@" +#define RUSTC_FILETYPE @RUSTC_FILETYPE@ +#define RUSTC_FILEVERSION_QUAD @RUSTC_FILEVERSION_QUAD@ +#define RUSTC_FILEVERSION_STR "@RUSTC_FILEVERSION_STR@" + +#define RUSTC_PRODUCTNAME_STR "@RUSTC_PRODUCTNAME_STR@" +#define RUSTC_PRODUCTVERSION_QUAD @RUSTC_PRODUCTVERSION_QUAD@ +#define RUSTC_PRODUCTVERSION_STR "@RUSTC_PRODUCTVERSION_STR@" + + +1 VERSIONINFO +FILEVERSION RUSTC_FILEVERSION_QUAD +PRODUCTVERSION RUSTC_PRODUCTVERSION_QUAD +FILEOS 0x00040004 +FILETYPE RUSTC_FILETYPE +FILESUBTYPE 0 +FILEFLAGSMASK 0x3f +FILEFLAGS 0x0 +{ + BLOCK "StringFileInfo" + { + BLOCK "000004b0" + { + VALUE "FileDescription", RUSTC_FILEDESCRIPTION_STR + VALUE "FileVersion", RUSTC_FILEVERSION_STR + VALUE "ProductVersion", RUSTC_PRODUCTVERSION_STR + VALUE "ProductName", RUSTC_PRODUCTNAME_STR + } + } + BLOCK "VarFileInfo" { + VALUE "Translation", 0x0, 0x04b0 + } +} diff --git a/compiler/rustc_windows_rc/src/lib.rs b/compiler/rustc_windows_rc/src/lib.rs new file mode 100644 index 00000000000..caa5e5ef276 --- /dev/null +++ b/compiler/rustc_windows_rc/src/lib.rs @@ -0,0 +1,158 @@ +//! A build script dependency to create a Windows resource file for the compiler +//! +//! Uses values from the `CFG_VERSION` and `CFG_RELEASE` environment variables +//! to set the product and file version information in the Windows resource file. +use std::{env, ffi, fs, path, process}; + +use cc::windows_registry; + +/// The template for the Windows resource file. 
+const RESOURCE_TEMPLATE: &str = include_str!("../rustc.rc.in"); + +/// A subset of the possible values for the `FILETYPE` field in a Windows resource file +/// +/// See the `dwFileType` member of [VS_FIXEDFILEINFO](https://learn.microsoft.com/en-us/windows/win32/api/verrsrc/ns-verrsrc-vs_fixedfileinfo#members) +#[derive(Debug, Clone, Copy)] +#[repr(u32)] +pub enum VersionInfoFileType { + /// `VFT_APP` - The file is an application. + App = 0x00000001, + /// `VFT_DLL` - The file is a dynamic link library. + Dll = 0x00000002, +} + +/// Create and compile a Windows resource file with the product and file version information for the rust compiler. +/// +/// Returns the path to the compiled resource file +/// +/// Does not emit any cargo directives, the caller is responsible for that. +pub fn compile_windows_resource_file( + file_stem: &path::Path, + file_description: &str, + filetype: VersionInfoFileType, +) -> path::PathBuf { + let mut resources_dir = path::PathBuf::from(env::var_os("OUT_DIR").unwrap()); + resources_dir.push("resources"); + fs::create_dir_all(&resources_dir).unwrap(); + + let resource_compiler = + find_resource_compiler(&env::var("CARGO_CFG_TARGET_ARCH").unwrap()).expect("found rc.exe"); + + let rc_path = resources_dir.join(file_stem.with_extension("rc")); + + write_resource_script_file(&rc_path, file_description, filetype); + + let res_path = resources_dir.join(file_stem.with_extension("res")); + + let status = process::Command::new(resource_compiler) + .arg("/fo") + .arg(&res_path) + .arg(&rc_path) + .status() + .expect("can execute resource compiler"); + assert!(status.success(), "rc.exe failed with status {}", status); + assert!( + res_path.try_exists().unwrap_or(false), + "resource file {} was not created", + res_path.display() + ); + res_path +} + +/// Writes a Windows resource script file for the rust compiler with the product and file version information +/// into `rc_path` +fn write_resource_script_file( + rc_path: &path::Path, + file_description: &str, + filetype: VersionInfoFileType, +) { + let mut resource_script = RESOURCE_TEMPLATE.to_string(); + + // Set the string product and file version to the same thing as `rustc --version` + let descriptive_version = env::var("CFG_VERSION").unwrap_or("unknown".to_string()); + + // Set the product name to "Rust Compiler" or "Rust Compiler (nightly)" etc + let product_name = product_name(env::var("CFG_RELEASE_CHANNEL").unwrap()); + + // For the numeric version we need `major,minor,patch,build`. 
+ // Extract them from `CFG_RELEASE` which is "major.minor.patch" and a "-dev", "-nightly" or similar suffix + let cfg_release = env::var("CFG_RELEASE").unwrap(); + // remove the suffix, if present and parse into [`ResourceVersion`] + let version = parse_version(cfg_release.split("-").next().unwrap_or("0.0.0")) + .expect("valid CFG_RELEASE version"); + + resource_script = resource_script + .replace("@RUSTC_FILEDESCRIPTION_STR@", file_description) + .replace("@RUSTC_FILETYPE@", &format!("{}", filetype as u32)) + .replace("@RUSTC_FILEVERSION_QUAD@", &version.to_quad_string()) + .replace("@RUSTC_FILEVERSION_STR@", &descriptive_version) + .replace("@RUSTC_PRODUCTNAME_STR@", &product_name) + .replace("@RUSTC_PRODUCTVERSION_QUAD@", &version.to_quad_string()) + .replace("@RUSTC_PRODUCTVERSION_STR@", &descriptive_version); + + fs::write(&rc_path, resource_script) + .unwrap_or_else(|_| panic!("failed to write resource file {}", rc_path.display())); +} + +fn product_name(channel: String) -> String { + format!( + "Rust Compiler{}", + if channel == "stable" { "".to_string() } else { format!(" ({})", channel) } + ) +} + +/// Windows resources store versions as four 16-bit integers. +struct ResourceVersion { + major: u16, + minor: u16, + patch: u16, + build: u16, +} + +impl ResourceVersion { + /// Format the version as a comma-separated string of four integers + /// as expected by Windows resource scripts for the `FILEVERSION` and `PRODUCTVERSION` fields. + fn to_quad_string(&self) -> String { + format!("{},{},{},{}", self.major, self.minor, self.patch, self.build) + } +} + +/// Parse a string in the format "major.minor.patch" into a [`ResourceVersion`]. +/// The build is set to 0. +/// Returns `None` if the version string is not in the expected format. +fn parse_version(version: &str) -> Option<ResourceVersion> { + let mut parts = version.split('.'); + let major = parts.next()?.parse::<u16>().ok()?; + let minor = parts.next()?.parse::<u16>().ok()?; + let patch = parts.next()?.parse::<u16>().ok()?; + if parts.next().is_some() { + None + } else { + Some(ResourceVersion { major, minor, patch, build: 0 }) + } +} + +/// Find the Windows SDK resource compiler `rc.exe` for the given architecture or target triple. +/// Returns `None` if the tool could not be found. +fn find_resource_compiler(arch_or_target: &str) -> Option<path::PathBuf> { + find_windows_sdk_tool(arch_or_target, "rc.exe") +} + +/// Find a Windows SDK tool for the given architecture or target triple. +/// Returns `None` if the tool could not be found. +fn find_windows_sdk_tool(arch_or_target: &str, tool_name: &str) -> Option<path::PathBuf> { + // windows_registry::find_tool can only find MSVC tools, not Windows SDK tools, but + // cc does include the Windows SDK tools in the PATH environment of MSVC tools. + + let msvc_linker = windows_registry::find_tool(arch_or_target, "link.exe")?; + let path = &msvc_linker.env().iter().find(|(k, _)| k == "PATH")?.1; + find_tool_in_path(tool_name, path) +} + +/// Find a tool in the directories in a given PATH-like string. +fn find_tool_in_path<P: AsRef<ffi::OsStr>>(tool_name: &str, path: P) -> Option<path::PathBuf> { + env::split_paths(path.as_ref()).find_map(|p| { + let tool_path = p.join(tool_name); + if tool_path.try_exists().unwrap_or(false) { Some(tool_path) } else { None } + }) +} |

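The typed def-id changes in the Interner trait above (Self::ImplId, Self::AdtId, Self::CoroutineId, etc.) rely on the specific id types converting into the general DefId, which is why call sites in relate.rs gain an `.into()` (e.g. `relate_item_args(a_def_id.into(), ...)`). A minimal hypothetical sketch of that pattern follows; DefId, ImplId, and the free function are stand-ins for the real rustc_type_ir definitions, which this diff does not show in full.

// Hypothetical sketch of the conversion pattern, not rustc_type_ir's actual types.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DefId(u32);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct ImplId(u32);

// Each specific id widens losslessly into the general id.
impl From<ImplId> for DefId {
    fn from(id: ImplId) -> DefId {
        DefId(id.0)
    }
}

// A consumer that is still keyed by the general id type.
fn relate_item_args(def_id: DefId) {
    println!("relating args for {def_id:?}");
}

fn main() {
    let impl_id = ImplId(42);
    // The specific id is widened at the call site, mirroring `a_def_id.into()` in relate.rs.
    relate_item_args(impl_id.into());
}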