Diffstat (limited to 'compiler')
29 files changed, 574 insertions, 262 deletions
diff --git a/compiler/rustc_attr_data_structures/src/attributes.rs b/compiler/rustc_attr_data_structures/src/attributes.rs index 845e4d5e5d0..8f95a017809 100644 --- a/compiler/rustc_attr_data_structures/src/attributes.rs +++ b/compiler/rustc_attr_data_structures/src/attributes.rs @@ -130,24 +130,54 @@ impl Deprecation { } } -/// Represent parsed, *built in*, inert attributes. +/// Represents parsed *built-in* inert attributes. /// -/// That means attributes that are not actually ever expanded. -/// For more information on this, see the module docs on the [`rustc_attr_parsing`] crate. -/// They're instead used as markers, to guide the compilation process in various way in most every stage of the compiler. -/// These are kept around after the AST, into the HIR and further on. +/// ## Overview +/// These attributes are markers that guide the compilation process and are never expanded into other code. +/// They persist throughout the compilation phases, from AST to HIR and beyond. /// -/// The word "parsed" could be a little misleading here, because the parser already parses -/// attributes early on. However, the result, an [`ast::Attribute`] -/// is only parsed at a high level, still containing a token stream in many cases. That is -/// because the structure of the contents varies from attribute to attribute. -/// With a parsed attribute I mean that each attribute is processed individually into a -/// final structure, which on-site (the place where the attribute is useful for, think the -/// the place where `must_use` is checked) little to no extra parsing or validating needs to -/// happen. +/// ## Attribute Processing +/// While attributes are initially parsed by [`rustc_parse`] into [`ast::Attribute`], they still contain raw token streams +/// because different attributes have different internal structures. This enum represents the final, +/// fully parsed form of these attributes, where each variant contains all the information and +/// structure relevant for the specific attribute. /// -/// For more docs, look in [`rustc_attr_parsing`]. +/// Some attributes can be applied multiple times to the same item, and they are "collapsed" into a single +/// semantic attribute. For example: +/// ```rust +/// #[repr(C)] +/// #[repr(packed)] +/// struct S { } +/// ``` +/// This is equivalent to `#[repr(C, packed)]` and results in a single [`AttributeKind::Repr`] containing +/// both `C` and `packed` annotations. This collapsing happens during parsing and is reflected in the +/// data structures defined in this enum. /// +/// ## Usage +/// These parsed attributes are used throughout the compiler to: +/// - Control code generation (e.g., `#[repr]`) +/// - Mark API stability (`#[stable]`, `#[unstable]`) +/// - Provide documentation (`#[doc]`) +/// - Guide compiler behavior (e.g., `#[allow_internal_unstable]`) +/// +/// ## Note on Attribute Organization +/// Some attributes like `InlineAttr`, `OptimizeAttr`, and `InstructionSetAttr` are defined separately +/// from this enum because they are used in specific compiler phases (like code generation) and don't +/// need to persist throughout the entire compilation process. They are typically processed and +/// converted into their final form earlier in the compilation pipeline.
+/// +/// For example: +/// - `InlineAttr` is used during code generation to control function inlining +/// - `OptimizeAttr` is used to control optimization levels +/// - `InstructionSetAttr` is used for target-specific code generation +/// +/// These attributes are handled by their respective compiler passes in the [`rustc_codegen_ssa`] crate +/// and don't need to be preserved in the same way as the attributes in this enum. +/// +/// For more details on attribute parsing, see the [`rustc_attr_parsing`] crate. +/// +/// [`rustc_parse`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_parse/index.html +/// [`rustc_codegen_ssa`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_ssa/index.html /// [`rustc_attr_parsing`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_attr_parsing/index.html #[derive(Clone, Debug, HashStable_Generic, Encodable, Decodable, PrintAttribute)] pub enum AttributeKind { @@ -158,6 +188,9 @@ pub enum AttributeKind { /// Represents `#[allow_internal_unstable]`. AllowInternalUnstable(ThinVec<(Symbol, Span)>), + /// Represents `#[rustc_as_ptr]` (used by the `dangling_pointers_from_temporaries` lint). + AsPtr(Span), + /// Represents `#[rustc_default_body_unstable]`. BodyStability { stability: DefaultBodyStability, diff --git a/compiler/rustc_attr_data_structures/src/lib.rs b/compiler/rustc_attr_data_structures/src/lib.rs index b0fc19d1cd7..f8355be09ad 100644 --- a/compiler/rustc_attr_data_structures/src/lib.rs +++ b/compiler/rustc_attr_data_structures/src/lib.rs @@ -1,3 +1,7 @@ +//! Data structures for representing parsed attributes in the Rust compiler. +//! For detailed documentation about attribute processing, +//! see [rustc_attr_parsing](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_attr_parsing/index.html). 
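The `dangling_pointers_from_temporaries` lint later in this diff consumes the new `AttributeKind::AsPtr` variant through the `find_attr!` macro instead of re-checking attribute names. A minimal sketch of that consumption pattern, assuming a `TyCtxt` is in scope (the helper name here is hypothetical, not part of the patch):

```rust
use rustc_attr_data_structures::{AttributeKind, find_attr};
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::DefId;

// Hypothetical helper: returns true if `fn_id` is marked `#[rustc_as_ptr]`,
// matching on the fully parsed `AttributeKind::AsPtr` variant rather than
// inspecting token streams (mirrors the `dangling.rs` change in this diff).
fn is_as_ptr_method(tcx: TyCtxt<'_>, fn_id: DefId) -> bool {
    find_attr!(tcx.get_all_attrs(fn_id), AttributeKind::AsPtr(_))
}
```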
+ // tidy-alphabetical-start #![allow(internal_features)] #![doc(rust_logo)] diff --git a/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs b/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs new file mode 100644 index 00000000000..32a20d4c5b5 --- /dev/null +++ b/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs @@ -0,0 +1,21 @@ +use rustc_attr_data_structures::AttributeKind; +use rustc_span::{Symbol, sym}; + +use crate::attributes::{AttributeOrder, OnDuplicate, SingleAttributeParser}; +use crate::context::{AcceptContext, Stage}; +use crate::parser::ArgParser; + +pub(crate) struct AsPtrParser; + +impl<S: Stage> SingleAttributeParser<S> for AsPtrParser { + const PATH: &[Symbol] = &[sym::rustc_as_ptr]; + + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst; + + const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error; + + fn convert(cx: &mut AcceptContext<'_, '_, S>, _args: &ArgParser<'_>) -> Option<AttributeKind> { + // FIXME: check that there's no args (this is currently checked elsewhere) + Some(AttributeKind::AsPtr(cx.attr_span)) + } +} diff --git a/compiler/rustc_attr_parsing/src/attributes/mod.rs b/compiler/rustc_attr_parsing/src/attributes/mod.rs index caf55e6685e..df488c89a34 100644 --- a/compiler/rustc_attr_parsing/src/attributes/mod.rs +++ b/compiler/rustc_attr_parsing/src/attributes/mod.rs @@ -29,6 +29,7 @@ pub(crate) mod allow_unstable; pub(crate) mod cfg; pub(crate) mod confusables; pub(crate) mod deprecation; +pub(crate) mod lint_helpers; pub(crate) mod repr; pub(crate) mod stability; pub(crate) mod transparency; diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index 47f72232828..3193d8975e9 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -18,6 +18,7 @@ use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym}; use crate::attributes::allow_unstable::{AllowConstFnUnstableParser, AllowInternalUnstableParser}; use crate::attributes::confusables::ConfusablesParser; use crate::attributes::deprecation::DeprecationParser; +use crate::attributes::lint_helpers::AsPtrParser; use crate::attributes::repr::ReprParser; use crate::attributes::stability::{ BodyStabilityParser, ConstStabilityIndirectParser, ConstStabilityParser, StabilityParser, @@ -102,6 +103,7 @@ attribute_parsers!( // tidy-alphabetical-end // tidy-alphabetical-start + Single<AsPtrParser>, Single<ConstStabilityIndirectParser>, Single<DeprecationParser>, Single<TransparencyParser>, diff --git a/compiler/rustc_builtin_macros/src/test.rs b/compiler/rustc_builtin_macros/src/test.rs index b439fa34f5b..b067578794b 100644 --- a/compiler/rustc_builtin_macros/src/test.rs +++ b/compiler/rustc_builtin_macros/src/test.rs @@ -118,10 +118,7 @@ pub(crate) fn expand_test_or_bench( let (item, is_stmt) = match item { Annotatable::Item(i) => (i, false), - Annotatable::Stmt(stmt) if matches!(stmt.kind, ast::StmtKind::Item(_)) => { - // FIXME: Use an 'if let' guard once they are implemented - if let ast::StmtKind::Item(i) = stmt.kind { (i, true) } else { unreachable!() } - } + Annotatable::Stmt(box ast::Stmt { kind: ast::StmtKind::Item(i), .. 
}) => (i, true), other => { not_testable_error(cx, attr_sp, None); return vec![other]; diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs index 9a0a5b51039..cd0afee0cfb 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs @@ -40,7 +40,18 @@ fn apply_attrs_to_abi_param(param: AbiParam, arg_attrs: ArgAttributes) -> AbiPar } } -fn cast_target_to_abi_params(cast: &CastTarget) -> SmallVec<[AbiParam; 2]> { +fn cast_target_to_abi_params(cast: &CastTarget) -> SmallVec<[(Size, AbiParam); 2]> { + if let Some(offset_from_start) = cast.rest_offset { + assert!(cast.prefix[1..].iter().all(|p| p.is_none())); + assert_eq!(cast.rest.unit.size, cast.rest.total); + let first = cast.prefix[0].unwrap(); + let second = cast.rest.unit; + return smallvec![ + (Size::ZERO, reg_to_abi_param(first)), + (offset_from_start, reg_to_abi_param(second)) + ]; + } + let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 { (0, 0) } else { @@ -55,25 +66,32 @@ fn cast_target_to_abi_params(cast: &CastTarget) -> SmallVec<[AbiParam; 2]> { // different types in Cranelift IR. Instead a single array of primitive types is used. // Create list of fields in the main structure - let mut args = cast + let args = cast .prefix .iter() .flatten() .map(|®| reg_to_abi_param(reg)) - .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit))) - .collect::<SmallVec<_>>(); + .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit))); + + let mut res = SmallVec::new(); + let mut offset = Size::ZERO; + + for arg in args { + res.push((offset, arg)); + offset += Size::from_bytes(arg.value_type.bytes()); + } // Append final integer if rem_bytes != 0 { // Only integers can be really split further. assert_eq!(cast.rest.unit.kind, RegKind::Integer); - args.push(reg_to_abi_param(Reg { - kind: RegKind::Integer, - size: Size::from_bytes(rem_bytes), - })); + res.push(( + offset, + reg_to_abi_param(Reg { kind: RegKind::Integer, size: Size::from_bytes(rem_bytes) }), + )); } - args + res } impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { @@ -104,7 +122,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { }, PassMode::Cast { ref cast, pad_i32 } => { assert!(!pad_i32, "padding support not yet implemented"); - cast_target_to_abi_params(cast) + cast_target_to_abi_params(cast).into_iter().map(|(_, param)| param).collect() } PassMode::Indirect { attrs, meta_attrs: None, on_stack } => { if on_stack { @@ -160,9 +178,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { } _ => unreachable!("{:?}", self.layout.backend_repr), }, - PassMode::Cast { ref cast, .. } => { - (None, cast_target_to_abi_params(cast).into_iter().collect()) - } + PassMode::Cast { ref cast, .. 
} => ( + None, + cast_target_to_abi_params(cast).into_iter().map(|(_, param)| param).collect(), + ), PassMode::Indirect { attrs, meta_attrs: None, on_stack } => { assert!(!on_stack); ( @@ -187,12 +206,14 @@ pub(super) fn to_casted_value<'tcx>( ) -> SmallVec<[Value; 2]> { let (ptr, meta) = arg.force_stack(fx); assert!(meta.is_none()); - let mut offset = 0; cast_target_to_abi_params(cast) .into_iter() - .map(|param| { - let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new()); - offset += i64::from(param.value_type.bytes()); + .map(|(offset, param)| { + let val = ptr.offset_i64(fx, offset.bytes() as i64).load( + fx, + param.value_type, + MemFlags::new(), + ); val }) .collect() @@ -205,7 +226,7 @@ pub(super) fn from_casted_value<'tcx>( cast: &CastTarget, ) -> CValue<'tcx> { let abi_params = cast_target_to_abi_params(cast); - let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum(); + let abi_param_size: u32 = abi_params.iter().map(|(_, param)| param.value_type.bytes()).sum(); let layout_size = u32::try_from(layout.size.bytes()).unwrap(); let ptr = fx.create_stack_slot( // Stack slot size may be bigger for example `[u8; 3]` which is packed into an `i32`. @@ -214,16 +235,13 @@ pub(super) fn from_casted_value<'tcx>( std::cmp::max(abi_param_size, layout_size), u32::try_from(layout.align.abi.bytes()).unwrap(), ); - let mut offset = 0; let mut block_params_iter = block_params.iter().copied(); - for param in abi_params { - let val = ptr.offset_i64(fx, offset).store( + for (offset, _) in abi_params { + ptr.offset_i64(fx, offset.bytes() as i64).store( fx, block_params_iter.next().unwrap(), MemFlags::new(), - ); - offset += i64::from(param.value_type.bytes()); - val + ) } assert_eq!(block_params_iter.next(), None, "Leftover block param"); CValue::by_ref(ptr, layout) diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 9e05b8f23aa..c921851b42b 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -626,7 +626,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { bx.lifetime_start(llscratch, scratch_size); // ... where we first store the value... - bx.store(val, llscratch, scratch_align); + rustc_codegen_ssa::mir::store_cast(bx, cast, val, llscratch, scratch_align); // ... and then memcpy it to the intended destination. bx.memcpy( diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index aba63d75f1d..4b07c8aef91 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -229,7 +229,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { let llscratch = bx.alloca(scratch_size, scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...store the value... - bx.store(val, llscratch, scratch_align); + rustc_codegen_ssa::mir::store_cast(bx, cast, val, llscratch, scratch_align); // ... and then memcpy it to the intended destination. 
bx.memcpy( dst.val.llval, diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 43b87171d51..3df97429e09 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -1,6 +1,6 @@ use std::cmp; -use rustc_abi::{BackendRepr, ExternAbi, HasDataLayout, Reg, Size, WrappingRange}; +use rustc_abi::{Align, BackendRepr, ExternAbi, HasDataLayout, Reg, Size, WrappingRange}; use rustc_ast as ast; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_data_structures::packed::Pu128; @@ -13,7 +13,7 @@ use rustc_middle::{bug, span_bug}; use rustc_session::config::OptLevel; use rustc_span::Span; use rustc_span::source_map::Spanned; -use rustc_target::callconv::{ArgAbi, FnAbi, PassMode}; +use rustc_target::callconv::{ArgAbi, CastTarget, FnAbi, PassMode}; use tracing::{debug, info}; use super::operand::OperandRef; @@ -558,8 +558,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"), }; - let ty = bx.cast_backend_type(cast_ty); - bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi) + load_cast(bx, cast_ty, llslot, self.fn_abi.ret.layout.align.abi) } }; bx.ret(llval); @@ -1618,8 +1617,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { MemFlags::empty(), ); // ...and then load it with the ABI type. - let cast_ty = bx.cast_backend_type(cast); - llval = bx.load(cast_ty, llscratch, scratch_align); + llval = load_cast(bx, cast, llscratch, scratch_align); bx.lifetime_end(llscratch, scratch_size); } else { // We can't use `PlaceRef::load` here because the argument @@ -1969,3 +1967,47 @@ enum ReturnDest<'tcx, V> { /// Store a direct return value to an operand local place. 
DirectOperand(mir::Local), } + +fn load_cast<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + cast: &CastTarget, + ptr: Bx::Value, + align: Align, +) -> Bx::Value { + let cast_ty = bx.cast_backend_type(cast); + if let Some(offset_from_start) = cast.rest_offset { + assert!(cast.prefix[1..].iter().all(|p| p.is_none())); + assert_eq!(cast.rest.unit.size, cast.rest.total); + let first_ty = bx.reg_backend_type(&cast.prefix[0].unwrap()); + let second_ty = bx.reg_backend_type(&cast.rest.unit); + let first = bx.load(first_ty, ptr, align); + let second_ptr = bx.inbounds_ptradd(ptr, bx.const_usize(offset_from_start.bytes())); + let second = bx.load(second_ty, second_ptr, align.restrict_for_offset(offset_from_start)); + let res = bx.cx().const_poison(cast_ty); + let res = bx.insert_value(res, first, 0); + bx.insert_value(res, second, 1) + } else { + bx.load(cast_ty, ptr, align) + } +} + +pub fn store_cast<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + cast: &CastTarget, + value: Bx::Value, + ptr: Bx::Value, + align: Align, +) { + if let Some(offset_from_start) = cast.rest_offset { + assert!(cast.prefix[1..].iter().all(|p| p.is_none())); + assert_eq!(cast.rest.unit.size, cast.rest.total); + assert!(cast.prefix[0].is_some()); + let first = bx.extract_value(value, 0); + let second = bx.extract_value(value, 1); + bx.store(first, ptr, align); + let second_ptr = bx.inbounds_ptradd(ptr, bx.const_usize(offset_from_start.bytes())); + bx.store(second, second_ptr, align.restrict_for_offset(offset_from_start)); + } else { + bx.store(value, ptr, align); + }; +} diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 96a04473aba..66c4af4c935 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -26,6 +26,7 @@ pub mod place; mod rvalue; mod statement; +pub use self::block::store_cast; use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo}; use self::operand::{OperandRef, OperandValue}; use self::place::PlaceRef; @@ -259,7 +260,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } PassMode::Cast { ref cast, .. } => { debug!("alloc: {:?} (return place) -> place", local); - let size = cast.size(&start_bx); + let size = cast.size(&start_bx).max(layout.size); return LocalRef::Place(PlaceRef::alloca_size(&mut start_bx, size, layout)); } _ => {} diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index 311dadb53c4..7a29f8c9fbd 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -35,6 +35,7 @@ use crate::base::ast::MetaItemInner; use crate::errors; use crate::expand::{self, AstFragment, Invocation}; use crate::module::DirOwnership; +use crate::stats::MacroStat; // When adding new variants, make sure to // adjust the `visit_*` / `flat_map_*` calls in `InvocationCollector` @@ -1191,7 +1192,7 @@ pub struct ExtCtxt<'a> { /// not to expand it again. pub(super) expanded_inert_attrs: MarkedAttrs, /// `-Zmacro-stats` data. 
- pub macro_stats: FxHashMap<(Symbol, MacroKind), crate::stats::MacroStat>, // njn: quals + pub macro_stats: FxHashMap<(Symbol, MacroKind), MacroStat>, } impl<'a> ExtCtxt<'a> { diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 73a21789c5d..7d6e471e7e9 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -887,7 +887,7 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ rustc_legacy_const_generics, Normal, template!(List: "N"), ErrorFollowing, EncodeCrossCrate::Yes, ), - // Do not const-check this function's body. It will always get replaced during CTFE. + // Do not const-check this function's body. It will always get replaced during CTFE via `hook_special_const_fn`. rustc_attr!( rustc_do_not_const_check, Normal, template!(Word), WarnFollowing, EncodeCrossCrate::Yes, "`#[rustc_do_not_const_check]` skips const-check for this function's body", diff --git a/compiler/rustc_lint/src/dangling.rs b/compiler/rustc_lint/src/dangling.rs index 91c7922638d..c737919db9c 100644 --- a/compiler/rustc_lint/src/dangling.rs +++ b/compiler/rustc_lint/src/dangling.rs @@ -1,4 +1,5 @@ use rustc_ast::visit::{visit_opt, walk_list}; +use rustc_attr_data_structures::{AttributeKind, find_attr}; use rustc_hir::def_id::LocalDefId; use rustc_hir::intravisit::{FnKind, Visitor, walk_expr}; use rustc_hir::{Block, Body, Expr, ExprKind, FnDecl, LangItem}; @@ -133,7 +134,7 @@ fn lint_expr(cx: &LateContext<'_>, expr: &Expr<'_>) { && let ty = cx.typeck_results().expr_ty(receiver) && owns_allocation(cx.tcx, ty) && let Some(fn_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id) - && cx.tcx.has_attr(fn_id, sym::rustc_as_ptr) + && find_attr!(cx.tcx.get_all_attrs(fn_id), AttributeKind::AsPtr(_)) { // FIXME: use `emit_node_lint` when `#[primary_span]` is added. cx.tcx.emit_node_span_lint( diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index e4fcd7e56f1..930d9fba433 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -1,8 +1,64 @@ -//! Defines the various compiler queries. //! -//! For more information on the query system, see -//! ["Queries: demand-driven compilation"](https://rustc-dev-guide.rust-lang.org/query.html). -//! This chapter includes instructions for adding new queries. +//! +//! # The rustc Query System: Query Definitions and Modifiers +//! +//! The core processes in rustc are implemented as queries. Each query is a demand-driven function from some key to a value. +//! The execution result is cached and read directly on the next request, improving compilation efficiency. +//! Some results are also saved to disk and reused in the next compilation; this is the core of incremental compilation. +//! +//! ## How to Read This Module +//! +//! Each `query` block in this file defines a single query, specifying its key and value types, along with various modifiers. +//! These query definitions are processed by the [`rustc_macros`] crate, which expands them into the necessary boilerplate code +//! for the query system, including the [`Providers`] struct (a function table for all query implementations, where each field is +//! a function pointer to the actual provider), caching, and dependency graph integration. +//! **Note:** The `Providers` struct is not a Rust trait, but a struct generated by `rustc_macros` to hold all provider functions. +//!
The `rustc_macros` crate also supports a set of **query modifiers** (see below) that control the behavior of each query. +//! +//! The actual provider functions are implemented in various modules and registered into the `Providers` struct +//! during compiler initialization (see [`rustc_interface::passes::DEFAULT_QUERY_PROVIDERS`]). +//! +//! [`rustc_macros`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_macros/index.html +//! [`rustc_interface::passes::DEFAULT_QUERY_PROVIDERS`]: ../../rustc_interface/passes/static.DEFAULT_QUERY_PROVIDERS.html +//! +//! ## Query Modifiers +//! +//! Query modifiers are special flags that alter the behavior of a query. They are parsed and processed by the `rustc_macros` crate. +//! The main modifiers are: +//! +//! - `desc { ... }`: Sets the human-readable description for diagnostics and profiling. Required for every query. +//! - `arena_cache`: Use an arena for in-memory caching of the query result. +//! - `cache_on_disk_if { ... }`: Cache the query result to disk if the provided block evaluates to true. +//! - `fatal_cycle`: If a dependency cycle is detected, abort compilation with a fatal error. +//! - `cycle_delay_bug`: If a dependency cycle is detected, emit a delayed bug instead of aborting immediately. +//! - `cycle_stash`: If a dependency cycle is detected, stash the error for later handling. +//! - `no_hash`: Do not hash the query result for incremental compilation; just mark as dirty if recomputed. +//! - `anon`: Make the query anonymous in the dependency graph (no dep node is created). +//! - `eval_always`: Always evaluate the query, ignoring its dependencies and cached results. +//! - `depth_limit`: Impose a recursion depth limit on the query to prevent stack overflows. +//! - `separate_provide_extern`: Use separate provider functions for local and external crates. +//! - `feedable`: Allow the query result to be set from another query ("fed" externally). +//! - `return_result_from_ensure_ok`: When called via `tcx.ensure_ok()`, return `Result<(), ErrorGuaranteed>` instead of `()`. +//! If the query needs to be executed and returns an error, the error is returned to the caller. +//! Only valid for queries returning `Result<_, ErrorGuaranteed>`. +//! +//! For the up-to-date list, see the `QueryModifiers` struct in +//! [`rustc_macros/src/query.rs`](https://github.com/rust-lang/rust/blob/master/compiler/rustc_macros/src/query.rs) +//! and for more details on incremental compilation, see the +//! [Query modifiers in incremental compilation](https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation-in-detail.html#query-modifiers) section of the rustc-dev-guide. +//! +//! ## Query Expansion and Code Generation +//! +//! The [`rustc_macros::rustc_queries`] macro expands each query definition into: +//! - A method on [`TyCtxt`] (and [`TyCtxtAt`]) for invoking the query. +//! - Provider traits and structs for supplying the query's value. +//! - Caching and dependency graph integration. +//! - Support for incremental compilation, disk caching, and arena allocation as controlled by the modifiers. +//! +//! [`rustc_macros::rustc_queries`]: ../../rustc_macros/macro.rustc_queries.html +//! +//! The macro-based approach allows the query system to be highly flexible and maintainable, while minimizing boilerplate. +//! +//! For more details, see the [rustc-dev-guide](https://rustc-dev-guide.rust-lang.org/query.html).
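As a hedged illustration of the shape described above (not copied from this file; the query name and the exact modifier combination are hypothetical), a definition inside `rustc_queries!` using several of the documented modifiers might look like:

```rust
rustc_queries! {
    /// Hypothetical query, shown only to illustrate the modifier syntax.
    query example_signature(key: DefId) -> ty::PolyFnSig<'tcx> {
        // Human-readable description used in diagnostics and profiling.
        desc { |tcx| "computing the signature of `{}`", tcx.def_path_str(key) }
        // Persist to the incremental cache only for local definitions.
        cache_on_disk_if { key.is_local() }
        // Local and extern crates get separate provider functions.
        separate_provide_extern
    }
}
```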
#![allow(unused_parens)] diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index b49a13ce584..a64bb424117 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -686,23 +686,34 @@ impl<'a> Parser<'a> { } if let token::DocComment(kind, style, _) = self.token.kind { - // We have something like `expr //!val` where the user likely meant `expr // !val` - let pos = self.token.span.lo() + BytePos(2); - let span = self.token.span.with_lo(pos).with_hi(pos); - err.span_suggestion_verbose( - span, - format!( - "add a space before {} to write a regular comment", - match (kind, style) { - (token::CommentKind::Line, ast::AttrStyle::Inner) => "`!`", - (token::CommentKind::Block, ast::AttrStyle::Inner) => "`!`", - (token::CommentKind::Line, ast::AttrStyle::Outer) => "the last `/`", - (token::CommentKind::Block, ast::AttrStyle::Outer) => "the last `*`", - }, - ), - " ".to_string(), - Applicability::MachineApplicable, - ); + // This is to avoid suggesting converting a doc comment to a regular comment + // when missing a comma before the doc comment in lists (#142311): + // + // ``` + // enum Foo{ + // A /// xxxxxxx + // B, + // } + // ``` + if !expected.contains(&TokenType::Comma) { + // We have something like `expr //!val` where the user likely meant `expr // !val` + let pos = self.token.span.lo() + BytePos(2); + let span = self.token.span.with_lo(pos).with_hi(pos); + err.span_suggestion_verbose( + span, + format!( + "add a space before {} to write a regular comment", + match (kind, style) { + (token::CommentKind::Line, ast::AttrStyle::Inner) => "`!`", + (token::CommentKind::Block, ast::AttrStyle::Inner) => "`!`", + (token::CommentKind::Line, ast::AttrStyle::Outer) => "the last `/`", + (token::CommentKind::Block, ast::AttrStyle::Outer) => "the last `*`", + }, + ), + " ".to_string(), + Applicability::MaybeIncorrect, + ); + } } let sp = if self.token == token::Eof { @@ -2273,23 +2284,18 @@ impl<'a> Parser<'a> { ), // Also catches `fn foo(&a)`. PatKind::Ref(ref inner_pat, mutab) - if matches!(inner_pat.clone().kind, PatKind::Ident(..)) => + if let PatKind::Ident(_, ident, _) = inner_pat.clone().kind => { - match inner_pat.clone().kind { - PatKind::Ident(_, ident, _) => { - let mutab = mutab.prefix_str(); - ( - ident, - "self: ", - format!("{ident}: &{mutab}TypeName"), - "_: ", - pat.span.shrink_to_lo(), - pat.span, - pat.span.shrink_to_lo(), - ) - } - _ => unreachable!(), - } + let mutab = mutab.prefix_str(); + ( + ident, + "self: ", + format!("{ident}: &{mutab}TypeName"), + "_: ", + pat.span.shrink_to_lo(), + pat.span, + pat.span.shrink_to_lo(), + ) } _ => { // Otherwise, try to get a type and emit a suggestion. diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 4e2be8ff0b8..dddbf65db72 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -147,6 +147,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> { | AttributeKind::ConstStabilityIndirect | AttributeKind::MacroTransparency(_), ) => { /* do nothing */ } + Attribute::Parsed(AttributeKind::AsPtr(attr_span)) => { + self.check_applied_to_fn_or_method(hir_id, *attr_span, span, target) + } Attribute::Unparsed(_) => { match attr.path().as_slice() { [sym::diagnostic, sym::do_not_recommend, ..] => { @@ -188,26 +191,23 @@ impl<'tcx> CheckAttrVisitor<'tcx> { self.check_rustc_std_internal_symbol(attr, span, target) } [sym::naked, ..] 
=> self.check_naked(hir_id, attr, span, target, attrs), - [sym::rustc_as_ptr, ..] => { - self.check_applied_to_fn_or_method(hir_id, attr, span, target) - } [sym::rustc_no_implicit_autorefs, ..] => { - self.check_applied_to_fn_or_method(hir_id, attr, span, target) + self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target) } [sym::rustc_never_returns_null_ptr, ..] => { - self.check_applied_to_fn_or_method(hir_id, attr, span, target) + self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target) } [sym::rustc_legacy_const_generics, ..] => { self.check_rustc_legacy_const_generics(hir_id, attr, span, target, item) } [sym::rustc_lint_query_instability, ..] => { - self.check_applied_to_fn_or_method(hir_id, attr, span, target) + self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target) } [sym::rustc_lint_untracked_query_information, ..] => { - self.check_applied_to_fn_or_method(hir_id, attr, span, target) + self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target) } [sym::rustc_lint_diagnostics, ..] => { - self.check_applied_to_fn_or_method(hir_id, attr, span, target) + self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target) } [sym::rustc_lint_opt_ty, ..] => self.check_rustc_lint_opt_ty(attr, span, target), [sym::rustc_lint_opt_deny_field_access, ..] => { @@ -1825,15 +1825,15 @@ impl<'tcx> CheckAttrVisitor<'tcx> { fn check_applied_to_fn_or_method( &self, hir_id: HirId, - attr: &Attribute, - span: Span, + attr_span: Span, + defn_span: Span, target: Target, ) { let is_function = matches!(target, Target::Fn | Target::Method(..)); if !is_function { self.dcx().emit_err(errors::AttrShouldBeAppliedToFn { - attr_span: attr.span(), - defn_span: span, + attr_span, + defn_span, on_crate: hir_id == CRATE_HIR_ID, }); } diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index fa4b024c422..338d9edcd22 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -415,24 +415,24 @@ pub(crate) enum AliasPossibility { } #[derive(Copy, Clone, Debug)] -pub(crate) enum PathSource<'a, 'c> { +pub(crate) enum PathSource<'a, 'ast, 'ra> { /// Type paths `Path`. Type, /// Trait paths in bounds or impls. Trait(AliasPossibility), /// Expression paths `path`, with optional parent context. - Expr(Option<&'a Expr>), + Expr(Option<&'ast Expr>), /// Paths in path patterns `Path`. Pat, /// Paths in struct expressions and patterns `Path { .. }`. Struct, /// Paths in tuple struct patterns `Path(..)`. - TupleStruct(Span, &'a [Span]), + TupleStruct(Span, &'ra [Span]), /// `m::A::B` in `<T as m::A>::B::C`. /// /// Second field holds the "cause" of this one, i.e. the context within /// which the trait item is resolved. Used for diagnostics. - TraitItem(Namespace, &'c PathSource<'a, 'c>), + TraitItem(Namespace, &'a PathSource<'a, 'ast, 'ra>), /// Paths in delegation item Delegation, /// An arg in a `use<'a, N>` precise-capturing bound. @@ -443,7 +443,7 @@ pub(crate) enum PathSource<'a, 'c> { DefineOpaques, } -impl<'a> PathSource<'a, '_> { +impl PathSource<'_, '_, '_> { fn namespace(self) -> Namespace { match self { PathSource::Type @@ -773,7 +773,7 @@ struct LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { } /// Walks the whole crate in DFS order, visiting each item, resolving names as it goes. 
-impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { +impl<'ast, 'ra, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn visit_attribute(&mut self, _: &'ast Attribute) { // We do not want to resolve expressions that appear in attributes, // as they do not correspond to actual code. @@ -1462,7 +1462,7 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r } } -impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { +impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { fn new(resolver: &'a mut Resolver<'ra, 'tcx>) -> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { // During late resolution we only track the module component of the parent scope, // although it may be useful to track other components as well for diagnostics. @@ -2010,7 +2010,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { &mut self, partial_res: PartialRes, path: &[Segment], - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path_span: Span, ) { let proj_start = path.len() - partial_res.unresolved_segments(); @@ -4161,7 +4161,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { id: NodeId, qself: &Option<P<QSelf>>, path: &Path, - source: PathSource<'ast, '_>, + source: PathSource<'_, 'ast, '_>, ) { self.smart_resolve_path_fragment( qself, @@ -4178,7 +4178,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { &mut self, qself: &Option<P<QSelf>>, path: &[Segment], - source: PathSource<'ast, '_>, + source: PathSource<'_, 'ast, '_>, finalize: Finalize, record_partial_res: RecordPartialRes, parent_qself: Option<&QSelf>, @@ -4482,7 +4482,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { span: Span, defer_to_typeck: bool, finalize: Finalize, - source: PathSource<'ast, '_>, + source: PathSource<'_, 'ast, '_>, ) -> Result<Option<PartialRes>, Spanned<ResolutionError<'ra>>> { let mut fin_res = None; @@ -4525,7 +4525,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { path: &[Segment], ns: Namespace, finalize: Finalize, - source: PathSource<'ast, '_>, + source: PathSource<'_, 'ast, '_>, ) -> Result<Option<PartialRes>, Spanned<ResolutionError<'ra>>> { debug!( "resolve_qpath(qself={:?}, path={:?}, ns={:?}, finalize={:?})", diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 2f6aed35f25..d5dd3bdb6cd 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -170,12 +170,12 @@ impl TypoCandidate { } } -impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { +impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn make_base_error( &mut self, path: &[Segment], span: Span, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, res: Option<Res>, ) -> BaseError { // Make the base error. 
@@ -421,7 +421,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { path: &[Segment], following_seg: Option<&Segment>, span: Span, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, res: Option<Res>, qself: Option<&QSelf>, ) -> (Diag<'tcx>, Vec<ImportSuggestion>) { @@ -539,7 +539,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { path: &[Segment], following_seg: Option<&Segment>, span: Span, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, res: Option<Res>, qself: Option<&QSelf>, ) { @@ -650,7 +650,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn try_lookup_name_relaxed( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], following_seg: Option<&Segment>, span: Span, @@ -940,7 +940,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_trait_and_bounds( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, res: Option<Res>, span: Span, base_error: &BaseError, @@ -1017,7 +1017,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_typo( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], following_seg: Option<&Segment>, span: Span, @@ -1063,7 +1063,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_shadowed( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], following_seg: Option<&Segment>, span: Span, @@ -1096,7 +1096,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn err_code_special_cases( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], span: Span, ) { @@ -1141,7 +1141,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_self_ty( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], span: Span, ) -> bool { @@ -1164,7 +1164,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_self_value( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], span: Span, ) -> bool { @@ -1332,7 +1332,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_swapping_misplaced_self_ty_and_trait( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, res: Option<Res>, span: Span, ) { @@ -1361,7 +1361,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, err: &mut Diag<'_>, res: Option<Res>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, ) { let PathSource::TupleStruct(_, _) = source else { return }; let Some(Res::Def(DefKind::Fn, _)) = res else { return }; @@ -1373,7 +1373,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, err: &mut Diag<'_>, res: Option<Res>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, span: Span, ) { let PathSource::Trait(_) = source else { return }; @@ -1422,7 +1422,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_pattern_match_with_let( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, span: Span, ) -> bool { if let 
PathSource::Expr(_) = source @@ -1448,7 +1448,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn get_single_associated_item( &mut self, path: &[Segment], - source: &PathSource<'_, '_>, + source: &PathSource<'_, '_, '_>, filter_fn: &impl Fn(Res) -> bool, ) -> Option<TypoSuggestion> { if let crate::PathSource::TraitItem(_, _) = source { @@ -1556,7 +1556,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { /// Check if the source is call expression and the first argument is `self`. If true, /// return the span of whole call and the span for all arguments expect the first one (`self`). - fn call_has_self_arg(&self, source: PathSource<'_, '_>) -> Option<(Span, Option<Span>)> { + fn call_has_self_arg(&self, source: PathSource<'_, '_, '_>) -> Option<(Span, Option<Span>)> { let mut has_self_arg = None; if let PathSource::Expr(Some(parent)) = source && let ExprKind::Call(_, args) = &parent.kind @@ -1614,7 +1614,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, err: &mut Diag<'_>, span: Span, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, path: &[Segment], res: Res, path_str: &str, @@ -1666,7 +1666,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { } }; - let find_span = |source: &PathSource<'_, '_>, err: &mut Diag<'_>| { + let find_span = |source: &PathSource<'_, '_, '_>, err: &mut Diag<'_>| { match source { PathSource::Expr(Some(Expr { span, kind: ExprKind::Call(_, _), .. })) | PathSource::TupleStruct(span, _) => { @@ -2699,7 +2699,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_using_enum_variant( &mut self, err: &mut Diag<'_>, - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, def_id: DefId, span: Span, ) { @@ -2877,7 +2877,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { pub(crate) fn suggest_adding_generic_parameter( &self, path: &[Segment], - source: PathSource<'_, '_>, + source: PathSource<'_, '_, '_>, ) -> Option<(Span, &'static str, String, Applicability)> { let (ident, span) = match path { [segment] diff --git a/compiler/rustc_smir/src/rustc_smir/context.rs b/compiler/rustc_smir/src/rustc_smir/context.rs index ef8c88355f6..baa4c0681e8 100644 --- a/compiler/rustc_smir/src/rustc_smir/context.rs +++ b/compiler/rustc_smir/src/rustc_smir/context.rs @@ -12,7 +12,8 @@ use rustc_middle::ty::layout::{ }; use rustc_middle::ty::print::{with_forced_trimmed_paths, with_no_trimmed_paths}; use rustc_middle::ty::{ - GenericPredicates, Instance, List, ScalarInt, TyCtxt, TypeVisitableExt, ValTree, + CoroutineArgsExt, GenericPredicates, Instance, List, ScalarInt, TyCtxt, TypeVisitableExt, + ValTree, }; use rustc_middle::{mir, ty}; use rustc_span::def_id::LOCAL_CRATE; @@ -22,9 +23,9 @@ use stable_mir::mir::mono::{InstanceDef, StaticDef}; use stable_mir::mir::{BinOp, Body, Place, UnOp}; use stable_mir::target::{MachineInfo, MachineSize}; use stable_mir::ty::{ - AdtDef, AdtKind, Allocation, ClosureDef, ClosureKind, FieldDef, FnDef, ForeignDef, - ForeignItemKind, GenericArgs, IntrinsicDef, LineInfo, MirConst, PolyFnSig, RigidTy, Span, Ty, - TyConst, TyKind, UintTy, VariantDef, + AdtDef, AdtKind, Allocation, ClosureDef, ClosureKind, CoroutineDef, Discr, FieldDef, FnDef, + ForeignDef, ForeignItemKind, GenericArgs, IntrinsicDef, LineInfo, MirConst, PolyFnSig, RigidTy, + Span, Ty, TyConst, TyKind, UintTy, VariantDef, VariantIdx, }; use stable_mir::{Crate, CrateDef, CrateItem, CrateNum, DefId, 
Error, Filename, ItemKind, Symbol}; @@ -447,6 +448,30 @@ impl<'tcx> SmirCtxt<'tcx> { def.internal(&mut *tables, tcx).variants().len() } + /// Discriminant for a given variant index of AdtDef + pub fn adt_discr_for_variant(&self, adt: AdtDef, variant: VariantIdx) -> Discr { + let mut tables = self.0.borrow_mut(); + let tcx = tables.tcx; + let adt = adt.internal(&mut *tables, tcx); + let variant = variant.internal(&mut *tables, tcx); + adt.discriminant_for_variant(tcx, variant).stable(&mut *tables) + } + + /// Discriminant for a given variant index and args of a coroutine + pub fn coroutine_discr_for_variant( + &self, + coroutine: CoroutineDef, + args: &GenericArgs, + variant: VariantIdx, + ) -> Discr { + let mut tables = self.0.borrow_mut(); + let tcx = tables.tcx; + let coroutine = coroutine.def_id().internal(&mut *tables, tcx); + let args = args.internal(&mut *tables, tcx); + let variant = variant.internal(&mut *tables, tcx); + args.as_coroutine().discriminant_for_variant(coroutine, tcx, variant).stable(&mut *tables) + } + /// The name of a variant. pub fn variant_name(&self, def: VariantDef) -> Symbol { let mut tables = self.0.borrow_mut(); diff --git a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs index 6a26f5f7997..b4239ddd896 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs @@ -960,3 +960,11 @@ impl<'tcx> Stable<'tcx> for ty::ImplTraitInTraitData { } } } + +impl<'tcx> Stable<'tcx> for rustc_middle::ty::util::Discr<'tcx> { + type T = stable_mir::ty::Discr; + + fn stable(&self, tables: &mut Tables<'_>) -> Self::T { + stable_mir::ty::Discr { val: self.val, ty: self.ty.stable(tables) } + } +} diff --git a/compiler/rustc_smir/src/stable_mir/compiler_interface.rs b/compiler/rustc_smir/src/stable_mir/compiler_interface.rs index 3967ad13eeb..2668fba9f4f 100644 --- a/compiler/rustc_smir/src/stable_mir/compiler_interface.rs +++ b/compiler/rustc_smir/src/stable_mir/compiler_interface.rs @@ -13,10 +13,10 @@ use stable_mir::mir::mono::{Instance, InstanceDef, StaticDef}; use stable_mir::mir::{BinOp, Body, Place, UnOp}; use stable_mir::target::MachineInfo; use stable_mir::ty::{ - AdtDef, AdtKind, Allocation, ClosureDef, ClosureKind, FieldDef, FnDef, ForeignDef, - ForeignItemKind, ForeignModule, ForeignModuleDef, GenericArgs, GenericPredicates, Generics, - ImplDef, ImplTrait, IntrinsicDef, LineInfo, MirConst, PolyFnSig, RigidTy, Span, TraitDecl, - TraitDef, Ty, TyConst, TyConstId, TyKind, UintTy, VariantDef, + AdtDef, AdtKind, Allocation, ClosureDef, ClosureKind, CoroutineDef, Discr, FieldDef, FnDef, + ForeignDef, ForeignItemKind, ForeignModule, ForeignModuleDef, GenericArgs, GenericPredicates, + Generics, ImplDef, ImplTrait, IntrinsicDef, LineInfo, MirConst, PolyFnSig, RigidTy, Span, + TraitDecl, TraitDef, Ty, TyConst, TyConstId, TyKind, UintTy, VariantDef, VariantIdx, }; use stable_mir::{ AssocItems, Crate, CrateItem, CrateItems, CrateNum, DefId, Error, Filename, ImplTraitDecls, @@ -230,6 +230,21 @@ impl<'tcx> SmirInterface<'tcx> { self.cx.adt_variants_len(def) } + /// Discriminant for a given variant index of AdtDef + pub(crate) fn adt_discr_for_variant(&self, adt: AdtDef, variant: VariantIdx) -> Discr { + self.cx.adt_discr_for_variant(adt, variant) + } + + /// Discriminant for a given variant index and args of a coroutine + pub(crate) fn coroutine_discr_for_variant( + &self, + coroutine: CoroutineDef, + args: &GenericArgs, + variant: VariantIdx, + ) -> Discr { +
self.cx.coroutine_discr_for_variant(coroutine, args, variant) + } + /// The name of a variant. pub(crate) fn variant_name(&self, def: VariantDef) -> Symbol { self.cx.variant_name(def) diff --git a/compiler/rustc_smir/src/stable_mir/ty.rs b/compiler/rustc_smir/src/stable_mir/ty.rs index 2934af31cd5..4415cd6e2e3 100644 --- a/compiler/rustc_smir/src/stable_mir/ty.rs +++ b/compiler/rustc_smir/src/stable_mir/ty.rs @@ -756,6 +756,12 @@ crate_def! { pub CoroutineDef; } +impl CoroutineDef { + pub fn discriminant_for_variant(&self, args: &GenericArgs, idx: VariantIdx) -> Discr { + with(|cx| cx.coroutine_discr_for_variant(*self, args, idx)) + } +} + crate_def! { #[derive(Serialize)] pub CoroutineClosureDef; @@ -831,6 +837,15 @@ impl AdtDef { pub fn repr(&self) -> ReprOptions { with(|cx| cx.adt_repr(*self)) } + + pub fn discriminant_for_variant(&self, idx: VariantIdx) -> Discr { + with(|cx| cx.adt_discr_for_variant(*self, idx)) + } +} + +pub struct Discr { + pub val: u128, + pub ty: Ty, } /// Definition of a variant, which can be either a struct / union field or an enum variant. diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs index 89f324bc313..77c0cf06fc1 100644 --- a/compiler/rustc_target/src/callconv/mips64.rs +++ b/compiler/rustc_target/src/callconv/mips64.rs @@ -2,9 +2,7 @@ use rustc_abi::{ BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface, }; -use crate::callconv::{ - ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Uniform, -}; +use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform}; fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) { // Always sign extend u32 values on 64-bit mips @@ -140,16 +138,7 @@ where // Extract first 8 chunks as the prefix let rest_size = size - Size::from_bytes(8) * prefix_index as u64; - arg.cast_to(CastTarget { - prefix, - rest: Uniform::new(Reg::i64(), rest_size), - attrs: ArgAttributes { - regular: ArgAttribute::default(), - arg_ext: ArgExtension::None, - pointee_size: Size::ZERO, - pointee_align: None, - }, - }); + arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size))); } pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>) diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 2ff7a71ca82..71cc2a45563 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -197,6 +197,17 @@ impl ArgAttributes { } } +impl From<ArgAttribute> for ArgAttributes { + fn from(value: ArgAttribute) -> Self { + Self { + regular: value, + arg_ext: ArgExtension::None, + pointee_size: Size::ZERO, + pointee_align: None, + } + } +} + /// An argument passed entirely registers with the /// same kind (e.g., HFA / HVA on PPC64 and AArch64). #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)] @@ -251,6 +262,9 @@ impl Uniform { #[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)] pub struct CastTarget { pub prefix: [Option<Reg>; 8], + /// The offset of `rest` from the start of the value. Currently only implemented for a `Reg` + /// pair created by the `offset_pair` method. 
+ pub rest_offset: Option<Size>, pub rest: Uniform, pub attrs: ArgAttributes, } @@ -263,42 +277,45 @@ impl From<Reg> for CastTarget { impl From<Uniform> for CastTarget { fn from(uniform: Uniform) -> CastTarget { - CastTarget { - prefix: [None; 8], - rest: uniform, - attrs: ArgAttributes { - regular: ArgAttribute::default(), - arg_ext: ArgExtension::None, - pointee_size: Size::ZERO, - pointee_align: None, - }, - } + Self::prefixed([None; 8], uniform) } } impl CastTarget { - pub fn pair(a: Reg, b: Reg) -> CastTarget { - CastTarget { + pub fn prefixed(prefix: [Option<Reg>; 8], rest: Uniform) -> Self { + Self { prefix, rest_offset: None, rest, attrs: ArgAttributes::new() } + } + + pub fn offset_pair(a: Reg, offset_from_start: Size, b: Reg) -> Self { + Self { prefix: [Some(a), None, None, None, None, None, None, None], - rest: Uniform::from(b), - attrs: ArgAttributes { - regular: ArgAttribute::default(), - arg_ext: ArgExtension::None, - pointee_size: Size::ZERO, - pointee_align: None, - }, + rest_offset: Some(offset_from_start), + rest: b.into(), + attrs: ArgAttributes::new(), } } + pub fn with_attrs(mut self, attrs: ArgAttributes) -> Self { + self.attrs = attrs; + self + } + + pub fn pair(a: Reg, b: Reg) -> CastTarget { + Self::prefixed([Some(a), None, None, None, None, None, None, None], Uniform::from(b)) + } + /// When you only access the range containing valid data, you can use this unaligned size; /// otherwise, use the safer `size` method. pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size { // Prefix arguments are passed in specific designated registers - let prefix_size = self - .prefix - .iter() - .filter_map(|x| x.map(|reg| reg.size)) - .fold(Size::ZERO, |acc, size| acc + size); + let prefix_size = if let Some(offset_from_start) = self.rest_offset { + offset_from_start + } else { + self.prefix + .iter() + .filter_map(|x| x.map(|reg| reg.size)) + .fold(Size::ZERO, |acc, size| acc + size) + }; // Remaining arguments are passed in chunks of the unit size let rest_size = self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes()); @@ -322,9 +339,22 @@ impl CastTarget { /// Checks if these two `CastTarget` are equal enough to be considered "the same for all /// function call ABIs". 
pub fn eq_abi(&self, other: &Self) -> bool { - let CastTarget { prefix: prefix_l, rest: rest_l, attrs: attrs_l } = self; - let CastTarget { prefix: prefix_r, rest: rest_r, attrs: attrs_r } = other; - prefix_l == prefix_r && rest_l == rest_r && attrs_l.eq_abi(attrs_r) + let CastTarget { + prefix: prefix_l, + rest_offset: rest_offset_l, + rest: rest_l, + attrs: attrs_l, + } = self; + let CastTarget { + prefix: prefix_r, + rest_offset: rest_offset_r, + rest: rest_r, + attrs: attrs_r, + } = other; + prefix_l == prefix_r + && rest_offset_l == rest_offset_r + && rest_l == rest_r + && attrs_l.eq_abi(attrs_r) } } diff --git a/compiler/rustc_target/src/callconv/nvptx64.rs b/compiler/rustc_target/src/callconv/nvptx64.rs index a26e7dac5ba..44977de7fcb 100644 --- a/compiler/rustc_target/src/callconv/nvptx64.rs +++ b/compiler/rustc_target/src/callconv/nvptx64.rs @@ -1,6 +1,6 @@ use rustc_abi::{HasDataLayout, Reg, Size, TyAbiInterface}; -use super::{ArgAttribute, ArgAttributes, ArgExtension, CastTarget}; +use super::CastTarget; use crate::callconv::{ArgAbi, FnAbi, Uniform}; fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) { @@ -34,16 +34,10 @@ fn classify_aggregate<Ty>(arg: &mut ArgAbi<'_, Ty>) { }; if align_bytes == size.bytes() { - arg.cast_to(CastTarget { - prefix: [Some(reg), None, None, None, None, None, None, None], - rest: Uniform::new(Reg::i8(), Size::from_bytes(0)), - attrs: ArgAttributes { - regular: ArgAttribute::default(), - arg_ext: ArgExtension::None, - pointee_size: Size::ZERO, - pointee_align: None, - }, - }); + arg.cast_to(CastTarget::prefixed( + [Some(reg), None, None, None, None, None, None, None], + Uniform::new(Reg::i8(), Size::ZERO), + )); } else { arg.cast_to(Uniform::new(reg, size)); } @@ -78,11 +72,10 @@ where }; if arg.layout.size.bytes() / align_bytes == 1 { // Make sure we pass the struct as array at the LLVM IR level and not as a single integer. 
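The new `rest_offset` field and `CastTarget::offset_pair` constructor above change how a two-register cast is laid out. A small sketch under assumed values (the registers and the 8-byte offset are illustrative, not taken from the patch):

```rust
use rustc_abi::{Reg, Size};
use rustc_target::callconv::CastTarget;

// An aggregate the ABI wants passed as an `f32` at offset 0 and an `i64` at
// offset 8, i.e. with 4 bytes of padding in between. Unlike `CastTarget::pair`,
// codegen loads/stores the second register at `rest_offset` instead of
// immediately after the first register.
fn example_cast() -> CastTarget {
    let cast = CastTarget::offset_pair(Reg::f32(), Size::from_bytes(8), Reg::i64());
    debug_assert_eq!(cast.rest_offset, Some(Size::from_bytes(8)));
    cast
}
```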
- arg.cast_to(CastTarget { - prefix: [Some(unit), None, None, None, None, None, None, None], - rest: Uniform::new(unit, Size::ZERO), - attrs: ArgAttributes::new(), - }); + arg.cast_to(CastTarget::prefixed( + [Some(unit), None, None, None, None, None, None, None], + Uniform::new(unit, Size::ZERO), + )); } else { arg.cast_to(Uniform::new(unit, arg.layout.size)); } diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs index cd1d3cd1eee..6a2038f9381 100644 --- a/compiler/rustc_target/src/callconv/riscv.rs +++ b/compiler/rustc_target/src/callconv/riscv.rs @@ -14,16 +14,16 @@ use crate::spec::HasTargetSpec; #[derive(Copy, Clone)] enum RegPassKind { - Float(Reg), - Integer(Reg), + Float { offset_from_start: Size, ty: Reg }, + Integer { offset_from_start: Size, ty: Reg }, Unknown, } #[derive(Copy, Clone)] enum FloatConv { - FloatPair(Reg, Reg), + FloatPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg }, Float(Reg), - MixedPair(Reg, Reg), + MixedPair { first_ty: Reg, second_ty_offset_from_start: Size, second_ty: Reg }, } #[derive(Copy, Clone)] @@ -43,6 +43,7 @@ fn should_use_fp_conv_helper<'a, Ty, C>( flen: u64, field1_kind: &mut RegPassKind, field2_kind: &mut RegPassKind, + offset_from_start: Size, ) -> Result<(), CannotUseFpConv> where Ty: TyAbiInterface<'a, C> + Copy, @@ -55,16 +56,16 @@ where } match (*field1_kind, *field2_kind) { (RegPassKind::Unknown, _) => { - *field1_kind = RegPassKind::Integer(Reg { - kind: RegKind::Integer, - size: arg_layout.size, - }); + *field1_kind = RegPassKind::Integer { + offset_from_start, + ty: Reg { kind: RegKind::Integer, size: arg_layout.size }, + }; } - (RegPassKind::Float(_), RegPassKind::Unknown) => { - *field2_kind = RegPassKind::Integer(Reg { - kind: RegKind::Integer, - size: arg_layout.size, - }); + (RegPassKind::Float { .. }, RegPassKind::Unknown) => { + *field2_kind = RegPassKind::Integer { + offset_from_start, + ty: Reg { kind: RegKind::Integer, size: arg_layout.size }, + }; } _ => return Err(CannotUseFpConv), } @@ -75,12 +76,16 @@ where } match (*field1_kind, *field2_kind) { (RegPassKind::Unknown, _) => { - *field1_kind = - RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size }); + *field1_kind = RegPassKind::Float { + offset_from_start, + ty: Reg { kind: RegKind::Float, size: arg_layout.size }, + }; } (_, RegPassKind::Unknown) => { - *field2_kind = - RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size }); + *field2_kind = RegPassKind::Float { + offset_from_start, + ty: Reg { kind: RegKind::Float, size: arg_layout.size }, + }; } _ => return Err(CannotUseFpConv), } @@ -102,13 +107,14 @@ where flen, field1_kind, field2_kind, + offset_from_start, ); } return Err(CannotUseFpConv); } } FieldsShape::Array { count, .. 
} => { - for _ in 0..count { + for i in 0..count { let elem_layout = arg_layout.field(cx, 0); should_use_fp_conv_helper( cx, @@ -117,6 +123,7 @@ where flen, field1_kind, field2_kind, + offset_from_start + elem_layout.size * i, )?; } } @@ -127,7 +134,15 @@ where } for i in arg_layout.fields.index_by_increasing_offset() { let field = arg_layout.field(cx, i); - should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?; + should_use_fp_conv_helper( + cx, + &field, + xlen, + flen, + field1_kind, + field2_kind, + offset_from_start + arg_layout.fields.offset(i), + )?; } } }, @@ -146,14 +161,52 @@ where { let mut field1_kind = RegPassKind::Unknown; let mut field2_kind = RegPassKind::Unknown; - if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() { + if should_use_fp_conv_helper( + cx, + arg, + xlen, + flen, + &mut field1_kind, + &mut field2_kind, + Size::ZERO, + ) + .is_err() + { return None; } match (field1_kind, field2_kind) { - (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)), - (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)), - (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)), - (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)), + ( + RegPassKind::Integer { offset_from_start, .. } + | RegPassKind::Float { offset_from_start, .. }, + _, + ) if offset_from_start != Size::ZERO => { + panic!("type {:?} has a first field with non-zero offset {offset_from_start:?}", arg.ty) + } + ( + RegPassKind::Integer { ty: first_ty, .. }, + RegPassKind::Float { offset_from_start, ty: second_ty }, + ) => Some(FloatConv::MixedPair { + first_ty, + second_ty_offset_from_start: offset_from_start, + second_ty, + }), + ( + RegPassKind::Float { ty: first_ty, .. }, + RegPassKind::Integer { offset_from_start, ty: second_ty }, + ) => Some(FloatConv::MixedPair { + first_ty, + second_ty_offset_from_start: offset_from_start, + second_ty, + }), + ( + RegPassKind::Float { ty: first_ty, .. }, + RegPassKind::Float { offset_from_start, ty: second_ty }, + ) => Some(FloatConv::FloatPair { + first_ty, + second_ty_offset_from_start: offset_from_start, + second_ty, + }), + (RegPassKind::Float { ty, .. 
}, RegPassKind::Unknown) => Some(FloatConv::Float(ty)), _ => None, } } @@ -171,11 +224,19 @@ where FloatConv::Float(f) => { arg.cast_to(f); } - FloatConv::FloatPair(l, r) => { - arg.cast_to(CastTarget::pair(l, r)); + FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty } => { + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); } - FloatConv::MixedPair(l, r) => { - arg.cast_to(CastTarget::pair(l, r)); + FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty } => { + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); } } return false; @@ -239,15 +300,27 @@ fn classify_arg<'a, Ty, C>( arg.cast_to(f); return; } - Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => { + Some(FloatConv::FloatPair { first_ty, second_ty_offset_from_start, second_ty }) + if *avail_fprs >= 2 => + { *avail_fprs -= 2; - arg.cast_to(CastTarget::pair(l, r)); + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); return; } - Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => { + Some(FloatConv::MixedPair { first_ty, second_ty_offset_from_start, second_ty }) + if *avail_fprs >= 1 && *avail_gprs >= 1 => + { *avail_gprs -= 1; *avail_fprs -= 1; - arg.cast_to(CastTarget::pair(l, r)); + arg.cast_to(CastTarget::offset_pair( + first_ty, + second_ty_offset_from_start, + second_ty, + )); return; } _ => (), diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs index 7ca0031fc59..186826c08fc 100644 --- a/compiler/rustc_target/src/callconv/sparc64.rs +++ b/compiler/rustc_target/src/callconv/sparc64.rs @@ -5,9 +5,7 @@ use rustc_abi::{ TyAndLayout, }; -use crate::callconv::{ - ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Uniform, -}; +use crate::callconv::{ArgAbi, ArgAttribute, CastTarget, FnAbi, Uniform}; use crate::spec::HasTargetSpec; #[derive(Clone, Debug)] @@ -197,16 +195,10 @@ where rest_size = rest_size - Reg::i32().size; } - arg.cast_to(CastTarget { - prefix: data.prefix, - rest: Uniform::new(Reg::i64(), rest_size), - attrs: ArgAttributes { - regular: data.arg_attribute, - arg_ext: ArgExtension::None, - pointee_size: Size::ZERO, - pointee_align: None, - }, - }); + arg.cast_to( + CastTarget::prefixed(data.prefix, Uniform::new(Reg::i64(), rest_size)) + .with_attrs(data.arg_attribute.into()), + ); return; } } diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index f0eb96b47b1..b59b4f92854 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -842,9 +842,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { /// evaluating this entry would not have ended up depending on either a goal /// already on the stack or a provisional cache entry. fn candidate_is_applicable( - stack: &Stack<X>, + &self, step_kind_from_parent: PathKind, - provisional_cache: &HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>, nested_goals: &NestedGoals<X>, ) -> bool { // If the global cache entry didn't depend on any nested goals, it always @@ -855,7 +854,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // If a nested goal of the global cache entry is on the stack, we would // definitely encounter a cycle. 
- if stack.iter().any(|e| nested_goals.contains(e.input)) { + if self.stack.iter().any(|e| nested_goals.contains(e.input)) { debug!("cache entry not applicable due to stack"); return false; } @@ -864,7 +863,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // would apply for any of its nested goals. #[allow(rustc::potential_query_instability)] for (input, path_from_global_entry) in nested_goals.iter() { - let Some(entries) = provisional_cache.get(&input) else { + let Some(entries) = self.provisional_cache.get(&input) else { continue; }; @@ -890,7 +889,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // We check if any of the paths taken while computing the global goal // would end up with an applicable provisional cache entry. let head = heads.highest_cycle_head(); - let head_to_curr = Self::cycle_path_kind(stack, step_kind_from_parent, head); + let head_to_curr = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head); let full_paths = path_from_global_entry.extend_with(head_to_curr); if full_paths.contains(head_to_provisional.into()) { debug!( @@ -918,12 +917,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { cx.with_global_cache(|cache| { cache .get(cx, input, available_depth, |nested_goals| { - Self::candidate_is_applicable( - &self.stack, - step_kind_from_parent, - &self.provisional_cache, - nested_goals, - ) + self.candidate_is_applicable(step_kind_from_parent, nested_goals) }) .map(|c| c.result) }) @@ -942,12 +936,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { cx.with_global_cache(|cache| { let CacheData { result, required_depth, encountered_overflow, nested_goals } = cache .get(cx, input, available_depth, |nested_goals| { - Self::candidate_is_applicable( - &self.stack, - step_kind_from_parent, - &self.provisional_cache, - nested_goals, - ) + self.candidate_is_applicable(step_kind_from_parent, nested_goals) })?; // We don't move cycle participants to the global cache, so the |

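The RISC-V hunks above change how two-field aggregates are described for the hardware floating-point calling convention: instead of remembering only the two register types, the classification now also records the byte offset of the second field (`offset_from_start` / `second_ty_offset_from_start`) and hands it to `CastTarget::offset_pair`. As an illustrative sketch only (not part of the patch; the struct name and printed values are hypothetical, not taken from the diff), the layout fact that motivates recording the offset is that alignment padding can sit between the two eligible fields, so the second register does not necessarily start right after the first:

```rust
use std::mem::{offset_of, size_of};

// Hypothetical aggregate that is eligible for the RISC-V hardware
// floating-point convention: one `f32` field and one `f64` field.
#[repr(C)]
struct FloatAndDouble {
    first: f32,  // occupies bytes 0..4
    second: f64, // on riscv64, alignment pushes this to byte offset 8
}

fn main() {
    // On riscv64 (and most 64-bit targets) `f64` is 8-byte aligned, so the
    // second field does not begin at `size_of::<f32>()`; 4 bytes of padding
    // precede it. This byte offset is the value the patched classification
    // threads through as `offset_from_start`.
    println!("first  at byte {}", offset_of!(FloatAndDouble, first));
    println!("second at byte {}", offset_of!(FloatAndDouble, second));
    println!("total size     {}", size_of::<FloatAndDouble>());
}
```

Recording the offset explicitly lets the lowered `CastTarget` follow the source-level layout rather than re-deriving the second register's position from the register types alone, which is presumably why the same offset is also checked (and a panic raised) when the first recorded field does not start at offset zero.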