Diffstat (limited to 'compiler')
 compiler/rustc_attr_data_structures/src/attributes.rs       |   3
 compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs  |  13
 compiler/rustc_attr_parsing/src/context.rs                  |   3
 compiler/rustc_feature/src/builtin_attrs.rs                 |   2
 compiler/rustc_hir_analysis/src/check/check.rs              |   7
 compiler/rustc_mir_transform/src/jump_threading.rs          |   2
 compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs |   2
 compiler/rustc_passes/src/check_attr.rs                     |   5
 compiler/rustc_type_ir/src/search_graph/global_cache.rs     |  17
 compiler/rustc_type_ir/src/search_graph/mod.rs              | 168
 compiler/rustc_type_ir/src/search_graph/stack.rs            |   8
 11 files changed, 152 insertions(+), 78 deletions(-)
diff --git a/compiler/rustc_attr_data_structures/src/attributes.rs b/compiler/rustc_attr_data_structures/src/attributes.rs
index 066e3e9eceb..f0f5cc4db07 100644
--- a/compiler/rustc_attr_data_structures/src/attributes.rs
+++ b/compiler/rustc_attr_data_structures/src/attributes.rs
@@ -240,6 +240,9 @@ pub enum AttributeKind {
     /// Represents `#[optimize(size|speed)]`
     Optimize(OptimizeAttr, Span),
 
+    /// Represents `#[rustc_pub_transparent]` (used by the `repr_transparent_external_private_fields` lint).
+    PubTransparent(Span),
+
     /// Represents [`#[repr]`](https://doc.rust-lang.org/stable/reference/type-layout.html#representations).
     Repr(ThinVec<(ReprAttr, Span)>),
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs b/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs
index d4c846de56e..4cfd9a82ce8 100644
--- a/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/lint_helpers.rs
@@ -19,3 +19,16 @@ impl<S: Stage> SingleAttributeParser<S> for AsPtrParser {
         Some(AttributeKind::AsPtr(cx.attr_span))
     }
 }
+
+pub(crate) struct PubTransparentParser;
+impl<S: Stage> SingleAttributeParser<S> for PubTransparentParser {
+    const PATH: &[Symbol] = &[sym::rustc_pub_transparent];
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
+    const TEMPLATE: AttributeTemplate = template!(Word);
+
+    fn convert(cx: &mut AcceptContext<'_, '_, S>, _args: &ArgParser<'_>) -> Option<AttributeKind> {
+        // FIXME: check that there are no args (this is currently checked elsewhere)
+        Some(AttributeKind::PubTransparent(cx.attr_span))
+    }
+}
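
For context, an illustrative sketch (not part of the patch) of how the attribute is meant to be used: it marks a `#[repr(transparent)]` type whose transparency is a documented guarantee, which is what the `repr_transparent_external_private_fields` lint checks for. `rustc_pub_transparent` is rustc-internal and, to my understanding, usable only behind the nightly `rustc_attrs` feature gate.

    #![feature(rustc_attrs)] // internal feature gate, nightly only

    // Transparency is documented and guaranteed for this type, so the
    // lint will not fire for external types wrapping it, despite the
    // private field.
    #[rustc_pub_transparent]
    #[repr(transparent)]
    pub struct Wrapper<T> {
        value: T,
    }
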
diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs
index 1bcf500459d..b95ea598e72 100644
--- a/compiler/rustc_attr_parsing/src/context.rs
+++ b/compiler/rustc_attr_parsing/src/context.rs
@@ -19,7 +19,7 @@ use crate::attributes::codegen_attrs::{ColdParser, OptimizeParser};
 use crate::attributes::confusables::ConfusablesParser;
 use crate::attributes::deprecation::DeprecationParser;
 use crate::attributes::inline::{InlineParser, RustcForceInlineParser};
-use crate::attributes::lint_helpers::AsPtrParser;
+use crate::attributes::lint_helpers::{AsPtrParser, PubTransparentParser};
 use crate::attributes::repr::{AlignParser, ReprParser};
 use crate::attributes::semantics::MayDangleParser;
 use crate::attributes::stability::{
@@ -113,6 +113,7 @@ attribute_parsers!(
         Single<InlineParser>,
         Single<MayDangleParser>,
         Single<OptimizeParser>,
+        Single<PubTransparentParser>,
         Single<RustcForceInlineParser>,
         Single<TransparencyParser>,
         // tidy-alphabetical-end
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index 5b1f1684d54..280b33f0723 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -710,7 +710,7 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
     ),
     rustc_attr!(
         rustc_pub_transparent, Normal, template!(Word),
-        WarnFollowing, EncodeCrossCrate::Yes,
+        ErrorFollowing, EncodeCrossCrate::Yes,
         "used internally to mark types with a `transparent` representation when it is guaranteed by the documentation",
     ),
 
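
The switch from `WarnFollowing` to `ErrorFollowing` lines up with the parser's `OnDuplicate::Error` above: a duplicated attribute is now rejected outright rather than warned about. A hypothetical reduced example (exact diagnostic wording aside):

    #[rustc_pub_transparent]
    #[rustc_pub_transparent] // previously a warning, now a hard error
    #[repr(transparent)]
    struct S(u8);
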
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
index 32fec0604c0..752cc2eff97 100644
--- a/compiler/rustc_hir_analysis/src/check/check.rs
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -2,6 +2,7 @@ use std::cell::LazyCell;
 use std::ops::ControlFlow;
 
 use rustc_abi::FieldIdx;
+use rustc_attr_data_structures::AttributeKind;
 use rustc_attr_data_structures::ReprAttr::ReprPacked;
 use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_errors::codes::*;
@@ -1384,7 +1385,11 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
                 ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)),
                 ty::Array(ty, _) => check_non_exhaustive(tcx, *ty),
                 ty::Adt(def, args) => {
-                    if !def.did().is_local() && !tcx.has_attr(def.did(), sym::rustc_pub_transparent)
+                    if !def.did().is_local()
+                        && !attrs::find_attr!(
+                            tcx.get_all_attrs(def.did()),
+                            AttributeKind::PubTransparent(_)
+                        )
                     {
                         let non_exhaustive = def.is_variant_list_non_exhaustive()
                             || def
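
This hunk replaces the symbol-keyed `tcx.has_attr(def.did(), sym::rustc_pub_transparent)` lookup with a structural match on the parsed attribute. As a sketch of the two `find_attr!` forms (the boolean form is the one used above; the extracting form is an assumption based on how other parsed attributes are queried, not something this patch uses):

    // Boolean form: does any attribute match the pattern?
    let is_pub_transparent =
        find_attr!(tcx.get_all_attrs(def.did()), AttributeKind::PubTransparent(_));

    // Extracting form: pull data out of the matched variant.
    let span = find_attr!(
        tcx.get_all_attrs(def.did()),
        AttributeKind::PubTransparent(span) => *span
    );
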
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 48db536c122..b45bff2af44 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -89,7 +89,7 @@ impl<'tcx> crate::MirPass<'tcx> for JumpThreading {
             opportunities: Vec::new(),
         };
 
-        for bb in body.basic_blocks.indices() {
+        for (bb, _) in traversal::preorder(body) {
             finder.start_from_switch(bb);
         }
 
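
`body.basic_blocks.indices()` yields every block, including blocks unreachable from the entry, while `traversal::preorder` only visits reachable ones, so the pass no longer seeds jump threading from dead code. A self-contained toy version of the distinction (plain Rust, not the rustc API):

    fn preorder(succ: &[Vec<usize>], start: usize) -> Vec<usize> {
        let mut seen = vec![false; succ.len()];
        let mut stack = vec![start];
        let mut order = Vec::new();
        while let Some(bb) = stack.pop() {
            if !std::mem::replace(&mut seen[bb], true) {
                order.push(bb);
                stack.extend(succ[bb].iter().copied().rev());
            }
        }
        order
    }

    fn main() {
        // Block 2 jumps to block 1 but is itself unreachable from block 0.
        let succ = vec![vec![1], vec![], vec![1]];
        assert_eq!(preorder(&succ, 0), vec![0, 1]); // block 2 is never visited
    }
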
diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
index 7ead0a6d6b7..00fd3ba8046 100644
--- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
@@ -430,7 +430,7 @@ where
                 canonical_input,
                 step_kind_from_parent,
                 &mut canonical_goal_evaluation,
-                |search_graph, canonical_goal_evaluation| {
+                |search_graph, cx, canonical_input, canonical_goal_evaluation| {
                     EvalCtxt::enter_canonical(
                         cx,
                         search_graph,
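
The extra closure parameters mirror the signature change in the search graph below: `evaluate_goal` went from `FnMut(&mut Self, &mut D::ProofTreeBuilder)` to `Fn(&mut Self, X, X::Input, &mut D::ProofTreeBuilder) + Copy`, so it can be re-invoked on every fixpoint iteration, with `cx` and `canonical_input` threaded through as arguments instead of being captured. A toy illustration of why a capture-free `Fn + Copy` closure suits a rerun loop:

    fn rerun_until_stable<F: Fn(u32) -> u32 + Copy>(f: F, mut x: u32) -> u32 {
        loop {
            let next = f(x); // `f` can be called again on every iteration
            if next == x {
                return x;
            }
            x = next;
        }
    }

    fn main() {
        assert_eq!(rerun_until_stable(|x| x / 2, 20), 0);
    }
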
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index 30ac1bbe94b..d802bf4df19 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -150,6 +150,10 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                 }
                 Attribute::Parsed(AttributeKind::Repr(_)) => { /* handled below this loop and elsewhere */
                 }
+
+                &Attribute::Parsed(AttributeKind::PubTransparent(attr_span)) => {
+                    self.check_rustc_pub_transparent(attr_span, span, attrs)
+                }
                 Attribute::Parsed(AttributeKind::Cold(attr_span)) => {
                     self.check_cold(hir_id, *attr_span, span, target)
                 }
@@ -291,7 +295,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                             self.check_type_const(hir_id, attr, target);
                         }
                         [sym::linkage, ..] => self.check_linkage(attr, span, target),
-                        [sym::rustc_pub_transparent, ..] => self.check_rustc_pub_transparent(attr.span(), span, attrs),
                         [
                             // ok
                             sym::allow
diff --git a/compiler/rustc_type_ir/src/search_graph/global_cache.rs b/compiler/rustc_type_ir/src/search_graph/global_cache.rs
index a2442660259..1b99cc820f1 100644
--- a/compiler/rustc_type_ir/src/search_graph/global_cache.rs
+++ b/compiler/rustc_type_ir/src/search_graph/global_cache.rs
@@ -2,6 +2,7 @@ use derive_where::derive_where;
 
 use super::{AvailableDepth, Cx, NestedGoals};
 use crate::data_structures::HashMap;
+use crate::search_graph::EvaluationResult;
 
 struct Success<X: Cx> {
     required_depth: usize,
@@ -43,28 +44,26 @@ impl<X: Cx> GlobalCache<X> {
         &mut self,
         cx: X,
         input: X::Input,
-
-        origin_result: X::Result,
+        evaluation_result: EvaluationResult<X>,
         dep_node: X::DepNodeIndex,
-
-        required_depth: usize,
-        encountered_overflow: bool,
-        nested_goals: NestedGoals<X>,
     ) {
-        let result = cx.mk_tracked(origin_result, dep_node);
+        let EvaluationResult { encountered_overflow, required_depth, heads, nested_goals, result } =
+            evaluation_result;
+        debug_assert!(heads.is_empty());
+        let result = cx.mk_tracked(result, dep_node);
         let entry = self.map.entry(input).or_default();
         if encountered_overflow {
             let with_overflow = WithOverflow { nested_goals, result };
             let prev = entry.with_overflow.insert(required_depth, with_overflow);
             if let Some(prev) = &prev {
                 assert!(cx.evaluation_is_concurrent());
-                assert_eq!(cx.get_tracked(&prev.result), origin_result);
+                assert_eq!(cx.get_tracked(&prev.result), evaluation_result.result);
             }
         } else {
             let prev = entry.success.replace(Success { required_depth, nested_goals, result });
             if let Some(prev) = &prev {
                 assert!(cx.evaluation_is_concurrent());
-                assert_eq!(cx.get_tracked(&prev.result), origin_result);
+                assert_eq!(cx.get_tracked(&prev.result), evaluation_result.result);
             }
         }
     }
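
An aside on why this compiles: `evaluation_result.result` is still readable after `let EvaluationResult { .. } = evaluation_result;` because `X::Result` is `Copy`. A by-value binding of a `Copy` field copies rather than moves it, and a copied field may still be read from a partially moved value. A standalone illustration (plain Rust, not rustc code):

    struct EvalResult {
        result: u32,      // Copy
        nested: Vec<u32>, // not Copy
    }

    fn demo(er: EvalResult) {
        // `nested` is moved out of `er`; `result` is merely copied.
        let EvalResult { result, nested } = er;
        drop(nested);
        // Reading the Copy field from the partially moved value is fine.
        assert_eq!(er.result, result);
    }

    fn main() {
        demo(EvalResult { result: 7, nested: vec![1, 2] });
    }
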
diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs
index b59b4f92854..a857da2fcd5 100644
--- a/compiler/rustc_type_ir/src/search_graph/mod.rs
+++ b/compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -1,16 +1,16 @@
-/// The search graph is responsible for caching and cycle detection in the trait
-/// solver. Making sure that caching doesn't result in soundness bugs or unstable
-/// query results is very challenging and makes this one of the most-involved
-/// self-contained components of the compiler.
-///
-/// We added fuzzing support to test its correctness. The fuzzers used to verify
-/// the current implementation can be found in https://github.com/lcnr/search_graph_fuzz.
-///
-/// This is just a quick overview of the general design, please check out the relevant
-/// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
-/// more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
-/// The global cache has to be completely unobservable, while the per-cycle cache may impact
-/// behavior as long as the resulting behavior is still correct.
+//! The search graph is responsible for caching and cycle detection in the trait
+//! solver. Making sure that caching doesn't result in soundness bugs or unstable
+//! query results is very challenging and makes this one of the most-involved
+//! self-contained components of the compiler.
+//!
+//! We added fuzzing support to test its correctness. The fuzzers used to verify
+//! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
+//!
+//! This is just a quick overview of the general design, please check out the relevant
+//! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
+//! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
+//! The global cache has to be completely unobservable, while the per-cycle cache may impact
+//! behavior as long as the resulting behavior is still correct.
 use std::cmp::Ordering;
 use std::collections::BTreeMap;
 use std::collections::hash_map::Entry;
@@ -381,18 +381,16 @@ impl PathsToNested {
 /// The nested goals of each stack entry and the path from the
 /// stack entry to that nested goal.
 ///
+/// They are used when checking whether reevaluating a global cache
+/// entry would encounter a cycle or use a provisional cache entry given
+/// the current search graph state. We need to disable the global cache
+/// in this case as it could otherwise result in behavioral differences.
+/// Cycles can impact behavior. The cycle ABA may have different final
+/// results from the cycle BAB depending on the cycle root.
+///
 /// We only start tracking nested goals once we've either encountered
 /// overflow or a solver cycle. This is a performance optimization to
 /// avoid tracking nested goals on the happy path.
-///
-/// We use nested goals for two reasons:
-/// - when rebasing provisional cache entries
-/// - when checking whether we have to ignore a global cache entry as reevaluating
-///   it would encounter a cycle or use a provisional cache entry.
-///
-/// We need to disable the global cache if using it would hide a cycle, as
-/// cycles can impact behavior. The cycle ABA may have different final
-/// results from a the cycle BAB depending on the cycle root.
 #[derive_where(Debug, Default, Clone; X: Cx)]
 struct NestedGoals<X: Cx> {
     nested_goals: HashMap<X::Input, PathsToNested>,
@@ -450,6 +448,43 @@ struct ProvisionalCacheEntry<X: Cx> {
     result: X::Result,
 }
 
+/// The final result of evaluating a goal.
+///
+/// We reset `encountered_overflow` when reevaluating a goal,
+/// but need to track whether we've hit the recursion limit at
+/// all for correctness.
+///
+/// We previously simply returned the final `StackEntry`, but this
+/// made it easy to accidentally drop information from the previous
+/// evaluation.
+#[derive_where(Debug; X: Cx)]
+struct EvaluationResult<X: Cx> {
+    encountered_overflow: bool,
+    required_depth: usize,
+    heads: CycleHeads,
+    nested_goals: NestedGoals<X>,
+    result: X::Result,
+}
+
+impl<X: Cx> EvaluationResult<X> {
+    fn finalize(
+        final_entry: StackEntry<X>,
+        encountered_overflow: bool,
+        result: X::Result,
+    ) -> EvaluationResult<X> {
+        EvaluationResult {
+            encountered_overflow,
+            // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
+            // and `nested_goals` between evaluations.
+            required_depth: final_entry.required_depth,
+            heads: final_entry.heads,
+            nested_goals: final_entry.nested_goals,
+            // We only care about the final result.
+            result,
+        }
+    }
+}
+
 pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     root_depth: AvailableDepth,
     /// The stack of goals currently being computed.
@@ -562,7 +597,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         input: X::Input,
         step_kind_from_parent: PathKind,
         inspect: &mut D::ProofTreeBuilder,
-        mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
+        evaluate_goal: impl Fn(&mut Self, X, X::Input, &mut D::ProofTreeBuilder) -> X::Result + Copy,
     ) -> X::Result {
         let Some(available_depth) =
             AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
@@ -616,12 +651,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             input,
             step_kind_from_parent,
             available_depth,
+            provisional_result: None,
             required_depth: 0,
             heads: Default::default(),
             encountered_overflow: false,
             has_been_used: None,
             nested_goals: Default::default(),
-            provisional_result: None,
         });
 
         // This is for global caching, so we properly track query dependencies.
@@ -630,35 +665,41 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // not tracked by the cache key and from outside of this anon task, it
         // must not be added to the global cache. Notably, this is the case for
         // trait solver cycles participants.
-        let ((final_entry, result), dep_node) = cx.with_cached_task(|| {
-            self.evaluate_goal_in_task(cx, input, inspect, &mut evaluate_goal)
-        });
+        let (evaluation_result, dep_node) =
+            cx.with_cached_task(|| self.evaluate_goal_in_task(cx, input, inspect, evaluate_goal));
 
         // We've finished computing the goal and have popped it from the stack,
         // lazily update its parent goal.
         Self::update_parent_goal(
             &mut self.stack,
-            final_entry.step_kind_from_parent,
-            final_entry.required_depth,
-            &final_entry.heads,
-            final_entry.encountered_overflow,
-            UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals),
+            step_kind_from_parent,
+            evaluation_result.required_depth,
+            &evaluation_result.heads,
+            evaluation_result.encountered_overflow,
+            UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
         );
+        let result = evaluation_result.result;
 
         // We're now done with this goal. We only add the root of cycles to the global cache.
         // In case this goal is involved in a larger cycle add it to the provisional cache.
-        if final_entry.heads.is_empty() {
+        if evaluation_result.heads.is_empty() {
             if let Some((_scope, expected)) = validate_cache {
                 // Do not try to move a goal into the cache again if we're testing
                 // the global cache.
-                assert_eq!(result, expected, "input={input:?}");
+                assert_eq!(evaluation_result.result, expected, "input={input:?}");
             } else if D::inspect_is_noop(inspect) {
-                self.insert_global_cache(cx, final_entry, result, dep_node)
+                self.insert_global_cache(cx, input, evaluation_result, dep_node)
             }
         } else if D::ENABLE_PROVISIONAL_CACHE {
             debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
             let entry = self.provisional_cache.entry(input).or_default();
-            let StackEntry { heads, encountered_overflow, .. } = final_entry;
+            let EvaluationResult {
+                encountered_overflow,
+                required_depth: _,
+                heads,
+                nested_goals: _,
+                result,
+            } = evaluation_result;
             let path_from_head = Self::cycle_path_kind(
                 &self.stack,
                 step_kind_from_parent,
@@ -1023,19 +1064,25 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         cx: X,
         input: X::Input,
         inspect: &mut D::ProofTreeBuilder,
-        mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
-    ) -> (StackEntry<X>, X::Result) {
+        evaluate_goal: impl Fn(&mut Self, X, X::Input, &mut D::ProofTreeBuilder) -> X::Result + Copy,
+    ) -> EvaluationResult<X> {
+        // We reset `encountered_overflow` each time we rerun this goal
+        // but need to make sure we correctly propagate it to the global
+        // cache even if only some of the evaluations actually reach the
+        // recursion limit.
+        let mut encountered_overflow = false;
         let mut i = 0;
         loop {
-            let result = evaluate_goal(self, inspect);
+            let result = evaluate_goal(self, cx, input, inspect);
             let stack_entry = self.stack.pop();
+            encountered_overflow |= stack_entry.encountered_overflow;
             debug_assert_eq!(stack_entry.input, input);
 
             // If the current goal is not the root of a cycle, we are done.
             //
             // There are no provisional cache entries which depend on this goal.
             let Some(usage_kind) = stack_entry.has_been_used else {
-                return (stack_entry, result);
+                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             };
 
             // If it is a cycle head, we have to keep trying to prove it until
@@ -1051,7 +1098,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // final result is equal to the initial response for that case.
             if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) {
                 self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
-                return (stack_entry, result);
+                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             }
 
             // If computing this goal results in ambiguity with no constraints,
@@ -1070,7 +1117,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                     D::propagate_ambiguity(cx, input, result)
                 });
-                return (stack_entry, result);
+                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             };
 
             // If we've reached the fixpoint step limit, we bail with overflow and taint all
@@ -1082,7 +1129,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                     D::on_fixpoint_overflow(cx, input)
                 });
-                return (stack_entry, result);
+                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             }
 
             // Clear all provisional cache entries which depend on a previous provisional
@@ -1091,9 +1138,22 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
 
             debug!(?result, "fixpoint changed provisional results");
             self.stack.push(StackEntry {
-                has_been_used: None,
+                input,
+                step_kind_from_parent: stack_entry.step_kind_from_parent,
+                available_depth: stack_entry.available_depth,
                 provisional_result: Some(result),
-                ..stack_entry
+                // We can keep these fields from previous iterations as they are only
+                // ever read after finalizing this evaluation.
+                required_depth: stack_entry.required_depth,
+                heads: stack_entry.heads,
+                nested_goals: stack_entry.nested_goals,
+                // We reset these two fields when rerunning this goal. We could
+                // keep `encountered_overflow` as it's only used as a performance
+                // optimization. However, given that the proof tree will likely look
+                // similar to the previous iterations when reevaluating, it's better
+                // for caching if the reevaluation also starts out with `false`.
+                encountered_overflow: false,
+                has_been_used: None,
             });
         }
     }
@@ -1109,21 +1169,11 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     fn insert_global_cache(
         &mut self,
         cx: X,
-        final_entry: StackEntry<X>,
-        result: X::Result,
+        input: X::Input,
+        evaluation_result: EvaluationResult<X>,
         dep_node: X::DepNodeIndex,
     ) {
-        debug!(?final_entry, ?result, "insert global cache");
-        cx.with_global_cache(|cache| {
-            cache.insert(
-                cx,
-                final_entry.input,
-                result,
-                dep_node,
-                final_entry.required_depth,
-                final_entry.encountered_overflow,
-                final_entry.nested_goals,
-            )
-        })
+        debug!(?evaluation_result, "insert global cache");
+        cx.with_global_cache(|cache| cache.insert(cx, input, evaluation_result, dep_node))
     }
 }
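
A compact sketch (toy types, not the real solver API) of the control flow `evaluate_goal_in_task` now has: each rerun of the fixpoint loop starts with a fresh `encountered_overflow`, but the flag is OR-ed into the final `EvaluationResult` so the global cache still learns that some iteration hit the recursion limit.

    struct IterOutcome {
        result: u32,
        encountered_overflow: bool,
        reached_fixpoint: bool,
    }

    fn evaluate_to_fixpoint(
        mut step: impl FnMut(Option<u32>) -> IterOutcome,
    ) -> (u32, bool) {
        // Accumulates across reruns even though each rerun resets its own flag.
        let mut encountered_overflow = false;
        let mut provisional_result = None;
        loop {
            let out = step(provisional_result);
            encountered_overflow |= out.encountered_overflow;
            if out.reached_fixpoint {
                return (out.result, encountered_overflow);
            }
            provisional_result = Some(out.result);
        }
    }

    fn main() {
        // The first iteration overflows; the second reaches a fixpoint.
        let mut calls = 0;
        let (result, overflowed) = evaluate_to_fixpoint(|_prev| {
            calls += 1;
            IterOutcome {
                result: 42,
                encountered_overflow: calls == 1,
                reached_fixpoint: calls == 2,
            }
        });
        assert_eq!((result, overflowed), (42, true));
    }
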
diff --git a/compiler/rustc_type_ir/src/search_graph/stack.rs b/compiler/rustc_type_ir/src/search_graph/stack.rs
index 8bb247bf055..e0fd934df69 100644
--- a/compiler/rustc_type_ir/src/search_graph/stack.rs
+++ b/compiler/rustc_type_ir/src/search_graph/stack.rs
@@ -26,6 +26,10 @@ pub(super) struct StackEntry<X: Cx> {
     /// The available depth of a given goal, immutable.
     pub available_depth: AvailableDepth,
 
+    /// Starts out as `None` and gets set when rerunning this
+    /// goal in case we encounter a cycle.
+    pub provisional_result: Option<X::Result>,
+
     /// The maximum depth required while evaluating this goal.
     pub required_depth: usize,
 
@@ -42,10 +46,6 @@ pub(super) struct StackEntry<X: Cx> {
 
     /// The nested goals of this goal, see the doc comment of the type.
     pub nested_goals: NestedGoals<X>,
-
-    /// Starts out as `None` and gets set when rerunning this
-    /// goal in case we encounter a cycle.
-    pub provisional_result: Option<X::Result>,
 }
 
 #[derive_where(Default; X: Cx)]