-rw-r--r--  compiler/rustc_query_impl/src/profiling_support.rs    |  51
-rw-r--r--  compiler/rustc_query_system/src/cache.rs              |   4
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/dep_node.rs |  36
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs    | 101
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/mod.rs      |   3
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs       |  32
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs          |   6
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs     | 258
-rw-r--r--  compiler/rustc_query_system/src/values.rs             |   8
9 files changed, 228 insertions, 271 deletions
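
The renames below follow one convention: generic context parameters bounded by
DepContext become Tcx (a TyCtxt-like context), and those bounded by QueryContext
become Qcx, replacing the older CTX/Ctxt spellings. A minimal, self-contained
sketch of that split, using stand-in traits rather than the real
rustc_query_system items:

// Stand-ins for the real DepContext / QueryContext traits; only the
// Tcx vs. Qcx naming split is the point here.
trait DepContext: Copy {
    fn session_name(self) -> &'static str;
}

trait QueryContext: Copy {
    type Dep: DepContext;
    fn dep_context(self) -> Self::Dep;
}

// Parameters bounded by `DepContext` are spelled `Tcx`...
fn describe_dep_context<Tcx: DepContext>(tcx: Tcx) -> &'static str {
    tcx.session_name()
}

// ...while parameters bounded by `QueryContext` are spelled `Qcx`.
fn describe_query_context<Qcx: QueryContext>(qcx: Qcx) -> &'static str {
    describe_dep_context(qcx.dep_context())
}
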
diff --git a/compiler/rustc_query_impl/src/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs
index 2cc311d48c8..81114f2cd82 100644
--- a/compiler/rustc_query_impl/src/profiling_support.rs
+++ b/compiler/rustc_query_impl/src/profiling_support.rs
@@ -19,18 +19,18 @@ impl QueryKeyStringCache {
     }
 }
 
-struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
+struct QueryKeyStringBuilder<'p, 'tcx> {
     profiler: &'p SelfProfiler,
     tcx: TyCtxt<'tcx>,
-    string_cache: &'c mut QueryKeyStringCache,
+    string_cache: &'p mut QueryKeyStringCache,
 }
 
-impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
+impl<'p, 'tcx> QueryKeyStringBuilder<'p, 'tcx> {
     fn new(
         profiler: &'p SelfProfiler,
         tcx: TyCtxt<'tcx>,
-        string_cache: &'c mut QueryKeyStringCache,
-    ) -> QueryKeyStringBuilder<'p, 'c, 'tcx> {
+        string_cache: &'p mut QueryKeyStringCache,
+    ) -> QueryKeyStringBuilder<'p, 'tcx> {
         QueryKeyStringBuilder { profiler, tcx, string_cache }
     }
 
@@ -99,7 +99,7 @@ impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
 }
 
 trait IntoSelfProfilingString {
-    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
+    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId;
 }
 
 // The default implementation of `IntoSelfProfilingString` just uses `Debug`
@@ -109,7 +109,7 @@ trait IntoSelfProfilingString {
 impl<T: Debug> IntoSelfProfilingString for T {
     default fn to_self_profile_string(
         &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+        builder: &mut QueryKeyStringBuilder<'_, '_>,
     ) -> StringId {
         let s = format!("{:?}", self);
         builder.profiler.alloc_string(&s[..])
@@ -117,60 +117,42 @@ impl<T: Debug> IntoSelfProfilingString for T {
 }
 
 impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
-    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId {
+    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         self.spec_to_self_profile_string(builder)
     }
 }
 
 #[rustc_specialization_trait]
 trait SpecIntoSelfProfilingString: Debug {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId;
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId;
 }
 
 impl SpecIntoSelfProfilingString for DefId {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId {
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         builder.def_id_to_string_id(*self)
     }
 }
 
 impl SpecIntoSelfProfilingString for CrateNum {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId {
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         builder.def_id_to_string_id(self.as_def_id())
     }
 }
 
 impl SpecIntoSelfProfilingString for DefIndex {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId {
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: *self })
     }
 }
 
 impl SpecIntoSelfProfilingString for LocalDefId {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId {
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: self.local_def_index })
     }
 }
 
 impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId {
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         // We print `WithOptConstParam` values as tuples to make them shorter
         // and more readable, without losing information:
         //
@@ -205,10 +187,7 @@ where
     T0: SpecIntoSelfProfilingString,
     T1: SpecIntoSelfProfilingString,
 {
-    fn spec_to_self_profile_string(
-        &self,
-        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
-    ) -> StringId {
+    fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId {
         let val0 = self.0.to_self_profile_string(builder);
         let val1 = self.1.to_self_profile_string(builder);
 
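
In profiling_support.rs above, the separate 'c lifetime for the string cache is
folded into 'p, since the profiler and the cache are both borrowed for the whole
life of the builder. A small stand-alone sketch of that pattern (the names are
illustrative, not the real rustc types):

struct Profiler;
struct StringCache;

// One lifetime parameter suffices when both borrows must outlive the builder.
struct KeyStringBuilder<'p> {
    profiler: &'p Profiler,
    string_cache: &'p mut StringCache,
}

impl<'p> KeyStringBuilder<'p> {
    fn new(profiler: &'p Profiler, string_cache: &'p mut StringCache) -> Self {
        KeyStringBuilder { profiler, string_cache }
    }
}
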
diff --git a/compiler/rustc_query_system/src/cache.rs b/compiler/rustc_query_system/src/cache.rs
index d592812f79b..7cc885be2ba 100644
--- a/compiler/rustc_query_system/src/cache.rs
+++ b/compiler/rustc_query_system/src/cache.rs
@@ -26,7 +26,7 @@ impl<Key, Value> Cache<Key, Value> {
 }
 
 impl<Key: Eq + Hash, Value: Clone> Cache<Key, Value> {
-    pub fn get<CTX: DepContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
+    pub fn get<Tcx: DepContext>(&self, key: &Key, tcx: Tcx) -> Option<Value> {
         Some(self.hashmap.borrow().get(key)?.get(tcx))
     }
 
@@ -46,7 +46,7 @@ impl<T: Clone> WithDepNode<T> {
         WithDepNode { dep_node, cached_value }
     }
 
-    pub fn get<CTX: DepContext>(&self, tcx: CTX) -> T {
+    pub fn get<Tcx: DepContext>(&self, tcx: Tcx) -> T {
         tcx.dep_graph().read_index(self.dep_node);
         self.cached_value.clone()
     }
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 5c6ce0556eb..d79c5816a9c 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -61,18 +61,18 @@ impl<K: DepKind> DepNode<K> {
     /// Creates a new, parameterless DepNode. This method will assert
     /// that the DepNode corresponding to the given DepKind actually
     /// does not require any parameters.
-    pub fn new_no_params<Ctxt>(tcx: Ctxt, kind: K) -> DepNode<K>
+    pub fn new_no_params<Tcx>(tcx: Tcx, kind: K) -> DepNode<K>
     where
-        Ctxt: super::DepContext<DepKind = K>,
+        Tcx: super::DepContext<DepKind = K>,
     {
         debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
         DepNode { kind, hash: Fingerprint::ZERO.into() }
     }
 
-    pub fn construct<Ctxt, Key>(tcx: Ctxt, kind: K, arg: &Key) -> DepNode<K>
+    pub fn construct<Tcx, Key>(tcx: Tcx, kind: K, arg: &Key) -> DepNode<K>
     where
-        Ctxt: super::DepContext<DepKind = K>,
-        Key: DepNodeParams<Ctxt>,
+        Tcx: super::DepContext<DepKind = K>,
+        Key: DepNodeParams<Tcx>,
     {
         let hash = arg.to_fingerprint(tcx);
         let dep_node = DepNode { kind, hash: hash.into() };
@@ -93,9 +93,9 @@ impl<K: DepKind> DepNode<K> {
     /// Construct a DepNode from the given DepKind and DefPathHash. This
     /// method will assert that the given DepKind actually requires a
     /// single DefId/DefPathHash parameter.
-    pub fn from_def_path_hash<Ctxt>(tcx: Ctxt, def_path_hash: DefPathHash, kind: K) -> Self
+    pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self
     where
-        Ctxt: super::DepContext<DepKind = K>,
+        Tcx: super::DepContext<DepKind = K>,
     {
         debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
         DepNode { kind, hash: def_path_hash.0.into() }
@@ -108,18 +108,18 @@ impl<K: DepKind> fmt::Debug for DepNode<K> {
     }
 }
 
-pub trait DepNodeParams<Ctxt: DepContext>: fmt::Debug + Sized {
+pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized {
     fn fingerprint_style() -> FingerprintStyle;
 
     /// This method turns the parameters of a DepNodeConstructor into an opaque
     /// Fingerprint to be used in DepNode.
     /// Not all DepNodeParams support being turned into a Fingerprint (they
     /// don't need to if the corresponding DepNode is anonymous).
-    fn to_fingerprint(&self, _: Ctxt) -> Fingerprint {
+    fn to_fingerprint(&self, _: Tcx) -> Fingerprint {
         panic!("Not implemented. Accidentally called on anonymous node?")
     }
 
-    fn to_debug_str(&self, _: Ctxt) -> String {
+    fn to_debug_str(&self, _: Tcx) -> String {
         format!("{:?}", self)
     }
 
@@ -129,10 +129,10 @@ pub trait DepNodeParams<Ctxt: DepContext>: fmt::Debug + Sized {
     /// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
     /// It is always valid to return `None` here, in which case incremental
     /// compilation will treat the query as having changed instead of forcing it.
-    fn recover(tcx: Ctxt, dep_node: &DepNode<Ctxt::DepKind>) -> Option<Self>;
+    fn recover(tcx: Tcx, dep_node: &DepNode<Tcx::DepKind>) -> Option<Self>;
 }
 
-impl<Ctxt: DepContext, T> DepNodeParams<Ctxt> for T
+impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T
 where
     T: for<'a> HashStable<StableHashingContext<'a>> + fmt::Debug,
 {
@@ -142,7 +142,7 @@ where
     }
 
     #[inline(always)]
-    default fn to_fingerprint(&self, tcx: Ctxt) -> Fingerprint {
+    default fn to_fingerprint(&self, tcx: Tcx) -> Fingerprint {
         tcx.with_stable_hashing_context(|mut hcx| {
             let mut hasher = StableHasher::new();
             self.hash_stable(&mut hcx, &mut hasher);
@@ -151,12 +151,12 @@ where
     }
 
     #[inline(always)]
-    default fn to_debug_str(&self, _: Ctxt) -> String {
+    default fn to_debug_str(&self, _: Tcx) -> String {
         format!("{:?}", *self)
     }
 
     #[inline(always)]
-    default fn recover(_: Ctxt, _: &DepNode<Ctxt::DepKind>) -> Option<Self> {
+    default fn recover(_: Tcx, _: &DepNode<Tcx::DepKind>) -> Option<Self> {
         None
     }
 }
@@ -166,7 +166,7 @@ where
 /// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
 /// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual
 /// jump table instead of large matches.
-pub struct DepKindStruct<CTX: DepContext> {
+pub struct DepKindStruct<Tcx: DepContext> {
     /// Anonymous queries cannot be replayed from one compiler invocation to the next.
     /// When their result is needed, it is recomputed. They are useful for fine-grained
     /// dependency tracking, and caching within one compiler invocation.
@@ -216,10 +216,10 @@ pub struct DepKindStruct<CTX: DepContext> {
     /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
     /// is actually a `DefPathHash`, and can therefore just look up the corresponding
     /// `DefId` in `tcx.def_path_hash_to_def_id`.
-    pub force_from_dep_node: Option<fn(tcx: CTX, dep_node: DepNode<CTX::DepKind>) -> bool>,
+    pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode<Tcx::DepKind>) -> bool>,
 
     /// Invoke a query to put the on-disk cached value in memory.
-    pub try_load_from_on_disk_cache: Option<fn(CTX, DepNode<CTX::DepKind>)>,
+    pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode<Tcx::DepKind>)>,
 }
 
 /// A "work product" corresponds to a `.o` (or other) file that we
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 8ff56132749..d86c0bebdcd 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -377,9 +377,9 @@ impl<K: DepKind> DepGraph<K> {
 
     /// Executes something within an "anonymous" task, that is, a task the
     /// `DepNode` of which is determined by the list of inputs it read from.
-    pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
+    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
         &self,
-        cx: Ctxt,
+        cx: Tcx,
         dep_kind: K,
         op: OP,
     ) -> (R, DepNodeIndex)
@@ -571,12 +571,12 @@ impl<K: DepKind> DepGraph<K> {
     /// A node will have an index, when it's already been marked green, or when we can mark it
     /// green. This function will mark the current task as a reader of the specified node, when
     /// a node index can be found for that node.
-    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
+    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
         &self,
-        tcx: Ctxt,
+        qcx: Qcx,
         dep_node: &DepNode<K>,
     ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
-        debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
+        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
         // Return None if the dep graph is disabled
         let data = self.data.as_ref()?;
@@ -592,15 +592,16 @@ impl<K: DepKind> DepGraph<K> {
                 // in the previous compilation session too, so we can try to
                 // mark it as green by recursively marking all of its
                 // dependencies green.
-                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
+                self.try_mark_previous_green(qcx, data, prev_index, &dep_node)
                     .map(|dep_node_index| (prev_index, dep_node_index))
             }
         }
     }
 
-    fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
+    #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
+    fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
         &self,
-        tcx: Ctxt,
+        qcx: Qcx,
         data: &DepGraphData<K>,
         parent_dep_node_index: SerializedDepNodeIndex,
         dep_node: &DepNode<K>,
@@ -613,11 +614,7 @@ impl<K: DepKind> DepGraph<K> {
                 // This dependency has been marked as green before, we are
                 // still fine and can continue with checking the other
                 // dependencies.
-                debug!(
-                    "try_mark_previous_green({:?}) --- found dependency {:?} to \
-                            be immediately green",
-                    dep_node, dep_dep_node,
-                );
+                debug!("dependency {dep_dep_node:?} was immediately green");
                 return Some(());
             }
             Some(DepNodeColor::Red) => {
@@ -625,10 +622,7 @@ impl<K: DepKind> DepGraph<K> {
                 // compared to the previous compilation session. We cannot
                 // mark the DepNode as green and also don't need to bother
                 // with checking any of the other dependencies.
-                debug!(
-                    "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red",
-                    dep_node, dep_dep_node,
-                );
+                debug!("dependency {dep_dep_node:?} was immediately red");
                 return None;
             }
             None => {}
@@ -636,35 +630,26 @@ impl<K: DepKind> DepGraph<K> {
 
         // We don't know the state of this dependency. If it isn't
         // an eval_always node, let's try to mark it green recursively.
-        if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
+        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
             debug!(
-                "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
-                                 is unknown, trying to mark it green",
-                dep_node, dep_dep_node, dep_dep_node.hash,
+                "state of dependency {:?} ({}) is unknown, trying to mark it green",
+                dep_dep_node, dep_dep_node.hash,
             );
 
             let node_index =
-                self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
+                self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);
+
             if node_index.is_some() {
-                debug!(
-                    "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green",
-                    dep_node, dep_dep_node
-                );
+                debug!("managed to MARK dependency {dep_dep_node:?} as green",);
                 return Some(());
             }
         }
 
         // We failed to mark it green, so we try to force the query.
-        debug!(
-            "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
-            dep_node, dep_dep_node
-        );
-        if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
+        debug!("trying to force dependency {dep_dep_node:?}");
+        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
             // The DepNode could not be forced.
-            debug!(
-                "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
-                dep_node, dep_dep_node
-            );
+            debug!("dependency {dep_dep_node:?} could not be forced");
             return None;
         }
 
@@ -672,23 +657,17 @@ impl<K: DepKind> DepGraph<K> {
 
         match dep_dep_node_color {
             Some(DepNodeColor::Green(_)) => {
-                debug!(
-                    "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green",
-                    dep_node, dep_dep_node
-                );
+                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                 return Some(());
             }
             Some(DepNodeColor::Red) => {
-                debug!(
-                    "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing",
-                    dep_node, dep_dep_node
-                );
+                debug!("dependency {dep_dep_node:?} was red after forcing",);
                 return None;
             }
             None => {}
         }
 
-        if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
+        if !qcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
             panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
         }
 
@@ -702,23 +681,19 @@ impl<K: DepKind> DepGraph<K> {
         // invalid state will not be persisted to the
         // incremental compilation cache because of
         // compilation errors being present.
-        debug!(
-            "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error",
-            dep_node, dep_dep_node
-        );
+        debug!("dependency {dep_dep_node:?} resulted in compilation error",);
         return None;
     }
 
     /// Try to mark a dep-node which existed in the previous compilation session as green.
-    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
+    #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
+    fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
         &self,
-        tcx: Ctxt,
+        qcx: Qcx,
         data: &DepGraphData<K>,
         prev_dep_node_index: SerializedDepNodeIndex,
         dep_node: &DepNode<K>,
     ) -> Option<DepNodeIndex> {
-        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
-
         #[cfg(not(parallel_compiler))]
         {
             debug_assert!(!self.dep_node_exists(dep_node));
@@ -726,14 +701,14 @@ impl<K: DepKind> DepGraph<K> {
         }
 
         // We never try to mark eval_always nodes as green
-        debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
+        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
         debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
 
         let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
 
         for &dep_dep_node_index in prev_deps {
-            self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
+            self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node)?
         }
 
         // If we got here without hitting a `return` that means that all
@@ -745,7 +720,7 @@ impl<K: DepKind> DepGraph<K> {
         // We allocating an entry for the node in the current dependency graph and
         // adding all the appropriate edges imported from the previous graph
         let dep_node_index = data.current.promote_node_and_deps_to_current(
-            tcx.dep_context().profiler(),
+            qcx.dep_context().profiler(),
             &data.previous,
             prev_dep_node_index,
         );
@@ -754,7 +729,7 @@ impl<K: DepKind> DepGraph<K> {
 
         // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
         // Maybe store a list on disk and encode this fact in the DepNodeState
-        let side_effects = tcx.load_side_effects(prev_dep_node_index);
+        let side_effects = qcx.load_side_effects(prev_dep_node_index);
 
         #[cfg(not(parallel_compiler))]
         debug_assert!(
@@ -765,14 +740,14 @@ impl<K: DepKind> DepGraph<K> {
         );
 
         if !side_effects.is_empty() {
-            self.emit_side_effects(tcx, data, dep_node_index, side_effects);
+            self.emit_side_effects(qcx, data, dep_node_index, side_effects);
         }
 
         // ... and finally storing a "Green" entry in the color map.
         // Multiple threads can all write the same color here
         data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
 
-        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
+        debug!("successfully marked {dep_node:?} as green");
         Some(dep_node_index)
     }
 
@@ -780,9 +755,9 @@ impl<K: DepKind> DepGraph<K> {
     /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
-    fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
+    fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
         &self,
-        tcx: Ctxt,
+        qcx: Qcx,
         data: &DepGraphData<K>,
         dep_node_index: DepNodeIndex,
         side_effects: QuerySideEffects,
@@ -794,9 +769,9 @@ impl<K: DepKind> DepGraph<K> {
             // must process side effects
 
             // Promote the previous diagnostics to the current session.
-            tcx.store_side_effects(dep_node_index, side_effects.clone());
+            qcx.store_side_effects(dep_node_index, side_effects.clone());
 
-            let handle = tcx.dep_context().sess().diagnostic();
+            let handle = qcx.dep_context().sess().diagnostic();
 
             for mut diagnostic in side_effects.diagnostics {
                 handle.emit_diagnostic(&mut diagnostic);
@@ -824,7 +799,7 @@ impl<K: DepKind> DepGraph<K> {
     //
     // This method will only load queries that will end up in the disk cache.
     // Other queries will not be executed.
-    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+    pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
         let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
 
         let data = self.data.as_ref().unwrap();
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index da2075fd5aa..e370c6990a4 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -52,9 +52,8 @@ pub trait DepContext: Copy {
     }
 
     /// Try to force a dep node to execute and see if it's green.
+    #[instrument(skip(self), level = "debug")]
     fn try_force_from_dep_node(self, dep_node: DepNode<Self::DepKind>) -> bool {
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
         let cb = self.dep_kind_info(dep_node.kind);
         if let Some(f) = cb.force_from_dep_node {
             f(self, dep_node);
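
The logging changes in graph.rs and mod.rs above replace manual
"BEGIN"/"END"-style debug! lines that repeated the dep node with #[instrument]
spans, so the surrounding function and its arguments are attached to every event
automatically. A small sketch of the pattern, assuming the tracing crate (the
function and types are illustrative only):

use tracing::{debug, instrument};

// The span records `dep_node` (via Debug) for every event emitted inside,
// so the message itself no longer needs to repeat it.
#[instrument(skip(known_green), level = "debug")]
fn is_marked_green(known_green: &[u32], dep_node: u32) -> bool {
    let green = known_green.contains(&dep_node);
    debug!("dependency was {}", if green { "green" } else { "red" });
    green
}
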
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index db3ae559ad1..f40e174b7e7 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -11,7 +11,7 @@ use rustc_data_structures::fingerprint::Fingerprint;
 use std::fmt::Debug;
 use std::hash::Hash;
 
-pub trait QueryConfig<CTX: QueryContext> {
+pub trait QueryConfig<Qcx: QueryContext> {
     const NAME: &'static str;
 
     type Key: Eq + Hash + Clone + Debug;
@@ -21,47 +21,47 @@ pub trait QueryConfig<CTX: QueryContext> {
     type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
+    fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key>
     where
-        CTX: 'a;
+        Qcx: 'a;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_cache<'a>(tcx: CTX) -> &'a Self::Cache
+    fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache
     where
-        CTX: 'a;
+        Qcx: 'a;
 
     // Don't use this method to compute query results, instead use the methods on TyCtxt
-    fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable<CTX, Self::Key, Self::Value>;
+    fn make_vtable(tcx: Qcx, key: &Self::Key) -> QueryVTable<Qcx, Self::Key, Self::Value>;
 
-    fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool;
+    fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool;
 
     // Don't use this method to compute query results, instead use the methods on TyCtxt
-    fn execute_query(tcx: CTX::DepContext, k: Self::Key) -> Self::Stored;
+    fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Stored;
 }
 
 #[derive(Copy, Clone)]
-pub struct QueryVTable<CTX: QueryContext, K, V> {
+pub struct QueryVTable<Qcx: QueryContext, K, V> {
     pub anon: bool,
-    pub dep_kind: CTX::DepKind,
+    pub dep_kind: Qcx::DepKind,
     pub eval_always: bool,
     pub depth_limit: bool,
 
-    pub compute: fn(CTX::DepContext, K) -> V,
+    pub compute: fn(Qcx::DepContext, K) -> V,
     pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
     pub handle_cycle_error: HandleCycleError,
     // NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query
-    pub try_load_from_disk: Option<fn(CTX, SerializedDepNodeIndex) -> Option<V>>,
+    pub try_load_from_disk: Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>,
 }
 
-impl<CTX: QueryContext, K, V> QueryVTable<CTX, K, V> {
-    pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
+impl<Qcx: QueryContext, K, V> QueryVTable<Qcx, K, V> {
+    pub(crate) fn to_dep_node(&self, tcx: Qcx::DepContext, key: &K) -> DepNode<Qcx::DepKind>
     where
-        K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+        K: crate::dep_graph::DepNodeParams<Qcx::DepContext>,
     {
         DepNode::construct(tcx, self.dep_kind, key)
     }
 
-    pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
+    pub(crate) fn compute(&self, tcx: Qcx::DepContext, key: K) -> V {
         (self.compute)(tcx, key)
     }
 }
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index ed65393f57e..49bbcf57804 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -596,8 +596,8 @@ pub(crate) fn report_cycle<'a>(
     cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
 }
 
-pub fn print_query_stack<CTX: QueryContext>(
-    tcx: CTX,
+pub fn print_query_stack<Qcx: QueryContext>(
+    qcx: Qcx,
     mut current_query: Option<QueryJobId>,
     handler: &Handler,
     num_frames: Option<usize>,
@@ -606,7 +606,7 @@ pub fn print_query_stack<CTX: QueryContext>(
     // a panic hook, which means that the global `Handler` may be in a weird
     // state if it was responsible for triggering the panic.
     let mut i = 0;
-    let query_map = tcx.try_collect_active_jobs();
+    let query_map = qcx.try_collect_active_jobs();
 
     while let Some(query) = current_query {
         if Some(i) == num_frames {
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 0f7abe84231..f8d93a27d1c 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -62,10 +62,10 @@ where
         }
     }
 
-    pub fn try_collect_active_jobs<CTX: Copy>(
+    pub fn try_collect_active_jobs<Qcx: Copy>(
         &self,
-        tcx: CTX,
-        make_query: fn(CTX, K) -> QueryStackFrame,
+        qcx: Qcx,
+        make_query: fn(Qcx, K) -> QueryStackFrame,
         jobs: &mut QueryMap,
     ) -> Option<()> {
         #[cfg(parallel_compiler)]
@@ -76,7 +76,7 @@ where
             for shard in shards.iter() {
                 for (k, v) in shard.iter() {
                     if let QueryResult::Started(ref job) = *v {
-                        let query = make_query(tcx, k.clone());
+                        let query = make_query(qcx, k.clone());
                         jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                     }
                 }
@@ -90,7 +90,7 @@ where
             // really hurt much.)
             for (k, v) in self.active.try_lock()?.iter() {
                 if let QueryResult::Started(ref job) = *v {
-                    let query = make_query(tcx, k.clone());
+                    let query = make_query(qcx, k.clone());
                     jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                 }
             }
@@ -119,31 +119,31 @@ where
 
 #[cold]
 #[inline(never)]
-fn mk_cycle<CTX, V, R>(
-    tcx: CTX,
+fn mk_cycle<Qcx, V, R>(
+    qcx: Qcx,
     cycle_error: CycleError,
     handler: HandleCycleError,
     cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
 ) -> R
 where
-    CTX: QueryContext,
-    V: std::fmt::Debug + Value<CTX::DepContext>,
+    Qcx: QueryContext,
+    V: std::fmt::Debug + Value<Qcx::DepContext>,
     R: Clone,
 {
-    let error = report_cycle(tcx.dep_context().sess(), &cycle_error);
-    let value = handle_cycle_error(*tcx.dep_context(), &cycle_error, error, handler);
+    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
+    let value = handle_cycle_error(*qcx.dep_context(), &cycle_error, error, handler);
     cache.store_nocache(value)
 }
 
-fn handle_cycle_error<CTX, V>(
-    tcx: CTX,
+fn handle_cycle_error<Tcx, V>(
+    tcx: Tcx,
     cycle_error: &CycleError,
     mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
     handler: HandleCycleError,
 ) -> V
 where
-    CTX: DepContext,
-    V: Value<CTX>,
+    Tcx: DepContext,
+    V: Value<Tcx>,
 {
     use HandleCycleError::*;
     match handler {
@@ -176,14 +176,14 @@ where
     /// This function is inlined because that results in a noticeable speed-up
     /// for some compile-time benchmarks.
     #[inline(always)]
-    fn try_start<'b, CTX>(
-        tcx: &'b CTX,
+    fn try_start<'b, Qcx>(
+        qcx: &'b Qcx,
         state: &'b QueryState<K>,
         span: Span,
         key: K,
     ) -> TryGetJob<'b, K>
     where
-        CTX: QueryContext,
+        Qcx: QueryContext,
     {
         #[cfg(parallel_compiler)]
         let mut state_lock = state.active.get_shard_by_value(&key).lock();
@@ -193,8 +193,8 @@ where
 
         match lock.entry(key) {
             Entry::Vacant(entry) => {
-                let id = tcx.next_job_id();
-                let job = tcx.current_query_job();
+                let id = qcx.next_job_id();
+                let job = qcx.current_query_job();
                 let job = QueryJob::new(id, span, job);
 
                 let key = entry.key().clone();
@@ -213,8 +213,8 @@ where
                         // If we are single-threaded we know that we have cycle error,
                         // so we just return the error.
                         return TryGetJob::Cycle(id.find_cycle_in_stack(
-                            tcx.try_collect_active_jobs().unwrap(),
-                            &tcx.current_query_job(),
+                            qcx.try_collect_active_jobs().unwrap(),
+                            &qcx.current_query_job(),
                             span,
                         ));
                     }
@@ -223,7 +223,7 @@ where
                         // For parallel queries, we'll block and wait until the query running
                         // in another thread has completed. Record how long we wait in the
                         // self-profiler.
-                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+                        let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
 
                         // Get the latch out
                         let latch = job.latch();
@@ -232,7 +232,7 @@ where
 
                         // With parallel queries we might just have to wait on some other
                         // thread.
-                        let result = latch.wait_on(tcx.current_query_job(), span);
+                        let result = latch.wait_on(qcx.current_query_job(), span);
 
                         match result {
                             Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
@@ -335,8 +335,8 @@ where
 /// which will be used if the query is not in the cache and we need
 /// to compute it.
 #[inline]
-pub fn try_get_cached<'a, CTX, C, R, OnHit>(
-    tcx: CTX,
+pub fn try_get_cached<'a, Tcx, C, R, OnHit>(
+    tcx: Tcx,
     cache: &'a C,
     key: &C::Key,
     // `on_hit` can be called while holding a lock to the query cache
@@ -344,7 +344,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>(
 ) -> Result<R, ()>
 where
     C: QueryCache,
-    CTX: DepContext,
+    Tcx: DepContext,
     OnHit: FnOnce(&C::Stored) -> R,
 {
     cache.lookup(&key, |value, index| {
@@ -356,29 +356,29 @@ where
     })
 }
 
-fn try_execute_query<CTX, C>(
-    tcx: CTX,
+fn try_execute_query<Qcx, C>(
+    qcx: Qcx,
     state: &QueryState<C::Key>,
     cache: &C,
     span: Span,
     key: C::Key,
-    dep_node: Option<DepNode<CTX::DepKind>>,
-    query: &QueryVTable<CTX, C::Key, C::Value>,
+    dep_node: Option<DepNode<Qcx::DepKind>>,
+    query: &QueryVTable<Qcx, C::Key, C::Value>,
 ) -> (C::Stored, Option<DepNodeIndex>)
 where
     C: QueryCache,
-    C::Key: Clone + DepNodeParams<CTX::DepContext>,
-    C::Value: Value<CTX::DepContext>,
-    CTX: QueryContext,
+    C::Key: Clone + DepNodeParams<Qcx::DepContext>,
+    C::Value: Value<Qcx::DepContext>,
+    Qcx: QueryContext,
 {
-    match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
+    match JobOwner::<'_, C::Key>::try_start(&qcx, state, span, key.clone()) {
         TryGetJob::NotYetStarted(job) => {
-            let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
+            let (result, dep_node_index) = execute_job(qcx, key, dep_node, query, job.id);
             let result = job.complete(cache, result, dep_node_index);
             (result, Some(dep_node_index))
         }
         TryGetJob::Cycle(error) => {
-            let result = mk_cycle(tcx, error, query.handle_cycle_error, cache);
+            let result = mk_cycle(qcx, error, query.handle_cycle_error, cache);
             (result, None)
         }
         #[cfg(parallel_compiler)]
@@ -387,8 +387,8 @@ where
                 .lookup(&key, |value, index| (value.clone(), index))
                 .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
 
-            if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
-                tcx.dep_context().profiler().query_cache_hit(index.into());
+            if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) {
+                qcx.dep_context().profiler().query_cache_hit(index.into());
             }
             query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
 
@@ -397,25 +397,25 @@ where
     }
 }
 
-fn execute_job<CTX, K, V>(
-    tcx: CTX,
+fn execute_job<Qcx, K, V>(
+    qcx: Qcx,
     key: K,
-    mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
-    query: &QueryVTable<CTX, K, V>,
+    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+    query: &QueryVTable<Qcx, K, V>,
     job_id: QueryJobId,
 ) -> (V, DepNodeIndex)
 where
-    K: Clone + DepNodeParams<CTX::DepContext>,
+    K: Clone + DepNodeParams<Qcx::DepContext>,
     V: Debug,
-    CTX: QueryContext,
+    Qcx: QueryContext,
 {
-    let dep_graph = tcx.dep_context().dep_graph();
+    let dep_graph = qcx.dep_context().dep_graph();
 
     // Fast path for when incr. comp. is off.
     if !dep_graph.is_fully_enabled() {
-        let prof_timer = tcx.dep_context().profiler().query_provider();
-        let result = tcx.start_query(job_id, query.depth_limit, None, || {
-            query.compute(*tcx.dep_context(), key)
+        let prof_timer = qcx.dep_context().profiler().query_provider();
+        let result = qcx.start_query(job_id, query.depth_limit, None, || {
+            query.compute(*qcx.dep_context(), key)
         });
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -425,33 +425,33 @@ where
     if !query.anon && !query.eval_always {
         // `to_dep_node` is expensive for some `DepKind`s.
         let dep_node =
-            dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));
+            dep_node_opt.get_or_insert_with(|| query.to_dep_node(*qcx.dep_context(), &key));
 
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
-        if let Some(ret) = tcx.start_query(job_id, false, None, || {
-            try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
+        if let Some(ret) = qcx.start_query(job_id, false, None, || {
+            try_load_from_disk_and_cache_in_memory(qcx, &key, &dep_node, query)
         }) {
             return ret;
         }
     }
 
-    let prof_timer = tcx.dep_context().profiler().query_provider();
+    let prof_timer = qcx.dep_context().profiler().query_provider();
     let diagnostics = Lock::new(ThinVec::new());
 
     let (result, dep_node_index) =
-        tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
+        qcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
             if query.anon {
-                return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
-                    query.compute(*tcx.dep_context(), key)
+                return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind, || {
+                    query.compute(*qcx.dep_context(), key)
                 });
             }
 
             // `to_dep_node` is expensive for some `DepKind`s.
             let dep_node =
-                dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+                dep_node_opt.unwrap_or_else(|| query.to_dep_node(*qcx.dep_context(), &key));
 
-            dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
+            dep_graph.with_task(dep_node, *qcx.dep_context(), key, query.compute, query.hash_result)
         });
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -461,55 +461,55 @@ where
 
     if std::intrinsics::unlikely(!side_effects.is_empty()) {
         if query.anon {
-            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
+            qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
         } else {
-            tcx.store_side_effects(dep_node_index, side_effects);
+            qcx.store_side_effects(dep_node_index, side_effects);
         }
     }
 
     (result, dep_node_index)
 }
 
-fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
-    tcx: CTX,
+fn try_load_from_disk_and_cache_in_memory<Qcx, K, V>(
+    qcx: Qcx,
     key: &K,
-    dep_node: &DepNode<CTX::DepKind>,
-    query: &QueryVTable<CTX, K, V>,
+    dep_node: &DepNode<Qcx::DepKind>,
+    query: &QueryVTable<Qcx, K, V>,
 ) -> Option<(V, DepNodeIndex)>
 where
     K: Clone,
-    CTX: QueryContext,
+    Qcx: QueryContext,
     V: Debug,
 {
     // Note this function can be called concurrently from the same query
     // We must ensure that this is handled correctly.
 
-    let dep_graph = tcx.dep_context().dep_graph();
-    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;
+    let dep_graph = qcx.dep_context().dep_graph();
+    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?;
 
     debug_assert!(dep_graph.is_green(dep_node));
 
     // First we try to load the result from the on-disk cache.
     // Some things are never cached on disk.
     if let Some(try_load_from_disk) = query.try_load_from_disk {
-        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
+        let prof_timer = qcx.dep_context().profiler().incr_cache_loading();
 
         // The call to `with_query_deserialization` enforces that no new `DepNodes`
         // are created during deserialization. See the docs of that method for more
         // details.
         let result =
-            dep_graph.with_query_deserialization(|| try_load_from_disk(tcx, prev_dep_node_index));
+            dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
         if let Some(result) = result {
             if std::intrinsics::unlikely(
-                tcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
+                qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
             ) {
                 dep_graph.mark_debug_loaded_from_disk(*dep_node)
             }
 
-            let prev_fingerprint = tcx
+            let prev_fingerprint = qcx
                 .dep_context()
                 .dep_graph()
                 .prev_fingerprint_of(dep_node)
@@ -523,9 +523,9 @@ where
             // give us some coverage of potential bugs though.
             let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
             if std::intrinsics::unlikely(
-                try_verify || tcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
+                try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
             ) {
-                incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
+                incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query);
             }
 
             return Some((result, dep_node_index));
@@ -534,7 +534,7 @@ where
         // We always expect to find a cached result for things that
         // can be forced from `DepNode`.
         debug_assert!(
-            !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
+            !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
             "missing on-disk cache entry for {:?}",
             dep_node
         );
@@ -542,10 +542,10 @@ where
 
     // We could not load a result from the on-disk cache, so
     // recompute.
-    let prof_timer = tcx.dep_context().profiler().query_provider();
+    let prof_timer = qcx.dep_context().profiler().query_provider();
 
     // The dep-graph for this computation is already in-place.
-    let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));
+    let result = dep_graph.with_ignore(|| query.compute(*qcx.dep_context(), key.clone()));
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -558,34 +558,38 @@ where
     //
     // See issue #82920 for an example of a miscompilation that would get turned into
     // an ICE by this check
-    incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
+    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query);
 
     Some((result, dep_node_index))
 }
 
-fn incremental_verify_ich<CTX, K, V: Debug>(
-    tcx: CTX::DepContext,
+#[instrument(skip(qcx, result, query), level = "debug")]
+fn incremental_verify_ich<Qcx, K, V: Debug>(
+    qcx: Qcx::DepContext,
     result: &V,
-    dep_node: &DepNode<CTX::DepKind>,
-    query: &QueryVTable<CTX, K, V>,
+    dep_node: &DepNode<Qcx::DepKind>,
+    query: &QueryVTable<Qcx, K, V>,
 ) where
-    CTX: QueryContext,
+    Qcx: QueryContext,
 {
     assert!(
-        tcx.dep_graph().is_green(dep_node),
+        qcx.dep_graph().is_green(dep_node),
         "fingerprint for green query instance not loaded from cache: {:?}",
         dep_node,
     );
 
-    debug!("BEGIN verify_ich({:?})", dep_node);
     let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
-        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
+        qcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
     });
-    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
-    debug!("END verify_ich({:?})", dep_node);
+
+    let old_hash = qcx.dep_graph().prev_fingerprint_of(dep_node);
 
     if Some(new_hash) != old_hash {
-        incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result));
+        incremental_verify_ich_failed(
+            qcx.sess(),
+            DebugArg::from(&dep_node),
+            DebugArg::from(&result),
+        );
     }
 }
 
@@ -631,13 +635,7 @@ impl std::fmt::Debug for DebugArg<'_> {
 // different implementations for LLVM to chew on (and filling up the final
 // binary, too).
 #[cold]
-fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
-    let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
-        format!("`cargo clean -p {}` or `cargo clean`", crate_name)
-    } else {
-        "`cargo clean`".to_string()
-    };
-
+fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
     // When we emit an error message and panic, we try to debug-print the `DepNode`
     // and query result. Unfortunately, this can cause us to run additional queries,
     // which may result in another fingerprint mismatch while we're in the middle
@@ -653,6 +651,12 @@ fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: D
     if old_in_panic {
         sess.emit_err(crate::error::Reentrant);
     } else {
+        let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
+            format!("`cargo clean -p {}` or `cargo clean`", crate_name)
+        } else {
+            "`cargo clean`".to_string()
+        };
+
         sess.emit_err(crate::error::IncrementCompilation {
             run_cmd,
             dep_node: format!("{:?}", dep_node),
@@ -672,14 +676,14 @@ fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: D
 ///
 /// Note: The optimization is only available during incr. comp.
 #[inline(never)]
-fn ensure_must_run<CTX, K, V>(
-    tcx: CTX,
+fn ensure_must_run<Qcx, K, V>(
+    qcx: Qcx,
     key: &K,
-    query: &QueryVTable<CTX, K, V>,
-) -> (bool, Option<DepNode<CTX::DepKind>>)
+    query: &QueryVTable<Qcx, K, V>,
+) -> (bool, Option<DepNode<Qcx::DepKind>>)
 where
-    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
-    CTX: QueryContext,
+    K: crate::dep_graph::DepNodeParams<Qcx::DepContext>,
+    Qcx: QueryContext,
 {
     if query.eval_always {
         return (true, None);
@@ -688,10 +692,10 @@ where
     // Ensuring an anonymous query makes no sense
     assert!(!query.anon);
 
-    let dep_node = query.to_dep_node(*tcx.dep_context(), key);
+    let dep_node = query.to_dep_node(*qcx.dep_context(), key);
 
-    let dep_graph = tcx.dep_context().dep_graph();
-    match dep_graph.try_mark_green(tcx, &dep_node) {
+    let dep_graph = qcx.dep_context().dep_graph();
+    match dep_graph.try_mark_green(qcx, &dep_node) {
         None => {
             // A None return from `try_mark_green` means that this is either
             // a new dep node or that the dep node has already been marked red.
@@ -703,7 +707,7 @@ where
         }
         Some((_, dep_node_index)) => {
             dep_graph.read_index(dep_node_index);
-            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
+            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
             (false, None)
         }
     }
@@ -715,16 +719,16 @@ pub enum QueryMode {
     Ensure,
 }
 
-pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
+pub fn get_query<Q, Qcx>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
 where
-    Q: QueryConfig<CTX>,
-    Q::Key: DepNodeParams<CTX::DepContext>,
-    Q::Value: Value<CTX::DepContext>,
-    CTX: QueryContext,
+    Q: QueryConfig<Qcx>,
+    Q::Key: DepNodeParams<Qcx::DepContext>,
+    Q::Value: Value<Qcx::DepContext>,
+    Qcx: QueryContext,
 {
-    let query = Q::make_vtable(tcx, &key);
+    let query = Q::make_vtable(qcx, &key);
     let dep_node = if let QueryMode::Ensure = mode {
-        let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
+        let (must_run, dep_node) = ensure_must_run(qcx, &key, &query);
         if !must_run {
             return None;
         }
@@ -734,33 +738,33 @@ where
     };
 
     let (result, dep_node_index) = try_execute_query(
-        tcx,
-        Q::query_state(tcx),
-        Q::query_cache(tcx),
+        qcx,
+        Q::query_state(qcx),
+        Q::query_cache(qcx),
         span,
         key,
         dep_node,
         &query,
     );
     if let Some(dep_node_index) = dep_node_index {
-        tcx.dep_context().dep_graph().read_index(dep_node_index)
+        qcx.dep_context().dep_graph().read_index(dep_node_index)
     }
     Some(result)
 }
 
-pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
+pub fn force_query<Q, Qcx>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
 where
-    Q: QueryConfig<CTX>,
-    Q::Key: DepNodeParams<CTX::DepContext>,
-    Q::Value: Value<CTX::DepContext>,
-    CTX: QueryContext,
+    Q: QueryConfig<Qcx>,
+    Q::Key: DepNodeParams<Qcx::DepContext>,
+    Q::Value: Value<Qcx::DepContext>,
+    Qcx: QueryContext,
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    let cache = Q::query_cache(tcx);
+    let cache = Q::query_cache(qcx);
     let cached = cache.lookup(&key, |_, index| {
-        if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
-            tcx.dep_context().profiler().query_cache_hit(index.into());
+        if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) {
+            qcx.dep_context().profiler().query_cache_hit(index.into());
         }
     });
 
@@ -769,9 +773,9 @@ where
         Err(()) => {}
     }
 
-    let query = Q::make_vtable(tcx, &key);
-    let state = Q::query_state(tcx);
+    let query = Q::make_vtable(qcx, &key);
+    let state = Q::query_state(qcx);
     debug_assert!(!query.anon);
 
-    try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
+    try_execute_query(qcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
 }
diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs
index 67fbf14e612..214656abed4 100644
--- a/compiler/rustc_query_system/src/values.rs
+++ b/compiler/rustc_query_system/src/values.rs
@@ -1,12 +1,12 @@
 use crate::dep_graph::DepContext;
 use crate::query::QueryInfo;
 
-pub trait Value<CTX: DepContext>: Sized {
-    fn from_cycle_error(tcx: CTX, cycle: &[QueryInfo]) -> Self;
+pub trait Value<Tcx: DepContext>: Sized {
+    fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo]) -> Self;
 }
 
-impl<CTX: DepContext, T> Value<CTX> for T {
-    default fn from_cycle_error(tcx: CTX, _: &[QueryInfo]) -> T {
+impl<Tcx: DepContext, T> Value<Tcx> for T {
+    default fn from_cycle_error(tcx: Tcx, _: &[QueryInfo]) -> T {
         tcx.sess().abort_if_errors();
         // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
         // non-trivial to define it earlier.