author     John Kåre Alsaker <john.kare.alsaker@gmail.com>  2023-02-24 00:48:50 +0100
committer  John Kåre Alsaker <john.kare.alsaker@gmail.com>  2023-03-09 06:14:34 +0100
commit     60ed37c2e16a9b426f84501a9ae4a5f22741816c (patch)
tree       23febb7ca1de014260bb6a1b9f26a1e748040ece /compiler
parent     6a179026decb823e6ad8ba1c81729528bc5d695f (diff)
Move dep graph methods to DepGraphData to avoid branches and `unwrap`s
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs  289
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/mod.rs      3
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs     96
3 files changed, 224 insertions, 164 deletions
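
At a glance, the commit turns `DepGraph` methods that previously branched on `self.data` (an `Option`) or called `unwrap()` into thin wrappers that fetch the data once through a new `data()` accessor and delegate to methods now defined on `DepGraphData`. Below is a minimal, hypothetical sketch of that delegation pattern; `Graph` and `GraphData` are simplified stand-ins, not the actual rustc types or signatures (the real types are generic over `DepKind` and hold the data behind an `Lrc`).

    // Sketch only: `Graph` stands in for `DepGraph`, `GraphData` for `DepGraphData`.
    struct GraphData {
        nodes: Vec<u32>,
    }

    struct Graph {
        // `None` when incremental compilation is disabled.
        data: Option<GraphData>,
    }

    impl GraphData {
        // Hot-path logic lives on the data type, so callers that already
        // hold `&GraphData` pay no branch and need no `unwrap`.
        fn node_exists(&self, node: u32) -> bool {
            self.nodes.contains(&node)
        }
    }

    impl Graph {
        // Expose the inner data once; callers can fetch it a single time
        // and keep using `&GraphData` afterwards.
        fn data(&self) -> Option<&GraphData> {
            self.data.as_ref()
        }

        // The old convenience API survives as a delegating wrapper.
        fn node_exists(&self, node: u32) -> bool {
            self.data().map_or(false, |d| d.node_exists(node))
        }
    }

    fn main() {
        let graph = Graph { data: Some(GraphData { nodes: vec![1, 2, 3] }) };
        // Branch once, then call methods on the data directly.
        if let Some(data) = graph.data() {
            assert!(data.node_exists(2));
        }
        assert!(!graph.node_exists(42));
    }

This mirrors how `execute_job` in `plumbing.rs` now obtains `dep_graph.data()` once and passes `&DepGraphData` down to `try_load_from_disk_and_cache_in_memory` and `incremental_verify_ich`, so those paths no longer re-check whether incremental compilation is enabled.
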
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 59e0c359745..2c5f84b2c5c 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -70,7 +70,7 @@ impl DepNodeColor {
     }
 }
 
-struct DepGraphData<K: DepKind> {
+pub struct DepGraphData<K: DepKind> {
     /// The new encoding of the dependency graph, optimized for red/green
     /// tracking. The `current` field is the dependency graph of only the
     /// current compilation session: We don't merge the previous dep-graph into
@@ -168,6 +168,11 @@ impl<K: DepKind> DepGraph<K> {
         DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
     }
 
+    #[inline]
+    pub fn data(&self) -> Option<&DepGraphData<K>> {
+        self.data.as_deref()
+    }
+
     /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
     #[inline]
     pub fn is_fully_enabled(&self) -> bool {
@@ -245,13 +250,45 @@ impl<K: DepKind> DepGraph<K> {
     /// in the query infrastructure, and is not currently needed by the
     /// decoding of any query results. Should the need arise in the future,
     /// we should consider extending the query system with this functionality.
-    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
+    pub fn with_query_deserialization<OP, R>(op: OP) -> R
     where
         OP: FnOnce() -> R,
     {
         K::with_deps(TaskDepsRef::Forbid, op)
     }
 
+    #[inline(always)]
+    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+        &self,
+        key: DepNode<K>,
+        cx: Ctxt,
+        arg: A,
+        task: fn(Ctxt, A) -> R,
+        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+    ) -> (R, DepNodeIndex) {
+        match self.data() {
+            Some(data) => data.with_task(key, cx, arg, task, hash_result),
+            None => (task(cx, arg), self.next_virtual_depnode_index()),
+        }
+    }
+
+    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+        &self,
+        cx: Tcx,
+        dep_kind: K,
+        op: OP,
+    ) -> (R, DepNodeIndex)
+    where
+        OP: FnOnce() -> R,
+    {
+        match self.data() {
+            Some(data) => data.with_anon_task(cx, dep_kind, op),
+            None => (op(), self.next_virtual_depnode_index()),
+        }
+    }
+}
+
+impl<K: DepKind> DepGraphData<K> {
     /// Starts a new dep-graph task. Dep-graph tasks are specified
     /// using a free function (`task`) and **not** a closure -- this
     /// is intentional because we want to exercise tight control over
@@ -288,29 +325,6 @@ impl<K: DepKind> DepGraph<K> {
         task: fn(Ctxt, A) -> R,
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        if self.is_fully_enabled() {
-            self.with_task_impl(key, cx, arg, task, hash_result)
-        } else {
-            // Incremental compilation is turned off. We just execute the task
-            // without tracking. We still provide a dep-node index that uniquely
-            // identifies the task so that we have a cheap way of referring to
-            // the query for self-profiling.
-            (task(cx, arg), self.next_virtual_depnode_index())
-        }
-    }
-
-    #[inline(always)]
-    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
-        &self,
-        key: DepNode<K>,
-        cx: Ctxt,
-        arg: A,
-        task: fn(Ctxt, A) -> R,
-        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
-    ) -> (R, DepNodeIndex) {
-        // This function is only called when the graph is enabled.
-        let data = self.data.as_ref().unwrap();
-
         // If the following assertion triggers, it can have two reasons:
         // 1. Something is wrong with DepNode creation, either here or
         //    in `DepGraph::try_mark_green()`.
@@ -351,9 +365,9 @@ impl<K: DepKind> DepGraph<K> {
         let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;
 
         // Intern the new `DepNode`.
-        let (dep_node_index, prev_and_color) = data.current.intern_node(
+        let (dep_node_index, prev_and_color) = self.current.intern_node(
             dcx.profiler(),
-            &data.previous,
+            &self.previous,
             key,
             edges,
             current_fingerprint,
@@ -364,12 +378,12 @@ impl<K: DepKind> DepGraph<K> {
 
         if let Some((prev_index, color)) = prev_and_color {
             debug_assert!(
-                data.colors.get(prev_index).is_none(),
+                self.colors.get(prev_index).is_none(),
                 "DepGraph::with_task() - Duplicate DepNodeColor \
                             insertion for {key:?}"
             );
 
-            data.colors.insert(prev_index, color);
+            self.colors.insert(prev_index, color);
         }
 
         (result, dep_node_index)
@@ -388,57 +402,55 @@ impl<K: DepKind> DepGraph<K> {
     {
         debug_assert!(!cx.is_eval_always(dep_kind));
 
-        if let Some(ref data) = self.data {
-            let task_deps = Lock::new(TaskDeps::default());
-            let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
-            let task_deps = task_deps.into_inner();
-            let task_deps = task_deps.reads;
-
-            let dep_node_index = match task_deps.len() {
-                0 => {
-                    // Because the dep-node id of anon nodes is computed from the sets of its
-                    // dependencies we already know what the ID of this dependency-less node is
-                    // going to be (i.e. equal to the precomputed
-                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
-                    // a `StableHasher` and sending the node through interning.
-                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
-                }
-                1 => {
-                    // When there is only one dependency, don't bother creating a node.
-                    task_deps[0]
-                }
-                _ => {
-                    // The dep node indices are hashed here instead of hashing the dep nodes of the
-                    // dependencies. These indices may refer to different nodes per session, but this isn't
-                    // a problem here because we that ensure the final dep node hash is per session only by
-                    // combining it with the per session random number `anon_id_seed`. This hash only need
-                    // to map the dependencies to a single value on a per session basis.
-                    let mut hasher = StableHasher::new();
-                    task_deps.hash(&mut hasher);
-
-                    let target_dep_node = DepNode {
-                        kind: dep_kind,
-                        // Fingerprint::combine() is faster than sending Fingerprint
-                        // through the StableHasher (at least as long as StableHasher
-                        // is so slow).
-                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
-                    };
+        let task_deps = Lock::new(TaskDeps::default());
+        let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+        let task_deps = task_deps.into_inner();
+        let task_deps = task_deps.reads;
+
+        let dep_node_index = match task_deps.len() {
+            0 => {
+                // Because the dep-node id of anon nodes is computed from the sets of its
+                // dependencies we already know what the ID of this dependency-less node is
+                // going to be (i.e. equal to the precomputed
+                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+                // a `StableHasher` and sending the node through interning.
+                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+            }
+            1 => {
+                // When there is only one dependency, don't bother creating a node.
+                task_deps[0]
+            }
+            _ => {
+                // The dep node indices are hashed here instead of hashing the dep nodes of the
+                // dependencies. These indices may refer to different nodes per session, but this isn't
+                // a problem here because we ensure that the final dep node hash is per session only by
+                // combining it with the per session random number `anon_id_seed`. This hash only needs
+                // to map the dependencies to a single value on a per session basis.
+                let mut hasher = StableHasher::new();
+                task_deps.hash(&mut hasher);
+
+                let target_dep_node = DepNode {
+                    kind: dep_kind,
+                    // Fingerprint::combine() is faster than sending Fingerprint
+                    // through the StableHasher (at least as long as StableHasher
+                    // is so slow).
+                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
+                };
 
-                    data.current.intern_new_node(
-                        cx.profiler(),
-                        target_dep_node,
-                        task_deps,
-                        Fingerprint::ZERO,
-                    )
-                }
-            };
+                self.current.intern_new_node(
+                    cx.profiler(),
+                    target_dep_node,
+                    task_deps,
+                    Fingerprint::ZERO,
+                )
+            }
+        };
 
-            (result, dep_node_index)
-        } else {
-            (op(), self.next_virtual_depnode_index())
-        }
+        (result, dep_node_index)
     }
+}
 
+impl<K: DepKind> DepGraph<K> {
     #[inline]
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
@@ -521,7 +533,7 @@ impl<K: DepKind> DepGraph<K> {
             // For sanity, we still check that the loaded stable hash and the new one match.
             if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
                 let _current_fingerprint =
-                    crate::query::incremental_verify_ich(cx, result, &node, hash_result);
+                    crate::query::incremental_verify_ich(cx, data, result, &node, hash_result);
 
                 #[cfg(debug_assertions)]
                 if hash_result.is_some() {
@@ -585,24 +597,59 @@ impl<K: DepKind> DepGraph<K> {
 
     #[inline]
     pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
-        let data = self.data.as_ref().unwrap();
-        let current = &data.current;
+        self.data.as_ref().unwrap().dep_node_index_of_opt(dep_node)
+    }
+}
 
-        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
-            current.prev_index_to_index.lock()[prev_index]
+impl<K: DepKind> DepGraphData<K> {
+    #[inline]
+    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+            self.current.prev_index_to_index.lock()[prev_index]
         } else {
-            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
+            self.current
+                .new_node_to_index
+                .get_shard_by_value(dep_node)
+                .lock()
+                .get(dep_node)
+                .copied()
         }
     }
 
     #[inline]
     pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
-        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
+        self.dep_node_index_of_opt(dep_node).is_some()
+    }
+
+    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+            self.colors.get(prev_index)
+        } else {
+            // This is a node that did not exist in the previous compilation session.
+            None
+        }
+    }
+
+    /// Returns true if the given node has been marked as green during the
+    /// current compilation session. Used in various assertions
+    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
+        self.node_color(dep_node).map_or(false, |c| c.is_green())
     }
 
     #[inline]
     pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
-        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
+        self.previous.fingerprint_of(dep_node)
+    }
+
+    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+        self.debug_loaded_from_disk.lock().insert(dep_node);
+    }
+}
+
+impl<K: DepKind> DepGraph<K> {
+    #[inline]
+    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
     }
 
     /// Checks whether a previous work product exists for `v` and, if
@@ -617,10 +664,6 @@ impl<K: DepKind> DepGraph<K> {
         &self.data.as_ref().unwrap().previous_work_products
     }
 
-    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
-        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
-    }
-
     pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
         self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
     }
@@ -645,17 +688,22 @@ impl<K: DepKind> DepGraph<K> {
 
     fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
         if let Some(ref data) = self.data {
-            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
-                return data.colors.get(prev_index);
-            } else {
-                // This is a node that did not exist in the previous compilation session.
-                return None;
-            }
+            return data.node_color(dep_node);
         }
 
         None
     }
 
+    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+        &self,
+        qcx: Qcx,
+        dep_node: &DepNode<K>,
+    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
+        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
+    }
+}
+
+impl<K: DepKind> DepGraphData<K> {
     /// Try to mark a node index for the node dep_node.
     ///
     /// A node will have an index, when it's already been marked green, or when we can mark it
@@ -668,26 +716,23 @@ impl<K: DepKind> DepGraph<K> {
     ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
-        // Return None if the dep graph is disabled
-        let data = self.data.as_ref()?;
-
         // Return None if the dep node didn't exist in the previous session
-        let prev_index = data.previous.node_to_index_opt(dep_node)?;
+        let prev_index = self.previous.node_to_index_opt(dep_node)?;
 
-        match data.colors.get(prev_index) {
+        match self.colors.get(prev_index) {
             Some(DepNodeColor::Green(dep_node_index)) => return Some((prev_index, dep_node_index)),
             Some(DepNodeColor::Red) => return None,
             None => {}
         }
 
-        let backtrace = backtrace_printer(qcx.dep_context().sess(), data, prev_index);
+        let backtrace = backtrace_printer(qcx.dep_context().sess(), self, prev_index);
 
         // This DepNode and the corresponding query invocation existed
         // in the previous compilation session too, so we can try to
         // mark it as green by recursively marking all of its
         // dependencies green.
         let ret = self
-            .try_mark_previous_green(qcx, data, prev_index, &dep_node)
+            .try_mark_previous_green(qcx, prev_index, &dep_node)
             .map(|dep_node_index| (prev_index, dep_node_index));
 
         // We succeeded, no backtrace.
@@ -695,16 +740,15 @@ impl<K: DepKind> DepGraph<K> {
         return ret;
     }
 
-    #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
+    #[instrument(skip(self, qcx, parent_dep_node_index), level = "debug")]
     fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
-        data: &DepGraphData<K>,
         parent_dep_node_index: SerializedDepNodeIndex,
         dep_node: &DepNode<K>,
     ) -> Option<()> {
-        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
-        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);
+        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
+        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);
 
         match dep_dep_node_color {
             Some(DepNodeColor::Green(_)) => {
@@ -733,8 +777,7 @@ impl<K: DepKind> DepGraph<K> {
                 dep_dep_node, dep_dep_node.hash,
             );
 
-            let node_index =
-                self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);
+            let node_index = self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node);
 
             if node_index.is_some() {
                 debug!("managed to MARK dependency {dep_dep_node:?} as green",);
@@ -750,7 +793,7 @@ impl<K: DepKind> DepGraph<K> {
             return None;
         }
 
-        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
 
         match dep_dep_node_color {
             Some(DepNodeColor::Green(_)) => {
@@ -783,30 +826,29 @@ impl<K: DepKind> DepGraph<K> {
     }
 
     /// Try to mark a dep-node which existed in the previous compilation session as green.
-    #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
+    #[instrument(skip(self, qcx, prev_dep_node_index), level = "debug")]
     fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
-        data: &DepGraphData<K>,
         prev_dep_node_index: SerializedDepNodeIndex,
         dep_node: &DepNode<K>,
     ) -> Option<DepNodeIndex> {
         #[cfg(not(parallel_compiler))]
         {
             debug_assert!(!self.dep_node_exists(dep_node));
-            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
+            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
         }
 
         // We never try to mark eval_always nodes as green
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
-        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
+        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);
 
-        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
+        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
 
         for &dep_dep_node_index in prev_deps {
-            let backtrace = backtrace_printer(qcx.dep_context().sess(), data, dep_dep_node_index);
-            let success = self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node);
+            let backtrace = backtrace_printer(qcx.dep_context().sess(), self, dep_dep_node_index);
+            let success = self.try_mark_parent_green(qcx, dep_dep_node_index, dep_node);
             backtrace.disable();
             success?;
         }
@@ -819,9 +861,9 @@ impl<K: DepKind> DepGraph<K> {
 
         // We are allocating an entry for the node in the current dependency graph and
         // adding all the appropriate edges imported from the previous graph
-        let dep_node_index = data.current.promote_node_and_deps_to_current(
+        let dep_node_index = self.current.promote_node_and_deps_to_current(
             qcx.dep_context().profiler(),
-            &data.previous,
+            &self.previous,
             prev_dep_node_index,
         );
 
@@ -833,20 +875,20 @@ impl<K: DepKind> DepGraph<K> {
 
         #[cfg(not(parallel_compiler))]
         debug_assert!(
-            data.colors.get(prev_dep_node_index).is_none(),
+            self.colors.get(prev_dep_node_index).is_none(),
             "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
                       insertion for {dep_node:?}"
         );
 
         if !side_effects.is_empty() {
-            self.with_query_deserialization(|| {
-                self.emit_side_effects(qcx, data, dep_node_index, side_effects)
+            DepGraph::<K>::with_query_deserialization(|| {
+                self.emit_side_effects(qcx, dep_node_index, side_effects)
             });
         }
 
         // ... and finally storing a "Green" entry in the color map.
         // Multiple threads can all write the same color here
-        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
 
         debug!("successfully marked {dep_node:?} as green");
         Some(dep_node_index)
@@ -859,11 +901,10 @@ impl<K: DepKind> DepGraph<K> {
     fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
-        data: &DepGraphData<K>,
         dep_node_index: DepNodeIndex,
         side_effects: QuerySideEffects,
     ) {
-        let mut processed = data.processed_side_effects.lock();
+        let mut processed = self.processed_side_effects.lock();
 
         if processed.insert(dep_node_index) {
             // We were the first to insert the node in the set so this thread
@@ -879,7 +920,9 @@ impl<K: DepKind> DepGraph<K> {
             }
         }
     }
+}
 
+impl<K: DepKind> DepGraph<K> {
     /// Returns true if the given node has been marked as red during the
     /// current compilation session. Used in various assertions
     pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index ba83b775631..5a7b9ae2ab4 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -6,7 +6,8 @@ mod serialized;
 
 pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
 pub use graph::{
-    hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct,
+    hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
+    WorkProduct,
 };
 pub use query::DepGraphQuery;
 pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 005fcd8c4cc..87648180690 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,8 +2,10 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::HasDepContext;
-use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
+use crate::dep_graph::{
+    DepContext, DepGraph, DepKind, DepNode, DepNodeIndex, DepNodeParams, TaskDepsRef,
+};
+use crate::dep_graph::{DepGraphData, HasDepContext};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
 use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
@@ -411,32 +413,35 @@ where
     Qcx: QueryContext,
 {
     let dep_graph = qcx.dep_context().dep_graph();
+    let dep_graph_data = match dep_graph.data() {
+        // Fast path for when incr. comp. is off.
+        None => {
+            // Fingerprint the key, just to assert that it doesn't
+            // have anything we don't consider hashable
+            if cfg!(debug_assertions) {
+                let _ = key.to_fingerprint(*qcx.dep_context());
+            }
 
-    // Fast path for when incr. comp. is off.
-    if !dep_graph.is_fully_enabled() {
-        // Fingerprint the key, just to assert that it doesn't
-        // have anything we don't consider hashable
-        if cfg!(debug_assertions) {
-            let _ = key.to_fingerprint(*qcx.dep_context());
-        }
-
-        let prof_timer = qcx.dep_context().profiler().query_provider();
-        let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
-        let dep_node_index = dep_graph.next_virtual_depnode_index();
-        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+            let prof_timer = qcx.dep_context().profiler().query_provider();
+            let result =
+                qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
+            let dep_node_index = dep_graph.next_virtual_depnode_index();
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-        // Similarly, fingerprint the result to assert that
-        // it doesn't have anything not considered hashable.
-        if cfg!(debug_assertions)
+            // Similarly, fingerprint the result to assert that
+            // it doesn't have anything not considered hashable.
+            if cfg!(debug_assertions)
             && let Some(hash_result) = query.hash_result()
-        {
+          {
             qcx.dep_context().with_stable_hashing_context(|mut hcx| {
                 hash_result(&mut hcx, &result);
             });
         }
 
-        return (result, dep_node_index);
-    }
+            return (result, dep_node_index);
+        }
+        Some(data) => data,
+    };
 
     if !query.anon() && !query.eval_always() {
         // `to_dep_node` is expensive for some `DepKind`s.
@@ -446,7 +451,7 @@ where
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
         if let Some(ret) = qcx.start_query(job_id, false, None, || {
-            try_load_from_disk_and_cache_in_memory(query, qcx, &key, &dep_node)
+            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
         }) {
             return ret;
         }
@@ -458,7 +463,7 @@ where
     let (result, dep_node_index) =
         qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
             if query.anon() {
-                return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
+                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
                     query.compute(qcx, key)
                 });
             }
@@ -467,7 +472,7 @@ where
             let dep_node =
                 dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
 
-            dep_graph.with_task(
+            dep_graph_data.with_task(
                 dep_node,
                 (qcx, query),
                 key,
@@ -495,6 +500,7 @@ where
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     query: Q,
+    dep_graph_data: &DepGraphData<Qcx::DepKind>,
     qcx: Qcx,
     key: &Q::Key,
     dep_node: &DepNode<Qcx::DepKind>,
@@ -506,10 +512,9 @@ where
     // Note this function can be called concurrently from the same query
     // We must ensure that this is handled correctly.
 
-    let dep_graph = qcx.dep_context().dep_graph();
-    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?;
+    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;
 
-    debug_assert!(dep_graph.is_green(dep_node));
+    debug_assert!(dep_graph_data.is_green(dep_node));
 
     // First we try to load the result from the on-disk cache.
     // Some things are never cached on disk.
@@ -519,8 +524,9 @@ where
         // The call to `with_query_deserialization` enforces that no new `DepNodes`
         // are created during deserialization. See the docs of that method for more
         // details.
-        let result =
-            dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
+        let result = DepGraph::<Qcx::DepKind>::with_query_deserialization(|| {
+            try_load_from_disk(qcx, prev_dep_node_index)
+        });
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -528,14 +534,11 @@ where
             if std::intrinsics::unlikely(
                 qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
             ) {
-                dep_graph.mark_debug_loaded_from_disk(*dep_node)
+                dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
             }
 
-            let prev_fingerprint = qcx
-                .dep_context()
-                .dep_graph()
-                .prev_fingerprint_of(dep_node)
-                .unwrap_or(Fingerprint::ZERO);
+            let prev_fingerprint =
+                dep_graph_data.prev_fingerprint_of(dep_node).unwrap_or(Fingerprint::ZERO);
             // If `-Zincremental-verify-ich` is specified, re-hash results from
             // the cache and make sure that they have the expected fingerprint.
             //
@@ -547,7 +550,13 @@ where
             if std::intrinsics::unlikely(
                 try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
             ) {
-                incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result());
+                incremental_verify_ich(
+                    *qcx.dep_context(),
+                    dep_graph_data,
+                    &result,
+                    dep_node,
+                    query.hash_result(),
+                );
             }
 
             return Some((result, dep_node_index));
@@ -566,7 +575,7 @@ where
     let prof_timer = qcx.dep_context().profiler().query_provider();
 
     // The dep-graph for this computation is already in-place.
-    let result = dep_graph.with_ignore(|| query.compute(qcx, *key));
+    let result = Qcx::DepKind::with_deps(TaskDepsRef::Ignore, || query.compute(qcx, *key));
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -579,15 +588,22 @@ where
     //
     // See issue #82920 for an example of a miscompilation that would get turned into
     // an ICE by this check
-    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result());
+    incremental_verify_ich(
+        *qcx.dep_context(),
+        dep_graph_data,
+        &result,
+        dep_node,
+        query.hash_result(),
+    );
 
     Some((result, dep_node_index))
 }
 
 #[inline]
-#[instrument(skip(tcx, result, hash_result), level = "debug")]
+#[instrument(skip(tcx, dep_graph_data, result, hash_result), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
     tcx: Tcx,
+    dep_graph_data: &DepGraphData<Tcx::DepKind>,
     result: &V,
     dep_node: &DepNode<Tcx::DepKind>,
     hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -596,7 +612,7 @@ where
     Tcx: DepContext,
 {
     assert!(
-        tcx.dep_graph().is_green(dep_node),
+        dep_graph_data.is_green(dep_node),
         "fingerprint for green query instance not loaded from cache: {dep_node:?}",
     );
 
@@ -604,7 +620,7 @@ where
         tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
     });
 
-    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
+    let old_hash = dep_graph_data.prev_fingerprint_of(dep_node);
 
     if Some(new_hash) != old_hash {
         incremental_verify_ich_failed(