Diffstat (limited to 'compiler/rustc_query_system')
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/debug.rs        14
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/dep_node.rs     92
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs       163
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/mod.rs          84
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/query.rs        22
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/serialized.rs  107
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs           10
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs             113
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs              10
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs         70
-rw-r--r--  compiler/rustc_query_system/src/values.rs                 10
11 files changed, 364 insertions, 331 deletions
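
The thrust of the change, condensed: the `K: DepKind` type parameter threaded through the whole crate is removed. `DepNode<K>` becomes a concrete `DepNode` whose kind is a two-byte index, and the per-compiler hooks move behind a new `Deps` trait. A minimal runnable sketch of the new shape, with `u128` standing in for the compiler's `PackedFingerprint`:

```rust
// Before: `pub struct DepNode<K> { kind: K, hash: PackedFingerprint }`.
// After: the kind is just an index into per-kind tables.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DepKind {
    variant: u16,
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DepNode {
    kind: DepKind,
    hash: u128, // stand-in for the 16-byte `PackedFingerprint`
}

fn main() {
    let node = DepNode { kind: DepKind { variant: 1 }, hash: 0 };
    println!("{node:?}");
    // The diff asserts `size_of::<DepNode>() == 18` on x86/x86_64; a plain
    // `u128` has larger alignment, so this sketch pads past 18, which is
    // why the real type uses the byte-aligned `PackedFingerprint`.
    println!("{}", std::mem::size_of::<DepNode>());
}
```
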
diff --git a/compiler/rustc_query_system/src/dep_graph/debug.rs b/compiler/rustc_query_system/src/dep_graph/debug.rs
index c2c9600f555..103a6c01bd2 100644
--- a/compiler/rustc_query_system/src/dep_graph/debug.rs
+++ b/compiler/rustc_query_system/src/dep_graph/debug.rs
@@ -1,6 +1,6 @@
 //! Code for debugging the dep-graph.
 
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepNode, DepNodeIndex};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync::Lock;
 use std::error::Error;
@@ -28,7 +28,7 @@ impl DepNodeFilter {
     }
 
     /// Tests whether `node` meets the filter, returning true if so.
-    pub fn test<K: DepKind>(&self, node: &DepNode<K>) -> bool {
+    pub fn test(&self, node: &DepNode) -> bool {
         let debug_str = format!("{node:?}");
         self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
     }
@@ -36,14 +36,14 @@ impl DepNodeFilter {
 
 /// A filter like `F -> G` where `F` and `G` are valid dep-node
 /// filters. This can be used to test the source/target independently.
-pub struct EdgeFilter<K: DepKind> {
+pub struct EdgeFilter {
     pub source: DepNodeFilter,
     pub target: DepNodeFilter,
-    pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>,
+    pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode>>,
 }
 
-impl<K: DepKind> EdgeFilter<K> {
-    pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> {
+impl EdgeFilter {
+    pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
         let parts: Vec<_> = test.split("->").collect();
         if parts.len() != 2 {
             Err(format!("expected a filter like `a&b -> c&d`, not `{test}`").into())
@@ -57,7 +57,7 @@ impl<K: DepKind> EdgeFilter<K> {
     }
 
     #[cfg(debug_assertions)]
-    pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
+    pub fn test(&self, source: &DepNode, target: &DepNode) -> bool {
         self.source.test(source) && self.target.test(target)
     }
 }
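
For reference, the filter grammar these types parse is unchanged: `a&b` matches a node whose debug string contains both substrings, and `a&b -> c&d` tests an edge's source and target independently. A standalone sketch of those semantics, with plain strings standing in for `DepNode` debug output:

```rust
// A node matches `a&b` if its debug string contains every `&`-separated part.
fn node_matches(filter: &str, debug_str: &str) -> bool {
    filter.split('&').map(str::trim).all(|f| debug_str.contains(f))
}

// An edge filter `a&b -> c&d` tests source and target independently;
// `None` means the filter was malformed (no `->`).
fn edge_matches(filter: &str, source: &str, target: &str) -> Option<bool> {
    let (src, tgt) = filter.split_once("->")?;
    Some(node_matches(src, source) && node_matches(tgt, target))
}

fn main() {
    assert!(node_matches("typeck & foo", "typeck(foo::bar)"));
    assert_eq!(
        edge_matches("typeck -> layout_of", "typeck(a)", "layout_of(b)"),
        Some(true)
    );
    assert_eq!(edge_matches("typeck", "a", "b"), None);
}
```
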
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 39a4cb1b179..17f96896a50 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -42,36 +42,84 @@
 //!   `DefId` it was computed from. In other cases, too much information gets
 //!   lost during fingerprint computation.
 
-use super::{DepContext, DepKind, FingerprintStyle};
+use super::{DepContext, FingerprintStyle};
 use crate::ich::StableHashingContext;
 
 use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
+use rustc_data_structures::AtomicRef;
 use rustc_hir::definitions::DefPathHash;
 use std::fmt;
 use std::hash::Hash;
 
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-pub struct DepNode<K> {
-    pub kind: K,
+/// This serves as an index into arrays built by `make_dep_kind_array`.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct DepKind {
+    variant: u16,
+}
+
+impl DepKind {
+    #[inline]
+    pub const fn new(variant: u16) -> Self {
+        Self { variant }
+    }
+
+    #[inline]
+    pub const fn as_inner(&self) -> u16 {
+        self.variant
+    }
+
+    #[inline]
+    pub const fn as_usize(&self) -> usize {
+        self.variant as usize
+    }
+}
+
+static_assert_size!(DepKind, 2);
+
+pub fn default_dep_kind_debug(kind: DepKind, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    f.debug_struct("DepKind").field("variant", &kind.variant).finish()
+}
+
+pub static DEP_KIND_DEBUG: AtomicRef<fn(DepKind, &mut fmt::Formatter<'_>) -> fmt::Result> =
+    AtomicRef::new(&(default_dep_kind_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DepKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (*DEP_KIND_DEBUG)(*self, f)
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct DepNode {
+    pub kind: DepKind,
     pub hash: PackedFingerprint,
 }
 
-impl<K: DepKind> DepNode<K> {
+// We keep a lot of `DepNode`s in memory during compilation. It's not
+// required that their size stay the same, but we don't want to change
+// it inadvertently. This assert just ensures we're aware of any change.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+static_assert_size!(DepNode, 18);
+
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+static_assert_size!(DepNode, 24);
+
+impl DepNode {
     /// Creates a new, parameterless DepNode. This method will assert
     /// that the DepNode corresponding to the given DepKind actually
     /// does not require any parameters.
-    pub fn new_no_params<Tcx>(tcx: Tcx, kind: K) -> DepNode<K>
+    pub fn new_no_params<Tcx>(tcx: Tcx, kind: DepKind) -> DepNode
     where
-        Tcx: super::DepContext<DepKind = K>,
+        Tcx: super::DepContext,
     {
         debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
         DepNode { kind, hash: Fingerprint::ZERO.into() }
     }
 
-    pub fn construct<Tcx, Key>(tcx: Tcx, kind: K, arg: &Key) -> DepNode<K>
+    pub fn construct<Tcx, Key>(tcx: Tcx, kind: DepKind, arg: &Key) -> DepNode
     where
-        Tcx: super::DepContext<DepKind = K>,
+        Tcx: super::DepContext,
         Key: DepNodeParams<Tcx>,
     {
         let hash = arg.to_fingerprint(tcx);
@@ -93,18 +141,25 @@ impl<K: DepKind> DepNode<K> {
     /// Construct a DepNode from the given DepKind and DefPathHash. This
     /// method will assert that the given DepKind actually requires a
     /// single DefId/DefPathHash parameter.
-    pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self
+    pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: DepKind) -> Self
     where
-        Tcx: super::DepContext<DepKind = K>,
+        Tcx: super::DepContext,
     {
         debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
         DepNode { kind, hash: def_path_hash.0.into() }
     }
 }
 
-impl<K: DepKind> fmt::Debug for DepNode<K> {
+pub fn default_dep_node_debug(node: DepNode, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    f.debug_struct("DepNode").field("kind", &node.kind).field("hash", &node.hash).finish()
+}
+
+pub static DEP_NODE_DEBUG: AtomicRef<fn(DepNode, &mut fmt::Formatter<'_>) -> fmt::Result> =
+    AtomicRef::new(&(default_dep_node_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DepNode {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        K::debug_node(self, f)
+        (*DEP_NODE_DEBUG)(*self, f)
     }
 }
 
@@ -129,7 +184,7 @@ pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized {
     /// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
     /// It is always valid to return `None` here, in which case incremental
     /// compilation will treat the query as having changed instead of forcing it.
-    fn recover(tcx: Tcx, dep_node: &DepNode<Tcx::DepKind>) -> Option<Self>;
+    fn recover(tcx: Tcx, dep_node: &DepNode) -> Option<Self>;
 }
 
 impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T
@@ -156,7 +211,7 @@ where
     }
 
     #[inline(always)]
-    default fn recover(_: Tcx, _: &DepNode<Tcx::DepKind>) -> Option<Self> {
+    default fn recover(_: Tcx, _: &DepNode) -> Option<Self> {
         None
     }
 }
@@ -216,10 +271,13 @@ pub struct DepKindStruct<Tcx: DepContext> {
     /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
     /// is actually a `DefPathHash`, and can therefore just look up the corresponding
     /// `DefId` in `tcx.def_path_hash_to_def_id`.
-    pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode<Tcx::DepKind>) -> bool>,
+    pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode) -> bool>,
 
     /// Invoke a query to put the on-disk cached value in memory.
-    pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode<Tcx::DepKind>)>,
+    pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode)>,
+
+    /// The name of this dep kind.
+    pub name: &'static &'static str,
 }
 
 /// A "work product" corresponds to a `.o` (or other) file that we
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index fa54e1a2e6a..c7e92d7b2b2 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -17,7 +17,7 @@ use std::sync::atomic::Ordering::Relaxed;
 
 use super::query::DepGraphQuery;
 use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
-use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
+use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
 use crate::dep_graph::EdgesVec;
 use crate::ich::StableHashingContext;
 use crate::query::{QueryContext, QuerySideEffects};
@@ -26,8 +26,8 @@ use crate::query::{QueryContext, QuerySideEffects};
 use {super::debug::EdgeFilter, std::env};
 
 #[derive(Clone)]
-pub struct DepGraph<K: DepKind> {
-    data: Option<Lrc<DepGraphData<K>>>,
+pub struct DepGraph<D: Deps> {
+    data: Option<Lrc<DepGraphData<D>>>,
 
     /// This field is used for assigning DepNodeIndices when running in
     /// non-incremental mode. Even in non-incremental mode we make sure that
@@ -74,16 +74,16 @@ impl DepNodeColor {
     }
 }
 
-pub struct DepGraphData<K: DepKind> {
+pub struct DepGraphData<D: Deps> {
     /// The new encoding of the dependency graph, optimized for red/green
     /// tracking. The `current` field is the dependency graph of only the
     /// current compilation session: We don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
-    current: CurrentDepGraph<K>,
+    current: CurrentDepGraph<D>,
 
     /// The dep-graph from the previous compilation session. It contains all
     /// nodes and edges as well as all fingerprints of nodes that have them.
-    previous: SerializedDepGraph<K>,
+    previous: SerializedDepGraph,
 
     colors: DepNodeColorMap,
 
@@ -95,12 +95,12 @@ pub struct DepGraphData<K: DepKind> {
     /// this map. We can later look for and extract that data.
     previous_work_products: WorkProductMap,
 
-    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
+    dep_node_debug: Lock<FxHashMap<DepNode, String>>,
 
     /// Used by incremental compilation tests to assert that
     /// a particular query result was decoded from disk
     /// (not just marked green)
-    debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
+    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
 }
 
 pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
@@ -112,15 +112,15 @@ where
     stable_hasher.finish()
 }
 
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
     pub fn new(
         profiler: &SelfProfilerRef,
-        prev_graph: SerializedDepGraph<K>,
+        prev_graph: SerializedDepGraph,
         prev_work_products: WorkProductMap,
         encoder: FileEncoder,
         record_graph: bool,
         record_stats: bool,
-    ) -> DepGraph<K> {
+    ) -> DepGraph<D> {
         let prev_graph_node_count = prev_graph.node_count();
 
         let current = CurrentDepGraph::new(
@@ -136,7 +136,7 @@ impl<K: DepKind> DepGraph<K> {
         // Instantiate a dependency-less node only once for anonymous queries.
         let _green_node_index = current.intern_new_node(
             profiler,
-            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
+            DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
             EdgesVec::new(),
             Fingerprint::ZERO,
         );
@@ -146,7 +146,7 @@ impl<K: DepKind> DepGraph<K> {
         let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
             profiler,
             &prev_graph,
-            DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
+            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
             EdgesVec::new(),
             None,
             false,
@@ -181,12 +181,12 @@ impl<K: DepKind> DepGraph<K> {
         }
     }
 
-    pub fn new_disabled() -> DepGraph<K> {
+    pub fn new_disabled() -> DepGraph<D> {
         DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
     }
 
     #[inline]
-    pub fn data(&self) -> Option<&DepGraphData<K>> {
+    pub fn data(&self) -> Option<&DepGraphData<D>> {
         self.data.as_deref()
     }
 
@@ -196,7 +196,7 @@ impl<K: DepKind> DepGraph<K> {
         self.data.is_some()
     }
 
-    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
         if let Some(data) = &self.data {
             data.current.encoder.borrow().with_query(f)
         }
@@ -204,7 +204,7 @@ impl<K: DepKind> DepGraph<K> {
 
     pub fn assert_ignored(&self) {
         if let Some(..) = self.data {
-            K::read_deps(|task_deps| {
+            D::read_deps(|task_deps| {
                 assert_matches!(
                     task_deps,
                     TaskDepsRef::Ignore,
@@ -218,7 +218,7 @@ impl<K: DepKind> DepGraph<K> {
     where
         OP: FnOnce() -> R,
     {
-        K::with_deps(TaskDepsRef::Ignore, op)
+        D::with_deps(TaskDepsRef::Ignore, op)
     }
 
     /// Used to wrap the deserialization of a query result from disk,
@@ -271,13 +271,13 @@ impl<K: DepKind> DepGraph<K> {
     where
         OP: FnOnce() -> R,
     {
-        K::with_deps(TaskDepsRef::Forbid, op)
+        D::with_deps(TaskDepsRef::Forbid, op)
     }
 
     #[inline(always)]
-    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
         &self,
-        key: DepNode<K>,
+        key: DepNode,
         cx: Ctxt,
         arg: A,
         task: fn(Ctxt, A) -> R,
@@ -289,10 +289,10 @@ impl<K: DepKind> DepGraph<K> {
         }
     }
 
-    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
         &self,
         cx: Tcx,
-        dep_kind: K,
+        dep_kind: DepKind,
         op: OP,
     ) -> (R, DepNodeIndex)
     where
@@ -305,7 +305,7 @@ impl<K: DepKind> DepGraph<K> {
     }
 }
 
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
     /// Starts a new dep-graph task. Dep-graph tasks are specified
     /// using a free function (`task`) and **not** a closure -- this
     /// is intentional because we want to exercise tight control over
@@ -334,9 +334,9 @@ impl<K: DepKind> DepGraphData<K> {
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
     #[inline(always)]
-    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
         &self,
-        key: DepNode<K>,
+        key: DepNode,
         cx: Ctxt,
         arg: A,
         task: fn(Ctxt, A) -> R,
@@ -354,7 +354,7 @@ impl<K: DepKind> DepGraphData<K> {
                  - dep-node: {key:?}"
         );
 
-        let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg));
+        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
         let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
             (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
         } else {
@@ -402,10 +402,10 @@ impl<K: DepKind> DepGraphData<K> {
 
     /// Executes something within an "anonymous" task, that is, a task the
     /// `DepNode` of which is determined by the list of inputs it read from.
-    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
         &self,
         cx: Tcx,
-        dep_kind: K,
+        dep_kind: DepKind,
         op: OP,
     ) -> (R, DepNodeIndex)
     where
@@ -414,7 +414,7 @@ impl<K: DepKind> DepGraphData<K> {
         debug_assert!(!cx.is_eval_always(dep_kind));
 
         let task_deps = Lock::new(TaskDeps::default());
-        let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
         let task_deps = task_deps.into_inner();
         let task_deps = task_deps.reads;
 
@@ -461,11 +461,11 @@ impl<K: DepKind> DepGraphData<K> {
     }
 }
 
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
     #[inline]
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
-            K::read_deps(|task_deps| {
+            D::read_deps(|task_deps| {
                 let mut task_deps = match task_deps {
                     TaskDepsRef::Allow(deps) => deps.lock(),
                     TaskDepsRef::EvalAlways => {
@@ -532,9 +532,9 @@ impl<K: DepKind> DepGraph<K> {
     /// FIXME: If the code is changed enough for this node to be marked before requiring the
     /// caller's node, we suppose that those changes will be enough to mark this node red and
     /// force a recomputation using the "normal" way.
-    pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
+    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, A: Debug, R: Debug>(
         &self,
-        node: DepNode<K>,
+        node: DepNode,
         cx: Ctxt,
         key: A,
         result: &R,
@@ -573,7 +573,7 @@ impl<K: DepKind> DepGraph<K> {
             }
 
             let mut edges = EdgesVec::new();
-            K::read_deps(|task_deps| match task_deps {
+            D::read_deps(|task_deps| match task_deps {
                 TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                 TaskDepsRef::EvalAlways => {
                     edges.push(DepNodeIndex::FOREVER_RED_NODE);
@@ -623,9 +623,9 @@ impl<K: DepKind> DepGraph<K> {
     }
 }
 
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
     #[inline]
-    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.current.prev_index_to_index.lock()[prev_index]
         } else {
@@ -634,11 +634,11 @@ impl<K: DepKind> DepGraphData<K> {
     }
 
     #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
         self.dep_node_index_of_opt(dep_node).is_some()
     }
 
-    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.colors.get(prev_index)
         } else {
@@ -660,18 +660,18 @@ impl<K: DepKind> DepGraphData<K> {
     }
 
     #[inline]
-    pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode<K> {
+    pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
         self.previous.index_to_node(prev_index)
     }
 
-    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
         self.debug_loaded_from_disk.lock().insert(dep_node);
     }
 }
 
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
     #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
         self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
     }
 
@@ -687,12 +687,12 @@ impl<K: DepKind> DepGraph<K> {
         &self.data.as_ref().unwrap().previous_work_products
     }
 
-    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
+    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
         self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
     }
 
     #[inline(always)]
-    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
+    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
     where
         F: FnOnce() -> String,
     {
@@ -705,11 +705,11 @@ impl<K: DepKind> DepGraph<K> {
         dep_node_debug.borrow_mut().insert(dep_node, debug_str);
     }
 
-    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
+    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
         self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
     }
 
-    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
         if let Some(ref data) = self.data {
             return data.node_color(dep_node);
         }
@@ -717,25 +717,25 @@ impl<K: DepKind> DepGraph<K> {
         None
     }
 
-    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
         &self,
         qcx: Qcx,
-        dep_node: &DepNode<K>,
+        dep_node: &DepNode,
     ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
         self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
     }
 }
 
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
     /// Try to mark a node index for the node dep_node.
     ///
     /// A node will have an index when it has already been marked green, or when we can mark it
     /// green. This function will mark the current task as a reader of the specified node when
     /// a node index can be found for that node.
-    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
         &self,
         qcx: Qcx,
-        dep_node: &DepNode<K>,
+        dep_node: &DepNode,
     ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
@@ -757,11 +757,11 @@ impl<K: DepKind> DepGraphData<K> {
     }
 
     #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
-    fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
+    fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
         &self,
         qcx: Qcx,
         parent_dep_node_index: SerializedDepNodeIndex,
-        dep_node: &DepNode<K>,
+        dep_node: &DepNode,
         frame: Option<&MarkFrame<'_>>,
     ) -> Option<()> {
         let dep_dep_node_color = self.colors.get(parent_dep_node_index);
@@ -845,11 +845,11 @@ impl<K: DepKind> DepGraphData<K> {
 
     /// Try to mark a dep-node which existed in the previous compilation session as green.
     #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
-    fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
+    fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
         &self,
         qcx: Qcx,
         prev_dep_node_index: SerializedDepNodeIndex,
-        dep_node: &DepNode<K>,
+        dep_node: &DepNode,
         frame: Option<&MarkFrame<'_>>,
     ) -> Option<DepNodeIndex> {
         let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
@@ -916,7 +916,7 @@ impl<K: DepKind> DepGraphData<K> {
     /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
-    fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
+    fn emit_side_effects<Qcx: QueryContext<Deps = D>>(
         &self,
         qcx: Qcx,
         dep_node_index: DepNodeIndex,
@@ -940,16 +940,16 @@ impl<K: DepKind> DepGraphData<K> {
     }
 }
 
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
     /// Returns true if the given node has been marked as red during the
     /// current compilation session. Used in various assertions
-    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
+    pub fn is_red(&self, dep_node: &DepNode) -> bool {
         self.node_color(dep_node) == Some(DepNodeColor::Red)
     }
 
     /// Returns true if the given node has been marked as green during the
     /// current compilation session. Used in various assertions
-    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
+    pub fn is_green(&self, dep_node: &DepNode) -> bool {
         self.node_color(dep_node).is_some_and(|c| c.is_green())
     }
 
@@ -961,7 +961,7 @@ impl<K: DepKind> DepGraph<K> {
     ///
     /// This method will only load queries that will end up in the disk cache.
     /// Other queries will not be executed.
-    pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
+    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
         let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
 
         let data = self.data.as_ref().unwrap();
@@ -1076,9 +1076,9 @@ rustc_index::newtype_index! {
 /// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
 /// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
 /// first, and `data` second.
-pub(super) struct CurrentDepGraph<K: DepKind> {
-    encoder: Steal<GraphEncoder<K>>,
-    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
+pub(super) struct CurrentDepGraph<D: Deps> {
+    encoder: Steal<GraphEncoder<D>>,
+    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
 
     /// This is used to verify that fingerprints do not change between the creation of a node
@@ -1089,7 +1089,7 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
     /// Used to trap when a specific edge is added to the graph.
     /// This is used for debug purposes and is only active with `debug_assertions`.
     #[cfg(debug_assertions)]
-    forbidden_edge: Option<EdgeFilter<K>>,
+    forbidden_edge: Option<EdgeFilter>,
 
     /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
     /// their edges. This has the beneficial side-effect that multiple anonymous
@@ -1116,14 +1116,14 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
     node_intern_event_id: Option<EventId>,
 }
 
-impl<K: DepKind> CurrentDepGraph<K> {
+impl<D: Deps> CurrentDepGraph<D> {
     fn new(
         profiler: &SelfProfilerRef,
         prev_graph_node_count: usize,
         encoder: FileEncoder,
         record_graph: bool,
         record_stats: bool,
-    ) -> CurrentDepGraph<K> {
+    ) -> Self {
         use std::time::{SystemTime, UNIX_EPOCH};
 
         let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
@@ -1178,7 +1178,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
     }
 
     #[cfg(debug_assertions)]
-    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
+    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
         if let Some(forbidden_edge) = &self.forbidden_edge {
             forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
         }
@@ -1192,7 +1192,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
     fn intern_new_node(
         &self,
         profiler: &SelfProfilerRef,
-        key: DepNode<K>,
+        key: DepNode,
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
@@ -1215,8 +1215,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
     fn intern_node(
         &self,
         profiler: &SelfProfilerRef,
-        prev_graph: &SerializedDepGraph<K>,
-        key: DepNode<K>,
+        prev_graph: &SerializedDepGraph,
+        key: DepNode,
         edges: EdgesVec,
         fingerprint: Option<Fingerprint>,
         print_status: bool,
@@ -1289,7 +1289,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
     fn promote_node_and_deps_to_current(
         &self,
         profiler: &SelfProfilerRef,
-        prev_graph: &SerializedDepGraph<K>,
+        prev_graph: &SerializedDepGraph,
         prev_index: SerializedDepNodeIndex,
     ) -> DepNodeIndex {
         self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
@@ -1317,7 +1317,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
     #[inline]
     fn debug_assert_not_in_new_nodes(
         &self,
-        prev_graph: &SerializedDepGraph<K>,
+        prev_graph: &SerializedDepGraph,
         prev_index: SerializedDepNodeIndex,
     ) {
         let node = &prev_graph.index_to_node(prev_index);
@@ -1329,11 +1329,11 @@ impl<K: DepKind> CurrentDepGraph<K> {
 }
 
 #[derive(Debug, Clone, Copy)]
-pub enum TaskDepsRef<'a, K: DepKind> {
+pub enum TaskDepsRef<'a> {
     /// New dependencies can be added to the
     /// `TaskDeps`. This is used when executing a 'normal' query
     /// (no `eval_always` modifier)
-    Allow(&'a Lock<TaskDeps<K>>),
+    Allow(&'a Lock<TaskDeps>),
     /// This is used when executing an `eval_always` query. We don't
     /// need to track dependencies for a query that's always
     /// re-executed -- but we need to know that this is an `eval_always`
@@ -1350,15 +1350,15 @@ pub enum TaskDepsRef<'a, K: DepKind> {
 }
 
 #[derive(Debug)]
-pub struct TaskDeps<K: DepKind> {
+pub struct TaskDeps {
     #[cfg(debug_assertions)]
-    node: Option<DepNode<K>>,
+    node: Option<DepNode>,
     reads: EdgesVec,
     read_set: FxHashSet<DepNodeIndex>,
-    phantom_data: PhantomData<DepNode<K>>,
+    phantom_data: PhantomData<DepNode>,
 }
 
-impl<K: DepKind> Default for TaskDeps<K> {
+impl Default for TaskDeps {
     fn default() -> Self {
         Self {
             #[cfg(debug_assertions)]
@@ -1410,10 +1410,7 @@ impl DepNodeColorMap {
 
 #[inline(never)]
 #[cold]
-pub(crate) fn print_markframe_trace<K: DepKind>(
-    graph: &DepGraph<K>,
-    frame: Option<&MarkFrame<'_>>,
-) {
+pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
     let data = graph.data.as_ref().unwrap();
 
     eprintln!("there was a panic while trying to force a dep node");
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 272028d35bf..624ae680a8f 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -1,11 +1,11 @@
 pub mod debug;
-mod dep_node;
+pub mod dep_node;
 mod edges;
 mod graph;
 mod query;
 mod serialized;
 
-pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
+pub use dep_node::{DepKind, DepKindStruct, DepNode, DepNodeParams, WorkProductId};
 pub use edges::EdgesVec;
 pub use graph::{
     hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
@@ -16,22 +16,20 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 
 use crate::ich::StableHashingContext;
 use rustc_data_structures::profiling::SelfProfilerRef;
-use rustc_serialize::{opaque::FileEncoder, Encodable};
 use rustc_session::Session;
 
-use std::hash::Hash;
-use std::{fmt, panic};
+use std::panic;
 
 use self::graph::{print_markframe_trace, MarkFrame};
 
 pub trait DepContext: Copy {
-    type DepKind: self::DepKind;
+    type Deps: Deps;
 
     /// Create a hashing context for hashing new results.
     fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
 
     /// Access the DepGraph.
-    fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
+    fn dep_graph(&self) -> &DepGraph<Self::Deps>;
 
     /// Access the profiler.
     fn profiler(&self) -> &SelfProfilerRef;
@@ -39,10 +37,10 @@ pub trait DepContext: Copy {
     /// Access the compiler session.
     fn sess(&self) -> &Session;
 
-    fn dep_kind_info(&self, dep_node: Self::DepKind) -> &DepKindStruct<Self>;
+    fn dep_kind_info(&self, dep_node: DepKind) -> &DepKindStruct<Self>;
 
     #[inline(always)]
-    fn fingerprint_style(self, kind: Self::DepKind) -> FingerprintStyle {
+    fn fingerprint_style(self, kind: DepKind) -> FingerprintStyle {
         let data = self.dep_kind_info(kind);
         if data.is_anon {
             return FingerprintStyle::Opaque;
@@ -52,18 +50,14 @@ pub trait DepContext: Copy {
 
     #[inline(always)]
     /// Returns whether this kind always requires evaluation.
-    fn is_eval_always(self, kind: Self::DepKind) -> bool {
+    fn is_eval_always(self, kind: DepKind) -> bool {
         self.dep_kind_info(kind).is_eval_always
     }
 
     /// Try to force a dep node to execute and see if it's green.
     #[inline]
     #[instrument(skip(self, frame), level = "debug")]
-    fn try_force_from_dep_node(
-        self,
-        dep_node: DepNode<Self::DepKind>,
-        frame: Option<&MarkFrame<'_>>,
-    ) -> bool {
+    fn try_force_from_dep_node(self, dep_node: DepNode, frame: Option<&MarkFrame<'_>>) -> bool {
         let cb = self.dep_kind_info(dep_node.kind);
         if let Some(f) = cb.force_from_dep_node {
             if let Err(value) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
@@ -81,7 +75,7 @@ pub trait DepContext: Copy {
     }
 
     /// Load data from the on-disk cache.
-    fn try_load_from_on_disk_cache(self, dep_node: DepNode<Self::DepKind>) {
+    fn try_load_from_on_disk_cache(self, dep_node: DepNode) {
         let cb = self.dep_kind_info(dep_node.kind);
         if let Some(f) = cb.try_load_from_on_disk_cache {
             f(self, dep_node)
@@ -89,15 +83,37 @@ pub trait DepContext: Copy {
     }
 }
 
+pub trait Deps {
+    /// Execute the operation with provided dependencies.
+    fn with_deps<OP, R>(deps: TaskDepsRef<'_>, op: OP) -> R
+    where
+        OP: FnOnce() -> R;
+
+    /// Access dependencies from current implicit context.
+    fn read_deps<OP>(op: OP)
+    where
+        OP: for<'a> FnOnce(TaskDepsRef<'a>);
+
+    /// We use this for most things when incr. comp. is turned off.
+    const DEP_KIND_NULL: DepKind;
+
+    /// We use this to create a forever-red node.
+    const DEP_KIND_RED: DepKind;
+
+    /// This is the highest value a `DepKind` can have. It's used during encoding to
+    /// pack information into the unused bits.
+    const DEP_KIND_MAX: u16;
+}
+
 pub trait HasDepContext: Copy {
-    type DepKind: self::DepKind;
-    type DepContext: self::DepContext<DepKind = Self::DepKind>;
+    type Deps: self::Deps;
+    type DepContext: self::DepContext<Deps = Self::Deps>;
 
     fn dep_context(&self) -> &Self::DepContext;
 }
 
 impl<T: DepContext> HasDepContext for T {
-    type DepKind = T::DepKind;
+    type Deps = T::Deps;
     type DepContext = Self;
 
     fn dep_context(&self) -> &Self::DepContext {
@@ -106,7 +122,7 @@ impl<T: DepContext> HasDepContext for T {
 }
 
 impl<T: HasDepContext, Q: Copy> HasDepContext for (T, Q) {
-    type DepKind = T::DepKind;
+    type Deps = T::Deps;
     type DepContext = T::DepContext;
 
     fn dep_context(&self) -> &Self::DepContext {
@@ -138,31 +154,3 @@ impl FingerprintStyle {
         }
     }
 }
-
-/// Describe the different families of dependency nodes.
-pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
-    /// DepKind to use when incr. comp. is turned off.
-    const NULL: Self;
-
-    /// DepKind to use to create the initial forever-red node.
-    const RED: Self;
-
-    /// Implementation of `std::fmt::Debug` for `DepNode`.
-    fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
-
-    /// Execute the operation with provided dependencies.
-    fn with_deps<OP, R>(deps: TaskDepsRef<'_, Self>, op: OP) -> R
-    where
-        OP: FnOnce() -> R;
-
-    /// Access dependencies from current implicit context.
-    fn read_deps<OP>(op: OP)
-    where
-        OP: for<'a> FnOnce(TaskDepsRef<'a, Self>);
-
-    fn from_u16(u: u16) -> Self;
-
-    fn to_u16(self) -> u16;
-
-    const MAX: u16;
-}
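
The deleted `DepKind` trait's duties are now split between the `Deps` trait above and the `DepKindStruct` table that `dep_kind_info` returns entries from. A standalone sketch of dispatching through a per-kind table of that shape (fields and kinds invented for the demo):

```rust
// Simplified stand-in for `DepKindStruct`: a table indexed by the `DepKind`
// discriminant, holding per-kind callbacks.
struct KindInfo {
    name: &'static str,
    force: Option<fn(u64) -> bool>,
}

fn force_typeck(_hash: u64) -> bool {
    true // pretend we recomputed the query and it came out green
}

static KIND_TABLE: &[KindInfo] = &[
    KindInfo { name: "Null", force: None },
    KindInfo { name: "typeck", force: Some(force_typeck) },
];

// Mirrors the shape of `try_force_from_dep_node`: look up the kind's entry
// and call its callback if one was registered.
fn try_force(kind: usize, hash: u64) -> bool {
    let info = &KIND_TABLE[kind];
    match info.force {
        Some(f) => f(hash),
        None => panic!("no `force` callback for dep kind `{}`", info.name),
    }
}

fn main() {
    assert!(try_force(1, 42));
}
```
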
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
index 5cbc6bf8f8a..5969e5fbe98 100644
--- a/compiler/rustc_query_system/src/dep_graph/query.rs
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -2,16 +2,16 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
 use rustc_index::IndexVec;
 
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepNode, DepNodeIndex};
 
-pub struct DepGraphQuery<K> {
-    pub graph: Graph<DepNode<K>, ()>,
-    pub indices: FxHashMap<DepNode<K>, NodeIndex>,
+pub struct DepGraphQuery {
+    pub graph: Graph<DepNode, ()>,
+    pub indices: FxHashMap<DepNode, NodeIndex>,
     pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
 }
 
-impl<K: DepKind> DepGraphQuery<K> {
-    pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
+impl DepGraphQuery {
+    pub fn new(prev_node_count: usize) -> DepGraphQuery {
         let node_count = prev_node_count + prev_node_count / 4;
         let edge_count = 6 * node_count;
 
@@ -22,7 +22,7 @@ impl<K: DepKind> DepGraphQuery<K> {
         DepGraphQuery { graph, indices, dep_index_to_index }
     }
 
-    pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
+    pub fn push(&mut self, index: DepNodeIndex, node: DepNode, edges: &[DepNodeIndex]) {
         let source = self.graph.add_node(node);
         self.dep_index_to_index.insert(index, source);
         self.indices.insert(node, source);
@@ -37,11 +37,11 @@ impl<K: DepKind> DepGraphQuery<K> {
         }
     }
 
-    pub fn nodes(&self) -> Vec<&DepNode<K>> {
+    pub fn nodes(&self) -> Vec<&DepNode> {
         self.graph.all_nodes().iter().map(|n| &n.data).collect()
     }
 
-    pub fn edges(&self) -> Vec<(&DepNode<K>, &DepNode<K>)> {
+    pub fn edges(&self) -> Vec<(&DepNode, &DepNode)> {
         self.graph
             .all_edges()
             .iter()
@@ -50,7 +50,7 @@ impl<K: DepKind> DepGraphQuery<K> {
             .collect()
     }
 
-    fn reachable_nodes(&self, node: &DepNode<K>, direction: Direction) -> Vec<&DepNode<K>> {
+    fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
         if let Some(&index) = self.indices.get(node) {
             self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
         } else {
@@ -59,7 +59,7 @@ impl<K: DepKind> DepGraphQuery<K> {
     }
 
     /// All nodes that can reach `node`.
-    pub fn transitive_predecessors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
+    pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
         self.reachable_nodes(node, INCOMING)
     }
 }
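
`transitive_predecessors` is a reverse-reachability query over the recorded graph. The same computation on a toy edge list:

```rust
use std::collections::{HashMap, HashSet};

// Walk the graph backwards from `node`, collecting everything that can
// reach it (a depth-first traversal over reversed edges).
fn transitive_predecessors<'a>(
    edges: &[(&'a str, &'a str)],
    node: &'a str,
) -> HashSet<&'a str> {
    let mut rev: HashMap<&str, Vec<&str>> = HashMap::new();
    for &(src, tgt) in edges {
        rev.entry(tgt).or_default().push(src);
    }
    let mut seen = HashSet::new();
    let mut stack = vec![node];
    while let Some(n) = stack.pop() {
        for &p in rev.get(n).into_iter().flatten() {
            if seen.insert(p) {
                stack.push(p);
            }
        }
    }
    seen
}

fn main() {
    let edges = [("a", "b"), ("b", "c"), ("d", "c")];
    assert_eq!(transitive_predecessors(&edges, "c"), HashSet::from(["a", "b", "d"]));
}
```
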
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index 07eb10cba02..fcf46be6e6f 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -36,7 +36,7 @@
 //! store it directly after the header with leb128.
 
 use super::query::DepGraphQuery;
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepKind, DepNode, DepNodeIndex, Deps};
 use crate::dep_graph::EdgesVec;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fingerprint::PackedFingerprint;
@@ -70,9 +70,9 @@ const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;
 
 /// Data for use when recompiling the **current crate**.
 #[derive(Debug)]
-pub struct SerializedDepGraph<K: DepKind> {
+pub struct SerializedDepGraph {
     /// The set of all DepNodes in the graph
-    nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>>,
+    nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
     /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
     /// the DepNode at the same index in the nodes vector.
     fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
@@ -88,7 +88,7 @@ pub struct SerializedDepGraph<K: DepKind> {
     index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
 }
 
-impl<K: DepKind> Default for SerializedDepGraph<K> {
+impl Default for SerializedDepGraph {
     fn default() -> Self {
         SerializedDepGraph {
             nodes: Default::default(),
@@ -100,7 +100,7 @@ impl<K: DepKind> Default for SerializedDepGraph<K> {
     }
 }
 
-impl<K: DepKind> SerializedDepGraph<K> {
+impl SerializedDepGraph {
     #[inline]
     pub fn edge_targets_from(
         &self,
@@ -134,13 +134,13 @@ impl<K: DepKind> SerializedDepGraph<K> {
     }
 
     #[inline]
-    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
+    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
         self.nodes[dep_node_index]
     }
 
     #[inline]
-    pub fn node_to_index_opt(&self, dep_node: &DepNode<K>) -> Option<SerializedDepNodeIndex> {
-        self.index.get(dep_node.kind.to_u16() as usize)?.get(&dep_node.hash).cloned()
+    pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
+        self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
     }
 
     #[inline]
@@ -184,11 +184,9 @@ fn mask(bits: usize) -> usize {
     usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits)
 }
 
-impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
-    for SerializedDepGraph<K>
-{
+impl SerializedDepGraph {
     #[instrument(level = "debug", skip(d))]
-    fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
+    pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> SerializedDepGraph {
         // The last 16 bytes are the node count and edge count.
         debug!("position: {:?}", d.position());
         let (node_count, edge_count) =
@@ -217,14 +215,14 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
         // least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
         // length is about the same fractional overhead and it amortizes for yet greater lengths.
         let mut edge_list_data = Vec::with_capacity(
-            graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<K>>(),
+            graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(),
         );
 
         for _index in 0..node_count {
             // Decode the header for this node; the header packs together as many of the fixed-size
             // fields as possible to limit the number of times we update decoder state.
             let node_header =
-                SerializedNodeHeader::<K> { bytes: d.read_array(), _marker: PhantomData };
+                SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };
 
             let _i: SerializedDepNodeIndex = nodes.push(node_header.node());
             debug_assert_eq!(_i.index(), _index);
@@ -256,12 +254,12 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
         edge_list_data.extend(&[0u8; DEP_NODE_PAD]);
 
         // Read the number of each dep kind and use it to create a hash map with a suitable size.
-        let mut index: Vec<_> = (0..(K::MAX as usize + 1))
+        let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
             .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
             .collect();
 
         for (idx, node) in nodes.iter_enumerated() {
-            index[node.kind.to_u16() as usize].insert(node.hash, idx);
+            index[node.kind.as_usize()].insert(node.hash, idx);
         }
 
         SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
@@ -276,20 +274,20 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
 /// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
 /// * The byte width of the encoded edges for this node
 /// * In whatever bits remain, the length of the edge list for this node, if it fits
-struct SerializedNodeHeader<K> {
+struct SerializedNodeHeader<D> {
     // 2 bytes for the DepNode
     // 16 for Fingerprint in DepNode
     // 16 for Fingerprint in NodeInfo
     bytes: [u8; 34],
-    _marker: PhantomData<K>,
+    _marker: PhantomData<D>,
 }
 
 // The fields of a `SerializedNodeHeader`; this struct is an implementation detail and exists only
 // to make the implementation of `SerializedNodeHeader` simpler.
-struct Unpacked<K> {
+struct Unpacked {
     len: Option<usize>,
     bytes_per_index: usize,
-    kind: K,
+    kind: DepKind,
     hash: PackedFingerprint,
     fingerprint: Fingerprint,
 }
@@ -301,20 +299,20 @@ struct Unpacked<K> {
 // 0..M    length of the edge
 // M..M+N  bytes per index
 // M+N..16 kind
-impl<K: DepKind> SerializedNodeHeader<K> {
-    const TOTAL_BITS: usize = std::mem::size_of::<K>() * 8;
+impl<D: Deps> SerializedNodeHeader<D> {
+    const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8;
     const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
     const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
-    const KIND_BITS: usize = Self::TOTAL_BITS - K::MAX.leading_zeros() as usize;
+    const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
     const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
 
     #[inline]
-    fn new(node_info: &NodeInfo<K>) -> Self {
+    fn new(node_info: &NodeInfo) -> Self {
         debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);
 
         let NodeInfo { node, fingerprint, edges } = node_info;
 
-        let mut head = node.kind.to_u16();
+        let mut head = node.kind.as_inner();
 
         let free_bytes = edges.max_index().leading_zeros() as usize / 8;
         let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
@@ -347,7 +345,7 @@ impl<K: DepKind> SerializedNodeHeader<K> {
     }
 
     #[inline]
-    fn unpack(&self) -> Unpacked<K> {
+    fn unpack(&self) -> Unpacked {
         let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
         let hash = self.bytes[2..18].try_into().unwrap();
         let fingerprint = self.bytes[18..].try_into().unwrap();
@@ -359,7 +357,7 @@ impl<K: DepKind> SerializedNodeHeader<K> {
         Unpacked {
             len: len.checked_sub(1),
             bytes_per_index: bytes_per_index as usize + 1,
-            kind: DepKind::from_u16(kind),
+            kind: DepKind::new(kind),
             hash: Fingerprint::from_le_bytes(hash).into(),
             fingerprint: Fingerprint::from_le_bytes(fingerprint),
         }
@@ -381,7 +379,7 @@ impl<K: DepKind> SerializedNodeHeader<K> {
     }
 
     #[inline]
-    fn node(&self) -> DepNode<K> {
+    fn node(&self) -> DepNode {
         let Unpacked { kind, hash, .. } = self.unpack();
         DepNode { kind, hash }
     }
@@ -395,15 +393,15 @@ impl<K: DepKind> SerializedNodeHeader<K> {
 }
 
 #[derive(Debug)]
-struct NodeInfo<K: DepKind> {
-    node: DepNode<K>,
+struct NodeInfo {
+    node: DepNode,
     fingerprint: Fingerprint,
     edges: EdgesVec,
 }
 
-impl<K: DepKind> Encodable<FileEncoder> for NodeInfo<K> {
-    fn encode(&self, e: &mut FileEncoder) {
-        let header = SerializedNodeHeader::new(self);
+impl NodeInfo {
+    fn encode<D: Deps>(&self, e: &mut FileEncoder) {
+        let header = SerializedNodeHeader::<D>::new(self);
         e.write_array(header.bytes);
 
         if header.len().is_none() {
@@ -420,41 +418,43 @@ impl<K: DepKind> Encodable<FileEncoder> for NodeInfo<K> {
     }
 }
 
-struct Stat<K: DepKind> {
-    kind: K,
+struct Stat {
+    kind: DepKind,
     node_counter: u64,
     edge_counter: u64,
 }
 
-struct EncoderState<K: DepKind> {
+struct EncoderState<D: Deps> {
     encoder: FileEncoder,
     total_node_count: usize,
     total_edge_count: usize,
-    stats: Option<FxHashMap<K, Stat<K>>>,
+    stats: Option<FxHashMap<DepKind, Stat>>,
 
     /// Stores the number of times we've encoded each dep kind.
     kind_stats: Vec<u32>,
+    marker: PhantomData<D>,
 }
 
-impl<K: DepKind> EncoderState<K> {
+impl<D: Deps> EncoderState<D> {
     fn new(encoder: FileEncoder, record_stats: bool) -> Self {
         Self {
             encoder,
             total_edge_count: 0,
             total_node_count: 0,
             stats: record_stats.then(FxHashMap::default),
-            kind_stats: iter::repeat(0).take(K::MAX as usize + 1).collect(),
+            kind_stats: iter::repeat(0).take(D::DEP_KIND_MAX as usize + 1).collect(),
+            marker: PhantomData,
         }
     }
 
     fn encode_node(
         &mut self,
-        node: &NodeInfo<K>,
-        record_graph: &Option<Lock<DepGraphQuery<K>>>,
+        node: &NodeInfo,
+        record_graph: &Option<Lock<DepGraphQuery>>,
     ) -> DepNodeIndex {
         let index = DepNodeIndex::new(self.total_node_count);
         self.total_node_count += 1;
-        self.kind_stats[node.node.kind.to_u16() as usize] += 1;
+        self.kind_stats[node.node.kind.as_usize()] += 1;
 
         let edge_count = node.edges.len();
         self.total_edge_count += edge_count;
@@ -475,12 +475,19 @@ impl<K: DepKind> EncoderState<K> {
         }
 
         let encoder = &mut self.encoder;
-        node.encode(encoder);
+        node.encode::<D>(encoder);
         index
     }
 
     fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
-        let Self { mut encoder, total_node_count, total_edge_count, stats: _, kind_stats } = self;
+        let Self {
+            mut encoder,
+            total_node_count,
+            total_edge_count,
+            stats: _,
+            kind_stats,
+            marker: _,
+        } = self;
 
         let node_count = total_node_count.try_into().unwrap();
         let edge_count = total_edge_count.try_into().unwrap();
@@ -506,12 +513,12 @@ impl<K: DepKind> EncoderState<K> {
     }
 }
 
-pub struct GraphEncoder<K: DepKind> {
-    status: Lock<EncoderState<K>>,
-    record_graph: Option<Lock<DepGraphQuery<K>>>,
+pub struct GraphEncoder<D: Deps> {
+    status: Lock<EncoderState<D>>,
+    record_graph: Option<Lock<DepGraphQuery>>,
 }
 
-impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
+impl<D: Deps> GraphEncoder<D> {
     pub fn new(
         encoder: FileEncoder,
         prev_node_count: usize,
@@ -523,7 +530,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
         GraphEncoder { status, record_graph }
     }
 
-    pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+    pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
         if let Some(record_graph) = &self.record_graph {
             f(&record_graph.lock())
         }
@@ -584,7 +591,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
     pub(crate) fn send(
         &self,
         profiler: &SelfProfilerRef,
-        node: DepNode<K>,
+        node: DepNode,
         fingerprint: Fingerprint,
         edges: EdgesVec,
     ) -> DepNodeIndex {
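
The node header packs the kind, the byte width of the edge indices, and (when it fits) the edge-list length into a single `u16`, with the kind in the low bits as `new` above shows. A self-contained pack/unpack sketch of that scheme, using a fixed bit budget rather than one computed from `DEP_KIND_MAX`:

```rust
// Fixed bit budget for the demo; an inline length of 0 means the real
// length is stored out of line, so inline lengths are offset by one.
const KIND_BITS: u32 = 7;
const WIDTH_BITS: u32 = 2; // bytes-per-index minus one (1..=4 bytes)
const LEN_BITS: u32 = 16 - KIND_BITS - WIDTH_BITS;

fn pack(kind: u16, bytes_per_index: u16, len: Option<u16>) -> u16 {
    debug_assert!(kind < (1 << KIND_BITS));
    debug_assert!(len.map_or(0, |l| l + 1) < (1 << LEN_BITS));
    let mut head = kind;
    head |= (bytes_per_index - 1) << KIND_BITS;
    head |= len.map_or(0, |l| l + 1) << (KIND_BITS + WIDTH_BITS);
    head
}

fn unpack(head: u16) -> (u16, u16, Option<u16>) {
    let kind = head & ((1 << KIND_BITS) - 1);
    let bytes_per_index = ((head >> KIND_BITS) & ((1 << WIDTH_BITS) - 1)) + 1;
    let len = (head >> (KIND_BITS + WIDTH_BITS)).checked_sub(1);
    (kind, bytes_per_index, len)
}

fn main() {
    assert_eq!(unpack(pack(42, 3, Some(5))), (42, 3, Some(5)));
    assert_eq!(unpack(pack(42, 3, None)), (42, 3, None)); // length out of line
}
```
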
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index d14c6315dc1..c025fac2631 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -1,6 +1,6 @@
 //! Query configuration and description traits.
 
-use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
 use crate::error::HandleCycleError;
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
@@ -27,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn format_value(self) -> fn(&Self::Value) -> String;
 
     // Don't use this method to access query results; instead use the methods on TyCtxt
-    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
     where
         Qcx: 'a;
 
@@ -57,7 +57,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn value_from_cycle_error(
         self,
         tcx: Qcx::DepContext,
-        cycle: &[QueryInfo<Qcx::DepKind>],
+        cycle: &[QueryInfo],
         guar: ErrorGuaranteed,
     ) -> Self::Value;
 
@@ -66,12 +66,12 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn depth_limit(self) -> bool;
     fn feedable(self) -> bool;
 
-    fn dep_kind(self) -> Qcx::DepKind;
+    fn dep_kind(self) -> DepKind;
     fn handle_cycle_error(self) -> HandleCycleError;
     fn hash_result(self) -> HashResult<Self::Value>;
 
     // Just here for convenience and for checking that the key matches the kind; don't override this.
-    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode {
         DepNode::construct(tcx, self.dep_kind(), key)
     }
 }
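
`construct_dep_node` builds a node's identity from its kind plus a fingerprint of the query key. A toy equivalent, with std's 64-bit hasher standing in for the compiler's stable 128-bit `Fingerprint`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Debug, PartialEq, Eq, Hash)]
struct Node {
    kind: u16,
    hash: u64,
}

// Identity = kind + hash of the key, so equal keys map to equal nodes.
fn construct<K: Hash>(kind: u16, key: &K) -> Node {
    let mut h = DefaultHasher::new();
    key.hash(&mut h);
    Node { kind, hash: h.finish() }
}

fn main() {
    assert_eq!(construct(7, &"typeck_key"), construct(7, &"typeck_key"));
    assert_ne!(construct(7, &"a"), construct(8, &"a")); // kind is part of identity
}
```
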
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 6c01dc3c00d..2e9ebde296c 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,9 +1,8 @@
-use crate::dep_graph::DepKind;
+use crate::dep_graph::DepContext;
 use crate::error::CycleStack;
 use crate::query::plumbing::CycleError;
+use crate::query::DepKind;
 use crate::query::{QueryContext, QueryStackFrame};
-use core::marker::PhantomData;
-
 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{
     Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
@@ -30,48 +29,48 @@ use {
 
 /// Represents a span and a query key.
 #[derive(Clone, Debug)]
-pub struct QueryInfo<D: DepKind> {
+pub struct QueryInfo {
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
-    pub query: QueryStackFrame<D>,
+    pub query: QueryStackFrame,
 }
 
-pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
 
 /// A value uniquely identifying an active query job.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
 pub struct QueryJobId(pub NonZeroU64);
 
 impl QueryJobId {
-    fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
+    fn query(self, map: &QueryMap) -> QueryStackFrame {
         map.get(&self).unwrap().query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
+    fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
+    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
+    fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
 #[derive(Clone)]
-pub struct QueryJobInfo<D: DepKind> {
-    pub query: QueryStackFrame<D>,
-    pub job: QueryJob<D>,
+pub struct QueryJobInfo {
+    pub query: QueryStackFrame,
+    pub job: QueryJob,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D: DepKind> {
+pub struct QueryJob {
     pub id: QueryJobId,
 
     /// The span corresponding to the reason for which this query was required.
@@ -82,11 +81,10 @@ pub struct QueryJob<D: DepKind> {
 
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D>>,
-    spooky: core::marker::PhantomData<D>,
+    latch: Option<QueryLatch>,
 }
 
-impl<D: DepKind> QueryJob<D> {
+impl QueryJob {
     /// Creates a new query job.
     #[inline]
     pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@@ -96,12 +94,11 @@ impl<D: DepKind> QueryJob<D> {
             parent,
             #[cfg(parallel_compiler)]
             latch: None,
-            spooky: PhantomData,
         }
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -124,12 +121,12 @@ impl<D: DepKind> QueryJob<D> {
 }
 
 impl QueryJobId {
-    pub(super) fn find_cycle_in_stack<D: DepKind>(
+    pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D>,
+        query_map: QueryMap,
         current_job: &Option<QueryJobId>,
         span: Span,
-    ) -> CycleError<D> {
+    ) -> CycleError {
         // Find the waitee among `current_job`'s parents
         let mut cycle = Vec::new();
         let mut current_job = Option::clone(current_job);
@@ -163,18 +160,18 @@ impl QueryJobId {
 
     #[cold]
     #[inline(never)]
-    pub fn try_find_layout_root<D: DepKind>(
+    pub fn try_find_layout_root(
         &self,
-        query_map: QueryMap<D>,
-    ) -> Option<(QueryJobInfo<D>, usize)> {
+        query_map: QueryMap,
+        layout_of_kind: DepKind,
+    ) -> Option<(QueryJobInfo, usize)> {
         let mut last_layout = None;
         let mut current_id = Some(*self);
         let mut depth = 0;
 
         while let Some(id) = current_id {
             let info = query_map.get(&id).unwrap();
-            // FIXME: This string comparison should probably not be done.
-            if format!("{:?}", info.query.dep_kind) == "layout_of" {
+            if info.query.dep_kind == layout_of_kind {
                 depth += 1;
                 last_layout = Some((info.clone(), depth));
             }
@@ -185,15 +182,15 @@ impl QueryJobId {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<D: DepKind> {
+struct QueryWaiter {
     query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
-    cycle: Mutex<Option<CycleError<D>>>,
+    cycle: Mutex<Option<CycleError>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: DepKind> QueryWaiter<D> {
+impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -201,19 +198,19 @@ impl<D: DepKind> QueryWaiter<D> {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D: DepKind> {
+struct QueryLatchInfo {
     complete: bool,
-    waiters: Vec<Arc<QueryWaiter<D>>>,
+    waiters: Vec<Arc<QueryWaiter>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D: DepKind> {
-    info: Arc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+    info: Arc<Mutex<QueryLatchInfo>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: DepKind> QueryLatch<D> {
+impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
             info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -221,11 +218,7 @@ impl<D: DepKind> QueryLatch<D> {
     }
 
     /// Waits for the query job to complete.
-    pub(super) fn wait_on(
-        &self,
-        query: Option<QueryJobId>,
-        span: Span,
-    ) -> Result<(), CycleError<D>> {
+    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
         let waiter =
             Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -240,7 +233,7 @@ impl<D: DepKind> QueryLatch<D> {
     }
 
     /// Blocks the current thread on this latch until the query job completes.
-    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -274,7 +267,7 @@ impl<D: DepKind> QueryLatch<D> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -296,14 +289,9 @@ type Waiter = (QueryJobId, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls return None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<F, D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId,
-    mut visit: F,
-) -> Option<Option<Waiter>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
     F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
-    D: DepKind,
 {
     // Visit the parent query, which is a non-resumable waiter since it's on the same stack
     if let Some(parent) = query.parent(query_map) {
@@ -332,8 +320,8 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn cycle_check(
+    query_map: &QueryMap,
     query: QueryJobId,
     span: Span,
     stack: &mut Vec<(Span, QueryJobId)>,
@@ -373,8 +361,8 @@ fn cycle_check<D: DepKind>(
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth-first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn connected_to_root(
+    query_map: &QueryMap,
     query: QueryJobId,
     visited: &mut FxHashSet<QueryJobId>,
 ) -> bool {
@@ -396,10 +384,9 @@ fn connected_to_root<D: DepKind>(
 
 // Deterministically pick a query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
     F: Fn(&T) -> (Span, QueryJobId),
-    D: DepKind,
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
@@ -423,10 +410,10 @@ where
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
 #[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn remove_cycle(
+    query_map: &QueryMap,
     jobs: &mut Vec<QueryJobId>,
-    wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
+    wakelist: &mut Vec<Arc<QueryWaiter>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -528,7 +515,7 @@ fn remove_cycle<D: DepKind>(
 /// There may be multiple cycles involved in a deadlock, so this searches
 /// all active queries for cycles before finally resuming all the waiters at once.
 #[cfg(parallel_compiler)]
-pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
     let on_panic = defer(|| {
         eprintln!("deadlock handler panicked, aborting process");
         process::abort();
@@ -566,9 +553,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
 
 #[inline(never)]
 #[cold]
-pub(crate) fn report_cycle<'a, D: DepKind>(
+pub(crate) fn report_cycle<'a>(
     sess: &'a Session,
-    CycleError { usage, cycle: stack }: &CycleError<D>,
+    CycleError { usage, cycle: stack }: &CycleError,
 ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
     assert!(!stack.is_empty());
 
@@ -655,8 +642,10 @@ pub fn print_query_stack<Qcx: QueryContext>(
         if let Some(ref mut file) = file {
             let _ = writeln!(
                 file,
-                "#{} [{:?}] {}",
-                count_total, query_info.query.dep_kind, query_info.query.description
+                "#{} [{}] {}",
+                count_total,
+                qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
+                query_info.query.description
             );
         }
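
Two things happen in this file beyond the mechanical removal of `<D: DepKind>`: `QueryJob` loses the `spooky: PhantomData<D>` field that only existed to use the parameter, and `try_find_layout_root` swaps a `Debug`-string comparison for direct equality against a `layout_of_kind: DepKind` supplied by the caller. A standalone sketch of that second fix; the `DepKind` and `LAYOUT_OF` below are stand-ins, not the compiler's types:

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct DepKind(u16);

    const LAYOUT_OF: DepKind = DepKind(3);

    fn is_layout_of(kind: DepKind) -> bool {
        // Before: format!("{kind:?}") == "layout_of", which allocates a
        // String per check and silently breaks if the Debug output changes.
        // After: a cheap equality test against a kind the caller passes in.
        kind == LAYOUT_OF
    }

    fn main() {
        assert!(is_layout_of(DepKind(3)));
        assert!(!is_layout_of(DepKind(4)));
    }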
 
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index f7619d75be7..05dee9f12db 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -28,27 +28,27 @@ use thin_vec::ThinVec;
 ///
 /// This is mostly used for error reporting in the case of cycles.
 #[derive(Clone, Debug)]
-pub struct QueryStackFrame<D: DepKind> {
+pub struct QueryStackFrame {
     pub description: String,
     span: Option<Span>,
     pub def_id: Option<DefId>,
     pub def_kind: Option<DefKind>,
     pub ty_adt_id: Option<DefId>,
-    pub dep_kind: D,
+    pub dep_kind: DepKind,
     /// This hash is used to deterministically pick a query to remove
     /// when breaking cycles in the parallel compiler.
     #[cfg(parallel_compiler)]
     hash: Hash64,
 }
 
-impl<D: DepKind> QueryStackFrame<D> {
+impl QueryStackFrame {
     #[inline]
     pub fn new(
         description: String,
         span: Option<Span>,
         def_id: Option<DefId>,
         def_kind: Option<DefKind>,
-        dep_kind: D,
+        dep_kind: DepKind,
         ty_adt_id: Option<DefId>,
         _hash: impl FnOnce() -> Hash64,
     ) -> Self {
@@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
     /// Get the query information from the TLS context.
     fn current_query_job(self) -> Option<QueryJobId>;
 
-    fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>;
+    fn try_collect_active_jobs(self) -> Option<QueryMap>;
 
     /// Load side effects associated with the node in the previous session.
     fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
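
`QueryStackFrame::new` still takes the hash as `_hash: impl FnOnce() -> Hash64`, so the potentially expensive hash is computed only when the `#[cfg(parallel_compiler)]` field actually exists. A standalone sketch of the trick, using the stable `debug_assertions` cfg in place of the compiler-internal `parallel_compiler` one; `Frame` and its fields are illustrative:

    #[allow(dead_code)]
    struct Frame {
        description: String,
        // Present only in some builds, like the real frame's `hash` field.
        #[cfg(debug_assertions)]
        hash: u64,
    }

    impl Frame {
        fn new(description: String, _hash: impl FnOnce() -> u64) -> Self {
            Frame {
                description,
                // The closure runs only when the field exists; otherwise it
                // is dropped unused and no hash is ever computed.
                #[cfg(debug_assertions)]
                hash: _hash(),
            }
        }
    }

    fn main() {
        let frame = Frame::new("example".to_string(), || 0xdead_beef);
        println!("{}", frame.description);
    }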
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 07db15e6d8b..f93edffca79 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,8 +2,8 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
-use crate::dep_graph::{DepGraphData, HasDepContext};
+use crate::dep_graph::DepGraphData;
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
 #[cfg(parallel_compiler)]
@@ -30,24 +30,23 @@ use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
-pub struct QueryState<K, D: DepKind> {
-    active: Sharded<FxHashMap<K, QueryResult<D>>>,
+pub struct QueryState<K> {
+    active: Sharded<FxHashMap<K, QueryResult>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D: DepKind> {
+enum QueryResult {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D>),
+    Started(QueryJob),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error, which
     /// unwinds as a silent panic.
     Poisoned,
 }
 
-impl<K, D> QueryState<K, D>
+impl<K> QueryState<K>
 where
     K: Eq + Hash + Copy + Debug,
-    D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
         self.active.lock_shards().all(|shard| shard.is_empty())
@@ -56,8 +55,8 @@ where
     pub fn try_collect_active_jobs<Qcx: Copy>(
         &self,
         qcx: Qcx,
-        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
-        jobs: &mut QueryMap<D>,
+        make_query: fn(Qcx, K) -> QueryStackFrame,
+        jobs: &mut QueryMap,
     ) -> Option<()> {
         let mut active = Vec::new();
 
@@ -82,25 +81,25 @@ where
     }
 }
 
-impl<K, D: DepKind> Default for QueryState<K, D> {
-    fn default() -> QueryState<K, D> {
+impl<K> Default for QueryState<K> {
+    fn default() -> QueryState<K> {
         QueryState { active: Default::default() }
     }
 }
 
 /// A type representing the responsibility to execute the query job for the `key` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, K, D: DepKind>
+struct JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
 {
-    state: &'tcx QueryState<K, D>,
+    state: &'tcx QueryState<K>,
     key: K,
 }
 
 #[cold]
 #[inline(never)]
-fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value
+fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
@@ -112,7 +111,7 @@ where
 fn handle_cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    cycle_error: &CycleError<Qcx::DepKind>,
+    cycle_error: &CycleError,
     mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
 ) -> Q::Value
 where
@@ -137,7 +136,7 @@ where
     }
 }
 
-impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
+impl<'tcx, K> JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
 {
@@ -169,10 +168,9 @@ where
     }
 }
 
-impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
-    D: DepKind,
 {
     #[inline(never)]
     #[cold]
@@ -195,10 +193,10 @@ where
 }
 
 #[derive(Clone)]
-pub(crate) struct CycleError<D: DepKind> {
+pub(crate) struct CycleError {
     /// The query and related span that use the cycle.
-    pub usage: Option<(Span, QueryStackFrame<D>)>,
-    pub cycle: Vec<QueryInfo<D>>,
+    pub usage: Option<(Span, QueryStackFrame)>,
+    pub cycle: Vec<QueryInfo>,
 }
 
 /// Checks if the query is already computed and in the cache.
@@ -248,7 +246,7 @@ fn wait_for_query<Q, Qcx>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    latch: QueryLatch<Qcx::DepKind>,
+    latch: QueryLatch,
     current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
@@ -296,7 +294,7 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
@@ -364,10 +362,10 @@ where
 fn execute_job<Q, Qcx, const INCR: bool>(
     query: Q,
     qcx: Qcx,
-    state: &QueryState<Q::Key, Qcx::DepKind>,
+    state: &QueryState<Q::Key>,
     key: Q::Key,
     id: QueryJobId,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
@@ -474,9 +472,9 @@ where
 fn execute_job_incr<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    dep_graph_data: &DepGraphData<Qcx::Deps>,
     key: Q::Key,
-    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+    mut dep_node_opt: Option<DepNode>,
     job_id: QueryJobId,
 ) -> (Q::Value, DepNodeIndex)
 where
@@ -540,10 +538,10 @@ where
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     query: Q,
-    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    dep_graph_data: &DepGraphData<Qcx::Deps>,
     qcx: Qcx,
     key: &Q::Key,
-    dep_node: &DepNode<Qcx::DepKind>,
+    dep_node: &DepNode,
 ) -> Option<(Q::Value, DepNodeIndex)>
 where
     Q: QueryConfig<Qcx>,
@@ -637,7 +635,7 @@ where
 #[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V>(
     tcx: Tcx,
-    dep_graph_data: &DepGraphData<Tcx::DepKind>,
+    dep_graph_data: &DepGraphData<Tcx::Deps>,
     result: &V,
     prev_index: SerializedDepNodeIndex,
     hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -730,7 +728,7 @@ fn ensure_must_run<Q, Qcx>(
     qcx: Qcx,
     key: &Q::Key,
     check_cache: bool,
-) -> (bool, Option<DepNode<Qcx::DepKind>>)
+) -> (bool, Option<DepNode>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
@@ -821,12 +819,8 @@ where
     Some(result)
 }
 
-pub fn force_query<Q, Qcx>(
-    query: Q,
-    qcx: Qcx,
-    key: Q::Key,
-    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
-) where
+pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
+where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
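
`JobOwner`, now generic only over the key, is the RAII guard behind the `QueryResult::Poisoned` state above: if the owner is dropped without completing (typically because the provider panicked), its `Drop` impl poisons the entry so waiters raise an error instead of blocking forever. A minimal standalone analogue of that guard; the names, the string keys, and the `State` map are illustrative, not the compiler's types:

    use std::collections::HashMap;
    use std::sync::Mutex;

    enum State {
        Started,
        Poisoned,
    }

    struct JobOwner<'a> {
        state: &'a Mutex<HashMap<&'static str, State>>,
        key: &'static str,
    }

    impl<'a> JobOwner<'a> {
        fn complete(self) {
            // Normal completion: remove the entry, then skip the poisoning
            // Drop below.
            self.state.lock().unwrap().remove(self.key);
            std::mem::forget(self);
        }
    }

    impl<'a> Drop for JobOwner<'a> {
        fn drop(&mut self) {
            // Reached only when `complete` was never called, e.g. while a
            // panic unwinds past the owner.
            self.state.lock().unwrap().insert(self.key, State::Poisoned);
        }
    }

    fn main() {
        let state = Mutex::new(HashMap::new());

        state.lock().unwrap().insert("typeck", State::Started);
        let owner = JobOwner { state: &state, key: "typeck" };
        drop(owner); // stand-in for a panic; the entry is now poisoned
        assert!(matches!(state.lock().unwrap()["typeck"], State::Poisoned));

        state.lock().unwrap().insert("layout_of", State::Started);
        JobOwner { state: &state, key: "layout_of" }.complete();
        assert!(!state.lock().unwrap().contains_key("layout_of"));
    }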
diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs
index 07c28fdb73b..8848fda9da3 100644
--- a/compiler/rustc_query_system/src/values.rs
+++ b/compiler/rustc_query_system/src/values.rs
@@ -1,14 +1,14 @@
 use rustc_span::ErrorGuaranteed;
 
-use crate::dep_graph::{DepContext, DepKind};
+use crate::dep_graph::DepContext;
 use crate::query::QueryInfo;
 
-pub trait Value<Tcx: DepContext, D: DepKind>: Sized {
-    fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>], guar: ErrorGuaranteed) -> Self;
+pub trait Value<Tcx: DepContext>: Sized {
+    fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self;
 }
 
-impl<Tcx: DepContext, T, D: DepKind> Value<Tcx, D> for T {
-    default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>], _guar: ErrorGuaranteed) -> T {
+impl<Tcx: DepContext, T> Value<Tcx> for T {
+    default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> T {
         tcx.sess().abort_if_errors();
         // Ideally we would use `bug!` here. But `bug!` is only defined in rustc_middle, and it's
         // non-trivial to define it earlier.
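
The blanket `impl<Tcx: DepContext, T> Value<Tcx> for T` with a `default fn` relies on the unstable `min_specialization` feature: most types fall back to the abort-on-cycle behaviour, while specific types override the method to return a real recovery value. A nightly-only standalone sketch of the same shape; the trait and impls below are illustrative, not the compiler's:

    #![feature(min_specialization)]

    trait Value: Sized {
        fn from_cycle_error() -> Self;
    }

    impl<T> Value for T {
        // The blanket fallback; `default` marks it as overridable by more
        // specific impls.
        default fn from_cycle_error() -> T {
            panic!("query cycle with no recovery value for this type")
        }
    }

    impl Value for String {
        // A type that knows how to recover from a cycle overrides the default.
        fn from_cycle_error() -> String {
            "<cycle>".to_string()
        }
    }

    fn main() {
        assert_eq!(String::from_cycle_error(), "<cycle>");
    }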