-rw-r--r--   compiler/rustc_codegen_ssa/src/base.rs                11
-rw-r--r--   compiler/rustc_incremental/src/persist/save.rs         2
-rw-r--r--   compiler/rustc_query_system/src/dep_graph/graph.rs   149
-rw-r--r--   compiler/rustc_session/src/options.rs                  3
4 files changed, 110 insertions, 55 deletions
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 396febcc637..6d047f82566 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -1104,11 +1104,12 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
     // know that later). If we are not doing LTO, there is only one optimized
     // version of each module, so we re-use that.
     let dep_node = cgu.codegen_dep_node(tcx);
-    assert!(
-        !tcx.dep_graph.dep_node_exists(&dep_node),
-        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
-        cgu.name()
-    );
+    tcx.dep_graph.assert_dep_node_not_yet_allocated_in_current_session(&dep_node, || {
+        format!(
+            "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+            cgu.name()
+        )
+    });
 
     if tcx.try_mark_green(&dep_node) {
         // We can re-use either the pre- or the post-thinlto state. If no LTO is
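The open-coded assert! above is replaced by a helper on the dep-graph that takes the failure message as a closure, presumably so the string is only formatted when the check actually fires, and so the check can be a no-op when there is no incremental data. A minimal sketch of that lazy-message pattern, with a hypothetical free function standing in for the real method on DepGraph:

    // Sketch of the lazy-message assertion pattern (simplified, standalone;
    // the real helper also consults the previous-session graph).
    fn assert_not_yet_allocated<S: std::fmt::Display>(
        already_allocated: bool,
        msg: impl FnOnce() -> S,
    ) {
        // The closure is only invoked, and the message only formatted,
        // when the assertion is about to fail.
        assert!(!already_allocated, "{}", msg());
    }
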
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index 8fc50ca1b43..94ce6d9fa81 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -173,7 +173,7 @@ pub(crate) fn build_dep_graph(
     sess.opts.dep_tracking_hash(false).encode(&mut encoder);
 
     Some(DepGraph::new(
-        &sess.prof,
+        sess,
         prev_graph,
         prev_work_products,
         encoder,
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index bfd8b320715..22926c0d146 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -1,4 +1,5 @@
 use std::assert_matches::assert_matches;
+use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
@@ -7,8 +8,8 @@ use std::sync::atomic::{AtomicU32, Ordering};
 
 use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::profiling::{QueryInvocationId, SelfProfilerRef};
-use rustc_data_structures::sharded::{self, ShardedHashMap};
+use rustc_data_structures::profiling::QueryInvocationId;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::sync::{AtomicU64, Lock};
 use rustc_data_structures::unord::UnordMap;
@@ -16,6 +17,7 @@ use rustc_errors::DiagInner;
 use rustc_index::IndexVec;
 use rustc_macros::{Decodable, Encodable};
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+use rustc_session::Session;
 use tracing::{debug, instrument};
 #[cfg(debug_assertions)]
 use {super::debug::EdgeFilter, std::env};
@@ -117,7 +119,7 @@ where
 
 impl<D: Deps> DepGraph<D> {
     pub fn new(
-        profiler: &SelfProfilerRef,
+        session: &Session,
         prev_graph: Arc<SerializedDepGraph>,
         prev_work_products: WorkProductMap,
         encoder: FileEncoder,
@@ -127,7 +129,7 @@ impl<D: Deps> DepGraph<D> {
         let prev_graph_node_count = prev_graph.node_count();
 
         let current = CurrentDepGraph::new(
-            profiler,
+            session,
             prev_graph_node_count,
             encoder,
             record_graph,
@@ -351,12 +353,13 @@ impl<D: Deps> DepGraphData<D> {
         //    in `DepGraph::try_mark_green()`.
         // 2. Two distinct query keys get mapped to the same `DepNode`
         //    (see for example #48923).
-        assert!(
-            !self.dep_node_exists(&key),
-            "forcing query with already existing `DepNode`\n\
+        self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
+            format!(
+                "forcing query with already existing `DepNode`\n\
                  - query-key: {arg:?}\n\
                  - dep-node: {key:?}"
-        );
+            )
+        });
 
         let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
         let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
@@ -436,7 +439,31 @@ impl<D: Deps> DepGraphData<D> {
                     hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                 };
 
-                self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
+                // The DepNodes generated by the process above are not unique. 2 queries could
+                // have exactly the same dependencies. However, deserialization does not handle
+                // duplicated nodes, so we do the deduplication here directly.
+                //
+                // As anonymous nodes are a small quantity compared to the full dep-graph, the
+                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
+                // us avoid useless growth of the graph with almost-equivalent nodes.
+                match self
+                    .current
+                    .anon_node_to_index
+                    .get_shard_by_value(&target_dep_node)
+                    .lock()
+                    .entry(target_dep_node)
+                {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        let dep_node_index = self.current.intern_new_node(
+                            target_dep_node,
+                            task_deps,
+                            Fingerprint::ZERO,
+                        );
+                        entry.insert(dep_node_index);
+                        dep_node_index
+                    }
+                }
             }
         };
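The deduplication above keys a sharded hash map by the anonymous DepNode: the shard holding the node is locked, and the entry API either returns the index interned by an earlier identical anonymous task or interns a new node and records its index. A minimal sketch of that lock-then-entry pattern, using simplified stand-in types instead of the real Sharded<FxHashMap<..>> and graph encoder:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::Mutex;

    // Simplified stand-ins for DepNode / DepNodeIndex.
    type Node = u64;
    type Index = u32;

    // A single shard; the real map spreads nodes over several shards by hash.
    struct AnonDedup {
        shard: Mutex<HashMap<Node, Index>>,
        next: AtomicU32,
    }

    impl AnonDedup {
        fn intern(&self, node: Node) -> Index {
            match self.shard.lock().unwrap().entry(node) {
                // An identical anonymous node was interned before: reuse its index.
                Entry::Occupied(e) => *e.get(),
                // First occurrence: allocate a fresh index and remember it.
                Entry::Vacant(e) => {
                    let idx = self.next.fetch_add(1, Ordering::Relaxed);
                    e.insert(idx);
                    idx
                }
            }
        }
    }
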
 
@@ -637,20 +664,22 @@ impl<D: Deps> DepGraph<D> {
 }
 
 impl<D: Deps> DepGraphData<D> {
-    #[inline]
-    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
+    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
+        &self,
+        dep_node: &DepNode,
+        msg: impl FnOnce() -> S,
+    ) {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.lock()[prev_index]
-        } else {
-            self.current.new_node_to_index.get(dep_node)
+            let current = self.current.prev_index_to_index.lock()[prev_index];
+            assert!(current.is_none(), "{}", msg())
+        } else if let Some(nodes_newly_allocated_in_current_session) =
+            &self.current.nodes_newly_allocated_in_current_session
+        {
+            let seen = nodes_newly_allocated_in_current_session.lock().contains(dep_node);
+            assert!(!seen, "{}", msg());
         }
     }
 
-    #[inline]
-    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.dep_node_index_of_opt(dep_node).is_some()
-    }
-
     fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.colors.get(prev_index)
@@ -734,11 +763,6 @@ impl<D: Deps> DepGraphData<D> {
 }
 
 impl<D: Deps> DepGraph<D> {
-    #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
-    }
-
     /// Checks whether a previous work product exists for `v` and, if
     /// so, return the path that leads to it. Used to skip doing work.
     pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
@@ -964,6 +988,16 @@ impl<D: Deps> DepGraph<D> {
         self.node_color(dep_node).is_some_and(|c| c.is_green())
     }
 
+    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
+        &self,
+        dep_node: &DepNode,
+        msg: impl FnOnce() -> S,
+    ) {
+        if let Some(data) = &self.data {
+            data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
+        }
+    }
+
     /// This method loads all on-disk cacheable query results into memory, so
     /// they can be written out to the new cache file again. Most query results
     /// will already be in memory but in the case where we marked something as
@@ -1069,24 +1103,24 @@ rustc_index::newtype_index! {
 /// largest in the compiler.
 ///
 /// For this reason, we avoid storing `DepNode`s more than once as map
-/// keys. The `new_node_to_index` map only contains nodes not in the previous
+/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
 /// graph, and we map nodes in the previous graph to indices via a two-step
 /// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
 /// and the `prev_index_to_index` vector (which is more compact and faster than
 /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
 ///
-/// This struct uses three locks internally. The `data`, `new_node_to_index`,
+/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
 /// and `prev_index_to_index` fields are locked separately. Operations that take
 /// a `DepNodeIndex` typically just access the `data` field.
 ///
 /// We only need to manipulate at most two locks simultaneously:
-/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
-/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
+/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
+/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
 /// first, and `data` second.
 pub(super) struct CurrentDepGraph<D: Deps> {
     encoder: GraphEncoder<D>,
-    new_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
+    anon_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
 
     /// This is used to verify that fingerprints do not change between the creation of a node
     /// and its recomputation.
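The two-step mapping described in the doc comment above avoids storing the large DepNode keys twice: the serialized graph maps DepNode to a SerializedDepNodeIndex, and a dense vector then maps that index to the current session's DepNodeIndex. A rough sketch under simplified, hypothetical index types:

    // Simplified index types.
    type PrevIndex = usize; // stand-in for SerializedDepNodeIndex
    type CurIndex = u32;    // stand-in for DepNodeIndex

    struct TwoStepMap {
        // Dense vector indexed by the previous session's index; None until the
        // node has been given an index in the current session.
        prev_index_to_index: Vec<Option<CurIndex>>,
    }

    impl TwoStepMap {
        fn current_index(&self, prev: PrevIndex) -> Option<CurIndex> {
            self.prev_index_to_index[prev]
        }
    }
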
@@ -1098,6 +1132,13 @@ pub(super) struct CurrentDepGraph<D: Deps> {
     #[cfg(debug_assertions)]
     forbidden_edge: Option<EdgeFilter>,
 
+    /// Used to verify the absence of hash collisions among DepNodes.
+    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present.
+    ///
+    /// The map contains all DepNodes that have been allocated in the current session so far and
+    /// for which there is no equivalent in the previous session.
+    nodes_newly_allocated_in_current_session: Option<Lock<FxHashSet<DepNode>>>,
+
     /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
     /// their edges. This has the beneficial side-effect that multiple anonymous
     /// nodes can be coalesced into one without changing the semantics of the
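This set exists purely for the collision check: it is only allocated when the unstable option is enabled, and intern_new_node (shown further down in this diff) then inserts every freshly allocated DepNode and panics if the insert reports a duplicate. A small sketch of that gate-and-detect pattern, with simplified stand-in types:

    use std::collections::HashSet;
    use std::sync::Mutex;

    type Node = u64; // stand-in for DepNode

    struct CollisionChecker {
        // Only allocated when the verification option is enabled.
        seen: Option<Mutex<HashSet<Node>>>,
    }

    impl CollisionChecker {
        fn new(verify: bool) -> Self {
            // `bool::then` yields Some(..) only when `verify` is true.
            Self { seen: verify.then(|| Mutex::new(HashSet::new())) }
        }

        fn record(&self, node: Node) {
            if let Some(seen) = &self.seen {
                // HashSet::insert returns false if the value was already present,
                // i.e. two distinct allocations produced the same node.
                if !seen.lock().unwrap().insert(node) {
                    panic!("Found duplicate dep-node {node:?}");
                }
            }
        }
    }
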
@@ -1119,7 +1160,7 @@ pub(super) struct CurrentDepGraph<D: Deps> {
 
 impl<D: Deps> CurrentDepGraph<D> {
     fn new(
-        profiler: &SelfProfilerRef,
+        session: &Session,
         prev_graph_node_count: usize,
         encoder: FileEncoder,
         record_graph: bool,
@@ -1151,18 +1192,31 @@ impl<D: Deps> CurrentDepGraph<D> {
                 prev_graph_node_count,
                 record_graph,
                 record_stats,
-                profiler,
+                &session.prof,
                 previous,
             ),
-            new_node_to_index: ShardedHashMap::with_capacity(
-                new_node_count_estimate / sharded::shards(),
-            ),
+            anon_node_to_index: Sharded::new(|| {
+                FxHashMap::with_capacity_and_hasher(
+                    new_node_count_estimate / sharded::shards(),
+                    Default::default(),
+                )
+            }),
             prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
             anon_id_seed,
             #[cfg(debug_assertions)]
             forbidden_edge,
             #[cfg(debug_assertions)]
             fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
+            nodes_newly_allocated_in_current_session: session
+                .opts
+                .unstable_opts
+                .incremental_verify_ich
+                .then(|| {
+                    Lock::new(FxHashSet::with_capacity_and_hasher(
+                        new_node_count_estimate,
+                        Default::default(),
+                    ))
+                }),
             total_read_count: AtomicU64::new(0),
             total_duplicate_read_count: AtomicU64::new(0),
         }
@@ -1186,13 +1240,19 @@ impl<D: Deps> CurrentDepGraph<D> {
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = self
-            .new_node_to_index
-            .get_or_insert_with(key, || self.encoder.send(key, current_fingerprint, edges));
+        let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
 
         #[cfg(debug_assertions)]
         self.record_edge(dep_node_index, key, current_fingerprint);
 
+        if let Some(ref nodes_newly_allocated_in_current_session) =
+            self.nodes_newly_allocated_in_current_session
+        {
+            if !nodes_newly_allocated_in_current_session.lock().insert(key) {
+                panic!("Found duplicate dep-node {key:?}");
+            }
+        }
+
         dep_node_index
     }
 
@@ -1286,7 +1346,10 @@ impl<D: Deps> CurrentDepGraph<D> {
     ) {
         let node = &prev_graph.index_to_node(prev_index);
         debug_assert!(
-            !self.new_node_to_index.get(node).is_some(),
+            !self
+                .nodes_newly_allocated_in_current_session
+                .as_ref()
+                .map_or(false, |set| set.lock().contains(node)),
             "node from previous graph present in new node collection"
         );
     }
@@ -1408,16 +1471,6 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepN
         }
     }
 
-    if dep_node.is_none() {
-        // Try to find it among the new nodes
-        for shard in data.current.new_node_to_index.lock_shards() {
-            if let Some((node, _)) = shard.iter().find(|(_, index)| *index == dep_node_index) {
-                dep_node = Some(*node);
-                break;
-            }
-        }
-    }
-
     let dep_node = dep_node.map_or_else(
         || format!("with index {:?}", dep_node_index),
         |dep_node| format!("`{:?}`", dep_node),
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index b3be4b611f0..d2f202eb24e 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -2227,7 +2227,8 @@ options! {
     incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED],
         "verify extended properties for incr. comp. (default: no):
         - hashes of green query instances
-        - hash collisions of query keys"),
+        - hash collisions of query keys
+        - hash collisions when creating dep-nodes"),
     inline_llvm: bool = (true, parse_bool, [TRACKED],
         "enable LLVM inlining (default: yes)"),
     inline_mir: Option<bool> = (None, parse_opt_bool, [TRACKED],
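
As a rough usage note: because the new duplicate-node check is gated on the option documented above, exercising it in practice would look something like running a nightly compiler with the unstable verification flag (spelled after the option name, e.g. `-Z incremental-verify-ich`) together with an incremental build directory via `-C incremental=<dir>`.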