author     John Kåre Alsaker <john.kare.alsaker@gmail.com>  2025-05-05 20:49:09 +0200
committer  John Kåre Alsaker <john.kare.alsaker@gmail.com>  2025-05-05 20:50:32 +0200
commit     aeab2819f650d180eb0e7238ff8a0733af70b3ae (patch)
tree       1179826066ecaa5240f11044ed89b9fc0e20d558
parent     c43a6f05d6cedbb6a191e0c307cd257665a342e0 (diff)
Tweak index chunk allocation
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/serialized.rs  14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index 648823edb18..b5eda7b04a7 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -572,14 +572,18 @@ impl<D: Deps> EncoderState<D> {
     #[inline]
     fn next_index(&self, local: &mut LocalEncoderState) -> DepNodeIndex {
         if local.remaining_node_index == 0 {
-            let count = 256;
+            const COUNT: u32 = 256;
 
-            // We assume that there won't be enough active threads to overflow u64 from u32::MAX here.
-            assert!(self.next_node_index.load(Ordering::Relaxed) <= u32::MAX as u64);
+            // We assume that there won't be enough active threads to overflow `u64` from `u32::MAX` here.
+            // The counter can exceed `u32::MAX` by at most `N * COUNT`, where `N` is the thread pool size,
+            // since `try_into().unwrap()` makes a thread panic once `self.next_node_index` exceeds `u32::MAX`.
             local.next_node_index =
-                self.next_node_index.fetch_add(count, Ordering::Relaxed).try_into().unwrap();
+                self.next_node_index.fetch_add(COUNT as u64, Ordering::Relaxed).try_into().unwrap();
 
-            local.remaining_node_index = count as u32;
+            // Check that the end of the chunk still fits in `u32`.
+            local.next_node_index.checked_add(COUNT).unwrap();
+
+            local.remaining_node_index = COUNT;
         }
 
         DepNodeIndex::from_u32(local.next_node_index)
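
For context, the pattern this patch adjusts hands out dep node indices in per-thread chunks of 256 reserved from a shared atomic u64 counter. Below is a minimal, self-contained sketch of that pattern under simplified assumptions; the type names (SharedState, LocalState), the plain u32 return type, and the inline increment/decrement of the local counters are illustrative stand-ins, not the actual rustc_query_system code, which keeps that bookkeeping elsewhere.

use std::sync::atomic::{AtomicU64, Ordering};

/// Shared across threads: a single monotonically increasing counter.
struct SharedState {
    next_node_index: AtomicU64,
}

/// Per-thread state: the next index to hand out and how many indices
/// remain in the locally reserved chunk.
struct LocalState {
    next_node_index: u32,
    remaining_node_index: u32,
}

impl SharedState {
    fn next_index(&self, local: &mut LocalState) -> u32 {
        if local.remaining_node_index == 0 {
            const COUNT: u32 = 256;

            // Reserve a chunk of COUNT indices from the shared counter.
            // The counter is u64, so the fetch_add itself cannot overflow;
            // converting the chunk start back to u32 panics if the counter
            // has already run past u32::MAX.
            local.next_node_index = self
                .next_node_index
                .fetch_add(COUNT as u64, Ordering::Relaxed)
                .try_into()
                .unwrap();

            // Ensure the whole chunk, not just its start, fits in u32.
            local.next_node_index.checked_add(COUNT).unwrap();

            local.remaining_node_index = COUNT;
        }

        // Hand out one index from the local chunk (done inline here for
        // illustration only).
        let index = local.next_node_index;
        local.next_node_index += 1;
        local.remaining_node_index -= 1;
        index
    }
}

fn main() {
    let shared = SharedState { next_node_index: AtomicU64::new(0) };
    let mut local = LocalState { next_node_index: 0, remaining_node_index: 0 };

    // The first call reserves a fresh chunk of 256 indices; later calls are
    // served from the thread-local state without touching the atomic.
    assert_eq!(shared.next_index(&mut local), 0);
    assert_eq!(shared.next_index(&mut local), 1);
    assert_eq!(local.remaining_node_index, 254);
}

The point of the chunking is that threads only contend on the shared atomic once per 256 indices; the patch replaces the runtime assert on the shared counter with a cheap per-chunk checked_add that still catches the u32 overflow case.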