author     John Kåre Alsaker <john.kare.alsaker@gmail.com>  2023-08-16 13:50:31 +0200
committer  John Kåre Alsaker <john.kare.alsaker@gmail.com>  2023-09-08 08:48:44 +0200
commit     8fc160b742e53eeb77a14b3e247b06b778f3243d
tree       bc7e71a2ab910b69a5f6a402d80a6ff14124f987
parent     3d249706aa8b0167dd49efa1b3ce7cc0e9cbba08
Add optimized lock methods for `Sharded`
 compiler/rustc_data_structures/src/sharded.rs       | 53
 compiler/rustc_data_structures/src/sync/lock.rs     | 87
 compiler/rustc_query_system/src/dep_graph/graph.rs  | 12
 compiler/rustc_query_system/src/query/caches.rs     |  8
 compiler/rustc_query_system/src/query/plumbing.rs   |  6
 5 files changed, 133 insertions(+), 33 deletions(-)
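
For orientation before the per-file diffs: the patch replaces the previous two-step pattern at call sites, `get_shard_by_value(..).lock()`, with single `lock_shard_by_value` / `lock_shard_by_hash` / `lock_shard_by_index` methods that hand back the guard directly. The standalone sketch below is not rustc code; it uses a toy `Sharded` with plain `Mutex` shards, the std `DefaultHasher` in place of `FxHasher`, and a fixed power-of-two `SHARDS`, purely to illustrate the shape of the old and new call sites.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::{Mutex, MutexGuard};

// Toy stand-in for rustc's `Sharded<T>`; `SHARDS` must be a power of two so
// that masking with `SHARDS - 1` always yields a valid index.
const SHARDS: usize = 4;

struct Sharded<T> {
    shards: [Mutex<T>; SHARDS],
}

impl<T: Default> Sharded<T> {
    fn new() -> Self {
        Self { shards: std::array::from_fn(|_| Mutex::new(T::default())) }
    }

    // Old style: callers fetch a shard, then lock it themselves.
    fn get_shard_by_value<K: Hash + ?Sized>(&self, key: &K) -> &Mutex<T> {
        &self.shards[shard_index(key)]
    }

    // New style from this patch: one call hands back the guard directly.
    fn lock_shard_by_value<K: Hash + ?Sized>(&self, key: &K) -> MutexGuard<'_, T> {
        self.shards[shard_index(key)].lock().unwrap()
    }
}

// Stand-in for `make_hash` + `get_shard_hash`: hash the key, mask to a shard.
fn shard_index<K: Hash + ?Sized>(key: &K) -> usize {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    (hasher.finish() as usize) & (SHARDS - 1)
}

fn main() {
    let map: Sharded<HashMap<u32, &str>> = Sharded::new();
    // Before this patch: two steps at every call site.
    map.get_shard_by_value(&1).lock().unwrap().insert(1, "one");
    // After this patch: a single call.
    map.lock_shard_by_value(&2).insert(2, "two");
    assert_eq!(map.lock_shard_by_value(&1).get(&1).copied(), Some("one"));
}

Returning the guard from one method is what lets the real implementation pick the locking strategy internally: in the patch, the `Self::Single` arm calls the new `lock_assume_no_sync` instead of the generic `lock`.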
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 0f769c1f3bf..3706f62383f 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -73,6 +73,53 @@ impl<T> Sharded<T> {
         }
     }
 
+    /// The shard is selected by hashing `val` with `FxHasher`.
+    #[inline]
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+        match self {
+            Self::Single(single) => {
+                // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
+                // for that case.
+
+                // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+                // `might_be_dyn_thread_safe` was also false.
+                unsafe { single.lock_assume_no_sync() }
+            }
+            #[cfg(parallel_compiler)]
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+        }
+    }
+
+    #[inline]
+    pub fn lock_shard_by_hash(&self, hash: u64) -> LockGuard<'_, T> {
+        self.lock_shard_by_index(get_shard_hash(hash))
+    }
+
+    #[inline]
+    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+        match self {
+            Self::Single(single) => {
+                // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
+                // for that case.
+
+                // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+                // `might_be_dyn_thread_safe` was also false.
+                unsafe { single.lock_assume_no_sync() }
+            }
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => {
+                // Synchronization is enabled so use the `lock_assume_sync` method optimized
+                // for that case.
+
+                // SAFETY (get_unchecked): The index gets ANDed with the shard mask, ensuring it is
+                // always in bounds.
+                // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
+                // the lock thus `might_be_dyn_thread_safe` was also true.
+                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume_sync() }
+            }
+        }
+    }
+
     #[inline]
     pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
         match self {
@@ -124,7 +171,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
         Q: Hash + Eq,
     {
         let hash = make_hash(value);
-        let mut shard = self.get_shard_by_hash(hash).lock();
+        let mut shard = self.lock_shard_by_hash(hash);
         let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
 
         match entry {
@@ -144,7 +191,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
         Q: Hash + Eq,
     {
         let hash = make_hash(&value);
-        let mut shard = self.get_shard_by_hash(hash).lock();
+        let mut shard = self.lock_shard_by_hash(hash);
         let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
 
         match entry {
@@ -166,7 +213,7 @@ pub trait IntoPointer {
 impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
     pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
         let hash = make_hash(&value);
-        let shard = self.get_shard_by_hash(hash).lock();
+        let shard = self.lock_shard_by_hash(hash);
         let value = value.into_pointer();
         shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
     }
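
The `Single`/`Shards` dispatch added above is the core of the change: with dynamic thread safety off, the whole map sits behind a single unsynchronized lock, and in the parallel case the index is masked with `SHARDS - 1` so the `get_unchecked` access is always in bounds. Below is a hedged sketch of that dispatch using safe stand-ins; `RefCell`/`Mutex` replace rustc's `Lock`, and the `Guard` enum is an invention of the sketch (the real `LockGuard` is a single type).

use std::cell::{RefCell, RefMut};
use std::sync::{Mutex, MutexGuard};

// Simplified model of the enum the new methods match on: a single
// unsynchronized cell when the compiler runs single-threaded, or a boxed
// slice of mutex-protected shards when it runs in parallel.
enum Sharded<T> {
    Single(RefCell<T>),
    Shards(Box<[Mutex<T>]>),
}

// Stand-in for `LockGuard`: either kind of borrow, behind one type.
enum Guard<'a, T> {
    Single(RefMut<'a, T>),
    Shard(MutexGuard<'a, T>),
}

impl<T> Sharded<T> {
    fn lock_shard_by_hash(&self, hash: u64) -> Guard<'_, T> {
        match self {
            // Synchronization is disabled: a plain RefCell borrow is enough.
            Sharded::Single(cell) => Guard::Single(cell.borrow_mut()),
            // Synchronization is enabled: mask the hash so the index is
            // always in bounds (the shard count is a power of two).
            Sharded::Shards(shards) => {
                let index = (hash as usize) & (shards.len() - 1);
                Guard::Shard(shards[index].lock().unwrap())
            }
        }
    }
}

fn main() {
    let single: Sharded<Vec<u32>> = Sharded::Single(RefCell::new(Vec::new()));
    if let Guard::Single(mut shard) = single.lock_shard_by_hash(0) {
        shard.push(1);
    }

    let shards: Box<[Mutex<Vec<u32>>]> = (0..4).map(|_| Mutex::new(Vec::new())).collect();
    let parallel = Sharded::Shards(shards);
    if let Guard::Shard(mut shard) = parallel.lock_shard_by_hash(0xdead_beef) {
        shard.push(2);
    }
}

The `shards.len() - 1` mask only stays in bounds because the shard count is a power of two, which is the same invariant the real `SHARDS - 1` mask relies on.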
diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs
index 62cd1b993de..0a8c0306fe5 100644
--- a/compiler/rustc_data_structures/src/sync/lock.rs
+++ b/compiler/rustc_data_structures/src/sync/lock.rs
@@ -51,6 +51,20 @@ impl<T> Lock<T> {
 
     #[inline(always)]
     #[track_caller]
+    // This is unsafe to match the API for the `parallel_compiler` case.
+    pub unsafe fn lock_assume_no_sync(&self) -> LockGuard<'_, T> {
+        self.0.borrow_mut()
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    // This is unsafe to match the API for the `parallel_compiler` case.
+    pub unsafe fn lock_assume_sync(&self) -> LockGuard<'_, T> {
+        self.0.borrow_mut()
+    }
+
+    #[inline(always)]
+    #[track_caller]
     pub fn lock(&self) -> LockGuard<'_, T> {
         self.0.borrow_mut()
     }
@@ -150,24 +164,45 @@ impl LockRaw {
 
     #[inline(always)]
     fn lock(&self) {
-        if super::ERROR_CHECKING {
-            // We're in the debugging mode, so assert that the lock is not held so we
-            // get a panic instead of waiting for the lock.
-            assert_eq!(self.try_lock(), true, "lock must not be hold");
-        } else {
-            // SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
-            unsafe {
-                if likely(!self.sync) {
-                    if unlikely(self.opt.cell.replace(true)) {
-                        cold_path(|| panic!("lock was already held"))
-                    }
-                } else {
-                    self.opt.lock.lock();
-                }
+        // SAFETY: This is safe since `self.sync` is used in accordance with the preconditions of
+        // `lock_assume_no_sync` and `lock_assume_sync`.
+        unsafe {
+            if likely(!self.sync) {
+                self.lock_assume_no_sync()
+            } else {
+                self.lock_assume_sync();
+            }
+        }
+    }
+
+    /// This acquires the lock assuming no synchronization is required.
+    ///
+    /// Safety
+    /// This method must only be called if `might_be_dyn_thread_safe` was false on lock creation.
+    #[inline(always)]
+    unsafe fn lock_assume_no_sync(&self) {
+        // SAFETY: This is safe since `self.opt.cell` is the union field used due to the
+        // precondition on this function.
+        unsafe {
+            if unlikely(self.opt.cell.replace(true)) {
+                cold_path(|| panic!("lock was already held"))
             }
         }
     }
 
+    /// This acquires the lock assuming synchronization is required.
+    ///
+    /// Safety
+    /// This method must only be called if `might_be_dyn_thread_safe` was true on lock creation.
+    #[inline(always)]
+    unsafe fn lock_assume_sync(&self) {
+        // SAFETY: This is safe since `self.opt.lock` is the union field used due to the
+        // precondition on this function.
+        unsafe {
+            self.opt.lock.lock();
+        }
+    }
+
     /// This unlocks the lock.
     ///
     /// Safety
@@ -217,6 +252,30 @@ impl<T> Lock<T> {
         if self.raw.try_lock() { Some(LockGuard { lock: self, marker: PhantomData }) } else { None }
     }
 
+    /// This acquires the lock assuming no synchronization is required.
+    ///
+    /// Safety
+    /// This method must only be called if `might_be_dyn_thread_safe` was false on lock creation.
+    #[inline(always)]
+    pub(crate) unsafe fn lock_assume_no_sync(&self) -> LockGuard<'_, T> {
+        unsafe {
+            self.raw.lock_assume_no_sync();
+        }
+        LockGuard { lock: self, marker: PhantomData }
+    }
+
+    /// This acquires the lock assuming synchronization is required.
+    ///
+    /// Safety
+    /// This method must only be called if `might_be_dyn_thread_safe` was true on lock creation.
+    #[inline(always)]
+    pub(crate) unsafe fn lock_assume_sync(&self) -> LockGuard<'_, T> {
+        unsafe {
+            self.raw.lock_assume_sync();
+        }
+        LockGuard { lock: self, marker: PhantomData }
+    }
+
     #[inline(always)]
     pub fn lock(&self) -> LockGuard<'_, T> {
         self.raw.lock();
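
The `lock_assume_no_sync` / `lock_assume_sync` pair exists because a lock's mode is fixed at creation time, so a caller that already knows the mode, as `Sharded` does, can skip the branch on `self.sync`. The sketch below models that contract with a safe enum and panics instead of undefined behaviour; the real `LockRaw` keeps both variants in a union and marks the assume methods `unsafe`, with the precondition that the caller's assumption matches `might_be_dyn_thread_safe` at creation.

use std::cell::Cell;
use std::sync::atomic::{AtomicBool, Ordering};

// Safe model of the idea behind the "assume" methods: the mode is chosen
// once, in `new`, so callers that know it can take the matching fast path
// without re-checking. The atomic flag stands in for a real mutex.
enum RawLock {
    // Single-threaded: a plain, non-atomic flag.
    NoSync(Cell<bool>),
    // Parallel: an atomic flag standing in for a real mutex.
    Sync(AtomicBool),
}

impl RawLock {
    fn new(sync: bool) -> Self {
        if sync { RawLock::Sync(AtomicBool::new(false)) } else { RawLock::NoSync(Cell::new(false)) }
    }

    // Generic path: branch on the mode, then take the matching fast path.
    fn lock(&self) {
        match self {
            RawLock::NoSync(_) => self.lock_assume_no_sync(),
            RawLock::Sync(_) => self.lock_assume_sync(),
        }
    }

    // Caller "assumes" the lock was created with `sync == false`.
    fn lock_assume_no_sync(&self) {
        let RawLock::NoSync(cell) = self else { panic!("created with sync") };
        assert!(!cell.replace(true), "lock was already held");
    }

    // Caller "assumes" the lock was created with `sync == true`.
    fn lock_assume_sync(&self) {
        let RawLock::Sync(flag) = self else { panic!("created without sync") };
        // A real implementation would block here; this sketch just asserts.
        assert!(!flag.swap(true, Ordering::Acquire), "lock was already held");
    }

    fn unlock(&self) {
        match self {
            RawLock::NoSync(cell) => cell.set(false),
            RawLock::Sync(flag) => flag.store(false, Ordering::Release),
        }
    }
}

fn main() {
    let single = RawLock::new(false);
    single.lock();                // generic path: branches on the mode
    single.unlock();
    single.lock_assume_no_sync(); // fast path: the assumption matches creation
    single.unlock();

    let parallel = RawLock::new(true);
    parallel.lock_assume_sync();
    parallel.unlock();
}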
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 7b7981e1425..fa54e1a2e6a 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -629,12 +629,7 @@ impl<K: DepKind> DepGraphData<K> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.current.prev_index_to_index.lock()[prev_index]
         } else {
-            self.current
-                .new_node_to_index
-                .get_shard_by_value(dep_node)
-                .lock()
-                .get(dep_node)
-                .copied()
+            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
         }
     }
 
@@ -1201,8 +1196,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
-        {
+        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
             Entry::Occupied(entry) => *entry.get(),
             Entry::Vacant(entry) => {
                 let dep_node_index =
@@ -1328,7 +1322,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
     ) {
         let node = &prev_graph.index_to_node(prev_index);
         debug_assert!(
-            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
+            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
             "node from previous graph present in new node collection"
         );
     }
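
In the graph.rs hunk above, the guard returned by `lock_shard_by_value` is held across the whole `entry(key)` match, so looking up a dep node and recording a new index for it stays a single step per shard, exactly as with the old `get_shard_by_value(&key).lock()`. A minimal sketch of that interning pattern, with one `Mutex<HashMap>` standing in for a shard and a plain `u32` for `DepNodeIndex` (the `intern` helper is invented for the sketch):

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::Mutex;

// Look up or allocate an index for `key` while holding the shard lock.
// `next_index` is an exclusive counter here only for simplicity; the real
// code obtains the new index while the shard guard is still held.
fn intern(shard: &Mutex<HashMap<&'static str, u32>>, next_index: &mut u32, key: &'static str) -> u32 {
    let mut guard = shard.lock().unwrap();
    match guard.entry(key) {
        Entry::Occupied(entry) => *entry.get(),
        Entry::Vacant(entry) => {
            let index = *next_index;
            *next_index += 1;
            entry.insert(index);
            index
        }
    }
}

fn main() {
    let shard = Mutex::new(HashMap::new());
    let mut next = 0;
    assert_eq!(intern(&shard, &mut next, "a"), 0);
    assert_eq!(intern(&shard, &mut next, "b"), 1);
    assert_eq!(intern(&shard, &mut next, "a"), 0); // an existing key reuses its index
}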
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index d8aa377af42..0240f012da0 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -55,7 +55,7 @@ where
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
         let key_hash = sharded::make_hash(key);
-        let lock = self.cache.get_shard_by_hash(key_hash).lock();
+        let lock = self.cache.lock_shard_by_hash(key_hash);
         let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
 
         if let Some((_, value)) = result { Some(*value) } else { None }
@@ -63,7 +63,7 @@ where
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        let mut lock = self.cache.get_shard_by_value(&key).lock();
+        let mut lock = self.cache.lock_shard_by_value(&key);
         // We may be overwriting another value. This is all right, since the dep-graph
         // will check that the fingerprint matches.
         lock.insert(key, (value, index));
@@ -148,13 +148,13 @@ where
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        let lock = self.cache.lock_shard_by_hash(key.index() as u64);
         if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
     }
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        let mut lock = self.cache.lock_shard_by_hash(key.index() as u64);
         lock.insert(key, (value, index));
     }
 
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 4fa168965a7..07db15e6d8b 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -158,7 +158,7 @@ where
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            let mut lock = state.active.get_shard_by_value(&key).lock();
+            let mut lock = state.active.lock_shard_by_value(&key);
             match lock.remove(&key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -180,7 +180,7 @@ where
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            let mut shard = state.active.get_shard_by_value(&self.key).lock();
+            let mut shard = state.active.lock_shard_by_value(&self.key);
             let job = match shard.remove(&self.key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -303,7 +303,7 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    let mut state_lock = state.active.get_shard_by_value(&key).lock();
+    let mut state_lock = state.active.lock_shard_by_value(&key);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the