Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs | 16
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs    | 67
2 files changed, 38 insertions, 45 deletions
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 5a53f8af5f8..e6be9c256f0 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -43,10 +43,10 @@ impl<T> Sharded<T> {
 
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.get_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -56,12 +56,12 @@ impl<T> Sharded<T> {
     }
 
     #[inline]
-    pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
             Self::Shards(shards) => {
                 // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
-                unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+                unsafe { &shards.get_unchecked(i & (SHARDS - 1)).0 }
             }
         }
     }
@@ -69,7 +69,7 @@ impl<T> Sharded<T> {
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -79,7 +79,7 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -91,7 +91,7 @@ impl<T> Sharded<T> {
 
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_index(&self, i: usize) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -109,7 +109,7 @@ impl<T> Sharded<T> {
                 // always inbounds.
                 // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
                 // the lock thus `might_be_dyn_thread_safe` was also true.
-                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+                unsafe { shards.get_unchecked(i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
             }
         }
     }
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 37b54fe38ff..a1cc75c4985 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -18,42 +18,54 @@
 //!
 //! | Type                    | Serial version      | Parallel version                |
 //! | ----------------------- | ------------------- | ------------------------------- |
-//! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
-//! |                         |                     |                                 |
 //! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
 //! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
 //! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
-//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
 //! |                         |                     |                                 |
 //! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
 //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
 //! of a `RefCell`. This is appropriate when interior mutability is not
 //! required.
-//!
-//! [^2]: `MTRef`, `MTLockRef` are type aliases.
 
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 
-pub use crate::marker::*;
+pub use parking_lot::{
+    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
+    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
+};
 
-mod lock;
+pub use self::atomic::AtomicU64;
+pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
 #[doc(no_inline)]
-pub use lock::{Lock, LockGuard, Mode};
-
-mod worker_local;
-pub use worker_local::{Registry, WorkerLocal};
+pub use self::lock::{Lock, LockGuard, Mode};
+pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+pub use self::parallel::{
+    join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in,
+};
+pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec};
+pub use self::worker_local::{Registry, WorkerLocal};
+pub use crate::marker::*;
 
+mod freeze;
+mod lock;
 mod parallel;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
-pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
-
 mod vec;
+mod worker_local;
 
-mod freeze;
-pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+/// Keep the conditional imports together in a submodule, so that import-sorting
+/// doesn't split them up.
+mod atomic {
+    // Most hosts can just use a regular AtomicU64.
+    #[cfg(target_has_atomic = "64")]
+    pub use std::sync::atomic::AtomicU64;
+
+    // Some 32-bit hosts don't have AtomicU64, so use a fallback.
+    #[cfg(not(target_has_atomic = "64"))]
+    pub use portable_atomic::AtomicU64;
+}
 
 mod mode {
     use std::sync::atomic::{AtomicU8, Ordering};
@@ -97,21 +109,6 @@ mod mode {
 
 // FIXME(parallel_compiler): Get rid of these aliases across the compiler.
 
-pub use std::sync::OnceLock;
-// Use portable AtomicU64 for targets without native 64-bit atomics
-#[cfg(target_has_atomic = "64")]
-pub use std::sync::atomic::AtomicU64;
-
-pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-pub use parking_lot::{
-    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
-    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
-};
-#[cfg(not(target_has_atomic = "64"))]
-pub use portable_atomic::AtomicU64;
-
-pub type LRef<'a, T> = &'a T;
-
 #[derive(Debug, Default)]
 pub struct MTLock<T>(Lock<T>);
 
@@ -142,14 +139,10 @@ impl<T> MTLock<T> {
     }
 }
 
-use parking_lot::RwLock as InnerRwLock;
-
 /// This makes locks panic if they are already held.
 /// It is only useful when you are running in a single thread
 const ERROR_CHECKING: bool = false;
 
-pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
-
 #[derive(Default)]
 #[repr(align(64))]
 pub struct CacheAligned<T>(pub T);
@@ -167,12 +160,12 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
 }
 
 #[derive(Debug, Default)]
-pub struct RwLock<T>(InnerRwLock<T>);
+pub struct RwLock<T>(parking_lot::RwLock<T>);
 
 impl<T> RwLock<T> {
     #[inline(always)]
     pub fn new(inner: T) -> Self {
-        RwLock(InnerRwLock::new(inner))
+        RwLock(parking_lot::RwLock::new(inner))
     }
 
     #[inline(always)]
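Background note, not part of the patch: the SAFETY comments kept above rely on the shard index being ANDed with `SHARDS - 1`, a power-of-two mask that keeps any index in bounds. A minimal standalone sketch of that masking idea follows; `SHARDS`, `shard_index`, and the use of std's `DefaultHasher` (as a stand-in for the `FxHasher`-based `make_hash` in the patch) are illustrative assumptions, not rustc's actual implementation.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

const SHARDS: usize = 32; // illustrative power-of-two shard count

// Illustrative helper: hash a key, then mask the hash down to a shard index.
fn shard_index<K: Hash + ?Sized>(key: &K) -> usize {
    let mut hasher = DefaultHasher::new(); // stand-in for FxHasher
    key.hash(&mut hasher);
    // ANDing with `SHARDS - 1` guarantees the result is in 0..SHARDS,
    // which is what the get_unchecked calls above depend on.
    (hasher.finish() as usize) & (SHARDS - 1)
}

fn main() {
    let idx = shard_index("some query key");
    assert!(idx < SHARDS);
    println!("key maps to shard {idx}");
}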
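The new `mod atomic` in sync.rs selects one `AtomicU64` type behind `cfg`, so the rest of the crate can use a single name whether or not the host has native 64-bit atomics. Below is a minimal standalone sketch of the same pattern; the `portable_atomic` fallback comes from the patch itself, while the counter example around it is illustrative.

// Illustrative standalone version of the conditional-import pattern above.
mod atomic {
    // Hosts with native 64-bit atomics use the std type directly.
    #[cfg(target_has_atomic = "64")]
    pub use std::sync::atomic::AtomicU64;

    // Hosts without them fall back to the `portable-atomic` crate,
    // which mirrors the std API (an existing dependency in the old code too).
    #[cfg(not(target_has_atomic = "64"))]
    pub use portable_atomic::AtomicU64;
}

use std::sync::atomic::Ordering;

fn main() {
    // Callers name one type either way.
    let counter = atomic::AtomicU64::new(41);
    counter.fetch_add(1, Ordering::Relaxed);
    assert_eq!(counter.load(Ordering::Relaxed), 42);
}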
