Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/captures.rs            8
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs                 1
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs            16
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map.rs         33
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs               67
-rw-r--r--  compiler/rustc_data_structures/src/sync/worker_local.rs  11
6 files changed, 77 insertions(+), 59 deletions(-)
diff --git a/compiler/rustc_data_structures/src/captures.rs b/compiler/rustc_data_structures/src/captures.rs
deleted file mode 100644
index 677ccb31454..00000000000
--- a/compiler/rustc_data_structures/src/captures.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-/// "Signaling" trait used in impl trait to tag lifetimes that you may
-/// need to capture but don't really need for other reasons.
-/// Basically a workaround; see [this comment] for details.
-///
-/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999
-pub trait Captures<'a> {}
-
-impl<'a, T: ?Sized> Captures<'a> for T {}
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 66d3834d857..a3b62b46919 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -48,7 +48,6 @@ pub use rustc_index::static_assert_size;
 pub mod aligned;
 pub mod base_n;
 pub mod binary_search_util;
-pub mod captures;
 pub mod fingerprint;
 pub mod flat_map_in_place;
 pub mod flock;
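The deleted `Captures` trait was the long-standing workaround for `impl Trait` not being allowed to capture a lifetime that appears in none of its bounds. It can go away because precise capturing (`use<..>` bounds, stable since Rust 1.82) expresses the same thing directly. A minimal sketch of the before/after, with illustrative function names:

```rust
/// The removed workaround, reproduced here so the sketch is self-contained.
pub trait Captures<'a> {}
impl<'a, T: ?Sized> Captures<'a> for T {}

// Before: the hidden iterator type borrows `needle` (`'b`), but `'b` appears
// in no bound, so without `Captures<'b>` this is rejected with E0700 on
// pre-2024 editions.
fn matches<'a, 'b>(
    haystack: &'a [u32],
    needle: &'b u32,
) -> impl Iterator<Item = &'a u32> + Captures<'b> {
    haystack.iter().filter(move |x| **x == *needle)
}

// After: a `use<..>` bound names the captured lifetimes explicitly, and the
// signaling trait is no longer needed.
fn matches_precise<'a, 'b>(
    haystack: &'a [u32],
    needle: &'b u32,
) -> impl Iterator<Item = &'a u32> + use<'a, 'b> {
    haystack.iter().filter(move |x| **x == *needle)
}
```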
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 5a53f8af5f8..e6be9c256f0 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -43,10 +43,10 @@ impl<T> Sharded<T> {
 
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.get_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -56,12 +56,12 @@ impl<T> Sharded<T> {
     }
 
     #[inline]
-    pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
             Self::Shards(shards) => {
                 // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
-                unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+                unsafe { &shards.get_unchecked(i & (SHARDS - 1)).0 }
             }
         }
     }
@@ -69,7 +69,7 @@ impl<T> Sharded<T> {
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -79,7 +79,7 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -91,7 +91,7 @@ impl<T> Sharded<T> {
 
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_index(&self, i: usize) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -109,7 +109,7 @@ impl<T> Sharded<T> {
                 // always inbounds.
                 // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
                 // the lock thus `might_be_dyn_thread_safe` was also true.
-                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+                unsafe { shards.get_unchecked(i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
             }
         }
     }
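The only change here is dropping the leading underscores now that the parameters are used on every code path. The SAFETY comments rest on `SHARDS` being a power of two, so masking with `SHARDS - 1` can never produce an out-of-bounds index. A self-contained sketch of that invariant; the constant values and the `make_hash` body are illustrative stand-ins for the private items in `sharded.rs`:

```rust
use std::hash::{Hash, Hasher};

// Illustrative values; the real SHARD_BITS/SHARDS live in sharded.rs.
const SHARD_BITS: usize = 5;
const SHARDS: usize = 1 << SHARD_BITS; // must be a power of two

// A stand-in for `make_hash`: hash the key once with FxHasher.
fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
    let mut hasher = rustc_hash::FxHasher::default();
    val.hash(&mut hasher);
    hasher.finish()
}

// Because SHARDS is a power of two, `SHARDS - 1` is an all-ones bit mask, so
// the result is always in 0..SHARDS. That is what makes the `get_unchecked`
// calls in the diff sound.
fn shard_index<K: Hash + ?Sized>(val: &K) -> usize {
    (make_hash(val) as usize) & (SHARDS - 1)
}
```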
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 066ea03b4ac..a01a420dfbd 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -1,4 +1,5 @@
 use std::borrow::Borrow;
+use std::cmp::Ordering;
 use std::fmt::Debug;
 use std::mem;
 use std::ops::{Bound, Index, IndexMut, RangeBounds};
@@ -156,6 +157,38 @@ impl<K: Ord, V> SortedMap<K, V> {
         &self.data[start..end]
     }
 
+    /// `sm.range_is_empty(r)` == `sm.range(r).is_empty()`, but is faster.
+    #[inline]
+    pub fn range_is_empty<R>(&self, range: R) -> bool
+    where
+        R: RangeBounds<K>,
+    {
+        // `range` must (via `range_slice_indices`) search for the start and
+        // end separately. But here we can do a single binary search for the
+        // entire range. If a single `x` matching `range` is found then the
+        // range is *not* empty.
+        self.data
+            .binary_search_by(|(x, _)| {
+                // Is `x` below `range`?
+                match range.start_bound() {
+                    Bound::Included(start) if x < start => return Ordering::Less,
+                    Bound::Excluded(start) if x <= start => return Ordering::Less,
+                    _ => {}
+                };
+
+                // Is `x` above `range`?
+                match range.end_bound() {
+                    Bound::Included(end) if x > end => return Ordering::Greater,
+                    Bound::Excluded(end) if x >= end => return Ordering::Greater,
+                    _ => {}
+                };
+
+                // `x` must be within `range`.
+                Ordering::Equal
+            })
+            .is_err()
+    }
+
     #[inline]
     pub fn remove_range<R>(&mut self, range: R)
     where
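The new method answers `range(r).is_empty()` with a single binary search instead of the two searches needed to compute the range's slice bounds. A hedged usage sketch; `rustc_data_structures` is internal to the compiler, so this assumes a `rustc_private` context, and `from_presorted_elements` requires already-sorted input:

```rust
use rustc_data_structures::sorted_map::SortedMap;

fn main() {
    let sm: SortedMap<u32, &str> =
        SortedMap::from_presorted_elements(vec![(1, "a"), (4, "b"), (9, "c")]);

    // No key falls in [5, 9), but `5..=9` picks up the key 9.
    assert!(sm.range_is_empty(5..9));
    assert!(!sm.range_is_empty(5..=9));

    // The documented equivalence, at the cost of one search rather than two.
    assert_eq!(sm.range_is_empty(5..9), sm.range(5..9).is_empty());
}
```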
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 37b54fe38ff..a1cc75c4985 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -18,42 +18,54 @@
 //!
 //! | Type                    | Serial version      | Parallel version                |
 //! | ----------------------- | ------------------- | ------------------------------- |
-//! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
-//! |                         |                     |                                 |
 //! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
 //! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
 //! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
-//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
 //! |                         |                     |                                 |
 //! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
 //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
 //! of a `RefCell`. This is appropriate when interior mutability is not
 //! required.
-//!
-//! [^2]: `MTRef`, `MTLockRef` are type aliases.
 
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 
-pub use crate::marker::*;
+pub use parking_lot::{
+    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
+    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
+};
 
-mod lock;
+pub use self::atomic::AtomicU64;
+pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
 #[doc(no_inline)]
-pub use lock::{Lock, LockGuard, Mode};
-
-mod worker_local;
-pub use worker_local::{Registry, WorkerLocal};
+pub use self::lock::{Lock, LockGuard, Mode};
+pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+pub use self::parallel::{
+    join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in,
+};
+pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec};
+pub use self::worker_local::{Registry, WorkerLocal};
+pub use crate::marker::*;
 
+mod freeze;
+mod lock;
 mod parallel;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
-pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
-
 mod vec;
+mod worker_local;
 
-mod freeze;
-pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+/// Keep the conditional imports together in a submodule, so that import-sorting
+/// doesn't split them up.
+mod atomic {
+    // Most hosts can just use a regular AtomicU64.
+    #[cfg(target_has_atomic = "64")]
+    pub use std::sync::atomic::AtomicU64;
+
+    // Some 32-bit hosts don't have AtomicU64, so use a fallback.
+    #[cfg(not(target_has_atomic = "64"))]
+    pub use portable_atomic::AtomicU64;
+}
 
 mod mode {
     use std::sync::atomic::{AtomicU8, Ordering};
@@ -97,21 +109,6 @@ mod mode {
 
 // FIXME(parallel_compiler): Get rid of these aliases across the compiler.
 
-pub use std::sync::OnceLock;
-// Use portable AtomicU64 for targets without native 64-bit atomics
-#[cfg(target_has_atomic = "64")]
-pub use std::sync::atomic::AtomicU64;
-
-pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-pub use parking_lot::{
-    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
-    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
-};
-#[cfg(not(target_has_atomic = "64"))]
-pub use portable_atomic::AtomicU64;
-
-pub type LRef<'a, T> = &'a T;
-
 #[derive(Debug, Default)]
 pub struct MTLock<T>(Lock<T>);
 
@@ -142,14 +139,10 @@ impl<T> MTLock<T> {
     }
 }
 
-use parking_lot::RwLock as InnerRwLock;
-
 /// This makes locks panic if they are already held.
 /// It is only useful when you are running in a single thread
 const ERROR_CHECKING: bool = false;
 
-pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
-
 #[derive(Default)]
 #[repr(align(64))]
 pub struct CacheAligned<T>(pub T);
@@ -167,12 +160,12 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
 }
 
 #[derive(Debug, Default)]
-pub struct RwLock<T>(InnerRwLock<T>);
+pub struct RwLock<T>(parking_lot::RwLock<T>);
 
 impl<T> RwLock<T> {
     #[inline(always)]
     pub fn new(inner: T) -> Self {
-        RwLock(InnerRwLock::new(inner))
+        RwLock(parking_lot::RwLock::new(inner))
     }
 
     #[inline(always)]
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index 402ec9827bb..d75af009850 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -106,6 +106,12 @@ pub struct WorkerLocal<T> {
     registry: Registry,
 }
 
+// This is safe because the `deref` call will return a reference to a `T` unique to each thread
+// or it will panic for threads without an associated local. So there isn't a need for `T` to do
+// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
+// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
+unsafe impl<T: Send> Sync for WorkerLocal<T> {}
+
 impl<T> WorkerLocal<T> {
     /// Creates a new worker local where the `initial` closure computes the
     /// value this worker local should take for each thread in the registry.
@@ -132,11 +138,6 @@ impl<T> Deref for WorkerLocal<T> {
     fn deref(&self) -> &T {
         // This is safe because `verify` will only return values less than
         // `self.registry.thread_limit` which is the size of the `self.locals` array.
-
-        // The `deref` call will return a reference to a `T` unique to each thread
-        // or it will panic for threads without an associated local. So there isn't a need for `T` to do
-        // it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
-        // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
         unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
     }
 }
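Moving the safety comment onto an explicit `unsafe impl Sync` makes the argument visible at the impl site: `deref` only ever hands a thread a reference to its own slot, so `T: Send` suffices and `T` needs no synchronization of its own. A sketch of what that permits; the function is hypothetical, and constructing the surrounding `Registry` is elided:

```rust
use std::cell::Cell;

use rustc_data_structures::sync::WorkerLocal;

// Cell<usize> is Send but not Sync, yet WorkerLocal<Cell<usize>> may be
// shared across the registry's threads: `deref` gives each thread a
// reference to its own Cell, never to another thread's slot.
fn bump(counter: &WorkerLocal<Cell<usize>>) {
    counter.set(counter.get() + 1); // derefs to this thread's slot
}
```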
