| author    | 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> | 2025-03-13 15:20:11 +0800 |
| committer | 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> | 2025-03-13 15:20:11 +0800 |
| commit    | 7bde17630561c8df94676cb6950b27c648de0e54 |
| tree      | 36f02f05011b3474033443ffd46313083c1da9f7 /compiler/rustc_data_structures/src |
| parent    | d4c5e752c51e61f9a34188d214d15647d7653dca |
| parent    | 8536f201ffdb2c24925d7f9e87996d7dca93428b |
Merge from rustc
Diffstat (limited to 'compiler/rustc_data_structures/src')
| -rw-r--r-- | compiler/rustc_data_structures/src/aligned.rs                |   8 |
| -rw-r--r-- | compiler/rustc_data_structures/src/captures.rs               |   8 |
| -rw-r--r-- | compiler/rustc_data_structures/src/flat_map_in_place.rs      |  42 |
| -rw-r--r-- | compiler/rustc_data_structures/src/graph/tests.rs            |   4 |
| -rw-r--r-- | compiler/rustc_data_structures/src/lib.rs                    |   2 |
| -rw-r--r-- | compiler/rustc_data_structures/src/marker.rs                 |  12 |
| -rw-r--r-- | compiler/rustc_data_structures/src/obligation_forest/mod.rs  |   3 |
| -rw-r--r-- | compiler/rustc_data_structures/src/profiling.rs              |   6 |
| -rw-r--r-- | compiler/rustc_data_structures/src/sharded.rs                | 112 |
| -rw-r--r-- | compiler/rustc_data_structures/src/sorted_map.rs             |  33 |
| -rw-r--r-- | compiler/rustc_data_structures/src/sync.rs                   |  69 |
| -rw-r--r-- | compiler/rustc_data_structures/src/sync/parallel.rs          |   2 |
| -rw-r--r-- | compiler/rustc_data_structures/src/sync/worker_local.rs      |  11 |
| -rw-r--r-- | compiler/rustc_data_structures/src/tagged_ptr/tests.rs       |   2 |
14 files changed, 202 insertions, 112 deletions
diff --git a/compiler/rustc_data_structures/src/aligned.rs b/compiler/rustc_data_structures/src/aligned.rs
index 0e5ecfd9bff..a636d09fcae 100644
--- a/compiler/rustc_data_structures/src/aligned.rs
+++ b/compiler/rustc_data_structures/src/aligned.rs
@@ -2,10 +2,8 @@ use std::ptr::Alignment;
 
 /// Returns the ABI-required minimum alignment of a type in bytes.
 ///
-/// This is equivalent to [`mem::align_of`], but also works for some unsized
+/// This is equivalent to [`align_of`], but also works for some unsized
 /// types (e.g. slices or rustc's `List`s).
-///
-/// [`mem::align_of`]: std::mem::align_of
 pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
     T::ALIGN
 }
@@ -15,10 +13,10 @@ pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
 /// # Safety
 ///
 /// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it
-/// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for
+/// is [`align_of::<Self>()`], for unsized types it depends on the type, for
 /// example `[T]` has alignment of `T`.
 ///
-/// [`mem::align_of<Self>()`]: std::mem::align_of
+/// [`align_of::<Self>()`]: align_of
 pub unsafe trait Aligned {
     /// Alignment of `Self`.
     const ALIGN: Alignment;
diff --git a/compiler/rustc_data_structures/src/captures.rs b/compiler/rustc_data_structures/src/captures.rs
deleted file mode 100644
index 677ccb31454..00000000000
--- a/compiler/rustc_data_structures/src/captures.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-/// "Signaling" trait used in impl trait to tag lifetimes that you may
-/// need to capture but don't really need for other reasons.
-/// Basically a workaround; see [this comment] for details.
-///
-/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999
-pub trait Captures<'a> {}
-
-impl<'a, T: ?Sized> Captures<'a> for T {}
diff --git a/compiler/rustc_data_structures/src/flat_map_in_place.rs b/compiler/rustc_data_structures/src/flat_map_in_place.rs
index e66b00b7557..6d718059f9c 100644
--- a/compiler/rustc_data_structures/src/flat_map_in_place.rs
+++ b/compiler/rustc_data_structures/src/flat_map_in_place.rs
@@ -1,4 +1,4 @@
-use std::ptr;
+use std::{mem, ptr};
 
 use smallvec::{Array, SmallVec};
 use thin_vec::ThinVec;
@@ -13,39 +13,44 @@ pub trait FlatMapInPlace<T>: Sized {
 // The implementation of this method is syntactically identical for all the
 // different vector types.
 macro_rules! flat_map_in_place {
-    () => {
+    ($vec:ident $( where T: $bound:path)?) => {
         fn flat_map_in_place<F, I>(&mut self, mut f: F)
         where
             F: FnMut(T) -> I,
             I: IntoIterator<Item = T>,
         {
+            struct LeakGuard<'a, T $(: $bound)?>(&'a mut $vec<T>);
+
+            impl<'a, T $(: $bound)?> Drop for LeakGuard<'a, T> {
+                fn drop(&mut self) {
+                    unsafe {
+                        self.0.set_len(0); // make sure we just leak elements in case of panic
+                    }
+                }
+            }
+
+            let this = LeakGuard(self);
+
             let mut read_i = 0;
             let mut write_i = 0;
             unsafe {
-                let mut old_len = self.len();
-                self.set_len(0); // make sure we just leak elements in case of panic
-
-                while read_i < old_len {
+                while read_i < this.0.len() {
                     // move the read_i'th item out of the vector and map it
                     // to an iterator
-                    let e = ptr::read(self.as_ptr().add(read_i));
+                    let e = ptr::read(this.0.as_ptr().add(read_i));
                     let iter = f(e).into_iter();
                     read_i += 1;
 
                     for e in iter {
                         if write_i < read_i {
-                            ptr::write(self.as_mut_ptr().add(write_i), e);
+                            ptr::write(this.0.as_mut_ptr().add(write_i), e);
                             write_i += 1;
                         } else {
                             // If this is reached we ran out of space
                             // in the middle of the vector.
                             // However, the vector is in a valid state here,
                             // so we just do a somewhat inefficient insert.
-                            self.set_len(old_len);
-                            self.insert(write_i, e);
-
-                            old_len = self.len();
-                            self.set_len(0);
+                            this.0.insert(write_i, e);
 
                             read_i += 1;
                             write_i += 1;
@@ -54,20 +59,23 @@ macro_rules! flat_map_in_place {
             }
 
             // write_i tracks the number of actually written new items.
-            self.set_len(write_i);
+            this.0.set_len(write_i);
+
+            // The ThinVec is in a sane state again. Prevent the LeakGuard from leaking the data.
+            mem::forget(this);
         }
     };
 }
 
 impl<T> FlatMapInPlace<T> for Vec<T> {
-    flat_map_in_place!();
+    flat_map_in_place!(Vec);
 }
 
 impl<T, A: Array<Item = T>> FlatMapInPlace<T> for SmallVec<A> {
-    flat_map_in_place!();
+    flat_map_in_place!(SmallVec where T: Array);
 }
 
 impl<T> FlatMapInPlace<T> for ThinVec<T> {
-    flat_map_in_place!();
+    flat_map_in_place!(ThinVec);
 }
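The `flat_map_in_place!` change above replaces manual `set_len` bookkeeping with a drop guard, so a panic inside `f` leaks the not-yet-rewritten elements instead of double-dropping them. A minimal standalone sketch of that drop-guard idiom, using a plain `Vec<u8>` and hypothetical names rather than the macro itself:

```rust
// On unwind, `ClearOnDrop` pretends the vector is empty: the elements leak,
// but they are never dropped twice and no moved-from slots are read.
struct ClearOnDrop<'a>(&'a mut Vec<u8>);

impl Drop for ClearOnDrop<'_> {
    fn drop(&mut self) {
        // SAFETY: shrinking the length is always valid for a Vec.
        unsafe { self.0.set_len(0) }
    }
}

fn main() {
    let mut v: Vec<u8> = vec![1, 2, 3];
    {
        let guard = ClearOnDrop(&mut v);
        // ... code that may panic while elements are in a moved-from state ...
        std::mem::forget(guard); // success path: defuse the guard, keep the elements
    }
    assert_eq!(v, [1, 2, 3]);
}
```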
diff --git a/compiler/rustc_data_structures/src/graph/tests.rs b/compiler/rustc_data_structures/src/graph/tests.rs
index b69b9dbc4a8..e48b9686c26 100644
--- a/compiler/rustc_data_structures/src/graph/tests.rs
+++ b/compiler/rustc_data_structures/src/graph/tests.rs
@@ -3,7 +3,7 @@ use std::cmp::max;
 use super::*;
 use crate::fx::FxHashMap;
 
-pub struct TestGraph {
+pub(super) struct TestGraph {
     num_nodes: usize,
     start_node: usize,
     successors: FxHashMap<usize, Vec<usize>>,
@@ -11,7 +11,7 @@ pub struct TestGraph {
 }
 
 impl TestGraph {
-    pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self {
+    pub(super) fn new(start_node: usize, edges: &[(usize, usize)]) -> Self {
         let mut graph = TestGraph {
             num_nodes: start_node + 1,
             start_node,
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 66d3834d857..865424fd6bb 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -24,7 +24,6 @@
 #![feature(dropck_eyepatch)]
 #![feature(extend_one)]
 #![feature(file_buffered)]
-#![feature(hash_raw_entry)]
 #![feature(macro_metavar_expr)]
 #![feature(map_try_insert)]
 #![feature(min_specialization)]
@@ -48,7 +47,6 @@ pub use rustc_index::static_assert_size;
 pub mod aligned;
 pub mod base_n;
 pub mod binary_search_util;
-pub mod captures;
 pub mod fingerprint;
 pub mod flat_map_in_place;
 pub mod flock;
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index 6ae97222f77..64c64bfa3c2 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -1,3 +1,5 @@
+use std::alloc::Allocator;
+
 #[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. \
     Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`")]
 // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()`
@@ -28,8 +30,8 @@ impls_dyn_send_neg!(
     [*const T where T: ?Sized]
     [*mut T where T: ?Sized]
     [std::ptr::NonNull<T> where T: ?Sized]
-    [std::rc::Rc<T> where T: ?Sized]
-    [std::rc::Weak<T> where T: ?Sized]
+    [std::rc::Rc<T, A> where T: ?Sized, A: Allocator]
+    [std::rc::Weak<T, A> where T: ?Sized, A: Allocator]
     [std::sync::MutexGuard<'_, T> where T: ?Sized]
     [std::sync::RwLockReadGuard<'_, T> where T: ?Sized]
     [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized]
@@ -74,6 +76,7 @@ impl_dyn_send!(
     [crate::sync::RwLock<T> where T: DynSend]
     [crate::tagged_ptr::TaggedRef<'a, P, T> where 'a, P: Sync, T: Send + crate::tagged_ptr::Tag]
     [rustc_arena::TypedArena<T> where T: DynSend]
+    [hashbrown::HashTable<T> where T: DynSend]
     [indexmap::IndexSet<V, S> where V: DynSend, S: DynSend]
     [indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
     [thin_vec::ThinVec<T> where T: DynSend]
@@ -96,8 +99,8 @@ impls_dyn_sync_neg!(
     [std::cell::RefCell<T> where T: ?Sized]
     [std::cell::UnsafeCell<T> where T: ?Sized]
     [std::ptr::NonNull<T> where T: ?Sized]
-    [std::rc::Rc<T> where T: ?Sized]
-    [std::rc::Weak<T> where T: ?Sized]
+    [std::rc::Rc<T, A> where T: ?Sized, A: Allocator]
+    [std::rc::Weak<T, A> where T: ?Sized, A: Allocator]
     [std::cell::OnceCell<T> where T]
     [std::sync::mpsc::Receiver<T> where T]
     [std::sync::mpsc::Sender<T> where T]
@@ -151,6 +154,7 @@ impl_dyn_sync!(
     [crate::tagged_ptr::TaggedRef<'a, P, T> where 'a, P: Sync, T: Sync + crate::tagged_ptr::Tag]
     [parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend]
     [parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync]
+    [hashbrown::HashTable<T> where T: DynSync]
     [indexmap::IndexSet<V, S> where V: DynSync, S: DynSync]
     [indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
     [smallvec::SmallVec<A> where A: smallvec::Array + DynSync]
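For context on the `marker.rs` hunks: `DynSend`/`DynSync` are auto traits, and each entry in macros like `impls_dyn_send_neg!` expands to a negative impl, which is why `Rc`'s new allocator parameter `A` must now be spelled out. A nightly-only sketch of the pattern (heavily simplified; not the real macro expansion):

```rust
#![feature(auto_traits, negative_impls)]

// An auto trait is implemented for every type by default ...
unsafe auto trait DynSend {}

// ... unless a type explicitly opts out, as `Rc` does here.
impl<T: ?Sized> !DynSend for std::rc::Rc<T> {}

fn require_dyn_send<T: DynSend>(_: &T) {}

fn main() {
    require_dyn_send(&vec![1, 2, 3]); // fine: auto-implemented
    // require_dyn_send(&std::rc::Rc::new(1)); // error: opted out above
}
```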
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
index 78d69a66edc..f63b201742d 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -313,8 +313,9 @@ pub struct Error<O, E> {
 mod helper {
     use super::*;
 
-    pub type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>;
+    pub(super) type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>;
 
     impl<O: ForestObligation> ObligationForest<O> {
+        #[cfg_attr(not(bootstrap), define_opaque(ObligationTreeIdGenerator))]
         pub fn new() -> ObligationForest<O> {
             ObligationForest {
                 nodes: vec![],
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 39db551adfb..60f007083ba 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -863,15 +863,13 @@ fn get_thread_id() -> u32 {
 cfg_match! {
     windows => {
         pub fn get_resident_set_size() -> Option<usize> {
-            use std::mem;
-
             use windows::{
                 Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
                 Win32::System::Threading::GetCurrentProcess,
             };
 
             let mut pmc = PROCESS_MEMORY_COUNTERS::default();
-            let pmc_size = mem::size_of_val(&pmc);
+            let pmc_size = size_of_val(&pmc);
             unsafe {
                 K32GetProcessMemoryInfo(
                     GetCurrentProcess(),
@@ -889,7 +887,7 @@ cfg_match! {
         pub fn get_resident_set_size() -> Option<usize> {
             use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
             use std::mem;
-            const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
+            const PROC_TASKINFO_SIZE: c_int = size_of::<proc_taskinfo>() as c_int;
 
             unsafe {
                 let mut info: proc_taskinfo = mem::zeroed();
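The `profiling.rs` hunks work because `size_of`, `size_of_val`, `align_of`, and `align_of_val` have been in the std prelude since Rust 1.80, so the `std::mem::` prefix is now redundant. A tiny illustration (the `pmc` buffer is a hypothetical stand-in for `PROCESS_MEMORY_COUNTERS`, not the real Windows struct):

```rust
fn main() {
    let pmc = [0u8; 72]; // stand-in for a FFI struct passed by reference
    assert_eq!(size_of_val(&pmc), 72); // no `mem::` prefix needed since 1.80
    assert_eq!(size_of::<u64>(), 8);
}
```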
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 65488c73d3c..49cafcb17a0 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,11 +1,11 @@
 use std::borrow::Borrow;
-use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
 use std::{iter, mem};
 
 use either::Either;
+use hashbrown::hash_table::{Entry, HashTable};
 
-use crate::fx::{FxHashMap, FxHasher};
+use crate::fx::FxHasher;
 use crate::sync::{CacheAligned, Lock, LockGuard, Mode, is_dyn_thread_safe};
 
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
@@ -43,10 +43,10 @@ impl<T> Sharded<T> {
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.get_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -56,12 +56,12 @@ impl<T> Sharded<T> {
     }
 
     #[inline]
-    pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
             Self::Shards(shards) => {
                 // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
-                unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+                unsafe { &shards.get_unchecked(i & (SHARDS - 1)).0 }
             }
         }
     }
@@ -69,7 +69,7 @@ impl<T> Sharded<T> {
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -79,7 +79,7 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -91,7 +91,7 @@ impl<T> Sharded<T> {
 
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_index(&self, i: usize) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -109,7 +109,7 @@ impl<T> Sharded<T> {
                 // always inbounds.
                 // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
                 // the lock thus `might_be_dyn_thread_safe` was also true.
-                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+                unsafe { shards.get_unchecked(i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
             }
         }
     }
@@ -140,14 +140,67 @@ pub fn shards() -> usize {
     1
 }
 
-pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
+pub type ShardedHashMap<K, V> = Sharded<HashTable<(K, V)>>;
 
 impl<K: Eq, V> ShardedHashMap<K, V> {
+    pub fn with_capacity(cap: usize) -> Self {
+        Self::new(|| HashTable::with_capacity(cap))
+    }
     pub fn len(&self) -> usize {
         self.lock_shards().map(|shard| shard.len()).sum()
     }
 }
 
+impl<K: Eq + Hash, V> ShardedHashMap<K, V> {
+    #[inline]
+    pub fn get<Q>(&self, key: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+        V: Clone,
+    {
+        let hash = make_hash(key);
+        let shard = self.lock_shard_by_hash(hash);
+        let (_, value) = shard.find(hash, |(k, _)| k.borrow() == key)?;
+        Some(value.clone())
+    }
+
+    #[inline]
+    pub fn get_or_insert_with(&self, key: K, default: impl FnOnce() -> V) -> V
+    where
+        V: Copy,
+    {
+        let hash = make_hash(&key);
+        let mut shard = self.lock_shard_by_hash(hash);
+
+        match table_entry(&mut shard, hash, &key) {
+            Entry::Occupied(e) => e.get().1,
+            Entry::Vacant(e) => {
+                let value = default();
+                e.insert((key, value));
+                value
+            }
+        }
+    }
+
+    #[inline]
+    pub fn insert(&self, key: K, value: V) -> Option<V> {
+        let hash = make_hash(&key);
+        let mut shard = self.lock_shard_by_hash(hash);
+
+        match table_entry(&mut shard, hash, &key) {
+            Entry::Occupied(e) => {
+                let previous = mem::replace(&mut e.into_mut().1, value);
+                Some(previous)
+            }
+            Entry::Vacant(e) => {
+                e.insert((key, value));
+                None
+            }
+        }
+    }
+}
+
 impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
     #[inline]
     pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
@@ -157,13 +210,12 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
     {
         let hash = make_hash(value);
         let mut shard = self.lock_shard_by_hash(hash);
-        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
 
-        match entry {
-            RawEntryMut::Occupied(e) => *e.key(),
-            RawEntryMut::Vacant(e) => {
+        match table_entry(&mut shard, hash, value) {
+            Entry::Occupied(e) => e.get().0,
+            Entry::Vacant(e) => {
                 let v = make();
-                e.insert_hashed_nocheck(hash, v, ());
+                e.insert((v, ()));
                 v
             }
         }
@@ -177,13 +229,12 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
     {
         let hash = make_hash(&value);
         let mut shard = self.lock_shard_by_hash(hash);
-        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
 
-        match entry {
-            RawEntryMut::Occupied(e) => *e.key(),
-            RawEntryMut::Vacant(e) => {
+        match table_entry(&mut shard, hash, &value) {
+            Entry::Occupied(e) => e.get().0,
+            Entry::Vacant(e) => {
                 let v = make(value);
-                e.insert_hashed_nocheck(hash, v, ());
+                e.insert((v, ()));
                 v
             }
         }
@@ -200,17 +251,30 @@ impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
         let hash = make_hash(&value);
         let shard = self.lock_shard_by_hash(hash);
         let value = value.into_pointer();
-        shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
+        shard.find(hash, |(k, ())| k.into_pointer() == value).is_some()
     }
 }
 
 #[inline]
-pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
+fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
     let mut state = FxHasher::default();
     val.hash(&mut state);
     state.finish()
 }
 
+#[inline]
+fn table_entry<'a, K, V, Q>(
+    table: &'a mut HashTable<(K, V)>,
+    hash: u64,
+    key: &Q,
+) -> Entry<'a, (K, V)>
+where
+    K: Hash + Borrow<Q>,
+    Q: ?Sized + Eq,
+{
+    table.entry(hash, move |(k, _)| k.borrow() == key, |(k, _)| make_hash(k))
+}
+
 /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
 /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
 /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
@@ -218,7 +282,7 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
 /// consistently for each `Sharded` instance.
 #[inline]
 fn get_shard_hash(hash: u64) -> usize {
-    let hash_len = mem::size_of::<usize>();
+    let hash_len = size_of::<usize>();
     // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
     // hashbrown also uses the lowest bits, so we can't use those
     (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
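The `sharded.rs` rewrite is the reason `#![feature(hash_raw_entry)]` could be dropped from `lib.rs`: it moves from std's unstable raw-entry API to hashbrown's `HashTable`, which never hashes keys itself. Callers pass the precomputed hash plus equality and rehash closures, exactly as the new `table_entry` helper does. A self-contained sketch of that pattern (recent hashbrown; std's `DefaultHasher` stands in for `FxHasher`):

```rust
use std::hash::{DefaultHasher, Hash, Hasher};

use hashbrown::hash_table::{Entry, HashTable};

fn make_hash<K: Hash + ?Sized>(key: &K) -> u64 {
    let mut h = DefaultHasher::new();
    key.hash(&mut h);
    h.finish()
}

fn main() {
    // The table stores (K, V) pairs; every operation takes the hash and
    // an equality closure, so the key type never needs a Hash bound here.
    let mut table: HashTable<(String, u32)> = HashTable::new();

    let key = "counter".to_string();
    let hash = make_hash(&key);
    match table.entry(hash, |(k, _)| *k == key, |(k, _)| make_hash(k)) {
        Entry::Occupied(mut e) => e.get_mut().1 += 1,
        Entry::Vacant(e) => {
            e.insert((key.clone(), 1));
        }
    }

    assert_eq!(table.find(hash, |(k, _)| *k == key).map(|(_, v)| *v), Some(1));
}
```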
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 066ea03b4ac..a01a420dfbd 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -1,4 +1,5 @@
 use std::borrow::Borrow;
+use std::cmp::Ordering;
 use std::fmt::Debug;
 use std::mem;
 use std::ops::{Bound, Index, IndexMut, RangeBounds};
@@ -156,6 +157,38 @@ impl<K: Ord, V> SortedMap<K, V> {
         &self.data[start..end]
     }
 
+    /// `sm.range_is_empty(r)` == `sm.range(r).is_empty()`, but is faster.
+    #[inline]
+    pub fn range_is_empty<R>(&self, range: R) -> bool
+    where
+        R: RangeBounds<K>,
+    {
+        // `range` must (via `range_slice_indices`) search for the start and
+        // end separately. But here we can do a single binary search for the
+        // entire range. If a single `x` matching `range` is found then the
+        // range is *not* empty.
+        self.data
+            .binary_search_by(|(x, _)| {
+                // Is `x` below `range`?
+                match range.start_bound() {
+                    Bound::Included(start) if x < start => return Ordering::Less,
+                    Bound::Excluded(start) if x <= start => return Ordering::Less,
+                    _ => {}
+                };
+
+                // Is `x` above `range`?
+                match range.end_bound() {
+                    Bound::Included(end) if x > end => return Ordering::Greater,
+                    Bound::Excluded(end) if x >= end => return Ordering::Greater,
+                    _ => {}
+                };
+
+                // `x` must be within `range`.
+                Ordering::Equal
+            })
+            .is_err()
+    }
+
     #[inline]
     pub fn remove_range<R>(&mut self, range: R)
     where
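`range_is_empty` folds both bound checks into one `binary_search_by`: every element inside the range compares `Equal`, so the search succeeds iff the range contains at least one element. The same trick on a plain sorted slice (hypothetical helper name; inclusive bounds only, for brevity):

```rust
use std::cmp::Ordering;

// `data` must be sorted ascending, as `binary_search_by` requires.
fn range_has_element(data: &[i32], lo: i32, hi: i32) -> bool {
    data.binary_search_by(|x| {
        if *x < lo {
            Ordering::Less // below the range: search right
        } else if *x > hi {
            Ordering::Greater // above the range: search left
        } else {
            Ordering::Equal // inside [lo, hi]: found a witness
        }
    })
    .is_ok()
}

fn main() {
    let data = [1, 3, 5, 9];
    assert!(range_has_element(&data, 4, 6)); // 5 lies in [4, 6]
    assert!(!range_has_element(&data, 6, 8)); // nothing in [6, 8]
}
```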
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 37b54fe38ff..616a18a72ab 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -18,42 +18,54 @@
 //!
 //! | Type                    | Serial version      | Parallel version                |
 //! | ----------------------- | ------------------- | ------------------------------- |
-//! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
-//! |                         |                     |                                 |
 //! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
 //! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
 //! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
-//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
 //! |                         |                     |                                 |
 //! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
 //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
 //! of a `RefCell`. This is appropriate when interior mutability is not
 //! required.
-//!
-//! [^2]: `MTRef`, `MTLockRef` are type aliases.
 
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 
-pub use crate::marker::*;
+pub use parking_lot::{
+    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
+    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
+};
 
-mod lock;
+pub use self::atomic::AtomicU64;
+pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
 #[doc(no_inline)]
-pub use lock::{Lock, LockGuard, Mode};
-
-mod worker_local;
-pub use worker_local::{Registry, WorkerLocal};
+pub use self::lock::{Lock, LockGuard, Mode};
+pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+pub use self::parallel::{
+    join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in,
+};
+pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec};
+pub use self::worker_local::{Registry, WorkerLocal};
+pub use crate::marker::*;
 
+mod freeze;
+mod lock;
 mod parallel;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
-pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
-
 mod vec;
+mod worker_local;
 
-mod freeze;
-pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+/// Keep the conditional imports together in a submodule, so that import-sorting
+/// doesn't split them up.
+mod atomic {
+    // Most hosts can just use a regular AtomicU64.
+    #[cfg(target_has_atomic = "64")]
+    pub use std::sync::atomic::AtomicU64;
+
+    // Some 32-bit hosts don't have AtomicU64, so use a fallback.
+    #[cfg(not(target_has_atomic = "64"))]
+    pub use portable_atomic::AtomicU64;
+}
 
 mod mode {
     use std::sync::atomic::{AtomicU8, Ordering};
@@ -76,7 +88,7 @@ mod mode {
 
     // Whether thread safety might be enabled.
     #[inline]
-    pub fn might_be_dyn_thread_safe() -> bool {
+    pub(super) fn might_be_dyn_thread_safe() -> bool {
         DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
     }
 
@@ -97,21 +109,6 @@ mod mode {
 
 // FIXME(parallel_compiler): Get rid of these aliases across the compiler.
 
-pub use std::sync::OnceLock;
-// Use portable AtomicU64 for targets without native 64-bit atomics
-#[cfg(target_has_atomic = "64")]
-pub use std::sync::atomic::AtomicU64;
-
-pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-pub use parking_lot::{
-    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
-    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
-};
-#[cfg(not(target_has_atomic = "64"))]
-pub use portable_atomic::AtomicU64;
-
-pub type LRef<'a, T> = &'a T;
-
 #[derive(Debug, Default)]
 pub struct MTLock<T>(Lock<T>);
 
@@ -142,14 +139,10 @@ impl<T> MTLock<T> {
     }
 }
 
-use parking_lot::RwLock as InnerRwLock;
-
 /// This makes locks panic if they are already held.
 /// It is only useful when you are running in a single thread
 const ERROR_CHECKING: bool = false;
 
-pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
-
 #[derive(Default)]
 #[repr(align(64))]
 pub struct CacheAligned<T>(pub T);
@@ -167,12 +160,12 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
 }
 
 #[derive(Debug, Default)]
-pub struct RwLock<T>(InnerRwLock<T>);
+pub struct RwLock<T>(parking_lot::RwLock<T>);
 
 impl<T> RwLock<T> {
     #[inline(always)]
     pub fn new(inner: T) -> Self {
-        RwLock(InnerRwLock::new(inner))
+        RwLock(parking_lot::RwLock::new(inner))
     }
 
     #[inline(always)]
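The new `sync::atomic` submodule keeps the two `AtomicU64` sources behind a single import path. The same conditional-fallback pattern in a standalone crate would look like the sketch below; `portable-atomic` is an external dependency that is only actually needed on targets without native 64-bit atomics, so on ordinary hosts this compiles with std alone:

```rust
mod atomic {
    // Native 64-bit atomics where the target supports them ...
    #[cfg(target_has_atomic = "64")]
    pub use std::sync::atomic::AtomicU64;

    // ... otherwise a lock- or intrinsics-based polyfill from the
    // `portable-atomic` crate (add it to Cargo.toml for such targets).
    #[cfg(not(target_has_atomic = "64"))]
    pub use portable_atomic::AtomicU64;
}

use std::sync::atomic::Ordering;

fn main() {
    // Either way, callers see one type with the same API.
    let counter = atomic::AtomicU64::new(0);
    counter.fetch_add(1, Ordering::Relaxed);
    assert_eq!(counter.load(Ordering::Relaxed), 1);
}
```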
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
index 1ba631b8623..8ef8a3f3585 100644
--- a/compiler/rustc_data_structures/src/sync/parallel.rs
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -46,7 +46,7 @@ pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
     ret
 }
 
-pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
 where
     A: FnOnce() -> RA,
     B: FnOnce() -> RB,
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index 402ec9827bb..d75af009850 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -106,6 +106,12 @@ pub struct WorkerLocal<T> {
     registry: Registry,
 }
 
+// This is safe because the `deref` call will return a reference to a `T` unique to each thread
+// or it will panic for threads without an associated local. So there isn't a need for `T` to do
+// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
+// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
+unsafe impl<T: Send> Sync for WorkerLocal<T> {}
+
 impl<T> WorkerLocal<T> {
     /// Creates a new worker local where the `initial` closure computes the
     /// value this worker local should take for each thread in the registry.
@@ -132,11 +138,6 @@ impl<T> Deref for WorkerLocal<T> {
     fn deref(&self) -> &T {
         // This is safe because `verify` will only return values less than
         // `self.registry.thread_limit` which is the size of the `self.locals` array.
-
-        // The `deref` call will return a reference to a `T` unique to each thread
-        // or it will panic for threads without an associated local. So there isn't a need for `T` to do
-        // it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
-        // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
        unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
     }
 }
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs
index 9c1e4cefa69..85b21a7c8ec 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs
@@ -7,7 +7,7 @@ use crate::stable_hasher::{HashStable, StableHasher};
 
 /// A tag type used in [`TaggedRef`] tests.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Tag2 {
+enum Tag2 {
     B00 = 0b00,
     B01 = 0b01,
     B10 = 0b10,
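As background for the `tagged_ptr` test tweak: a 2-bit tag like `Tag2` fits in the low bits of any pointer whose pointee is aligned to at least 4 bytes, which is exactly what `TaggedRef` exploits. A rough standalone illustration of that packing, done by hand rather than through the checked `TaggedRef` API:

```rust
fn main() {
    let b = Box::new(0u32); // u32 is 4-byte aligned => low 2 bits of the address are 0
    let addr = Box::into_raw(b) as usize;
    assert_eq!(addr & 0b11, 0);

    let tagged = addr | 0b10; // stash a 2-bit tag (think Tag2::B10) in the low bits
    let (ptr, tag) = ((tagged & !0b11) as *mut u32, tagged & 0b11);
    assert_eq!(tag, 0b10);

    // SAFETY: `ptr` is the untagged pointer we got from Box::into_raw above.
    unsafe { drop(Box::from_raw(ptr)) } // reclaim the allocation
}
```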
