| author | Boxy <rust@boxyuwu.dev> | 2025-02-25 21:27:44 +0000 |
|---|---|---|
| committer | Boxy <rust@boxyuwu.dev> | 2025-02-25 21:27:44 +0000 |
| commit | d9683df7c2f6d4141b1321e27635d2ce3167eaa4 (patch) | |
| tree | dce0d46d1b7d624ec9b9b09b2c1854f6245a5ff4 | /compiler/rustc_data_structures |
| parent | 46392d1661540e256fd9573d8f06c2784a58c983 (diff) | |
| parent | 4ecd70ddd1039a3954056c1071e40278048476fa (diff) | |
Merge from rustc
Diffstat (limited to 'compiler/rustc_data_structures')
22 files changed, 40 insertions, 328 deletions
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index a8f83ca13e2..bdf5494f210 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "rustc_data_structures"
 version = "0.0.0"
-edition = "2021"
+edition = "2024"
 
 [dependencies]
 # tidy-alphabetical-start
@@ -18,6 +18,7 @@ rustc-rayon = { version = "0.5.1", features = ["indexmap"] }
 rustc-stable-hash = { version = "0.1.0", features = ["nightly"] }
 rustc_arena = { path = "../rustc_arena" }
 rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hashes = { path = "../rustc_hashes" }
 rustc_index = { path = "../rustc_index", package = "rustc_index" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_serialize = { path = "../rustc_serialize" }
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index 16c66824c5b..c7c0d0ab072 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -1,10 +1,9 @@
 use std::hash::{Hash, Hasher};
 
+use rustc_hashes::Hash64;
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
 
-use crate::stable_hasher::{
-    FromStableHash, Hash64, StableHasherHash, impl_stable_traits_for_trivial_type,
-};
+use crate::stable_hasher::{FromStableHash, StableHasherHash, impl_stable_traits_for_trivial_type};
 
 #[cfg(test)]
 mod tests;
diff --git a/compiler/rustc_data_structures/src/flock.rs b/compiler/rustc_data_structures/src/flock.rs
index 292a33d5646..d423d8acefd 100644
--- a/compiler/rustc_data_structures/src/flock.rs
+++ b/compiler/rustc_data_structures/src/flock.rs
@@ -4,31 +4,6 @@
 //! green/native threading. This is just a bare-bones enough solution for
 //! librustdoc, it is not production quality at all.
 
-#[cfg(bootstrap)]
-cfg_match! {
-    cfg(target_os = "linux") => {
-        mod linux;
-        use linux as imp;
-    }
-    cfg(target_os = "redox") => {
-        mod linux;
-        use linux as imp;
-    }
-    cfg(unix) => {
-        mod unix;
-        use unix as imp;
-    }
-    cfg(windows) => {
-        mod windows;
-        use self::windows as imp;
-    }
-    _ => {
-        mod unsupported;
-        use unsupported as imp;
-    }
-}
-
-#[cfg(not(bootstrap))]
 cfg_match! {
     target_os = "linux" => {
         mod linux;
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
index 7724e9347d8..a80365938b9 100644
--- a/compiler/rustc_data_structures/src/graph/implementation/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -193,11 +193,11 @@ impl<N: Debug, E: Debug> Graph<N, E> {
         AdjacentEdges { graph: self, direction, next: first_edge }
     }
 
-    pub fn successor_nodes(&self, source: NodeIndex) -> impl Iterator<Item = NodeIndex> + '_ {
+    pub fn successor_nodes(&self, source: NodeIndex) -> impl Iterator<Item = NodeIndex> {
         self.outgoing_edges(source).targets()
     }
 
-    pub fn predecessor_nodes(&self, target: NodeIndex) -> impl Iterator<Item = NodeIndex> + '_ {
+    pub fn predecessor_nodes(&self, target: NodeIndex) -> impl Iterator<Item = NodeIndex> {
         self.incoming_edges(target).sources()
     }
 
@@ -255,11 +255,11 @@ pub struct AdjacentEdges<'g, N, E> {
 }
 
 impl<'g, N: Debug, E: Debug> AdjacentEdges<'g, N, E> {
-    fn targets(self) -> impl Iterator<Item = NodeIndex> + 'g {
+    fn targets(self) -> impl Iterator<Item = NodeIndex> {
         self.map(|(_, edge)| edge.target)
     }
 
-    fn sources(self) -> impl Iterator<Item = NodeIndex> + 'g {
+    fn sources(self) -> impl Iterator<Item = NodeIndex> {
         self.map(|(_, edge)| edge.source)
     }
 }
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs
index 93f6192b10b..e7c4ea3daae 100644
--- a/compiler/rustc_data_structures/src/graph/scc/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs
@@ -133,7 +133,7 @@ impl<N: Idx, S: Idx + Ord, A: Annotation> Sccs<N, S, A> {
     /// meaning that if `S1 -> S2`, we will visit `S2` first and `S1` after.
     /// This is convenient when the edges represent dependencies: when you visit
     /// `S1`, the value for `S2` will already have been computed.
-    pub fn all_sccs(&self) -> impl Iterator<Item = S> {
+    pub fn all_sccs(&self) -> impl Iterator<Item = S> + 'static {
        (0..self.scc_data.len()).map(S::new)
     }
 
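The edition bump in `Cargo.toml` above is what drives the `impl Trait` signature churn throughout this diff: Rust 2024 changed the default lifetime-capture rules for return-position `impl Trait`. Below is a minimal sketch of both directions of the change (the `Numbers` type is invented for illustration and is not from the patch; it assumes edition 2024):

```rust
// Edition 2024 sketch of the capture-rule change behind the `+ '_` removals.
struct Numbers {
    data: Vec<u32>,
}

impl Numbers {
    // Edition 2021 needed `-> impl Iterator<Item = u32> + '_` here because the
    // iterator borrows `self`; edition 2024 captures `&self` implicitly, so the
    // bound is redundant, as in `successor_nodes` above.
    fn iter(&self) -> impl Iterator<Item = u32> {
        self.data.iter().copied()
    }

    // The opposite direction: edition 2024 would otherwise assume `&self` may
    // be captured, so a borrow-free iterator opts out with an explicit bound,
    // as `all_sccs` above does with `+ 'static`.
    fn indices(&self) -> impl Iterator<Item = usize> + 'static {
        0..self.data.len()
    }
}

fn main() {
    let n = Numbers { data: vec![1, 2, 3] };
    assert_eq!(n.iter().sum::<u32>(), 6);
    assert_eq!(n.indices().count(), 3);
}
```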
diff --git a/compiler/rustc_data_structures/src/hashes.rs b/compiler/rustc_data_structures/src/hashes.rs
deleted file mode 100644
index 8f4639fc2e6..00000000000
--- a/compiler/rustc_data_structures/src/hashes.rs
+++ /dev/null
@@ -1,147 +0,0 @@
-//! rustc encodes a lot of hashes. If hashes are stored as `u64` or `u128`, a `derive(Encodable)`
-//! will apply varint encoding to the hashes, which is less efficient than directly encoding the 8
-//! or 16 bytes of the hash.
-//!
-//! The types in this module represent 64-bit or 128-bit hashes produced by a `StableHasher`.
-//! `Hash64` and `Hash128` expose some utility functions to encourage users to not extract the inner
-//! hash value as an integer type and accidentally apply varint encoding to it.
-//!
-//! In contrast with `Fingerprint`, users of these types cannot and should not attempt to construct
-//! and decompose these types into constituent pieces. The point of these types is only to
-//! connect the fact that they can only be produced by a `StableHasher` to their
-//! `Encode`/`Decode` impls.
-
-use std::fmt;
-use std::ops::BitXorAssign;
-
-use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
-
-use crate::stable_hasher::{FromStableHash, StableHasherHash};
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
-pub struct Hash64 {
-    inner: u64,
-}
-
-impl Hash64 {
-    pub const ZERO: Hash64 = Hash64 { inner: 0 };
-
-    #[inline]
-    pub fn new(n: u64) -> Self {
-        Self { inner: n }
-    }
-
-    #[inline]
-    pub fn as_u64(self) -> u64 {
-        self.inner
-    }
-}
-
-impl BitXorAssign<u64> for Hash64 {
-    #[inline]
-    fn bitxor_assign(&mut self, rhs: u64) {
-        self.inner ^= rhs;
-    }
-}
-
-impl<S: Encoder> Encodable<S> for Hash64 {
-    #[inline]
-    fn encode(&self, s: &mut S) {
-        s.emit_raw_bytes(&self.inner.to_le_bytes());
-    }
-}
-
-impl<D: Decoder> Decodable<D> for Hash64 {
-    #[inline]
-    fn decode(d: &mut D) -> Self {
-        Self { inner: u64::from_le_bytes(d.read_raw_bytes(8).try_into().unwrap()) }
-    }
-}
-
-impl FromStableHash for Hash64 {
-    type Hash = StableHasherHash;
-
-    #[inline]
-    fn from(StableHasherHash([_0, __1]): Self::Hash) -> Self {
-        Self { inner: _0 }
-    }
-}
-
-impl fmt::Debug for Hash64 {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-impl fmt::LowerHex for Hash64 {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::LowerHex::fmt(&self.inner, f)
-    }
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
-pub struct Hash128 {
-    inner: u128,
-}
-
-// We expect Hash128 to be well mixed. So there's no point in hashing both parts.
-//
-// This also allows using Hash128-containing types in UnHash-based hashmaps, which would otherwise
-// debug_assert! that we're hashing more than a single u64.
-impl std::hash::Hash for Hash128 {
-    fn hash<H: std::hash::Hasher>(&self, h: &mut H) {
-        h.write_u64(self.truncate().as_u64());
-    }
-}
-
-impl Hash128 {
-    #[inline]
-    pub fn truncate(self) -> Hash64 {
-        Hash64 { inner: self.inner as u64 }
-    }
-
-    #[inline]
-    pub fn wrapping_add(self, other: Self) -> Self {
-        Self { inner: self.inner.wrapping_add(other.inner) }
-    }
-
-    #[inline]
-    pub fn as_u128(self) -> u128 {
-        self.inner
-    }
-}
-
-impl<S: Encoder> Encodable<S> for Hash128 {
-    #[inline]
-    fn encode(&self, s: &mut S) {
-        s.emit_raw_bytes(&self.inner.to_le_bytes());
-    }
-}
-
-impl<D: Decoder> Decodable<D> for Hash128 {
-    #[inline]
-    fn decode(d: &mut D) -> Self {
-        Self { inner: u128::from_le_bytes(d.read_raw_bytes(16).try_into().unwrap()) }
-    }
-}
-
-impl FromStableHash for Hash128 {
-    type Hash = StableHasherHash;
-
-    #[inline]
-    fn from(StableHasherHash([_0, _1]): Self::Hash) -> Self {
-        Self { inner: u128::from(_0) | (u128::from(_1) << 64) }
-    }
-}
-
-impl fmt::Debug for Hash128 {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-impl fmt::LowerHex for Hash128 {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::LowerHex::fmt(&self.inner, f)
-    }
-}
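The deleted module's doc comment (the types move to the new `rustc_hashes` crate) explains why these hashes encode as raw bytes rather than through `derive(Encodable)`. The size argument is easy to see concretely; here is a self-contained sketch with a LEB128-style byte counter, which is an assumption standing in for `rustc_serialize`'s actual varint scheme, not its real implementation:

```rust
// Count how many bytes a LEB128-style varint encoding of `v` would need.
fn varint_len(mut v: u64) -> usize {
    let mut len = 1;
    while v >= 0x80 {
        v >>= 7; // each output byte carries 7 payload bits
        len += 1;
    }
    len
}

fn main() {
    // A well-mixed hash almost always has high bits set...
    let hash: u64 = 0x9e37_79b9_7f4a_7c15;
    // ...so raw little-endian bytes (what `Hash64::encode` emits) win:
    let raw = hash.to_le_bytes();
    println!("raw: {} bytes, varint: {} bytes", raw.len(), varint_len(hash));
    // prints: raw: 8 bytes, varint: 10 bytes
}
```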
diff --git a/compiler/rustc_data_structures/src/intern.rs b/compiler/rustc_data_structures/src/intern.rs
index 850b052f564..8079212fac5 100644
--- a/compiler/rustc_data_structures/src/intern.rs
+++ b/compiler/rustc_data_structures/src/intern.rs
@@ -92,7 +92,10 @@ impl<'a, T: Ord> Ord for Interned<'a, T> {
     }
 }
 
-impl<'a, T> Hash for Interned<'a, T> {
+impl<'a, T> Hash for Interned<'a, T>
+where
+    T: Hash,
+{
     #[inline]
     fn hash<H: Hasher>(&self, s: &mut H) {
         // Pointer hashing is sufficient, due to the uniqueness constraint.
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 6ef73debadd..66d3834d857 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -84,7 +84,6 @@ pub mod vec_cache;
 pub mod work_queue;
 
 mod atomic_ref;
-mod hashes;
 
 /// This calls the passed function while ensuring it won't be inlined into the caller.
 #[inline(never)]
diff --git a/compiler/rustc_data_structures/src/owned_slice.rs b/compiler/rustc_data_structures/src/owned_slice.rs
index 17c48aee6fa..0c00e4f4a4b 100644
--- a/compiler/rustc_data_structures/src/owned_slice.rs
+++ b/compiler/rustc_data_structures/src/owned_slice.rs
@@ -2,11 +2,6 @@ use std::borrow::Borrow;
 use std::ops::Deref;
 use std::sync::Arc;
 
-// Use our fake Send/Sync traits when on not parallel compiler,
-// so that `OwnedSlice` only implements/requires Send/Sync
-// for parallel compiler builds.
-use crate::sync;
-
 /// An owned slice.
 ///
 /// This is similar to `Arc<[u8]>` but allows slicing and using anything as the
@@ -34,7 +29,7 @@ pub struct OwnedSlice {
     //                      \/
     //       ⊂(´・◡・⊂ )∘˚˳° (I am the phantom remnant of #97770)
     #[expect(dead_code)]
-    owner: Arc<dyn sync::Send + sync::Sync>,
+    owner: Arc<dyn Send + Sync>,
 }
 
 /// Makes an [`OwnedSlice`] out of an `owner` and a `slicer` function.
@@ -61,7 +56,7 @@ pub struct OwnedSlice {
 /// ```
 pub fn slice_owned<O, F>(owner: O, slicer: F) -> OwnedSlice
 where
-    O: sync::Send + sync::Sync + 'static,
+    O: Send + Sync + 'static,
     F: FnOnce(&O) -> &[u8],
 {
     try_slice_owned(owner, |x| Ok::<_, !>(slicer(x))).into_ok()
@@ -72,7 +67,7 @@ where
 /// See [`slice_owned`] for the infallible version.
 pub fn try_slice_owned<O, F, E>(owner: O, slicer: F) -> Result<OwnedSlice, E>
 where
-    O: sync::Send + sync::Sync + 'static,
+    O: Send + Sync + 'static,
     F: FnOnce(&O) -> Result<&[u8], E>,
 {
     // We wrap the owner of the bytes in, so it doesn't move.
@@ -139,10 +134,10 @@ impl Borrow<[u8]> for OwnedSlice {
 }
 
 // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Send`
-unsafe impl sync::Send for OwnedSlice {}
+unsafe impl Send for OwnedSlice {}
 
 // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Sync`
-unsafe impl sync::Sync for OwnedSlice {}
+unsafe impl Sync for OwnedSlice {}
 
 #[cfg(test)]
 mod tests;
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 18e98e6c39f..39db551adfb 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -860,69 +860,6 @@ fn get_thread_id() -> u32 {
 }
 
 // Memory reporting
-#[cfg(bootstrap)]
-cfg_match! {
-    cfg(windows) => {
-        pub fn get_resident_set_size() -> Option<usize> {
-            use std::mem;
-
-            use windows::{
-                Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
-                Win32::System::Threading::GetCurrentProcess,
-            };
-
-            let mut pmc = PROCESS_MEMORY_COUNTERS::default();
-            let pmc_size = mem::size_of_val(&pmc);
-            unsafe {
-                K32GetProcessMemoryInfo(
-                    GetCurrentProcess(),
-                    &mut pmc,
-                    pmc_size as u32,
-                )
-            }
-            .ok()
-            .ok()?;
-
-            Some(pmc.WorkingSetSize)
-        }
-    }
-    cfg(target_os = "macos") => {
-        pub fn get_resident_set_size() -> Option<usize> {
-            use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
-            use std::mem;
-            const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
-
-            unsafe {
-                let mut info: proc_taskinfo = mem::zeroed();
-                let info_ptr = &mut info as *mut proc_taskinfo as *mut c_void;
-                let pid = getpid() as c_int;
-                let ret = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, info_ptr, PROC_TASKINFO_SIZE);
-                if ret == PROC_TASKINFO_SIZE {
-                    Some(info.pti_resident_size as usize)
-                } else {
-                    None
-                }
-            }
-        }
-    }
-    cfg(unix) => {
-        pub fn get_resident_set_size() -> Option<usize> {
-            let field = 1;
-            let contents = fs::read("/proc/self/statm").ok()?;
-            let contents = String::from_utf8(contents).ok()?;
-            let s = contents.split_whitespace().nth(field)?;
-            let npages = s.parse::<usize>().ok()?;
-            Some(npages * 4096)
-        }
-    }
-    _ => {
-        pub fn get_resident_set_size() -> Option<usize> {
-            None
-        }
-    }
-}
-
-#[cfg(not(bootstrap))]
 cfg_match! {
     windows => {
         pub fn get_resident_set_size() -> Option<usize> {
diff --git a/compiler/rustc_data_structures/src/sorted_map/index_map.rs b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
index e9a5fb51975..b38b09d60eb 100644
--- a/compiler/rustc_data_structures/src/sorted_map/index_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
@@ -84,7 +84,7 @@ impl<I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
     /// If there are multiple items that are equivalent to `key`, they will be yielded in
     /// insertion order.
     #[inline]
-    pub fn get_by_key(&self, key: K) -> impl Iterator<Item = &V> + '_ {
+    pub fn get_by_key(&self, key: K) -> impl Iterator<Item = &V> {
         self.get_by_key_enumerated(key).map(|(_, v)| v)
     }
 
@@ -94,7 +94,7 @@ impl<I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
     /// If there are multiple items that are equivalent to `key`, they will be yielded in
     /// insertion order.
     #[inline]
-    pub fn get_by_key_enumerated(&self, key: K) -> impl Iterator<Item = (I, &V)> + '_ {
+    pub fn get_by_key_enumerated(&self, key: K) -> impl Iterator<Item = (I, &V)> {
         let lower_bound = self.idx_sorted_by_item_key.partition_point(|&i| self.items[i].0 < key);
         self.idx_sorted_by_item_key[lower_bound..].iter().map_while(move |&i| {
             let (k, v) = &self.items[i];
@@ -147,7 +147,7 @@ impl<I: Idx, K: Ord, V> FromIterator<(K, V)> for SortedIndexMultiMap<I, K, V> {
     where
         J: IntoIterator<Item = (K, V)>,
     {
-        let items = IndexVec::from_iter(iter);
+        let items = IndexVec::<I, _>::from_iter(iter);
         let mut idx_sorted_by_item_key: Vec<_> = items.indices().collect();
 
         // `sort_by_key` is stable, so insertion order is preserved for duplicate items.
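The `get_by_key_enumerated` hunk above shows the map's whole lookup strategy: items stay in insertion order, a separate index vector is sorted by key, `partition_point` binary-searches the first matching index, and `map_while` walks the run of equal keys. A standalone sketch of the same technique (free functions with hypothetical names; the real code wraps this in `SortedIndexMultiMap`):

```rust
// Look up all values for `key`, in insertion order, given `items` in
// insertion order and `idx_sorted_by_key` sorted (stably) by item key.
fn get_by_key<'a, K: Ord, V>(
    items: &'a [(K, V)],
    idx_sorted_by_key: &'a [usize],
    key: K,
) -> impl Iterator<Item = &'a V> {
    // Binary search for the first index whose key is not less than `key`.
    let lower_bound = idx_sorted_by_key.partition_point(|&i| items[i].0 < key);
    // Yield while keys still match; stop at the first mismatch.
    idx_sorted_by_key[lower_bound..].iter().map_while(move |&i| {
        let (k, v) = &items[i];
        (k == &key).then_some(v)
    })
}

fn main() {
    // Duplicates of key 2 sit at insertion positions 0, 2, 4.
    let items = vec![(2, "a"), (1, "b"), (2, "c"), (3, "d"), (2, "e")];
    let mut idx: Vec<usize> = (0..items.len()).collect();
    // A stable sort preserves insertion order among equal keys,
    // exactly the property the `FromIterator` impl above relies on.
    idx.sort_by_key(|&i| items[i].0);
    let twos: Vec<_> = get_by_key(&items, &idx, 2).collect();
    assert_eq!(twos, [&"a", &"c", &"e"]);
}
```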
diff --git a/compiler/rustc_data_structures/src/sorted_map/tests.rs b/compiler/rustc_data_structures/src/sorted_map/tests.rs
index def7a7112fb..ea4d2f1feac 100644
--- a/compiler/rustc_data_structures/src/sorted_map/tests.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/tests.rs
@@ -24,7 +24,7 @@ fn test_sorted_index_multi_map() {
     // `get_by_key` returns items in insertion order.
     let twos: Vec<_> = set.get_by_key_enumerated(2).collect();
     let idxs: Vec<usize> = twos.iter().map(|(i, _)| *i).collect();
-    let values: Vec<usize> = twos.iter().map(|(_, &v)| v).collect();
+    let values: Vec<usize> = twos.iter().map(|&(_, &v)| v).collect();
 
     assert_eq!(idxs, vec![0, 2, 4]);
     assert_eq!(values, vec![0, 1, 2]);
diff --git a/compiler/rustc_data_structures/src/sso/map.rs b/compiler/rustc_data_structures/src/sso/map.rs
index 3200249a2dc..827c82fa46a 100644
--- a/compiler/rustc_data_structures/src/sso/map.rs
+++ b/compiler/rustc_data_structures/src/sso/map.rs
@@ -165,7 +165,7 @@ impl<K, V> SsoHashMap<K, V> {
 
     /// Clears the map, returning all key-value pairs as an iterator. Keeps the
     /// allocated memory for reuse.
-    pub fn drain(&mut self) -> impl Iterator<Item = (K, V)> + '_ {
+    pub fn drain(&mut self) -> impl Iterator<Item = (K, V)> {
         match self {
             SsoHashMap::Array(array) => Either::Left(array.drain(..)),
             SsoHashMap::Map(map) => Either::Right(map.drain()),
diff --git a/compiler/rustc_data_structures/src/sso/set.rs b/compiler/rustc_data_structures/src/sso/set.rs
index a4b40138933..e3fa1cbf4cc 100644
--- a/compiler/rustc_data_structures/src/sso/set.rs
+++ b/compiler/rustc_data_structures/src/sso/set.rs
@@ -80,7 +80,7 @@ impl<T> SsoHashSet<T> {
 
     /// Clears the set, returning all elements in an iterator.
     #[inline]
-    pub fn drain(&mut self) -> impl Iterator<Item = T> + '_ {
+    pub fn drain(&mut self) -> impl Iterator<Item = T> {
         self.map.drain().map(entry_to_key)
     }
 }
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index 9cd0cc499ca..ffbe54d6206 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -10,12 +10,11 @@ use smallvec::SmallVec;
 #[cfg(test)]
 mod tests;
 
+use rustc_hashes::{Hash64, Hash128};
 pub use rustc_stable_hash::{
     FromStableHash, SipHasher128Hash as StableHasherHash, StableSipHasher128 as StableHasher,
 };
 
-pub use crate::hashes::{Hash64, Hash128};
-
 /// Something that implements `HashStable<CTX>` can be hashed in a way that is
 /// stable across multiple compilation sessions.
 ///
diff --git a/compiler/rustc_data_structures/src/stack.rs b/compiler/rustc_data_structures/src/stack.rs
index 102b3640911..3d6d0003483 100644
--- a/compiler/rustc_data_structures/src/stack.rs
+++ b/compiler/rustc_data_structures/src/stack.rs
@@ -17,18 +17,6 @@ const STACK_PER_RECURSION: usize = 16 * 1024 * 1024; // 16MB
 ///
 /// Should not be sprinkled around carelessly, as it causes a little bit of overhead.
 #[inline]
-#[cfg(not(miri))]
 pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
     stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
 }
-
-/// Grows the stack on demand to prevent stack overflow. Call this in strategic locations
-/// to "break up" recursive calls. E.g. almost any call to `visit_expr` or equivalent can benefit
-/// from this.
-///
-/// Should not be sprinkled around carelessly, as it causes a little bit of overhead.
-#[cfg(miri)]
-#[inline]
-pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
-    f()
-}
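With the `#[cfg(miri)]` fallback deleted in the `stack.rs` hunk above, every build now routes through `stacker::maybe_grow`: if less than `RED_ZONE` bytes of stack remain, the closure runs on a freshly allocated segment of `STACK_PER_RECURSION` bytes. A hedged sketch of how callers use it (assumes the `stacker` crate as a dependency; the `Expr` tree is invented for illustration, and `RED_ZONE` mirrors the value in the file):

```rust
const RED_ZONE: usize = 100 * 1024; // 100KB
const STACK_PER_RECURSION: usize = 16 * 1024 * 1024; // 16MB

// Same shape as the surviving function in stack.rs.
pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
    stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
}

enum Expr {
    Leaf(u64),
    Neg(Box<Expr>),
}

// Deep recursion over a degenerate tree could overflow the default stack;
// wrapping the recursive call "breaks it up" onto new segments as needed.
fn eval(e: &Expr) -> u64 {
    match e {
        Expr::Leaf(n) => *n,
        Expr::Neg(inner) => ensure_sufficient_stack(|| eval(inner).wrapping_neg()),
    }
}

fn main() {
    let mut e = Expr::Leaf(1);
    for _ in 0..100_000 {
        e = Expr::Neg(Box::new(e));
    }
    // An even number of negations: the value round-trips back to 1.
    assert_eq!(eval(&e), 1);
}
```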
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index bea87a6685d..37b54fe38ff 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -18,14 +18,8 @@
 //!
 //! | Type                    | Serial version      | Parallel version                |
 //! | ----------------------- | ------------------- | ------------------------------- |
-//! |` Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
 //! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
 //! |                         |                     |                                 |
-//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
-//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
-//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
-//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
-//! |                         |                     |                                 |
 //! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
 //! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
@@ -103,18 +97,15 @@ mod mode {
 }
 
 // FIXME(parallel_compiler): Get rid of these aliases across the compiler.
-pub use std::marker::{Send, Sync};
+pub use std::sync::OnceLock;
 // Use portable AtomicU64 for targets without native 64-bit atomics
 #[cfg(target_has_atomic = "64")]
 pub use std::sync::atomic::AtomicU64;
-pub use std::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize};
-pub use std::sync::{OnceLock, Weak};
 
 pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
 pub use parking_lot::{
-    MappedMutexGuard as MappedLockGuard, MappedRwLockReadGuard as MappedReadGuard,
-    MappedRwLockWriteGuard as MappedWriteGuard, RwLockReadGuard as ReadGuard,
-    RwLockWriteGuard as WriteGuard,
+    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
+    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
 };
 #[cfg(not(target_has_atomic = "64"))]
 pub use portable_atomic::AtomicU64;
@@ -204,12 +195,6 @@ impl<T> RwLock<T> {
     }
 
     #[inline(always)]
-    #[track_caller]
-    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
-        f(&*self.read())
-    }
-
-    #[inline(always)]
     pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
         self.0.try_write().ok_or(())
     }
@@ -225,12 +210,6 @@ impl<T> RwLock<T> {
 
     #[inline(always)]
     #[track_caller]
-    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
-        f(&mut *self.write())
-    }
-
-    #[inline(always)]
-    #[track_caller]
     pub fn borrow(&self) -> ReadGuard<'_, T> {
         self.read()
     }
@@ -240,20 +219,4 @@ impl<T> RwLock<T> {
     pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
         self.write()
     }
-
-    #[inline(always)]
-    pub fn leak(&self) -> &T {
-        let guard = self.read();
-        let ret = unsafe { &*(&raw const *guard) };
-        std::mem::forget(guard);
-        ret
-    }
-}
-
-// FIXME: Probably a bad idea
-impl<T: Clone> Clone for RwLock<T> {
-    #[inline]
-    fn clone(&self) -> Self {
-        RwLock::new(self.borrow().clone())
-    }
 }
diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs
index 5236c9fe156..9720b22ea7d 100644
--- a/compiler/rustc_data_structures/src/sync/freeze.rs
+++ b/compiler/rustc_data_structures/src/sync/freeze.rs
@@ -3,9 +3,9 @@ use std::intrinsics::likely;
 use std::marker::PhantomData;
 use std::ops::{Deref, DerefMut};
 use std::ptr::NonNull;
-use std::sync::atomic::Ordering;
+use std::sync::atomic::{AtomicBool, Ordering};
 
-use crate::sync::{AtomicBool, DynSend, DynSync, ReadGuard, RwLock, WriteGuard};
+use crate::sync::{DynSend, DynSync, ReadGuard, RwLock, WriteGuard};
 
 /// A type which allows mutation using a lock until
 /// the value is frozen and can be accessed lock-free.
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
index 21ec5cf6c13..afbb0cef9f9 100644
--- a/compiler/rustc_data_structures/src/sync/vec.rs
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -45,14 +45,14 @@ impl<T: Copy> AppendOnlyVec<T> {
         self.vec.read().get(i).copied()
     }
 
-    pub fn iter_enumerated(&self) -> impl Iterator<Item = (usize, T)> + '_ {
+    pub fn iter_enumerated(&self) -> impl Iterator<Item = (usize, T)> {
         (0..)
             .map(|i| (i, self.get(i)))
             .take_while(|(_, o)| o.is_some())
             .filter_map(|(i, o)| Some((i, o?)))
     }
 
-    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
+    pub fn iter(&self) -> impl Iterator<Item = T> {
         (0..).map(|i| self.get(i)).take_while(|o| o.is_some()).flatten()
     }
 }
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index d75af009850..402ec9827bb 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -106,12 +106,6 @@ pub struct WorkerLocal<T> {
     registry: Registry,
 }
 
-// This is safe because the `deref` call will return a reference to a `T` unique to each thread
-// or it will panic for threads without an associated local. So there isn't a need for `T` to do
-// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
-// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
-unsafe impl<T: Send> Sync for WorkerLocal<T> {}
-
 impl<T> WorkerLocal<T> {
     /// Creates a new worker local where the `initial` closure computes the
     /// value this worker local should take for each thread in the registry.
@@ -138,6 +132,11 @@ impl<T> Deref for WorkerLocal<T> {
     fn deref(&self) -> &T {
         // This is safe because `verify` will only return values less than
         // `self.registry.thread_limit` which is the size of the `self.locals` array.
+
+        // The `deref` call will return a reference to a `T` unique to each thread
+        // or it will panic for threads without an associated local. So there isn't a need for `T` to do
+        // it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
+        // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
         unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
     }
 }
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs
index b1bdee18d6d..9c1e4cefa69 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs
@@ -1,7 +1,8 @@
 use std::ptr;
 
+use rustc_hashes::Hash128;
+
 use super::*;
-use crate::hashes::Hash128;
 use crate::stable_hasher::{HashStable, StableHasher};
 
 /// A tag type used in [`TaggedRef`] tests.
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
index e81ebb9a4be..33ac279f3e0 100644
--- a/compiler/rustc_data_structures/src/transitive_relation.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -363,7 +363,7 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
 
     /// Lists all the base edges in the graph: the initial _non-transitive_ set of element
     /// relations, which will be later used as the basis for the transitive closure computation.
-    pub fn base_edges(&self) -> impl Iterator<Item = (T, T)> + '_ {
+    pub fn base_edges(&self) -> impl Iterator<Item = (T, T)> {
         self.edges
             .iter()
             .map(move |edge| (self.elements[edge.source.0], self.elements[edge.target.0]))
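The `sync/vec.rs` hunk above shows the pattern behind `AppendOnlyVec::iter`: iterate by repeated indexed `get` calls so no lock is held across the whole iteration, stopping at the first missing element. A toy model of the idea (std `RwLock` standing in for the compiler's lock type; assumes edition 2024 so the `&self` borrow is captured implicitly, matching the `+ '_` removal above):

```rust
use std::sync::RwLock;

struct AppendOnlyVec<T> {
    vec: RwLock<Vec<T>>,
}

impl<T: Copy> AppendOnlyVec<T> {
    fn new() -> Self {
        Self { vec: RwLock::new(Vec::new()) }
    }

    fn push(&self, item: T) {
        self.vec.write().unwrap().push(item);
    }

    fn get(&self, i: usize) -> Option<T> {
        // The lock is held only for the duration of one element read.
        self.vec.read().unwrap().get(i).copied()
    }

    fn iter(&self) -> impl Iterator<Item = T> {
        // Re-lock per element; stop at the first index with no value.
        (0..).map(|i| self.get(i)).take_while(|o| o.is_some()).flatten()
    }
}

fn main() {
    let v = AppendOnlyVec::new();
    v.push(1);
    v.push(2);
    v.push(3);
    assert_eq!(v.iter().collect::<Vec<_>>(), [1, 2, 3]);
}
```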
