From 22a5267c83a3e17f2b763279eb24bb632c45dc6b Mon Sep 17 00:00:00 2001 From: joboet Date: Tue, 12 Mar 2024 14:55:06 +0100 Subject: std: move `Once` implementations to `sys` --- library/std/src/sync/condvar.rs | 2 +- library/std/src/sync/mutex.rs | 2 +- library/std/src/sync/once.rs | 2 +- library/std/src/sync/reentrant_lock.rs | 2 +- library/std/src/sync/rwlock.rs | 2 +- library/std/src/sys/locks/condvar/futex.rs | 56 --- library/std/src/sys/locks/condvar/itron.rs | 294 ------------- library/std/src/sys/locks/condvar/mod.rs | 37 -- library/std/src/sys/locks/condvar/no_threads.rs | 26 -- library/std/src/sys/locks/condvar/pthread.rs | 206 --------- library/std/src/sys/locks/condvar/sgx.rs | 45 -- library/std/src/sys/locks/condvar/teeos.rs | 100 ----- library/std/src/sys/locks/condvar/windows7.rs | 50 --- library/std/src/sys/locks/condvar/xous.rs | 148 ------- library/std/src/sys/locks/mod.rs | 7 - library/std/src/sys/locks/mutex/fuchsia.rs | 164 ------- library/std/src/sys/locks/mutex/futex.rs | 108 ----- library/std/src/sys/locks/mutex/itron.rs | 68 --- library/std/src/sys/locks/mutex/mod.rs | 39 -- library/std/src/sys/locks/mutex/no_threads.rs | 32 -- library/std/src/sys/locks/mutex/pthread.rs | 148 ------- library/std/src/sys/locks/mutex/sgx.rs | 59 --- library/std/src/sys/locks/mutex/windows7.rs | 54 --- library/std/src/sys/locks/mutex/xous.rs | 110 ----- library/std/src/sys/locks/rwlock/futex.rs | 320 -------------- library/std/src/sys/locks/rwlock/mod.rs | 37 -- library/std/src/sys/locks/rwlock/no_threads.rs | 65 --- library/std/src/sys/locks/rwlock/queue.rs | 557 ------------------------ library/std/src/sys/locks/rwlock/sgx.rs | 219 ---------- library/std/src/sys/locks/rwlock/sgx/tests.rs | 21 - library/std/src/sys/locks/rwlock/solid.rs | 93 ---- library/std/src/sys/locks/rwlock/teeos.rs | 44 -- library/std/src/sys/locks/rwlock/windows7.rs | 40 -- library/std/src/sys/locks/rwlock/xous.rs | 74 ---- library/std/src/sys/mod.rs | 2 +- library/std/src/sys/pal/teeos/mod.rs | 2 - library/std/src/sys/pal/uefi/mod.rs | 2 - library/std/src/sys/pal/unsupported/mod.rs | 1 - library/std/src/sys/pal/unsupported/once.rs | 100 ----- library/std/src/sys/pal/wasi/mod.rs | 2 - library/std/src/sys/pal/wasip2/mod.rs | 4 - library/std/src/sys/pal/wasm/mod.rs | 2 - library/std/src/sys/pal/zkvm/mod.rs | 2 - library/std/src/sys/sync/condvar/futex.rs | 56 +++ library/std/src/sys/sync/condvar/itron.rs | 294 +++++++++++++ library/std/src/sys/sync/condvar/mod.rs | 37 ++ library/std/src/sys/sync/condvar/no_threads.rs | 26 ++ library/std/src/sys/sync/condvar/pthread.rs | 206 +++++++++ library/std/src/sys/sync/condvar/sgx.rs | 45 ++ library/std/src/sys/sync/condvar/teeos.rs | 100 +++++ library/std/src/sys/sync/condvar/windows7.rs | 50 +++ library/std/src/sys/sync/condvar/xous.rs | 148 +++++++ library/std/src/sys/sync/mod.rs | 9 + library/std/src/sys/sync/mutex/fuchsia.rs | 164 +++++++ library/std/src/sys/sync/mutex/futex.rs | 108 +++++ library/std/src/sys/sync/mutex/itron.rs | 68 +++ library/std/src/sys/sync/mutex/mod.rs | 39 ++ library/std/src/sys/sync/mutex/no_threads.rs | 32 ++ library/std/src/sys/sync/mutex/pthread.rs | 148 +++++++ library/std/src/sys/sync/mutex/sgx.rs | 59 +++ library/std/src/sys/sync/mutex/windows7.rs | 54 +++ library/std/src/sys/sync/mutex/xous.rs | 110 +++++ library/std/src/sys/sync/once/futex.rs | 146 +++++++ library/std/src/sys/sync/once/mod.rs | 36 ++ library/std/src/sys/sync/once/no_threads.rs | 100 +++++ library/std/src/sys/sync/once/queue.rs | 294 +++++++++++++ 
library/std/src/sys/sync/rwlock/futex.rs | 320 ++++++++++++++ library/std/src/sys/sync/rwlock/mod.rs | 37 ++ library/std/src/sys/sync/rwlock/no_threads.rs | 65 +++ library/std/src/sys/sync/rwlock/queue.rs | 557 ++++++++++++++++++++++++ library/std/src/sys/sync/rwlock/sgx.rs | 219 ++++++++++ library/std/src/sys/sync/rwlock/sgx/tests.rs | 21 + library/std/src/sys/sync/rwlock/solid.rs | 93 ++++ library/std/src/sys/sync/rwlock/teeos.rs | 44 ++ library/std/src/sys/sync/rwlock/windows7.rs | 40 ++ library/std/src/sys/sync/rwlock/xous.rs | 74 ++++ library/std/src/sys_common/mod.rs | 1 - library/std/src/sys_common/once/futex.rs | 146 ------- library/std/src/sys_common/once/mod.rs | 35 -- library/std/src/sys_common/once/queue.rs | 294 ------------- 80 files changed, 3805 insertions(+), 3818 deletions(-) delete mode 100644 library/std/src/sys/locks/condvar/futex.rs delete mode 100644 library/std/src/sys/locks/condvar/itron.rs delete mode 100644 library/std/src/sys/locks/condvar/mod.rs delete mode 100644 library/std/src/sys/locks/condvar/no_threads.rs delete mode 100644 library/std/src/sys/locks/condvar/pthread.rs delete mode 100644 library/std/src/sys/locks/condvar/sgx.rs delete mode 100644 library/std/src/sys/locks/condvar/teeos.rs delete mode 100644 library/std/src/sys/locks/condvar/windows7.rs delete mode 100644 library/std/src/sys/locks/condvar/xous.rs delete mode 100644 library/std/src/sys/locks/mod.rs delete mode 100644 library/std/src/sys/locks/mutex/fuchsia.rs delete mode 100644 library/std/src/sys/locks/mutex/futex.rs delete mode 100644 library/std/src/sys/locks/mutex/itron.rs delete mode 100644 library/std/src/sys/locks/mutex/mod.rs delete mode 100644 library/std/src/sys/locks/mutex/no_threads.rs delete mode 100644 library/std/src/sys/locks/mutex/pthread.rs delete mode 100644 library/std/src/sys/locks/mutex/sgx.rs delete mode 100644 library/std/src/sys/locks/mutex/windows7.rs delete mode 100644 library/std/src/sys/locks/mutex/xous.rs delete mode 100644 library/std/src/sys/locks/rwlock/futex.rs delete mode 100644 library/std/src/sys/locks/rwlock/mod.rs delete mode 100644 library/std/src/sys/locks/rwlock/no_threads.rs delete mode 100644 library/std/src/sys/locks/rwlock/queue.rs delete mode 100644 library/std/src/sys/locks/rwlock/sgx.rs delete mode 100644 library/std/src/sys/locks/rwlock/sgx/tests.rs delete mode 100644 library/std/src/sys/locks/rwlock/solid.rs delete mode 100644 library/std/src/sys/locks/rwlock/teeos.rs delete mode 100644 library/std/src/sys/locks/rwlock/windows7.rs delete mode 100644 library/std/src/sys/locks/rwlock/xous.rs delete mode 100644 library/std/src/sys/pal/unsupported/once.rs create mode 100644 library/std/src/sys/sync/condvar/futex.rs create mode 100644 library/std/src/sys/sync/condvar/itron.rs create mode 100644 library/std/src/sys/sync/condvar/mod.rs create mode 100644 library/std/src/sys/sync/condvar/no_threads.rs create mode 100644 library/std/src/sys/sync/condvar/pthread.rs create mode 100644 library/std/src/sys/sync/condvar/sgx.rs create mode 100644 library/std/src/sys/sync/condvar/teeos.rs create mode 100644 library/std/src/sys/sync/condvar/windows7.rs create mode 100644 library/std/src/sys/sync/condvar/xous.rs create mode 100644 library/std/src/sys/sync/mod.rs create mode 100644 library/std/src/sys/sync/mutex/fuchsia.rs create mode 100644 library/std/src/sys/sync/mutex/futex.rs create mode 100644 library/std/src/sys/sync/mutex/itron.rs create mode 100644 library/std/src/sys/sync/mutex/mod.rs create mode 100644 library/std/src/sys/sync/mutex/no_threads.rs 
create mode 100644 library/std/src/sys/sync/mutex/pthread.rs create mode 100644 library/std/src/sys/sync/mutex/sgx.rs create mode 100644 library/std/src/sys/sync/mutex/windows7.rs create mode 100644 library/std/src/sys/sync/mutex/xous.rs create mode 100644 library/std/src/sys/sync/once/futex.rs create mode 100644 library/std/src/sys/sync/once/mod.rs create mode 100644 library/std/src/sys/sync/once/no_threads.rs create mode 100644 library/std/src/sys/sync/once/queue.rs create mode 100644 library/std/src/sys/sync/rwlock/futex.rs create mode 100644 library/std/src/sys/sync/rwlock/mod.rs create mode 100644 library/std/src/sys/sync/rwlock/no_threads.rs create mode 100644 library/std/src/sys/sync/rwlock/queue.rs create mode 100644 library/std/src/sys/sync/rwlock/sgx.rs create mode 100644 library/std/src/sys/sync/rwlock/sgx/tests.rs create mode 100644 library/std/src/sys/sync/rwlock/solid.rs create mode 100644 library/std/src/sys/sync/rwlock/teeos.rs create mode 100644 library/std/src/sys/sync/rwlock/windows7.rs create mode 100644 library/std/src/sys/sync/rwlock/xous.rs delete mode 100644 library/std/src/sys_common/once/futex.rs delete mode 100644 library/std/src/sys_common/once/mod.rs delete mode 100644 library/std/src/sys_common/once/queue.rs diff --git a/library/std/src/sync/condvar.rs b/library/std/src/sync/condvar.rs index 9c4b926b7ec..b20574e4f14 100644 --- a/library/std/src/sync/condvar.rs +++ b/library/std/src/sync/condvar.rs @@ -3,7 +3,7 @@ mod tests; use crate::fmt; use crate::sync::{mutex, poison, LockResult, MutexGuard, PoisonError}; -use crate::sys::locks as sys; +use crate::sys::sync as sys; use crate::time::{Duration, Instant}; /// A type indicating whether a timed wait on a condition variable returned diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs index 65ff10e02d4..895fcbd6b7e 100644 --- a/library/std/src/sync/mutex.rs +++ b/library/std/src/sync/mutex.rs @@ -8,7 +8,7 @@ use crate::mem::ManuallyDrop; use crate::ops::{Deref, DerefMut}; use crate::ptr::NonNull; use crate::sync::{poison, LockResult, TryLockError, TryLockResult}; -use crate::sys::locks as sys; +use crate::sys::sync as sys; /// A mutual exclusion primitive useful for protecting shared data /// diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs index 2bb4f3f9e03..608229fd674 100644 --- a/library/std/src/sync/once.rs +++ b/library/std/src/sync/once.rs @@ -8,7 +8,7 @@ mod tests; use crate::fmt; use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::sys_common::once as sys; +use crate::sys::sync as sys; /// A synchronization primitive which can be used to run a one-time global /// initialization. 
Useful for one-time initialization for FFI or related diff --git a/library/std/src/sync/reentrant_lock.rs b/library/std/src/sync/reentrant_lock.rs index 9a44998ebf6..80b9e0cf152 100644 --- a/library/std/src/sync/reentrant_lock.rs +++ b/library/std/src/sync/reentrant_lock.rs @@ -6,7 +6,7 @@ use crate::fmt; use crate::ops::Deref; use crate::panic::{RefUnwindSafe, UnwindSafe}; use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed}; -use crate::sys::locks as sys; +use crate::sys::sync as sys; /// A re-entrant mutual exclusion lock /// diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs index 0b3d25c3298..f7f098c082a 100644 --- a/library/std/src/sync/rwlock.rs +++ b/library/std/src/sync/rwlock.rs @@ -8,7 +8,7 @@ use crate::mem::ManuallyDrop; use crate::ops::{Deref, DerefMut}; use crate::ptr::NonNull; use crate::sync::{poison, LockResult, TryLockError, TryLockResult}; -use crate::sys::locks as sys; +use crate::sys::sync as sys; /// A reader-writer lock /// diff --git a/library/std/src/sys/locks/condvar/futex.rs b/library/std/src/sys/locks/condvar/futex.rs deleted file mode 100644 index 3ad93ce07f7..00000000000 --- a/library/std/src/sys/locks/condvar/futex.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::sync::atomic::{AtomicU32, Ordering::Relaxed}; -use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all}; -use crate::sys::locks::Mutex; -use crate::time::Duration; - -pub struct Condvar { - // The value of this atomic is simply incremented on every notification. - // This is used by `.wait()` to not miss any notifications after - // unlocking the mutex and before waiting for notifications. - futex: AtomicU32, -} - -impl Condvar { - #[inline] - pub const fn new() -> Self { - Self { futex: AtomicU32::new(0) } - } - - // All the memory orderings here are `Relaxed`, - // because synchronization is done by unlocking and locking the mutex. - - pub fn notify_one(&self) { - self.futex.fetch_add(1, Relaxed); - futex_wake(&self.futex); - } - - pub fn notify_all(&self) { - self.futex.fetch_add(1, Relaxed); - futex_wake_all(&self.futex); - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - self.wait_optional_timeout(mutex, None); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, timeout: Duration) -> bool { - self.wait_optional_timeout(mutex, Some(timeout)) - } - - unsafe fn wait_optional_timeout(&self, mutex: &Mutex, timeout: Option) -> bool { - // Examine the notification counter _before_ we unlock the mutex. - let futex_value = self.futex.load(Relaxed); - - // Unlock the mutex before going to sleep. - mutex.unlock(); - - // Wait, but only if there hasn't been any - // notification since we unlocked the mutex. - let r = futex_wait(&self.futex, futex_value, timeout); - - // Lock the mutex again. - mutex.lock(); - - r - } -} diff --git a/library/std/src/sys/locks/condvar/itron.rs b/library/std/src/sys/locks/condvar/itron.rs deleted file mode 100644 index 4c6f5e9dad2..00000000000 --- a/library/std/src/sys/locks/condvar/itron.rs +++ /dev/null @@ -1,294 +0,0 @@ -//! POSIX conditional variable implementation based on user-space wait queues. -use crate::sys::pal::itron::{ - abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong, -}; -use crate::{mem::replace, ptr::NonNull, sys::locks::Mutex, time::Duration}; - -// The implementation is inspired by the queue-based implementation shown in -// Andrew D. 
Birrell's paper "Implementing Condition Variables with Semaphores" - -pub struct Condvar { - waiters: SpinMutex, -} - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -impl Condvar { - #[inline] - pub const fn new() -> Condvar { - Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) } - } - - pub fn notify_one(&self) { - self.waiters.with_locked(|waiters| { - if let Some(task) = waiters.pop_front() { - // Unpark the task - match unsafe { abi::wup_tsk(task) } { - // The task already has a token. - abi::E_QOVR => {} - // Can't undo the effect; abort the program on failure - er => { - expect_success_aborting(er, &"wup_tsk"); - } - } - } - }); - } - - pub fn notify_all(&self) { - self.waiters.with_locked(|waiters| { - while let Some(task) = waiters.pop_front() { - // Unpark the task - match unsafe { abi::wup_tsk(task) } { - // The task already has a token. - abi::E_QOVR => {} - // Can't undo the effect; abort the program on failure - er => { - expect_success_aborting(er, &"wup_tsk"); - } - } - } - }); - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - // Construct `Waiter`. - let mut waiter = waiter_queue::Waiter::new(); - let waiter = NonNull::from(&mut waiter); - - self.waiters.with_locked(|waiters| unsafe { - waiters.insert(waiter); - }); - - unsafe { mutex.unlock() }; - - // Wait until `waiter` is removed from the queue - loop { - // Park the current task - expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk"); - - if !self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) { - break; - } - } - - mutex.lock(); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - // Construct and pin `Waiter` - let mut waiter = waiter_queue::Waiter::new(); - let waiter = NonNull::from(&mut waiter); - - self.waiters.with_locked(|waiters| unsafe { - waiters.insert(waiter); - }); - - unsafe { mutex.unlock() }; - - // Park the current task and do not wake up until the timeout elapses - // or the task gets woken up by `notify_*` - match with_tmos_strong(dur, |tmo| { - let er = unsafe { abi::tslp_tsk(tmo) }; - if er == 0 { - // We were unparked. Are we really dequeued? - if self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) { - // No we are not. Continue waiting. - return abi::E_TMOUT; - } - } - er - }) { - abi::E_TMOUT => {} - er => { - expect_success_aborting(er, &"tslp_tsk"); - } - } - - // Remove `waiter` from `self.waiters`. If `waiter` is still in - // `waiters`, it means we woke up because of a timeout. Otherwise, - // we woke up because of `notify_*`. - let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) }); - - mutex.lock(); - success - } -} - -mod waiter_queue { - use super::*; - - pub struct WaiterQueue { - head: Option, - } - - #[derive(Copy, Clone)] - struct ListHead { - first: NonNull, - last: NonNull, - } - - unsafe impl Send for ListHead {} - unsafe impl Sync for ListHead {} - - pub struct Waiter { - // These fields are only accessed through `&[mut] WaiterQueue`. - /// The waiting task's ID. Will be zeroed when the task is woken up - /// and removed from a queue. - task: abi::ID, - priority: abi::PRI, - prev: Option>, - next: Option>, - } - - unsafe impl Send for Waiter {} - unsafe impl Sync for Waiter {} - - impl Waiter { - #[inline] - pub fn new() -> Self { - let task = task::current_task_id(); - let priority = task::task_priority(abi::TSK_SELF); - - // Zeroness of `Waiter::task` indicates whether the `Waiter` is - // linked to a queue or not. 
This invariant is important for - // the correctness. - debug_assert_ne!(task, 0); - - Self { task, priority, prev: None, next: None } - } - } - - impl WaiterQueue { - #[inline] - pub const fn new() -> Self { - Self { head: None } - } - - /// # Safety - /// - /// - The caller must own `*waiter_ptr`. The caller will lose the - /// ownership until `*waiter_ptr` is removed from `self`. - /// - /// - `*waiter_ptr` must be valid until it's removed from the queue. - /// - /// - `*waiter_ptr` must not have been previously inserted to a `WaiterQueue`. - /// - pub unsafe fn insert(&mut self, mut waiter_ptr: NonNull) { - unsafe { - let waiter = waiter_ptr.as_mut(); - - debug_assert!(waiter.prev.is_none()); - debug_assert!(waiter.next.is_none()); - - if let Some(head) = &mut self.head { - // Find the insertion position and insert `waiter` - let insert_after = { - let mut cursor = head.last; - loop { - if waiter.priority >= cursor.as_ref().priority { - // `cursor` and all previous waiters have the same or higher - // priority than `current_task_priority`. Insert the new - // waiter right after `cursor`. - break Some(cursor); - } - cursor = if let Some(prev) = cursor.as_ref().prev { - prev - } else { - break None; - }; - } - }; - - if let Some(mut insert_after) = insert_after { - // Insert `waiter` after `insert_after` - let insert_before = insert_after.as_ref().next; - - waiter.prev = Some(insert_after); - insert_after.as_mut().next = Some(waiter_ptr); - - waiter.next = insert_before; - if let Some(mut insert_before) = insert_before { - insert_before.as_mut().prev = Some(waiter_ptr); - } else { - head.last = waiter_ptr; - } - } else { - // Insert `waiter` to the front - waiter.next = Some(head.first); - head.first.as_mut().prev = Some(waiter_ptr); - head.first = waiter_ptr; - } - } else { - // `waiter` is the only element - self.head = Some(ListHead { first: waiter_ptr, last: waiter_ptr }); - } - } - } - - /// Given a `Waiter` that was previously inserted to `self`, remove - /// it from `self` if it's still there. - #[inline] - pub unsafe fn remove(&mut self, mut waiter_ptr: NonNull) -> bool { - unsafe { - let waiter = waiter_ptr.as_mut(); - if waiter.task != 0 { - let head = self.head.as_mut().unwrap(); - - match (waiter.prev, waiter.next) { - (Some(mut prev), Some(mut next)) => { - prev.as_mut().next = Some(next); - next.as_mut().prev = Some(prev); - } - (None, Some(mut next)) => { - head.first = next; - next.as_mut().prev = None; - } - (Some(mut prev), None) => { - prev.as_mut().next = None; - head.last = prev; - } - (None, None) => { - self.head = None; - } - } - - waiter.task = 0; - - true - } else { - false - } - } - } - - /// Given a `Waiter` that was previously inserted to `self`, return a - /// flag indicating whether it's still in `self`. - #[inline] - pub unsafe fn is_queued(&self, waiter: NonNull) -> bool { - unsafe { waiter.as_ref().task != 0 } - } - - #[inline] - pub fn pop_front(&mut self) -> Option { - unsafe { - let head = self.head.as_mut()?; - let waiter = head.first.as_mut(); - - // Get the ID - let id = replace(&mut waiter.task, 0); - - // Unlink the waiter - if let Some(mut next) = waiter.next { - head.first = next; - next.as_mut().prev = None; - } else { - self.head = None; - } - - Some(id) - } - } - } -} diff --git a/library/std/src/sys/locks/condvar/mod.rs b/library/std/src/sys/locks/condvar/mod.rs deleted file mode 100644 index 6849cacf88e..00000000000 --- a/library/std/src/sys/locks/condvar/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -cfg_if::cfg_if! 
{ - if #[cfg(any( - all(target_os = "windows", not(target_vendor="win7")), - target_os = "linux", - target_os = "android", - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - target_os = "fuchsia", - all(target_family = "wasm", target_feature = "atomics"), - target_os = "hermit", - ))] { - mod futex; - pub use futex::Condvar; - } else if #[cfg(target_family = "unix")] { - mod pthread; - pub use pthread::Condvar; - } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] { - mod windows7; - pub use windows7::Condvar; - } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { - mod sgx; - pub use sgx::Condvar; - } else if #[cfg(target_os = "solid_asp3")] { - mod itron; - pub use itron::Condvar; - } else if #[cfg(target_os = "teeos")] { - mod teeos; - pub use teeos::Condvar; - } else if #[cfg(target_os = "xous")] { - mod xous; - pub use xous::Condvar; - } else { - mod no_threads; - pub use no_threads::Condvar; - } -} diff --git a/library/std/src/sys/locks/condvar/no_threads.rs b/library/std/src/sys/locks/condvar/no_threads.rs deleted file mode 100644 index 3f0943b50ee..00000000000 --- a/library/std/src/sys/locks/condvar/no_threads.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::sys::locks::Mutex; -use crate::time::Duration; - -pub struct Condvar {} - -impl Condvar { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> Condvar { - Condvar {} - } - - #[inline] - pub fn notify_one(&self) {} - - #[inline] - pub fn notify_all(&self) {} - - pub unsafe fn wait(&self, _mutex: &Mutex) { - panic!("condvar wait not supported") - } - - pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool { - panic!("condvar wait not supported"); - } -} diff --git a/library/std/src/sys/locks/condvar/pthread.rs b/library/std/src/sys/locks/condvar/pthread.rs deleted file mode 100644 index 094738d5a3f..00000000000 --- a/library/std/src/sys/locks/condvar/pthread.rs +++ /dev/null @@ -1,206 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::ptr; -use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed}; -use crate::sys::locks::{mutex, Mutex}; -#[cfg(not(target_os = "nto"))] -use crate::sys::time::TIMESPEC_MAX; -#[cfg(target_os = "nto")] -use crate::sys::time::TIMESPEC_MAX_CAPPED; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; -use crate::time::Duration; - -struct AllocatedCondvar(UnsafeCell); - -pub struct Condvar { - inner: LazyBox, - mutex: AtomicPtr, -} - -#[inline] -fn raw(c: &Condvar) -> *mut libc::pthread_cond_t { - c.inner.0.get() -} - -unsafe impl Send for AllocatedCondvar {} -unsafe impl Sync for AllocatedCondvar {} - -impl LazyInit for AllocatedCondvar { - fn init() -> Box { - let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER))); - - cfg_if::cfg_if! { - if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "l4re", - target_os = "android", - target_os = "redox" - ))] { - // `pthread_condattr_setclock` is unfortunately not supported on these platforms. - } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] { - // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet - // So on that platform, init() should always be called - // Moreover, that platform does not have pthread_condattr_setclock support, - // hence that initialization should be skipped as well - // - // Similar story for the 3DS (horizon). 
- let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) }; - assert_eq!(r, 0); - } else { - use crate::mem::MaybeUninit; - let mut attr = MaybeUninit::::uninit(); - let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) }; - assert_eq!(r, 0); - let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) }; - assert_eq!(r, 0); - let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) }; - assert_eq!(r, 0); - let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) }; - assert_eq!(r, 0); - } - } - - condvar - } -} - -impl Drop for AllocatedCondvar { - #[inline] - fn drop(&mut self) { - let r = unsafe { libc::pthread_cond_destroy(self.0.get()) }; - if cfg!(target_os = "dragonfly") { - // On DragonFly pthread_cond_destroy() returns EINVAL if called on - // a condvar that was just initialized with - // libc::PTHREAD_COND_INITIALIZER. Once it is used or - // pthread_cond_init() is called, this behaviour no longer occurs. - debug_assert!(r == 0 || r == libc::EINVAL); - } else { - debug_assert_eq!(r, 0); - } - } -} - -impl Condvar { - pub const fn new() -> Condvar { - Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) } - } - - #[inline] - fn verify(&self, mutex: *mut libc::pthread_mutex_t) { - // Relaxed is okay here because we never read through `self.addr`, and only use it to - // compare addresses. - match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) { - Ok(_) => {} // Stored the address - Err(n) if n == mutex => {} // Lost a race to store the same address - _ => panic!("attempted to use a condition variable with two mutexes"), - } - } - - #[inline] - pub fn notify_one(&self) { - let r = unsafe { libc::pthread_cond_signal(raw(self)) }; - debug_assert_eq!(r, 0); - } - - #[inline] - pub fn notify_all(&self) { - let r = unsafe { libc::pthread_cond_broadcast(raw(self)) }; - debug_assert_eq!(r, 0); - } - - #[inline] - pub unsafe fn wait(&self, mutex: &Mutex) { - let mutex = mutex::raw(mutex); - self.verify(mutex); - let r = libc::pthread_cond_wait(raw(self), mutex); - debug_assert_eq!(r, 0); - } - - // This implementation is used on systems that support pthread_condattr_setclock - // where we configure condition variable to use monotonic clock (instead of - // default system clock). This approach avoids all problems that result - // from changes made to the system time. 
- #[cfg(not(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "android", - target_os = "espidf", - target_os = "horizon" - )))] - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - use crate::sys::time::Timespec; - - let mutex = mutex::raw(mutex); - self.verify(mutex); - - #[cfg(not(target_os = "nto"))] - let timeout = Timespec::now(libc::CLOCK_MONOTONIC) - .checked_add_duration(&dur) - .and_then(|t| t.to_timespec()) - .unwrap_or(TIMESPEC_MAX); - - #[cfg(target_os = "nto")] - let timeout = Timespec::now(libc::CLOCK_MONOTONIC) - .checked_add_duration(&dur) - .and_then(|t| t.to_timespec_capped()) - .unwrap_or(TIMESPEC_MAX_CAPPED); - - let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout); - assert!(r == libc::ETIMEDOUT || r == 0); - r == 0 - } - - // This implementation is modeled after libcxx's condition_variable - // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46 - // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367 - #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "android", - target_os = "espidf", - target_os = "horizon" - ))] - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - use crate::sys::time::SystemTime; - use crate::time::Instant; - - let mutex = mutex::raw(mutex); - self.verify(mutex); - - // OSX implementation of `pthread_cond_timedwait` is buggy - // with super long durations. When duration is greater than - // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` - // in macOS Sierra returns error 316. - // - // This program demonstrates the issue: - // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c - // - // To work around this issue, and possible bugs of other OSes, timeout - // is clamped to 1000 years, which is allowable per the API of `wait_timeout` - // because of spurious wakeups. - let dur = Duration::min(dur, Duration::from_secs(1000 * 365 * 86400)); - - // pthread_cond_timedwait uses system time, but we want to report timeout - // based on stable time. - let now = Instant::now(); - - let timeout = SystemTime::now() - .t - .checked_add_duration(&dur) - .and_then(|t| t.to_timespec()) - .unwrap_or(TIMESPEC_MAX); - - let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout); - debug_assert!(r == libc::ETIMEDOUT || r == 0); - - // ETIMEDOUT is not a totally reliable method of determining timeout due - // to clock shifts, so do the check ourselves - now.elapsed() < dur - } -} diff --git a/library/std/src/sys/locks/condvar/sgx.rs b/library/std/src/sys/locks/condvar/sgx.rs deleted file mode 100644 index cabd3250275..00000000000 --- a/library/std/src/sys/locks/condvar/sgx.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::sys::locks::Mutex; -use crate::sys::pal::waitqueue::{SpinMutex, WaitQueue, WaitVariable}; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; -use crate::time::Duration; - -/// FIXME: `UnsafeList` is not movable. 
-struct AllocatedCondvar(SpinMutex>); - -pub struct Condvar { - inner: LazyBox, -} - -impl LazyInit for AllocatedCondvar { - fn init() -> Box { - Box::new(AllocatedCondvar(SpinMutex::new(WaitVariable::new(())))) - } -} - -impl Condvar { - pub const fn new() -> Condvar { - Condvar { inner: LazyBox::new() } - } - - #[inline] - pub fn notify_one(&self) { - let _ = WaitQueue::notify_one(self.inner.0.lock()); - } - - #[inline] - pub fn notify_all(&self) { - let _ = WaitQueue::notify_all(self.inner.0.lock()); - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - let guard = self.inner.0.lock(); - WaitQueue::wait(guard, || unsafe { mutex.unlock() }); - mutex.lock() - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let success = WaitQueue::wait_timeout(&self.inner.0, dur, || unsafe { mutex.unlock() }); - mutex.lock(); - success - } -} diff --git a/library/std/src/sys/locks/condvar/teeos.rs b/library/std/src/sys/locks/condvar/teeos.rs deleted file mode 100644 index c08e8145b8c..00000000000 --- a/library/std/src/sys/locks/condvar/teeos.rs +++ /dev/null @@ -1,100 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::ptr; -use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed}; -use crate::sys::locks::mutex::{self, Mutex}; -use crate::sys::time::TIMESPEC_MAX; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; -use crate::time::Duration; - -extern "C" { - pub fn pthread_cond_timedwait( - cond: *mut libc::pthread_cond_t, - lock: *mut libc::pthread_mutex_t, - adstime: *const libc::timespec, - ) -> libc::c_int; -} - -struct AllocatedCondvar(UnsafeCell); - -pub struct Condvar { - inner: LazyBox, - mutex: AtomicPtr, -} - -#[inline] -fn raw(c: &Condvar) -> *mut libc::pthread_cond_t { - c.inner.0.get() -} - -unsafe impl Send for AllocatedCondvar {} -unsafe impl Sync for AllocatedCondvar {} - -impl LazyInit for AllocatedCondvar { - fn init() -> Box { - let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER))); - - let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) }; - assert_eq!(r, 0); - - condvar - } -} - -impl Drop for AllocatedCondvar { - #[inline] - fn drop(&mut self) { - let r = unsafe { libc::pthread_cond_destroy(self.0.get()) }; - debug_assert_eq!(r, 0); - } -} - -impl Condvar { - pub const fn new() -> Condvar { - Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) } - } - - #[inline] - fn verify(&self, mutex: *mut libc::pthread_mutex_t) { - match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) { - Ok(_) => {} // Stored the address - Err(n) if n == mutex => {} // Lost a race to store the same address - _ => panic!("attempted to use a condition variable with two mutexes"), - } - } - - #[inline] - pub fn notify_one(&self) { - let r = unsafe { libc::pthread_cond_signal(raw(self)) }; - debug_assert_eq!(r, 0); - } - - #[inline] - pub fn notify_all(&self) { - let r = unsafe { libc::pthread_cond_broadcast(raw(self)) }; - debug_assert_eq!(r, 0); - } - - #[inline] - pub unsafe fn wait(&self, mutex: &Mutex) { - let mutex = mutex::raw(mutex); - self.verify(mutex); - let r = libc::pthread_cond_wait(raw(self), mutex); - debug_assert_eq!(r, 0); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - use crate::sys::time::Timespec; - - let mutex = mutex::raw(mutex); - self.verify(mutex); - - let timeout = Timespec::now(libc::CLOCK_MONOTONIC) - .checked_add_duration(&dur) - .and_then(|t| t.to_timespec()) - .unwrap_or(TIMESPEC_MAX); - - let r = 
pthread_cond_timedwait(raw(self), mutex, &timeout); - assert!(r == libc::ETIMEDOUT || r == 0); - r == 0 - } -} diff --git a/library/std/src/sys/locks/condvar/windows7.rs b/library/std/src/sys/locks/condvar/windows7.rs deleted file mode 100644 index 28a288335d2..00000000000 --- a/library/std/src/sys/locks/condvar/windows7.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sys::c; -use crate::sys::locks::{mutex, Mutex}; -use crate::sys::os; -use crate::time::Duration; - -pub struct Condvar { - inner: UnsafeCell, -} - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -impl Condvar { - #[inline] - pub const fn new() -> Condvar { - Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) } - } - - #[inline] - pub unsafe fn wait(&self, mutex: &Mutex) { - let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0); - debug_assert!(r != 0); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let r = c::SleepConditionVariableSRW( - self.inner.get(), - mutex::raw(mutex), - crate::sys::pal::dur2timeout(dur), - 0, - ); - if r == 0 { - debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize); - false - } else { - true - } - } - - #[inline] - pub fn notify_one(&self) { - unsafe { c::WakeConditionVariable(self.inner.get()) } - } - - #[inline] - pub fn notify_all(&self) { - unsafe { c::WakeAllConditionVariable(self.inner.get()) } - } -} diff --git a/library/std/src/sys/locks/condvar/xous.rs b/library/std/src/sys/locks/condvar/xous.rs deleted file mode 100644 index 0e51449e0af..00000000000 --- a/library/std/src/sys/locks/condvar/xous.rs +++ /dev/null @@ -1,148 +0,0 @@ -use crate::os::xous::ffi::{blocking_scalar, scalar}; -use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; -use crate::sys::locks::Mutex; -use crate::time::Duration; -use core::sync::atomic::{AtomicUsize, Ordering}; - -// The implementation is inspired by Andrew D. Birrell's paper -// "Implementing Condition Variables with Semaphores" - -const NOTIFY_TRIES: usize = 3; - -pub struct Condvar { - counter: AtomicUsize, - timed_out: AtomicUsize, -} - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -impl Condvar { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> Condvar { - Condvar { counter: AtomicUsize::new(0), timed_out: AtomicUsize::new(0) } - } - - fn notify_some(&self, to_notify: usize) { - // Assumption: The Mutex protecting this condvar is locked throughout the - // entirety of this call, preventing calls to `wait` and `wait_timeout`. - - // Logic check: Ensure that there aren't any missing waiters. Remove any that - // timed-out, ensuring the counter doesn't underflow. - assert!(self.timed_out.load(Ordering::Relaxed) <= self.counter.load(Ordering::Relaxed)); - self.counter.fetch_sub(self.timed_out.swap(0, Ordering::Relaxed), Ordering::Relaxed); - - // Figure out how many threads to notify. Note that it is impossible for `counter` - // to increase during this operation because Mutex is locked. However, it is - // possible for `counter` to decrease due to a condvar timing out, in which - // case the corresponding `timed_out` will increase accordingly. 
- let Ok(waiter_count) = - self.counter.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |counter| { - if counter == 0 { - return None; - } else { - Some(counter - counter.min(to_notify)) - } - }) - else { - // No threads are waiting on this condvar - return; - }; - - let mut remaining_to_wake = waiter_count.min(to_notify); - if remaining_to_wake == 0 { - return; - } - for _wake_tries in 0..NOTIFY_TRIES { - let result = blocking_scalar( - ticktimer_server(), - TicktimerScalar::NotifyCondition(self.index(), remaining_to_wake).into(), - ) - .expect("failure to send NotifyCondition command"); - - // Remove the list of waiters that were notified - remaining_to_wake -= result[0]; - - // Also remove the number of waiters that timed out. Clamp it to 0 in order to - // ensure we don't wait forever in case the waiter woke up between the time - // we counted the remaining waiters and now. - remaining_to_wake = - remaining_to_wake.saturating_sub(self.timed_out.swap(0, Ordering::Relaxed)); - if remaining_to_wake == 0 { - return; - } - crate::thread::yield_now(); - } - } - - pub fn notify_one(&self) { - self.notify_some(1) - } - - pub fn notify_all(&self) { - self.notify_some(self.counter.load(Ordering::Relaxed)) - } - - fn index(&self) -> usize { - core::ptr::from_ref(self).addr() - } - - /// Unlock the given Mutex and wait for the notification. Wait at most - /// `ms` milliseconds, or pass `0` to wait forever. - /// - /// Returns `true` if the condition was received, `false` if it timed out - fn wait_ms(&self, mutex: &Mutex, ms: usize) -> bool { - self.counter.fetch_add(1, Ordering::Relaxed); - unsafe { mutex.unlock() }; - - // Threading concern: There is a chance that the `notify` thread wakes up here before - // we have a chance to wait for the condition. This is fine because we've recorded - // the fact that we're waiting by incrementing the counter. - let result = blocking_scalar( - ticktimer_server(), - TicktimerScalar::WaitForCondition(self.index(), ms).into(), - ); - let awoken = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; - - // If we awoke due to a timeout, increment the `timed_out` counter so that the - // main loop of `notify` knows there's a timeout. - // - // This is done with the Mutex still unlocked, because the Mutex might still - // be locked by the `notify` process above. 
- if !awoken { - self.timed_out.fetch_add(1, Ordering::Relaxed); - } - - unsafe { mutex.lock() }; - awoken - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - // Wait for 0 ms, which is a special case to "wait forever" - self.wait_ms(mutex, 0); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let mut millis = dur.as_millis() as usize; - // Ensure we don't wait for 0 ms, which would cause us to wait forever - if millis == 0 { - millis = 1; - } - self.wait_ms(mutex, millis) - } -} - -impl Drop for Condvar { - fn drop(&mut self) { - let remaining_count = self.counter.load(Ordering::Relaxed); - let timed_out = self.timed_out.load(Ordering::Relaxed); - assert!( - remaining_count - timed_out == 0, - "counter was {} and timed_out was {} not 0", - remaining_count, - timed_out - ); - scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok(); - } -} diff --git a/library/std/src/sys/locks/mod.rs b/library/std/src/sys/locks/mod.rs deleted file mode 100644 index 0bdc4a1e1db..00000000000 --- a/library/std/src/sys/locks/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod condvar; -mod mutex; -mod rwlock; - -pub use condvar::Condvar; -pub use mutex::Mutex; -pub use rwlock::RwLock; diff --git a/library/std/src/sys/locks/mutex/fuchsia.rs b/library/std/src/sys/locks/mutex/fuchsia.rs deleted file mode 100644 index 5d89e5a13fd..00000000000 --- a/library/std/src/sys/locks/mutex/fuchsia.rs +++ /dev/null @@ -1,164 +0,0 @@ -//! A priority inheriting mutex for Fuchsia. -//! -//! This is a port of the [mutex in Fuchsia's libsync]. Contrary to the original, -//! it does not abort the process when reentrant locking is detected, but deadlocks. -//! -//! Priority inheritance is achieved by storing the owning thread's handle in an -//! atomic variable. Fuchsia's futex operations support setting an owner thread -//! for a futex, which can boost that thread's priority while the futex is waited -//! upon. -//! -//! libsync is licenced under the following BSD-style licence: -//! -//! Copyright 2016 The Fuchsia Authors. -//! -//! Redistribution and use in source and binary forms, with or without -//! modification, are permitted provided that the following conditions are -//! met: -//! -//! * Redistributions of source code must retain the above copyright -//! notice, this list of conditions and the following disclaimer. -//! * Redistributions in binary form must reproduce the above -//! copyright notice, this list of conditions and the following -//! disclaimer in the documentation and/or other materials provided -//! with the distribution. -//! -//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -//! "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -//! LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -//! A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -//! OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -//! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -//! LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -//! DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -//! THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -//! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -//! -//! 
[mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c - -use crate::sync::atomic::{ - AtomicU32, - Ordering::{Acquire, Relaxed, Release}, -}; -use crate::sys::futex::zircon::{ - zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t, zx_thread_self, ZX_ERR_BAD_HANDLE, - ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE, ZX_OK, - ZX_TIME_INFINITE, -}; - -// The lowest two bits of a `zx_handle_t` are always set, so the lowest bit is used to mark the -// mutex as contested by clearing it. -const CONTESTED_BIT: u32 = 1; -// This can never be a valid `zx_handle_t`. -const UNLOCKED: u32 = 0; - -pub struct Mutex { - futex: AtomicU32, -} - -#[inline] -fn to_state(owner: zx_handle_t) -> u32 { - owner -} - -#[inline] -fn to_owner(state: u32) -> zx_handle_t { - state | CONTESTED_BIT -} - -#[inline] -fn is_contested(state: u32) -> bool { - state & CONTESTED_BIT == 0 -} - -#[inline] -fn mark_contested(state: u32) -> u32 { - state & !CONTESTED_BIT -} - -impl Mutex { - #[inline] - pub const fn new() -> Mutex { - Mutex { futex: AtomicU32::new(UNLOCKED) } - } - - #[inline] - pub fn try_lock(&self) -> bool { - let thread_self = unsafe { zx_thread_self() }; - self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok() - } - - #[inline] - pub fn lock(&self) { - let thread_self = unsafe { zx_thread_self() }; - if let Err(state) = - self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed) - { - unsafe { - self.lock_contested(state, thread_self); - } - } - } - - /// # Safety - /// `thread_self` must be the handle for the current thread. - #[cold] - unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) { - let owned_state = mark_contested(to_state(thread_self)); - loop { - // Mark the mutex as contested if it is not already. - let contested = mark_contested(state); - if is_contested(state) - || self.futex.compare_exchange(state, contested, Relaxed, Relaxed).is_ok() - { - // The mutex has been marked as contested, wait for the state to change. - unsafe { - match zx_futex_wait( - &self.futex, - AtomicU32::new(contested), - to_owner(state), - ZX_TIME_INFINITE, - ) { - ZX_OK | ZX_ERR_BAD_STATE | ZX_ERR_TIMED_OUT => (), - // Note that if a thread handle is reused after its associated thread - // exits without unlocking the mutex, an arbitrary thread's priority - // could be boosted by the wait, but there is currently no way to - // prevent that. - ZX_ERR_INVALID_ARGS | ZX_ERR_BAD_HANDLE | ZX_ERR_WRONG_TYPE => { - panic!( - "either the current thread is trying to lock a mutex it has - already locked, or the previous owner did not unlock the mutex - before exiting" - ) - } - error => panic!("unexpected error in zx_futex_wait: {error}"), - } - } - } - - // The state has changed or a wakeup occurred, try to lock the mutex. - match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) { - Ok(_) => return, - Err(updated) => state = updated, - } - } - } - - #[inline] - pub unsafe fn unlock(&self) { - if is_contested(self.futex.swap(UNLOCKED, Release)) { - // The woken thread will mark the mutex as contested again, - // and return here, waking until there are no waiters left, - // in which case this is a noop. 
- self.wake(); - } - } - - #[cold] - fn wake(&self) { - unsafe { - zx_futex_wake_single_owner(&self.futex); - } - } -} diff --git a/library/std/src/sys/locks/mutex/futex.rs b/library/std/src/sys/locks/mutex/futex.rs deleted file mode 100644 index 7427cae94d6..00000000000 --- a/library/std/src/sys/locks/mutex/futex.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::sync::atomic::{ - self, - Ordering::{Acquire, Relaxed, Release}, -}; -use crate::sys::futex::{futex_wait, futex_wake}; - -cfg_if::cfg_if! { -if #[cfg(windows)] { - // On Windows we can have a smol futex - type Atomic = atomic::AtomicU8; - type State = u8; -} else { - type Atomic = atomic::AtomicU32; - type State = u32; -} -} - -pub struct Mutex { - futex: Atomic, -} - -const UNLOCKED: State = 0; -const LOCKED: State = 1; // locked, no other threads waiting -const CONTENDED: State = 2; // locked, and other threads waiting (contended) - -impl Mutex { - #[inline] - pub const fn new() -> Self { - Self { futex: Atomic::new(UNLOCKED) } - } - - #[inline] - pub fn try_lock(&self) -> bool { - self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed).is_ok() - } - - #[inline] - pub fn lock(&self) { - if self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed).is_err() { - self.lock_contended(); - } - } - - #[cold] - fn lock_contended(&self) { - // Spin first to speed things up if the lock is released quickly. - let mut state = self.spin(); - - // If it's unlocked now, attempt to take the lock - // without marking it as contended. - if state == UNLOCKED { - match self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed) { - Ok(_) => return, // Locked! - Err(s) => state = s, - } - } - - loop { - // Put the lock in contended state. - // We avoid an unnecessary write if it as already set to CONTENDED, - // to be friendlier for the caches. - if state != CONTENDED && self.futex.swap(CONTENDED, Acquire) == UNLOCKED { - // We changed it from UNLOCKED to CONTENDED, so we just successfully locked it. - return; - } - - // Wait for the futex to change state, assuming it is still CONTENDED. - futex_wait(&self.futex, CONTENDED, None); - - // Spin again after waking up. - state = self.spin(); - } - } - - fn spin(&self) -> State { - let mut spin = 100; - loop { - // We only use `load` (and not `swap` or `compare_exchange`) - // while spinning, to be easier on the caches. - let state = self.futex.load(Relaxed); - - // We stop spinning when the mutex is UNLOCKED, - // but also when it's CONTENDED. - if state != LOCKED || spin == 0 { - return state; - } - - crate::hint::spin_loop(); - spin -= 1; - } - } - - #[inline] - pub unsafe fn unlock(&self) { - if self.futex.swap(UNLOCKED, Release) == CONTENDED { - // We only wake up one thread. When that thread locks the mutex, it - // will mark the mutex as CONTENDED (see lock_contended above), - // which makes sure that any other waiting threads will also be - // woken up eventually. - self.wake(); - } - } - - #[cold] - fn wake(&self) { - futex_wake(&self.futex); - } -} diff --git a/library/std/src/sys/locks/mutex/itron.rs b/library/std/src/sys/locks/mutex/itron.rs deleted file mode 100644 index a134eb2d1be..00000000000 --- a/library/std/src/sys/locks/mutex/itron.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Mutex implementation backed by μITRON mutexes. Assumes `acre_mtx` and -//! `TA_INHERIT` are available. 
-use crate::sys::pal::itron::{ - abi, - error::{expect_success, expect_success_aborting, fail, ItronError}, - spin::SpinIdOnceCell, -}; - -pub struct Mutex { - /// The ID of the underlying mutex object - mtx: SpinIdOnceCell<()>, -} - -/// Create a mutex object. This function never panics. -fn new_mtx() -> Result { - ItronError::err_if_negative(unsafe { - abi::acre_mtx(&abi::T_CMTX { - // Priority inheritance mutex - mtxatr: abi::TA_INHERIT, - // Unused - ceilpri: 0, - }) - }) -} - -impl Mutex { - #[inline] - pub const fn new() -> Mutex { - Mutex { mtx: SpinIdOnceCell::new() } - } - - /// Get the inner mutex's ID, which is lazily created. - fn raw(&self) -> abi::ID { - match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) { - Ok((id, ())) => id, - Err(e) => fail(e, &"acre_mtx"), - } - } - - pub fn lock(&self) { - let mtx = self.raw(); - expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx"); - } - - pub unsafe fn unlock(&self) { - let mtx = unsafe { self.mtx.get_unchecked().0 }; - expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx"); - } - - pub fn try_lock(&self) -> bool { - let mtx = self.raw(); - match unsafe { abi::ploc_mtx(mtx) } { - abi::E_TMOUT => false, - er => { - expect_success(er, &"ploc_mtx"); - true - } - } - } -} - -impl Drop for Mutex { - fn drop(&mut self) { - if let Some(mtx) = self.mtx.get().map(|x| x.0) { - expect_success_aborting(unsafe { abi::del_mtx(mtx) }, &"del_mtx"); - } - } -} diff --git a/library/std/src/sys/locks/mutex/mod.rs b/library/std/src/sys/locks/mutex/mod.rs deleted file mode 100644 index 73d9bd273de..00000000000 --- a/library/std/src/sys/locks/mutex/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -cfg_if::cfg_if! { - if #[cfg(any( - all(target_os = "windows", not(target_vendor = "win7")), - target_os = "linux", - target_os = "android", - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - all(target_family = "wasm", target_feature = "atomics"), - target_os = "hermit", - ))] { - mod futex; - pub use futex::Mutex; - } else if #[cfg(target_os = "fuchsia")] { - mod fuchsia; - pub use fuchsia::Mutex; - } else if #[cfg(any( - target_family = "unix", - target_os = "teeos", - ))] { - mod pthread; - pub use pthread::{Mutex, raw}; - } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] { - mod windows7; - pub use windows7::{Mutex, raw}; - } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { - mod sgx; - pub use sgx::Mutex; - } else if #[cfg(target_os = "solid_asp3")] { - mod itron; - pub use itron::Mutex; - } else if #[cfg(target_os = "xous")] { - mod xous; - pub use xous::Mutex; - } else { - mod no_threads; - pub use no_threads::Mutex; - } -} diff --git a/library/std/src/sys/locks/mutex/no_threads.rs b/library/std/src/sys/locks/mutex/no_threads.rs deleted file mode 100644 index 4a13c55fb8b..00000000000 --- a/library/std/src/sys/locks/mutex/no_threads.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::cell::Cell; - -pub struct Mutex { - // This platform has no threads, so we can use a Cell here. 
- locked: Cell, -} - -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} // no threads on this platform - -impl Mutex { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> Mutex { - Mutex { locked: Cell::new(false) } - } - - #[inline] - pub fn lock(&self) { - assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex"); - } - - #[inline] - pub unsafe fn unlock(&self) { - self.locked.set(false); - } - - #[inline] - pub fn try_lock(&self) -> bool { - self.locked.replace(true) == false - } -} diff --git a/library/std/src/sys/locks/mutex/pthread.rs b/library/std/src/sys/locks/mutex/pthread.rs deleted file mode 100644 index ee0794334fb..00000000000 --- a/library/std/src/sys/locks/mutex/pthread.rs +++ /dev/null @@ -1,148 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::io::Error; -use crate::mem::{forget, MaybeUninit}; -use crate::sys::cvt_nz; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; - -struct AllocatedMutex(UnsafeCell); - -pub struct Mutex { - inner: LazyBox, -} - -#[inline] -pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t { - m.inner.0.get() -} - -unsafe impl Send for AllocatedMutex {} -unsafe impl Sync for AllocatedMutex {} - -impl LazyInit for AllocatedMutex { - fn init() -> Box { - let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER))); - - // Issue #33770 - // - // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have - // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you - // try to re-lock it from the same thread when you already hold a lock - // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html). - // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL - // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that - // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same - // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in - // a Mutex where re-locking is UB. - // - // In practice, glibc takes advantage of this undefined behavior to - // implement hardware lock elision, which uses hardware transactional - // memory to avoid acquiring the lock. While a transaction is in - // progress, the lock appears to be unlocked. This isn't a problem for - // other threads since the transactional memory will abort if a conflict - // is detected, however no abort is generated when re-locking from the - // same thread. - // - // Since locking the same mutex twice will result in two aliasing &mut - // references, we instead create the mutex with type - // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to - // re-lock it from the same thread, thus avoiding undefined behavior. - unsafe { - let mut attr = MaybeUninit::::uninit(); - cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap(); - let attr = PthreadMutexAttr(&mut attr); - cvt_nz(libc::pthread_mutexattr_settype( - attr.0.as_mut_ptr(), - libc::PTHREAD_MUTEX_NORMAL, - )) - .unwrap(); - cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap(); - } - - mutex - } - - fn destroy(mutex: Box) { - // We're not allowed to pthread_mutex_destroy a locked mutex, - // so check first if it's unlocked. - if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } { - unsafe { libc::pthread_mutex_unlock(mutex.0.get()) }; - drop(mutex); - } else { - // The mutex is locked. This happens if a MutexGuard is leaked. 
- // In this case, we just leak the Mutex too. - forget(mutex); - } - } - - fn cancel_init(_: Box) { - // In this case, we can just drop it without any checks, - // since it cannot have been locked yet. - } -} - -impl Drop for AllocatedMutex { - #[inline] - fn drop(&mut self) { - let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) }; - if cfg!(target_os = "dragonfly") { - // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a - // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. - // Once it is used (locked/unlocked) or pthread_mutex_init() is called, - // this behaviour no longer occurs. - debug_assert!(r == 0 || r == libc::EINVAL); - } else { - debug_assert_eq!(r, 0); - } - } -} - -impl Mutex { - #[inline] - pub const fn new() -> Mutex { - Mutex { inner: LazyBox::new() } - } - - #[inline] - pub unsafe fn lock(&self) { - #[cold] - #[inline(never)] - fn fail(r: i32) -> ! { - let error = Error::from_raw_os_error(r); - panic!("failed to lock mutex: {error}"); - } - - let r = libc::pthread_mutex_lock(raw(self)); - // As we set the mutex type to `PTHREAD_MUTEX_NORMAL` above, we expect - // the lock call to never fail. Unfortunately however, some platforms - // (Solaris) do not conform to the standard, and instead always provide - // deadlock detection. How kind of them! Unfortunately that means that - // we need to check the error code here. To save us from UB on other - // less well-behaved platforms in the future, we do it even on "good" - // platforms like macOS. See #120147 for more context. - if r != 0 { - fail(r) - } - } - - #[inline] - pub unsafe fn unlock(&self) { - let r = libc::pthread_mutex_unlock(raw(self)); - debug_assert_eq!(r, 0); - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - libc::pthread_mutex_trylock(raw(self)) == 0 - } -} - -pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit); - -impl Drop for PthreadMutexAttr<'_> { - fn drop(&mut self) { - unsafe { - let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr()); - debug_assert_eq!(result, 0); - } - } -} diff --git a/library/std/src/sys/locks/mutex/sgx.rs b/library/std/src/sys/locks/mutex/sgx.rs deleted file mode 100644 index d37bd02adf8..00000000000 --- a/library/std/src/sys/locks/mutex/sgx.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::sys::pal::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable}; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; - -/// FIXME: `UnsafeList` is not movable. 
-struct AllocatedMutex(SpinMutex>); - -pub struct Mutex { - inner: LazyBox, -} - -impl LazyInit for AllocatedMutex { - fn init() -> Box { - Box::new(AllocatedMutex(SpinMutex::new(WaitVariable::new(false)))) - } -} - -// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28 -impl Mutex { - pub const fn new() -> Mutex { - Mutex { inner: LazyBox::new() } - } - - #[inline] - pub fn lock(&self) { - let mut guard = self.inner.0.lock(); - if *guard.lock_var() { - // Another thread has the lock, wait - WaitQueue::wait(guard, || {}) - // Another thread has passed the lock to us - } else { - // We are just now obtaining the lock - *guard.lock_var_mut() = true; - } - } - - #[inline] - pub unsafe fn unlock(&self) { - let guard = self.inner.0.lock(); - if let Err(mut guard) = WaitQueue::notify_one(guard) { - // No other waiters, unlock - *guard.lock_var_mut() = false; - } else { - // There was a thread waiting, just pass the lock - } - } - - #[inline] - pub fn try_lock(&self) -> bool { - let mut guard = try_lock_or_false!(self.inner.0); - if *guard.lock_var() { - // Another thread has the lock - false - } else { - // We are just now obtaining the lock - *guard.lock_var_mut() = true; - true - } - } -} diff --git a/library/std/src/sys/locks/mutex/windows7.rs b/library/std/src/sys/locks/mutex/windows7.rs deleted file mode 100644 index ef2f84082cd..00000000000 --- a/library/std/src/sys/locks/mutex/windows7.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! System Mutexes -//! -//! The Windows implementation of mutexes is a little odd and it might not be -//! immediately obvious what's going on. The primary oddness is that SRWLock is -//! used instead of CriticalSection, and this is done because: -//! -//! 1. SRWLock is several times faster than CriticalSection according to -//! benchmarks performed on both Windows 8 and Windows 7. -//! -//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The -//! Unix implementation deadlocks so consistency is preferred. See #19962 for -//! more details. -//! -//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy -//! is that there are no guarantees of fairness. - -use crate::cell::UnsafeCell; -use crate::sys::c; - -pub struct Mutex { - srwlock: UnsafeCell, -} - -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} - -#[inline] -pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK { - m.srwlock.get() -} - -impl Mutex { - #[inline] - pub const fn new() -> Mutex { - Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) } - } - - #[inline] - pub fn lock(&self) { - unsafe { - c::AcquireSRWLockExclusive(raw(self)); - } - } - - #[inline] - pub fn try_lock(&self) -> bool { - unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 } - } - - #[inline] - pub unsafe fn unlock(&self) { - c::ReleaseSRWLockExclusive(raw(self)); - } -} diff --git a/library/std/src/sys/locks/mutex/xous.rs b/library/std/src/sys/locks/mutex/xous.rs deleted file mode 100644 index a8c9518ff0b..00000000000 --- a/library/std/src/sys/locks/mutex/xous.rs +++ /dev/null @@ -1,110 +0,0 @@ -use crate::os::xous::ffi::{blocking_scalar, do_yield}; -use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; -use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst}; - -pub struct Mutex { - /// The "locked" value indicates how many threads are waiting on this - /// Mutex. 
Possible values are: - /// 0: The lock is unlocked - /// 1: The lock is locked and uncontended - /// >=2: The lock is locked and contended - /// - /// A lock is "contended" when there is more than one thread waiting - /// for a lock, or it is locked for long periods of time. Rather than - /// spinning, these locks send a Message to the ticktimer server - /// requesting that they be woken up when a lock is unlocked. - locked: AtomicUsize, - - /// Whether this Mutex ever was contended, and therefore made a trip - /// to the ticktimer server. If this was never set, then we were never - /// on the slow path and can skip deregistering the mutex. - contended: AtomicBool, -} - -impl Mutex { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> Mutex { - Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) } - } - - fn index(&self) -> usize { - core::ptr::from_ref(self).addr() - } - - #[inline] - pub unsafe fn lock(&self) { - // Try multiple times to acquire the lock without resorting to the ticktimer - // server. For locks that are held for a short amount of time, this will - // result in the ticktimer server never getting invoked. The `locked` value - // will be either 0 or 1. - for _attempts in 0..3 { - if unsafe { self.try_lock() } { - return; - } - do_yield(); - } - - // Try one more time to lock. If the lock is released between the previous code and - // here, then the inner `locked` value will be 1 at the end of this. If it was not - // locked, then the value will be more than 1, for example if there are multiple other - // threads waiting on this lock. - if unsafe { self.try_lock_or_poison() } { - return; - } - - // When this mutex is dropped, we will need to deregister it with the server. - self.contended.store(true, Relaxed); - - // The lock is now "contended". When the lock is released, a Message will get sent to the - // ticktimer server to wake it up. Note that this may already have happened, so the actual - // value of `lock` may be anything (0, 1, 2, ...). - blocking_scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(), - ) - .expect("failure to send LockMutex command"); - } - - #[inline] - pub unsafe fn unlock(&self) { - let prev = self.locked.fetch_sub(1, SeqCst); - - // If the previous value was 1, then this was a "fast path" unlock, so no - // need to involve the Ticktimer server - if prev == 1 { - return; - } - - // If it was 0, then something has gone seriously wrong and the counter - // has just wrapped around. - if prev == 0 { - panic!("mutex lock count underflowed"); - } - - // Unblock one thread that is waiting on this message. - blocking_scalar(ticktimer_server(), TicktimerScalar::UnlockMutex(self.index()).into()) - .expect("failure to send UnlockMutex command"); - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() - } - - #[inline] - pub unsafe fn try_lock_or_poison(&self) -> bool { - self.locked.fetch_add(1, SeqCst) == 0 - } -} - -impl Drop for Mutex { - fn drop(&mut self) { - // If there was Mutex contention, then we involved the ticktimer. Free - // the resources associated with this Mutex as it is deallocated. 
- if self.contended.load(Relaxed) { - blocking_scalar(ticktimer_server(), TicktimerScalar::FreeMutex(self.index()).into()) - .ok(); - } - } -} diff --git a/library/std/src/sys/locks/rwlock/futex.rs b/library/std/src/sys/locks/rwlock/futex.rs deleted file mode 100644 index aa0de900238..00000000000 --- a/library/std/src/sys/locks/rwlock/futex.rs +++ /dev/null @@ -1,320 +0,0 @@ -use crate::sync::atomic::{ - AtomicU32, - Ordering::{Acquire, Relaxed, Release}, -}; -use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all}; - -pub struct RwLock { - // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag. - // Bits 0..30: - // 0: Unlocked - // 1..=0x3FFF_FFFE: Locked by N readers - // 0x3FFF_FFFF: Write locked - // Bit 30: Readers are waiting on this futex. - // Bit 31: Writers are waiting on the writer_notify futex. - state: AtomicU32, - // The 'condition variable' to notify writers through. - // Incremented on every signal. - writer_notify: AtomicU32, -} - -const READ_LOCKED: u32 = 1; -const MASK: u32 = (1 << 30) - 1; -const WRITE_LOCKED: u32 = MASK; -const MAX_READERS: u32 = MASK - 1; -const READERS_WAITING: u32 = 1 << 30; -const WRITERS_WAITING: u32 = 1 << 31; - -#[inline] -fn is_unlocked(state: u32) -> bool { - state & MASK == 0 -} - -#[inline] -fn is_write_locked(state: u32) -> bool { - state & MASK == WRITE_LOCKED -} - -#[inline] -fn has_readers_waiting(state: u32) -> bool { - state & READERS_WAITING != 0 -} - -#[inline] -fn has_writers_waiting(state: u32) -> bool { - state & WRITERS_WAITING != 0 -} - -#[inline] -fn is_read_lockable(state: u32) -> bool { - // This also returns false if the counter could overflow if we tried to read lock it. - // - // We don't allow read-locking if there's readers waiting, even if the lock is unlocked - // and there's no writers waiting. The only situation when this happens is after unlocking, - // at which point the unlocking thread might be waking up writers, which have priority over readers. - // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary. - state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state) -} - -#[inline] -fn has_reached_max_readers(state: u32) -> bool { - state & MASK == MAX_READERS -} - -impl RwLock { - #[inline] - pub const fn new() -> Self { - Self { state: AtomicU32::new(0), writer_notify: AtomicU32::new(0) } - } - - #[inline] - pub fn try_read(&self) -> bool { - self.state - .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED)) - .is_ok() - } - - #[inline] - pub fn read(&self) { - let state = self.state.load(Relaxed); - if !is_read_lockable(state) - || self - .state - .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed) - .is_err() - { - self.read_contended(); - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED; - - // It's impossible for a reader to be waiting on a read-locked RwLock, - // except if there is also a writer waiting. - debug_assert!(!has_readers_waiting(state) || has_writers_waiting(state)); - - // Wake up a writer if we were the last reader and there's a writer waiting. - if is_unlocked(state) && has_writers_waiting(state) { - self.wake_writer_or_readers(state); - } - } - - #[cold] - fn read_contended(&self) { - let mut state = self.spin_read(); - - loop { - // If we can lock it, lock it. 
- if is_read_lockable(state) { - match self.state.compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed) - { - Ok(_) => return, // Locked! - Err(s) => { - state = s; - continue; - } - } - } - - // Check for overflow. - if has_reached_max_readers(state) { - panic!("too many active read locks on RwLock"); - } - - // Make sure the readers waiting bit is set before we go to sleep. - if !has_readers_waiting(state) { - if let Err(s) = - self.state.compare_exchange(state, state | READERS_WAITING, Relaxed, Relaxed) - { - state = s; - continue; - } - } - - // Wait for the state to change. - futex_wait(&self.state, state | READERS_WAITING, None); - - // Spin again after waking up. - state = self.spin_read(); - } - } - - #[inline] - pub fn try_write(&self) -> bool { - self.state - .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED)) - .is_ok() - } - - #[inline] - pub fn write(&self) { - if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() { - self.write_contended(); - } - } - - #[inline] - pub unsafe fn write_unlock(&self) { - let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED; - - debug_assert!(is_unlocked(state)); - - if has_writers_waiting(state) || has_readers_waiting(state) { - self.wake_writer_or_readers(state); - } - } - - #[cold] - fn write_contended(&self) { - let mut state = self.spin_write(); - - let mut other_writers_waiting = 0; - - loop { - // If it's unlocked, we try to lock it. - if is_unlocked(state) { - match self.state.compare_exchange_weak( - state, - state | WRITE_LOCKED | other_writers_waiting, - Acquire, - Relaxed, - ) { - Ok(_) => return, // Locked! - Err(s) => { - state = s; - continue; - } - } - } - - // Set the waiting bit indicating that we're waiting on it. - if !has_writers_waiting(state) { - if let Err(s) = - self.state.compare_exchange(state, state | WRITERS_WAITING, Relaxed, Relaxed) - { - state = s; - continue; - } - } - - // Other writers might be waiting now too, so we should make sure - // we keep that bit on once we manage lock it. - other_writers_waiting = WRITERS_WAITING; - - // Examine the notification counter before we check if `state` has changed, - // to make sure we don't miss any notifications. - let seq = self.writer_notify.load(Acquire); - - // Don't go to sleep if the lock has become available, - // or if the writers waiting bit is no longer set. - state = self.state.load(Relaxed); - if is_unlocked(state) || !has_writers_waiting(state) { - continue; - } - - // Wait for the state to change. - futex_wait(&self.writer_notify, seq, None); - - // Spin again after waking up. - state = self.spin_write(); - } - } - - /// Wake up waiting threads after unlocking. - /// - /// If both are waiting, this will wake up only one writer, but will fall - /// back to waking up readers if there was no writer to wake up. - #[cold] - fn wake_writer_or_readers(&self, mut state: u32) { - assert!(is_unlocked(state)); - - // The readers waiting bit might be turned on at any point now, - // since readers will block when there's anything waiting. - // Writers will just lock the lock though, regardless of the waiting bits, - // so we don't have to worry about the writer waiting bit. - // - // If the lock gets locked in the meantime, we don't have to do - // anything, because then the thread that locked the lock will take - // care of waking up waiters when it unlocks. - - // If only writers are waiting, wake one of them up. 
- if state == WRITERS_WAITING { - match self.state.compare_exchange(state, 0, Relaxed, Relaxed) { - Ok(_) => { - self.wake_writer(); - return; - } - Err(s) => { - // Maybe some readers are now waiting too. So, continue to the next `if`. - state = s; - } - } - } - - // If both writers and readers are waiting, leave the readers waiting - // and only wake up one writer. - if state == READERS_WAITING + WRITERS_WAITING { - if self.state.compare_exchange(state, READERS_WAITING, Relaxed, Relaxed).is_err() { - // The lock got locked. Not our problem anymore. - return; - } - if self.wake_writer() { - return; - } - // No writers were actually blocked on futex_wait, so we continue - // to wake up readers instead, since we can't be sure if we notified a writer. - state = READERS_WAITING; - } - - // If readers are waiting, wake them all up. - if state == READERS_WAITING { - if self.state.compare_exchange(state, 0, Relaxed, Relaxed).is_ok() { - futex_wake_all(&self.state); - } - } - } - - /// This wakes one writer and returns true if we woke up a writer that was - /// blocked on futex_wait. - /// - /// If this returns false, it might still be the case that we notified a - /// writer that was about to go to sleep. - fn wake_writer(&self) -> bool { - self.writer_notify.fetch_add(1, Release); - futex_wake(&self.writer_notify) - // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke - // up any threads or not, and always return `false` here. That still - // results in correct behaviour: it just means readers get woken up as - // well in case both readers and writers were waiting. - } - - /// Spin for a while, but stop directly at the given condition. - #[inline] - fn spin_until(&self, f: impl Fn(u32) -> bool) -> u32 { - let mut spin = 100; // Chosen by fair dice roll. - loop { - let state = self.state.load(Relaxed); - if f(state) || spin == 0 { - return state; - } - crate::hint::spin_loop(); - spin -= 1; - } - } - - #[inline] - fn spin_write(&self) -> u32 { - // Stop spinning when it's unlocked or when there's waiting writers, to keep things somewhat fair. - self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state)) - } - - #[inline] - fn spin_read(&self) -> u32 { - // Stop spinning when it's unlocked or read locked, or when there's waiting threads. - self.spin_until(|state| { - !is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state) - }) - } -} diff --git a/library/std/src/sys/locks/rwlock/mod.rs b/library/std/src/sys/locks/rwlock/mod.rs deleted file mode 100644 index 675931c64bd..00000000000 --- a/library/std/src/sys/locks/rwlock/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -cfg_if::cfg_if! 
{ - if #[cfg(any( - all(target_os = "windows", not(target_vendor = "win7")), - target_os = "linux", - target_os = "android", - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - target_os = "fuchsia", - all(target_family = "wasm", target_feature = "atomics"), - target_os = "hermit", - ))] { - mod futex; - pub use futex::RwLock; - } else if #[cfg(target_family = "unix")] { - mod queue; - pub use queue::RwLock; - } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] { - mod windows7; - pub use windows7::RwLock; - } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { - mod sgx; - pub use sgx::RwLock; - } else if #[cfg(target_os = "solid_asp3")] { - mod solid; - pub use solid::RwLock; - } else if #[cfg(target_os = "teeos")] { - mod teeos; - pub use teeos::RwLock; - } else if #[cfg(target_os = "xous")] { - mod xous; - pub use xous::RwLock; - } else { - mod no_threads; - pub use no_threads::RwLock; - } -} diff --git a/library/std/src/sys/locks/rwlock/no_threads.rs b/library/std/src/sys/locks/rwlock/no_threads.rs deleted file mode 100644 index 789ef9b29e5..00000000000 --- a/library/std/src/sys/locks/rwlock/no_threads.rs +++ /dev/null @@ -1,65 +0,0 @@ -use crate::cell::Cell; - -pub struct RwLock { - // This platform has no threads, so we can use a Cell here. - mode: Cell, -} - -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} // no threads on this platform - -impl RwLock { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> RwLock { - RwLock { mode: Cell::new(0) } - } - - #[inline] - pub fn read(&self) { - let m = self.mode.get(); - if m >= 0 { - self.mode.set(m + 1); - } else { - rtabort!("rwlock locked for writing"); - } - } - - #[inline] - pub fn try_read(&self) -> bool { - let m = self.mode.get(); - if m >= 0 { - self.mode.set(m + 1); - true - } else { - false - } - } - - #[inline] - pub fn write(&self) { - if self.mode.replace(-1) != 0 { - rtabort!("rwlock locked for reading") - } - } - - #[inline] - pub fn try_write(&self) -> bool { - if self.mode.get() == 0 { - self.mode.set(-1); - true - } else { - false - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - self.mode.set(self.mode.get() - 1); - } - - #[inline] - pub unsafe fn write_unlock(&self) { - assert_eq!(self.mode.replace(0), -1); - } -} diff --git a/library/std/src/sys/locks/rwlock/queue.rs b/library/std/src/sys/locks/rwlock/queue.rs deleted file mode 100644 index dce966086b8..00000000000 --- a/library/std/src/sys/locks/rwlock/queue.rs +++ /dev/null @@ -1,557 +0,0 @@ -//! Efficient read-write locking without `pthread_rwlock_t`. -//! -//! The readers-writer lock provided by the `pthread` library has a number of -//! problems which make it a suboptimal choice for `std`: -//! -//! * It is non-movable, so it needs to be allocated (lazily, to make the -//! constructor `const`). -//! * `pthread` is an external library, meaning the fast path of acquiring an -//! uncontended lock cannot be inlined. -//! * Some platforms (at least glibc before version 2.25) have buggy implementations -//! that can easily lead to undefined behaviour in safe Rust code when not properly -//! guarded against. -//! * On some platforms (e.g. macOS), the lock is very slow. -//! -//! Therefore, we implement our own `RwLock`! Naively, one might reach for a -//! spinlock, but those [can be quite problematic] when the lock is contended. -//! Instead, this readers-writer lock copies its implementation strategy from -//! 
the Windows [SRWLOCK] and the [usync] library. Spinning is still used for the -//! fast path, but it is bounded: after spinning fails, threads will locklessly -//! add an information structure containing a [`Thread`] handle into a queue of -//! waiters associated with the lock. The lock owner, upon releasing the lock, -//! will scan through the queue and wake up threads as appropriate, which will -//! then again try to acquire the lock. The resulting [`RwLock`] is: -//! -//! * adaptive, since it spins before doing any heavywheight parking operations -//! * allocation-free, modulo the per-thread [`Thread`] handle, which is -//! allocated regardless when using threads created by `std` -//! * writer-preferring, even if some readers may still slip through -//! * unfair, which reduces context-switching and thus drastically improves -//! performance -//! -//! and also quite fast in most cases. -//! -//! [can be quite problematic]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html -//! [SRWLOCK]: https://learn.microsoft.com/en-us/windows/win32/sync/slim-reader-writer--srw--locks -//! [usync]: https://crates.io/crates/usync -//! -//! # Implementation -//! -//! ## State -//! -//! A single [`AtomicPtr`] is used as state variable. The lowest three bits are used -//! to indicate the meaning of the remaining bits: -//! -//! | [`LOCKED`] | [`QUEUED`] | [`QUEUE_LOCKED`] | Remaining | | -//! |:-----------|:-----------|:-----------------|:-------------|:----------------------------------------------------------------------------------------------------------------------------| -//! | 0 | 0 | 0 | 0 | The lock is unlocked, no threads are waiting | -//! | 1 | 0 | 0 | 0 | The lock is write-locked, no threads waiting | -//! | 1 | 0 | 0 | n > 0 | The lock is read-locked with n readers | -//! | 0 | 1 | * | `*mut Node` | The lock is unlocked, but some threads are waiting. Only writers may lock the lock | -//! | 1 | 1 | * | `*mut Node` | The lock is locked, but some threads are waiting. If the lock is read-locked, the last queue node contains the reader count | -//! -//! ## Waiter queue -//! -//! When threads are waiting on the lock (`QUEUE` is set), the lock state -//! points to a queue of waiters, which is implemented as a linked list of -//! nodes stored on the stack to avoid memory allocation. To enable lockless -//! enqueuing of new nodes to the queue, the linked list is single-linked upon -//! creation. Since when the lock is read-locked, the lock count is stored in -//! the last link of the queue, threads have to traverse the queue to find the -//! last element upon releasing the lock. To avoid having to traverse the whole -//! list again and again, a pointer to the found tail is cached in the (current) -//! first element of the queue. -//! -//! Also, while the lock is unfair for performance reasons, it is still best to -//! wake the tail node first, which requires backlinks to previous nodes to be -//! created. This is done at the same time as finding the tail, and thus a set -//! tail field indicates the remaining portion of the queue is initialized. -//! -//! TLDR: Here's a diagram of what the queue looks like: -//! -//! ```text -//! state -//! │ -//! ▼ -//! ╭───────╮ next ╭───────╮ next ╭───────╮ next ╭───────╮ -//! │ ├─────►│ ├─────►│ ├─────►│ count │ -//! │ │ │ │ │ │ │ │ -//! │ │ │ │◄─────┤ │◄─────┤ │ -//! ╰───────╯ ╰───────╯ prev ╰───────╯ prev ╰───────╯ -//! │ ▲ -//! └───────────────────────────┘ -//! tail -//! ``` -//! -//! Invariants: -//! 1. 
At least one node must contain a non-null, current `tail` field. -//! 2. The first non-null `tail` field must be valid and current. -//! 3. All nodes preceding this node must have a correct, non-null `next` field. -//! 4. All nodes following this node must have a correct, non-null `prev` field. -//! -//! Access to the queue is controlled by the `QUEUE_LOCKED` bit, which threads -//! try to set both after enqueuing themselves to eagerly add backlinks to the -//! queue, which drastically improves performance, and after unlocking the lock -//! to wake the next waiter(s). This is done atomically at the same time as the -//! enqueuing/unlocking operation. The thread releasing the `QUEUE_LOCK` bit -//! will check the state of the lock and wake up waiters as appropriate. This -//! guarantees forward-progress even if the unlocking thread could not acquire -//! the queue lock. -//! -//! ## Memory orderings -//! -//! To properly synchronize changes to the data protected by the lock, the lock -//! is acquired and released with [`Acquire`] and [`Release`] ordering, respectively. -//! To propagate the initialization of nodes, changes to the queue lock are also -//! performed using these orderings. - -#![forbid(unsafe_op_in_unsafe_fn)] - -use crate::cell::OnceCell; -use crate::hint::spin_loop; -use crate::mem; -use crate::ptr::{self, null_mut, without_provenance_mut, NonNull}; -use crate::sync::atomic::{ - AtomicBool, AtomicPtr, - Ordering::{AcqRel, Acquire, Relaxed, Release}, -}; -use crate::sys_common::thread_info; -use crate::thread::Thread; - -// Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the -// locking operation will be retried. -// `spin_loop` will be called `2.pow(SPIN_COUNT) - 1` times. -const SPIN_COUNT: usize = 7; - -type State = *mut (); -type AtomicState = AtomicPtr<()>; - -const UNLOCKED: State = without_provenance_mut(0); -const LOCKED: usize = 1; -const QUEUED: usize = 2; -const QUEUE_LOCKED: usize = 4; -const SINGLE: usize = 8; -const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED); - -/// Marks the state as write-locked, if possible. -#[inline] -fn write_lock(state: State) -> Option { - let state = state.wrapping_byte_add(LOCKED); - if state.addr() & LOCKED == LOCKED { Some(state) } else { None } -} - -/// Marks the state as read-locked, if possible. -#[inline] -fn read_lock(state: State) -> Option { - if state.addr() & QUEUED == 0 && state.addr() != LOCKED { - Some(without_provenance_mut(state.addr().checked_add(SINGLE)? | LOCKED)) - } else { - None - } -} - -/// Masks the state, assuming it points to a queue node. -/// -/// # Safety -/// The state must contain a valid pointer to a queue node. -#[inline] -unsafe fn to_node(state: State) -> NonNull { - unsafe { NonNull::new_unchecked(state.mask(MASK)).cast() } -} - -/// An atomic node pointer with relaxed operations. -struct AtomicLink(AtomicPtr); - -impl AtomicLink { - fn new(v: Option>) -> AtomicLink { - AtomicLink(AtomicPtr::new(v.map_or(null_mut(), NonNull::as_ptr))) - } - - fn get(&self) -> Option> { - NonNull::new(self.0.load(Relaxed)) - } - - fn set(&self, v: Option>) { - self.0.store(v.map_or(null_mut(), NonNull::as_ptr), Relaxed); - } -} - -#[repr(align(8))] -struct Node { - next: AtomicLink, - prev: AtomicLink, - tail: AtomicLink, - write: bool, - thread: OnceCell, - completed: AtomicBool, -} - -impl Node { - /// Create a new queue node. 
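The constants above pack three flag bits plus either a reader count (in multiples of `SINGLE`) or an 8-aligned node pointer into one pointer-sized state word. A standalone sketch of that arithmetic on plain `usize` values; the real code works on the address of an `AtomicPtr<()>` through strict-provenance helpers, and the node address below is invented purely for illustration:

const LOCKED: usize = 1;       // bit 0: held by a writer or by >= 1 readers
const QUEUED: usize = 2;       // bit 1: the remaining bits are a waiter-node pointer
const QUEUE_LOCKED: usize = 4; // bit 2: some thread is currently editing the queue
const SINGLE: usize = 8;       // one reader, while the remaining bits are a count
const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED);

fn main() {
    // No waiters, read-locked by three readers: the count sits above the flag bits.
    let three_readers = 3 * SINGLE | LOCKED;
    assert_eq!(three_readers & MASK, 3 * SINGLE);
    assert_eq!(three_readers & QUEUED, 0);

    // Once waiters exist, the same upper bits hold an 8-aligned node address
    // instead, so the reader count has to move into the queue's tail node.
    let node_addr: usize = 0x7f00_0010; // hypothetical, 8-aligned
    let locked_with_waiters = node_addr | QUEUED | LOCKED;
    assert_eq!(locked_with_waiters & MASK, node_addr);
    assert_ne!(locked_with_waiters & QUEUED, 0);
}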
- fn new(write: bool) -> Node { - Node { - next: AtomicLink::new(None), - prev: AtomicLink::new(None), - tail: AtomicLink::new(None), - write, - thread: OnceCell::new(), - completed: AtomicBool::new(false), - } - } - - /// Prepare this node for waiting. - fn prepare(&mut self) { - // Fall back to creating an unnamed `Thread` handle to allow locking in - // TLS destructors. - self.thread - .get_or_init(|| thread_info::current_thread().unwrap_or_else(|| Thread::new(None))); - self.completed = AtomicBool::new(false); - } - - /// Wait until this node is marked as completed. - /// - /// # Safety - /// May only be called from the thread that created the node. - unsafe fn wait(&self) { - while !self.completed.load(Acquire) { - unsafe { - self.thread.get().unwrap().park(); - } - } - } - - /// Atomically mark this node as completed. The node may not outlive this call. - unsafe fn complete(this: NonNull) { - // Since the node may be destroyed immediately after the completed flag - // is set, clone the thread handle before that. - let thread = unsafe { this.as_ref().thread.get().unwrap().clone() }; - unsafe { - this.as_ref().completed.store(true, Release); - } - thread.unpark(); - } -} - -struct PanicGuard; - -impl Drop for PanicGuard { - fn drop(&mut self) { - rtabort!("tried to drop node in intrusive list."); - } -} - -/// Add backlinks to the queue, returning the tail. -/// -/// May be called from multiple threads at the same time, while the queue is not -/// modified (this happens when unlocking multiple readers). -/// -/// # Safety -/// * `head` must point to a node in a valid queue. -/// * `head` must be or be in front of the head of the queue at the time of the -/// last removal. -/// * The part of the queue starting with `head` must not be modified during this -/// call. -unsafe fn add_backlinks_and_find_tail(head: NonNull) -> NonNull { - let mut current = head; - let tail = loop { - let c = unsafe { current.as_ref() }; - match c.tail.get() { - Some(tail) => break tail, - // SAFETY: - // All `next` fields before the first node with a `set` tail are - // non-null and valid (invariant 3). - None => unsafe { - let next = c.next.get().unwrap_unchecked(); - next.as_ref().prev.set(Some(current)); - current = next; - }, - } - }; - - unsafe { - head.as_ref().tail.set(Some(tail)); - tail - } -} - -pub struct RwLock { - state: AtomicState, -} - -impl RwLock { - #[inline] - pub const fn new() -> RwLock { - RwLock { state: AtomicPtr::new(UNLOCKED) } - } - - #[inline] - pub fn try_read(&self) -> bool { - self.state.fetch_update(Acquire, Relaxed, read_lock).is_ok() - } - - #[inline] - pub fn read(&self) { - if !self.try_read() { - self.lock_contended(false) - } - } - - #[inline] - pub fn try_write(&self) -> bool { - // Atomically set the `LOCKED` bit. This is lowered to a single atomic - // instruction on most modern processors (e.g. "lock bts" on x86 and - // "ldseta" on modern AArch64), and therefore is more efficient than - // `fetch_update(lock(true))`, which can spuriously fail if a new node - // is appended to the queue. - self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0 - } - - #[inline] - pub fn write(&self) { - if !self.try_write() { - self.lock_contended(true) - } - } - - #[cold] - fn lock_contended(&self, write: bool) { - let update = if write { write_lock } else { read_lock }; - let mut node = Node::new(write); - let mut state = self.state.load(Relaxed); - let mut count = 0; - loop { - if let Some(next) = update(state) { - // The lock is available, try locking it. 
- match self.state.compare_exchange_weak(state, next, Acquire, Relaxed) { - Ok(_) => return, - Err(new) => state = new, - } - } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT { - // If the lock is not available and no threads are queued, spin - // for a while, using exponential backoff to decrease cache - // contention. - for _ in 0..(1 << count) { - spin_loop(); - } - state = self.state.load(Relaxed); - count += 1; - } else { - // Fall back to parking. First, prepare the node. - node.prepare(); - - // If there are threads queued, set the `next` field to a - // pointer to the next node in the queue. Otherwise set it to - // the lock count if the state is read-locked or to zero if it - // is write-locked. - node.next.0 = AtomicPtr::new(state.mask(MASK).cast()); - node.prev = AtomicLink::new(None); - let mut next = ptr::from_ref(&node) - .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED)) - as State; - - if state.addr() & QUEUED == 0 { - // If this is the first node in the queue, set the tail field to - // the node itself to ensure there is a current `tail` field in - // the queue (invariants 1 and 2). This needs to use `set` to - // avoid invalidating the new pointer. - node.tail.set(Some(NonNull::from(&node))); - } else { - // Otherwise, the tail of the queue is not known. - node.tail.set(None); - // Try locking the queue to eagerly add backlinks. - next = next.map_addr(|addr| addr | QUEUE_LOCKED); - } - - // Register the node, using release ordering to propagate our - // changes to the waking thread. - if let Err(new) = self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { - // The state has changed, just try again. - state = new; - continue; - } - - // The node is registered, so the structure must not be - // mutably accessed or destroyed while other threads may - // be accessing it. Guard against unwinds using a panic - // guard that aborts when dropped. - let guard = PanicGuard; - - // If the current thread locked the queue, unlock it again, - // linking it in the process. - if state.addr() & (QUEUE_LOCKED | QUEUED) == QUEUED { - unsafe { - self.unlock_queue(next); - } - } - - // Wait until the node is removed from the queue. - // SAFETY: the node was created by the current thread. - unsafe { - node.wait(); - } - - // The node was removed from the queue, disarm the guard. - mem::forget(guard); - - // Reload the state and try again. - state = self.state.load(Relaxed); - count = 0; - } - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - match self.state.fetch_update(Release, Acquire, |state| { - if state.addr() & QUEUED == 0 { - let count = state.addr() - (SINGLE | LOCKED); - Some(if count > 0 { without_provenance_mut(count | LOCKED) } else { UNLOCKED }) - } else { - None - } - }) { - Ok(_) => {} - // There are waiters queued and the lock count was moved to the - // tail of the queue. - Err(state) => unsafe { self.read_unlock_contended(state) }, - } - } - - #[cold] - unsafe fn read_unlock_contended(&self, state: State) { - // The state was observed with acquire ordering above, so the current - // thread will observe all node initializations. - - // SAFETY: - // Because new read-locks cannot be acquired while threads are queued, - // all queue-lock owners will observe the set `LOCKED` bit. Because they - // do not modify the queue while there is a lock owner, the queue will - // not be removed from here. - let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() }; - // The lock count is stored in the `next` field of `tail`. 
- // Decrement it, making sure to observe all changes made to the queue - // by the other lock owners by using acquire-release ordering. - let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0; - if was_last { - // SAFETY: - // Other threads cannot read-lock while threads are queued. Also, - // the `LOCKED` bit is still set, so there are no writers. Therefore, - // the current thread exclusively owns the lock. - unsafe { self.unlock_contended(state) } - } - } - - #[inline] - pub unsafe fn write_unlock(&self) { - if let Err(state) = - self.state.compare_exchange(without_provenance_mut(LOCKED), UNLOCKED, Release, Relaxed) - { - // SAFETY: - // Since other threads cannot acquire the lock, the state can only - // have changed because there are threads queued on the lock. - unsafe { self.unlock_contended(state) } - } - } - - /// # Safety - /// * The lock must be exclusively owned by this thread. - /// * There must be threads queued on the lock. - #[cold] - unsafe fn unlock_contended(&self, mut state: State) { - loop { - // Atomically release the lock and try to acquire the queue lock. - let next = state.map_addr(|a| (a & !LOCKED) | QUEUE_LOCKED); - match self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { - // The queue lock was acquired. Release it, waking up the next - // waiter in the process. - Ok(_) if state.addr() & QUEUE_LOCKED == 0 => unsafe { - return self.unlock_queue(next); - }, - // Another thread already holds the queue lock, leave waking up - // waiters to it. - Ok(_) => return, - Err(new) => state = new, - } - } - } - - /// Unlocks the queue. If the lock is unlocked, wakes up the next eligible - /// thread(s). - /// - /// # Safety - /// The queue lock must be held by the current thread. - unsafe fn unlock_queue(&self, mut state: State) { - debug_assert_eq!(state.addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED); - - loop { - let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) }; - - if state.addr() & LOCKED == LOCKED { - // Another thread has locked the lock. Leave waking up waiters - // to them by releasing the queue lock. - match self.state.compare_exchange_weak( - state, - state.mask(!QUEUE_LOCKED), - Release, - Acquire, - ) { - Ok(_) => return, - Err(new) => { - state = new; - continue; - } - } - } - - let is_writer = unsafe { tail.as_ref().write }; - if is_writer && let Some(prev) = unsafe { tail.as_ref().prev.get() } { - // `tail` is a writer and there is a node before `tail`. - // Split off `tail`. - - // There are no set `tail` links before the node pointed to by - // `state`, so the first non-null tail field will be current - // (invariant 2). Invariant 4 is fullfilled since `find_tail` - // was called on this node, which ensures all backlinks are set. - unsafe { - to_node(state).as_ref().tail.set(Some(prev)); - } - - // Release the queue lock. Doing this by subtraction is more - // efficient on modern processors since it is a single instruction - // instead of an update loop, which will fail if new threads are - // added to the list. - self.state.fetch_byte_sub(QUEUE_LOCKED, Release); - - // The tail was split off and the lock released. Mark the node as - // completed. - unsafe { - return Node::complete(tail); - } - } else { - // The next waiter is a reader or the queue only consists of one - // waiter. Just wake all threads. - - // The lock cannot be locked (checked above), so mark it as - // unlocked to reset the queue. 
- if let Err(new) = - self.state.compare_exchange_weak(state, UNLOCKED, Release, Acquire) - { - state = new; - continue; - } - - let mut current = tail; - loop { - let prev = unsafe { current.as_ref().prev.get() }; - unsafe { - Node::complete(current); - } - match prev { - Some(prev) => current = prev, - None => return, - } - } - } - } - } -} diff --git a/library/std/src/sys/locks/rwlock/sgx.rs b/library/std/src/sys/locks/rwlock/sgx.rs deleted file mode 100644 index 136dea597bb..00000000000 --- a/library/std/src/sys/locks/rwlock/sgx.rs +++ /dev/null @@ -1,219 +0,0 @@ -#[cfg(test)] -mod tests; - -use crate::alloc::Layout; -use crate::num::NonZero; -use crate::sys::pal::waitqueue::{ - try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable, -}; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; - -struct AllocatedRwLock { - readers: SpinMutex>>>, - writer: SpinMutex>, -} - -pub struct RwLock { - inner: LazyBox, -} - -impl LazyInit for AllocatedRwLock { - fn init() -> Box { - Box::new(AllocatedRwLock { - readers: SpinMutex::new(WaitVariable::new(None)), - writer: SpinMutex::new(WaitVariable::new(false)), - }) - } -} - -// Check at compile time that RwLock's size and alignment matches the C definition -// in libunwind (see also `test_c_rwlock_initializer` in `tests`). -const _: () = { - let rust = Layout::new::(); - let c = Layout::new::<*mut ()>(); - assert!(rust.size() == c.size()); - assert!(rust.align() == c.align()); -}; - -impl RwLock { - pub const fn new() -> RwLock { - RwLock { inner: LazyBox::new() } - } - - #[inline] - pub fn read(&self) { - let lock = &*self.inner; - let mut rguard = lock.readers.lock(); - let wguard = lock.writer.lock(); - if *wguard.lock_var() || !wguard.queue_empty() { - // Another thread has or is waiting for the write lock, wait - drop(wguard); - WaitQueue::wait(rguard, || {}); - // Another thread has passed the lock to us - } else { - // No waiting writers, acquire the read lock - *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); - } - } - - #[inline] - pub unsafe fn try_read(&self) -> bool { - let lock = &*self.inner; - let mut rguard = try_lock_or_false!(lock.readers); - let wguard = try_lock_or_false!(lock.writer); - if *wguard.lock_var() || !wguard.queue_empty() { - // Another thread has or is waiting for the write lock - false - } else { - // No waiting writers, acquire the read lock - *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); - true - } - } - - #[inline] - pub fn write(&self) { - let lock = &*self.inner; - let rguard = lock.readers.lock(); - let mut wguard = lock.writer.lock(); - if *wguard.lock_var() || rguard.lock_var().is_some() { - // Another thread has the lock, wait - drop(rguard); - WaitQueue::wait(wguard, || {}); - // Another thread has passed the lock to us - } else { - // We are just now obtaining the lock - *wguard.lock_var_mut() = true; - } - } - - #[inline] - pub fn try_write(&self) -> bool { - let lock = &*self.inner; - let rguard = try_lock_or_false!(lock.readers); - let mut wguard = try_lock_or_false!(lock.writer); - if *wguard.lock_var() || rguard.lock_var().is_some() { - // Another thread has the lock - false - } else { - // We are just now obtaining the lock - *wguard.lock_var_mut() = true; - true - } - } - - #[inline] - unsafe fn __read_unlock( - &self, - mut rguard: SpinMutexGuard<'_, WaitVariable>>>, - wguard: SpinMutexGuard<'_, WaitVariable>, - ) { - *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().unwrap().get() - 
1); - if rguard.lock_var().is_some() { - // There are other active readers - } else { - if let Ok(mut wguard) = WaitQueue::notify_one(wguard) { - // A writer was waiting, pass the lock - *wguard.lock_var_mut() = true; - wguard.drop_after(rguard); - } else { - // No writers were waiting, the lock is released - rtassert!(rguard.queue_empty()); - } - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - let lock = &*self.inner; - let rguard = lock.readers.lock(); - let wguard = lock.writer.lock(); - unsafe { self.__read_unlock(rguard, wguard) }; - } - - #[inline] - unsafe fn __write_unlock( - &self, - rguard: SpinMutexGuard<'_, WaitVariable>>>, - wguard: SpinMutexGuard<'_, WaitVariable>, - ) { - match WaitQueue::notify_one(wguard) { - Err(mut wguard) => { - // No writers waiting, release the write lock - *wguard.lock_var_mut() = false; - if let Ok(mut rguard) = WaitQueue::notify_all(rguard) { - // One or more readers were waiting, pass the lock to them - if let NotifiedTcs::All { count } = rguard.notified_tcs() { - *rguard.lock_var_mut() = Some(count) - } else { - unreachable!() // called notify_all - } - rguard.drop_after(wguard); - } else { - // No readers waiting, the lock is released - } - } - Ok(wguard) => { - // There was a thread waiting for write, just pass the lock - wguard.drop_after(rguard); - } - } - } - - #[inline] - pub unsafe fn write_unlock(&self) { - let lock = &*self.inner; - let rguard = lock.readers.lock(); - let wguard = lock.writer.lock(); - unsafe { self.__write_unlock(rguard, wguard) }; - } - - // only used by __rust_rwlock_unlock below - #[inline] - #[cfg_attr(test, allow(dead_code))] - unsafe fn unlock(&self) { - let lock = &*self.inner; - let rguard = lock.readers.lock(); - let wguard = lock.writer.lock(); - if *wguard.lock_var() == true { - unsafe { self.__write_unlock(rguard, wguard) }; - } else { - unsafe { self.__read_unlock(rguard, wguard) }; - } - } -} - -// The following functions are needed by libunwind. These symbols are named -// in pre-link args for the target specification, so keep that in sync. -#[cfg(not(test))] -const EINVAL: i32 = 22; - -#[cfg(not(test))] -#[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 { - if p.is_null() { - return EINVAL; - } - unsafe { (*p).read() }; - return 0; -} - -#[cfg(not(test))] -#[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 { - if p.is_null() { - return EINVAL; - } - unsafe { (*p).write() }; - return 0; -} - -#[cfg(not(test))] -#[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 { - if p.is_null() { - return EINVAL; - } - unsafe { (*p).unlock() }; - return 0; -} diff --git a/library/std/src/sys/locks/rwlock/sgx/tests.rs b/library/std/src/sys/locks/rwlock/sgx/tests.rs deleted file mode 100644 index 5fd6670afd4..00000000000 --- a/library/std/src/sys/locks/rwlock/sgx/tests.rs +++ /dev/null @@ -1,21 +0,0 @@ -use super::*; -use crate::ptr; - -// Verify that the byte pattern libunwind uses to initialize an RwLock is -// equivalent to the value of RwLock::new(). If the value changes, -// `src/UnwindRustSgx.h` in libunwind needs to be changed too. -#[test] -fn test_c_rwlock_initializer() { - const C_RWLOCK_INIT: *mut () = ptr::null_mut(); - - // For the test to work, we need the padding/unused bytes in RwLock to be - // initialized as 0. In practice, this is the case with statics. 
- static RUST_RWLOCK_INIT: RwLock = RwLock::new(); - - unsafe { - // If the assertion fails, that not necessarily an issue with the value - // of C_RWLOCK_INIT. It might just be an issue with the way padding - // bytes are initialized in the test code. - assert_eq!(crate::mem::transmute_copy::<_, *mut ()>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT); - }; -} diff --git a/library/std/src/sys/locks/rwlock/solid.rs b/library/std/src/sys/locks/rwlock/solid.rs deleted file mode 100644 index 9bf6f5dbb73..00000000000 --- a/library/std/src/sys/locks/rwlock/solid.rs +++ /dev/null @@ -1,93 +0,0 @@ -//! A readers-writer lock implementation backed by the SOLID kernel extension. -use crate::sys::pal::{ - abi, - itron::{ - error::{expect_success, expect_success_aborting, fail, ItronError}, - spin::SpinIdOnceCell, - }, -}; - -pub struct RwLock { - /// The ID of the underlying mutex object - rwl: SpinIdOnceCell<()>, -} - -// Safety: `num_readers` is protected by `mtx_num_readers` -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} - -fn new_rwl() -> Result { - ItronError::err_if_negative(unsafe { abi::rwl_acre_rwl() }) -} - -impl RwLock { - #[inline] - pub const fn new() -> RwLock { - RwLock { rwl: SpinIdOnceCell::new() } - } - - /// Get the inner mutex's ID, which is lazily created. - fn raw(&self) -> abi::ID { - match self.rwl.get_or_try_init(|| new_rwl().map(|id| (id, ()))) { - Ok((id, ())) => id, - Err(e) => fail(e, &"rwl_acre_rwl"), - } - } - - #[inline] - pub fn read(&self) { - let rwl = self.raw(); - expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl"); - } - - #[inline] - pub fn try_read(&self) -> bool { - let rwl = self.raw(); - match unsafe { abi::rwl_ploc_rdl(rwl) } { - abi::E_TMOUT => false, - er => { - expect_success(er, &"rwl_ploc_rdl"); - true - } - } - } - - #[inline] - pub fn write(&self) { - let rwl = self.raw(); - expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl"); - } - - #[inline] - pub fn try_write(&self) -> bool { - let rwl = self.raw(); - match unsafe { abi::rwl_ploc_wrl(rwl) } { - abi::E_TMOUT => false, - er => { - expect_success(er, &"rwl_ploc_wrl"); - true - } - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - let rwl = self.raw(); - expect_success_aborting(unsafe { abi::rwl_unl_rwl(rwl) }, &"rwl_unl_rwl"); - } - - #[inline] - pub unsafe fn write_unlock(&self) { - let rwl = self.raw(); - expect_success_aborting(unsafe { abi::rwl_unl_rwl(rwl) }, &"rwl_unl_rwl"); - } -} - -impl Drop for RwLock { - #[inline] - fn drop(&mut self) { - if let Some(rwl) = self.rwl.get().map(|x| x.0) { - expect_success_aborting(unsafe { abi::rwl_del_rwl(rwl) }, &"rwl_del_rwl"); - } - } -} diff --git a/library/std/src/sys/locks/rwlock/teeos.rs b/library/std/src/sys/locks/rwlock/teeos.rs deleted file mode 100644 index 27cdb88788f..00000000000 --- a/library/std/src/sys/locks/rwlock/teeos.rs +++ /dev/null @@ -1,44 +0,0 @@ -use crate::sys::locks::mutex::Mutex; - -/// we do not supported rwlock, so use mutex to simulate rwlock. -/// it's useful because so many code in std will use rwlock. 
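Backing an `RwLock` with a `Mutex`, as this TEEOS shim does, stays correct because exclusive locking is strictly stronger than what the `RwLock` contract requires, but it gives up reader parallelism: concurrent readers simply take turns. A small standalone demonstration of that behaviour using the public `std` types (not the TEEOS code itself):

use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // Stand-in for the simulated RwLock: every "read" takes the mutex.
    let data = Arc::new(Mutex::new(0u32));

    let readers: Vec<_> = (0..2)
        .map(|_| {
            let data = Arc::clone(&data);
            thread::spawn(move || {
                // With a real RwLock both readers could hold the lock at once;
                // here the second reader blocks until the first releases it.
                let value = data.lock().unwrap();
                assert_eq!(*value, 0);
            })
        })
        .collect();

    for reader in readers {
        reader.join().unwrap();
    }
}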
-pub struct RwLock { - inner: Mutex, -} - -impl RwLock { - #[inline] - pub const fn new() -> RwLock { - RwLock { inner: Mutex::new() } - } - - #[inline] - pub fn read(&self) { - unsafe { self.inner.lock() }; - } - - #[inline] - pub fn try_read(&self) -> bool { - unsafe { self.inner.try_lock() } - } - - #[inline] - pub fn write(&self) { - unsafe { self.inner.lock() }; - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - unsafe { self.inner.try_lock() } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - unsafe { self.inner.unlock() }; - } - - #[inline] - pub unsafe fn write_unlock(&self) { - unsafe { self.inner.unlock() }; - } -} diff --git a/library/std/src/sys/locks/rwlock/windows7.rs b/library/std/src/sys/locks/rwlock/windows7.rs deleted file mode 100644 index e69415baac4..00000000000 --- a/library/std/src/sys/locks/rwlock/windows7.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sys::c; - -pub struct RwLock { - inner: UnsafeCell, -} - -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} - -impl RwLock { - #[inline] - pub const fn new() -> RwLock { - RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } - } - #[inline] - pub fn read(&self) { - unsafe { c::AcquireSRWLockShared(self.inner.get()) } - } - #[inline] - pub fn try_read(&self) -> bool { - unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 } - } - #[inline] - pub fn write(&self) { - unsafe { c::AcquireSRWLockExclusive(self.inner.get()) } - } - #[inline] - pub fn try_write(&self) -> bool { - unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 } - } - #[inline] - pub unsafe fn read_unlock(&self) { - c::ReleaseSRWLockShared(self.inner.get()) - } - #[inline] - pub unsafe fn write_unlock(&self) { - c::ReleaseSRWLockExclusive(self.inner.get()) - } -} diff --git a/library/std/src/sys/locks/rwlock/xous.rs b/library/std/src/sys/locks/rwlock/xous.rs deleted file mode 100644 index ab45b33e1f6..00000000000 --- a/library/std/src/sys/locks/rwlock/xous.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::sync::atomic::{AtomicIsize, Ordering::Acquire}; -use crate::thread::yield_now; - -pub struct RwLock { - /// The "mode" value indicates how many threads are waiting on this - /// Mutex. Possible values are: - /// -1: The lock is locked for writing - /// 0: The lock is unlocked - /// >=1: The lock is locked for reading - /// - /// This currently spins waiting for the lock to be freed. An - /// optimization would be to involve the ticktimer server to - /// coordinate unlocks. 
- mode: AtomicIsize, -} - -const RWLOCK_WRITING: isize = -1; -const RWLOCK_FREE: isize = 0; - -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} - -impl RwLock { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> RwLock { - RwLock { mode: AtomicIsize::new(RWLOCK_FREE) } - } - - #[inline] - pub unsafe fn read(&self) { - while !unsafe { self.try_read() } { - yield_now(); - } - } - - #[inline] - pub unsafe fn try_read(&self) -> bool { - self.mode - .fetch_update( - Acquire, - Acquire, - |v| if v == RWLOCK_WRITING { None } else { Some(v + 1) }, - ) - .is_ok() - } - - #[inline] - pub unsafe fn write(&self) { - while !unsafe { self.try_write() } { - yield_now(); - } - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - self.mode.compare_exchange(RWLOCK_FREE, RWLOCK_WRITING, Acquire, Acquire).is_ok() - } - - #[inline] - pub unsafe fn read_unlock(&self) { - let previous = self.mode.fetch_sub(1, Acquire); - assert!(previous != RWLOCK_FREE); - assert!(previous != RWLOCK_WRITING); - } - - #[inline] - pub unsafe fn write_unlock(&self) { - assert_eq!( - self.mode.compare_exchange(RWLOCK_WRITING, RWLOCK_FREE, Acquire, Acquire), - Ok(RWLOCK_WRITING) - ); - } -} diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs index 81200e0061e..bbd1d840e92 100644 --- a/library/std/src/sys/mod.rs +++ b/library/std/src/sys/mod.rs @@ -6,9 +6,9 @@ mod pal; mod personality; pub mod cmath; -pub mod locks; pub mod os_str; pub mod path; +pub mod sync; #[allow(dead_code)] #[allow(unused_imports)] pub mod thread_local; diff --git a/library/std/src/sys/pal/teeos/mod.rs b/library/std/src/sys/pal/teeos/mod.rs index 1fb9d5438de..c392a0ea264 100644 --- a/library/std/src/sys/pal/teeos/mod.rs +++ b/library/std/src/sys/pal/teeos/mod.rs @@ -19,8 +19,6 @@ pub mod fs; #[path = "../unsupported/io.rs"] pub mod io; pub mod net; -#[path = "../unsupported/once.rs"] -pub mod once; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; diff --git a/library/std/src/sys/pal/uefi/mod.rs b/library/std/src/sys/pal/uefi/mod.rs index 7c5b37fb490..562b00c2c01 100644 --- a/library/std/src/sys/pal/uefi/mod.rs +++ b/library/std/src/sys/pal/uefi/mod.rs @@ -21,8 +21,6 @@ pub mod fs; pub mod io; #[path = "../unsupported/net.rs"] pub mod net; -#[path = "../unsupported/once.rs"] -pub mod once; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; diff --git a/library/std/src/sys/pal/unsupported/mod.rs b/library/std/src/sys/pal/unsupported/mod.rs index 9ce275ee72d..be344fb7cae 100644 --- a/library/std/src/sys/pal/unsupported/mod.rs +++ b/library/std/src/sys/pal/unsupported/mod.rs @@ -6,7 +6,6 @@ pub mod env; pub mod fs; pub mod io; pub mod net; -pub mod once; pub mod os; pub mod pipe; pub mod process; diff --git a/library/std/src/sys/pal/unsupported/once.rs b/library/std/src/sys/pal/unsupported/once.rs deleted file mode 100644 index 11fde1888ba..00000000000 --- a/library/std/src/sys/pal/unsupported/once.rs +++ /dev/null @@ -1,100 +0,0 @@ -use crate::cell::Cell; -use crate::sync as public; -use crate::sync::once::ExclusiveState; - -pub struct Once { - state: Cell, -} - -pub struct OnceState { - poisoned: bool, - set_state_to: Cell, -} - -#[derive(Clone, Copy, PartialEq, Eq)] -enum State { - Incomplete, - Poisoned, - Running, - Complete, -} - -struct CompletionGuard<'a> { - state: &'a Cell, - set_state_on_drop_to: State, -} - -impl<'a> Drop for CompletionGuard<'a> { - fn drop(&mut self) { - self.state.set(self.set_state_on_drop_to); - } -} - -// Safety: threads 
are not supported on this platform. -unsafe impl Sync for Once {} - -impl Once { - #[inline] - #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] - pub const fn new() -> Once { - Once { state: Cell::new(State::Incomplete) } - } - - #[inline] - pub fn is_completed(&self) -> bool { - self.state.get() == State::Complete - } - - #[inline] - pub(crate) fn state(&mut self) -> ExclusiveState { - match self.state.get() { - State::Incomplete => ExclusiveState::Incomplete, - State::Poisoned => ExclusiveState::Poisoned, - State::Complete => ExclusiveState::Complete, - _ => unreachable!("invalid Once state"), - } - } - - #[cold] - #[track_caller] - pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { - let state = self.state.get(); - match state { - State::Poisoned if !ignore_poisoning => { - // Panic to propagate the poison. - panic!("Once instance has previously been poisoned"); - } - State::Incomplete | State::Poisoned => { - self.state.set(State::Running); - // `guard` will set the new state on drop. - let mut guard = - CompletionGuard { state: &self.state, set_state_on_drop_to: State::Poisoned }; - // Run the function, letting it know if we're poisoned or not. - let f_state = public::OnceState { - inner: OnceState { - poisoned: state == State::Poisoned, - set_state_to: Cell::new(State::Complete), - }, - }; - f(&f_state); - guard.set_state_on_drop_to = f_state.inner.set_state_to.get(); - } - State::Running => { - panic!("one-time initialization may not be performed recursively"); - } - State::Complete => {} - } - } -} - -impl OnceState { - #[inline] - pub fn is_poisoned(&self) -> bool { - self.poisoned - } - - #[inline] - pub fn poison(&self) { - self.set_state_to.set(State::Poisoned) - } -} diff --git a/library/std/src/sys/pal/wasi/mod.rs b/library/std/src/sys/pal/wasi/mod.rs index 308dd296004..a78547261ad 100644 --- a/library/std/src/sys/pal/wasi/mod.rs +++ b/library/std/src/sys/pal/wasi/mod.rs @@ -41,8 +41,6 @@ pub mod time; cfg_if::cfg_if! { if #[cfg(not(target_feature = "atomics"))] { - #[path = "../unsupported/once.rs"] - pub mod once; #[path = "../unsupported/thread_parking.rs"] pub mod thread_parking; } diff --git a/library/std/src/sys/pal/wasip2/mod.rs b/library/std/src/sys/pal/wasip2/mod.rs index b12a8d5ea11..d1d444d7b79 100644 --- a/library/std/src/sys/pal/wasip2/mod.rs +++ b/library/std/src/sys/pal/wasip2/mod.rs @@ -51,10 +51,6 @@ cfg_if::cfg_if! { if #[cfg(target_feature = "atomics")] { compile_error!("The wasm32-wasip2 target does not support atomics"); } else { - #[path = "../unsupported/locks/mod.rs"] - pub mod locks; - #[path = "../unsupported/once.rs"] - pub mod once; #[path = "../unsupported/thread_parking.rs"] pub mod thread_parking; } diff --git a/library/std/src/sys/pal/wasm/mod.rs b/library/std/src/sys/pal/wasm/mod.rs index 40b15120e6d..5cbc3e45341 100644 --- a/library/std/src/sys/pal/wasm/mod.rs +++ b/library/std/src/sys/pal/wasm/mod.rs @@ -48,8 +48,6 @@ cfg_if::cfg_if! 
{ #[path = "atomics/thread.rs"] pub mod thread; } else { - #[path = "../unsupported/once.rs"] - pub mod once; #[path = "../unsupported/thread.rs"] pub mod thread; #[path = "../unsupported/thread_parking.rs"] diff --git a/library/std/src/sys/pal/zkvm/mod.rs b/library/std/src/sys/pal/zkvm/mod.rs index 6c714f76309..228a976dbab 100644 --- a/library/std/src/sys/pal/zkvm/mod.rs +++ b/library/std/src/sys/pal/zkvm/mod.rs @@ -21,8 +21,6 @@ pub mod fs; pub mod io; #[path = "../unsupported/net.rs"] pub mod net; -#[path = "../unsupported/once.rs"] -pub mod once; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; diff --git a/library/std/src/sys/sync/condvar/futex.rs b/library/std/src/sys/sync/condvar/futex.rs new file mode 100644 index 00000000000..4586d0fd941 --- /dev/null +++ b/library/std/src/sys/sync/condvar/futex.rs @@ -0,0 +1,56 @@ +use crate::sync::atomic::{AtomicU32, Ordering::Relaxed}; +use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all}; +use crate::sys::sync::Mutex; +use crate::time::Duration; + +pub struct Condvar { + // The value of this atomic is simply incremented on every notification. + // This is used by `.wait()` to not miss any notifications after + // unlocking the mutex and before waiting for notifications. + futex: AtomicU32, +} + +impl Condvar { + #[inline] + pub const fn new() -> Self { + Self { futex: AtomicU32::new(0) } + } + + // All the memory orderings here are `Relaxed`, + // because synchronization is done by unlocking and locking the mutex. + + pub fn notify_one(&self) { + self.futex.fetch_add(1, Relaxed); + futex_wake(&self.futex); + } + + pub fn notify_all(&self) { + self.futex.fetch_add(1, Relaxed); + futex_wake_all(&self.futex); + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + self.wait_optional_timeout(mutex, None); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, timeout: Duration) -> bool { + self.wait_optional_timeout(mutex, Some(timeout)) + } + + unsafe fn wait_optional_timeout(&self, mutex: &Mutex, timeout: Option) -> bool { + // Examine the notification counter _before_ we unlock the mutex. + let futex_value = self.futex.load(Relaxed); + + // Unlock the mutex before going to sleep. + mutex.unlock(); + + // Wait, but only if there hasn't been any + // notification since we unlocked the mutex. + let r = futex_wait(&self.futex, futex_value, timeout); + + // Lock the mutex again. + mutex.lock(); + + r + } +} diff --git a/library/std/src/sys/sync/condvar/itron.rs b/library/std/src/sys/sync/condvar/itron.rs new file mode 100644 index 00000000000..9b64d241efd --- /dev/null +++ b/library/std/src/sys/sync/condvar/itron.rs @@ -0,0 +1,294 @@ +//! POSIX conditional variable implementation based on user-space wait queues. +use crate::sys::pal::itron::{ + abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong, +}; +use crate::{mem::replace, ptr::NonNull, sys::sync::Mutex, time::Duration}; + +// The implementation is inspired by the queue-based implementation shown in +// Andrew D. Birrell's paper "Implementing Condition Variables with Semaphores" + +pub struct Condvar { + waiters: SpinMutex, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + #[inline] + pub const fn new() -> Condvar { + Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) } + } + + pub fn notify_one(&self) { + self.waiters.with_locked(|waiters| { + if let Some(task) = waiters.pop_front() { + // Unpark the task + match unsafe { abi::wup_tsk(task) } { + // The task already has a token. 
+ abi::E_QOVR => {} + // Can't undo the effect; abort the program on failure + er => { + expect_success_aborting(er, &"wup_tsk"); + } + } + } + }); + } + + pub fn notify_all(&self) { + self.waiters.with_locked(|waiters| { + while let Some(task) = waiters.pop_front() { + // Unpark the task + match unsafe { abi::wup_tsk(task) } { + // The task already has a token. + abi::E_QOVR => {} + // Can't undo the effect; abort the program on failure + er => { + expect_success_aborting(er, &"wup_tsk"); + } + } + } + }); + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + // Construct `Waiter`. + let mut waiter = waiter_queue::Waiter::new(); + let waiter = NonNull::from(&mut waiter); + + self.waiters.with_locked(|waiters| unsafe { + waiters.insert(waiter); + }); + + unsafe { mutex.unlock() }; + + // Wait until `waiter` is removed from the queue + loop { + // Park the current task + expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk"); + + if !self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) { + break; + } + } + + mutex.lock(); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + // Construct and pin `Waiter` + let mut waiter = waiter_queue::Waiter::new(); + let waiter = NonNull::from(&mut waiter); + + self.waiters.with_locked(|waiters| unsafe { + waiters.insert(waiter); + }); + + unsafe { mutex.unlock() }; + + // Park the current task and do not wake up until the timeout elapses + // or the task gets woken up by `notify_*` + match with_tmos_strong(dur, |tmo| { + let er = unsafe { abi::tslp_tsk(tmo) }; + if er == 0 { + // We were unparked. Are we really dequeued? + if self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) { + // No we are not. Continue waiting. + return abi::E_TMOUT; + } + } + er + }) { + abi::E_TMOUT => {} + er => { + expect_success_aborting(er, &"tslp_tsk"); + } + } + + // Remove `waiter` from `self.waiters`. If `waiter` is still in + // `waiters`, it means we woke up because of a timeout. Otherwise, + // we woke up because of `notify_*`. + let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) }); + + mutex.lock(); + success + } +} + +mod waiter_queue { + use super::*; + + pub struct WaiterQueue { + head: Option, + } + + #[derive(Copy, Clone)] + struct ListHead { + first: NonNull, + last: NonNull, + } + + unsafe impl Send for ListHead {} + unsafe impl Sync for ListHead {} + + pub struct Waiter { + // These fields are only accessed through `&[mut] WaiterQueue`. + /// The waiting task's ID. Will be zeroed when the task is woken up + /// and removed from a queue. + task: abi::ID, + priority: abi::PRI, + prev: Option>, + next: Option>, + } + + unsafe impl Send for Waiter {} + unsafe impl Sync for Waiter {} + + impl Waiter { + #[inline] + pub fn new() -> Self { + let task = task::current_task_id(); + let priority = task::task_priority(abi::TSK_SELF); + + // Zeroness of `Waiter::task` indicates whether the `Waiter` is + // linked to a queue or not. This invariant is important for + // the correctness. + debug_assert_ne!(task, 0); + + Self { task, priority, prev: None, next: None } + } + } + + impl WaiterQueue { + #[inline] + pub const fn new() -> Self { + Self { head: None } + } + + /// # Safety + /// + /// - The caller must own `*waiter_ptr`. The caller will lose the + /// ownership until `*waiter_ptr` is removed from `self`. + /// + /// - `*waiter_ptr` must be valid until it's removed from the queue. 
+ /// + /// - `*waiter_ptr` must not have been previously inserted to a `WaiterQueue`. + /// + pub unsafe fn insert(&mut self, mut waiter_ptr: NonNull) { + unsafe { + let waiter = waiter_ptr.as_mut(); + + debug_assert!(waiter.prev.is_none()); + debug_assert!(waiter.next.is_none()); + + if let Some(head) = &mut self.head { + // Find the insertion position and insert `waiter` + let insert_after = { + let mut cursor = head.last; + loop { + if waiter.priority >= cursor.as_ref().priority { + // `cursor` and all previous waiters have the same or higher + // priority than `current_task_priority`. Insert the new + // waiter right after `cursor`. + break Some(cursor); + } + cursor = if let Some(prev) = cursor.as_ref().prev { + prev + } else { + break None; + }; + } + }; + + if let Some(mut insert_after) = insert_after { + // Insert `waiter` after `insert_after` + let insert_before = insert_after.as_ref().next; + + waiter.prev = Some(insert_after); + insert_after.as_mut().next = Some(waiter_ptr); + + waiter.next = insert_before; + if let Some(mut insert_before) = insert_before { + insert_before.as_mut().prev = Some(waiter_ptr); + } else { + head.last = waiter_ptr; + } + } else { + // Insert `waiter` to the front + waiter.next = Some(head.first); + head.first.as_mut().prev = Some(waiter_ptr); + head.first = waiter_ptr; + } + } else { + // `waiter` is the only element + self.head = Some(ListHead { first: waiter_ptr, last: waiter_ptr }); + } + } + } + + /// Given a `Waiter` that was previously inserted to `self`, remove + /// it from `self` if it's still there. + #[inline] + pub unsafe fn remove(&mut self, mut waiter_ptr: NonNull) -> bool { + unsafe { + let waiter = waiter_ptr.as_mut(); + if waiter.task != 0 { + let head = self.head.as_mut().unwrap(); + + match (waiter.prev, waiter.next) { + (Some(mut prev), Some(mut next)) => { + prev.as_mut().next = Some(next); + next.as_mut().prev = Some(prev); + } + (None, Some(mut next)) => { + head.first = next; + next.as_mut().prev = None; + } + (Some(mut prev), None) => { + prev.as_mut().next = None; + head.last = prev; + } + (None, None) => { + self.head = None; + } + } + + waiter.task = 0; + + true + } else { + false + } + } + } + + /// Given a `Waiter` that was previously inserted to `self`, return a + /// flag indicating whether it's still in `self`. + #[inline] + pub unsafe fn is_queued(&self, waiter: NonNull) -> bool { + unsafe { waiter.as_ref().task != 0 } + } + + #[inline] + pub fn pop_front(&mut self) -> Option { + unsafe { + let head = self.head.as_mut()?; + let waiter = head.first.as_mut(); + + // Get the ID + let id = replace(&mut waiter.task, 0); + + // Unlink the waiter + if let Some(mut next) = waiter.next { + head.first = next; + next.as_mut().prev = None; + } else { + self.head = None; + } + + Some(id) + } + } + } +} diff --git a/library/std/src/sys/sync/condvar/mod.rs b/library/std/src/sys/sync/condvar/mod.rs new file mode 100644 index 00000000000..6849cacf88e --- /dev/null +++ b/library/std/src/sys/sync/condvar/mod.rs @@ -0,0 +1,37 @@ +cfg_if::cfg_if! 
{ + if #[cfg(any( + all(target_os = "windows", not(target_vendor="win7")), + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", + all(target_family = "wasm", target_feature = "atomics"), + target_os = "hermit", + ))] { + mod futex; + pub use futex::Condvar; + } else if #[cfg(target_family = "unix")] { + mod pthread; + pub use pthread::Condvar; + } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] { + mod windows7; + pub use windows7::Condvar; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::Condvar; + } else if #[cfg(target_os = "solid_asp3")] { + mod itron; + pub use itron::Condvar; + } else if #[cfg(target_os = "teeos")] { + mod teeos; + pub use teeos::Condvar; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use xous::Condvar; + } else { + mod no_threads; + pub use no_threads::Condvar; + } +} diff --git a/library/std/src/sys/sync/condvar/no_threads.rs b/library/std/src/sys/sync/condvar/no_threads.rs new file mode 100644 index 00000000000..36b89c5f5be --- /dev/null +++ b/library/std/src/sys/sync/condvar/no_threads.rs @@ -0,0 +1,26 @@ +use crate::sys::sync::Mutex; +use crate::time::Duration; + +pub struct Condvar {} + +impl Condvar { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Condvar { + Condvar {} + } + + #[inline] + pub fn notify_one(&self) {} + + #[inline] + pub fn notify_all(&self) {} + + pub unsafe fn wait(&self, _mutex: &Mutex) { + panic!("condvar wait not supported") + } + + pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool { + panic!("condvar wait not supported"); + } +} diff --git a/library/std/src/sys/sync/condvar/pthread.rs b/library/std/src/sys/sync/condvar/pthread.rs new file mode 100644 index 00000000000..728371685ee --- /dev/null +++ b/library/std/src/sys/sync/condvar/pthread.rs @@ -0,0 +1,206 @@ +use crate::cell::UnsafeCell; +use crate::ptr; +use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed}; +use crate::sys::sync::{mutex, Mutex}; +#[cfg(not(target_os = "nto"))] +use crate::sys::time::TIMESPEC_MAX; +#[cfg(target_os = "nto")] +use crate::sys::time::TIMESPEC_MAX_CAPPED; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; +use crate::time::Duration; + +struct AllocatedCondvar(UnsafeCell); + +pub struct Condvar { + inner: LazyBox, + mutex: AtomicPtr, +} + +#[inline] +fn raw(c: &Condvar) -> *mut libc::pthread_cond_t { + c.inner.0.get() +} + +unsafe impl Send for AllocatedCondvar {} +unsafe impl Sync for AllocatedCondvar {} + +impl LazyInit for AllocatedCondvar { + fn init() -> Box { + let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER))); + + cfg_if::cfg_if! { + if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "l4re", + target_os = "android", + target_os = "redox" + ))] { + // `pthread_condattr_setclock` is unfortunately not supported on these platforms. + } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] { + // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet + // So on that platform, init() should always be called + // Moreover, that platform does not have pthread_condattr_setclock support, + // hence that initialization should be skipped as well + // + // Similar story for the 3DS (horizon). 
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) }; + assert_eq!(r, 0); + } else { + use crate::mem::MaybeUninit; + let mut attr = MaybeUninit::::uninit(); + let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) }; + assert_eq!(r, 0); + let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) }; + assert_eq!(r, 0); + let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) }; + assert_eq!(r, 0); + let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) }; + assert_eq!(r, 0); + } + } + + condvar + } +} + +impl Drop for AllocatedCondvar { + #[inline] + fn drop(&mut self) { + let r = unsafe { libc::pthread_cond_destroy(self.0.get()) }; + if cfg!(target_os = "dragonfly") { + // On DragonFly pthread_cond_destroy() returns EINVAL if called on + // a condvar that was just initialized with + // libc::PTHREAD_COND_INITIALIZER. Once it is used or + // pthread_cond_init() is called, this behaviour no longer occurs. + debug_assert!(r == 0 || r == libc::EINVAL); + } else { + debug_assert_eq!(r, 0); + } + } +} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) } + } + + #[inline] + fn verify(&self, mutex: *mut libc::pthread_mutex_t) { + // Relaxed is okay here because we never read through `self.addr`, and only use it to + // compare addresses. + match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) { + Ok(_) => {} // Stored the address + Err(n) if n == mutex => {} // Lost a race to store the same address + _ => panic!("attempted to use a condition variable with two mutexes"), + } + } + + #[inline] + pub fn notify_one(&self) { + let r = unsafe { libc::pthread_cond_signal(raw(self)) }; + debug_assert_eq!(r, 0); + } + + #[inline] + pub fn notify_all(&self) { + let r = unsafe { libc::pthread_cond_broadcast(raw(self)) }; + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let mutex = mutex::raw(mutex); + self.verify(mutex); + let r = libc::pthread_cond_wait(raw(self), mutex); + debug_assert_eq!(r, 0); + } + + // This implementation is used on systems that support pthread_condattr_setclock + // where we configure condition variable to use monotonic clock (instead of + // default system clock). This approach avoids all problems that result + // from changes made to the system time. 
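// As a rough sketch of the deadline computation below (raw libc calls, for
// illustration only): the absolute timeout handed to `pthread_cond_timedwait`
// is "monotonic now + dur", e.g.
//
//     let mut now: libc::timespec = unsafe { core::mem::zeroed() };
//     unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now) };
//     // deadline = now + dur, saturating to TIMESPEC_MAX on overflow
//
// Because the deadline lives on the monotonic clock configured above, a
// wall-clock adjustment can neither cut the wait short nor extend it.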
+ #[cfg(not(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "android", + target_os = "espidf", + target_os = "horizon" + )))] + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + use crate::sys::time::Timespec; + + let mutex = mutex::raw(mutex); + self.verify(mutex); + + #[cfg(not(target_os = "nto"))] + let timeout = Timespec::now(libc::CLOCK_MONOTONIC) + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec()) + .unwrap_or(TIMESPEC_MAX); + + #[cfg(target_os = "nto")] + let timeout = Timespec::now(libc::CLOCK_MONOTONIC) + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec_capped()) + .unwrap_or(TIMESPEC_MAX_CAPPED); + + let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout); + assert!(r == libc::ETIMEDOUT || r == 0); + r == 0 + } + + // This implementation is modeled after libcxx's condition_variable + // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46 + // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367 + #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "android", + target_os = "espidf", + target_os = "horizon" + ))] + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + use crate::sys::time::SystemTime; + use crate::time::Instant; + + let mutex = mutex::raw(mutex); + self.verify(mutex); + + // OSX implementation of `pthread_cond_timedwait` is buggy + // with super long durations. When duration is greater than + // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` + // in macOS Sierra returns error 316. + // + // This program demonstrates the issue: + // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c + // + // To work around this issue, and possible bugs of other OSes, timeout + // is clamped to 1000 years, which is allowable per the API of `wait_timeout` + // because of spurious wakeups. + let dur = Duration::min(dur, Duration::from_secs(1000 * 365 * 86400)); + + // pthread_cond_timedwait uses system time, but we want to report timeout + // based on stable time. + let now = Instant::now(); + + let timeout = SystemTime::now() + .t + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec()) + .unwrap_or(TIMESPEC_MAX); + + let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout); + debug_assert!(r == libc::ETIMEDOUT || r == 0); + + // ETIMEDOUT is not a totally reliable method of determining timeout due + // to clock shifts, so do the check ourselves + now.elapsed() < dur + } +} diff --git a/library/std/src/sys/sync/condvar/sgx.rs b/library/std/src/sys/sync/condvar/sgx.rs new file mode 100644 index 00000000000..ecb5872f60d --- /dev/null +++ b/library/std/src/sys/sync/condvar/sgx.rs @@ -0,0 +1,45 @@ +use crate::sys::pal::waitqueue::{SpinMutex, WaitQueue, WaitVariable}; +use crate::sys::sync::Mutex; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; +use crate::time::Duration; + +/// FIXME: `UnsafeList` is not movable. 
+struct AllocatedCondvar(SpinMutex>); + +pub struct Condvar { + inner: LazyBox, +} + +impl LazyInit for AllocatedCondvar { + fn init() -> Box { + Box::new(AllocatedCondvar(SpinMutex::new(WaitVariable::new(())))) + } +} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { inner: LazyBox::new() } + } + + #[inline] + pub fn notify_one(&self) { + let _ = WaitQueue::notify_one(self.inner.0.lock()); + } + + #[inline] + pub fn notify_all(&self) { + let _ = WaitQueue::notify_all(self.inner.0.lock()); + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + let guard = self.inner.0.lock(); + WaitQueue::wait(guard, || unsafe { mutex.unlock() }); + mutex.lock() + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let success = WaitQueue::wait_timeout(&self.inner.0, dur, || unsafe { mutex.unlock() }); + mutex.lock(); + success + } +} diff --git a/library/std/src/sys/sync/condvar/teeos.rs b/library/std/src/sys/sync/condvar/teeos.rs new file mode 100644 index 00000000000..0a931f407d2 --- /dev/null +++ b/library/std/src/sys/sync/condvar/teeos.rs @@ -0,0 +1,100 @@ +use crate::cell::UnsafeCell; +use crate::ptr; +use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed}; +use crate::sys::sync::mutex::{self, Mutex}; +use crate::sys::time::TIMESPEC_MAX; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; +use crate::time::Duration; + +extern "C" { + pub fn pthread_cond_timedwait( + cond: *mut libc::pthread_cond_t, + lock: *mut libc::pthread_mutex_t, + adstime: *const libc::timespec, + ) -> libc::c_int; +} + +struct AllocatedCondvar(UnsafeCell); + +pub struct Condvar { + inner: LazyBox, + mutex: AtomicPtr, +} + +#[inline] +fn raw(c: &Condvar) -> *mut libc::pthread_cond_t { + c.inner.0.get() +} + +unsafe impl Send for AllocatedCondvar {} +unsafe impl Sync for AllocatedCondvar {} + +impl LazyInit for AllocatedCondvar { + fn init() -> Box { + let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER))); + + let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) }; + assert_eq!(r, 0); + + condvar + } +} + +impl Drop for AllocatedCondvar { + #[inline] + fn drop(&mut self) { + let r = unsafe { libc::pthread_cond_destroy(self.0.get()) }; + debug_assert_eq!(r, 0); + } +} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) } + } + + #[inline] + fn verify(&self, mutex: *mut libc::pthread_mutex_t) { + match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) { + Ok(_) => {} // Stored the address + Err(n) if n == mutex => {} // Lost a race to store the same address + _ => panic!("attempted to use a condition variable with two mutexes"), + } + } + + #[inline] + pub fn notify_one(&self) { + let r = unsafe { libc::pthread_cond_signal(raw(self)) }; + debug_assert_eq!(r, 0); + } + + #[inline] + pub fn notify_all(&self) { + let r = unsafe { libc::pthread_cond_broadcast(raw(self)) }; + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let mutex = mutex::raw(mutex); + self.verify(mutex); + let r = libc::pthread_cond_wait(raw(self), mutex); + debug_assert_eq!(r, 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + use crate::sys::time::Timespec; + + let mutex = mutex::raw(mutex); + self.verify(mutex); + + let timeout = Timespec::now(libc::CLOCK_MONOTONIC) + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec()) + .unwrap_or(TIMESPEC_MAX); + + let r = 
pthread_cond_timedwait(raw(self), mutex, &timeout); + assert!(r == libc::ETIMEDOUT || r == 0); + r == 0 + } +} diff --git a/library/std/src/sys/sync/condvar/windows7.rs b/library/std/src/sys/sync/condvar/windows7.rs new file mode 100644 index 00000000000..07fa5fdd698 --- /dev/null +++ b/library/std/src/sys/sync/condvar/windows7.rs @@ -0,0 +1,50 @@ +use crate::cell::UnsafeCell; +use crate::sys::c; +use crate::sys::os; +use crate::sys::sync::{mutex, Mutex}; +use crate::time::Duration; + +pub struct Condvar { + inner: UnsafeCell, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + #[inline] + pub const fn new() -> Condvar { + Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) } + } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0); + debug_assert!(r != 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let r = c::SleepConditionVariableSRW( + self.inner.get(), + mutex::raw(mutex), + crate::sys::pal::dur2timeout(dur), + 0, + ); + if r == 0 { + debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize); + false + } else { + true + } + } + + #[inline] + pub fn notify_one(&self) { + unsafe { c::WakeConditionVariable(self.inner.get()) } + } + + #[inline] + pub fn notify_all(&self) { + unsafe { c::WakeAllConditionVariable(self.inner.get()) } + } +} diff --git a/library/std/src/sys/sync/condvar/xous.rs b/library/std/src/sys/sync/condvar/xous.rs new file mode 100644 index 00000000000..7b218818ef8 --- /dev/null +++ b/library/std/src/sys/sync/condvar/xous.rs @@ -0,0 +1,148 @@ +use crate::os::xous::ffi::{blocking_scalar, scalar}; +use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; +use crate::sys::sync::Mutex; +use crate::time::Duration; +use core::sync::atomic::{AtomicUsize, Ordering}; + +// The implementation is inspired by Andrew D. Birrell's paper +// "Implementing Condition Variables with Semaphores" + +const NOTIFY_TRIES: usize = 3; + +pub struct Condvar { + counter: AtomicUsize, + timed_out: AtomicUsize, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Condvar { + Condvar { counter: AtomicUsize::new(0), timed_out: AtomicUsize::new(0) } + } + + fn notify_some(&self, to_notify: usize) { + // Assumption: The Mutex protecting this condvar is locked throughout the + // entirety of this call, preventing calls to `wait` and `wait_timeout`. + + // Logic check: Ensure that there aren't any missing waiters. Remove any that + // timed-out, ensuring the counter doesn't underflow. + assert!(self.timed_out.load(Ordering::Relaxed) <= self.counter.load(Ordering::Relaxed)); + self.counter.fetch_sub(self.timed_out.swap(0, Ordering::Relaxed), Ordering::Relaxed); + + // Figure out how many threads to notify. Note that it is impossible for `counter` + // to increase during this operation because Mutex is locked. However, it is + // possible for `counter` to decrease due to a condvar timing out, in which + // case the corresponding `timed_out` will increase accordingly. 
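// A small worked example of this bookkeeping (hypothetical numbers): with
// `counter == 3`, `timed_out == 1` and `to_notify == 3`, the cleanup above
// leaves `counter == 2`, the `fetch_update` below drops it to 0 and reports
// 2 waiters, so at most 2 threads are asked to be woken via the ticktimer
// server.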
+ let Ok(waiter_count) = + self.counter.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |counter| { + if counter == 0 { + return None; + } else { + Some(counter - counter.min(to_notify)) + } + }) + else { + // No threads are waiting on this condvar + return; + }; + + let mut remaining_to_wake = waiter_count.min(to_notify); + if remaining_to_wake == 0 { + return; + } + for _wake_tries in 0..NOTIFY_TRIES { + let result = blocking_scalar( + ticktimer_server(), + TicktimerScalar::NotifyCondition(self.index(), remaining_to_wake).into(), + ) + .expect("failure to send NotifyCondition command"); + + // Remove the list of waiters that were notified + remaining_to_wake -= result[0]; + + // Also remove the number of waiters that timed out. Clamp it to 0 in order to + // ensure we don't wait forever in case the waiter woke up between the time + // we counted the remaining waiters and now. + remaining_to_wake = + remaining_to_wake.saturating_sub(self.timed_out.swap(0, Ordering::Relaxed)); + if remaining_to_wake == 0 { + return; + } + crate::thread::yield_now(); + } + } + + pub fn notify_one(&self) { + self.notify_some(1) + } + + pub fn notify_all(&self) { + self.notify_some(self.counter.load(Ordering::Relaxed)) + } + + fn index(&self) -> usize { + core::ptr::from_ref(self).addr() + } + + /// Unlock the given Mutex and wait for the notification. Wait at most + /// `ms` milliseconds, or pass `0` to wait forever. + /// + /// Returns `true` if the condition was received, `false` if it timed out + fn wait_ms(&self, mutex: &Mutex, ms: usize) -> bool { + self.counter.fetch_add(1, Ordering::Relaxed); + unsafe { mutex.unlock() }; + + // Threading concern: There is a chance that the `notify` thread wakes up here before + // we have a chance to wait for the condition. This is fine because we've recorded + // the fact that we're waiting by incrementing the counter. + let result = blocking_scalar( + ticktimer_server(), + TicktimerScalar::WaitForCondition(self.index(), ms).into(), + ); + let awoken = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; + + // If we awoke due to a timeout, increment the `timed_out` counter so that the + // main loop of `notify` knows there's a timeout. + // + // This is done with the Mutex still unlocked, because the Mutex might still + // be locked by the `notify` process above. 
+ if !awoken { + self.timed_out.fetch_add(1, Ordering::Relaxed); + } + + unsafe { mutex.lock() }; + awoken + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + // Wait for 0 ms, which is a special case to "wait forever" + self.wait_ms(mutex, 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let mut millis = dur.as_millis() as usize; + // Ensure we don't wait for 0 ms, which would cause us to wait forever + if millis == 0 { + millis = 1; + } + self.wait_ms(mutex, millis) + } +} + +impl Drop for Condvar { + fn drop(&mut self) { + let remaining_count = self.counter.load(Ordering::Relaxed); + let timed_out = self.timed_out.load(Ordering::Relaxed); + assert!( + remaining_count - timed_out == 0, + "counter was {} and timed_out was {} not 0", + remaining_count, + timed_out + ); + scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok(); + } +} diff --git a/library/std/src/sys/sync/mod.rs b/library/std/src/sys/sync/mod.rs new file mode 100644 index 00000000000..623e6bccd51 --- /dev/null +++ b/library/std/src/sys/sync/mod.rs @@ -0,0 +1,9 @@ +mod condvar; +mod mutex; +mod once; +mod rwlock; + +pub use condvar::Condvar; +pub use mutex::Mutex; +pub use once::{Once, OnceState}; +pub use rwlock::RwLock; diff --git a/library/std/src/sys/sync/mutex/fuchsia.rs b/library/std/src/sys/sync/mutex/fuchsia.rs new file mode 100644 index 00000000000..5d89e5a13fd --- /dev/null +++ b/library/std/src/sys/sync/mutex/fuchsia.rs @@ -0,0 +1,164 @@ +//! A priority inheriting mutex for Fuchsia. +//! +//! This is a port of the [mutex in Fuchsia's libsync]. Contrary to the original, +//! it does not abort the process when reentrant locking is detected, but deadlocks. +//! +//! Priority inheritance is achieved by storing the owning thread's handle in an +//! atomic variable. Fuchsia's futex operations support setting an owner thread +//! for a futex, which can boost that thread's priority while the futex is waited +//! upon. +//! +//! libsync is licenced under the following BSD-style licence: +//! +//! Copyright 2016 The Fuchsia Authors. +//! +//! Redistribution and use in source and binary forms, with or without +//! modification, are permitted provided that the following conditions are +//! met: +//! +//! * Redistributions of source code must retain the above copyright +//! notice, this list of conditions and the following disclaimer. +//! * Redistributions in binary form must reproduce the above +//! copyright notice, this list of conditions and the following +//! disclaimer in the documentation and/or other materials provided +//! with the distribution. +//! +//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +//! "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +//! LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +//! A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +//! OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +//! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +//! LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +//! DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +//! THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +//! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +//! +//! 
[mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c + +use crate::sync::atomic::{ + AtomicU32, + Ordering::{Acquire, Relaxed, Release}, +}; +use crate::sys::futex::zircon::{ + zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t, zx_thread_self, ZX_ERR_BAD_HANDLE, + ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE, ZX_OK, + ZX_TIME_INFINITE, +}; + +// The lowest two bits of a `zx_handle_t` are always set, so the lowest bit is used to mark the +// mutex as contested by clearing it. +const CONTESTED_BIT: u32 = 1; +// This can never be a valid `zx_handle_t`. +const UNLOCKED: u32 = 0; + +pub struct Mutex { + futex: AtomicU32, +} + +#[inline] +fn to_state(owner: zx_handle_t) -> u32 { + owner +} + +#[inline] +fn to_owner(state: u32) -> zx_handle_t { + state | CONTESTED_BIT +} + +#[inline] +fn is_contested(state: u32) -> bool { + state & CONTESTED_BIT == 0 +} + +#[inline] +fn mark_contested(state: u32) -> u32 { + state & !CONTESTED_BIT +} + +impl Mutex { + #[inline] + pub const fn new() -> Mutex { + Mutex { futex: AtomicU32::new(UNLOCKED) } + } + + #[inline] + pub fn try_lock(&self) -> bool { + let thread_self = unsafe { zx_thread_self() }; + self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok() + } + + #[inline] + pub fn lock(&self) { + let thread_self = unsafe { zx_thread_self() }; + if let Err(state) = + self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed) + { + unsafe { + self.lock_contested(state, thread_self); + } + } + } + + /// # Safety + /// `thread_self` must be the handle for the current thread. + #[cold] + unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) { + let owned_state = mark_contested(to_state(thread_self)); + loop { + // Mark the mutex as contested if it is not already. + let contested = mark_contested(state); + if is_contested(state) + || self.futex.compare_exchange(state, contested, Relaxed, Relaxed).is_ok() + { + // The mutex has been marked as contested, wait for the state to change. + unsafe { + match zx_futex_wait( + &self.futex, + AtomicU32::new(contested), + to_owner(state), + ZX_TIME_INFINITE, + ) { + ZX_OK | ZX_ERR_BAD_STATE | ZX_ERR_TIMED_OUT => (), + // Note that if a thread handle is reused after its associated thread + // exits without unlocking the mutex, an arbitrary thread's priority + // could be boosted by the wait, but there is currently no way to + // prevent that. + ZX_ERR_INVALID_ARGS | ZX_ERR_BAD_HANDLE | ZX_ERR_WRONG_TYPE => { + panic!( + "either the current thread is trying to lock a mutex it has + already locked, or the previous owner did not unlock the mutex + before exiting" + ) + } + error => panic!("unexpected error in zx_futex_wait: {error}"), + } + } + } + + // The state has changed or a wakeup occurred, try to lock the mutex. + match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) { + Ok(_) => return, + Err(updated) => state = updated, + } + } + } + + #[inline] + pub unsafe fn unlock(&self) { + if is_contested(self.futex.swap(UNLOCKED, Release)) { + // The woken thread will mark the mutex as contested again, + // and return here, waking until there are no waiters left, + // in which case this is a noop. 
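// To illustrate the encoding used by `is_contested` above with a hypothetical
// handle value: if `zx_thread_self()` returns 0b...0111, the uncontested
// locked state is that same value (lowest bit set), `mark_contested` clears
// bit 0 to give 0b...0110, and `to_owner` sets it again so the kernel still
// receives the real owner handle in `zx_futex_wait`.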
+ self.wake(); + } + } + + #[cold] + fn wake(&self) { + unsafe { + zx_futex_wake_single_owner(&self.futex); + } + } +} diff --git a/library/std/src/sys/sync/mutex/futex.rs b/library/std/src/sys/sync/mutex/futex.rs new file mode 100644 index 00000000000..7427cae94d6 --- /dev/null +++ b/library/std/src/sys/sync/mutex/futex.rs @@ -0,0 +1,108 @@ +use crate::sync::atomic::{ + self, + Ordering::{Acquire, Relaxed, Release}, +}; +use crate::sys::futex::{futex_wait, futex_wake}; + +cfg_if::cfg_if! { +if #[cfg(windows)] { + // On Windows we can have a smol futex + type Atomic = atomic::AtomicU8; + type State = u8; +} else { + type Atomic = atomic::AtomicU32; + type State = u32; +} +} + +pub struct Mutex { + futex: Atomic, +} + +const UNLOCKED: State = 0; +const LOCKED: State = 1; // locked, no other threads waiting +const CONTENDED: State = 2; // locked, and other threads waiting (contended) + +impl Mutex { + #[inline] + pub const fn new() -> Self { + Self { futex: Atomic::new(UNLOCKED) } + } + + #[inline] + pub fn try_lock(&self) -> bool { + self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed).is_ok() + } + + #[inline] + pub fn lock(&self) { + if self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed).is_err() { + self.lock_contended(); + } + } + + #[cold] + fn lock_contended(&self) { + // Spin first to speed things up if the lock is released quickly. + let mut state = self.spin(); + + // If it's unlocked now, attempt to take the lock + // without marking it as contended. + if state == UNLOCKED { + match self.futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed) { + Ok(_) => return, // Locked! + Err(s) => state = s, + } + } + + loop { + // Put the lock in contended state. + // We avoid an unnecessary write if it as already set to CONTENDED, + // to be friendlier for the caches. + if state != CONTENDED && self.futex.swap(CONTENDED, Acquire) == UNLOCKED { + // We changed it from UNLOCKED to CONTENDED, so we just successfully locked it. + return; + } + + // Wait for the futex to change state, assuming it is still CONTENDED. + futex_wait(&self.futex, CONTENDED, None); + + // Spin again after waking up. + state = self.spin(); + } + } + + fn spin(&self) -> State { + let mut spin = 100; + loop { + // We only use `load` (and not `swap` or `compare_exchange`) + // while spinning, to be easier on the caches. + let state = self.futex.load(Relaxed); + + // We stop spinning when the mutex is UNLOCKED, + // but also when it's CONTENDED. + if state != LOCKED || spin == 0 { + return state; + } + + crate::hint::spin_loop(); + spin -= 1; + } + } + + #[inline] + pub unsafe fn unlock(&self) { + if self.futex.swap(UNLOCKED, Release) == CONTENDED { + // We only wake up one thread. When that thread locks the mutex, it + // will mark the mutex as CONTENDED (see lock_contended above), + // which makes sure that any other waiting threads will also be + // woken up eventually. + self.wake(); + } + } + + #[cold] + fn wake(&self) { + futex_wake(&self.futex); + } +} diff --git a/library/std/src/sys/sync/mutex/itron.rs b/library/std/src/sys/sync/mutex/itron.rs new file mode 100644 index 00000000000..a134eb2d1be --- /dev/null +++ b/library/std/src/sys/sync/mutex/itron.rs @@ -0,0 +1,68 @@ +//! Mutex implementation backed by μITRON mutexes. Assumes `acre_mtx` and +//! `TA_INHERIT` are available. 
+use crate::sys::pal::itron::{ + abi, + error::{expect_success, expect_success_aborting, fail, ItronError}, + spin::SpinIdOnceCell, +}; + +pub struct Mutex { + /// The ID of the underlying mutex object + mtx: SpinIdOnceCell<()>, +} + +/// Create a mutex object. This function never panics. +fn new_mtx() -> Result { + ItronError::err_if_negative(unsafe { + abi::acre_mtx(&abi::T_CMTX { + // Priority inheritance mutex + mtxatr: abi::TA_INHERIT, + // Unused + ceilpri: 0, + }) + }) +} + +impl Mutex { + #[inline] + pub const fn new() -> Mutex { + Mutex { mtx: SpinIdOnceCell::new() } + } + + /// Get the inner mutex's ID, which is lazily created. + fn raw(&self) -> abi::ID { + match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) { + Ok((id, ())) => id, + Err(e) => fail(e, &"acre_mtx"), + } + } + + pub fn lock(&self) { + let mtx = self.raw(); + expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx"); + } + + pub unsafe fn unlock(&self) { + let mtx = unsafe { self.mtx.get_unchecked().0 }; + expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx"); + } + + pub fn try_lock(&self) -> bool { + let mtx = self.raw(); + match unsafe { abi::ploc_mtx(mtx) } { + abi::E_TMOUT => false, + er => { + expect_success(er, &"ploc_mtx"); + true + } + } + } +} + +impl Drop for Mutex { + fn drop(&mut self) { + if let Some(mtx) = self.mtx.get().map(|x| x.0) { + expect_success_aborting(unsafe { abi::del_mtx(mtx) }, &"del_mtx"); + } + } +} diff --git a/library/std/src/sys/sync/mutex/mod.rs b/library/std/src/sys/sync/mutex/mod.rs new file mode 100644 index 00000000000..73d9bd273de --- /dev/null +++ b/library/std/src/sys/sync/mutex/mod.rs @@ -0,0 +1,39 @@ +cfg_if::cfg_if! { + if #[cfg(any( + all(target_os = "windows", not(target_vendor = "win7")), + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + all(target_family = "wasm", target_feature = "atomics"), + target_os = "hermit", + ))] { + mod futex; + pub use futex::Mutex; + } else if #[cfg(target_os = "fuchsia")] { + mod fuchsia; + pub use fuchsia::Mutex; + } else if #[cfg(any( + target_family = "unix", + target_os = "teeos", + ))] { + mod pthread; + pub use pthread::{Mutex, raw}; + } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] { + mod windows7; + pub use windows7::{Mutex, raw}; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::Mutex; + } else if #[cfg(target_os = "solid_asp3")] { + mod itron; + pub use itron::Mutex; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use xous::Mutex; + } else { + mod no_threads; + pub use no_threads::Mutex; + } +} diff --git a/library/std/src/sys/sync/mutex/no_threads.rs b/library/std/src/sys/sync/mutex/no_threads.rs new file mode 100644 index 00000000000..4a13c55fb8b --- /dev/null +++ b/library/std/src/sys/sync/mutex/no_threads.rs @@ -0,0 +1,32 @@ +use crate::cell::Cell; + +pub struct Mutex { + // This platform has no threads, so we can use a Cell here. 
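    // A `Cell<bool>` is all that is needed: with only one thread there can be
    // no concurrent access, and `lock` below still catches recursive locking
    // by panicking.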
+ locked: Cell, +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} // no threads on this platform + +impl Mutex { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Mutex { + Mutex { locked: Cell::new(false) } + } + + #[inline] + pub fn lock(&self) { + assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex"); + } + + #[inline] + pub unsafe fn unlock(&self) { + self.locked.set(false); + } + + #[inline] + pub fn try_lock(&self) -> bool { + self.locked.replace(true) == false + } +} diff --git a/library/std/src/sys/sync/mutex/pthread.rs b/library/std/src/sys/sync/mutex/pthread.rs new file mode 100644 index 00000000000..ee0794334fb --- /dev/null +++ b/library/std/src/sys/sync/mutex/pthread.rs @@ -0,0 +1,148 @@ +use crate::cell::UnsafeCell; +use crate::io::Error; +use crate::mem::{forget, MaybeUninit}; +use crate::sys::cvt_nz; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; + +struct AllocatedMutex(UnsafeCell); + +pub struct Mutex { + inner: LazyBox, +} + +#[inline] +pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t { + m.inner.0.get() +} + +unsafe impl Send for AllocatedMutex {} +unsafe impl Sync for AllocatedMutex {} + +impl LazyInit for AllocatedMutex { + fn init() -> Box { + let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER))); + + // Issue #33770 + // + // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have + // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you + // try to re-lock it from the same thread when you already hold a lock + // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html). + // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL + // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that + // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same + // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in + // a Mutex where re-locking is UB. + // + // In practice, glibc takes advantage of this undefined behavior to + // implement hardware lock elision, which uses hardware transactional + // memory to avoid acquiring the lock. While a transaction is in + // progress, the lock appears to be unlocked. This isn't a problem for + // other threads since the transactional memory will abort if a conflict + // is detected, however no abort is generated when re-locking from the + // same thread. + // + // Since locking the same mutex twice will result in two aliasing &mut + // references, we instead create the mutex with type + // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to + // re-lock it from the same thread, thus avoiding undefined behavior. + unsafe { + let mut attr = MaybeUninit::::uninit(); + cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap(); + let attr = PthreadMutexAttr(&mut attr); + cvt_nz(libc::pthread_mutexattr_settype( + attr.0.as_mut_ptr(), + libc::PTHREAD_MUTEX_NORMAL, + )) + .unwrap(); + cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap(); + } + + mutex + } + + fn destroy(mutex: Box) { + // We're not allowed to pthread_mutex_destroy a locked mutex, + // so check first if it's unlocked. + if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } { + unsafe { libc::pthread_mutex_unlock(mutex.0.get()) }; + drop(mutex); + } else { + // The mutex is locked. This happens if a MutexGuard is leaked. 
+ // In this case, we just leak the Mutex too. + forget(mutex); + } + } + + fn cancel_init(_: Box) { + // In this case, we can just drop it without any checks, + // since it cannot have been locked yet. + } +} + +impl Drop for AllocatedMutex { + #[inline] + fn drop(&mut self) { + let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) }; + if cfg!(target_os = "dragonfly") { + // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a + // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. + // Once it is used (locked/unlocked) or pthread_mutex_init() is called, + // this behaviour no longer occurs. + debug_assert!(r == 0 || r == libc::EINVAL); + } else { + debug_assert_eq!(r, 0); + } + } +} + +impl Mutex { + #[inline] + pub const fn new() -> Mutex { + Mutex { inner: LazyBox::new() } + } + + #[inline] + pub unsafe fn lock(&self) { + #[cold] + #[inline(never)] + fn fail(r: i32) -> ! { + let error = Error::from_raw_os_error(r); + panic!("failed to lock mutex: {error}"); + } + + let r = libc::pthread_mutex_lock(raw(self)); + // As we set the mutex type to `PTHREAD_MUTEX_NORMAL` above, we expect + // the lock call to never fail. Unfortunately however, some platforms + // (Solaris) do not conform to the standard, and instead always provide + // deadlock detection. How kind of them! Unfortunately that means that + // we need to check the error code here. To save us from UB on other + // less well-behaved platforms in the future, we do it even on "good" + // platforms like macOS. See #120147 for more context. + if r != 0 { + fail(r) + } + } + + #[inline] + pub unsafe fn unlock(&self) { + let r = libc::pthread_mutex_unlock(raw(self)); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + libc::pthread_mutex_trylock(raw(self)) == 0 + } +} + +pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit); + +impl Drop for PthreadMutexAttr<'_> { + fn drop(&mut self) { + unsafe { + let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr()); + debug_assert_eq!(result, 0); + } + } +} diff --git a/library/std/src/sys/sync/mutex/sgx.rs b/library/std/src/sys/sync/mutex/sgx.rs new file mode 100644 index 00000000000..d37bd02adf8 --- /dev/null +++ b/library/std/src/sys/sync/mutex/sgx.rs @@ -0,0 +1,59 @@ +use crate::sys::pal::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable}; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; + +/// FIXME: `UnsafeList` is not movable. 
+struct AllocatedMutex(SpinMutex>); + +pub struct Mutex { + inner: LazyBox, +} + +impl LazyInit for AllocatedMutex { + fn init() -> Box { + Box::new(AllocatedMutex(SpinMutex::new(WaitVariable::new(false)))) + } +} + +// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28 +impl Mutex { + pub const fn new() -> Mutex { + Mutex { inner: LazyBox::new() } + } + + #[inline] + pub fn lock(&self) { + let mut guard = self.inner.0.lock(); + if *guard.lock_var() { + // Another thread has the lock, wait + WaitQueue::wait(guard, || {}) + // Another thread has passed the lock to us + } else { + // We are just now obtaining the lock + *guard.lock_var_mut() = true; + } + } + + #[inline] + pub unsafe fn unlock(&self) { + let guard = self.inner.0.lock(); + if let Err(mut guard) = WaitQueue::notify_one(guard) { + // No other waiters, unlock + *guard.lock_var_mut() = false; + } else { + // There was a thread waiting, just pass the lock + } + } + + #[inline] + pub fn try_lock(&self) -> bool { + let mut guard = try_lock_or_false!(self.inner.0); + if *guard.lock_var() { + // Another thread has the lock + false + } else { + // We are just now obtaining the lock + *guard.lock_var_mut() = true; + true + } + } +} diff --git a/library/std/src/sys/sync/mutex/windows7.rs b/library/std/src/sys/sync/mutex/windows7.rs new file mode 100644 index 00000000000..ef2f84082cd --- /dev/null +++ b/library/std/src/sys/sync/mutex/windows7.rs @@ -0,0 +1,54 @@ +//! System Mutexes +//! +//! The Windows implementation of mutexes is a little odd and it might not be +//! immediately obvious what's going on. The primary oddness is that SRWLock is +//! used instead of CriticalSection, and this is done because: +//! +//! 1. SRWLock is several times faster than CriticalSection according to +//! benchmarks performed on both Windows 8 and Windows 7. +//! +//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The +//! Unix implementation deadlocks so consistency is preferred. See #19962 for +//! more details. +//! +//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy +//! is that there are no guarantees of fairness. + +use crate::cell::UnsafeCell; +use crate::sys::c; + +pub struct Mutex { + srwlock: UnsafeCell, +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +#[inline] +pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK { + m.srwlock.get() +} + +impl Mutex { + #[inline] + pub const fn new() -> Mutex { + Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) } + } + + #[inline] + pub fn lock(&self) { + unsafe { + c::AcquireSRWLockExclusive(raw(self)); + } + } + + #[inline] + pub fn try_lock(&self) -> bool { + unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 } + } + + #[inline] + pub unsafe fn unlock(&self) { + c::ReleaseSRWLockExclusive(raw(self)); + } +} diff --git a/library/std/src/sys/sync/mutex/xous.rs b/library/std/src/sys/sync/mutex/xous.rs new file mode 100644 index 00000000000..a8c9518ff0b --- /dev/null +++ b/library/std/src/sys/sync/mutex/xous.rs @@ -0,0 +1,110 @@ +use crate::os::xous::ffi::{blocking_scalar, do_yield}; +use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; +use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst}; + +pub struct Mutex { + /// The "locked" value indicates how many threads are waiting on this + /// Mutex. 
Possible values are: + /// 0: The lock is unlocked + /// 1: The lock is locked and uncontended + /// >=2: The lock is locked and contended + /// + /// A lock is "contended" when there is more than one thread waiting + /// for a lock, or it is locked for long periods of time. Rather than + /// spinning, these locks send a Message to the ticktimer server + /// requesting that they be woken up when a lock is unlocked. + locked: AtomicUsize, + + /// Whether this Mutex ever was contended, and therefore made a trip + /// to the ticktimer server. If this was never set, then we were never + /// on the slow path and can skip deregistering the mutex. + contended: AtomicBool, +} + +impl Mutex { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Mutex { + Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) } + } + + fn index(&self) -> usize { + core::ptr::from_ref(self).addr() + } + + #[inline] + pub unsafe fn lock(&self) { + // Try multiple times to acquire the lock without resorting to the ticktimer + // server. For locks that are held for a short amount of time, this will + // result in the ticktimer server never getting invoked. The `locked` value + // will be either 0 or 1. + for _attempts in 0..3 { + if unsafe { self.try_lock() } { + return; + } + do_yield(); + } + + // Try one more time to lock. If the lock is released between the previous code and + // here, then the inner `locked` value will be 1 at the end of this. If it was not + // locked, then the value will be more than 1, for example if there are multiple other + // threads waiting on this lock. + if unsafe { self.try_lock_or_poison() } { + return; + } + + // When this mutex is dropped, we will need to deregister it with the server. + self.contended.store(true, Relaxed); + + // The lock is now "contended". When the lock is released, a Message will get sent to the + // ticktimer server to wake it up. Note that this may already have happened, so the actual + // value of `lock` may be anything (0, 1, 2, ...). + blocking_scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(), + ) + .expect("failure to send LockMutex command"); + } + + #[inline] + pub unsafe fn unlock(&self) { + let prev = self.locked.fetch_sub(1, SeqCst); + + // If the previous value was 1, then this was a "fast path" unlock, so no + // need to involve the Ticktimer server + if prev == 1 { + return; + } + + // If it was 0, then something has gone seriously wrong and the counter + // has just wrapped around. + if prev == 0 { + panic!("mutex lock count underflowed"); + } + + // Unblock one thread that is waiting on this message. + blocking_scalar(ticktimer_server(), TicktimerScalar::UnlockMutex(self.index()).into()) + .expect("failure to send UnlockMutex command"); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() + } + + #[inline] + pub unsafe fn try_lock_or_poison(&self) -> bool { + self.locked.fetch_add(1, SeqCst) == 0 + } +} + +impl Drop for Mutex { + fn drop(&mut self) { + // If there was Mutex contention, then we involved the ticktimer. Free + // the resources associated with this Mutex as it is deallocated. 
+ if self.contended.load(Relaxed) { + blocking_scalar(ticktimer_server(), TicktimerScalar::FreeMutex(self.index()).into()) + .ok(); + } + } +} diff --git a/library/std/src/sys/sync/once/futex.rs b/library/std/src/sys/sync/once/futex.rs new file mode 100644 index 00000000000..609085dcd47 --- /dev/null +++ b/library/std/src/sys/sync/once/futex.rs @@ -0,0 +1,146 @@ +use crate::cell::Cell; +use crate::sync as public; +use crate::sync::atomic::{ + AtomicU32, + Ordering::{Acquire, Relaxed, Release}, +}; +use crate::sync::once::ExclusiveState; +use crate::sys::futex::{futex_wait, futex_wake_all}; + +// On some platforms, the OS is very nice and handles the waiter queue for us. +// This means we only need one atomic value with 5 states: + +/// No initialization has run yet, and no thread is currently using the Once. +const INCOMPLETE: u32 = 0; +/// Some thread has previously attempted to initialize the Once, but it panicked, +/// so the Once is now poisoned. There are no other threads currently accessing +/// this Once. +const POISONED: u32 = 1; +/// Some thread is currently attempting to run initialization. It may succeed, +/// so all future threads need to wait for it to finish. +const RUNNING: u32 = 2; +/// Some thread is currently attempting to run initialization and there are threads +/// waiting for it to finish. +const QUEUED: u32 = 3; +/// Initialization has completed and all future calls should finish immediately. +const COMPLETE: u32 = 4; + +// Threads wait by setting the state to QUEUED and calling `futex_wait` on the state +// variable. When the running thread finishes, it will wake all waiting threads using +// `futex_wake_all`. + +pub struct OnceState { + poisoned: bool, + set_state_to: Cell, +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_to.set(POISONED); + } +} + +struct CompletionGuard<'a> { + state: &'a AtomicU32, + set_state_on_drop_to: u32, +} + +impl<'a> Drop for CompletionGuard<'a> { + fn drop(&mut self) { + // Use release ordering to propagate changes to all threads checking + // up on the Once. `futex_wake_all` does its own synchronization, hence + // we do not need `AcqRel`. + if self.state.swap(self.set_state_on_drop_to, Release) == QUEUED { + futex_wake_all(&self.state); + } + } +} + +pub struct Once { + state: AtomicU32, +} + +impl Once { + #[inline] + pub const fn new() -> Once { + Once { state: AtomicU32::new(INCOMPLETE) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + // Use acquire ordering to make all initialization changes visible to the + // current thread. + self.state.load(Acquire) == COMPLETE + } + + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match *self.state.get_mut() { + INCOMPLETE => ExclusiveState::Incomplete, + POISONED => ExclusiveState::Poisoned, + COMPLETE => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + + // This uses FnMut to match the API of the generic implementation. As this + // implementation is quite light-weight, it is generic over the closure and + // so avoids the cost of dynamic dispatch. + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { + let mut state = self.state.load(Acquire); + loop { + match state { + POISONED if !ignore_poisoning => { + // Panic to propagate the poison. 
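// A sketch of how this branch is reached through the public API (illustrative
// only; the poisoning panic is assumed to be caught):
//
//     static INIT: std::sync::Once = std::sync::Once::new();
//     let _ = std::panic::catch_unwind(|| INIT.call_once(|| panic!())); // poisons INIT
//     INIT.call_once(|| {});         // would panic here, since ignore_poisoning is false
//     INIT.call_once_force(|_| {});  // would instead take the INCOMPLETE | POISONED arm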
+ panic!("Once instance has previously been poisoned"); + } + INCOMPLETE | POISONED => { + // Try to register the current thread as the one running. + if let Err(new) = + self.state.compare_exchange_weak(state, RUNNING, Acquire, Acquire) + { + state = new; + continue; + } + // `waiter_queue` will manage other waiting threads, and + // wake them up on drop. + let mut waiter_queue = + CompletionGuard { state: &self.state, set_state_on_drop_to: POISONED }; + // Run the function, letting it know if we're poisoned or not. + let f_state = public::OnceState { + inner: OnceState { + poisoned: state == POISONED, + set_state_to: Cell::new(COMPLETE), + }, + }; + f(&f_state); + waiter_queue.set_state_on_drop_to = f_state.inner.set_state_to.get(); + return; + } + RUNNING | QUEUED => { + // Set the state to QUEUED if it is not already. + if state == RUNNING + && let Err(new) = + self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire) + { + state = new; + continue; + } + + futex_wait(&self.state, QUEUED, None); + state = self.state.load(Acquire); + } + COMPLETE => return, + _ => unreachable!("state is never set to invalid values"), + } + } + } +} diff --git a/library/std/src/sys/sync/once/mod.rs b/library/std/src/sys/sync/once/mod.rs new file mode 100644 index 00000000000..61b29713fa1 --- /dev/null +++ b/library/std/src/sys/sync/once/mod.rs @@ -0,0 +1,36 @@ +// A "once" is a relatively simple primitive, and it's also typically provided +// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS +// primitives, however, tend to have surprising restrictions, such as the Unix +// one doesn't allow an argument to be passed to the function. +// +// As a result, we end up implementing it ourselves in the standard library. +// This also gives us the opportunity to optimize the implementation a bit which +// should help the fast path on call sites. + +cfg_if::cfg_if! { + if #[cfg(any( + target_os = "linux", + target_os = "android", + all(target_arch = "wasm32", target_feature = "atomics"), + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", + target_os = "hermit", + ))] { + mod futex; + pub use futex::{Once, OnceState}; + } else if #[cfg(any( + windows, + target_family = "unix", + all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "solid_asp3", + target_os = "xous", + ))] { + mod queue; + pub use queue::{Once, OnceState}; + } else { + mod no_threads; + pub use no_threads::{Once, OnceState}; + } +} diff --git a/library/std/src/sys/sync/once/no_threads.rs b/library/std/src/sys/sync/once/no_threads.rs new file mode 100644 index 00000000000..11fde1888ba --- /dev/null +++ b/library/std/src/sys/sync/once/no_threads.rs @@ -0,0 +1,100 @@ +use crate::cell::Cell; +use crate::sync as public; +use crate::sync::once::ExclusiveState; + +pub struct Once { + state: Cell, +} + +pub struct OnceState { + poisoned: bool, + set_state_to: Cell, +} + +#[derive(Clone, Copy, PartialEq, Eq)] +enum State { + Incomplete, + Poisoned, + Running, + Complete, +} + +struct CompletionGuard<'a> { + state: &'a Cell, + set_state_on_drop_to: State, +} + +impl<'a> Drop for CompletionGuard<'a> { + fn drop(&mut self) { + self.state.set(self.set_state_on_drop_to); + } +} + +// Safety: threads are not supported on this platform. 
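// Only one thread can ever exist, so a `&Once` can never be observed
// concurrently and the `Cell`-based state cannot be raced on, even though
// `Cell` is not normally `Sync`.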
+unsafe impl Sync for Once {} + +impl Once { + #[inline] + #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] + pub const fn new() -> Once { + Once { state: Cell::new(State::Incomplete) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + self.state.get() == State::Complete + } + + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match self.state.get() { + State::Incomplete => ExclusiveState::Incomplete, + State::Poisoned => ExclusiveState::Poisoned, + State::Complete => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { + let state = self.state.get(); + match state { + State::Poisoned if !ignore_poisoning => { + // Panic to propagate the poison. + panic!("Once instance has previously been poisoned"); + } + State::Incomplete | State::Poisoned => { + self.state.set(State::Running); + // `guard` will set the new state on drop. + let mut guard = + CompletionGuard { state: &self.state, set_state_on_drop_to: State::Poisoned }; + // Run the function, letting it know if we're poisoned or not. + let f_state = public::OnceState { + inner: OnceState { + poisoned: state == State::Poisoned, + set_state_to: Cell::new(State::Complete), + }, + }; + f(&f_state); + guard.set_state_on_drop_to = f_state.inner.set_state_to.get(); + } + State::Running => { + panic!("one-time initialization may not be performed recursively"); + } + State::Complete => {} + } + } +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_to.set(State::Poisoned) + } +} diff --git a/library/std/src/sys/sync/once/queue.rs b/library/std/src/sys/sync/once/queue.rs new file mode 100644 index 00000000000..730cdb768bd --- /dev/null +++ b/library/std/src/sys/sync/once/queue.rs @@ -0,0 +1,294 @@ +// Each `Once` has one word of atomic state, and this state is CAS'd on to +// determine what to do. There are four possible state of a `Once`: +// +// * Incomplete - no initialization has run yet, and no thread is currently +// using the Once. +// * Poisoned - some thread has previously attempted to initialize the Once, but +// it panicked, so the Once is now poisoned. There are no other +// threads currently accessing this Once. +// * Running - some thread is currently attempting to run initialization. It may +// succeed, so all future threads need to wait for it to finish. +// Note that this state is accompanied with a payload, described +// below. +// * Complete - initialization has completed and all future calls should finish +// immediately. +// +// With 4 states we need 2 bits to encode this, and we use the remaining bits +// in the word we have allocated as a queue of threads waiting for the thread +// responsible for entering the RUNNING state. This queue is just a linked list +// of Waiter nodes which is monotonically increasing in size. Each node is +// allocated on the stack, and whenever the running closure finishes it will +// consume the entire queue and notify all waiters they should try again. +// +// You'll find a few more details in the implementation, but that's the gist of +// it! +// +// Atomic orderings: +// When running `Once` we deal with multiple atomics: +// `Once.state_and_queue` and an unknown number of `Waiter.signaled`. 
+// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the +// result of the `Once`, and (3) for synchronizing `Waiter` nodes. +// - At the end of the `call` function we have to make sure the result +// of the `Once` is acquired. So every load which can be the only one to +// load COMPLETED must have at least acquire ordering, which means all +// three of them. +// - `WaiterQueue::drop` is the only place that may store COMPLETED, and +// must do so with release ordering to make the result available. +// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and +// needs to make the nodes available with release ordering. The load in +// its `compare_exchange` can be relaxed because it only has to compare +// the atomic, not to read other data. +// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load +// `state_and_queue` with acquire ordering. +// - There is just one store where `state_and_queue` is used only as a +// state flag, without having to synchronize data: switching the state +// from INCOMPLETE to RUNNING in `call`. This store can be Relaxed, +// but the read has to be Acquire because of the requirements mentioned +// above. +// * `Waiter.signaled` is both used as a flag, and to protect a field with +// interior mutability in `Waiter`. `Waiter.thread` is changed in +// `WaiterQueue::drop` which then sets `signaled` with release ordering. +// After `wait` loads `signaled` with acquire ordering and sees it is true, +// it needs to see the changes to drop the `Waiter` struct correctly. +// * There is one place where the two atomics `Once.state_and_queue` and +// `Waiter.signaled` come together, and might be reordered by the compiler or +// processor. Because both use acquire ordering such a reordering is not +// allowed, so no need for `SeqCst`. + +use crate::cell::Cell; +use crate::fmt; +use crate::ptr; +use crate::sync as public; +use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use crate::sync::once::ExclusiveState; +use crate::thread::{self, Thread}; + +type Masked = (); + +pub struct Once { + state_and_queue: AtomicPtr, +} + +pub struct OnceState { + poisoned: bool, + set_state_on_drop_to: Cell<*mut Masked>, +} + +// Four states that a Once can be in, encoded into the lower bits of +// `state_and_queue` in the Once structure. +const INCOMPLETE: usize = 0x0; +const POISONED: usize = 0x1; +const RUNNING: usize = 0x2; +const COMPLETE: usize = 0x3; + +// Mask to learn about the state. All other bits are the queue of waiters if +// this is in the RUNNING state. +const STATE_MASK: usize = 0x3; + +// Representation of a node in the linked list of waiters, used while in the +// RUNNING state. +// Note: `Waiter` can't hold a mutable pointer to the next thread, because then +// `wait` would both hand out a mutable reference to its `Waiter` node, and keep +// a shared reference to check `signaled`. Instead we hold shared references and +// use interior mutability. +#[repr(align(4))] // Ensure the two lower bits are free to use as state bits. +struct Waiter { + thread: Cell>, + signaled: AtomicBool, + next: *const Waiter, +} + +// Head of a linked list of waiters. +// Every node is a struct on the stack of a waiting thread. +// Will wake up the waiters when it gets dropped, i.e. also on panic. 
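The `#[repr(align(4))]` on `Waiter` is what frees the two low bits of the node address for the state. Here is that packing in isolation, using plain integer casts for clarity (a hypothetical helper; the real code keeps pointer provenance by going through `AtomicPtr` and `addr`/`with_addr`):

```rust
// Illustrative sketch, not the std implementation.
#[repr(align(4))]
struct Waiter {
    _next: *const Waiter,
}

const STATE_MASK: usize = 0b11;
const RUNNING: usize = 0x2;

fn pack(node: *const Waiter, state: usize) -> usize {
    // Alignment guarantees the low two bits of the address are zero.
    debug_assert_eq!(node as usize & STATE_MASK, 0);
    node as usize | (state & STATE_MASK)
}

fn unpack(word: usize) -> (*const Waiter, usize) {
    ((word & !STATE_MASK) as *const Waiter, word & STATE_MASK)
}

fn main() {
    let node = Waiter { _next: std::ptr::null() };
    let word = pack(&node, RUNNING);
    let (ptr, state) = unpack(word);
    assert_eq!(ptr, &node as *const Waiter);
    assert_eq!(state, RUNNING);
}
```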
+struct WaiterQueue<'a> { + state_and_queue: &'a AtomicPtr, + set_state_on_drop_to: *mut Masked, +} + +impl Once { + #[inline] + #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] + pub const fn new() -> Once { + Once { state_and_queue: AtomicPtr::new(ptr::without_provenance_mut(INCOMPLETE)) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + // An `Acquire` load is enough because that makes all the initialization + // operations visible to us, and, this being a fast path, weaker + // ordering helps with performance. This `Acquire` synchronizes with + // `Release` operations on the slow path. + self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE + } + + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match self.state_and_queue.get_mut().addr() { + INCOMPLETE => ExclusiveState::Incomplete, + POISONED => ExclusiveState::Poisoned, + COMPLETE => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + + // This is a non-generic function to reduce the monomorphization cost of + // using `call_once` (this isn't exactly a trivial or small implementation). + // + // Additionally, this is tagged with `#[cold]` as it should indeed be cold + // and it helps let LLVM know that calls to this function should be off the + // fast path. Essentially, this should help generate more straight line code + // in LLVM. + // + // Finally, this takes an `FnMut` instead of a `FnOnce` because there's + // currently no way to take an `FnOnce` and call it via virtual dispatch + // without some allocation overhead. + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) { + let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); + loop { + match state_and_queue.addr() { + COMPLETE => break, + POISONED if !ignore_poisoning => { + // Panic to propagate the poison. + panic!("Once instance has previously been poisoned"); + } + POISONED | INCOMPLETE => { + // Try to register this thread as the one RUNNING. + let exchange_result = self.state_and_queue.compare_exchange( + state_and_queue, + ptr::without_provenance_mut(RUNNING), + Ordering::Acquire, + Ordering::Acquire, + ); + if let Err(old) = exchange_result { + state_and_queue = old; + continue; + } + // `waiter_queue` will manage other waiting threads, and + // wake them up on drop. + let mut waiter_queue = WaiterQueue { + state_and_queue: &self.state_and_queue, + set_state_on_drop_to: ptr::without_provenance_mut(POISONED), + }; + // Run the initialization function, letting it know if we're + // poisoned or not. + let init_state = public::OnceState { + inner: OnceState { + poisoned: state_and_queue.addr() == POISONED, + set_state_on_drop_to: Cell::new(ptr::without_provenance_mut(COMPLETE)), + }, + }; + init(&init_state); + waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get(); + break; + } + _ => { + // All other values must be RUNNING with possibly a + // pointer to the waiter queue in the more significant bits. + assert!(state_and_queue.addr() & STATE_MASK == RUNNING); + wait(&self.state_and_queue, state_and_queue); + state_and_queue = self.state_and_queue.load(Ordering::Acquire); + } + } + } + } +} + +fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { + // Note: the following code was carefully written to avoid creating a + // mutable reference to `node` that gets aliased. 
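The `&mut dyn FnMut` parameter above is the standard way of outlining a slow path: the generic wrapper stays tiny and inlinable, while the heavy body is compiled exactly once. In miniature, with made-up names (this is not the std source):

```rust
// The non-generic, outlined slow path: compiled once for all callers.
#[cold]
fn call_once_slow(done: &mut bool, f: &mut dyn FnMut()) {
    if !*done {
        f();
        *done = true;
    }
}

// The thin generic wrapper that call sites actually monomorphize.
#[inline]
fn call_once(done: &mut bool, mut f: impl FnMut()) {
    call_once_slow(done, &mut f);
}

fn main() {
    let mut done = false;
    let mut count = 0;
    call_once(&mut done, || count += 1);
    call_once(&mut done, || count += 1);
    assert_eq!(count, 1); // the closure ran exactly once
}
```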
+ loop { + // Don't queue this thread if the status is no longer running, + // otherwise we will not be woken up. + if current_state.addr() & STATE_MASK != RUNNING { + return; + } + + // Create the node for our current thread. + let node = Waiter { + thread: Cell::new(Some(thread::current())), + signaled: AtomicBool::new(false), + next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, + }; + let me = core::ptr::addr_of!(node) as *const Masked as *mut Masked; + + // Try to slide in the node at the head of the linked list, making sure + // that another thread didn't just replace the head of the linked list. + let exchange_result = state_and_queue.compare_exchange( + current_state, + me.with_addr(me.addr() | RUNNING), + Ordering::Release, + Ordering::Relaxed, + ); + if let Err(old) = exchange_result { + current_state = old; + continue; + } + + // We have enqueued ourselves, now lets wait. + // It is important not to return before being signaled, otherwise we + // would drop our `Waiter` node and leave a hole in the linked list + // (and a dangling reference). Guard against spurious wakeups by + // reparking ourselves until we are signaled. + while !node.signaled.load(Ordering::Acquire) { + // If the managing thread happens to signal and unpark us before we + // can park ourselves, the result could be this thread never gets + // unparked. Luckily `park` comes with the guarantee that if it got + // an `unpark` just before on an unparked thread it does not park. + thread::park(); + } + break; + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl fmt::Debug for Once { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Once").finish_non_exhaustive() + } +} + +impl Drop for WaiterQueue<'_> { + fn drop(&mut self) { + // Swap out our state with however we finished. + let state_and_queue = + self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); + + // We should only ever see an old state which was RUNNING. + assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); + + // Walk the entire linked list of waiters and wake them up (in lifo + // order, last to register is first to wake up). + unsafe { + // Right after setting `node.signaled = true` the other thread may + // free `node` if there happens to be has a spurious wakeup. + // So we have to take out the `thread` field and copy the pointer to + // `next` first. + let mut queue = + state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; + while !queue.is_null() { + let next = (*queue).next; + let thread = (*queue).thread.take().unwrap(); + (*queue).signaled.store(true, Ordering::Release); + // ^- FIXME (maybe): This is another case of issue #55005 + // `store()` has a potentially dangling ref to `signaled`. 
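The park loop in `wait` above depends on two properties of `park`/`unpark`: a wake-up delivered before the thread parks is not lost, and spurious wake-ups are harmless because the flag is re-checked. The same handshake, free-standing (user-level code, not this patch):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let signaled = Arc::new(AtomicBool::new(false));
    let waiter = {
        let signaled = Arc::clone(&signaled);
        thread::spawn(move || {
            // Re-check the flag: `park` may return spuriously.
            while !signaled.load(Ordering::Acquire) {
                thread::park();
            }
        })
    };

    // Publish the flag first, then unpark; if the unpark arrives before
    // the other thread parks, the stored token prevents it from sleeping.
    signaled.store(true, Ordering::Release);
    waiter.thread().unpark();
    waiter.join().unwrap();
}
```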
+ queue = next; + thread.unpark(); + } + } + } +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_on_drop_to.set(ptr::without_provenance_mut(POISONED)); + } +} diff --git a/library/std/src/sys/sync/rwlock/futex.rs b/library/std/src/sys/sync/rwlock/futex.rs new file mode 100644 index 00000000000..aa0de900238 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/futex.rs @@ -0,0 +1,320 @@ +use crate::sync::atomic::{ + AtomicU32, + Ordering::{Acquire, Relaxed, Release}, +}; +use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all}; + +pub struct RwLock { + // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag. + // Bits 0..30: + // 0: Unlocked + // 1..=0x3FFF_FFFE: Locked by N readers + // 0x3FFF_FFFF: Write locked + // Bit 30: Readers are waiting on this futex. + // Bit 31: Writers are waiting on the writer_notify futex. + state: AtomicU32, + // The 'condition variable' to notify writers through. + // Incremented on every signal. + writer_notify: AtomicU32, +} + +const READ_LOCKED: u32 = 1; +const MASK: u32 = (1 << 30) - 1; +const WRITE_LOCKED: u32 = MASK; +const MAX_READERS: u32 = MASK - 1; +const READERS_WAITING: u32 = 1 << 30; +const WRITERS_WAITING: u32 = 1 << 31; + +#[inline] +fn is_unlocked(state: u32) -> bool { + state & MASK == 0 +} + +#[inline] +fn is_write_locked(state: u32) -> bool { + state & MASK == WRITE_LOCKED +} + +#[inline] +fn has_readers_waiting(state: u32) -> bool { + state & READERS_WAITING != 0 +} + +#[inline] +fn has_writers_waiting(state: u32) -> bool { + state & WRITERS_WAITING != 0 +} + +#[inline] +fn is_read_lockable(state: u32) -> bool { + // This also returns false if the counter could overflow if we tried to read lock it. + // + // We don't allow read-locking if there's readers waiting, even if the lock is unlocked + // and there's no writers waiting. The only situation when this happens is after unlocking, + // at which point the unlocking thread might be waking up writers, which have priority over readers. + // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary. + state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state) +} + +#[inline] +fn has_reached_max_readers(state: u32) -> bool { + state & MASK == MAX_READERS +} + +impl RwLock { + #[inline] + pub const fn new() -> Self { + Self { state: AtomicU32::new(0), writer_notify: AtomicU32::new(0) } + } + + #[inline] + pub fn try_read(&self) -> bool { + self.state + .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED)) + .is_ok() + } + + #[inline] + pub fn read(&self) { + let state = self.state.load(Relaxed); + if !is_read_lockable(state) + || self + .state + .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed) + .is_err() + { + self.read_contended(); + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED; + + // It's impossible for a reader to be waiting on a read-locked RwLock, + // except if there is also a writer waiting. + debug_assert!(!has_readers_waiting(state) || has_writers_waiting(state)); + + // Wake up a writer if we were the last reader and there's a writer waiting. 
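For reference, here is the reader-count/flag encoding described at the top of this file, exercised on a few concrete values. This is a standalone check with a hypothetical `describe` helper, not part of the implementation:

```rust
const MASK: u32 = (1 << 30) - 1;
const WRITE_LOCKED: u32 = MASK;
const READERS_WAITING: u32 = 1 << 30;
const WRITERS_WAITING: u32 = 1 << 31;

fn describe(state: u32) -> String {
    let readers_waiting = state & READERS_WAITING != 0;
    let writers_waiting = state & WRITERS_WAITING != 0;
    let body = match state & MASK {
        0 => "unlocked".to_string(),
        WRITE_LOCKED => "write locked".to_string(),
        n => format!("read locked by {n} readers"),
    };
    format!("{body}, readers waiting: {readers_waiting}, writers waiting: {writers_waiting}")
}

fn main() {
    assert_eq!(
        describe(0),
        "unlocked, readers waiting: false, writers waiting: false"
    );
    assert_eq!(
        describe(3 | WRITERS_WAITING),
        "read locked by 3 readers, readers waiting: false, writers waiting: true"
    );
    assert_eq!(
        describe(WRITE_LOCKED | READERS_WAITING),
        "write locked, readers waiting: true, writers waiting: false"
    );
}
```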
+ if is_unlocked(state) && has_writers_waiting(state) { + self.wake_writer_or_readers(state); + } + } + + #[cold] + fn read_contended(&self) { + let mut state = self.spin_read(); + + loop { + // If we can lock it, lock it. + if is_read_lockable(state) { + match self.state.compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed) + { + Ok(_) => return, // Locked! + Err(s) => { + state = s; + continue; + } + } + } + + // Check for overflow. + if has_reached_max_readers(state) { + panic!("too many active read locks on RwLock"); + } + + // Make sure the readers waiting bit is set before we go to sleep. + if !has_readers_waiting(state) { + if let Err(s) = + self.state.compare_exchange(state, state | READERS_WAITING, Relaxed, Relaxed) + { + state = s; + continue; + } + } + + // Wait for the state to change. + futex_wait(&self.state, state | READERS_WAITING, None); + + // Spin again after waking up. + state = self.spin_read(); + } + } + + #[inline] + pub fn try_write(&self) -> bool { + self.state + .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED)) + .is_ok() + } + + #[inline] + pub fn write(&self) { + if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() { + self.write_contended(); + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED; + + debug_assert!(is_unlocked(state)); + + if has_writers_waiting(state) || has_readers_waiting(state) { + self.wake_writer_or_readers(state); + } + } + + #[cold] + fn write_contended(&self) { + let mut state = self.spin_write(); + + let mut other_writers_waiting = 0; + + loop { + // If it's unlocked, we try to lock it. + if is_unlocked(state) { + match self.state.compare_exchange_weak( + state, + state | WRITE_LOCKED | other_writers_waiting, + Acquire, + Relaxed, + ) { + Ok(_) => return, // Locked! + Err(s) => { + state = s; + continue; + } + } + } + + // Set the waiting bit indicating that we're waiting on it. + if !has_writers_waiting(state) { + if let Err(s) = + self.state.compare_exchange(state, state | WRITERS_WAITING, Relaxed, Relaxed) + { + state = s; + continue; + } + } + + // Other writers might be waiting now too, so we should make sure + // we keep that bit on once we manage lock it. + other_writers_waiting = WRITERS_WAITING; + + // Examine the notification counter before we check if `state` has changed, + // to make sure we don't miss any notifications. + let seq = self.writer_notify.load(Acquire); + + // Don't go to sleep if the lock has become available, + // or if the writers waiting bit is no longer set. + state = self.state.load(Relaxed); + if is_unlocked(state) || !has_writers_waiting(state) { + continue; + } + + // Wait for the state to change. + futex_wait(&self.writer_notify, seq, None); + + // Spin again after waking up. + state = self.spin_write(); + } + } + + /// Wake up waiting threads after unlocking. + /// + /// If both are waiting, this will wake up only one writer, but will fall + /// back to waking up readers if there was no writer to wake up. + #[cold] + fn wake_writer_or_readers(&self, mut state: u32) { + assert!(is_unlocked(state)); + + // The readers waiting bit might be turned on at any point now, + // since readers will block when there's anything waiting. + // Writers will just lock the lock though, regardless of the waiting bits, + // so we don't have to worry about the writer waiting bit. 
+ // + // If the lock gets locked in the meantime, we don't have to do + // anything, because then the thread that locked the lock will take + // care of waking up waiters when it unlocks. + + // If only writers are waiting, wake one of them up. + if state == WRITERS_WAITING { + match self.state.compare_exchange(state, 0, Relaxed, Relaxed) { + Ok(_) => { + self.wake_writer(); + return; + } + Err(s) => { + // Maybe some readers are now waiting too. So, continue to the next `if`. + state = s; + } + } + } + + // If both writers and readers are waiting, leave the readers waiting + // and only wake up one writer. + if state == READERS_WAITING + WRITERS_WAITING { + if self.state.compare_exchange(state, READERS_WAITING, Relaxed, Relaxed).is_err() { + // The lock got locked. Not our problem anymore. + return; + } + if self.wake_writer() { + return; + } + // No writers were actually blocked on futex_wait, so we continue + // to wake up readers instead, since we can't be sure if we notified a writer. + state = READERS_WAITING; + } + + // If readers are waiting, wake them all up. + if state == READERS_WAITING { + if self.state.compare_exchange(state, 0, Relaxed, Relaxed).is_ok() { + futex_wake_all(&self.state); + } + } + } + + /// This wakes one writer and returns true if we woke up a writer that was + /// blocked on futex_wait. + /// + /// If this returns false, it might still be the case that we notified a + /// writer that was about to go to sleep. + fn wake_writer(&self) -> bool { + self.writer_notify.fetch_add(1, Release); + futex_wake(&self.writer_notify) + // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke + // up any threads or not, and always return `false` here. That still + // results in correct behaviour: it just means readers get woken up as + // well in case both readers and writers were waiting. + } + + /// Spin for a while, but stop directly at the given condition. + #[inline] + fn spin_until(&self, f: impl Fn(u32) -> bool) -> u32 { + let mut spin = 100; // Chosen by fair dice roll. + loop { + let state = self.state.load(Relaxed); + if f(state) || spin == 0 { + return state; + } + crate::hint::spin_loop(); + spin -= 1; + } + } + + #[inline] + fn spin_write(&self) -> u32 { + // Stop spinning when it's unlocked or when there's waiting writers, to keep things somewhat fair. + self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state)) + } + + #[inline] + fn spin_read(&self) -> u32 { + // Stop spinning when it's unlocked or read locked, or when there's waiting threads. + self.spin_until(|state| { + !is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state) + }) + } +} diff --git a/library/std/src/sys/sync/rwlock/mod.rs b/library/std/src/sys/sync/rwlock/mod.rs new file mode 100644 index 00000000000..675931c64bd --- /dev/null +++ b/library/std/src/sys/sync/rwlock/mod.rs @@ -0,0 +1,37 @@ +cfg_if::cfg_if! 
{ + if #[cfg(any( + all(target_os = "windows", not(target_vendor = "win7")), + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", + all(target_family = "wasm", target_feature = "atomics"), + target_os = "hermit", + ))] { + mod futex; + pub use futex::RwLock; + } else if #[cfg(target_family = "unix")] { + mod queue; + pub use queue::RwLock; + } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] { + mod windows7; + pub use windows7::RwLock; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::RwLock; + } else if #[cfg(target_os = "solid_asp3")] { + mod solid; + pub use solid::RwLock; + } else if #[cfg(target_os = "teeos")] { + mod teeos; + pub use teeos::RwLock; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use xous::RwLock; + } else { + mod no_threads; + pub use no_threads::RwLock; + } +} diff --git a/library/std/src/sys/sync/rwlock/no_threads.rs b/library/std/src/sys/sync/rwlock/no_threads.rs new file mode 100644 index 00000000000..789ef9b29e5 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/no_threads.rs @@ -0,0 +1,65 @@ +use crate::cell::Cell; + +pub struct RwLock { + // This platform has no threads, so we can use a Cell here. + mode: Cell, +} + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} // no threads on this platform + +impl RwLock { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> RwLock { + RwLock { mode: Cell::new(0) } + } + + #[inline] + pub fn read(&self) { + let m = self.mode.get(); + if m >= 0 { + self.mode.set(m + 1); + } else { + rtabort!("rwlock locked for writing"); + } + } + + #[inline] + pub fn try_read(&self) -> bool { + let m = self.mode.get(); + if m >= 0 { + self.mode.set(m + 1); + true + } else { + false + } + } + + #[inline] + pub fn write(&self) { + if self.mode.replace(-1) != 0 { + rtabort!("rwlock locked for reading") + } + } + + #[inline] + pub fn try_write(&self) -> bool { + if self.mode.get() == 0 { + self.mode.set(-1); + true + } else { + false + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + self.mode.set(self.mode.get() - 1); + } + + #[inline] + pub unsafe fn write_unlock(&self) { + assert_eq!(self.mode.replace(0), -1); + } +} diff --git a/library/std/src/sys/sync/rwlock/queue.rs b/library/std/src/sys/sync/rwlock/queue.rs new file mode 100644 index 00000000000..dce966086b8 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/queue.rs @@ -0,0 +1,557 @@ +//! Efficient read-write locking without `pthread_rwlock_t`. +//! +//! The readers-writer lock provided by the `pthread` library has a number of +//! problems which make it a suboptimal choice for `std`: +//! +//! * It is non-movable, so it needs to be allocated (lazily, to make the +//! constructor `const`). +//! * `pthread` is an external library, meaning the fast path of acquiring an +//! uncontended lock cannot be inlined. +//! * Some platforms (at least glibc before version 2.25) have buggy implementations +//! that can easily lead to undefined behaviour in safe Rust code when not properly +//! guarded against. +//! * On some platforms (e.g. macOS), the lock is very slow. +//! +//! Therefore, we implement our own `RwLock`! Naively, one might reach for a +//! spinlock, but those [can be quite problematic] when the lock is contended. +//! Instead, this readers-writer lock copies its implementation strategy from +//! 
the Windows [SRWLOCK] and the [usync] library. Spinning is still used for the +//! fast path, but it is bounded: after spinning fails, threads will locklessly +//! add an information structure containing a [`Thread`] handle into a queue of +//! waiters associated with the lock. The lock owner, upon releasing the lock, +//! will scan through the queue and wake up threads as appropriate, which will +//! then again try to acquire the lock. The resulting [`RwLock`] is: +//! +//! * adaptive, since it spins before doing any heavywheight parking operations +//! * allocation-free, modulo the per-thread [`Thread`] handle, which is +//! allocated regardless when using threads created by `std` +//! * writer-preferring, even if some readers may still slip through +//! * unfair, which reduces context-switching and thus drastically improves +//! performance +//! +//! and also quite fast in most cases. +//! +//! [can be quite problematic]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +//! [SRWLOCK]: https://learn.microsoft.com/en-us/windows/win32/sync/slim-reader-writer--srw--locks +//! [usync]: https://crates.io/crates/usync +//! +//! # Implementation +//! +//! ## State +//! +//! A single [`AtomicPtr`] is used as state variable. The lowest three bits are used +//! to indicate the meaning of the remaining bits: +//! +//! | [`LOCKED`] | [`QUEUED`] | [`QUEUE_LOCKED`] | Remaining | | +//! |:-----------|:-----------|:-----------------|:-------------|:----------------------------------------------------------------------------------------------------------------------------| +//! | 0 | 0 | 0 | 0 | The lock is unlocked, no threads are waiting | +//! | 1 | 0 | 0 | 0 | The lock is write-locked, no threads waiting | +//! | 1 | 0 | 0 | n > 0 | The lock is read-locked with n readers | +//! | 0 | 1 | * | `*mut Node` | The lock is unlocked, but some threads are waiting. Only writers may lock the lock | +//! | 1 | 1 | * | `*mut Node` | The lock is locked, but some threads are waiting. If the lock is read-locked, the last queue node contains the reader count | +//! +//! ## Waiter queue +//! +//! When threads are waiting on the lock (`QUEUE` is set), the lock state +//! points to a queue of waiters, which is implemented as a linked list of +//! nodes stored on the stack to avoid memory allocation. To enable lockless +//! enqueuing of new nodes to the queue, the linked list is single-linked upon +//! creation. Since when the lock is read-locked, the lock count is stored in +//! the last link of the queue, threads have to traverse the queue to find the +//! last element upon releasing the lock. To avoid having to traverse the whole +//! list again and again, a pointer to the found tail is cached in the (current) +//! first element of the queue. +//! +//! Also, while the lock is unfair for performance reasons, it is still best to +//! wake the tail node first, which requires backlinks to previous nodes to be +//! created. This is done at the same time as finding the tail, and thus a set +//! tail field indicates the remaining portion of the queue is initialized. +//! +//! TLDR: Here's a diagram of what the queue looks like: +//! +//! ```text +//! state +//! │ +//! ▼ +//! ╭───────╮ next ╭───────╮ next ╭───────╮ next ╭───────╮ +//! │ ├─────►│ ├─────►│ ├─────►│ count │ +//! │ │ │ │ │ │ │ │ +//! │ │ │ │◄─────┤ │◄─────┤ │ +//! ╰───────╯ ╰───────╯ prev ╰───────╯ prev ╰───────╯ +//! │ ▲ +//! └───────────────────────────┘ +//! tail +//! ``` +//! +//! Invariants: +//! 1. 
At least one node must contain a non-null, current `tail` field. +//! 2. The first non-null `tail` field must be valid and current. +//! 3. All nodes preceding this node must have a correct, non-null `next` field. +//! 4. All nodes following this node must have a correct, non-null `prev` field. +//! +//! Access to the queue is controlled by the `QUEUE_LOCKED` bit, which threads +//! try to set both after enqueuing themselves to eagerly add backlinks to the +//! queue, which drastically improves performance, and after unlocking the lock +//! to wake the next waiter(s). This is done atomically at the same time as the +//! enqueuing/unlocking operation. The thread releasing the `QUEUE_LOCK` bit +//! will check the state of the lock and wake up waiters as appropriate. This +//! guarantees forward-progress even if the unlocking thread could not acquire +//! the queue lock. +//! +//! ## Memory orderings +//! +//! To properly synchronize changes to the data protected by the lock, the lock +//! is acquired and released with [`Acquire`] and [`Release`] ordering, respectively. +//! To propagate the initialization of nodes, changes to the queue lock are also +//! performed using these orderings. + +#![forbid(unsafe_op_in_unsafe_fn)] + +use crate::cell::OnceCell; +use crate::hint::spin_loop; +use crate::mem; +use crate::ptr::{self, null_mut, without_provenance_mut, NonNull}; +use crate::sync::atomic::{ + AtomicBool, AtomicPtr, + Ordering::{AcqRel, Acquire, Relaxed, Release}, +}; +use crate::sys_common::thread_info; +use crate::thread::Thread; + +// Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the +// locking operation will be retried. +// `spin_loop` will be called `2.pow(SPIN_COUNT) - 1` times. +const SPIN_COUNT: usize = 7; + +type State = *mut (); +type AtomicState = AtomicPtr<()>; + +const UNLOCKED: State = without_provenance_mut(0); +const LOCKED: usize = 1; +const QUEUED: usize = 2; +const QUEUE_LOCKED: usize = 4; +const SINGLE: usize = 8; +const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED); + +/// Marks the state as write-locked, if possible. +#[inline] +fn write_lock(state: State) -> Option { + let state = state.wrapping_byte_add(LOCKED); + if state.addr() & LOCKED == LOCKED { Some(state) } else { None } +} + +/// Marks the state as read-locked, if possible. +#[inline] +fn read_lock(state: State) -> Option { + if state.addr() & QUEUED == 0 && state.addr() != LOCKED { + Some(without_provenance_mut(state.addr().checked_add(SINGLE)? | LOCKED)) + } else { + None + } +} + +/// Masks the state, assuming it points to a queue node. +/// +/// # Safety +/// The state must contain a valid pointer to a queue node. +#[inline] +unsafe fn to_node(state: State) -> NonNull { + unsafe { NonNull::new_unchecked(state.mask(MASK)).cast() } +} + +/// An atomic node pointer with relaxed operations. +struct AtomicLink(AtomicPtr); + +impl AtomicLink { + fn new(v: Option>) -> AtomicLink { + AtomicLink(AtomicPtr::new(v.map_or(null_mut(), NonNull::as_ptr))) + } + + fn get(&self) -> Option> { + NonNull::new(self.0.load(Relaxed)) + } + + fn set(&self, v: Option>) { + self.0.store(v.map_or(null_mut(), NonNull::as_ptr), Relaxed); + } +} + +#[repr(align(8))] +struct Node { + next: AtomicLink, + prev: AtomicLink, + tail: AtomicLink, + write: bool, + thread: OnceCell, + completed: AtomicBool, +} + +impl Node { + /// Create a new queue node. 
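The `SPIN_COUNT` bound above caps the optimistic phase before a thread gives up and queues itself. Stripped of everything else, bounded exponential backoff looks like this (a hypothetical helper, not the lock in this file):

```rust
use std::hint::spin_loop;
use std::sync::atomic::{AtomicUsize, Ordering};

const SPIN_COUNT: usize = 7;

/// Try to set a simple 0 -> 1 lock word, spinning 2^attempt times between
/// tries and giving up after SPIN_COUNT attempts.
fn try_lock_with_backoff(state: &AtomicUsize) -> bool {
    for attempt in 0..SPIN_COUNT {
        if state.compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed).is_ok() {
            return true;
        }
        for _ in 0..(1 << attempt) {
            spin_loop();
        }
    }
    false // the caller would now fall back to queueing and parking
}

fn main() {
    let state = AtomicUsize::new(0);
    assert!(try_lock_with_backoff(&state));
    assert!(!try_lock_with_backoff(&state)); // already locked, spinning gives up
}
```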
+ fn new(write: bool) -> Node { + Node { + next: AtomicLink::new(None), + prev: AtomicLink::new(None), + tail: AtomicLink::new(None), + write, + thread: OnceCell::new(), + completed: AtomicBool::new(false), + } + } + + /// Prepare this node for waiting. + fn prepare(&mut self) { + // Fall back to creating an unnamed `Thread` handle to allow locking in + // TLS destructors. + self.thread + .get_or_init(|| thread_info::current_thread().unwrap_or_else(|| Thread::new(None))); + self.completed = AtomicBool::new(false); + } + + /// Wait until this node is marked as completed. + /// + /// # Safety + /// May only be called from the thread that created the node. + unsafe fn wait(&self) { + while !self.completed.load(Acquire) { + unsafe { + self.thread.get().unwrap().park(); + } + } + } + + /// Atomically mark this node as completed. The node may not outlive this call. + unsafe fn complete(this: NonNull) { + // Since the node may be destroyed immediately after the completed flag + // is set, clone the thread handle before that. + let thread = unsafe { this.as_ref().thread.get().unwrap().clone() }; + unsafe { + this.as_ref().completed.store(true, Release); + } + thread.unpark(); + } +} + +struct PanicGuard; + +impl Drop for PanicGuard { + fn drop(&mut self) { + rtabort!("tried to drop node in intrusive list."); + } +} + +/// Add backlinks to the queue, returning the tail. +/// +/// May be called from multiple threads at the same time, while the queue is not +/// modified (this happens when unlocking multiple readers). +/// +/// # Safety +/// * `head` must point to a node in a valid queue. +/// * `head` must be or be in front of the head of the queue at the time of the +/// last removal. +/// * The part of the queue starting with `head` must not be modified during this +/// call. +unsafe fn add_backlinks_and_find_tail(head: NonNull) -> NonNull { + let mut current = head; + let tail = loop { + let c = unsafe { current.as_ref() }; + match c.tail.get() { + Some(tail) => break tail, + // SAFETY: + // All `next` fields before the first node with a `set` tail are + // non-null and valid (invariant 3). + None => unsafe { + let next = c.next.get().unwrap_unchecked(); + next.as_ref().prev.set(Some(current)); + current = next; + }, + } + }; + + unsafe { + head.as_ref().tail.set(Some(tail)); + tail + } +} + +pub struct RwLock { + state: AtomicState, +} + +impl RwLock { + #[inline] + pub const fn new() -> RwLock { + RwLock { state: AtomicPtr::new(UNLOCKED) } + } + + #[inline] + pub fn try_read(&self) -> bool { + self.state.fetch_update(Acquire, Relaxed, read_lock).is_ok() + } + + #[inline] + pub fn read(&self) { + if !self.try_read() { + self.lock_contended(false) + } + } + + #[inline] + pub fn try_write(&self) -> bool { + // Atomically set the `LOCKED` bit. This is lowered to a single atomic + // instruction on most modern processors (e.g. "lock bts" on x86 and + // "ldseta" on modern AArch64), and therefore is more efficient than + // `fetch_update(lock(true))`, which can spuriously fail if a new node + // is appended to the queue. + self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0 + } + + #[inline] + pub fn write(&self) { + if !self.try_write() { + self.lock_contended(true) + } + } + + #[cold] + fn lock_contended(&self, write: bool) { + let update = if write { write_lock } else { read_lock }; + let mut node = Node::new(write); + let mut state = self.state.load(Relaxed); + let mut count = 0; + loop { + if let Some(next) = update(state) { + // The lock is available, try locking it. 
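`PanicGuard` above is a drop bomb: while the stack node is linked into the shared queue, unwinding past it would leave dangling pointers, so the guard turns any panic into an abort until it is defused with `mem::forget`. The pattern in isolation, with illustrative names (this is not the code in this file, which uses `rtabort!`):

```rust
use std::mem;
use std::process;

struct PanicGuard;

impl Drop for PanicGuard {
    fn drop(&mut self) {
        // Only reached if we unwind before `mem::forget` below.
        eprintln!("node dropped while still linked; aborting");
        process::abort();
    }
}

fn with_linked_node(do_work: impl FnOnce()) {
    let guard = PanicGuard;
    do_work(); // if this panics, `guard` drops and the process aborts
    mem::forget(guard); // success path: disarm the guard
}

fn main() {
    with_linked_node(|| println!("signaled"));
}
```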
+ match self.state.compare_exchange_weak(state, next, Acquire, Relaxed) { + Ok(_) => return, + Err(new) => state = new, + } + } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT { + // If the lock is not available and no threads are queued, spin + // for a while, using exponential backoff to decrease cache + // contention. + for _ in 0..(1 << count) { + spin_loop(); + } + state = self.state.load(Relaxed); + count += 1; + } else { + // Fall back to parking. First, prepare the node. + node.prepare(); + + // If there are threads queued, set the `next` field to a + // pointer to the next node in the queue. Otherwise set it to + // the lock count if the state is read-locked or to zero if it + // is write-locked. + node.next.0 = AtomicPtr::new(state.mask(MASK).cast()); + node.prev = AtomicLink::new(None); + let mut next = ptr::from_ref(&node) + .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED)) + as State; + + if state.addr() & QUEUED == 0 { + // If this is the first node in the queue, set the tail field to + // the node itself to ensure there is a current `tail` field in + // the queue (invariants 1 and 2). This needs to use `set` to + // avoid invalidating the new pointer. + node.tail.set(Some(NonNull::from(&node))); + } else { + // Otherwise, the tail of the queue is not known. + node.tail.set(None); + // Try locking the queue to eagerly add backlinks. + next = next.map_addr(|addr| addr | QUEUE_LOCKED); + } + + // Register the node, using release ordering to propagate our + // changes to the waking thread. + if let Err(new) = self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { + // The state has changed, just try again. + state = new; + continue; + } + + // The node is registered, so the structure must not be + // mutably accessed or destroyed while other threads may + // be accessing it. Guard against unwinds using a panic + // guard that aborts when dropped. + let guard = PanicGuard; + + // If the current thread locked the queue, unlock it again, + // linking it in the process. + if state.addr() & (QUEUE_LOCKED | QUEUED) == QUEUED { + unsafe { + self.unlock_queue(next); + } + } + + // Wait until the node is removed from the queue. + // SAFETY: the node was created by the current thread. + unsafe { + node.wait(); + } + + // The node was removed from the queue, disarm the guard. + mem::forget(guard); + + // Reload the state and try again. + state = self.state.load(Relaxed); + count = 0; + } + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + match self.state.fetch_update(Release, Acquire, |state| { + if state.addr() & QUEUED == 0 { + let count = state.addr() - (SINGLE | LOCKED); + Some(if count > 0 { without_provenance_mut(count | LOCKED) } else { UNLOCKED }) + } else { + None + } + }) { + Ok(_) => {} + // There are waiters queued and the lock count was moved to the + // tail of the queue. + Err(state) => unsafe { self.read_unlock_contended(state) }, + } + } + + #[cold] + unsafe fn read_unlock_contended(&self, state: State) { + // The state was observed with acquire ordering above, so the current + // thread will observe all node initializations. + + // SAFETY: + // Because new read-locks cannot be acquired while threads are queued, + // all queue-lock owners will observe the set `LOCKED` bit. Because they + // do not modify the queue while there is a lock owner, the queue will + // not be removed from here. + let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() }; + // The lock count is stored in the `next` field of `tail`. 
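The fast path of `read_unlock` above leans on `fetch_update`: the closure may veto the update by returning `None`, in which case the caller gets back the value it observed and takes the slow path. The same mechanism in isolation (a standalone example, not this file):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Release}};

fn main() {
    let readers = AtomicUsize::new(2);

    // Decrement while there are readers; `Ok` carries the previous value.
    assert_eq!(readers.fetch_update(Release, Acquire, |n| n.checked_sub(1)), Ok(2));
    assert_eq!(readers.fetch_update(Release, Acquire, |n| n.checked_sub(1)), Ok(1));
    // Refuse the update once it would underflow; `Err` carries the current value.
    assert_eq!(readers.fetch_update(Release, Acquire, |n| n.checked_sub(1)), Err(0));
}
```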
+ // Decrement it, making sure to observe all changes made to the queue + // by the other lock owners by using acquire-release ordering. + let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0; + if was_last { + // SAFETY: + // Other threads cannot read-lock while threads are queued. Also, + // the `LOCKED` bit is still set, so there are no writers. Therefore, + // the current thread exclusively owns the lock. + unsafe { self.unlock_contended(state) } + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + if let Err(state) = + self.state.compare_exchange(without_provenance_mut(LOCKED), UNLOCKED, Release, Relaxed) + { + // SAFETY: + // Since other threads cannot acquire the lock, the state can only + // have changed because there are threads queued on the lock. + unsafe { self.unlock_contended(state) } + } + } + + /// # Safety + /// * The lock must be exclusively owned by this thread. + /// * There must be threads queued on the lock. + #[cold] + unsafe fn unlock_contended(&self, mut state: State) { + loop { + // Atomically release the lock and try to acquire the queue lock. + let next = state.map_addr(|a| (a & !LOCKED) | QUEUE_LOCKED); + match self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { + // The queue lock was acquired. Release it, waking up the next + // waiter in the process. + Ok(_) if state.addr() & QUEUE_LOCKED == 0 => unsafe { + return self.unlock_queue(next); + }, + // Another thread already holds the queue lock, leave waking up + // waiters to it. + Ok(_) => return, + Err(new) => state = new, + } + } + } + + /// Unlocks the queue. If the lock is unlocked, wakes up the next eligible + /// thread(s). + /// + /// # Safety + /// The queue lock must be held by the current thread. + unsafe fn unlock_queue(&self, mut state: State) { + debug_assert_eq!(state.addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED); + + loop { + let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) }; + + if state.addr() & LOCKED == LOCKED { + // Another thread has locked the lock. Leave waking up waiters + // to them by releasing the queue lock. + match self.state.compare_exchange_weak( + state, + state.mask(!QUEUE_LOCKED), + Release, + Acquire, + ) { + Ok(_) => return, + Err(new) => { + state = new; + continue; + } + } + } + + let is_writer = unsafe { tail.as_ref().write }; + if is_writer && let Some(prev) = unsafe { tail.as_ref().prev.get() } { + // `tail` is a writer and there is a node before `tail`. + // Split off `tail`. + + // There are no set `tail` links before the node pointed to by + // `state`, so the first non-null tail field will be current + // (invariant 2). Invariant 4 is fullfilled since `find_tail` + // was called on this node, which ensures all backlinks are set. + unsafe { + to_node(state).as_ref().tail.set(Some(prev)); + } + + // Release the queue lock. Doing this by subtraction is more + // efficient on modern processors since it is a single instruction + // instead of an update loop, which will fail if new threads are + // added to the list. + self.state.fetch_byte_sub(QUEUE_LOCKED, Release); + + // The tail was split off and the lock released. Mark the node as + // completed. + unsafe { + return Node::complete(tail); + } + } else { + // The next waiter is a reader or the queue only consists of one + // waiter. Just wake all threads. + + // The lock cannot be locked (checked above), so mark it as + // unlocked to reset the queue. 
+ if let Err(new) = + self.state.compare_exchange_weak(state, UNLOCKED, Release, Acquire) + { + state = new; + continue; + } + + let mut current = tail; + loop { + let prev = unsafe { current.as_ref().prev.get() }; + unsafe { + Node::complete(current); + } + match prev { + Some(prev) => current = prev, + None => return, + } + } + } + } + } +} diff --git a/library/std/src/sys/sync/rwlock/sgx.rs b/library/std/src/sys/sync/rwlock/sgx.rs new file mode 100644 index 00000000000..136dea597bb --- /dev/null +++ b/library/std/src/sys/sync/rwlock/sgx.rs @@ -0,0 +1,219 @@ +#[cfg(test)] +mod tests; + +use crate::alloc::Layout; +use crate::num::NonZero; +use crate::sys::pal::waitqueue::{ + try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable, +}; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; + +struct AllocatedRwLock { + readers: SpinMutex>>>, + writer: SpinMutex>, +} + +pub struct RwLock { + inner: LazyBox, +} + +impl LazyInit for AllocatedRwLock { + fn init() -> Box { + Box::new(AllocatedRwLock { + readers: SpinMutex::new(WaitVariable::new(None)), + writer: SpinMutex::new(WaitVariable::new(false)), + }) + } +} + +// Check at compile time that RwLock's size and alignment matches the C definition +// in libunwind (see also `test_c_rwlock_initializer` in `tests`). +const _: () = { + let rust = Layout::new::(); + let c = Layout::new::<*mut ()>(); + assert!(rust.size() == c.size()); + assert!(rust.align() == c.align()); +}; + +impl RwLock { + pub const fn new() -> RwLock { + RwLock { inner: LazyBox::new() } + } + + #[inline] + pub fn read(&self) { + let lock = &*self.inner; + let mut rguard = lock.readers.lock(); + let wguard = lock.writer.lock(); + if *wguard.lock_var() || !wguard.queue_empty() { + // Another thread has or is waiting for the write lock, wait + drop(wguard); + WaitQueue::wait(rguard, || {}); + // Another thread has passed the lock to us + } else { + // No waiting writers, acquire the read lock + *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); + } + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + let lock = &*self.inner; + let mut rguard = try_lock_or_false!(lock.readers); + let wguard = try_lock_or_false!(lock.writer); + if *wguard.lock_var() || !wguard.queue_empty() { + // Another thread has or is waiting for the write lock + false + } else { + // No waiting writers, acquire the read lock + *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); + true + } + } + + #[inline] + pub fn write(&self) { + let lock = &*self.inner; + let rguard = lock.readers.lock(); + let mut wguard = lock.writer.lock(); + if *wguard.lock_var() || rguard.lock_var().is_some() { + // Another thread has the lock, wait + drop(rguard); + WaitQueue::wait(wguard, || {}); + // Another thread has passed the lock to us + } else { + // We are just now obtaining the lock + *wguard.lock_var_mut() = true; + } + } + + #[inline] + pub fn try_write(&self) -> bool { + let lock = &*self.inner; + let rguard = try_lock_or_false!(lock.readers); + let mut wguard = try_lock_or_false!(lock.writer); + if *wguard.lock_var() || rguard.lock_var().is_some() { + // Another thread has the lock + false + } else { + // We are just now obtaining the lock + *wguard.lock_var_mut() = true; + true + } + } + + #[inline] + unsafe fn __read_unlock( + &self, + mut rguard: SpinMutexGuard<'_, WaitVariable>>>, + wguard: SpinMutexGuard<'_, WaitVariable>, + ) { + *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().unwrap().get() - 1); + if 
rguard.lock_var().is_some() { + // There are other active readers + } else { + if let Ok(mut wguard) = WaitQueue::notify_one(wguard) { + // A writer was waiting, pass the lock + *wguard.lock_var_mut() = true; + wguard.drop_after(rguard); + } else { + // No writers were waiting, the lock is released + rtassert!(rguard.queue_empty()); + } + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + let lock = &*self.inner; + let rguard = lock.readers.lock(); + let wguard = lock.writer.lock(); + unsafe { self.__read_unlock(rguard, wguard) }; + } + + #[inline] + unsafe fn __write_unlock( + &self, + rguard: SpinMutexGuard<'_, WaitVariable>>>, + wguard: SpinMutexGuard<'_, WaitVariable>, + ) { + match WaitQueue::notify_one(wguard) { + Err(mut wguard) => { + // No writers waiting, release the write lock + *wguard.lock_var_mut() = false; + if let Ok(mut rguard) = WaitQueue::notify_all(rguard) { + // One or more readers were waiting, pass the lock to them + if let NotifiedTcs::All { count } = rguard.notified_tcs() { + *rguard.lock_var_mut() = Some(count) + } else { + unreachable!() // called notify_all + } + rguard.drop_after(wguard); + } else { + // No readers waiting, the lock is released + } + } + Ok(wguard) => { + // There was a thread waiting for write, just pass the lock + wguard.drop_after(rguard); + } + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + let lock = &*self.inner; + let rguard = lock.readers.lock(); + let wguard = lock.writer.lock(); + unsafe { self.__write_unlock(rguard, wguard) }; + } + + // only used by __rust_rwlock_unlock below + #[inline] + #[cfg_attr(test, allow(dead_code))] + unsafe fn unlock(&self) { + let lock = &*self.inner; + let rguard = lock.readers.lock(); + let wguard = lock.writer.lock(); + if *wguard.lock_var() == true { + unsafe { self.__write_unlock(rguard, wguard) }; + } else { + unsafe { self.__read_unlock(rguard, wguard) }; + } + } +} + +// The following functions are needed by libunwind. These symbols are named +// in pre-link args for the target specification, so keep that in sync. +#[cfg(not(test))] +const EINVAL: i32 = 22; + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 { + if p.is_null() { + return EINVAL; + } + unsafe { (*p).read() }; + return 0; +} + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 { + if p.is_null() { + return EINVAL; + } + unsafe { (*p).write() }; + return 0; +} + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 { + if p.is_null() { + return EINVAL; + } + unsafe { (*p).unlock() }; + return 0; +} diff --git a/library/std/src/sys/sync/rwlock/sgx/tests.rs b/library/std/src/sys/sync/rwlock/sgx/tests.rs new file mode 100644 index 00000000000..5fd6670afd4 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/sgx/tests.rs @@ -0,0 +1,21 @@ +use super::*; +use crate::ptr; + +// Verify that the byte pattern libunwind uses to initialize an RwLock is +// equivalent to the value of RwLock::new(). If the value changes, +// `src/UnwindRustSgx.h` in libunwind needs to be changed too. +#[test] +fn test_c_rwlock_initializer() { + const C_RWLOCK_INIT: *mut () = ptr::null_mut(); + + // For the test to work, we need the padding/unused bytes in RwLock to be + // initialized as 0. In practice, this is the case with statics. + static RUST_RWLOCK_INIT: RwLock = RwLock::new(); + + unsafe { + // If the assertion fails, that not necessarily an issue with the value + // of C_RWLOCK_INIT. 
It might just be an issue with the way padding + // bytes are initialized in the test code. + assert_eq!(crate::mem::transmute_copy::<_, *mut ()>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT); + }; +} diff --git a/library/std/src/sys/sync/rwlock/solid.rs b/library/std/src/sys/sync/rwlock/solid.rs new file mode 100644 index 00000000000..9bf6f5dbb73 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/solid.rs @@ -0,0 +1,93 @@ +//! A readers-writer lock implementation backed by the SOLID kernel extension. +use crate::sys::pal::{ + abi, + itron::{ + error::{expect_success, expect_success_aborting, fail, ItronError}, + spin::SpinIdOnceCell, + }, +}; + +pub struct RwLock { + /// The ID of the underlying mutex object + rwl: SpinIdOnceCell<()>, +} + +// Safety: `num_readers` is protected by `mtx_num_readers` +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_rwl() -> Result { + ItronError::err_if_negative(unsafe { abi::rwl_acre_rwl() }) +} + +impl RwLock { + #[inline] + pub const fn new() -> RwLock { + RwLock { rwl: SpinIdOnceCell::new() } + } + + /// Get the inner mutex's ID, which is lazily created. + fn raw(&self) -> abi::ID { + match self.rwl.get_or_try_init(|| new_rwl().map(|id| (id, ()))) { + Ok((id, ())) => id, + Err(e) => fail(e, &"rwl_acre_rwl"), + } + } + + #[inline] + pub fn read(&self) { + let rwl = self.raw(); + expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl"); + } + + #[inline] + pub fn try_read(&self) -> bool { + let rwl = self.raw(); + match unsafe { abi::rwl_ploc_rdl(rwl) } { + abi::E_TMOUT => false, + er => { + expect_success(er, &"rwl_ploc_rdl"); + true + } + } + } + + #[inline] + pub fn write(&self) { + let rwl = self.raw(); + expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl"); + } + + #[inline] + pub fn try_write(&self) -> bool { + let rwl = self.raw(); + match unsafe { abi::rwl_ploc_wrl(rwl) } { + abi::E_TMOUT => false, + er => { + expect_success(er, &"rwl_ploc_wrl"); + true + } + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + let rwl = self.raw(); + expect_success_aborting(unsafe { abi::rwl_unl_rwl(rwl) }, &"rwl_unl_rwl"); + } + + #[inline] + pub unsafe fn write_unlock(&self) { + let rwl = self.raw(); + expect_success_aborting(unsafe { abi::rwl_unl_rwl(rwl) }, &"rwl_unl_rwl"); + } +} + +impl Drop for RwLock { + #[inline] + fn drop(&mut self) { + if let Some(rwl) = self.rwl.get().map(|x| x.0) { + expect_success_aborting(unsafe { abi::rwl_del_rwl(rwl) }, &"rwl_del_rwl"); + } + } +} diff --git a/library/std/src/sys/sync/rwlock/teeos.rs b/library/std/src/sys/sync/rwlock/teeos.rs new file mode 100644 index 00000000000..ef9b1ab5154 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/teeos.rs @@ -0,0 +1,44 @@ +use crate::sys::sync::mutex::Mutex; + +/// we do not supported rwlock, so use mutex to simulate rwlock. +/// it's useful because so many code in std will use rwlock. 
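Spelled out with the public types, the fallback is simply this: every read or write acquisition takes an exclusive mutex lock, which is correct but gives readers no concurrency. A hypothetical user-level sketch (not the code in this file):

```rust
use std::sync::{Mutex, MutexGuard};

struct SimRwLock<T>(Mutex<T>);

impl<T> SimRwLock<T> {
    fn read(&self) -> MutexGuard<'_, T> {
        // Readers exclude each other too; that is the price of the fallback.
        self.0.lock().unwrap()
    }
    fn write(&self) -> MutexGuard<'_, T> {
        self.0.lock().unwrap()
    }
}

fn main() {
    let l = SimRwLock(Mutex::new(1));
    assert_eq!(*l.read(), 1);
    *l.write() += 1;
    assert_eq!(*l.read(), 2);
}
```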
+pub struct RwLock { + inner: Mutex, +} + +impl RwLock { + #[inline] + pub const fn new() -> RwLock { + RwLock { inner: Mutex::new() } + } + + #[inline] + pub fn read(&self) { + unsafe { self.inner.lock() }; + } + + #[inline] + pub fn try_read(&self) -> bool { + unsafe { self.inner.try_lock() } + } + + #[inline] + pub fn write(&self) { + unsafe { self.inner.lock() }; + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + unsafe { self.inner.try_lock() } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + unsafe { self.inner.unlock() }; + } + + #[inline] + pub unsafe fn write_unlock(&self) { + unsafe { self.inner.unlock() }; + } +} diff --git a/library/std/src/sys/sync/rwlock/windows7.rs b/library/std/src/sys/sync/rwlock/windows7.rs new file mode 100644 index 00000000000..e69415baac4 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/windows7.rs @@ -0,0 +1,40 @@ +use crate::cell::UnsafeCell; +use crate::sys::c; + +pub struct RwLock { + inner: UnsafeCell, +} + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +impl RwLock { + #[inline] + pub const fn new() -> RwLock { + RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } + } + #[inline] + pub fn read(&self) { + unsafe { c::AcquireSRWLockShared(self.inner.get()) } + } + #[inline] + pub fn try_read(&self) -> bool { + unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 } + } + #[inline] + pub fn write(&self) { + unsafe { c::AcquireSRWLockExclusive(self.inner.get()) } + } + #[inline] + pub fn try_write(&self) -> bool { + unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 } + } + #[inline] + pub unsafe fn read_unlock(&self) { + c::ReleaseSRWLockShared(self.inner.get()) + } + #[inline] + pub unsafe fn write_unlock(&self) { + c::ReleaseSRWLockExclusive(self.inner.get()) + } +} diff --git a/library/std/src/sys/sync/rwlock/xous.rs b/library/std/src/sys/sync/rwlock/xous.rs new file mode 100644 index 00000000000..ab45b33e1f6 --- /dev/null +++ b/library/std/src/sys/sync/rwlock/xous.rs @@ -0,0 +1,74 @@ +use crate::sync::atomic::{AtomicIsize, Ordering::Acquire}; +use crate::thread::yield_now; + +pub struct RwLock { + /// The "mode" value indicates how many threads are waiting on this + /// Mutex. Possible values are: + /// -1: The lock is locked for writing + /// 0: The lock is unlocked + /// >=1: The lock is locked for reading + /// + /// This currently spins waiting for the lock to be freed. An + /// optimization would be to involve the ticktimer server to + /// coordinate unlocks. 
+ mode: AtomicIsize, +} + +const RWLOCK_WRITING: isize = -1; +const RWLOCK_FREE: isize = 0; + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +impl RwLock { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> RwLock { + RwLock { mode: AtomicIsize::new(RWLOCK_FREE) } + } + + #[inline] + pub unsafe fn read(&self) { + while !unsafe { self.try_read() } { + yield_now(); + } + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + self.mode + .fetch_update( + Acquire, + Acquire, + |v| if v == RWLOCK_WRITING { None } else { Some(v + 1) }, + ) + .is_ok() + } + + #[inline] + pub unsafe fn write(&self) { + while !unsafe { self.try_write() } { + yield_now(); + } + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + self.mode.compare_exchange(RWLOCK_FREE, RWLOCK_WRITING, Acquire, Acquire).is_ok() + } + + #[inline] + pub unsafe fn read_unlock(&self) { + let previous = self.mode.fetch_sub(1, Acquire); + assert!(previous != RWLOCK_FREE); + assert!(previous != RWLOCK_WRITING); + } + + #[inline] + pub unsafe fn write_unlock(&self) { + assert_eq!( + self.mode.compare_exchange(RWLOCK_WRITING, RWLOCK_FREE, Acquire, Acquire), + Ok(RWLOCK_WRITING) + ); + } +} diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index c9025a81bf3..5410f135a73 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -24,7 +24,6 @@ pub mod backtrace; pub mod fs; pub mod io; pub mod lazy_box; -pub mod once; pub mod process; pub mod thread; pub mod thread_info; diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs deleted file mode 100644 index 609085dcd47..00000000000 --- a/library/std/src/sys_common/once/futex.rs +++ /dev/null @@ -1,146 +0,0 @@ -use crate::cell::Cell; -use crate::sync as public; -use crate::sync::atomic::{ - AtomicU32, - Ordering::{Acquire, Relaxed, Release}, -}; -use crate::sync::once::ExclusiveState; -use crate::sys::futex::{futex_wait, futex_wake_all}; - -// On some platforms, the OS is very nice and handles the waiter queue for us. -// This means we only need one atomic value with 5 states: - -/// No initialization has run yet, and no thread is currently using the Once. -const INCOMPLETE: u32 = 0; -/// Some thread has previously attempted to initialize the Once, but it panicked, -/// so the Once is now poisoned. There are no other threads currently accessing -/// this Once. -const POISONED: u32 = 1; -/// Some thread is currently attempting to run initialization. It may succeed, -/// so all future threads need to wait for it to finish. -const RUNNING: u32 = 2; -/// Some thread is currently attempting to run initialization and there are threads -/// waiting for it to finish. -const QUEUED: u32 = 3; -/// Initialization has completed and all future calls should finish immediately. -const COMPLETE: u32 = 4; - -// Threads wait by setting the state to QUEUED and calling `futex_wait` on the state -// variable. When the running thread finishes, it will wake all waiting threads using -// `futex_wake_all`. 
- -pub struct OnceState { - poisoned: bool, - set_state_to: Cell, -} - -impl OnceState { - #[inline] - pub fn is_poisoned(&self) -> bool { - self.poisoned - } - - #[inline] - pub fn poison(&self) { - self.set_state_to.set(POISONED); - } -} - -struct CompletionGuard<'a> { - state: &'a AtomicU32, - set_state_on_drop_to: u32, -} - -impl<'a> Drop for CompletionGuard<'a> { - fn drop(&mut self) { - // Use release ordering to propagate changes to all threads checking - // up on the Once. `futex_wake_all` does its own synchronization, hence - // we do not need `AcqRel`. - if self.state.swap(self.set_state_on_drop_to, Release) == QUEUED { - futex_wake_all(&self.state); - } - } -} - -pub struct Once { - state: AtomicU32, -} - -impl Once { - #[inline] - pub const fn new() -> Once { - Once { state: AtomicU32::new(INCOMPLETE) } - } - - #[inline] - pub fn is_completed(&self) -> bool { - // Use acquire ordering to make all initialization changes visible to the - // current thread. - self.state.load(Acquire) == COMPLETE - } - - #[inline] - pub(crate) fn state(&mut self) -> ExclusiveState { - match *self.state.get_mut() { - INCOMPLETE => ExclusiveState::Incomplete, - POISONED => ExclusiveState::Poisoned, - COMPLETE => ExclusiveState::Complete, - _ => unreachable!("invalid Once state"), - } - } - - // This uses FnMut to match the API of the generic implementation. As this - // implementation is quite light-weight, it is generic over the closure and - // so avoids the cost of dynamic dispatch. - #[cold] - #[track_caller] - pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { - let mut state = self.state.load(Acquire); - loop { - match state { - POISONED if !ignore_poisoning => { - // Panic to propagate the poison. - panic!("Once instance has previously been poisoned"); - } - INCOMPLETE | POISONED => { - // Try to register the current thread as the one running. - if let Err(new) = - self.state.compare_exchange_weak(state, RUNNING, Acquire, Acquire) - { - state = new; - continue; - } - // `waiter_queue` will manage other waiting threads, and - // wake them up on drop. - let mut waiter_queue = - CompletionGuard { state: &self.state, set_state_on_drop_to: POISONED }; - // Run the function, letting it know if we're poisoned or not. - let f_state = public::OnceState { - inner: OnceState { - poisoned: state == POISONED, - set_state_to: Cell::new(COMPLETE), - }, - }; - f(&f_state); - waiter_queue.set_state_on_drop_to = f_state.inner.set_state_to.get(); - return; - } - RUNNING | QUEUED => { - // Set the state to QUEUED if it is not already. - if state == RUNNING - && let Err(new) = - self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire) - { - state = new; - continue; - } - - futex_wait(&self.state, QUEUED, None); - state = self.state.load(Acquire); - } - COMPLETE => return, - _ => unreachable!("state is never set to invalid values"), - } - } - } -} diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs deleted file mode 100644 index ec57568c54c..00000000000 --- a/library/std/src/sys_common/once/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -// A "once" is a relatively simple primitive, and it's also typically provided -// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS -// primitives, however, tend to have surprising restrictions, such as the Unix -// one doesn't allow an argument to be passed to the function. -// -// As a result, we end up implementing it ourselves in the standard library. 
-// This also gives us the opportunity to optimize the implementation a bit which -// should help the fast path on call sites. - -cfg_if::cfg_if! { - if #[cfg(any( - target_os = "linux", - target_os = "android", - all(target_arch = "wasm32", target_feature = "atomics"), - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "hermit", - ))] { - mod futex; - pub use futex::{Once, OnceState}; - } else if #[cfg(any( - windows, - target_family = "unix", - all(target_vendor = "fortanix", target_env = "sgx"), - target_os = "solid_asp3", - target_os = "xous", - ))] { - mod queue; - pub use queue::{Once, OnceState}; - } else { - pub use crate::sys::once::{Once, OnceState}; - } -} diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs deleted file mode 100644 index 730cdb768bd..00000000000 --- a/library/std/src/sys_common/once/queue.rs +++ /dev/null @@ -1,294 +0,0 @@ -// Each `Once` has one word of atomic state, and this state is CAS'd on to -// determine what to do. There are four possible state of a `Once`: -// -// * Incomplete - no initialization has run yet, and no thread is currently -// using the Once. -// * Poisoned - some thread has previously attempted to initialize the Once, but -// it panicked, so the Once is now poisoned. There are no other -// threads currently accessing this Once. -// * Running - some thread is currently attempting to run initialization. It may -// succeed, so all future threads need to wait for it to finish. -// Note that this state is accompanied with a payload, described -// below. -// * Complete - initialization has completed and all future calls should finish -// immediately. -// -// With 4 states we need 2 bits to encode this, and we use the remaining bits -// in the word we have allocated as a queue of threads waiting for the thread -// responsible for entering the RUNNING state. This queue is just a linked list -// of Waiter nodes which is monotonically increasing in size. Each node is -// allocated on the stack, and whenever the running closure finishes it will -// consume the entire queue and notify all waiters they should try again. -// -// You'll find a few more details in the implementation, but that's the gist of -// it! -// -// Atomic orderings: -// When running `Once` we deal with multiple atomics: -// `Once.state_and_queue` and an unknown number of `Waiter.signaled`. -// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the -// result of the `Once`, and (3) for synchronizing `Waiter` nodes. -// - At the end of the `call` function we have to make sure the result -// of the `Once` is acquired. So every load which can be the only one to -// load COMPLETED must have at least acquire ordering, which means all -// three of them. -// - `WaiterQueue::drop` is the only place that may store COMPLETED, and -// must do so with release ordering to make the result available. -// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and -// needs to make the nodes available with release ordering. The load in -// its `compare_exchange` can be relaxed because it only has to compare -// the atomic, not to read other data. -// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load -// `state_and_queue` with acquire ordering. -// - There is just one store where `state_and_queue` is used only as a -// state flag, without having to synchronize data: switching the state -// from INCOMPLETE to RUNNING in `call`. 
This store can be Relaxed, -// but the read has to be Acquire because of the requirements mentioned -// above. -// * `Waiter.signaled` is both used as a flag, and to protect a field with -// interior mutability in `Waiter`. `Waiter.thread` is changed in -// `WaiterQueue::drop` which then sets `signaled` with release ordering. -// After `wait` loads `signaled` with acquire ordering and sees it is true, -// it needs to see the changes to drop the `Waiter` struct correctly. -// * There is one place where the two atomics `Once.state_and_queue` and -// `Waiter.signaled` come together, and might be reordered by the compiler or -// processor. Because both use acquire ordering such a reordering is not -// allowed, so no need for `SeqCst`. - -use crate::cell::Cell; -use crate::fmt; -use crate::ptr; -use crate::sync as public; -use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -use crate::sync::once::ExclusiveState; -use crate::thread::{self, Thread}; - -type Masked = (); - -pub struct Once { - state_and_queue: AtomicPtr, -} - -pub struct OnceState { - poisoned: bool, - set_state_on_drop_to: Cell<*mut Masked>, -} - -// Four states that a Once can be in, encoded into the lower bits of -// `state_and_queue` in the Once structure. -const INCOMPLETE: usize = 0x0; -const POISONED: usize = 0x1; -const RUNNING: usize = 0x2; -const COMPLETE: usize = 0x3; - -// Mask to learn about the state. All other bits are the queue of waiters if -// this is in the RUNNING state. -const STATE_MASK: usize = 0x3; - -// Representation of a node in the linked list of waiters, used while in the -// RUNNING state. -// Note: `Waiter` can't hold a mutable pointer to the next thread, because then -// `wait` would both hand out a mutable reference to its `Waiter` node, and keep -// a shared reference to check `signaled`. Instead we hold shared references and -// use interior mutability. -#[repr(align(4))] // Ensure the two lower bits are free to use as state bits. -struct Waiter { - thread: Cell>, - signaled: AtomicBool, - next: *const Waiter, -} - -// Head of a linked list of waiters. -// Every node is a struct on the stack of a waiting thread. -// Will wake up the waiters when it gets dropped, i.e. also on panic. -struct WaiterQueue<'a> { - state_and_queue: &'a AtomicPtr, - set_state_on_drop_to: *mut Masked, -} - -impl Once { - #[inline] - #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] - pub const fn new() -> Once { - Once { state_and_queue: AtomicPtr::new(ptr::without_provenance_mut(INCOMPLETE)) } - } - - #[inline] - pub fn is_completed(&self) -> bool { - // An `Acquire` load is enough because that makes all the initialization - // operations visible to us, and, this being a fast path, weaker - // ordering helps with performance. This `Acquire` synchronizes with - // `Release` operations on the slow path. - self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE - } - - #[inline] - pub(crate) fn state(&mut self) -> ExclusiveState { - match self.state_and_queue.get_mut().addr() { - INCOMPLETE => ExclusiveState::Incomplete, - POISONED => ExclusiveState::Poisoned, - COMPLETE => ExclusiveState::Complete, - _ => unreachable!("invalid Once state"), - } - } - - // This is a non-generic function to reduce the monomorphization cost of - // using `call_once` (this isn't exactly a trivial or small implementation). - // - // Additionally, this is tagged with `#[cold]` as it should indeed be cold - // and it helps let LLVM know that calls to this function should be off the - // fast path. 
Essentially, this should help generate more straight line code - // in LLVM. - // - // Finally, this takes an `FnMut` instead of a `FnOnce` because there's - // currently no way to take an `FnOnce` and call it via virtual dispatch - // without some allocation overhead. - #[cold] - #[track_caller] - pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) { - let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); - loop { - match state_and_queue.addr() { - COMPLETE => break, - POISONED if !ignore_poisoning => { - // Panic to propagate the poison. - panic!("Once instance has previously been poisoned"); - } - POISONED | INCOMPLETE => { - // Try to register this thread as the one RUNNING. - let exchange_result = self.state_and_queue.compare_exchange( - state_and_queue, - ptr::without_provenance_mut(RUNNING), - Ordering::Acquire, - Ordering::Acquire, - ); - if let Err(old) = exchange_result { - state_and_queue = old; - continue; - } - // `waiter_queue` will manage other waiting threads, and - // wake them up on drop. - let mut waiter_queue = WaiterQueue { - state_and_queue: &self.state_and_queue, - set_state_on_drop_to: ptr::without_provenance_mut(POISONED), - }; - // Run the initialization function, letting it know if we're - // poisoned or not. - let init_state = public::OnceState { - inner: OnceState { - poisoned: state_and_queue.addr() == POISONED, - set_state_on_drop_to: Cell::new(ptr::without_provenance_mut(COMPLETE)), - }, - }; - init(&init_state); - waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get(); - break; - } - _ => { - // All other values must be RUNNING with possibly a - // pointer to the waiter queue in the more significant bits. - assert!(state_and_queue.addr() & STATE_MASK == RUNNING); - wait(&self.state_and_queue, state_and_queue); - state_and_queue = self.state_and_queue.load(Ordering::Acquire); - } - } - } - } -} - -fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { - // Note: the following code was carefully written to avoid creating a - // mutable reference to `node` that gets aliased. - loop { - // Don't queue this thread if the status is no longer running, - // otherwise we will not be woken up. - if current_state.addr() & STATE_MASK != RUNNING { - return; - } - - // Create the node for our current thread. - let node = Waiter { - thread: Cell::new(Some(thread::current())), - signaled: AtomicBool::new(false), - next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, - }; - let me = core::ptr::addr_of!(node) as *const Masked as *mut Masked; - - // Try to slide in the node at the head of the linked list, making sure - // that another thread didn't just replace the head of the linked list. - let exchange_result = state_and_queue.compare_exchange( - current_state, - me.with_addr(me.addr() | RUNNING), - Ordering::Release, - Ordering::Relaxed, - ); - if let Err(old) = exchange_result { - current_state = old; - continue; - } - - // We have enqueued ourselves, now lets wait. - // It is important not to return before being signaled, otherwise we - // would drop our `Waiter` node and leave a hole in the linked list - // (and a dangling reference). Guard against spurious wakeups by - // reparking ourselves until we are signaled. - while !node.signaled.load(Ordering::Acquire) { - // If the managing thread happens to signal and unpark us before we - // can park ourselves, the result could be this thread never gets - // unparked. 
Luckily `park` comes with the guarantee that if it got - // an `unpark` just before on an unparked thread it does not park. - thread::park(); - } - break; - } -} - -#[stable(feature = "std_debug", since = "1.16.0")] -impl fmt::Debug for Once { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Once").finish_non_exhaustive() - } -} - -impl Drop for WaiterQueue<'_> { - fn drop(&mut self) { - // Swap out our state with however we finished. - let state_and_queue = - self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); - - // We should only ever see an old state which was RUNNING. - assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); - - // Walk the entire linked list of waiters and wake them up (in lifo - // order, last to register is first to wake up). - unsafe { - // Right after setting `node.signaled = true` the other thread may - // free `node` if there happens to be has a spurious wakeup. - // So we have to take out the `thread` field and copy the pointer to - // `next` first. - let mut queue = - state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; - while !queue.is_null() { - let next = (*queue).next; - let thread = (*queue).thread.take().unwrap(); - (*queue).signaled.store(true, Ordering::Release); - // ^- FIXME (maybe): This is another case of issue #55005 - // `store()` has a potentially dangling ref to `signaled`. - queue = next; - thread.unpark(); - } - } - } -} - -impl OnceState { - #[inline] - pub fn is_poisoned(&self) -> bool { - self.poisoned - } - - #[inline] - pub fn poison(&self) { - self.set_state_on_drop_to.set(ptr::without_provenance_mut(POISONED)); - } -} -- cgit 1.4.1-3-g733a5
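
Both the futex-based and the queue-based `Once` backends moved by this patch sit behind the same public `std::sync::Once` API, so their observable semantics — one-shot completion, poisoning when the initializer panics, and recovery via `call_once_force` (the `ignore_poisoning` path in the `call` functions above) — can be exercised from stable code. The following stand-alone program is not part of the patch; it is only a minimal illustration of the behaviour the platform backends must provide.

use std::sync::Once;
use std::thread;

static INIT: Once = Once::new();

fn main() {
    // A panicking initializer poisons the Once; the backends above record
    // this by having their completion guard set the state to POISONED on drop.
    let poisoner = thread::spawn(|| INIT.call_once(|| panic!("init failed")));
    assert!(poisoner.join().is_err());
    assert!(!INIT.is_completed());

    // A plain `call_once` on a poisoned Once propagates the poison by
    // panicking, matching the `ignore_poisoning == false` branch above.
    assert!(thread::spawn(|| INIT.call_once(|| {})).join().is_err());

    // `call_once_force` corresponds to `ignore_poisoning == true`: the
    // closure runs anyway and can observe the poisoned state.
    INIT.call_once_force(|state| assert!(state.is_poisoned()));
    assert!(INIT.is_completed());
}

The panic messages printed by the two helper threads are expected output; after `call_once_force` the Once is completed and behaves normally for all later callers.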
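
The comments in the relocated futex implementation describe a small state machine (INCOMPLETE → RUNNING → COMPLETE, with POISONED and QUEUED on the side). The sketch below is not part of the patch and deliberately omits poisoning, panic handling and the QUEUED/futex wake-up path, replacing blocking with `yield_now`; it only illustrates the compare-and-swap hand-off and the Acquire/Release pairing that the real backend relies on.

use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Release}};
use std::thread;

const INCOMPLETE: u32 = 0;
const RUNNING: u32 = 2;
const COMPLETE: u32 = 4;

struct SpinOnce {
    state: AtomicU32,
}

impl SpinOnce {
    const fn new() -> Self {
        SpinOnce { state: AtomicU32::new(INCOMPLETE) }
    }

    fn call_once(&self, f: impl FnOnce()) {
        // Try to move INCOMPLETE -> RUNNING; the winner runs the initializer.
        match self.state.compare_exchange(INCOMPLETE, RUNNING, Acquire, Acquire) {
            Ok(_) => {
                f();
                // Release publishes the initializer's writes together with the
                // COMPLETE state, pairing with the Acquire loads below.
                self.state.store(COMPLETE, Release);
            }
            Err(_) => {
                // Losers spin until the winner is done. The real backend parks
                // on the futex instead, after upgrading RUNNING to QUEUED so
                // the winner knows it has to call `futex_wake_all`.
                while self.state.load(Acquire) != COMPLETE {
                    thread::yield_now();
                }
            }
        }
    }
}

fn main() {
    static ONCE: SpinOnce = SpinOnce::new();
    let handles: Vec<_> = (0..4)
        .map(|i| thread::spawn(move || ONCE.call_once(|| println!("initialized by thread {i}"))))
        .collect();
    for h in handles {
        h.join().unwrap();
    }
}

Exactly one of the four threads prints; the others observe COMPLETE and return immediately, which is the fast path both real implementations optimize for.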