From 52fa9daa645772603dc170a3f3bed5cab73d646d Mon Sep 17 00:00:00 2001
From: Christiaan Dirkx
Date: Wed, 28 Apr 2021 15:51:14 +0200
Subject: Rework `wasm::thread` to `thread_atomics`

---
 library/std/src/sys/wasm/mod.rs            |  5 +-
 library/std/src/sys/wasm/thread.rs         | 98 ------------------------------
 library/std/src/sys/wasm/thread_atomics.rs | 84 +++++++++++++++++++++++++
 3 files changed, 88 insertions(+), 99 deletions(-)
 delete mode 100644 library/std/src/sys/wasm/thread.rs
 create mode 100644 library/std/src/sys/wasm/thread_atomics.rs

diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 8705910c73a..71877fce993 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -37,7 +37,6 @@ pub mod pipe;
 pub mod process;
 #[path = "../unsupported/stdio.rs"]
 pub mod stdio;
-pub mod thread;
 #[path = "../unsupported/thread_local_dtor.rs"]
 pub mod thread_local_dtor;
 #[path = "../unsupported/thread_local_key.rs"]
@@ -57,6 +56,8 @@ cfg_if::cfg_if! {
         pub mod rwlock;
         #[path = "futex_atomics.rs"]
         pub mod futex;
+        #[path = "thread_atomics.rs"]
+        pub mod thread;
     } else {
         #[path = "../unsupported/condvar.rs"]
         pub mod condvar;
@@ -64,6 +65,8 @@ cfg_if::cfg_if! {
         pub mod mutex;
         #[path = "../unsupported/rwlock.rs"]
         pub mod rwlock;
+        #[path = "../unsupported/thread.rs"]
+        pub mod thread;
     }
 }

diff --git a/library/std/src/sys/wasm/thread.rs b/library/std/src/sys/wasm/thread.rs
deleted file mode 100644
index b7bf95c89b4..00000000000
--- a/library/std/src/sys/wasm/thread.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-use crate::ffi::CStr;
-use crate::io;
-use crate::sys::unsupported;
-use crate::time::Duration;
-
-pub struct Thread(!);
-
-pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
-
-impl Thread {
-    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
-    pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
-        unsupported()
-    }
-
-    pub fn yield_now() {
-        // do nothing
-    }
-
-    pub fn set_name(_name: &CStr) {
-        // nope
-    }
-
-    #[cfg(not(target_feature = "atomics"))]
-    pub fn sleep(_dur: Duration) {
-        panic!("can't sleep");
-    }
-
-    #[cfg(target_feature = "atomics")]
-    pub fn sleep(dur: Duration) {
-        use crate::arch::wasm32;
-        use crate::cmp;
-
-        // Use an atomic wait to block the current thread artificially with the
-        // given timeout. Note that we should never be notified (return value
-        // of 0) and our comparison should never fail (return value of 1), so
-        // we should only ever resume execution through a timeout (return
-        // value 2).
-        let mut nanos = dur.as_nanos();
-        while nanos > 0 {
-            let amt = cmp::min(i64::MAX as u128, nanos);
-            let mut x = 0;
-            let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
-            debug_assert_eq!(val, 2);
-            nanos -= amt;
-        }
-    }
-
-    pub fn join(self) {
-        self.0
-    }
-}
-
-pub mod guard {
-    pub type Guard = !;
-    pub unsafe fn current() -> Option<Guard> {
-        None
-    }
-    pub unsafe fn init() -> Option<Guard> {
-        None
-    }
-}
-
-// This is only used by atomics primitives when the `atomics` feature is
-// enabled. In that mode we currently just use our own thread-local to store our
-// current thread's ID, and then we lazily initialize it to something allocated
-// from a global counter.
-#[cfg(target_feature = "atomics")]
-pub fn my_id() -> u32 {
-    use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};
-
-    static NEXT_ID: AtomicU32 = AtomicU32::new(0);
-
-    #[thread_local]
-    static mut MY_ID: u32 = 0;
-
-    unsafe {
-        // If our thread ID isn't set yet then we need to allocate one. Do so
-        // with a simple "atomically add to a global counter" strategy.
-        // This strategy doesn't handle what happens when the counter
-        // overflows, however, so for now we just abort everything once the
-        // counter overflows (eventually we could have some sort of recycling
-        // scheme, or maybe this is all totally irrelevant by that point!). In
-        // any case, we're using a CAS loop instead of a `fetch_add` to ensure
-        // that the global counter never overflows.
-        if MY_ID == 0 {
-            let mut cur = NEXT_ID.load(SeqCst);
-            MY_ID = loop {
-                let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort());
-                match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
-                    Ok(_) => break next,
-                    Err(i) => cur = i,
-                }
-            };
-        }
-        MY_ID
-    }
-}

diff --git a/library/std/src/sys/wasm/thread_atomics.rs b/library/std/src/sys/wasm/thread_atomics.rs
new file mode 100644
index 00000000000..54bc877aa7d
--- /dev/null
+++ b/library/std/src/sys/wasm/thread_atomics.rs
@@ -0,0 +1,84 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+    pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+        unsupported()
+    }
+
+    pub fn yield_now() {}
+
+    pub fn set_name(_name: &CStr) {}
+
+    pub fn sleep(dur: Duration) {
+        use crate::arch::wasm32;
+        use crate::cmp;
+
+        // Use an atomic wait to block the current thread artificially with the
+        // given timeout. Note that we should never be notified (return value
+        // of 0) and our comparison should never fail (return value of 1), so
+        // we should only ever resume execution through a timeout (return
+        // value 2).
+        let mut nanos = dur.as_nanos();
+        while nanos > 0 {
+            let amt = cmp::min(i64::MAX as u128, nanos);
+            let mut x = 0;
+            let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
+            debug_assert_eq!(val, 2);
+            nanos -= amt;
+        }
+    }
+
+    pub fn join(self) {}
+}
+
+pub mod guard {
+    pub type Guard = !;
+    pub unsafe fn current() -> Option<Guard> {
+        None
+    }
+    pub unsafe fn init() -> Option<Guard> {
+        None
+    }
+}
+
+// We currently just use our own thread-local to store our
+// current thread's ID, and then we lazily initialize it to something allocated
+// from a global counter.
+pub fn my_id() -> u32 {
+    use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};
+
+    static NEXT_ID: AtomicU32 = AtomicU32::new(0);
+
+    #[thread_local]
+    static mut MY_ID: u32 = 0;
+
+    unsafe {
+        // If our thread ID isn't set yet then we need to allocate one. Do so
+        // with a simple "atomically add to a global counter" strategy.
+        // This strategy doesn't handle what happens when the counter
+        // overflows, however, so for now we just abort everything once the
+        // counter overflows (eventually we could have some sort of recycling
+        // scheme, or maybe this is all totally irrelevant by that point!). In
+        // any case, we're using a CAS loop instead of a `fetch_add` to ensure
+        // that the global counter never overflows.
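+        //
+        // Note that the first ID handed out is 1 (the counter is bumped
+        // before use), so the 0 initializer of `MY_ID` can never be a valid
+        // thread ID and safely means "no ID assigned yet".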
+        if MY_ID == 0 {
+            let mut cur = NEXT_ID.load(SeqCst);
+            MY_ID = loop {
+                let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort());
+                match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
+                    Ok(_) => break next,
+                    Err(i) => cur = i,
+                }
+            };
+        }
+        MY_ID
+    }
+}
--
cgit 1.4.1-3-g733a5

From fab841080157b0ddc6e0a0d88c4b3fc43d32f147 Mon Sep 17 00:00:00 2001
From: Christiaan Dirkx
Date: Wed, 28 Apr 2021 15:54:08 +0200
Subject: Move `wasm` atomics code to `wasm/atomics`

---
 library/std/src/sys/wasm/atomics/condvar.rs | 102 ++++++++++++++++++
 library/std/src/sys/wasm/atomics/futex.rs   |  17 +++
 library/std/src/sys/wasm/atomics/mutex.rs   | 156 ++++++++++++++++++++++++++++
 library/std/src/sys/wasm/atomics/rwlock.rs  | 144 +++++++++++++++++++++++++
 library/std/src/sys/wasm/atomics/thread.rs  |  84 +++++++++++++++
 library/std/src/sys/wasm/condvar_atomics.rs | 102 ------------------
 library/std/src/sys/wasm/futex_atomics.rs   |  17 ---
 library/std/src/sys/wasm/mod.rs             |  10 +-
 library/std/src/sys/wasm/mutex_atomics.rs   | 156 ----------------------------
 library/std/src/sys/wasm/rwlock_atomics.rs  | 144 -------------------------
 library/std/src/sys/wasm/thread_atomics.rs  |  84 ---------------
 11 files changed, 508 insertions(+), 508 deletions(-)
 create mode 100644 library/std/src/sys/wasm/atomics/condvar.rs
 create mode 100644 library/std/src/sys/wasm/atomics/futex.rs
 create mode 100644 library/std/src/sys/wasm/atomics/mutex.rs
 create mode 100644 library/std/src/sys/wasm/atomics/rwlock.rs
 create mode 100644 library/std/src/sys/wasm/atomics/thread.rs
 delete mode 100644 library/std/src/sys/wasm/condvar_atomics.rs
 delete mode 100644 library/std/src/sys/wasm/futex_atomics.rs
 delete mode 100644 library/std/src/sys/wasm/mutex_atomics.rs
 delete mode 100644 library/std/src/sys/wasm/rwlock_atomics.rs
 delete mode 100644 library/std/src/sys/wasm/thread_atomics.rs

diff --git a/library/std/src/sys/wasm/atomics/condvar.rs b/library/std/src/sys/wasm/atomics/condvar.rs
new file mode 100644
index 00000000000..0c1c076cc91
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/condvar.rs
@@ -0,0 +1,102 @@
+use crate::arch::wasm32;
+use crate::cmp;
+use crate::mem;
+use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use crate::sys::mutex::Mutex;
+use crate::time::Duration;
+
+pub struct Condvar {
+    cnt: AtomicUsize,
+}
+
+pub type MovableCondvar = Condvar;
+
+// Condition variables are implemented with a simple counter internally that is
+// likely to cause spurious wakeups. Blocking on a condition variable will first
+// read the value of the internal counter, unlock the given mutex, and then
+// block if and only if the counter's value is still the same. Notifying a
+// condition variable will modify the counter (add one for now) and then wake up
+// a thread waiting on the address of the counter.
+//
+// A thread waiting on the condition variable will as a result avoid going to
+// sleep if it's notified after the lock is unlocked but before it fully goes to
+// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
+// only be woken up with a call to `wake`.
+//
+// Note that it's possible for 2 or more threads to be woken up by a call to
+// `notify_one` with this implementation. That can happen when the modification
+// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
+// and the subsequent `wake` may wake up a thread that's actually blocking. We
+// consider this a spurious wakeup, though, which all users of condition
+// variables must already be prepared to handle. As a result, this source of
+// spurious wakeups is currently thought to be ok, although it may be problematic
+// later on if it causes too many spurious wakeups.
+
+impl Condvar {
+    pub const fn new() -> Condvar {
+        Condvar { cnt: AtomicUsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // nothing to do
+    }
+
+    pub unsafe fn notify_one(&self) {
+        self.cnt.fetch_add(1, SeqCst);
+        // SAFETY: ptr() is always valid
+        unsafe {
+            wasm32::memory_atomic_notify(self.ptr(), 1);
+        }
+    }
+
+    #[inline]
+    pub unsafe fn notify_all(&self) {
+        self.cnt.fetch_add(1, SeqCst);
+        // SAFETY: ptr() is always valid
+        unsafe {
+            wasm32::memory_atomic_notify(self.ptr(), u32::MAX); // -1 == "wake everyone"
+        }
+    }
+
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        // "atomically block and unlock" implemented by loading our current
+        // counter's value, unlocking the mutex, and blocking if the counter
+        // still has the same value.
+        //
+        // Notifications happen by incrementing the counter and then waking a
+        // thread. Incrementing the counter after we unlock the mutex will
+        // prevent us from sleeping and otherwise the call to `wake` will
+        // wake us up once we're asleep.
+        let ticket = self.cnt.load(SeqCst) as i32;
+        mutex.unlock();
+        let val = wasm32::memory_atomic_wait32(self.ptr(), ticket, -1);
+        // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
+        debug_assert!(val == 0 || val == 1);
+        mutex.lock();
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        let ticket = self.cnt.load(SeqCst) as i32;
+        mutex.unlock();
+        let nanos = dur.as_nanos();
+        let nanos = cmp::min(i64::MAX as u128, nanos);
+
+        // If the return value is 2 then a timeout happened, so we return
+        // `false` as we weren't actually notified.
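+        //
+        // Note that, unlike the chunked wait in `Thread::sleep`, an over-long
+        // timeout here is simply clamped to `i64::MAX` nanoseconds above
+        // rather than being waited out in full.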
+        let ret = wasm32::memory_atomic_wait32(self.ptr(), ticket, nanos as i64) != 2;
+        mutex.lock();
+        return ret;
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // nothing to do
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        assert_eq!(mem::size_of::<AtomicUsize>(), mem::size_of::<i32>());
+        self.cnt.as_mut_ptr() as *mut i32
+    }
+}

diff --git a/library/std/src/sys/wasm/atomics/futex.rs b/library/std/src/sys/wasm/atomics/futex.rs
new file mode 100644
index 00000000000..3d8bf42f725
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/futex.rs
@@ -0,0 +1,17 @@
+use crate::arch::wasm32;
+use crate::convert::TryInto;
+use crate::sync::atomic::AtomicI32;
+use crate::time::Duration;
+
+pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option<Duration>) {
+    let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
+    unsafe {
+        wasm32::memory_atomic_wait32(futex as *const AtomicI32 as *mut i32, expected, timeout);
+    }
+}
+
+pub fn futex_wake(futex: &AtomicI32) {
+    unsafe {
+        wasm32::memory_atomic_notify(futex as *const AtomicI32 as *mut i32, 1);
+    }
+}

diff --git a/library/std/src/sys/wasm/atomics/mutex.rs b/library/std/src/sys/wasm/atomics/mutex.rs
new file mode 100644
index 00000000000..5ff0ec052b6
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/mutex.rs
@@ -0,0 +1,156 @@
+use crate::arch::wasm32;
+use crate::cell::UnsafeCell;
+use crate::mem;
+use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
+use crate::sys::thread;
+
+pub struct Mutex {
+    locked: AtomicUsize,
+}
+
+pub type MovableMutex = Mutex;
+
+// Mutexes have a pretty simple implementation where they contain an `i32`
+// internally that is 0 when unlocked and 1 when the mutex is locked.
+// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
+// if it fails it then waits for a notification. Releasing a lock is then done
+// by swapping in 0 and then notifying any waiters, if present.
+
+impl Mutex {
+    pub const fn new() -> Mutex {
+        Mutex { locked: AtomicUsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // nothing to do
+    }
+
+    pub unsafe fn lock(&self) {
+        while !self.try_lock() {
+            // SAFETY: the caller must uphold the safety contract for `memory_atomic_wait32`.
+            let val = unsafe {
+                wasm32::memory_atomic_wait32(
+                    self.ptr(),
+                    1,  // we expect our mutex is locked
+                    -1, // wait infinitely
+                )
+            };
+            // we should have either woken up (0) or gotten a not-equal due to
+            // a race (1). We should never time out (2).
+            debug_assert!(val == 0 || val == 1);
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        let prev = self.locked.swap(0, SeqCst);
+        debug_assert_eq!(prev, 1);
+        wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // nothing to do
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        assert_eq!(mem::size_of::<AtomicUsize>(), mem::size_of::<i32>());
+        self.locked.as_mut_ptr() as *mut i32
+    }
+}
+
+pub struct ReentrantMutex {
+    owner: AtomicU32,
+    recursions: UnsafeCell<u32>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+// Reentrant mutexes are implemented similarly to the mutexes above, except that
+// instead of "1" meaning unlocked we use the id of a thread to represent
+// whether it has locked a mutex. That way we have an atomic counter which
+// always holds the id of the thread that currently holds the lock (or 0 if the
+// lock is unlocked).
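+// (Strictly speaking, `_try_lock` stores the thread id plus one, so a valid
+// owner value can never collide with the 0 "unlocked" state.)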
+//
+// Once a thread acquires a lock recursively, which it detects by looking at
+// the value that's already there, it will update a local `recursions` counter
+// in a nonatomic fashion (as we hold the lock). The lock is then fully
+// released when this recursion counter reaches 0.
+
+impl ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
+        ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
+    }
+
+    pub unsafe fn init(&self) {
+        // nothing to do...
+    }
+
+    pub unsafe fn lock(&self) {
+        let me = thread::my_id();
+        while let Err(owner) = self._try_lock(me) {
+            // SAFETY: the caller must guarantee that `self.ptr()` and `owner` are valid i32.
+            let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
+            debug_assert!(val == 0 || val == 1);
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        unsafe { self._try_lock(thread::my_id()).is_ok() }
+    }
+
+    #[inline]
+    unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
+        let id = id.checked_add(1).unwrap();
+        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
+            // we transitioned from unlocked to locked
+            Ok(_) => {
+                debug_assert_eq!(*self.recursions.get(), 0);
+                Ok(())
+            }
+
+            // we currently own this lock, so let's update our count and return
+            // true.
+            Err(n) if n == id => {
+                *self.recursions.get() += 1;
+                Ok(())
+            }
+
+            // Someone else owns the lock, let our caller take care of it
+            Err(other) => Err(other),
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        // If we didn't ever recursively lock the lock then we fully unlock the
+        // mutex and wake up a waiter, if any. Otherwise we decrement our
+        // recursive counter and let someone else take care of the zero.
+        match *self.recursions.get() {
+            0 => {
+                self.owner.swap(0, SeqCst);
+                // SAFETY: the caller must guarantee that `self.ptr()` is valid i32.
+                unsafe {
+                    wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
+                } // wake up one waiter, if any
+            }
+            ref mut n => *n -= 1,
+        }
+    }
+
+    pub unsafe fn destroy(&self) {
+        // nothing to do...
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        self.owner.as_mut_ptr() as *mut i32
+    }
+}

diff --git a/library/std/src/sys/wasm/atomics/rwlock.rs b/library/std/src/sys/wasm/atomics/rwlock.rs
new file mode 100644
index 00000000000..06442e925f4
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/rwlock.rs
@@ -0,0 +1,144 @@
+use crate::cell::UnsafeCell;
+use crate::sys::condvar::Condvar;
+use crate::sys::mutex::Mutex;
+
+pub struct RWLock {
+    lock: Mutex,
+    cond: Condvar,
+    state: UnsafeCell<State>,
+}
+
+enum State {
+    Unlocked,
+    Reading(usize),
+    Writing,
+}
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {}
+
+// This rwlock implementation is a relatively simple one which has a
+// condition variable for readers/writers as well as a mutex protecting the
+// internal state of the lock. A current downside of the implementation is that
+// unlocking the lock will notify *all* waiters rather than just readers or just
+// writers. This can cause lots of "thundering stampede" problems. While
+// hopefully correct, this implementation is very likely to want to be changed
+// in the future.
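+//
+// (The `Mutex` and `Condvar` used here are the atomics-based implementations
+// from this same directory, so all blocking below ultimately funnels through
+// `memory_atomic_wait32`.)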
+
+impl RWLock {
+    pub const fn new() -> RWLock {
+        RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) }
+    }
+
+    #[inline]
+    pub unsafe fn read(&self) {
+        self.lock.lock();
+        while !(*self.state.get()).inc_readers() {
+            self.cond.wait(&self.lock);
+        }
+        self.lock.unlock();
+    }
+
+    #[inline]
+    pub unsafe fn try_read(&self) -> bool {
+        self.lock.lock();
+        let ok = (*self.state.get()).inc_readers();
+        self.lock.unlock();
+        return ok;
+    }
+
+    #[inline]
+    pub unsafe fn write(&self) {
+        self.lock.lock();
+        while !(*self.state.get()).inc_writers() {
+            self.cond.wait(&self.lock);
+        }
+        self.lock.unlock();
+    }
+
+    #[inline]
+    pub unsafe fn try_write(&self) -> bool {
+        self.lock.lock();
+        let ok = (*self.state.get()).inc_writers();
+        self.lock.unlock();
+        return ok;
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        self.lock.lock();
+        let notify = (*self.state.get()).dec_readers();
+        self.lock.unlock();
+        if notify {
+            // FIXME: should only wake up one of these some of the time
+            self.cond.notify_all();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        self.lock.lock();
+        (*self.state.get()).dec_writers();
+        self.lock.unlock();
+        // FIXME: should only wake up one of these some of the time
+        self.cond.notify_all();
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        self.lock.destroy();
+        self.cond.destroy();
+    }
+}
+
+impl State {
+    fn inc_readers(&mut self) -> bool {
+        match *self {
+            State::Unlocked => {
+                *self = State::Reading(1);
+                true
+            }
+            State::Reading(ref mut cnt) => {
+                *cnt += 1;
+                true
+            }
+            State::Writing => false,
+        }
+    }
+
+    fn inc_writers(&mut self) -> bool {
+        match *self {
+            State::Unlocked => {
+                *self = State::Writing;
+                true
+            }
+            State::Reading(_) | State::Writing => false,
+        }
+    }
+
+    fn dec_readers(&mut self) -> bool {
+        let zero = match *self {
+            State::Reading(ref mut cnt) => {
+                *cnt -= 1;
+                *cnt == 0
+            }
+            State::Unlocked | State::Writing => invalid(),
+        };
+        if zero {
+            *self = State::Unlocked;
+        }
+        zero
+    }
+
+    fn dec_writers(&mut self) {
+        match *self {
+            State::Writing => {}
+            State::Unlocked | State::Reading(_) => invalid(),
+        }
+        *self = State::Unlocked;
+    }
+}
+
+fn invalid() -> ! {
+    panic!("inconsistent rwlock");
+}

diff --git a/library/std/src/sys/wasm/atomics/thread.rs b/library/std/src/sys/wasm/atomics/thread.rs
new file mode 100644
index 00000000000..54bc877aa7d
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/thread.rs
@@ -0,0 +1,84 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+    pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+        unsupported()
+    }
+
+    pub fn yield_now() {}
+
+    pub fn set_name(_name: &CStr) {}
+
+    pub fn sleep(dur: Duration) {
+        use crate::arch::wasm32;
+        use crate::cmp;
+
+        // Use an atomic wait to block the current thread artificially with the
+        // given timeout. Note that we should never be notified (return value
+        // of 0) and our comparison should never fail (return value of 1), so
+        // we should only ever resume execution through a timeout (return
+        // value 2).
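+        //
+        // The wait is performed in chunks of at most `i64::MAX` nanoseconds,
+        // since that is the largest timeout `memory_atomic_wait32` accepts.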
+        let mut nanos = dur.as_nanos();
+        while nanos > 0 {
+            let amt = cmp::min(i64::MAX as u128, nanos);
+            let mut x = 0;
+            let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
+            debug_assert_eq!(val, 2);
+            nanos -= amt;
+        }
+    }
+
+    pub fn join(self) {}
+}
+
+pub mod guard {
+    pub type Guard = !;
+    pub unsafe fn current() -> Option<Guard> {
+        None
+    }
+    pub unsafe fn init() -> Option<Guard> {
+        None
+    }
+}
+
+// We currently just use our own thread-local to store our
+// current thread's ID, and then we lazily initialize it to something allocated
+// from a global counter.
+pub fn my_id() -> u32 {
+    use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};
+
+    static NEXT_ID: AtomicU32 = AtomicU32::new(0);
+
+    #[thread_local]
+    static mut MY_ID: u32 = 0;
+
+    unsafe {
+        // If our thread ID isn't set yet then we need to allocate one. Do so
+        // with a simple "atomically add to a global counter" strategy.
+        // This strategy doesn't handle what happens when the counter
+        // overflows, however, so for now we just abort everything once the
+        // counter overflows (eventually we could have some sort of recycling
+        // scheme, or maybe this is all totally irrelevant by that point!). In
+        // any case, we're using a CAS loop instead of a `fetch_add` to ensure
+        // that the global counter never overflows.
+        if MY_ID == 0 {
+            let mut cur = NEXT_ID.load(SeqCst);
+            MY_ID = loop {
+                let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort());
+                match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
+                    Ok(_) => break next,
+                    Err(i) => cur = i,
+                }
+            };
+        }
+        MY_ID
+    }
+}

diff --git a/library/std/src/sys/wasm/condvar_atomics.rs b/library/std/src/sys/wasm/condvar_atomics.rs
deleted file mode 100644
index 0c1c076cc91..00000000000
--- a/library/std/src/sys/wasm/condvar_atomics.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use crate::arch::wasm32;
-use crate::cmp;
-use crate::mem;
-use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
-use crate::sys::mutex::Mutex;
-use crate::time::Duration;
-
-pub struct Condvar {
-    cnt: AtomicUsize,
-}
-
-pub type MovableCondvar = Condvar;
-
-// Condition variables are implemented with a simple counter internally that is
-// likely to cause spurious wakeups. Blocking on a condition variable will first
-// read the value of the internal counter, unlock the given mutex, and then
-// block if and only if the counter's value is still the same. Notifying a
-// condition variable will modify the counter (add one for now) and then wake up
-// a thread waiting on the address of the counter.
-//
-// A thread waiting on the condition variable will as a result avoid going to
-// sleep if it's notified after the lock is unlocked but before it fully goes to
-// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
-// only be woken up with a call to `wake`.
-//
-// Note that it's possible for 2 or more threads to be woken up by a call to
-// `notify_one` with this implementation. That can happen when the modification
-// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
-// and the subsequent `wake` may wake up a thread that's actually blocking. We
-// consider this a spurious wakeup, though, which all users of condition
-// variables must already be prepared to handle. As a result, this source of
-// spurious wakeups is currently thought to be ok, although it may be problematic
-// later on if it causes too many spurious wakeups.
-
-impl Condvar {
-    pub const fn new() -> Condvar {
-        Condvar { cnt: AtomicUsize::new(0) }
-    }
-
-    #[inline]
-    pub unsafe fn init(&mut self) {
-        // nothing to do
-    }
-
-    pub unsafe fn notify_one(&self) {
-        self.cnt.fetch_add(1, SeqCst);
-        // SAFETY: ptr() is always valid
-        unsafe {
-            wasm32::memory_atomic_notify(self.ptr(), 1);
-        }
-    }
-
-    #[inline]
-    pub unsafe fn notify_all(&self) {
-        self.cnt.fetch_add(1, SeqCst);
-        // SAFETY: ptr() is always valid
-        unsafe {
-            wasm32::memory_atomic_notify(self.ptr(), u32::MAX); // -1 == "wake everyone"
-        }
-    }
-
-    pub unsafe fn wait(&self, mutex: &Mutex) {
-        // "atomically block and unlock" implemented by loading our current
-        // counter's value, unlocking the mutex, and blocking if the counter
-        // still has the same value.
-        //
-        // Notifications happen by incrementing the counter and then waking a
-        // thread. Incrementing the counter after we unlock the mutex will
-        // prevent us from sleeping and otherwise the call to `wake` will
-        // wake us up once we're asleep.
-        let ticket = self.cnt.load(SeqCst) as i32;
-        mutex.unlock();
-        let val = wasm32::memory_atomic_wait32(self.ptr(), ticket, -1);
-        // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
-        debug_assert!(val == 0 || val == 1);
-        mutex.lock();
-    }
-
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let ticket = self.cnt.load(SeqCst) as i32;
-        mutex.unlock();
-        let nanos = dur.as_nanos();
-        let nanos = cmp::min(i64::MAX as u128, nanos);
-
-        // If the return value is 2 then a timeout happened, so we return
-        // `false` as we weren't actually notified.
-        let ret = wasm32::memory_atomic_wait32(self.ptr(), ticket, nanos as i64) != 2;
-        mutex.lock();
-        return ret;
-    }
-
-    #[inline]
-    pub unsafe fn destroy(&self) {
-        // nothing to do
-    }
-
-    #[inline]
-    fn ptr(&self) -> *mut i32 {
-        assert_eq!(mem::size_of::<AtomicUsize>(), mem::size_of::<i32>());
-        self.cnt.as_mut_ptr() as *mut i32
-    }
-}

diff --git a/library/std/src/sys/wasm/futex_atomics.rs b/library/std/src/sys/wasm/futex_atomics.rs
deleted file mode 100644
index 3d8bf42f725..00000000000
--- a/library/std/src/sys/wasm/futex_atomics.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-use crate::arch::wasm32;
-use crate::convert::TryInto;
-use crate::sync::atomic::AtomicI32;
-use crate::time::Duration;
-
-pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option<Duration>) {
-    let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
-    unsafe {
-        wasm32::memory_atomic_wait32(futex as *const AtomicI32 as *mut i32, expected, timeout);
-    }
-}
-
-pub fn futex_wake(futex: &AtomicI32) {
-    unsafe {
-        wasm32::memory_atomic_notify(futex as *const AtomicI32 as *mut i32, 1);
-    }
-}

diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 71877fce993..0bd2c0d0f9d 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -48,15 +48,15 @@ pub use crate::sys_common::os_str_bytes as os_str;
 cfg_if::cfg_if! {
     if #[cfg(target_feature = "atomics")] {
-        #[path = "condvar_atomics.rs"]
+        #[path = "atomics/condvar.rs"]
         pub mod condvar;
-        #[path = "mutex_atomics.rs"]
+        #[path = "atomics/mutex.rs"]
         pub mod mutex;
-        #[path = "rwlock_atomics.rs"]
+        #[path = "atomics/rwlock.rs"]
         pub mod rwlock;
-        #[path = "futex_atomics.rs"]
+        #[path = "atomics/futex.rs"]
         pub mod futex;
-        #[path = "thread_atomics.rs"]
+        #[path = "atomics/thread.rs"]
         pub mod thread;
     } else {
         #[path = "../unsupported/condvar.rs"]

diff --git a/library/std/src/sys/wasm/mutex_atomics.rs b/library/std/src/sys/wasm/mutex_atomics.rs
deleted file mode 100644
index 5ff0ec052b6..00000000000
--- a/library/std/src/sys/wasm/mutex_atomics.rs
+++ /dev/null
@@ -1,156 +0,0 @@
-use crate::arch::wasm32;
-use crate::cell::UnsafeCell;
-use crate::mem;
-use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
-use crate::sys::thread;
-
-pub struct Mutex {
-    locked: AtomicUsize,
-}
-
-pub type MovableMutex = Mutex;
-
-// Mutexes have a pretty simple implementation where they contain an `i32`
-// internally that is 0 when unlocked and 1 when the mutex is locked.
-// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
-// if it fails it then waits for a notification. Releasing a lock is then done
-// by swapping in 0 and then notifying any waiters, if present.
-
-impl Mutex {
-    pub const fn new() -> Mutex {
-        Mutex { locked: AtomicUsize::new(0) }
-    }
-
-    #[inline]
-    pub unsafe fn init(&mut self) {
-        // nothing to do
-    }
-
-    pub unsafe fn lock(&self) {
-        while !self.try_lock() {
-            // SAFETY: the caller must uphold the safety contract for `memory_atomic_wait32`.
-            let val = unsafe {
-                wasm32::memory_atomic_wait32(
-                    self.ptr(),
-                    1,  // we expect our mutex is locked
-                    -1, // wait infinitely
-                )
-            };
-            // we should have either woken up (0) or gotten a not-equal due to
-            // a race (1). We should never time out (2).
-            debug_assert!(val == 0 || val == 1);
-        }
-    }
-
-    pub unsafe fn unlock(&self) {
-        let prev = self.locked.swap(0, SeqCst);
-        debug_assert_eq!(prev, 1);
-        wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
-    }
-
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
-    }
-
-    #[inline]
-    pub unsafe fn destroy(&self) {
-        // nothing to do
-    }
-
-    #[inline]
-    fn ptr(&self) -> *mut i32 {
-        assert_eq!(mem::size_of::<AtomicUsize>(), mem::size_of::<i32>());
-        self.locked.as_mut_ptr() as *mut i32
-    }
-}
-
-pub struct ReentrantMutex {
-    owner: AtomicU32,
-    recursions: UnsafeCell<u32>,
-}
-
-unsafe impl Send for ReentrantMutex {}
-unsafe impl Sync for ReentrantMutex {}
-
-// Reentrant mutexes are implemented similarly to the mutexes above, except that
-// instead of "1" meaning unlocked we use the id of a thread to represent
-// whether it has locked a mutex. That way we have an atomic counter which
-// always holds the id of the thread that currently holds the lock (or 0 if the
-// lock is unlocked).
-//
-// Once a thread acquires a lock recursively, which it detects by looking at
-// the value that's already there, it will update a local `recursions` counter
-// in a nonatomic fashion (as we hold the lock). The lock is then fully
-// released when this recursion counter reaches 0.
-
-impl ReentrantMutex {
-    pub const unsafe fn uninitialized() -> ReentrantMutex {
-        ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
-    }
-
-    pub unsafe fn init(&self) {
-        // nothing to do...
-    }
-
-    pub unsafe fn lock(&self) {
-        let me = thread::my_id();
-        while let Err(owner) = self._try_lock(me) {
-            // SAFETY: the caller must guarantee that `self.ptr()` and `owner` are valid i32.
-            let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
-            debug_assert!(val == 0 || val == 1);
-        }
-    }
-
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        unsafe { self._try_lock(thread::my_id()).is_ok() }
-    }
-
-    #[inline]
-    unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
-        let id = id.checked_add(1).unwrap();
-        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
-            // we transitioned from unlocked to locked
-            Ok(_) => {
-                debug_assert_eq!(*self.recursions.get(), 0);
-                Ok(())
-            }
-
-            // we currently own this lock, so let's update our count and return
-            // true.
-            Err(n) if n == id => {
-                *self.recursions.get() += 1;
-                Ok(())
-            }
-
-            // Someone else owns the lock, let our caller take care of it
-            Err(other) => Err(other),
-        }
-    }
-
-    pub unsafe fn unlock(&self) {
-        // If we didn't ever recursively lock the lock then we fully unlock the
-        // mutex and wake up a waiter, if any. Otherwise we decrement our
-        // recursive counter and let someone else take care of the zero.
-        match *self.recursions.get() {
-            0 => {
-                self.owner.swap(0, SeqCst);
-                // SAFETY: the caller must guarantee that `self.ptr()` is valid i32.
-                unsafe {
-                    wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
-                } // wake up one waiter, if any
-            }
-            ref mut n => *n -= 1,
-        }
-    }
-
-    pub unsafe fn destroy(&self) {
-        // nothing to do...
-    }
-
-    #[inline]
-    fn ptr(&self) -> *mut i32 {
-        self.owner.as_mut_ptr() as *mut i32
-    }
-}

diff --git a/library/std/src/sys/wasm/rwlock_atomics.rs b/library/std/src/sys/wasm/rwlock_atomics.rs
deleted file mode 100644
index 06442e925f4..00000000000
--- a/library/std/src/sys/wasm/rwlock_atomics.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::sys::condvar::Condvar;
-use crate::sys::mutex::Mutex;
-
-pub struct RWLock {
-    lock: Mutex,
-    cond: Condvar,
-    state: UnsafeCell<State>,
-}
-
-enum State {
-    Unlocked,
-    Reading(usize),
-    Writing,
-}
-
-unsafe impl Send for RWLock {}
-unsafe impl Sync for RWLock {}
-
-// This rwlock implementation is a relatively simple one which has a
-// condition variable for readers/writers as well as a mutex protecting the
-// internal state of the lock. A current downside of the implementation is that
-// unlocking the lock will notify *all* waiters rather than just readers or just
-// writers. This can cause lots of "thundering stampede" problems. While
-// hopefully correct, this implementation is very likely to want to be changed
-// in the future.
-
-impl RWLock {
-    pub const fn new() -> RWLock {
-        RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) }
-    }
-
-    #[inline]
-    pub unsafe fn read(&self) {
-        self.lock.lock();
-        while !(*self.state.get()).inc_readers() {
-            self.cond.wait(&self.lock);
-        }
-        self.lock.unlock();
-    }
-
-    #[inline]
-    pub unsafe fn try_read(&self) -> bool {
-        self.lock.lock();
-        let ok = (*self.state.get()).inc_readers();
-        self.lock.unlock();
-        return ok;
-    }
-
-    #[inline]
-    pub unsafe fn write(&self) {
-        self.lock.lock();
-        while !(*self.state.get()).inc_writers() {
-            self.cond.wait(&self.lock);
-        }
-        self.lock.unlock();
-    }
-
-    #[inline]
-    pub unsafe fn try_write(&self) -> bool {
-        self.lock.lock();
-        let ok = (*self.state.get()).inc_writers();
-        self.lock.unlock();
-        return ok;
-    }
-
-    #[inline]
-    pub unsafe fn read_unlock(&self) {
-        self.lock.lock();
-        let notify = (*self.state.get()).dec_readers();
-        self.lock.unlock();
-        if notify {
-            // FIXME: should only wake up one of these some of the time
-            self.cond.notify_all();
-        }
-    }
-
-    #[inline]
-    pub unsafe fn write_unlock(&self) {
-        self.lock.lock();
-        (*self.state.get()).dec_writers();
-        self.lock.unlock();
-        // FIXME: should only wake up one of these some of the time
-        self.cond.notify_all();
-    }
-
-    #[inline]
-    pub unsafe fn destroy(&self) {
-        self.lock.destroy();
-        self.cond.destroy();
-    }
-}
-
-impl State {
-    fn inc_readers(&mut self) -> bool {
-        match *self {
-            State::Unlocked => {
-                *self = State::Reading(1);
-                true
-            }
-            State::Reading(ref mut cnt) => {
-                *cnt += 1;
-                true
-            }
-            State::Writing => false,
-        }
-    }
-
-    fn inc_writers(&mut self) -> bool {
-        match *self {
-            State::Unlocked => {
-                *self = State::Writing;
-                true
-            }
-            State::Reading(_) | State::Writing => false,
-        }
-    }
-
-    fn dec_readers(&mut self) -> bool {
-        let zero = match *self {
-            State::Reading(ref mut cnt) => {
-                *cnt -= 1;
-                *cnt == 0
-            }
-            State::Unlocked | State::Writing => invalid(),
-        };
-        if zero {
-            *self = State::Unlocked;
-        }
-        zero
-    }
-
-    fn dec_writers(&mut self) {
-        match *self {
-            State::Writing => {}
-            State::Unlocked | State::Reading(_) => invalid(),
-        }
-        *self = State::Unlocked;
-    }
-}
-
-fn invalid() -> ! {
-    panic!("inconsistent rwlock");
-}

diff --git a/library/std/src/sys/wasm/thread_atomics.rs b/library/std/src/sys/wasm/thread_atomics.rs
deleted file mode 100644
index 54bc877aa7d..00000000000
--- a/library/std/src/sys/wasm/thread_atomics.rs
+++ /dev/null
@@ -1,84 +0,0 @@
-use crate::ffi::CStr;
-use crate::io;
-use crate::sys::unsupported;
-use crate::time::Duration;
-
-pub struct Thread(!);
-
-pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
-
-impl Thread {
-    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
-    pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
-        unsupported()
-    }
-
-    pub fn yield_now() {}
-
-    pub fn set_name(_name: &CStr) {}
-
-    pub fn sleep(dur: Duration) {
-        use crate::arch::wasm32;
-        use crate::cmp;
-
-        // Use an atomic wait to block the current thread artificially with the
-        // given timeout. Note that we should never be notified (return value
-        // of 0) and our comparison should never fail (return value of 1), so
-        // we should only ever resume execution through a timeout (return
-        // value 2).
-        let mut nanos = dur.as_nanos();
-        while nanos > 0 {
-            let amt = cmp::min(i64::MAX as u128, nanos);
-            let mut x = 0;
-            let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
-            debug_assert_eq!(val, 2);
-            nanos -= amt;
-        }
-    }
-
-    pub fn join(self) {}
-}
-
-pub mod guard {
-    pub type Guard = !;
-    pub unsafe fn current() -> Option<Guard> {
-        None
-    }
-    pub unsafe fn init() -> Option<Guard> {
-        None
-    }
-}
-
-// We currently just use our own thread-local to store our
-// current thread's ID, and then we lazily initialize it to something allocated
-// from a global counter.
-pub fn my_id() -> u32 {
-    use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};
-
-    static NEXT_ID: AtomicU32 = AtomicU32::new(0);
-
-    #[thread_local]
-    static mut MY_ID: u32 = 0;
-
-    unsafe {
-        // If our thread ID isn't set yet then we need to allocate one. Do so
-        // with a simple "atomically add to a global counter" strategy.
-        // This strategy doesn't handle what happens when the counter
-        // overflows, however, so for now we just abort everything once the
-        // counter overflows (eventually we could have some sort of recycling
-        // scheme, or maybe this is all totally irrelevant by that point!). In
-        // any case, we're using a CAS loop instead of a `fetch_add` to ensure
-        // that the global counter never overflows.
-        if MY_ID == 0 {
-            let mut cur = NEXT_ID.load(SeqCst);
-            MY_ID = loop {
-                let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort());
-                match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
-                    Ok(_) => break next,
-                    Err(i) => cur = i,
-                }
-            };
-        }
-        MY_ID
-    }
-}
--
cgit 1.4.1-3-g733a5

From 45bc1930cae22b8b08adc9bb5ed4a494831d6678 Mon Sep 17 00:00:00 2001
From: Christiaan Dirkx
Date: Wed, 28 Apr 2021 15:44:13 +0200
Subject: Reuse `unsupported::args` on `wasm`

---
 library/std/src/sys/wasm/args.rs | 42 ----------------------------------------
 library/std/src/sys/wasm/mod.rs  |  1 +
 2 files changed, 1 insertion(+), 42 deletions(-)
 delete mode 100644 library/std/src/sys/wasm/args.rs

diff --git a/library/std/src/sys/wasm/args.rs b/library/std/src/sys/wasm/args.rs
deleted file mode 100644
index fde1ab79e1f..00000000000
--- a/library/std/src/sys/wasm/args.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use crate::ffi::OsString;
-use crate::fmt;
-use crate::vec;
-
-pub fn args() -> Args {
-    Args { iter: Vec::new().into_iter() }
-}
-
-pub struct Args {
-    iter: vec::IntoIter<OsString>,
-}
-
-impl !Send for Args {}
-impl !Sync for Args {}
-
-impl fmt::Debug for Args {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.iter.as_slice().fmt(f)
-    }
-}
-
-impl Iterator for Args {
-    type Item = OsString;
-    fn next(&mut self) -> Option<OsString> {
-        self.iter.next()
-    }
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-}
-
-impl ExactSizeIterator for Args {
-    fn len(&self) -> usize {
-        self.iter.len()
-    }
-}
-
-impl DoubleEndedIterator for Args {
-    fn next_back(&mut self) -> Option<OsString> {
-        self.iter.next_back()
-    }
-}

diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 0bd2c0d0f9d..18d8ea183f7 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -17,6 +17,7 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
 pub mod alloc;
+#[path = "../unsupported/args.rs"]
 pub mod args;
 #[path = "../unsupported/cmath.rs"]
 pub mod cmath;
--
cgit 1.4.1-3-g733a5

From cf79c06575f1710f05ee5309fb24bcdfff6f0140 Mon Sep 17 00:00:00 2001
From: Christiaan Dirkx
Date: Wed, 28 Apr 2021 16:16:01 +0200
Subject: Fix missing import in `unsupported::args`

---
 library/std/src/sys/unsupported/args.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/library/std/src/sys/unsupported/args.rs b/library/std/src/sys/unsupported/args.rs
index c924a7d8a26..a2d75a61976 100644
--- a/library/std/src/sys/unsupported/args.rs
+++ b/library/std/src/sys/unsupported/args.rs
@@ -1,4 +1,5 @@
 use crate::ffi::OsString;
+use crate::fmt;
 
 pub struct Args {}
--
cgit 1.4.1-3-g733a5

From 30b82e0f96579d9f897c4e2a780af82662d89772 Mon Sep 17 00:00:00 2001
From: Jethro Beekman
Date: Fri, 7 May 2021 13:21:38 +0200
Subject: SGX mutex is movable

---
 library/std/src/sys/sgx/mutex.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
index 8874517dac6..1b5ced4178f 100644
--- a/library/std/src/sys/sgx/mutex.rs
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -8,7 +8,7 @@ pub struct Mutex {
     inner: SpinMutex<WaitVariable<bool>>,
 }
 
-pub type MovableMutex = Box<Mutex>;
+pub type MovableMutex = Mutex;
 
 // Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
 impl Mutex {
--
cgit 1.4.1-3-g733a5

From bfa84842e546a5fb2d1c69ba499ddc94bee1e01a Mon Sep 17 00:00:00 2001
From: Jethro Beekman
Date: Fri, 7 May 2021 13:40:43 +0200
Subject: Rearrange SGX split module files

In #75979 several inlined modules were split out into multiple files.
This PR keeps the multiple files but moves a few things around to
organize things in a coherent way.
---
 library/std/src/sys/sgx/abi/tls.rs               | 132 ------
 library/std/src/sys/sgx/abi/tls/mod.rs           | 132 ++++++
 library/std/src/sys/sgx/waitqueue.rs             | 245 -----------
 library/std/src/sys/sgx/waitqueue/mod.rs         | 240 ++++++++++
 library/std/src/sys/sgx/waitqueue/spin_mutex.rs  |   3 +
 library/std/src/sys/sgx/waitqueue/unsafe_list.rs |   3 +
 6 files changed, 378 insertions(+), 377 deletions(-)
 delete mode 100644 library/std/src/sys/sgx/abi/tls.rs
 create mode 100644 library/std/src/sys/sgx/abi/tls/mod.rs
 delete mode 100644 library/std/src/sys/sgx/waitqueue.rs
 create mode 100644 library/std/src/sys/sgx/waitqueue/mod.rs

diff --git a/library/std/src/sys/sgx/abi/tls.rs b/library/std/src/sys/sgx/abi/tls.rs
deleted file mode 100644
index 13d96e9a633..00000000000
--- a/library/std/src/sys/sgx/abi/tls.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-mod sync_bitset;
-
-use self::sync_bitset::*;
-use crate::cell::Cell;
-use crate::mem;
-use crate::num::NonZeroUsize;
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering};
-
-#[cfg(target_pointer_width = "64")]
-const USIZE_BITS: usize = 64;
-const TLS_KEYS: usize = 128; // Same as POSIX minimum
-const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
-
-#[cfg_attr(test, linkage = "available_externally")]
-#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"]
-static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
-macro_rules! dup {
-    ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
-    (() $($val:tt)*) => ([$($val),*])
-}
-#[cfg_attr(test, linkage = "available_externally")]
-#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"]
-static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));
-
-extern "C" {
-    fn get_tls_ptr() -> *const u8;
-    fn set_tls_ptr(tls: *const u8);
-}
-
-#[derive(Copy, Clone)]
-#[repr(C)]
-pub struct Key(NonZeroUsize);
-
-impl Key {
-    fn to_index(self) -> usize {
-        self.0.get() - 1
-    }
-
-    fn from_index(index: usize) -> Self {
-        Key(NonZeroUsize::new(index + 1).unwrap())
-    }
-
-    pub fn as_usize(self) -> usize {
-        self.0.get()
-    }
-
-    pub fn from_usize(index: usize) -> Self {
-        Key(NonZeroUsize::new(index).unwrap())
-    }
-}
-
-#[repr(C)]
-pub struct Tls {
-    data: [Cell<*mut u8>; TLS_KEYS],
-}
-
-pub struct ActiveTls<'a> {
-    tls: &'a Tls,
-}
-
-impl<'a> Drop for ActiveTls<'a> {
-    fn drop(&mut self) {
-        let value_with_destructor = |key: usize| {
-            let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
-            unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) }
-                .map(|dtor| (&self.tls.data[key], dtor))
-        };
-
-        let mut any_non_null_dtor = true;
-        while any_non_null_dtor {
-            any_non_null_dtor = false;
-            for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
-                let value = value.replace(ptr::null_mut());
-                if !value.is_null() {
-                    any_non_null_dtor = true;
-                    unsafe { dtor(value) }
-                }
-            }
-        }
-    }
-}
-
-impl Tls {
-    pub fn new() -> Tls {
-        Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
-    }
-
-    pub unsafe fn activate(&self) -> ActiveTls<'_> {
-        // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
-        unsafe { set_tls_ptr(self as *const Tls as _) };
-        ActiveTls { tls: self }
-    }
-
-    #[allow(unused)]
-    pub unsafe fn activate_persistent(self: Box<Self>) {
-        // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
-        unsafe { set_tls_ptr((&*self) as *const Tls as _) };
-        mem::forget(self);
-    }
-
-    unsafe fn current<'a>() -> &'a Tls {
-        // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
-        unsafe { &*(get_tls_ptr() as *const Tls) }
-    }
-
-    pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
-        let index = if let Some(index) = TLS_KEY_IN_USE.set() {
-            index
-        } else {
-            rtabort!("TLS limit exceeded")
-        };
-        TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
-        Key::from_index(index)
-    }
-
-    pub fn set(key: Key, value: *mut u8) {
-        let index = key.to_index();
-        rtassert!(TLS_KEY_IN_USE.get(index));
-        unsafe { Self::current() }.data[index].set(value);
-    }
-
-    pub fn get(key: Key) -> *mut u8 {
-        let index = key.to_index();
-        rtassert!(TLS_KEY_IN_USE.get(index));
-        unsafe { Self::current() }.data[index].get()
-    }
-
-    pub fn destroy(key: Key) {
-        TLS_KEY_IN_USE.clear(key.to_index());
-    }
-}

diff --git a/library/std/src/sys/sgx/abi/tls/mod.rs b/library/std/src/sys/sgx/abi/tls/mod.rs
new file mode 100644
index 00000000000..13d96e9a633
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/tls/mod.rs
@@ -0,0 +1,132 @@
+mod sync_bitset;
+
+use self::sync_bitset::*;
+use crate::cell::Cell;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(target_pointer_width = "64")]
+const USIZE_BITS: usize = 64;
+const TLS_KEYS: usize = 128; // Same as POSIX minimum
+const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
+
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"]
+static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
+macro_rules! dup {
+    ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
+    (() $($val:tt)*) => ([$($val),*])
+}
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"]
+static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));
+
+extern "C" {
+    fn get_tls_ptr() -> *const u8;
+    fn set_tls_ptr(tls: *const u8);
+}
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct Key(NonZeroUsize);
+
+impl Key {
+    fn to_index(self) -> usize {
+        self.0.get() - 1
+    }
+
+    fn from_index(index: usize) -> Self {
+        Key(NonZeroUsize::new(index + 1).unwrap())
+    }
+
+    pub fn as_usize(self) -> usize {
+        self.0.get()
+    }
+
+    pub fn from_usize(index: usize) -> Self {
+        Key(NonZeroUsize::new(index).unwrap())
+    }
+}
+
+#[repr(C)]
+pub struct Tls {
+    data: [Cell<*mut u8>; TLS_KEYS],
+}
+
+pub struct ActiveTls<'a> {
+    tls: &'a Tls,
+}
+
+impl<'a> Drop for ActiveTls<'a> {
+    fn drop(&mut self) {
+        let value_with_destructor = |key: usize| {
+            let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
+            unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) }
+                .map(|dtor| (&self.tls.data[key], dtor))
+        };
+
+        let mut any_non_null_dtor = true;
+        while any_non_null_dtor {
+            any_non_null_dtor = false;
+            for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
+                let value = value.replace(ptr::null_mut());
+                if !value.is_null() {
+                    any_non_null_dtor = true;
+                    unsafe { dtor(value) }
+                }
+            }
+        }
+    }
+}
+
+impl Tls {
+    pub fn new() -> Tls {
+        Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
+    }
+
+    pub unsafe fn activate(&self) -> ActiveTls<'_> {
+        // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+        unsafe { set_tls_ptr(self as *const Tls as _) };
+        ActiveTls { tls: self }
+    }
+
+    #[allow(unused)]
+    pub unsafe fn activate_persistent(self: Box<Self>) {
+        // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
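+        // (`mem::forget` below intentionally leaks the `Box`, keeping the TLS
+        // area alive for the remainder of the thread.)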
+ unsafe { set_tls_ptr((&*self) as *const Tls as _) }; + mem::forget(self); + } + + unsafe fn current<'a>() -> &'a Tls { + // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition. + unsafe { &*(get_tls_ptr() as *const Tls) } + } + + pub fn create(dtor: Option) -> Key { + let index = if let Some(index) = TLS_KEY_IN_USE.set() { + index + } else { + rtabort!("TLS limit exceeded") + }; + TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed); + Key::from_index(index) + } + + pub fn set(key: Key, value: *mut u8) { + let index = key.to_index(); + rtassert!(TLS_KEY_IN_USE.get(index)); + unsafe { Self::current() }.data[index].set(value); + } + + pub fn get(key: Key) -> *mut u8 { + let index = key.to_index(); + rtassert!(TLS_KEY_IN_USE.get(index)); + unsafe { Self::current() }.data[index].get() + } + + pub fn destroy(key: Key) { + TLS_KEY_IN_USE.clear(key.to_index()); + } +} diff --git a/library/std/src/sys/sgx/waitqueue.rs b/library/std/src/sys/sgx/waitqueue.rs deleted file mode 100644 index e464dc3ee9d..00000000000 --- a/library/std/src/sys/sgx/waitqueue.rs +++ /dev/null @@ -1,245 +0,0 @@ -//! A simple queue implementation for synchronization primitives. -//! -//! This queue is used to implement condition variable and mutexes. -//! -//! Users of this API are expected to use the `WaitVariable` type. Since -//! that type is not `Sync`, it needs to be protected by e.g., a `SpinMutex` to -//! allow shared access. -//! -//! Since userspace may send spurious wake-ups, the wakeup event state is -//! recorded in the enclave. The wakeup event state is protected by a spinlock. -//! The queue and associated wait state are stored in a `WaitVariable`. - -#[cfg(test)] -mod tests; - -/// A doubly-linked list where callers are in charge of memory allocation -/// of the nodes in the list. -mod unsafe_list; - -/// Trivial spinlock-based implementation of `sync::Mutex`. -// FIXME: Perhaps use Intel TSX to avoid locking? -mod spin_mutex; - -use crate::num::NonZeroUsize; -use crate::ops::{Deref, DerefMut}; -use crate::time::Duration; - -use super::abi::thread; -use super::abi::usercalls; -use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE}; - -pub use self::spin_mutex::{try_lock_or_false, SpinMutex, SpinMutexGuard}; -use self::unsafe_list::{UnsafeList, UnsafeListEntry}; - -/// An queue entry in a `WaitQueue`. -struct WaitEntry { - /// TCS address of the thread that is waiting - tcs: Tcs, - /// Whether this thread has been notified to be awoken - wake: bool, -} - -/// Data stored with a `WaitQueue` alongside it. This ensures accesses to the -/// queue and the data are synchronized, since the type itself is not `Sync`. -/// -/// Consumers of this API should use a synchronization primitive for shared -/// access, such as `SpinMutex`. -#[derive(Default)] -pub struct WaitVariable { - queue: WaitQueue, - lock: T, -} - -impl WaitVariable { - pub const fn new(var: T) -> Self { - WaitVariable { queue: WaitQueue::new(), lock: var } - } - - pub fn queue_empty(&self) -> bool { - self.queue.is_empty() - } - - pub fn lock_var(&self) -> &T { - &self.lock - } - - pub fn lock_var_mut(&mut self) -> &mut T { - &mut self.lock - } -} - -#[derive(Copy, Clone)] -pub enum NotifiedTcs { - Single(Tcs), - All { count: NonZeroUsize }, -} - -/// An RAII guard that will notify a set of target threads as well as unlock -/// a mutex on drop. 
-pub struct WaitGuard<'a, T: 'a> { - mutex_guard: Option>>, - notified_tcs: NotifiedTcs, -} - -/// A queue of threads that are waiting on some synchronization primitive. -/// -/// `UnsafeList` entries are allocated on the waiting thread's stack. This -/// avoids any global locking that might happen in the heap allocator. This is -/// safe because the waiting thread will not return from that stack frame until -/// after it is notified. The notifying thread ensures to clean up any -/// references to the list entries before sending the wakeup event. -pub struct WaitQueue { - // We use an inner Mutex here to protect the data in the face of spurious - // wakeups. - inner: UnsafeList>, -} -unsafe impl Send for WaitQueue {} - -impl Default for WaitQueue { - fn default() -> Self { - Self::new() - } -} - -impl<'a, T> WaitGuard<'a, T> { - /// Returns which TCSes will be notified when this guard drops. - pub fn notified_tcs(&self) -> NotifiedTcs { - self.notified_tcs - } - - /// Drop this `WaitGuard`, after dropping another `guard`. - pub fn drop_after(self, guard: U) { - drop(guard); - drop(self); - } -} - -impl<'a, T> Deref for WaitGuard<'a, T> { - type Target = SpinMutexGuard<'a, WaitVariable>; - - fn deref(&self) -> &Self::Target { - self.mutex_guard.as_ref().unwrap() - } -} - -impl<'a, T> DerefMut for WaitGuard<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - self.mutex_guard.as_mut().unwrap() - } -} - -impl<'a, T> Drop for WaitGuard<'a, T> { - fn drop(&mut self) { - drop(self.mutex_guard.take()); - let target_tcs = match self.notified_tcs { - NotifiedTcs::Single(tcs) => Some(tcs), - NotifiedTcs::All { .. } => None, - }; - rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs)); - } -} - -impl WaitQueue { - pub const fn new() -> Self { - WaitQueue { inner: UnsafeList::new() } - } - - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait - /// until a wakeup event. - /// - /// This function does not return until this thread has been awoken. - pub fn wait(mut guard: SpinMutexGuard<'_, WaitVariable>, before_wait: F) { - // very unsafe: check requirements of UnsafeList::push - unsafe { - let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry { - tcs: thread::current(), - wake: false, - })); - let entry = guard.queue.inner.push(&mut entry); - drop(guard); - before_wait(); - while !entry.lock().wake { - // don't panic, this would invalidate `entry` during unwinding - let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE)); - rtassert!(eventset & EV_UNPARK == EV_UNPARK); - } - } - } - - /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait - /// until a wakeup event or timeout. If event was observed, returns true. - /// If not, it will remove the calling thread from the wait queue. - pub fn wait_timeout( - lock: &SpinMutex>, - timeout: Duration, - before_wait: F, - ) -> bool { - // very unsafe: check requirements of UnsafeList::push - unsafe { - let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry { - tcs: thread::current(), - wake: false, - })); - let entry_lock = lock.lock().queue.inner.push(&mut entry); - before_wait(); - usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake); - // acquire the wait queue's lock first to avoid deadlock. - let mut guard = lock.lock(); - let success = entry_lock.lock().wake; - if !success { - // nobody is waking us up, so remove our entry from the wait queue. 
-                guard.queue.inner.remove(&mut entry);
-            }
-            success
-        }
-    }
-
-    /// Either find the next waiter on the wait queue, or return the mutex
-    /// guard unchanged.
-    ///
-    /// If a waiter is found, a `WaitGuard` is returned which will notify the
-    /// waiter when it is dropped.
-    pub fn notify_one<T>(
-        mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
-    ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
-        unsafe {
-            if let Some(entry) = guard.queue.inner.pop() {
-                let mut entry_guard = entry.lock();
-                let tcs = entry_guard.tcs;
-                entry_guard.wake = true;
-                drop(entry);
-                Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::Single(tcs) })
-            } else {
-                Err(guard)
-            }
-        }
-    }
-
-    /// Either find any and all waiters on the wait queue, or return the mutex
-    /// guard unchanged.
-    ///
-    /// If at least one waiter is found, a `WaitGuard` is returned which will
-    /// notify all waiters when it is dropped.
-    pub fn notify_all<T>(
-        mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
-    ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
-        unsafe {
-            let mut count = 0;
-            while let Some(entry) = guard.queue.inner.pop() {
-                count += 1;
-                let mut entry_guard = entry.lock();
-                entry_guard.wake = true;
-            }
-            if let Some(count) = NonZeroUsize::new(count) {
-                Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
-            } else {
-                Err(guard)
-            }
-        }
-    }
-}
diff --git a/library/std/src/sys/sgx/waitqueue/mod.rs b/library/std/src/sys/sgx/waitqueue/mod.rs
new file mode 100644
index 00000000000..61bb11d9a6f
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/mod.rs
@@ -0,0 +1,240 @@
+//! A simple queue implementation for synchronization primitives.
+//!
+//! This queue is used to implement condition variables and mutexes.
+//!
+//! Users of this API are expected to use the `WaitVariable<T>` type. Since
+//! that type is not `Sync`, it needs to be protected by, e.g., a `SpinMutex` to
+//! allow shared access.
+//!
+//! Since userspace may send spurious wake-ups, the wakeup event state is
+//! recorded in the enclave. The wakeup event state is protected by a spinlock.
+//! The queue and associated wait state are stored in a `WaitVariable`.
+
+#[cfg(test)]
+mod tests;
+
+mod spin_mutex;
+mod unsafe_list;
+
+use crate::num::NonZeroUsize;
+use crate::ops::{Deref, DerefMut};
+use crate::time::Duration;
+
+use super::abi::thread;
+use super::abi::usercalls;
+use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE};
+
+pub use self::spin_mutex::{try_lock_or_false, SpinMutex, SpinMutexGuard};
+use self::unsafe_list::{UnsafeList, UnsafeListEntry};
+
+/// A queue entry in a `WaitQueue`.
+struct WaitEntry {
+    /// TCS address of the thread that is waiting
+    tcs: Tcs,
+    /// Whether this thread has been notified to be awoken
+    wake: bool,
+}
+
+/// Data stored with a `WaitQueue` alongside it. This ensures accesses to the
+/// queue and the data are synchronized, since the type itself is not `Sync`.
+///
+/// Consumers of this API should use a synchronization primitive for shared
+/// access, such as `SpinMutex`.
+#[derive(Default)]
+pub struct WaitVariable<T> {
+    queue: WaitQueue,
+    lock: T,
+}
+
+impl<T> WaitVariable<T> {
+    pub const fn new(var: T) -> Self {
+        WaitVariable { queue: WaitQueue::new(), lock: var }
+    }
+
+    pub fn queue_empty(&self) -> bool {
+        self.queue.is_empty()
+    }
+
+    pub fn lock_var(&self) -> &T {
+        &self.lock
+    }
+
+    pub fn lock_var_mut(&mut self) -> &mut T {
+        &mut self.lock
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum NotifiedTcs {
+    Single(Tcs),
+    All { count: NonZeroUsize },
+}
+
+/// An RAII guard that will notify a set of target threads as well as unlock
+/// a mutex on drop.
+pub struct WaitGuard<'a, T: 'a> {
+    mutex_guard: Option<SpinMutexGuard<'a, WaitVariable<T>>>,
+    notified_tcs: NotifiedTcs,
+}
+
+/// A queue of threads that are waiting on some synchronization primitive.
+///
+/// `UnsafeList` entries are allocated on the waiting thread's stack. This
+/// avoids any global locking that might happen in the heap allocator. This is
+/// safe because the waiting thread will not return from that stack frame until
+/// after it is notified. The notifying thread ensures that it cleans up any
+/// references to the list entries before sending the wakeup event.
+pub struct WaitQueue {
+    // We use an inner Mutex here to protect the data in the face of spurious
+    // wakeups.
+    inner: UnsafeList<SpinMutex<WaitEntry>>,
+}
+unsafe impl Send for WaitQueue {}
+
+impl Default for WaitQueue {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<'a, T> WaitGuard<'a, T> {
+    /// Returns which TCSes will be notified when this guard drops.
+    pub fn notified_tcs(&self) -> NotifiedTcs {
+        self.notified_tcs
+    }
+
+    /// Drop this `WaitGuard` after dropping another `guard`.
+    pub fn drop_after<U>(self, guard: U) {
+        drop(guard);
+        drop(self);
+    }
+}
+
+impl<'a, T> Deref for WaitGuard<'a, T> {
+    type Target = SpinMutexGuard<'a, WaitVariable<T>>;
+
+    fn deref(&self) -> &Self::Target {
+        self.mutex_guard.as_ref().unwrap()
+    }
+}
+
+impl<'a, T> DerefMut for WaitGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.mutex_guard.as_mut().unwrap()
+    }
+}
+
+impl<'a, T> Drop for WaitGuard<'a, T> {
+    fn drop(&mut self) {
+        drop(self.mutex_guard.take());
+        let target_tcs = match self.notified_tcs {
+            NotifiedTcs::Single(tcs) => Some(tcs),
+            NotifiedTcs::All { .. } => None,
+        };
+        rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs));
+    }
+}
+
+impl WaitQueue {
+    pub const fn new() -> Self {
+        WaitQueue { inner: UnsafeList::new() }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.inner.is_empty()
+    }
+
+    /// Adds the calling thread to the `WaitVariable`'s wait queue, then waits
+    /// until a wakeup event.
+    ///
+    /// This function does not return until this thread has been awoken.
+    pub fn wait<T, F: FnOnce()>(mut guard: SpinMutexGuard<'_, WaitVariable<T>>, before_wait: F) {
+        // very unsafe: check requirements of UnsafeList::push
+        unsafe {
+            let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
+                tcs: thread::current(),
+                wake: false,
+            }));
+            let entry = guard.queue.inner.push(&mut entry);
+            drop(guard);
+            before_wait();
+            while !entry.lock().wake {
+                // don't panic, this would invalidate `entry` during unwinding
+                let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE));
+                rtassert!(eventset & EV_UNPARK == EV_UNPARK);
+            }
+        }
+    }
+
+    /// Adds the calling thread to the `WaitVariable`'s wait queue, then waits
+    /// until a wakeup event or timeout. If the event was observed, returns true.
+    /// If not, the calling thread is removed from the wait queue.
+    pub fn wait_timeout<T, F: FnOnce()>(
+        lock: &SpinMutex<WaitVariable<T>>,
+        timeout: Duration,
+        before_wait: F,
+    ) -> bool {
+        // very unsafe: check requirements of UnsafeList::push
+        unsafe {
+            let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
+                tcs: thread::current(),
+                wake: false,
+            }));
+            let entry_lock = lock.lock().queue.inner.push(&mut entry);
+            before_wait();
+            usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake);
+            // acquire the wait queue's lock first to avoid deadlock.
+            let mut guard = lock.lock();
+            let success = entry_lock.lock().wake;
+            if !success {
+                // nobody is waking us up, so remove our entry from the wait queue.
+                guard.queue.inner.remove(&mut entry);
+            }
+            success
+        }
+    }
+
+    /// Either find the next waiter on the wait queue, or return the mutex
+    /// guard unchanged.
+    ///
+    /// If a waiter is found, a `WaitGuard` is returned which will notify the
+    /// waiter when it is dropped.
+    pub fn notify_one<T>(
+        mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
+    ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
+        unsafe {
+            if let Some(entry) = guard.queue.inner.pop() {
+                let mut entry_guard = entry.lock();
+                let tcs = entry_guard.tcs;
+                entry_guard.wake = true;
+                drop(entry);
+                Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::Single(tcs) })
+            } else {
+                Err(guard)
+            }
+        }
+    }
+
+    /// Either find any and all waiters on the wait queue, or return the mutex
+    /// guard unchanged.
+    ///
+    /// If at least one waiter is found, a `WaitGuard` is returned which will
+    /// notify all waiters when it is dropped.
+    pub fn notify_all<T>(
+        mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
+    ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
+        unsafe {
+            let mut count = 0;
+            while let Some(entry) = guard.queue.inner.pop() {
+                count += 1;
+                let mut entry_guard = entry.lock();
+                entry_guard.wake = true;
+            }
+            if let Some(count) = NonZeroUsize::new(count) {
+                Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
+            } else {
+                Err(guard)
+            }
+        }
+    }
+}
diff --git a/library/std/src/sys/sgx/waitqueue/spin_mutex.rs b/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
index 7f1a671bab4..f6e851ccadd 100644
--- a/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
+++ b/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
@@ -1,3 +1,6 @@
+//! Trivial spinlock-based implementation of `sync::Mutex`.
+// FIXME: Perhaps use Intel TSX to avoid locking?
+
 #[cfg(test)]
 mod tests;
 
diff --git a/library/std/src/sys/sgx/waitqueue/unsafe_list.rs b/library/std/src/sys/sgx/waitqueue/unsafe_list.rs
index 0834d2593fc..cf2f0886c15 100644
--- a/library/std/src/sys/sgx/waitqueue/unsafe_list.rs
+++ b/library/std/src/sys/sgx/waitqueue/unsafe_list.rs
@@ -1,3 +1,6 @@
+//! A doubly-linked list where callers are in charge of memory allocation
+//! of the nodes in the list.
+
 #[cfg(test)]
 mod tests;
 
-- 
cgit 1.4.1-3-g733a5
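
The module docs above say this queue backs the SGX condition variable and mutex.
For readers unfamiliar with the API, here is a minimal sketch of the lock-handoff
pattern, modeled on how library/std/src/sys/sgx/mutex.rs uses the queue. It is
illustrative only, not part of the commit, and assumes the items defined in
waitqueue/mod.rs above are in scope. (Placed after the signature delimiter so the
patch still applies cleanly.)

    pub struct Mutex {
        // `bool` tracks whether the mutex is currently held.
        inner: SpinMutex<WaitVariable<bool>>,
    }

    impl Mutex {
        pub const fn new() -> Mutex {
            Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
        }

        pub unsafe fn lock(&self) {
            let mut guard = self.inner.lock();
            if *guard.lock_var() {
                // Already locked: queue ourselves and sleep. `wait` consumes
                // the guard, so the spinlock is released before blocking, and
                // the unlocking thread passes the mutex to us directly.
                WaitQueue::wait(guard, || {});
            } else {
                *guard.lock_var_mut() = true;
            }
        }

        pub unsafe fn unlock(&self) {
            let guard = self.inner.lock();
            if let Err(mut guard) = WaitQueue::notify_one(guard) {
                // No waiters: actually release the mutex.
                *guard.lock_var_mut() = false;
            }
            // With a waiter, the lock variable stays `true` and the returned
            // `WaitGuard`, dropped here, sends EV_UNPARK to exactly that TCS.
        }
    }

Note the design choice this exposes: `notify_one` returning `Result` lets the
caller distinguish "handed off to a waiter" from "no waiters" without a second
queue inspection.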
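The purpose of the `before_wait` callback and of the timeout bookkeeping in
`wait_timeout` is easiest to see from the caller's side. The sketch below is
modeled on library/std/src/sys/sgx/condvar.rs and reuses the hypothetical
`Mutex` above; again a sketch under those assumptions, not the patch's code.

    pub struct Condvar {
        inner: SpinMutex<WaitVariable<()>>,
    }

    impl Condvar {
        pub const fn new() -> Condvar {
            Condvar { inner: SpinMutex::new(WaitVariable::new(())) }
        }

        pub unsafe fn wait(&self, mutex: &Mutex) {
            let guard = self.inner.lock();
            // `before_wait` runs after this thread is already on the queue,
            // so a notification sent between `mutex.unlock()` and the
            // blocking usercall sets our `wake` flag and is not lost.
            WaitQueue::wait(guard, || unsafe { mutex.unlock() });
            unsafe { mutex.lock() };
        }

        pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
            // On timeout, `wait_timeout` re-acquires the queue lock and only
            // then checks `wake` and removes the entry, so a racing notify
            // either wakes us (true) or never observes our entry at all.
            let woken = WaitQueue::wait_timeout(&self.inner, dur, || unsafe { mutex.unlock() });
            unsafe { mutex.lock() };
            woken
        }

        pub unsafe fn notify_one(&self) {
            let _ = WaitQueue::notify_one(self.inner.lock());
        }

        pub unsafe fn notify_all(&self) {
            let _ = WaitQueue::notify_all(self.inner.lock());
        }
    }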
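Finally, `WaitGuard::drop_after` looks odd in isolation. It exists for callers
that hold a second guard while notifying, e.g. when handing a lock from one
queue's waiter to another, as the SGX rwlock does: the EV_UNPARK usercall fires
in `WaitGuard`'s `Drop` impl, and it must fire only after the other guard is
released, or the woken thread could immediately contend on a spinlock the
notifier still holds. A hypothetical helper (illustrative, not from the patch):

    // Releases `other` before the notification fires, so the woken thread
    // never spins on a lock we still hold.
    fn release_then_notify<T, U>(notify: WaitGuard<'_, T>, other: U) {
        // Drops `other` first; dropping `notify` then unlocks its queue
        // and sends EV_UNPARK to the notified TCS(es).
        notify.drop_after(other);
    }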