| field | value | date |
|---|---|---|
| author | joboet <jonasboettiger@icloud.com> | 2024-10-01 14:57:38 +0200 |
| committer | joboet <jonasboettiger@icloud.com> | 2024-10-01 22:05:35 +0200 |
| commit | c1acccdf1744561d4dda9b943fa91d873cea3b40 (patch) | |
| tree | 58a5bfa4ba8d6644632941fcbffd3b09947be91c /library/std/src/sys/sync/once_box.rs | |
| parent | 21aa500bb050a6aca30d80b1eeb0cb4e1974d57d (diff) | |
| download | rust-c1acccdf1744561d4dda9b943fa91d873cea3b40.tar.gz rust-c1acccdf1744561d4dda9b943fa91d873cea3b40.zip | |
std: replace `LazyBox` with `OnceBox`
This PR replaces the `LazyBox` wrapper used to allocate the pthread primitives with `OnceBox`, which has a more familiar API mirroring that of `OnceLock`. This cleans up the code in preparation for larger changes like #128184 (from which this PR was split) and allows some neat optimizations, such as avoiding an acquire-load of the allocation pointer in `Mutex::unlock`, where the initialization of the allocation must already have been observed. Additionally, I've gotten rid of the TEEOS `Condvar` code; it was just a duplicate of the pthread version anyway, and I didn't want to repeat myself.
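To make the `Mutex::unlock` point concrete, here is a minimal sketch of how a pthread-style mutex can use the `OnceBox` API from the diff below. The `AllocatedMutex` type and the lock/unlock bodies are hypothetical stand-ins for illustration, not std's actual implementation:

```rust
// Hypothetical wrapper around a heap-allocated pthread mutex; the real
// type in std differs. This only illustrates the memory-ordering point.
struct AllocatedMutex { /* UnsafeCell<libc::pthread_mutex_t>, ... */ }

pub struct Mutex {
    inner: OnceBox<AllocatedMutex>,
}

impl Mutex {
    pub fn lock(&self) {
        // First use initializes (or observes) the allocation. `get_or_init`
        // performs an acquire load so that the contents of the Box are
        // visible to this thread before it touches them.
        let m = self.inner.get_or_init(|| Box::new(AllocatedMutex { /* ... */ }));
        // ... pthread_mutex_lock on `m` ...
    }

    pub unsafe fn unlock(&self) {
        // Unlock is only valid after a successful `lock` on this thread, so
        // the initialization has already been observed here. That makes the
        // relaxed load in `get_unchecked` sufficient, skipping the
        // acquire-load that `get_or_init` would perform.
        let m = unsafe { self.inner.get_unchecked() };
        // ... pthread_mutex_unlock on `m` ...
    }
}
```

The saving is small but free: a thread that holds the lock has already synchronized with the allocation, so re-checking the pointer with acquire ordering would be redundant.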
Diffstat (limited to 'library/std/src/sys/sync/once_box.rs')

| mode | file | lines |
|---|---|---|
| -rw-r--r-- | library/std/src/sys/sync/once_box.rs | 82 |

1 file changed, 82 insertions, 0 deletions
```diff
diff --git a/library/std/src/sys/sync/once_box.rs b/library/std/src/sys/sync/once_box.rs
new file mode 100644
index 00000000000..1422b5a1721
--- /dev/null
+++ b/library/std/src/sys/sync/once_box.rs
@@ -0,0 +1,82 @@
+//! A racily-initialized alternative to `OnceLock<Box<T>>`.
+//!
+//! This is used to implement synchronization primitives that need allocation,
+//! like the pthread versions.
+
+#![allow(dead_code)] // Only used on some platforms.
+
+use crate::mem::replace;
+use crate::ptr::null_mut;
+use crate::sync::atomic::AtomicPtr;
+use crate::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
+
+pub(crate) struct OnceBox<T> {
+    ptr: AtomicPtr<T>,
+}
+
+impl<T> OnceBox<T> {
+    #[inline]
+    pub const fn new() -> Self {
+        Self { ptr: AtomicPtr::new(null_mut()) }
+    }
+
+    /// Gets access to the value, assuming it is already initialized and this
+    /// initialization has been observed by the current thread.
+    ///
+    /// Since all modifications to the pointer have already been observed, the
+    /// pointer load in this function can be performed with relaxed ordering,
+    /// potentially allowing the optimizer to turn code like this:
+    /// ```rust, ignore
+    /// once_box.get_or_init(|| Box::new(42));
+    /// unsafe { once_box.get_unchecked() }
+    /// ```
+    /// into
+    /// ```rust, ignore
+    /// once_box.get_or_init(|| Box::new(42))
+    /// ```
+    ///
+    /// # Safety
+    /// This causes undefined behaviour if the assumption above is violated.
+    #[inline]
+    pub unsafe fn get_unchecked(&self) -> &T {
+        unsafe { &*self.ptr.load(Relaxed) }
+    }
+
+    #[inline]
+    pub fn get_or_init(&self, f: impl FnOnce() -> Box<T>) -> &T {
+        let ptr = self.ptr.load(Acquire);
+        match unsafe { ptr.as_ref() } {
+            Some(val) => val,
+            None => self.initialize(f),
+        }
+    }
+
+    #[inline]
+    pub fn take(&mut self) -> Option<Box<T>> {
+        let ptr = replace(self.ptr.get_mut(), null_mut());
+        if !ptr.is_null() { Some(unsafe { Box::from_raw(ptr) }) } else { None }
+    }
+
+    #[cold]
+    fn initialize(&self, f: impl FnOnce() -> Box<T>) -> &T {
+        let new_ptr = Box::into_raw(f());
+        match self.ptr.compare_exchange(null_mut(), new_ptr, AcqRel, Acquire) {
+            Ok(_) => unsafe { &*new_ptr },
+            Err(ptr) => {
+                // Lost the race to another thread.
+                // Drop the value we created, and use the one from the other thread instead.
+                drop(unsafe { Box::from_raw(new_ptr) });
+                unsafe { &*ptr }
+            }
+        }
+    }
+}
+
+unsafe impl<T: Send> Send for OnceBox<T> {}
+unsafe impl<T: Send + Sync> Sync for OnceBox<T> {}
+
+impl<T> Drop for OnceBox<T> {
+    fn drop(&mut self) {
+        self.take();
+    }
+}
```
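As a usage note, the contract of `get_or_init` is racier than `OnceLock`'s: several threads may all run the closure and allocate, but exactly one allocation wins the `compare_exchange` while the losers are freed on the spot. A minimal sketch of that behaviour (illustrative only, since `OnceBox` is private to std):

```rust
fn demo(once: &OnceBox<u32>) {
    std::thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                // Every thread may run the closure and allocate, but all of
                // them receive a reference to the single winning allocation;
                // losing Boxes are dropped inside the cold `initialize` path.
                let v = once.get_or_init(|| Box::new(42));
                assert_eq!(*v, 42);
            });
        }
    });
}
```

This is fine for the pthread primitives, where the closure has no side effects beyond the allocation itself; it would not be acceptable for a general-purpose `OnceLock`, which guarantees the initializer runs exactly once.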
