| field | value | date |
|---|---|---|
| author | Nathan <nathan.whitaker01@gmail.com> | 2019-07-22 17:31:35 -0400 |
| committer | Nathan <nathan.whitaker01@gmail.com> | 2019-07-23 10:14:46 -0400 |
| commit | 82dd54baf3fda351abd56ee4bda9f8464da2df67 (patch) | |
| tree | 868de9600669cd44ffc32156448697bd3d937596 | |
| parent | e1e0df8a49d8c172261be1d71e33632200629cce (diff) | |
Modify CloudABI ReentrantMutex to use MaybeUninit
Remove uses of `mem::uninitialized`, which is now deprecated.
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | src/libstd/sys/cloudabi/mutex.rs | 32 |

1 file changed, 18 insertions, 14 deletions
```diff
diff --git a/src/libstd/sys/cloudabi/mutex.rs b/src/libstd/sys/cloudabi/mutex.rs
index 5e191e31d5f..d1203a35369 100644
--- a/src/libstd/sys/cloudabi/mutex.rs
+++ b/src/libstd/sys/cloudabi/mutex.rs
@@ -1,5 +1,6 @@
 use crate::cell::UnsafeCell;
 use crate::mem;
+use crate::mem::MaybeUninit;
 use crate::sync::atomic::{AtomicU32, Ordering};
 use crate::sys::cloudabi::abi;
 use crate::sys::rwlock::{self, RWLock};
@@ -47,25 +48,28 @@ impl Mutex {
 }
 
 pub struct ReentrantMutex {
-    lock: UnsafeCell<AtomicU32>,
-    recursion: UnsafeCell<u32>,
+    lock: UnsafeCell<MaybeUninit<AtomicU32>>,
+    recursion: UnsafeCell<MaybeUninit<u32>>,
 }
 
 impl ReentrantMutex {
     pub unsafe fn uninitialized() -> ReentrantMutex {
-        mem::uninitialized()
+        ReentrantMutex {
+            lock: UnsafeCell::new(MaybeUninit::uninit()),
+            recursion: UnsafeCell::new(MaybeUninit::uninit())
+        }
     }
 
     pub unsafe fn init(&mut self) {
-        self.lock = UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
-        self.recursion = UnsafeCell::new(0);
+        self.lock = UnsafeCell::new(MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)));
+        self.recursion = UnsafeCell::new(MaybeUninit::new(0));
     }
 
     pub unsafe fn try_lock(&self) -> bool {
         // Attempt to acquire the lock.
         let lock = self.lock.get();
         let recursion = self.recursion.get();
-        if let Err(old) = (*lock).compare_exchange(
+        if let Err(old) = (*(*lock).as_mut_ptr()).compare_exchange(
             abi::LOCK_UNLOCKED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             Ordering::Acquire,
@@ -74,14 +78,14 @@ impl ReentrantMutex {
             // If we fail to acquire the lock, it may be the case
             // that we've already acquired it and may need to recurse.
             if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
-                *recursion += 1;
+                *(*recursion).as_mut_ptr() += 1;
                 true
             } else {
                 false
             }
         } else {
             // Success.
-            assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
+            assert_eq!(*(*recursion).as_mut_ptr(), 0, "Mutex has invalid recursion count");
             true
         }
     }
@@ -112,14 +116,14 @@ impl ReentrantMutex {
         let lock = self.lock.get();
         let recursion = self.recursion.get();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            (*(*lock).as_mut_ptr()).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This mutex is locked by a different thread"
         );
 
-        if *recursion > 0 {
-            *recursion -= 1;
-        } else if !(*lock)
+        if *(*recursion).as_mut_ptr() > 0 {
+            *(*recursion).as_mut_ptr() -= 1;
+        } else if !(*(*lock).as_mut_ptr())
             .compare_exchange(
                 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                 abi::LOCK_UNLOCKED.0,
@@ -139,10 +143,10 @@ impl ReentrantMutex {
         let lock = self.lock.get();
        let recursion = self.recursion.get();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed),
+            (*(*lock).as_mut_ptr()).load(Ordering::Relaxed),
             abi::LOCK_UNLOCKED.0,
             "Attempted to destroy locked mutex"
         );
-        assert_eq!(*recursion, 0, "Recursion counter invalid");
+        assert_eq!(*(*recursion).as_mut_ptr(), 0, "Recursion counter invalid");
     }
 }
```
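The diff follows a common migration pattern: instead of conjuring a whole struct out of the deprecated `mem::uninitialized()`, each field is wrapped in `MaybeUninit`, filled in by `init()`, and reached through `as_mut_ptr()` afterwards. Below is a minimal, self-contained sketch of that pattern, not code from the commit; the `Lock`/`word` names and the zero initial value are illustrative assumptions.

```rust
// Minimal sketch of the field-level MaybeUninit pattern the diff adopts.
// `Lock` and `word` are hypothetical names, not types from the commit.
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicU32, Ordering};

struct Lock {
    // MaybeUninit on the field replaces mem::uninitialized() on the struct.
    word: UnsafeCell<MaybeUninit<AtomicU32>>,
}

impl Lock {
    // Produces no uninitialized *value*, only a wrapper that explicitly
    // tracks the "not yet initialized" state.
    fn uninitialized() -> Lock {
        Lock { word: UnsafeCell::new(MaybeUninit::uninit()) }
    }

    // Caller must run this exactly once before any other method.
    unsafe fn init(&mut self) {
        self.word = UnsafeCell::new(MaybeUninit::new(AtomicU32::new(0)));
    }

    // Caller must ensure init() has run; mirrors the
    // (*(*lock).as_mut_ptr()) accesses in the diff above.
    unsafe fn load(&self) -> u32 {
        (*(*self.word.get()).as_mut_ptr()).load(Ordering::Relaxed)
    }
}

fn main() {
    let mut lock = Lock::uninitialized();
    unsafe {
        lock.init();
        assert_eq!(lock.load(), 0);
    }
}
```

Keeping `MaybeUninit` on each field rather than around the whole struct means `uninitialized()` no longer touches deprecated APIs, while the existing call sites only change where they dereference, as the accesses in the diff show.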
