Diffstat (limited to 'library/std/src/sys/wasm/atomics/mutex.rs')
-rw-r--r-- library/std/src/sys/wasm/atomics/mutex.rs | 94
1 file changed, 1 insertion(+), 93 deletions(-)
diff --git a/library/std/src/sys/wasm/atomics/mutex.rs b/library/std/src/sys/wasm/atomics/mutex.rs
index 3a09f0bf9bb..1acc8392444 100644
--- a/library/std/src/sys/wasm/atomics/mutex.rs
+++ b/library/std/src/sys/wasm/atomics/mutex.rs
@@ -1,8 +1,6 @@
 use crate::arch::wasm32;
-use crate::cell::UnsafeCell;
 use crate::mem;
-use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
-use crate::sys::thread;
+use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
 
 pub struct Mutex {
     locked: AtomicUsize,
@@ -64,93 +62,3 @@ impl Mutex {
         self.locked.as_mut_ptr() as *mut i32
     }
 }
-
-pub struct ReentrantMutex {
-    owner: AtomicU32,
-    recursions: UnsafeCell<u32>,
-}
-
-unsafe impl Send for ReentrantMutex {}
-unsafe impl Sync for ReentrantMutex {}
-
-// Reentrant mutexes are implemented similarly to the mutexes above, except
-// that instead of "1" meaning locked we use the id of a thread to represent
-// whether it has locked a mutex. That way we have an atomic counter which
-// always holds the id of the thread that currently holds the lock (or 0 if
-// the lock is unlocked).
-//
-// Once a thread acquires a lock recursively, which it detects by looking at
-// the value that's already there, it will update a local `recursions` counter
-// in a nonatomic fashion (as we hold the lock). The lock is then fully
-// released when this recursion counter reaches 0.
-
-impl ReentrantMutex {
-    pub const unsafe fn uninitialized() -> ReentrantMutex {
-        ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
-    }
-
-    pub unsafe fn init(&self) {
-        // nothing to do...
-    }
-
-    pub unsafe fn lock(&self) {
-        let me = thread::my_id();
-        while let Err(owner) = self._try_lock(me) {
-            // SAFETY: the caller must guarantee that `self.ptr()` and `owner` are valid i32.
-            let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
-            debug_assert!(val == 0 || val == 1);
-        }
-    }
-
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        unsafe { self._try_lock(thread::my_id()).is_ok() }
-    }
-
-    #[inline]
-    unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
-        let id = id.checked_add(1).unwrap();
-        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
-            // we transitioned from unlocked to locked
-            Ok(_) => {
-                debug_assert_eq!(*self.recursions.get(), 0);
-                Ok(())
-            }
-
-            // we currently own this lock, so update our recursion count and
-            // report success.
-            Err(n) if n == id => {
-                *self.recursions.get() += 1;
-                Ok(())
-            }
-
-            // Someone else owns the lock, let our caller take care of it
-            Err(other) => Err(other),
-        }
-    }
-
-    pub unsafe fn unlock(&self) {
-        // If we didn't ever recursively lock the lock then we fully unlock the
-        // mutex and wake up a waiter, if any. Otherwise we decrement our
-        // recursion counter and let someone else take care of the zero.
-        match *self.recursions.get() {
-            0 => {
-                self.owner.swap(0, SeqCst);
-                // SAFETY: the caller must guarantee that `self.ptr()` is valid i32.
-                unsafe {
-                    wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
-                } // wake up one waiter, if any
-            }
-            ref mut n => *n -= 1,
-        }
-    }
-
-    pub unsafe fn destroy(&self) {
-        // nothing to do...
-    }
-
-    #[inline]
-    fn ptr(&self) -> *mut i32 {
-        self.owner.as_mut_ptr() as *mut i32
-    }
-}
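
For reference, the scheme described in the removed comments can be reproduced outside of std. The sketch below is not the deleted code: it is a portable approximation that spins instead of blocking on the owner word with memory.atomic.wait32 / memory.atomic.notify, uses a plain AtomicUsize rather than AtomicU32, and derives a nonzero per-thread id from the address of a thread-local instead of calling sys::thread::my_id(). The SpinReentrantMutex name and the my_id helper are invented for this example.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

// `SpinReentrantMutex` is a hypothetical type for illustration; it is not
// part of std or of the code removed above.
pub struct SpinReentrantMutex {
    owner: AtomicUsize,          // 0 = unlocked, otherwise the owning thread's id
    recursions: UnsafeCell<u32>, // only touched by the thread that holds `owner`
}

unsafe impl Send for SpinReentrantMutex {}
unsafe impl Sync for SpinReentrantMutex {}

impl SpinReentrantMutex {
    pub const fn new() -> Self {
        Self { owner: AtomicUsize::new(0), recursions: UnsafeCell::new(0) }
    }

    // Stand-in for `sys::thread::my_id()`: the address of a thread-local is
    // stable for the thread's lifetime and is never zero.
    fn my_id() -> usize {
        thread_local!(static MARKER: u8 = 0);
        MARKER.with(|m| m as *const u8 as usize)
    }

    pub fn lock(&self) {
        let me = Self::my_id();
        loop {
            match self.owner.compare_exchange(0, me, SeqCst, SeqCst) {
                // We transitioned from unlocked to locked.
                Ok(_) => return,
                // We already hold the lock: bump the recursion counter. The
                // non-atomic update is fine because only the owner touches it.
                Err(n) if n == me => {
                    unsafe { *self.recursions.get() += 1 };
                    return;
                }
                // Someone else owns the lock; the deleted code would block on
                // the owner word with `memory.atomic.wait32` here.
                Err(_) => std::hint::spin_loop(),
            }
        }
    }

    pub fn unlock(&self) {
        assert_eq!(self.owner.load(SeqCst), Self::my_id(), "unlock by non-owner");
        unsafe {
            match *self.recursions.get() {
                // Outermost unlock: clear the owner word. The deleted code
                // followed this with `memory.atomic.notify` to wake one waiter.
                0 => self.owner.store(0, SeqCst),
                // Still locked recursively: just decrement the counter.
                ref mut n => *n -= 1,
            }
        }
    }
}

Nested lock()/unlock() calls on the owning thread balance out through the recursions counter; only the outermost unlock clears the owner word, which is the point at which the deleted wasm code woke a single waiter.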