about summary refs log tree commit diff
path: root/library/std/src/sys/unix/locks/futex.rs
diff options
context:
space:
mode:
Diffstat (limited to 'library/std/src/sys/unix/locks/futex.rs')
-rw-r--r--library/std/src/sys/unix/locks/futex.rs98
1 file changed, 1 insertion, 97 deletions
diff --git a/library/std/src/sys/unix/locks/futex.rs b/library/std/src/sys/unix/locks/futex.rs
index b166e7c453c..7a63af1ad7c 100644
--- a/library/std/src/sys/unix/locks/futex.rs
+++ b/library/std/src/sys/unix/locks/futex.rs
@@ -1,6 +1,5 @@
-use crate::cell::UnsafeCell;
 use crate::sync::atomic::{
-    AtomicU32, AtomicUsize,
+    AtomicU32,
     Ordering::{Acquire, Relaxed, Release},
 };
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
@@ -163,98 +162,3 @@ impl Condvar {
         r
     }
 }
-
-/// A reentrant mutex. Used by stdout().lock() and friends.
-///
-/// The 'owner' field tracks which thread has locked the mutex.
-///
-/// We use current_thread_unique_ptr() as the thread identifier,
-/// which is just the address of a thread local variable.
-///
-/// If `owner` is set to the identifier of the current thread,
-/// we assume the mutex is already locked and instead of locking it again,
-/// we increment `lock_count`.
-///
-/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
-/// it reaches zero.
-///
-/// `lock_count` is protected by the mutex and only accessed by the thread that has
-/// locked the mutex, so needs no synchronization.
-///
-/// `owner` can be checked by other threads that want to see if they already
-/// hold the lock, so needs to be atomic. If it compares equal, we're on the
-/// same thread that holds the mutex and memory access can use relaxed ordering
-/// since we're not dealing with multiple threads. If it compares unequal,
-/// synchronization is left to the mutex, making relaxed memory ordering for
-/// the `owner` field fine in all cases.
-pub struct ReentrantMutex {
-    mutex: Mutex,
-    owner: AtomicUsize,
-    lock_count: UnsafeCell<u32>,
-}
-
-unsafe impl Send for ReentrantMutex {}
-unsafe impl Sync for ReentrantMutex {}
-
-impl ReentrantMutex {
-    #[inline]
-    pub const unsafe fn uninitialized() -> Self {
-        Self { mutex: Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0) }
-    }
-
-    #[inline]
-    pub unsafe fn init(&self) {}
-
-    #[inline]
-    pub unsafe fn destroy(&self) {}
-
-    pub unsafe fn try_lock(&self) -> bool {
-        let this_thread = current_thread_unique_ptr();
-        if self.owner.load(Relaxed) == this_thread {
-            self.increment_lock_count();
-            true
-        } else if self.mutex.try_lock() {
-            self.owner.store(this_thread, Relaxed);
-            debug_assert_eq!(*self.lock_count.get(), 0);
-            *self.lock_count.get() = 1;
-            true
-        } else {
-            false
-        }
-    }
-
-    pub unsafe fn lock(&self) {
-        let this_thread = current_thread_unique_ptr();
-        if self.owner.load(Relaxed) == this_thread {
-            self.increment_lock_count();
-        } else {
-            self.mutex.lock();
-            self.owner.store(this_thread, Relaxed);
-            debug_assert_eq!(*self.lock_count.get(), 0);
-            *self.lock_count.get() = 1;
-        }
-    }
-
-    unsafe fn increment_lock_count(&self) {
-        *self.lock_count.get() = (*self.lock_count.get())
-            .checked_add(1)
-            .expect("lock count overflow in reentrant mutex");
-    }
-
-    pub unsafe fn unlock(&self) {
-        *self.lock_count.get() -= 1;
-        if *self.lock_count.get() == 0 {
-            self.owner.store(0, Relaxed);
-            self.mutex.unlock();
-        }
-    }
-}
-
-/// Get an address that is unique per running thread.
-///
-/// This can be used as a non-null usize-sized ID.
-pub fn current_thread_unique_ptr() -> usize {
-    // Use a non-drop type to make sure it's still available during thread destruction.
-    thread_local! { static X: u8 = const { 0 } }
-    X.with(|x| <*const _>::addr(x))
-}