Diffstat (limited to 'library/std/src/sys/pal/unix')
-rw-r--r--  library/std/src/sys/pal/unix/locks/fuchsia_mutex.rs    164
-rw-r--r--  library/std/src/sys/pal/unix/locks/futex_condvar.rs     56
-rw-r--r--  library/std/src/sys/pal/unix/locks/futex_mutex.rs       96
-rw-r--r--  library/std/src/sys/pal/unix/locks/futex_rwlock.rs     320
-rw-r--r--  library/std/src/sys/pal/unix/locks/mod.rs               31
-rw-r--r--  library/std/src/sys/pal/unix/locks/pthread_condvar.rs  206
-rw-r--r--  library/std/src/sys/pal/unix/locks/pthread_mutex.rs    148
-rw-r--r--  library/std/src/sys/pal/unix/locks/queue_rwlock.rs     557
-rw-r--r--  library/std/src/sys/pal/unix/mod.rs                      1
9 files changed, 0 insertions, 1579 deletions
diff --git a/library/std/src/sys/pal/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/pal/unix/locks/fuchsia_mutex.rs
deleted file mode 100644
index 5d89e5a13fd..00000000000
--- a/library/std/src/sys/pal/unix/locks/fuchsia_mutex.rs
+++ /dev/null
@@ -1,164 +0,0 @@
-//! A priority inheriting mutex for Fuchsia.
-//!
-//! This is a port of the [mutex in Fuchsia's libsync]. Unlike the original,
-//! it does not abort the process when reentrant locking is detected; instead, it deadlocks.
-//!
-//! Priority inheritance is achieved by storing the owning thread's handle in an
-//! atomic variable. Fuchsia's futex operations support setting an owner thread
-//! for a futex, which can boost that thread's priority while the futex is waited
-//! upon.
-//!
-//! libsync is licensed under the following BSD-style license:
-//!
-//! Copyright 2016 The Fuchsia Authors.
-//!
-//! Redistribution and use in source and binary forms, with or without
-//! modification, are permitted provided that the following conditions are
-//! met:
-//!
-//!    * Redistributions of source code must retain the above copyright
-//!      notice, this list of conditions and the following disclaimer.
-//!    * Redistributions in binary form must reproduce the above
-//!      copyright notice, this list of conditions and the following
-//!      disclaimer in the documentation and/or other materials provided
-//!      with the distribution.
-//!
-//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-//! "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-//! LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-//! A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-//! OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-//! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-//! LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-//! DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-//! THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-//! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//!
-//! [mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c
-
-use crate::sync::atomic::{
-    AtomicU32,
-    Ordering::{Acquire, Relaxed, Release},
-};
-use crate::sys::futex::zircon::{
-    zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t, zx_thread_self, ZX_ERR_BAD_HANDLE,
-    ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE, ZX_OK,
-    ZX_TIME_INFINITE,
-};
-
-// The lowest two bits of a `zx_handle_t` are always set, so the lowest bit is used to mark the
-// mutex as contested by clearing it.
-const CONTESTED_BIT: u32 = 1;
-// This can never be a valid `zx_handle_t`.
-const UNLOCKED: u32 = 0;
-
-pub struct Mutex {
-    futex: AtomicU32,
-}
-
-#[inline]
-fn to_state(owner: zx_handle_t) -> u32 {
-    owner
-}
-
-#[inline]
-fn to_owner(state: u32) -> zx_handle_t {
-    state | CONTESTED_BIT
-}
-
-#[inline]
-fn is_contested(state: u32) -> bool {
-    state & CONTESTED_BIT == 0
-}
-
-#[inline]
-fn mark_contested(state: u32) -> u32 {
-    state & !CONTESTED_BIT
-}
-
-impl Mutex {
-    #[inline]
-    pub const fn new() -> Mutex {
-        Mutex { futex: AtomicU32::new(UNLOCKED) }
-    }
-
-    #[inline]
-    pub fn try_lock(&self) -> bool {
-        let thread_self = unsafe { zx_thread_self() };
-        self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
-    }
-
-    #[inline]
-    pub fn lock(&self) {
-        let thread_self = unsafe { zx_thread_self() };
-        if let Err(state) =
-            self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
-        {
-            unsafe {
-                self.lock_contested(state, thread_self);
-            }
-        }
-    }
-
-    /// # Safety
-    /// `thread_self` must be the handle for the current thread.
-    #[cold]
-    unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
-        let owned_state = mark_contested(to_state(thread_self));
-        loop {
-            // Mark the mutex as contested if it is not already.
-            let contested = mark_contested(state);
-            if is_contested(state)
-                || self.futex.compare_exchange(state, contested, Relaxed, Relaxed).is_ok()
-            {
-                // The mutex has been marked as contested, wait for the state to change.
-                unsafe {
-                    match zx_futex_wait(
-                        &self.futex,
-                        AtomicU32::new(contested),
-                        to_owner(state),
-                        ZX_TIME_INFINITE,
-                    ) {
-                        ZX_OK | ZX_ERR_BAD_STATE | ZX_ERR_TIMED_OUT => (),
-                        // Note that if a thread handle is reused after its associated thread
-                        // exits without unlocking the mutex, an arbitrary thread's priority
-                        // could be boosted by the wait, but there is currently no way to
-                        // prevent that.
-                        ZX_ERR_INVALID_ARGS | ZX_ERR_BAD_HANDLE | ZX_ERR_WRONG_TYPE => {
-                            panic!(
-                                "either the current thread is trying to lock a mutex it has
-                                already locked, or the previous owner did not unlock the mutex
-                                before exiting"
-                            )
-                        }
-                        error => panic!("unexpected error in zx_futex_wait: {error}"),
-                    }
-                }
-            }
-
-            // The state has changed or a wakeup occurred, try to lock the mutex.
-            match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) {
-                Ok(_) => return,
-                Err(updated) => state = updated,
-            }
-        }
-    }
-
-    #[inline]
-    pub unsafe fn unlock(&self) {
-        if is_contested(self.futex.swap(UNLOCKED, Release)) {
-            // The woken thread will mark the mutex as contested again when it
-            // locks it, so the next unlock will return here and wake another
-            // waiter, until there are no waiters left, at which point the wake
-            // is a no-op.
-            self.wake();
-        }
-    }
-
-    #[cold]
-    fn wake(&self) {
-        unsafe {
-            zx_futex_wake_single_owner(&self.futex);
-        }
-    }
-}
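
The handle encoding used by `to_state`, `to_owner`, `is_contested`, and `mark_contested` above can be illustrated with a small standalone sketch. The handle value below is made up purely for illustration; real `zx_handle_t` values come from the Fuchsia kernel, but always have their two lowest bits set, which is what the encoding relies on:

```rust
const CONTESTED_BIT: u32 = 1;
const UNLOCKED: u32 = 0;

fn main() {
    // Hypothetical thread handle; on Fuchsia the two lowest bits of a
    // valid zx_handle_t are always set.
    let owner_handle: u32 = 0xABCD_0003;
    assert_eq!(owner_handle & 0b11, 0b11);

    // Uncontended lock: the state is simply the owner's handle.
    let locked = owner_handle;
    assert_ne!(locked, UNLOCKED);
    assert_ne!(locked & CONTESTED_BIT, 0); // not marked contested

    // A waiter clears the lowest bit to mark the mutex as contested...
    let contested = locked & !CONTESTED_BIT;
    assert_eq!(contested & CONTESTED_BIT, 0); // contested

    // ...and the owner handle is still recoverable for priority
    // inheritance, because that bit is known to be 1 in any valid handle.
    assert_eq!(contested | CONTESTED_BIT, owner_handle);
}
```

Clearing a bit that is known to be set leaves a distinct "contested" marker while keeping the owner handle recoverable, which is what allows `zx_futex_wait` to be told which thread's priority to boost.
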
diff --git a/library/std/src/sys/pal/unix/locks/futex_condvar.rs b/library/std/src/sys/pal/unix/locks/futex_condvar.rs
deleted file mode 100644
index 4bd65dd25c2..00000000000
--- a/library/std/src/sys/pal/unix/locks/futex_condvar.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-use super::Mutex;
-use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
-use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
-use crate::time::Duration;
-
-pub struct Condvar {
-    // The value of this atomic is simply incremented on every notification.
-    // This is used by `.wait()` to not miss any notifications after
-    // unlocking the mutex and before waiting for notifications.
-    futex: AtomicU32,
-}
-
-impl Condvar {
-    #[inline]
-    pub const fn new() -> Self {
-        Self { futex: AtomicU32::new(0) }
-    }
-
-    // All the memory orderings here are `Relaxed`,
-    // because synchronization is done by unlocking and locking the mutex.
-
-    pub fn notify_one(&self) {
-        self.futex.fetch_add(1, Relaxed);
-        futex_wake(&self.futex);
-    }
-
-    pub fn notify_all(&self) {
-        self.futex.fetch_add(1, Relaxed);
-        futex_wake_all(&self.futex);
-    }
-
-    pub unsafe fn wait(&self, mutex: &Mutex) {
-        self.wait_optional_timeout(mutex, None);
-    }
-
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, timeout: Duration) -> bool {
-        self.wait_optional_timeout(mutex, Some(timeout))
-    }
-
-    unsafe fn wait_optional_timeout(&self, mutex: &Mutex, timeout: Option<Duration>) -> bool {
-        // Examine the notification counter _before_ we unlock the mutex.
-        let futex_value = self.futex.load(Relaxed);
-
-        // Unlock the mutex before going to sleep.
-        mutex.unlock();
-
-        // Wait, but only if there hasn't been any
-        // notification since we unlocked the mutex.
-        let r = futex_wait(&self.futex, futex_value, timeout);
-
-        // Lock the mutex again.
-        mutex.lock();
-
-        r
-    }
-}
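
A minimal user-level sketch of the protocol this condvar supports, written against the public `std::sync` API (the inline comments describe what the futex implementation above does internally). Because callers always re-check their predicate in a loop, a wakeup caused by an unrelated increment of the notification counter is harmless:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cv) = &*pair2;
        *lock.lock().unwrap() = true;
        // Internally: bump the notification counter, then futex_wake.
        cv.notify_one();
    });

    let (lock, cv) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        // Internally: read the counter, unlock the mutex, futex_wait on the
        // counter (skipping the sleep if it already changed), then relock.
        ready = cv.wait(ready).unwrap();
    }
    assert!(*ready);
}
```
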
diff --git a/library/std/src/sys/pal/unix/locks/futex_mutex.rs b/library/std/src/sys/pal/unix/locks/futex_mutex.rs
deleted file mode 100644
index c01229586c3..00000000000
--- a/library/std/src/sys/pal/unix/locks/futex_mutex.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-use crate::sync::atomic::{
-    AtomicU32,
-    Ordering::{Acquire, Relaxed, Release},
-};
-use crate::sys::futex::{futex_wait, futex_wake};
-
-pub struct Mutex {
-    /// 0: unlocked
-    /// 1: locked, no other threads waiting
-    /// 2: locked, and other threads waiting (contended)
-    futex: AtomicU32,
-}
-
-impl Mutex {
-    #[inline]
-    pub const fn new() -> Self {
-        Self { futex: AtomicU32::new(0) }
-    }
-
-    #[inline]
-    pub fn try_lock(&self) -> bool {
-        self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
-    }
-
-    #[inline]
-    pub fn lock(&self) {
-        if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
-            self.lock_contended();
-        }
-    }
-
-    #[cold]
-    fn lock_contended(&self) {
-        // Spin first to speed things up if the lock is released quickly.
-        let mut state = self.spin();
-
-        // If it's unlocked now, attempt to take the lock
-        // without marking it as contended.
-        if state == 0 {
-            match self.futex.compare_exchange(0, 1, Acquire, Relaxed) {
-                Ok(_) => return, // Locked!
-                Err(s) => state = s,
-            }
-        }
-
-        loop {
-            // Put the lock in contended state.
-            // We avoid an unnecessary write if it is already set to 2,
-            // to be friendlier for the caches.
-            if state != 2 && self.futex.swap(2, Acquire) == 0 {
-                // We changed it from 0 to 2, so we just successfully locked it.
-                return;
-            }
-
-            // Wait for the futex to change state, assuming it is still 2.
-            futex_wait(&self.futex, 2, None);
-
-            // Spin again after waking up.
-            state = self.spin();
-        }
-    }
-
-    fn spin(&self) -> u32 {
-        let mut spin = 100;
-        loop {
-            // We only use `load` (and not `swap` or `compare_exchange`)
-            // while spinning, to be easier on the caches.
-            let state = self.futex.load(Relaxed);
-
-            // We stop spinning when the mutex is unlocked (0),
-            // but also when it's contended (2).
-            if state != 1 || spin == 0 {
-                return state;
-            }
-
-            crate::hint::spin_loop();
-            spin -= 1;
-        }
-    }
-
-    #[inline]
-    pub unsafe fn unlock(&self) {
-        if self.futex.swap(0, Release) == 2 {
-            // We only wake up one thread. When that thread locks the mutex, it
-            // will mark the mutex as contended (2) (see lock_contended above),
-            // which makes sure that any other waiting threads will also be
-            // woken up eventually.
-            self.wake();
-        }
-    }
-
-    #[cold]
-    fn wake(&self) {
-        futex_wake(&self.futex);
-    }
-}
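
The three-state protocol above can be exercised without the futex syscalls. The following is an illustrative sketch of the state transitions only; a real waiter would call `futex_wait` instead of just observing the result of the swap:

```rust
use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};

const UNLOCKED: u32 = 0;
const LOCKED: u32 = 1;    // locked, no other threads waiting
const CONTENDED: u32 = 2; // locked, other threads may be sleeping

fn main() {
    let futex = AtomicU32::new(UNLOCKED);

    // Fast path: a single compare_exchange takes the uncontended lock.
    assert!(futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed).is_ok());

    // A second locker fails, then marks the state as contended before
    // sleeping, so the holder knows somebody must be woken on unlock.
    assert!(futex.compare_exchange(UNLOCKED, LOCKED, Acquire, Relaxed).is_err());
    let previous = futex.swap(CONTENDED, Acquire);
    assert_eq!(previous, LOCKED); // still held, so the second locker must wait

    // Unlock: only a previous value of 2 requires a futex_wake call.
    let must_wake = futex.swap(UNLOCKED, Release) == CONTENDED;
    assert!(must_wake);
}
```
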
diff --git a/library/std/src/sys/pal/unix/locks/futex_rwlock.rs b/library/std/src/sys/pal/unix/locks/futex_rwlock.rs
deleted file mode 100644
index aa0de900238..00000000000
--- a/library/std/src/sys/pal/unix/locks/futex_rwlock.rs
+++ /dev/null
@@ -1,320 +0,0 @@
-use crate::sync::atomic::{
-    AtomicU32,
-    Ordering::{Acquire, Relaxed, Release},
-};
-use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
-
-pub struct RwLock {
-    // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
-    // Bits 0..30:
-    //   0: Unlocked
-    //   1..=0x3FFF_FFFE: Locked by N readers
-    //   0x3FFF_FFFF: Write locked
-    // Bit 30: Readers are waiting on this futex.
-    // Bit 31: Writers are waiting on the writer_notify futex.
-    state: AtomicU32,
-    // The 'condition variable' to notify writers through.
-    // Incremented on every signal.
-    writer_notify: AtomicU32,
-}
-
-const READ_LOCKED: u32 = 1;
-const MASK: u32 = (1 << 30) - 1;
-const WRITE_LOCKED: u32 = MASK;
-const MAX_READERS: u32 = MASK - 1;
-const READERS_WAITING: u32 = 1 << 30;
-const WRITERS_WAITING: u32 = 1 << 31;
-
-#[inline]
-fn is_unlocked(state: u32) -> bool {
-    state & MASK == 0
-}
-
-#[inline]
-fn is_write_locked(state: u32) -> bool {
-    state & MASK == WRITE_LOCKED
-}
-
-#[inline]
-fn has_readers_waiting(state: u32) -> bool {
-    state & READERS_WAITING != 0
-}
-
-#[inline]
-fn has_writers_waiting(state: u32) -> bool {
-    state & WRITERS_WAITING != 0
-}
-
-#[inline]
-fn is_read_lockable(state: u32) -> bool {
-    // This also returns false if the counter could overflow if we tried to read lock it.
-    //
-    // We don't allow read-locking if there are readers waiting, even if the lock is unlocked
-    // and there are no writers waiting. The only situation in which this happens is after unlocking,
-    // at which point the unlocking thread might be waking up writers, which have priority over readers.
-    // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
-    state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
-}
-
-#[inline]
-fn has_reached_max_readers(state: u32) -> bool {
-    state & MASK == MAX_READERS
-}
-
-impl RwLock {
-    #[inline]
-    pub const fn new() -> Self {
-        Self { state: AtomicU32::new(0), writer_notify: AtomicU32::new(0) }
-    }
-
-    #[inline]
-    pub fn try_read(&self) -> bool {
-        self.state
-            .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
-            .is_ok()
-    }
-
-    #[inline]
-    pub fn read(&self) {
-        let state = self.state.load(Relaxed);
-        if !is_read_lockable(state)
-            || self
-                .state
-                .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
-                .is_err()
-        {
-            self.read_contended();
-        }
-    }
-
-    #[inline]
-    pub unsafe fn read_unlock(&self) {
-        let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED;
-
-        // It's impossible for a reader to be waiting on a read-locked RwLock,
-        // except if there is also a writer waiting.
-        debug_assert!(!has_readers_waiting(state) || has_writers_waiting(state));
-
-        // Wake up a writer if we were the last reader and there's a writer waiting.
-        if is_unlocked(state) && has_writers_waiting(state) {
-            self.wake_writer_or_readers(state);
-        }
-    }
-
-    #[cold]
-    fn read_contended(&self) {
-        let mut state = self.spin_read();
-
-        loop {
-            // If we can lock it, lock it.
-            if is_read_lockable(state) {
-                match self.state.compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
-                {
-                    Ok(_) => return, // Locked!
-                    Err(s) => {
-                        state = s;
-                        continue;
-                    }
-                }
-            }
-
-            // Check for overflow.
-            if has_reached_max_readers(state) {
-                panic!("too many active read locks on RwLock");
-            }
-
-            // Make sure the readers waiting bit is set before we go to sleep.
-            if !has_readers_waiting(state) {
-                if let Err(s) =
-                    self.state.compare_exchange(state, state | READERS_WAITING, Relaxed, Relaxed)
-                {
-                    state = s;
-                    continue;
-                }
-            }
-
-            // Wait for the state to change.
-            futex_wait(&self.state, state | READERS_WAITING, None);
-
-            // Spin again after waking up.
-            state = self.spin_read();
-        }
-    }
-
-    #[inline]
-    pub fn try_write(&self) -> bool {
-        self.state
-            .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
-            .is_ok()
-    }
-
-    #[inline]
-    pub fn write(&self) {
-        if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
-            self.write_contended();
-        }
-    }
-
-    #[inline]
-    pub unsafe fn write_unlock(&self) {
-        let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED;
-
-        debug_assert!(is_unlocked(state));
-
-        if has_writers_waiting(state) || has_readers_waiting(state) {
-            self.wake_writer_or_readers(state);
-        }
-    }
-
-    #[cold]
-    fn write_contended(&self) {
-        let mut state = self.spin_write();
-
-        let mut other_writers_waiting = 0;
-
-        loop {
-            // If it's unlocked, we try to lock it.
-            if is_unlocked(state) {
-                match self.state.compare_exchange_weak(
-                    state,
-                    state | WRITE_LOCKED | other_writers_waiting,
-                    Acquire,
-                    Relaxed,
-                ) {
-                    Ok(_) => return, // Locked!
-                    Err(s) => {
-                        state = s;
-                        continue;
-                    }
-                }
-            }
-
-            // Set the waiting bit indicating that we're waiting on it.
-            if !has_writers_waiting(state) {
-                if let Err(s) =
-                    self.state.compare_exchange(state, state | WRITERS_WAITING, Relaxed, Relaxed)
-                {
-                    state = s;
-                    continue;
-                }
-            }
-
-            // Other writers might be waiting now too, so we should make sure
-            // we keep that bit on once we manage to lock it.
-            other_writers_waiting = WRITERS_WAITING;
-
-            // Examine the notification counter before we check if `state` has changed,
-            // to make sure we don't miss any notifications.
-            let seq = self.writer_notify.load(Acquire);
-
-            // Don't go to sleep if the lock has become available,
-            // or if the writers waiting bit is no longer set.
-            state = self.state.load(Relaxed);
-            if is_unlocked(state) || !has_writers_waiting(state) {
-                continue;
-            }
-
-            // Wait for the state to change.
-            futex_wait(&self.writer_notify, seq, None);
-
-            // Spin again after waking up.
-            state = self.spin_write();
-        }
-    }
-
-    /// Wake up waiting threads after unlocking.
-    ///
-    /// If both are waiting, this will wake up only one writer, but will fall
-    /// back to waking up readers if there was no writer to wake up.
-    #[cold]
-    fn wake_writer_or_readers(&self, mut state: u32) {
-        assert!(is_unlocked(state));
-
-        // The readers waiting bit might be turned on at any point now,
-        // since readers will block when there's anything waiting.
-        // Writers will just lock the lock though, regardless of the waiting bits,
-        // so we don't have to worry about the writer waiting bit.
-        //
-        // If the lock gets locked in the meantime, we don't have to do
-        // anything, because then the thread that locked the lock will take
-        // care of waking up waiters when it unlocks.
-
-        // If only writers are waiting, wake one of them up.
-        if state == WRITERS_WAITING {
-            match self.state.compare_exchange(state, 0, Relaxed, Relaxed) {
-                Ok(_) => {
-                    self.wake_writer();
-                    return;
-                }
-                Err(s) => {
-                    // Maybe some readers are now waiting too. So, continue to the next `if`.
-                    state = s;
-                }
-            }
-        }
-
-        // If both writers and readers are waiting, leave the readers waiting
-        // and only wake up one writer.
-        if state == READERS_WAITING + WRITERS_WAITING {
-            if self.state.compare_exchange(state, READERS_WAITING, Relaxed, Relaxed).is_err() {
-                // The lock got locked. Not our problem anymore.
-                return;
-            }
-            if self.wake_writer() {
-                return;
-            }
-            // No writers were actually blocked on futex_wait, so we continue
-            // to wake up readers instead, since we can't be sure if we notified a writer.
-            state = READERS_WAITING;
-        }
-
-        // If readers are waiting, wake them all up.
-        if state == READERS_WAITING {
-            if self.state.compare_exchange(state, 0, Relaxed, Relaxed).is_ok() {
-                futex_wake_all(&self.state);
-            }
-        }
-    }
-
-    /// This wakes one writer and returns true if we woke up a writer that was
-    /// blocked on futex_wait.
-    ///
-    /// If this returns false, it might still be the case that we notified a
-    /// writer that was about to go to sleep.
-    fn wake_writer(&self) -> bool {
-        self.writer_notify.fetch_add(1, Release);
-        futex_wake(&self.writer_notify)
-        // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke
-        // up any threads or not, and always return `false` here. That still
-        // results in correct behaviour: it just means readers get woken up as
-        // well in case both readers and writers were waiting.
-    }
-
-    /// Spin for a while, but stop directly at the given condition.
-    #[inline]
-    fn spin_until(&self, f: impl Fn(u32) -> bool) -> u32 {
-        let mut spin = 100; // Chosen by fair dice roll.
-        loop {
-            let state = self.state.load(Relaxed);
-            if f(state) || spin == 0 {
-                return state;
-            }
-            crate::hint::spin_loop();
-            spin -= 1;
-        }
-    }
-
-    #[inline]
-    fn spin_write(&self) -> u32 {
-        // Stop spinning when it's unlocked or when there are waiting writers, to keep things somewhat fair.
-        self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state))
-    }
-
-    #[inline]
-    fn spin_read(&self) -> u32 {
-        // Stop spinning when it's unlocked or read locked, or when there are waiting threads.
-        self.spin_until(|state| {
-            !is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state)
-        })
-    }
-}
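
A small sketch of how the 32-bit state word described at the top of the file decomposes; the example states are made up for illustration:

```rust
const READ_LOCKED: u32 = 1;
const MASK: u32 = (1 << 30) - 1; // bits 0..30: reader count / write lock
const WRITE_LOCKED: u32 = MASK;  // 0x3FFF_FFFF
const MAX_READERS: u32 = MASK - 1;
const READERS_WAITING: u32 = 1 << 30;
const WRITERS_WAITING: u32 = 1 << 31;

fn main() {
    // Three readers hold the lock while a writer is parked on `writer_notify`.
    let state = 3 * READ_LOCKED | WRITERS_WAITING;
    assert_eq!(state & MASK, 3);             // reader count
    assert_ne!(state & WRITERS_WAITING, 0);  // a writer is waiting
    assert_eq!(state & READERS_WAITING, 0);  // no readers waiting
    assert_ne!(state & MASK, WRITE_LOCKED);  // not write-locked
    assert!((state & MASK) < MAX_READERS);   // more readers would still fit

    // A write-locked state with readers queued behind it.
    let state = WRITE_LOCKED | READERS_WAITING;
    assert_eq!(state & MASK, WRITE_LOCKED);
    assert_ne!(state & READERS_WAITING, 0);
}
```
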
diff --git a/library/std/src/sys/pal/unix/locks/mod.rs b/library/std/src/sys/pal/unix/locks/mod.rs
deleted file mode 100644
index a49247310b5..00000000000
--- a/library/std/src/sys/pal/unix/locks/mod.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-cfg_if::cfg_if! {
-    if #[cfg(any(
-        target_os = "linux",
-        target_os = "android",
-        all(target_os = "emscripten", target_feature = "atomics"),
-        target_os = "freebsd",
-        target_os = "openbsd",
-        target_os = "dragonfly",
-    ))] {
-        mod futex_mutex;
-        mod futex_rwlock;
-        mod futex_condvar;
-        pub(crate) use futex_mutex::Mutex;
-        pub(crate) use futex_rwlock::RwLock;
-        pub(crate) use futex_condvar::Condvar;
-    } else if #[cfg(target_os = "fuchsia")] {
-        mod fuchsia_mutex;
-        mod futex_rwlock;
-        mod futex_condvar;
-        pub(crate) use fuchsia_mutex::Mutex;
-        pub(crate) use futex_rwlock::RwLock;
-        pub(crate) use futex_condvar::Condvar;
-    } else {
-        mod pthread_mutex;
-        mod pthread_condvar;
-        mod queue_rwlock;
-        pub(crate) use pthread_mutex::Mutex;
-        pub(crate) use queue_rwlock::RwLock;
-        pub(crate) use pthread_condvar::Condvar;
-    }
-}
diff --git a/library/std/src/sys/pal/unix/locks/pthread_condvar.rs b/library/std/src/sys/pal/unix/locks/pthread_condvar.rs
deleted file mode 100644
index 2dc1b0c601e..00000000000
--- a/library/std/src/sys/pal/unix/locks/pthread_condvar.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::ptr;
-use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
-use crate::sys::locks::{pthread_mutex, Mutex};
-#[cfg(not(target_os = "nto"))]
-use crate::sys::time::TIMESPEC_MAX;
-#[cfg(target_os = "nto")]
-use crate::sys::time::TIMESPEC_MAX_CAPPED;
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-use crate::time::Duration;
-
-struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);
-
-pub struct Condvar {
-    inner: LazyBox<AllocatedCondvar>,
-    mutex: AtomicPtr<libc::pthread_mutex_t>,
-}
-
-#[inline]
-fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
-    c.inner.0.get()
-}
-
-unsafe impl Send for AllocatedCondvar {}
-unsafe impl Sync for AllocatedCondvar {}
-
-impl LazyInit for AllocatedCondvar {
-    fn init() -> Box<Self> {
-        let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)));
-
-        cfg_if::cfg_if! {
-            if #[cfg(any(
-                target_os = "macos",
-                target_os = "ios",
-                target_os = "tvos",
-                target_os = "watchos",
-                target_os = "l4re",
-                target_os = "android",
-                target_os = "redox"
-            ))] {
-                // `pthread_condattr_setclock` is unfortunately not supported on these platforms.
-            } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
-                // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet,
-                // so on that platform `pthread_cond_init` must always be called explicitly.
-                // Moreover, that platform does not have pthread_condattr_setclock support,
-                // so that clock initialization is skipped as well.
-                //
-                // Similar story for the 3DS (horizon).
-                let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) };
-                assert_eq!(r, 0);
-            } else {
-                use crate::mem::MaybeUninit;
-                let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
-                let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) };
-                assert_eq!(r, 0);
-                let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) };
-                assert_eq!(r, 0);
-                let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) };
-                assert_eq!(r, 0);
-                let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) };
-                assert_eq!(r, 0);
-            }
-        }
-
-        condvar
-    }
-}
-
-impl Drop for AllocatedCondvar {
-    #[inline]
-    fn drop(&mut self) {
-        let r = unsafe { libc::pthread_cond_destroy(self.0.get()) };
-        if cfg!(target_os = "dragonfly") {
-            // On DragonFly pthread_cond_destroy() returns EINVAL if called on
-            // a condvar that was just initialized with
-            // libc::PTHREAD_COND_INITIALIZER. Once it is used or
-            // pthread_cond_init() is called, this behaviour no longer occurs.
-            debug_assert!(r == 0 || r == libc::EINVAL);
-        } else {
-            debug_assert_eq!(r, 0);
-        }
-    }
-}
-
-impl Condvar {
-    pub const fn new() -> Condvar {
-        Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) }
-    }
-
-    #[inline]
-    fn verify(&self, mutex: *mut libc::pthread_mutex_t) {
-        // Relaxed is okay here because we never read through `self.mutex`, and only use it to
-        // compare addresses.
-        match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
-            Ok(_) => {}                // Stored the address
-            Err(n) if n == mutex => {} // Lost a race to store the same address
-            _ => panic!("attempted to use a condition variable with two mutexes"),
-        }
-    }
-
-    #[inline]
-    pub fn notify_one(&self) {
-        let r = unsafe { libc::pthread_cond_signal(raw(self)) };
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    pub fn notify_all(&self) {
-        let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    pub unsafe fn wait(&self, mutex: &Mutex) {
-        let mutex = pthread_mutex::raw(mutex);
-        self.verify(mutex);
-        let r = libc::pthread_cond_wait(raw(self), mutex);
-        debug_assert_eq!(r, 0);
-    }
-
-    // This implementation is used on systems that support pthread_condattr_setclock
-    // where we configure the condition variable to use the monotonic clock (instead of
-    // the default system clock). This approach avoids all problems that result
-    // from changes made to the system time.
-    #[cfg(not(any(
-        target_os = "macos",
-        target_os = "ios",
-        target_os = "tvos",
-        target_os = "watchos",
-        target_os = "android",
-        target_os = "espidf",
-        target_os = "horizon"
-    )))]
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        use crate::sys::time::Timespec;
-
-        let mutex = pthread_mutex::raw(mutex);
-        self.verify(mutex);
-
-        #[cfg(not(target_os = "nto"))]
-        let timeout = Timespec::now(libc::CLOCK_MONOTONIC)
-            .checked_add_duration(&dur)
-            .and_then(|t| t.to_timespec())
-            .unwrap_or(TIMESPEC_MAX);
-
-        #[cfg(target_os = "nto")]
-        let timeout = Timespec::now(libc::CLOCK_MONOTONIC)
-            .checked_add_duration(&dur)
-            .and_then(|t| t.to_timespec_capped())
-            .unwrap_or(TIMESPEC_MAX_CAPPED);
-
-        let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
-        assert!(r == libc::ETIMEDOUT || r == 0);
-        r == 0
-    }
-
-    // This implementation is modeled after libcxx's condition_variable
-    // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46
-    // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367
-    #[cfg(any(
-        target_os = "macos",
-        target_os = "ios",
-        target_os = "tvos",
-        target_os = "watchos",
-        target_os = "android",
-        target_os = "espidf",
-        target_os = "horizon"
-    ))]
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        use crate::sys::time::SystemTime;
-        use crate::time::Instant;
-
-        let mutex = pthread_mutex::raw(mutex);
-        self.verify(mutex);
-
-        // The macOS implementation of `pthread_cond_timedwait` is buggy
-        // with super long durations. When duration is greater than
-        // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
-        // in macOS Sierra returns error 316.
-        //
-        // This program demonstrates the issue:
-        // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
-        //
-        // To work around this issue, and possible bugs of other OSes, timeout
-        // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
-        // because of spurious wakeups.
-        let dur = Duration::min(dur, Duration::from_secs(1000 * 365 * 86400));
-
-        // pthread_cond_timedwait uses system time, but we want to report timeout
-        // based on stable time.
-        let now = Instant::now();
-
-        let timeout = SystemTime::now()
-            .t
-            .checked_add_duration(&dur)
-            .and_then(|t| t.to_timespec())
-            .unwrap_or(TIMESPEC_MAX);
-
-        let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
-        debug_assert!(r == libc::ETIMEDOUT || r == 0);
-
-        // ETIMEDOUT is not a totally reliable method of determining timeout due
-        // to clock shifts, so do the check ourselves
-        now.elapsed() < dur
-    }
-}
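
On the platforms without `pthread_condattr_setclock`, the timeout handling above boils down to clamping very long durations and trusting a monotonic clock rather than the `ETIMEDOUT` return value. A minimal std-only sketch of that pattern, with a hypothetical `finished_before_timeout` helper standing in for the real wait:

```rust
use std::time::{Duration, Instant};

// Hypothetical helper standing in for the pthread_cond_timedwait call above.
fn finished_before_timeout(dur: Duration, block: impl FnOnce()) -> bool {
    // Clamp very long timeouts, as the real code does for buggy platforms.
    let dur = Duration::min(dur, Duration::from_secs(1000 * 365 * 86400));
    let start = Instant::now();
    block(); // the real code sleeps in pthread_cond_timedwait here
    // Decide "timed out or not" from a monotonic clock, not from ETIMEDOUT,
    // because the system clock may have shifted while we slept.
    start.elapsed() < dur
}

fn main() {
    let woke_in_time = finished_before_timeout(Duration::from_millis(200), || {
        std::thread::sleep(Duration::from_millis(10));
    });
    assert!(woke_in_time);
}
```
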
diff --git a/library/std/src/sys/pal/unix/locks/pthread_mutex.rs b/library/std/src/sys/pal/unix/locks/pthread_mutex.rs
deleted file mode 100644
index ee0794334fb..00000000000
--- a/library/std/src/sys/pal/unix/locks/pthread_mutex.rs
+++ /dev/null
@@ -1,148 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::io::Error;
-use crate::mem::{forget, MaybeUninit};
-use crate::sys::cvt_nz;
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-
-struct AllocatedMutex(UnsafeCell<libc::pthread_mutex_t>);
-
-pub struct Mutex {
-    inner: LazyBox<AllocatedMutex>,
-}
-
-#[inline]
-pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
-    m.inner.0.get()
-}
-
-unsafe impl Send for AllocatedMutex {}
-unsafe impl Sync for AllocatedMutex {}
-
-impl LazyInit for AllocatedMutex {
-    fn init() -> Box<Self> {
-        let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)));
-
-        // Issue #33770
-        //
-        // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
-        // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
-        // try to re-lock it from the same thread when you already hold a lock
-        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
-        // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
-        // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
-        // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
-        // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
-        // a Mutex where re-locking is UB.
-        //
-        // In practice, glibc takes advantage of this undefined behavior to
-        // implement hardware lock elision, which uses hardware transactional
-        // memory to avoid acquiring the lock. While a transaction is in
-        // progress, the lock appears to be unlocked. This isn't a problem for
-        // other threads since the transactional memory will abort if a conflict
-        // is detected, however no abort is generated when re-locking from the
-        // same thread.
-        //
-        // Since locking the same mutex twice will result in two aliasing &mut
-        // references, we instead create the mutex with type
-        // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
-        // re-lock it from the same thread, thus avoiding undefined behavior.
-        unsafe {
-            let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
-            cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
-            let attr = PthreadMutexAttr(&mut attr);
-            cvt_nz(libc::pthread_mutexattr_settype(
-                attr.0.as_mut_ptr(),
-                libc::PTHREAD_MUTEX_NORMAL,
-            ))
-            .unwrap();
-            cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap();
-        }
-
-        mutex
-    }
-
-    fn destroy(mutex: Box<Self>) {
-        // We're not allowed to pthread_mutex_destroy a locked mutex,
-        // so check first if it's unlocked.
-        if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } {
-            unsafe { libc::pthread_mutex_unlock(mutex.0.get()) };
-            drop(mutex);
-        } else {
-            // The mutex is locked. This happens if a MutexGuard is leaked.
-            // In this case, we just leak the Mutex too.
-            forget(mutex);
-        }
-    }
-
-    fn cancel_init(_: Box<Self>) {
-        // In this case, we can just drop it without any checks,
-        // since it cannot have been locked yet.
-    }
-}
-
-impl Drop for AllocatedMutex {
-    #[inline]
-    fn drop(&mut self) {
-        let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) };
-        if cfg!(target_os = "dragonfly") {
-            // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
-            // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
-            // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
-            // this behaviour no longer occurs.
-            debug_assert!(r == 0 || r == libc::EINVAL);
-        } else {
-            debug_assert_eq!(r, 0);
-        }
-    }
-}
-
-impl Mutex {
-    #[inline]
-    pub const fn new() -> Mutex {
-        Mutex { inner: LazyBox::new() }
-    }
-
-    #[inline]
-    pub unsafe fn lock(&self) {
-        #[cold]
-        #[inline(never)]
-        fn fail(r: i32) -> ! {
-            let error = Error::from_raw_os_error(r);
-            panic!("failed to lock mutex: {error}");
-        }
-
-        let r = libc::pthread_mutex_lock(raw(self));
-        // As we set the mutex type to `PTHREAD_MUTEX_NORMAL` above, we expect
-        // the lock call to never fail. Unfortunately however, some platforms
-        // (Solaris) do not conform to the standard, and instead always provide
-        // deadlock detection. How kind of them! Unfortunately that means that
-        // we need to check the error code here. To save us from UB on other
-        // less well-behaved platforms in the future, we do it even on "good"
-        // platforms like macOS. See #120147 for more context.
-        if r != 0 {
-            fail(r)
-        }
-    }
-
-    #[inline]
-    pub unsafe fn unlock(&self) {
-        let r = libc::pthread_mutex_unlock(raw(self));
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        libc::pthread_mutex_trylock(raw(self)) == 0
-    }
-}
-
-pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit<libc::pthread_mutexattr_t>);
-
-impl Drop for PthreadMutexAttr<'_> {
-    fn drop(&mut self) {
-        unsafe {
-            let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr());
-            debug_assert_eq!(result, 0);
-        }
-    }
-}
diff --git a/library/std/src/sys/pal/unix/locks/queue_rwlock.rs b/library/std/src/sys/pal/unix/locks/queue_rwlock.rs
deleted file mode 100644
index 0f02a98dfdd..00000000000
--- a/library/std/src/sys/pal/unix/locks/queue_rwlock.rs
+++ /dev/null
@@ -1,557 +0,0 @@
-//! Efficient read-write locking without `pthread_rwlock_t`.
-//!
-//! The readers-writer lock provided by the `pthread` library has a number of
-//! problems which make it a suboptimal choice for `std`:
-//!
-//! * It is non-movable, so it needs to be allocated (lazily, to make the
-//! constructor `const`).
-//! * `pthread` is an external library, meaning the fast path of acquiring an
-//! uncontended lock cannot be inlined.
-//! * Some platforms (at least glibc before version 2.25) have buggy implementations
-//! that can easily lead to undefined behaviour in safe Rust code when not properly
-//! guarded against.
-//! * On some platforms (e.g. macOS), the lock is very slow.
-//!
-//! Therefore, we implement our own `RwLock`! Naively, one might reach for a
-//! spinlock, but those [can be quite problematic] when the lock is contended.
-//! Instead, this readers-writer lock copies its implementation strategy from
-//! the Windows [SRWLOCK] and the [usync] library. Spinning is still used for the
-//! fast path, but it is bounded: after spinning fails, threads will locklessly
-//! add an information structure containing a [`Thread`] handle into a queue of
-//! waiters associated with the lock. The lock owner, upon releasing the lock,
-//! will scan through the queue and wake up threads as appropriate, which will
-//! then again try to acquire the lock. The resulting [`RwLock`] is:
-//!
-//! * adaptive, since it spins before doing any heavyweight parking operations
-//! * allocation-free, modulo the per-thread [`Thread`] handle, which is
-//! allocated regardless when using threads created by `std`
-//! * writer-preferring, even if some readers may still slip through
-//! * unfair, which reduces context-switching and thus drastically improves
-//! performance
-//!
-//! and also quite fast in most cases.
-//!
-//! [can be quite problematic]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
-//! [SRWLOCK]: https://learn.microsoft.com/en-us/windows/win32/sync/slim-reader-writer--srw--locks
-//! [usync]: https://crates.io/crates/usync
-//!
-//! # Implementation
-//!
-//! ## State
-//!
-//! A single [`AtomicPtr`] is used as state variable. The lowest three bits are used
-//! to indicate the meaning of the remaining bits:
-//!
-//! | [`LOCKED`] | [`QUEUED`] | [`QUEUE_LOCKED`] | Remaining    |                                                                                                                             |
-//! |:-----------|:-----------|:-----------------|:-------------|:----------------------------------------------------------------------------------------------------------------------------|
-//! | 0          | 0          | 0                | 0            | The lock is unlocked, no threads are waiting                                                                                |
-//! | 1          | 0          | 0                | 0            | The lock is write-locked, no threads waiting                                                                                |
-//! | 1          | 0          | 0                | n > 0        | The lock is read-locked with n readers                                                                                      |
-//! | 0          | 1          | *                | `*mut Node`  | The lock is unlocked, but some threads are waiting. Only writers may lock the lock                                          |
-//! | 1          | 1          | *                | `*mut Node`  | The lock is locked, but some threads are waiting. If the lock is read-locked, the last queue node contains the reader count |
-//!
-//! ## Waiter queue
-//!
-//! When threads are waiting on the lock (`QUEUED` is set), the lock state
-//! points to a queue of waiters, which is implemented as a linked list of
-//! nodes stored on the stack to avoid memory allocation. To enable lockless
-//! enqueuing of new nodes to the queue, the linked list is single-linked upon
-//! creation. Since when the lock is read-locked, the lock count is stored in
-//! the last link of the queue, threads have to traverse the queue to find the
-//! last element upon releasing the lock. To avoid having to traverse the whole
-//! list again and again, a pointer to the found tail is cached in the (current)
-//! first element of the queue.
-//!
-//! Also, while the lock is unfair for performance reasons, it is still best to
-//! wake the tail node first, which requires backlinks to previous nodes to be
-//! created. This is done at the same time as finding the tail, and thus a set
-//! tail field indicates the remaining portion of the queue is initialized.
-//!
-//! TLDR: Here's a diagram of what the queue looks like:
-//!
-//! ```text
-//! state
-//!   │
-//!   ▼
-//! ╭───────╮ next ╭───────╮ next ╭───────╮ next ╭───────╮
-//! │       ├─────►│       ├─────►│       ├─────►│ count │
-//! │       │      │       │      │       │      │       │
-//! │       │      │       │◄─────┤       │◄─────┤       │
-//! ╰───────╯      ╰───────╯ prev ╰───────╯ prev ╰───────╯
-//!                      │                           ▲
-//!                      └───────────────────────────┘
-//!                                  tail
-//! ```
-//!
-//! Invariants:
-//! 1. At least one node must contain a non-null, current `tail` field.
-//! 2. The first non-null `tail` field must be valid and current.
-//! 3. All nodes preceding this node must have a correct, non-null `next` field.
-//! 4. All nodes following this node must have a correct, non-null `prev` field.
-//!
-//! Access to the queue is controlled by the `QUEUE_LOCKED` bit, which threads
-//! try to set both after enqueuing themselves to eagerly add backlinks to the
-//! queue, which drastically improves performance, and after unlocking the lock
-//! to wake the next waiter(s). This is done atomically at the same time as the
-//! enqueuing/unlocking operation. The thread releasing the `QUEUE_LOCKED` bit
-//! will check the state of the lock and wake up waiters as appropriate. This
-//! guarantees forward-progress even if the unlocking thread could not acquire
-//! the queue lock.
-//!
-//! ## Memory orderings
-//!
-//! To properly synchronize changes to the data protected by the lock, the lock
-//! is acquired and released with [`Acquire`] and [`Release`] ordering, respectively.
-//! To propagate the initialization of nodes, changes to the queue lock are also
-//! performed using these orderings.
-
-#![forbid(unsafe_op_in_unsafe_fn)]
-
-use crate::cell::OnceCell;
-use crate::hint::spin_loop;
-use crate::mem;
-use crate::ptr::{self, invalid_mut, null_mut, NonNull};
-use crate::sync::atomic::{
-    AtomicBool, AtomicPtr,
-    Ordering::{AcqRel, Acquire, Relaxed, Release},
-};
-use crate::sys_common::thread_info;
-use crate::thread::Thread;
-
-// Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the
-// locking operation will be retried.
-// `spin_loop` will be called `2.pow(SPIN_COUNT) - 1` times.
-const SPIN_COUNT: usize = 7;
-
-type State = *mut ();
-type AtomicState = AtomicPtr<()>;
-
-const UNLOCKED: State = invalid_mut(0);
-const LOCKED: usize = 1;
-const QUEUED: usize = 2;
-const QUEUE_LOCKED: usize = 4;
-const SINGLE: usize = 8;
-const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED);
-
-/// Marks the state as write-locked, if possible.
-#[inline]
-fn write_lock(state: State) -> Option<State> {
-    let state = state.wrapping_byte_add(LOCKED);
-    if state.addr() & LOCKED == LOCKED { Some(state) } else { None }
-}
-
-/// Marks the state as read-locked, if possible.
-#[inline]
-fn read_lock(state: State) -> Option<State> {
-    if state.addr() & QUEUED == 0 && state.addr() != LOCKED {
-        Some(invalid_mut(state.addr().checked_add(SINGLE)? | LOCKED))
-    } else {
-        None
-    }
-}
-
-/// Masks the state, assuming it points to a queue node.
-///
-/// # Safety
-/// The state must contain a valid pointer to a queue node.
-#[inline]
-unsafe fn to_node(state: State) -> NonNull<Node> {
-    unsafe { NonNull::new_unchecked(state.mask(MASK)).cast() }
-}
-
-/// An atomic node pointer with relaxed operations.
-struct AtomicLink(AtomicPtr<Node>);
-
-impl AtomicLink {
-    fn new(v: Option<NonNull<Node>>) -> AtomicLink {
-        AtomicLink(AtomicPtr::new(v.map_or(null_mut(), NonNull::as_ptr)))
-    }
-
-    fn get(&self) -> Option<NonNull<Node>> {
-        NonNull::new(self.0.load(Relaxed))
-    }
-
-    fn set(&self, v: Option<NonNull<Node>>) {
-        self.0.store(v.map_or(null_mut(), NonNull::as_ptr), Relaxed);
-    }
-}
-
-#[repr(align(8))]
-struct Node {
-    next: AtomicLink,
-    prev: AtomicLink,
-    tail: AtomicLink,
-    write: bool,
-    thread: OnceCell<Thread>,
-    completed: AtomicBool,
-}
-
-impl Node {
-    /// Create a new queue node.
-    fn new(write: bool) -> Node {
-        Node {
-            next: AtomicLink::new(None),
-            prev: AtomicLink::new(None),
-            tail: AtomicLink::new(None),
-            write,
-            thread: OnceCell::new(),
-            completed: AtomicBool::new(false),
-        }
-    }
-
-    /// Prepare this node for waiting.
-    fn prepare(&mut self) {
-        // Fall back to creating an unnamed `Thread` handle to allow locking in
-        // TLS destructors.
-        self.thread
-            .get_or_init(|| thread_info::current_thread().unwrap_or_else(|| Thread::new(None)));
-        self.completed = AtomicBool::new(false);
-    }
-
-    /// Wait until this node is marked as completed.
-    ///
-    /// # Safety
-    /// May only be called from the thread that created the node.
-    unsafe fn wait(&self) {
-        while !self.completed.load(Acquire) {
-            unsafe {
-                self.thread.get().unwrap().park();
-            }
-        }
-    }
-
-    /// Atomically mark this node as completed. The node may not outlive this call.
-    unsafe fn complete(this: NonNull<Node>) {
-        // Since the node may be destroyed immediately after the completed flag
-        // is set, clone the thread handle before that.
-        let thread = unsafe { this.as_ref().thread.get().unwrap().clone() };
-        unsafe {
-            this.as_ref().completed.store(true, Release);
-        }
-        thread.unpark();
-    }
-}
-
-struct PanicGuard;
-
-impl Drop for PanicGuard {
-    fn drop(&mut self) {
-        rtabort!("tried to drop node in intrusive list.");
-    }
-}
-
-/// Add backlinks to the queue, returning the tail.
-///
-/// May be called from multiple threads at the same time, while the queue is not
-/// modified (this happens when unlocking multiple readers).
-///
-/// # Safety
-/// * `head` must point to a node in a valid queue.
-/// * `head` must be or be in front of the head of the queue at the time of the
-/// last removal.
-/// * The part of the queue starting with `head` must not be modified during this
-/// call.
-unsafe fn add_backlinks_and_find_tail(head: NonNull<Node>) -> NonNull<Node> {
-    let mut current = head;
-    let tail = loop {
-        let c = unsafe { current.as_ref() };
-        match c.tail.get() {
-            Some(tail) => break tail,
-            // SAFETY:
-            // All `next` fields before the first node with a `set` tail are
-            // non-null and valid (invariant 3).
-            None => unsafe {
-                let next = c.next.get().unwrap_unchecked();
-                next.as_ref().prev.set(Some(current));
-                current = next;
-            },
-        }
-    };
-
-    unsafe {
-        head.as_ref().tail.set(Some(tail));
-        tail
-    }
-}
-
-pub struct RwLock {
-    state: AtomicState,
-}
-
-impl RwLock {
-    #[inline]
-    pub const fn new() -> RwLock {
-        RwLock { state: AtomicPtr::new(UNLOCKED) }
-    }
-
-    #[inline]
-    pub fn try_read(&self) -> bool {
-        self.state.fetch_update(Acquire, Relaxed, read_lock).is_ok()
-    }
-
-    #[inline]
-    pub fn read(&self) {
-        if !self.try_read() {
-            self.lock_contended(false)
-        }
-    }
-
-    #[inline]
-    pub fn try_write(&self) -> bool {
-        // Atomically set the `LOCKED` bit. This is lowered to a single atomic
-        // instruction on most modern processors (e.g. "lock bts" on x86 and
-        // "ldseta" on modern AArch64), and therefore is more efficient than
-        // `fetch_update(lock(true))`, which can spuriously fail if a new node
-        // is appended to the queue.
-        self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0
-    }
-
-    #[inline]
-    pub fn write(&self) {
-        if !self.try_write() {
-            self.lock_contended(true)
-        }
-    }
-
-    #[cold]
-    fn lock_contended(&self, write: bool) {
-        let update = if write { write_lock } else { read_lock };
-        let mut node = Node::new(write);
-        let mut state = self.state.load(Relaxed);
-        let mut count = 0;
-        loop {
-            if let Some(next) = update(state) {
-                // The lock is available, try locking it.
-                match self.state.compare_exchange_weak(state, next, Acquire, Relaxed) {
-                    Ok(_) => return,
-                    Err(new) => state = new,
-                }
-            } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT {
-                // If the lock is not available and no threads are queued, spin
-                // for a while, using exponential backoff to decrease cache
-                // contention.
-                for _ in 0..(1 << count) {
-                    spin_loop();
-                }
-                state = self.state.load(Relaxed);
-                count += 1;
-            } else {
-                // Fall back to parking. First, prepare the node.
-                node.prepare();
-
-                // If there are threads queued, set the `next` field to a
-                // pointer to the next node in the queue. Otherwise set it to
-                // the lock count if the state is read-locked or to zero if it
-                // is write-locked.
-                node.next.0 = AtomicPtr::new(state.mask(MASK).cast());
-                node.prev = AtomicLink::new(None);
-                let mut next = ptr::from_ref(&node)
-                    .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED))
-                    as State;
-
-                if state.addr() & QUEUED == 0 {
-                    // If this is the first node in the queue, set the tail field to
-                    // the node itself to ensure there is a current `tail` field in
-                    // the queue (invariants 1 and 2). This needs to use `set` to
-                    // avoid invalidating the new pointer.
-                    node.tail.set(Some(NonNull::from(&node)));
-                } else {
-                    // Otherwise, the tail of the queue is not known.
-                    node.tail.set(None);
-                    // Try locking the queue to eagerly add backlinks.
-                    next = next.map_addr(|addr| addr | QUEUE_LOCKED);
-                }
-
-                // Register the node, using release ordering to propagate our
-                // changes to the waking thread.
-                if let Err(new) = self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) {
-                    // The state has changed, just try again.
-                    state = new;
-                    continue;
-                }
-
-                // The node is registered, so the structure must not be
-                // mutably accessed or destroyed while other threads may
-                // be accessing it. Guard against unwinds using a panic
-                // guard that aborts when dropped.
-                let guard = PanicGuard;
-
-                // If the current thread locked the queue, unlock it again,
-                // linking it in the process.
-                if state.addr() & (QUEUE_LOCKED | QUEUED) == QUEUED {
-                    unsafe {
-                        self.unlock_queue(next);
-                    }
-                }
-
-                // Wait until the node is removed from the queue.
-                // SAFETY: the node was created by the current thread.
-                unsafe {
-                    node.wait();
-                }
-
-                // The node was removed from the queue, disarm the guard.
-                mem::forget(guard);
-
-                // Reload the state and try again.
-                state = self.state.load(Relaxed);
-                count = 0;
-            }
-        }
-    }
-
-    #[inline]
-    pub unsafe fn read_unlock(&self) {
-        match self.state.fetch_update(Release, Acquire, |state| {
-            if state.addr() & QUEUED == 0 {
-                let count = state.addr() - (SINGLE | LOCKED);
-                Some(if count > 0 { invalid_mut(count | LOCKED) } else { UNLOCKED })
-            } else {
-                None
-            }
-        }) {
-            Ok(_) => {}
-            // There are waiters queued and the lock count was moved to the
-            // tail of the queue.
-            Err(state) => unsafe { self.read_unlock_contended(state) },
-        }
-    }
-
-    #[cold]
-    unsafe fn read_unlock_contended(&self, state: State) {
-        // The state was observed with acquire ordering above, so the current
-        // thread will observe all node initializations.
-
-        // SAFETY:
-        // Because new read-locks cannot be acquired while threads are queued,
-        // all queue-lock owners will observe the set `LOCKED` bit. Because they
-        // do not modify the queue while there is a lock owner, the queue will
-        // not be removed from here.
-        let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() };
-        // The lock count is stored in the `next` field of `tail`.
-        // Decrement it, making sure to observe all changes made to the queue
-        // by the other lock owners by using acquire-release ordering.
-        let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0;
-        if was_last {
-            // SAFETY:
-            // Other threads cannot read-lock while threads are queued. Also,
-            // the `LOCKED` bit is still set, so there are no writers. Therefore,
-            // the current thread exclusively owns the lock.
-            unsafe { self.unlock_contended(state) }
-        }
-    }
-
-    #[inline]
-    pub unsafe fn write_unlock(&self) {
-        if let Err(state) =
-            self.state.compare_exchange(invalid_mut(LOCKED), UNLOCKED, Release, Relaxed)
-        {
-            // SAFETY:
-            // Since other threads cannot acquire the lock, the state can only
-            // have changed because there are threads queued on the lock.
-            unsafe { self.unlock_contended(state) }
-        }
-    }
-
-    /// # Safety
-    /// * The lock must be exclusively owned by this thread.
-    /// * There must be threads queued on the lock.
-    #[cold]
-    unsafe fn unlock_contended(&self, mut state: State) {
-        loop {
-            // Atomically release the lock and try to acquire the queue lock.
-            let next = state.map_addr(|a| (a & !LOCKED) | QUEUE_LOCKED);
-            match self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) {
-                // The queue lock was acquired. Release it, waking up the next
-                // waiter in the process.
-                Ok(_) if state.addr() & QUEUE_LOCKED == 0 => unsafe {
-                    return self.unlock_queue(next);
-                },
-                // Another thread already holds the queue lock, leave waking up
-                // waiters to it.
-                Ok(_) => return,
-                Err(new) => state = new,
-            }
-        }
-    }
-
-    /// Unlocks the queue. If the lock is unlocked, wakes up the next eligible
-    /// thread(s).
-    ///
-    /// # Safety
-    /// The queue lock must be held by the current thread.
-    unsafe fn unlock_queue(&self, mut state: State) {
-        debug_assert_eq!(state.addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED);
-
-        loop {
-            let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) };
-
-            if state.addr() & LOCKED == LOCKED {
-                // Another thread has locked the lock. Leave waking up waiters
-                // to them by releasing the queue lock.
-                match self.state.compare_exchange_weak(
-                    state,
-                    state.mask(!QUEUE_LOCKED),
-                    Release,
-                    Acquire,
-                ) {
-                    Ok(_) => return,
-                    Err(new) => {
-                        state = new;
-                        continue;
-                    }
-                }
-            }
-
-            let is_writer = unsafe { tail.as_ref().write };
-            if is_writer && let Some(prev) = unsafe { tail.as_ref().prev.get() } {
-                // `tail` is a writer and there is a node before `tail`.
-                // Split off `tail`.
-
-                // There are no set `tail` links before the node pointed to by
-                // `state`, so the first non-null tail field will be current
-                // (invariant 2). Invariant 4 is fulfilled since `find_tail`
-                // was called on this node, which ensures all backlinks are set.
-                unsafe {
-                    to_node(state).as_ref().tail.set(Some(prev));
-                }
-
-                // Release the queue lock. Doing this by subtraction is more
-                // efficient on modern processors since it is a single instruction
-                // instead of an update loop, which will fail if new threads are
-                // added to the list.
-                self.state.fetch_byte_sub(QUEUE_LOCKED, Release);
-
-                // The tail was split off and the lock released. Mark the node as
-                // completed.
-                unsafe {
-                    return Node::complete(tail);
-                }
-            } else {
-                // The next waiter is a reader or the queue only consists of one
-                // waiter. Just wake all threads.
-
-                // The lock cannot be locked (checked above), so mark it as
-                // unlocked to reset the queue.
-                if let Err(new) =
-                    self.state.compare_exchange_weak(state, UNLOCKED, Release, Acquire)
-                {
-                    state = new;
-                    continue;
-                }
-
-                let mut current = tail;
-                loop {
-                    let prev = unsafe { current.as_ref().prev.get() };
-                    unsafe {
-                        Node::complete(current);
-                    }
-                    match prev {
-                        Some(prev) => current = prev,
-                        None => return,
-                    }
-                }
-            }
-        }
-    }
-}
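
A sketch of the pointer-tagged state word described in the module documentation above, written with plain `usize` values instead of the strict-provenance pointer APIs used in the file; the node address is made up and only needs to be 8-byte aligned:

```rust
const LOCKED: usize = 1;
const QUEUED: usize = 2;
const QUEUE_LOCKED: usize = 4;
const SINGLE: usize = 8; // one reader, when nothing is queued
const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED);

fn main() {
    // Three readers, nobody queued: the upper bits hold the reader count.
    let state = 3 * SINGLE | LOCKED;
    assert_eq!(state & QUEUED, 0);
    assert_eq!((state & MASK) / SINGLE, 3);

    // Waiters present: the upper bits hold an 8-byte-aligned node pointer
    // instead (made-up address here), and the reader count moves into the
    // `next` field of the queue's tail node.
    let node_addr: usize = 0x7f00_1000;
    let state = node_addr | QUEUED | QUEUE_LOCKED | LOCKED;
    assert_eq!(state & MASK, node_addr); // recover the node pointer
    assert_ne!(state & LOCKED, 0);       // the lock is held
    assert_ne!(state & QUEUE_LOCKED, 0); // someone is editing the queue
}
```
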
diff --git a/library/std/src/sys/pal/unix/mod.rs b/library/std/src/sys/pal/unix/mod.rs
index 976a437c17f..04b8c5ca916 100644
--- a/library/std/src/sys/pal/unix/mod.rs
+++ b/library/std/src/sys/pal/unix/mod.rs
@@ -20,7 +20,6 @@ pub mod io;
 pub mod kernel_copy;
 #[cfg(target_os = "l4re")]
 mod l4re;
-pub mod locks;
 pub mod memchr;
 #[cfg(not(target_os = "l4re"))]
 pub mod net;