author    Jacob Pratt <jacob@jhpratt.dev>  2025-08-22 22:00:46 -0400
committer GitHub <noreply@github.com>      2025-08-22 22:00:46 -0400
commit    2bd39222cdb9b6bb182c4de44cde99048942683b (patch)
tree      513c66953c79e6292bd9625f3385aabb741112d7
parent    bc4a6431eb579bd1332c36b27e3e176c9b1d17c3 (diff)
parent    ad499d076aa71ac16de4d08fa232446d39125df3 (diff)
Rollup merge of #144648 - connortsui20:nonpoison_rwlock, r=Mark-Simulacrum
Implementation: `#[feature(nonpoison_rwlock)]`

Tracking Issue: https://github.com/rust-lang/rust/issues/134645

This PR continues the effort made in https://github.com/rust-lang/rust/pull/144022 by adding the implementation of `nonpoison::rwlock`.

Many of the changes mirror those made to implement `nonpoison::mutex`. The main difference is that this PR also reorganizes the existing `poison::rwlock` file, which hopefully makes both variants more readable.
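
For reviewers who have not looked at the earlier `nonpoison::mutex` work, here is a minimal usage sketch (illustration only, not part of this diff) of the API shape implemented below: `read`/`write` hand back guards directly instead of a `LockResult`, and the `try_*` methods signal contention with `WouldBlock`.

```rust
#![feature(nonpoison_rwlock)]

use std::sync::nonpoison::RwLock;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);

    // No `LockResult`/`unwrap`: `read` and `write` return the guard directly.
    lock.write().push(4);
    assert_eq!(lock.read().len(), 4);

    // `try_write` reports contention with `WouldBlock` instead of a poison-aware error.
    let reader = lock.read();
    assert!(lock.try_write().is_err());
    drop(reader);
    assert!(lock.try_write().is_ok());
}
```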

### Related PRs

- `nonpoison_condvar` implementation: https://github.com/rust-lang/rust/pull/144651
- `nonpoison_once` implementation: https://github.com/rust-lang/rust/pull/144653
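
The new guards also plug into the `mapped_lock_guards` surface (`map`/`filter_map` on both guard types, plus the `MappedRwLock*Guard` reexports added to `nonpoison.rs` below). A hedged sketch of how that is expected to look from the caller's side, assuming the API lands as written in this diff; the `Config` type is made up for illustration:

```rust
#![feature(nonpoison_rwlock)]
#![feature(mapped_lock_guards)]

use std::sync::nonpoison::{RwLock, RwLockReadGuard, RwLockWriteGuard};

struct Config {
    name: String,
    retry_limit: Option<u32>,
}

fn main() {
    let lock = RwLock::new(Config { name: "demo".to_owned(), retry_limit: Some(3) });

    // Narrow a read guard down to one field; the `RwLock` stays read-locked.
    let name = RwLockReadGuard::map(lock.read(), |c| &c.name);
    assert_eq!(&*name, "demo");
    drop(name);

    // `filter_map` hands the original guard back as `Err(..)` when the closure returns `None`.
    match RwLockWriteGuard::filter_map(lock.write(), |c| c.retry_limit.as_mut()) {
        Ok(mut limit) => *limit -= 1,
        Err(_whole_guard) => println!("no retry limit configured"),
    }
    assert_eq!(lock.read().retry_limit, Some(2));
}
```
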
-rw-r--r--  library/std/src/sync/nonpoison.rs           5
-rw-r--r--  library/std/src/sync/nonpoison/rwlock.rs    1081
-rw-r--r--  library/std/src/sync/poison/rwlock.rs       699
-rw-r--r--  library/std/tests/sync/lib.rs               1
-rw-r--r--  library/std/tests/sync/rwlock.rs            946
-rw-r--r--  tests/ui/suggestions/inner_type.fixed       4
-rw-r--r--  tests/ui/suggestions/inner_type.rs          4
-rw-r--r--  tests/ui/suggestions/inner_type.stderr      8
8 files changed, 1996 insertions, 752 deletions
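
Beyond the locking methods, the new `nonpoison/rwlock.rs` below also carries the `lock_value_accessors` helpers (`get_cloned`, `set`, `replace`). A minimal sketch mirroring the doc examples in the diff:

```rust
#![feature(nonpoison_rwlock)]
#![feature(lock_value_accessors)]

use std::sync::nonpoison::RwLock;

fn main() {
    let lock = RwLock::new(7);

    // Clone the protected value out without holding a guard across other work.
    assert_eq!(lock.get_cloned(), 7);

    // `set` overwrites in place; `replace` additionally returns the old value.
    lock.set(11);
    assert_eq!(lock.replace(13), 11);
    assert_eq!(lock.get_cloned(), 13);
}
```
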
diff --git a/library/std/src/sync/nonpoison.rs b/library/std/src/sync/nonpoison.rs
index 2bbf226dc2c..b3ae376e70d 100644
--- a/library/std/src/sync/nonpoison.rs
+++ b/library/std/src/sync/nonpoison.rs
@@ -33,5 +33,10 @@ impl fmt::Display for WouldBlock {
 pub use self::mutex::MappedMutexGuard;
 #[unstable(feature = "nonpoison_mutex", issue = "134645")]
 pub use self::mutex::{Mutex, MutexGuard};
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 
 mod mutex;
+mod rwlock;
diff --git a/library/std/src/sync/nonpoison/rwlock.rs b/library/std/src/sync/nonpoison/rwlock.rs
new file mode 100644
index 00000000000..eb0aef99cc1
--- /dev/null
+++ b/library/std/src/sync/nonpoison/rwlock.rs
@@ -0,0 +1,1081 @@
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::mem::{self, ManuallyDrop, forget};
+use crate::ops::{Deref, DerefMut};
+use crate::ptr::NonNull;
+use crate::sync::nonpoison::{TryLockResult, WouldBlock};
+use crate::sys::sync as sys;
+
+/// A reader-writer lock that does not keep track of lock poisoning.
+///
+/// For more information about reader-writer locks, check out the documentation for the poisoning
+/// variant of this lock (which can be found at [`poison::RwLock`]).
+///
+/// [`poison::RwLock`]: crate::sync::poison::RwLock
+///
+/// # Examples
+///
+/// ```
+/// #![feature(nonpoison_rwlock)]
+///
+/// use std::sync::nonpoison::RwLock;
+///
+/// let lock = RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+///     let r1 = lock.read();
+///     let r2 = lock.read();
+///     assert_eq!(*r1, 5);
+///     assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+///     let mut w = lock.write();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLock")]
+pub struct RwLock<T: ?Sized> {
+    /// The inner [`sys::RwLock`] that synchronizes thread access to the protected data.
+    inner: sys::RwLock,
+    /// The lock-protected data.
+    data: UnsafeCell<T>,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Guards
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read`] and [`try_read`] methods on
+/// [`RwLock`].
+///
+/// [`read`]: RwLock::read
+/// [`try_read`]: RwLock::try_read
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockReadGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLockReadGuard")]
+pub struct RwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `RwLockReadGuard` instance only holds
+    /// immutability until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+    /// covariant over `T`, just like we would have with `&T`.
+    data: NonNull<T>,
+    /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+    inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] and [`try_write`] methods
+/// on [`RwLock`].
+///
+/// [`write`]: RwLock::write
+/// [`try_write`]: RwLock::try_write
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockWriteGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLockWriteGuard")]
+pub struct RwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A reference to the [`RwLock`] that we have write-locked.
+    lock: &'rwlock RwLock<T>,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped, which can point to a subfield of the protected data.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods
+/// on [`RwLockReadGuard`].
+///
+/// [`map`]: RwLockReadGuard::map
+/// [`filter_map`]: RwLockReadGuard::filter_map
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a MappedRwLockReadGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedRwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockReadGuard` instance only
+    /// holds immutability until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+    /// covariant over `T`, just like we would have with `&T`.
+    data: NonNull<T>,
+    /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+    inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for MappedRwLockReadGuard<'_, T> {}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped, which can point to a subfield of the protected data.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods
+/// on [`RwLockWriteGuard`].
+///
+/// [`map`]: RwLockWriteGuard::map
+/// [`filter_map`]: RwLockWriteGuard::filter_map
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a MappedRwLockWriteGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedRwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockWriteGuard` instance only
+    /// holds uniqueness until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations.
+    data: NonNull<T>,
+    /// `NonNull` is covariant over `T`, so we add a `PhantomData<&'rwlock mut T>` field here to
+    /// enforce the correct invariance over `T`.
+    _variance: PhantomData<&'rwlock mut T>,
+    /// A reference to the internal [`sys::RwLock`] that we have write-locked.
+    inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Implementations
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+impl<T> RwLock<T> {
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let lock = RwLock::new(5);
+    /// ```
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    #[inline]
+    pub const fn new(t: T) -> RwLock<T> {
+        RwLock { inner: sys::RwLock::new(), data: UnsafeCell::new(t) }
+    }
+
+    /// Returns the contained value by cloning it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let mut lock = RwLock::new(7);
+    ///
+    /// assert_eq!(lock.get_cloned(), 7);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn get_cloned(&self) -> T
+    where
+        T: Clone,
+    {
+        self.read().clone()
+    }
+
+    /// Sets the contained value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let mut lock = RwLock::new(7);
+    ///
+    /// assert_eq!(lock.get_cloned(), 7);
+    /// lock.set(11);
+    /// assert_eq!(lock.get_cloned(), 11);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn set(&self, value: T) {
+        if mem::needs_drop::<T>() {
+            // If the contained value has a non-trivial destructor, we
+            // call that destructor after the lock has been released.
+            drop(self.replace(value))
+        } else {
+            *self.write() = value;
+        }
+    }
+
+    /// Replaces the contained value with `value`, and returns the old contained value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let mut lock = RwLock::new(7);
+    ///
+    /// assert_eq!(lock.replace(11), 7);
+    /// assert_eq!(lock.get_cloned(), 11);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn replace(&self, value: T) -> T {
+        let mut guard = self.write();
+        mem::replace(&mut *guard, value)
+    }
+}
+
+impl<T: ?Sized> RwLock<T> {
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns. This method does not provide any guarantees with
+    /// respect to the ordering of whether contentious readers or writers will
+    /// acquire the lock first.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    ///
+    /// # Panics
+    ///
+    /// This function might panic when called if the lock is already held by the current thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::Arc;
+    /// use std::sync::nonpoison::RwLock;
+    /// use std::thread;
+    ///
+    /// let lock = Arc::new(RwLock::new(1));
+    /// let c_lock = Arc::clone(&lock);
+    ///
+    /// let n = lock.read();
+    /// assert_eq!(*n, 1);
+    ///
+    /// thread::spawn(move || {
+    ///     let r = c_lock.read();
+    /// }).join().unwrap();
+    /// ```
+    #[inline]
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn read(&self) -> RwLockReadGuard<'_, T> {
+        unsafe {
+            self.inner.read();
+            RwLockReadGuard::new(self)
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `Err` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    ///
+    /// This function does not provide any guarantees with respect to the ordering
+    /// of whether contentious readers or writers will acquire the lock first.
+    ///
+    /// # Errors
+    ///
+    /// This function will return the [`WouldBlock`] error if the `RwLock` could
+    /// not be acquired because it was already locked exclusively.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let lock = RwLock::new(1);
+    ///
+    /// match lock.try_read() {
+    ///     Ok(n) => assert_eq!(*n, 1),
+    ///     Err(_) => unreachable!(),
+    /// };
+    /// ```
+    #[inline]
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
+        unsafe {
+            if self.inner.try_read() { Ok(RwLockReadGuard::new(self)) } else { Err(WouldBlock) }
+        }
+    }
+
+    /// Locks this `RwLock` with exclusive write access, blocking the current
+    /// thread until it can be acquired.
+    ///
+    /// This function will not return while other writers or other readers
+    /// currently have access to the lock.
+    ///
+    /// Returns an RAII guard which will drop the write access of this `RwLock`
+    /// when dropped.
+    ///
+    /// # Panics
+    ///
+    /// This function might panic when called if the lock is already held by the current thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let lock = RwLock::new(1);
+    ///
+    /// let mut n = lock.write();
+    /// *n = 2;
+    ///
+    /// assert!(lock.try_read().is_err());
+    /// ```
+    #[inline]
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn write(&self) -> RwLockWriteGuard<'_, T> {
+        unsafe {
+            self.inner.write();
+            RwLockWriteGuard::new(self)
+        }
+    }
+
+    /// Attempts to lock this `RwLock` with exclusive write access.
+    ///
+    /// If the lock could not be acquired at this time, then `Err` is returned.
+    /// Otherwise, an RAII guard is returned which will release the lock when
+    /// it is dropped.
+    ///
+    /// This function does not block.
+    ///
+    /// This function does not provide any guarantees with respect to the ordering
+    /// of whether contentious readers or writers will acquire the lock first.
+    ///
+    /// # Errors
+    ///
+    /// This function will return the [`WouldBlock`] error if the `RwLock` could
+    /// not be acquired because it was already locked.
+    ///
+    /// [`WouldBlock`]: WouldBlock
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let lock = RwLock::new(1);
+    ///
+    /// let n = lock.read();
+    /// assert_eq!(*n, 1);
+    ///
+    /// assert!(lock.try_write().is_err());
+    /// ```
+    #[inline]
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+        unsafe {
+            if self.inner.try_write() { Ok(RwLockWriteGuard::new(self)) } else { Err(WouldBlock) }
+        }
+    }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let lock = RwLock::new(String::new());
+    /// {
+    ///     let mut s = lock.write();
+    ///     *s = "modified".to_owned();
+    /// }
+    /// assert_eq!(lock.into_inner(), "modified");
+    /// ```
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn into_inner(self) -> T
+    where
+        T: Sized,
+    {
+        self.data.into_inner()
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place -- the mutable borrow statically guarantees no new locks can be acquired
+    /// while this reference exists. Note that this method does not clear any previously abandoned
+    /// locks (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let mut lock = RwLock::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.read(), 10);
+    /// ```
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.data.get_mut()
+    }
+
+    /// Returns a raw pointer to the underlying data.
+    ///
+    /// The returned pointer is always non-null and properly aligned, but it is
+    /// the user's responsibility to ensure that any reads and writes through it
+    /// are properly synchronized to avoid data races, and that it is not read
+    /// or written through after the lock is dropped.
+    #[unstable(feature = "rwlock_data_ptr", issue = "140368")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn data_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut d = f.debug_struct("RwLock");
+        match self.try_read() {
+            Ok(guard) => {
+                d.field("data", &&*guard);
+            }
+            Err(WouldBlock) => {
+                d.field("data", &format_args!("<locked>"));
+            }
+        }
+        d.finish_non_exhaustive()
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: Default> Default for RwLock<T> {
+    /// Creates a new `RwLock<T>`, with the `Default` value for T.
+    fn default() -> RwLock<T> {
+        RwLock::new(Default::default())
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T> From<T> for RwLock<T> {
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    /// This is equivalent to [`RwLock::new`].
+    fn from(t: T) -> Self {
+        RwLock::new(t)
+    }
+}
+
+impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+    /// Creates a new instance of `RwLockReadGuard<T>` from a `RwLock<T>`.
+    ///
+    /// # Safety
+    ///
+    /// This function is safe if and only if the same thread has successfully and safely called
+    /// `lock.inner.read()`, `lock.inner.try_read()`, or `lock.inner.downgrade()` before
+    /// instantiating this object.
+    unsafe fn new(lock: &'rwlock RwLock<T>) -> RwLockReadGuard<'rwlock, T> {
+        RwLockReadGuard {
+            data: unsafe { NonNull::new_unchecked(lock.data.get()) },
+            inner_lock: &lock.inner,
+        }
+    }
+
+    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g.
+    /// an enum variant.
+    ///
+    /// The `RwLock` is already locked for reading, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `RwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the `RwLockReadGuard` used through
+    /// `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
+    where
+        F: FnOnce(&T) -> &U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+    }
+
+    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. The
+    /// original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
+    ///
+    /// The `RwLock` is already locked for reading, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `RwLockReadGuard::filter_map(...)`. A method would interfere with methods
+    /// of the same name on the contents of the `RwLockReadGuard` used through
+    /// `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { orig.data.as_ref() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
+            }
+            None => Err(orig),
+        }
+    }
+}
+
+impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+    /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
+    ///
+    /// # Safety
+    ///
+    /// This function is safe if and only if the same thread has successfully and safely called
+    /// `lock.inner.write()`, `lock.inner.try_write()`, or `lock.inner.try_upgrade` before
+    /// instantiating this object.
+    unsafe fn new(lock: &'rwlock RwLock<T>) -> RwLockWriteGuard<'rwlock, T> {
+        RwLockWriteGuard { lock }
+    }
+
+    /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
+    ///
+    /// Since we have the `RwLockWriteGuard`, the [`RwLock`] must already be locked for writing, so
+    /// this method cannot fail.
+    ///
+    /// After downgrading, other readers will be allowed to read the protected data.
+    ///
+    /// # Examples
+    ///
+    /// `downgrade` takes ownership of the `RwLockWriteGuard` and returns a [`RwLockReadGuard`].
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(rwlock_downgrade)]
+    ///
+    /// use std::sync::nonpoison::{RwLock, RwLockWriteGuard};
+    ///
+    /// let rw = RwLock::new(0);
+    ///
+    /// let mut write_guard = rw.write();
+    /// *write_guard = 42;
+    ///
+    /// let read_guard = RwLockWriteGuard::downgrade(write_guard);
+    /// assert_eq!(42, *read_guard);
+    /// ```
+    ///
+    /// `downgrade` will _atomically_ change the state of the [`RwLock`] from exclusive mode into
+    /// shared mode. This means that it is impossible for another writing thread to get in between a
+    /// thread calling `downgrade` and any reads it performs after downgrading.
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(rwlock_downgrade)]
+    ///
+    /// use std::sync::Arc;
+    /// use std::sync::nonpoison::{RwLock, RwLockWriteGuard};
+    ///
+    /// let rw = Arc::new(RwLock::new(1));
+    ///
+    /// // Put the lock in write mode.
+    /// let mut main_write_guard = rw.write();
+    ///
+    /// let rw_clone = rw.clone();
+    /// let evil_handle = std::thread::spawn(move || {
+    ///     // This will not return until the main thread drops the `main_read_guard`.
+    ///     let mut evil_guard = rw_clone.write();
+    ///
+    ///     assert_eq!(*evil_guard, 2);
+    ///     *evil_guard = 3;
+    /// });
+    ///
+    /// *main_write_guard = 2;
+    ///
+    /// // Atomically downgrade the write guard into a read guard.
+    /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+    ///
+    /// // Since `downgrade` is atomic, the writer thread cannot have changed the protected data.
+    /// assert_eq!(*main_read_guard, 2, "`downgrade` was not atomic");
+    /// #
+    /// # drop(main_read_guard);
+    /// # evil_handle.join().unwrap();
+    /// #
+    /// # let final_check = rw.read();
+    /// # assert_eq!(*final_check, 3);
+    /// ```
+    #[unstable(feature = "rwlock_downgrade", issue = "128203")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'rwlock, T> {
+        let lock = s.lock;
+
+        // We don't want to call the destructor since that calls `write_unlock`.
+        forget(s);
+
+        // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
+        // mode, satisfying the `downgrade` contract.
+        unsafe { lock.inner.downgrade() };
+
+        // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
+        unsafe { RwLockReadGuard::new(lock) }
+    }
+
+    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g.
+    /// an enum variant.
+    ///
+    /// The `RwLock` is already locked for writing, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `RwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the `RwLockWriteGuard` used through
+    /// `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedRwLockWriteGuard { data, inner_lock: &orig.lock.inner, _variance: PhantomData }
+    }
+
+    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data. The
+    /// original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
+    ///
+    /// The `RwLock` is already locked for writing, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `RwLockWriteGuard::filter_map(...)`. A method would interfere with methods
+    /// of the same name on the contents of the `RwLockWriteGuard` used through
+    /// `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { &mut *orig.lock.data.get() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedRwLockWriteGuard {
+                    data,
+                    inner_lock: &orig.lock.inner,
+                    _variance: PhantomData,
+                })
+            }
+            None => Err(orig),
+        }
+    }
+}
+
+impl<'rwlock, T: ?Sized> MappedRwLockReadGuard<'rwlock, T> {
+    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
+    /// e.g. an enum variant.
+    ///
+    /// The `RwLock` is already locked for reading, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+    /// used through `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
+    where
+        F: FnOnce(&T) -> &U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+    }
+
+    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
+    /// The original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
+    ///
+    /// The `RwLock` is already locked for reading, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+    /// used through `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { orig.data.as_ref() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
+            }
+            None => Err(orig),
+        }
+    }
+}
+
+impl<'rwlock, T: ?Sized> MappedRwLockWriteGuard<'rwlock, T> {
+    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data,
+    /// e.g. an enum variant.
+    ///
+    /// The `RwLock` is already locked for writing, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MappedRwLockWriteGuard::map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the `MappedRwLockWriteGuard`
+    /// used through `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedRwLockWriteGuard { data, inner_lock: orig.inner_lock, _variance: PhantomData }
+    }
+
+    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data.
+    /// The original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
+    ///
+    /// The `RwLock` is already locked for writing, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MappedRwLockWriteGuard::filter_map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the `MappedRwLockWriteGuard`
+    /// used through `Deref`.
+    ///
+    /// # Panics
+    ///
+    /// If the closure panics, the guard will be dropped (unlocked).
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn filter_map<U, F>(
+        mut orig: Self,
+        f: F,
+    ) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { orig.data.as_mut() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedRwLockWriteGuard {
+                    data,
+                    inner_lock: orig.inner_lock,
+                    _variance: PhantomData,
+                })
+            }
+            None => Err(orig),
+        }
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+        unsafe {
+            self.inner_lock.read_unlock();
+        }
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+        unsafe {
+            self.lock.inner.write_unlock();
+        }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe {
+            self.inner_lock.read_unlock();
+        }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe {
+            self.inner_lock.write_unlock();
+        }
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe { self.data.as_mut() }
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
diff --git a/library/std/src/sync/poison/rwlock.rs b/library/std/src/sync/poison/rwlock.rs
index 2c92602bc87..0a463f3f9c7 100644
--- a/library/std/src/sync/poison/rwlock.rs
+++ b/library/std/src/sync/poison/rwlock.rs
@@ -80,16 +80,24 @@ use crate::sys::sync as sys;
 #[stable(feature = "rust1", since = "1.0.0")]
 #[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")]
 pub struct RwLock<T: ?Sized> {
+    /// The inner [`sys::RwLock`] that synchronizes thread access to the protected data.
     inner: sys::RwLock,
+    /// A flag denoting if this `RwLock` has been poisoned.
     poison: poison::Flag,
+    /// The lock-protected data.
     data: UnsafeCell<T>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Guards
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 /// RAII structure used to release the shared read access of a lock when
 /// dropped.
 ///
@@ -105,13 +113,15 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
 #[clippy::has_significant_drop]
 #[cfg_attr(not(test), rustc_diagnostic_item = "RwLockReadGuard")]
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-    // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
-    // `RwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops.
-    // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
-    // is preferable over `const* T` to allow for niche optimization.
+pub struct RwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `RwLockReadGuard` instance only holds
+    /// immutability until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+    /// covariant over `T`, just like we would have with `&T`.
     data: NonNull<T>,
-    inner_lock: &'a sys::RwLock,
+    /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+    inner_lock: &'rwlock sys::RwLock,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -135,8 +145,10 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
 #[clippy::has_significant_drop]
 #[cfg_attr(not(test), rustc_diagnostic_item = "RwLockWriteGuard")]
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-    lock: &'a RwLock<T>,
+pub struct RwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A reference to the [`RwLock`] that we have write-locked.
+    lock: &'rwlock RwLock<T>,
+    /// The poison guard. See the [`poison`] module for more information.
     poison: poison::Guard,
 }
 
@@ -160,13 +172,15 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
                       and cause Futures to not implement `Send`"]
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
 #[clippy::has_significant_drop]
-pub struct MappedRwLockReadGuard<'a, T: ?Sized + 'a> {
-    // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
-    // `MappedRwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops.
-    // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
-    // is preferable over `const* T` to allow for niche optimization.
+pub struct MappedRwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockReadGuard` instance only
+    /// holds immutability until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+    /// covariant over `T`, just like we would have with `&T`.
     data: NonNull<T>,
-    inner_lock: &'a sys::RwLock,
+    /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+    inner_lock: &'rwlock sys::RwLock,
 }
 
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
@@ -189,16 +203,21 @@ unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}
                       and cause Future's to not implement `Send`"]
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
 #[clippy::has_significant_drop]
-pub struct MappedRwLockWriteGuard<'a, T: ?Sized + 'a> {
-    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
-    // `MappedRwLockWriteGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
-    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
-    // below for the correct variance over `T` (invariance).
+pub struct MappedRwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockWriteGuard` instance only
+    /// holds uniqueness until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations.
     data: NonNull<T>,
-    inner_lock: &'a sys::RwLock,
-    poison_flag: &'a poison::Flag,
-    poison: poison::Guard,
-    _variance: PhantomData<&'a mut T>,
+    /// `NonNull` is covariant over `T`, so we add a `PhantomData<&'rwlock mut T>` field here to
+    /// enforce the correct invariance over `T`.
+    _variance: PhantomData<&'rwlock mut T>,
+    /// A reference to the internal [`sys::RwLock`] that we have write-locked.
+    inner_lock: &'rwlock sys::RwLock,
+    /// A reference to the original `RwLock`'s poison state.
+    poison_flag: &'rwlock poison::Flag,
+    /// The poison guard. See the [`poison`] module for more information.
+    poison_guard: poison::Guard,
 }
 
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
@@ -207,6 +226,10 @@ impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
 unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}
 
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Implementations
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 impl<T> RwLock<T> {
     /// Creates a new instance of an `RwLock<T>` which is unlocked.
     ///
@@ -611,8 +634,8 @@ impl<T: ?Sized> RwLock<T> {
     ///
     /// Since this call borrows the `RwLock` mutably, no actual locking needs to
     /// take place -- the mutable borrow statically guarantees no new locks can be acquired
-    /// while this reference exists. Note that this method does not clear any previously abandoned locks
-    /// (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
+    /// while this reference exists. Note that this method does not clear any previously abandoned
+    /// locks (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
     ///
     /// # Errors
     ///
@@ -700,177 +723,7 @@ impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
             inner_lock: &lock.inner,
         })
     }
-}
-
-impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
-    /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
-    // SAFETY: if and only if `lock.inner.write()` (or `lock.inner.try_write()`) has been
-    // successfully called from the same thread before instantiating this object.
-    unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
-        poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
-    }
-}
-
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[stable(feature = "std_guard_impls", since = "1.20.0")]
-impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[stable(feature = "std_guard_impls", since = "1.20.0")]
-impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        (**self).fmt(f)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
-        unsafe { self.data.as_ref() }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
-        unsafe { &*self.lock.data.get() }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
-        unsafe { &mut *self.lock.data.get() }
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        unsafe { self.data.as_ref() }
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        unsafe { self.data.as_ref() }
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        unsafe { self.data.as_mut() }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
-    fn drop(&mut self) {
-        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
-        unsafe {
-            self.inner_lock.read_unlock();
-        }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
-    fn drop(&mut self) {
-        self.lock.poison.done(&self.poison);
-        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
-        unsafe {
-            self.lock.inner.write_unlock();
-        }
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
-    fn drop(&mut self) {
-        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        unsafe {
-            self.inner_lock.read_unlock();
-        }
-    }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
-    fn drop(&mut self) {
-        self.poison_flag.done(&self.poison);
-        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        unsafe {
-            self.inner_lock.write_unlock();
-        }
-    }
-}
 
-impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
     /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g.
     /// an enum variant.
     ///
@@ -883,17 +736,18 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
     ///
     /// # Panics
     ///
-    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
+    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+    /// poisoned.
     #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U>
+    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
     where
         F: FnOnce(&T) -> &U,
         U: ?Sized,
     {
         // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
         // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
         let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
         let orig = ManuallyDrop::new(orig);
         MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
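A minimal usage sketch of the read-guard `map` API adjusted in this hunk (nightly-only, behind `mapped_lock_guards`; the `Config` type and its fields are illustrative only and not part of this change):

```rust
#![feature(mapped_lock_guards)]

use std::sync::{RwLock, RwLockReadGuard};

// Hypothetical type used only to demonstrate borrowing a single field.
struct Config {
    name: String,
    retries: u32,
}

fn main() {
    let lock = RwLock::new(Config { name: "demo".into(), retries: 3 });

    // Map the read guard down to one field; the read lock stays held by the mapped guard.
    let guard = lock.read().unwrap();
    let name = RwLockReadGuard::map(guard, |cfg| &cfg.name);
    assert_eq!(name.as_str(), "demo");

    // Multiple read guards may coexist, so the rest of the struct is still reachable.
    assert_eq!(lock.read().unwrap().retries, 3);
    // The read lock held by `name` is released when it is dropped at the end of scope.
}
```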
@@ -912,17 +766,18 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
     ///
     /// # Panics
     ///
-    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
+    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+    /// poisoned.
     #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
     where
         F: FnOnce(&T) -> Option<&U>,
         U: ?Sized,
     {
         // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
         // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
         match f(unsafe { orig.data.as_ref() }) {
             Some(data) => {
                 let data = NonNull::from(data);
@@ -934,71 +789,95 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
     }
 }
 
-impl<'a, T: ?Sized> MappedRwLockReadGuard<'a, T> {
-    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
-    /// e.g. an enum variant.
-    ///
-    /// The `RwLock` is already locked for reading, so this cannot fail.
-    ///
-    /// This is an associated function that needs to be used as
-    /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
-    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
-    /// used through `Deref`.
+impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+    /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
     ///
-    /// # Panics
+    /// # Safety
     ///
-    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
-    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U>
-    where
-        F: FnOnce(&T) -> &U,
-        U: ?Sized,
-    {
-        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
-        let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
-        let orig = ManuallyDrop::new(orig);
-        MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+    /// This function is safe if and only if the same thread has successfully and safely called
+    /// `lock.inner.write()`, `lock.inner.try_write()`, or `lock.inner.try_upgrade()` before
+    /// instantiating this object.
+    unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
+        poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
     }
 
-    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
-    /// The original guard is returned as an `Err(...)` if the closure returns
-    /// `None`.
+    /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
     ///
-    /// The `RwLock` is already locked for reading, so this cannot fail.
+    /// Since we have the `RwLockWriteGuard`, the [`RwLock`] must already be locked for writing, so
+    /// this method cannot fail.
     ///
-    /// This is an associated function that needs to be used as
-    /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with
-    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
-    /// used through `Deref`.
+    /// After downgrading, other readers will be allowed to read the protected data.
     ///
-    /// # Panics
+    /// # Examples
     ///
-    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
-    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
-    where
-        F: FnOnce(&T) -> Option<&U>,
-        U: ?Sized,
-    {
-        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
-        // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
-        match f(unsafe { orig.data.as_ref() }) {
-            Some(data) => {
-                let data = NonNull::from(data);
-                let orig = ManuallyDrop::new(orig);
-                Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
-            }
-            None => Err(orig),
-        }
+    /// `downgrade` takes ownership of the `RwLockWriteGuard` and returns a [`RwLockReadGuard`].
+    ///
+    /// ```
+    /// #![feature(rwlock_downgrade)]
+    ///
+    /// use std::sync::{RwLock, RwLockWriteGuard};
+    ///
+    /// let rw = RwLock::new(0);
+    ///
+    /// let mut write_guard = rw.write().unwrap();
+    /// *write_guard = 42;
+    ///
+    /// let read_guard = RwLockWriteGuard::downgrade(write_guard);
+    /// assert_eq!(42, *read_guard);
+    /// ```
+    ///
+    /// `downgrade` will _atomically_ change the state of the [`RwLock`] from exclusive mode into
+    /// shared mode. This means that it is impossible for another writing thread to get in between a
+    /// thread calling `downgrade` and any reads it performs after downgrading.
+    ///
+    /// ```
+    /// #![feature(rwlock_downgrade)]
+    ///
+    /// use std::sync::{Arc, RwLock, RwLockWriteGuard};
+    ///
+    /// let rw = Arc::new(RwLock::new(1));
+    ///
+    /// // Put the lock in write mode.
+    /// let mut main_write_guard = rw.write().unwrap();
+    ///
+    /// let rw_clone = rw.clone();
+    /// let evil_handle = std::thread::spawn(move || {
+    ///     // This will not return until the main thread drops the `main_read_guard`.
+    ///     let mut evil_guard = rw_clone.write().unwrap();
+    ///
+    ///     assert_eq!(*evil_guard, 2);
+    ///     *evil_guard = 3;
+    /// });
+    ///
+    /// *main_write_guard = 2;
+    ///
+    /// // Atomically downgrade the write guard into a read guard.
+    /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+    ///
+    /// // Since `downgrade` is atomic, the writer thread cannot have changed the protected data.
+    /// assert_eq!(*main_read_guard, 2, "`downgrade` was not atomic");
+    /// #
+    /// # drop(main_read_guard);
+    /// # evil_handle.join().unwrap();
+    /// #
+    /// # let final_check = rw.read().unwrap();
+    /// # assert_eq!(*final_check, 3);
+    /// ```
+    #[unstable(feature = "rwlock_downgrade", issue = "128203")]
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'rwlock, T> {
+        let lock = s.lock;
+
+        // We don't want to call the destructor since that calls `write_unlock`.
+        forget(s);
+
+        // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
+        // mode, satisfying the `downgrade` contract.
+        unsafe { lock.inner.downgrade() };
+
+        // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
+        unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) }
     }
-}
 
-impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
     /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g.
     /// an enum variant.
     ///
@@ -1013,22 +892,22 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
     ///
     /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
     #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
+    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
     where
         F: FnOnce(&mut T) -> &mut U,
         U: ?Sized,
     {
         // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
         // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
         let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
         let orig = ManuallyDrop::new(orig);
         MappedRwLockWriteGuard {
             data,
             inner_lock: &orig.lock.inner,
             poison_flag: &orig.lock.poison,
-            poison: orig.poison.clone(),
+            poison_guard: orig.poison.clone(),
             _variance: PhantomData,
         }
     }
@@ -1048,15 +927,15 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
     ///
     /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
     #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
     where
         F: FnOnce(&mut T) -> Option<&mut U>,
         U: ?Sized,
     {
         // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
         // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
         match f(unsafe { &mut *orig.lock.data.get() }) {
             Some(data) => {
                 let data = NonNull::from(data);
@@ -1065,78 +944,82 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
                     data,
                     inner_lock: &orig.lock.inner,
                     poison_flag: &orig.lock.poison,
-                    poison: orig.poison.clone(),
+                    poison_guard: orig.poison.clone(),
                     _variance: PhantomData,
                 })
             }
             None => Err(orig),
         }
     }
+}
 
-    /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
-    ///
-    /// This method will atomically change the state of the [`RwLock`] from exclusive mode into
-    /// shared mode. This means that it is impossible for a writing thread to get in between a
-    /// thread calling `downgrade` and the same thread reading whatever it wrote while it had the
-    /// [`RwLock`] in write mode.
-    ///
-    /// Note that since we have the `RwLockWriteGuard`, we know that the [`RwLock`] is already
-    /// locked for writing, so this method cannot fail.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// #![feature(rwlock_downgrade)]
-    /// use std::sync::{Arc, RwLock, RwLockWriteGuard};
-    ///
-    /// // The inner value starts as 0.
-    /// let rw = Arc::new(RwLock::new(0));
+impl<'rwlock, T: ?Sized> MappedRwLockReadGuard<'rwlock, T> {
+    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
+    /// e.g. an enum variant.
     ///
-    /// // Put the lock in write mode.
-    /// let mut main_write_guard = rw.write().unwrap();
+    /// The `RwLock` is already locked for reading, so this cannot fail.
     ///
-    /// let evil = rw.clone();
-    /// let handle = std::thread::spawn(move || {
-    ///     // This will not return until the main thread drops the `main_read_guard`.
-    ///     let mut evil_guard = evil.write().unwrap();
+    /// This is an associated function that needs to be used as
+    /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+    /// used through `Deref`.
     ///
-    ///     assert_eq!(*evil_guard, 1);
-    ///     *evil_guard = 2;
-    /// });
+    /// # Panics
     ///
-    /// // After spawning the writer thread, set the inner value to 1.
-    /// *main_write_guard = 1;
+    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+    /// poisoned.
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
+    where
+        F: FnOnce(&T) -> &U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+    }
+
+    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
+    /// The original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
     ///
-    /// // Atomically downgrade the write guard into a read guard.
-    /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+    /// The `RwLock` is already locked for reading, so this cannot fail.
     ///
-    /// // Since `downgrade` is atomic, the writer thread cannot have set the inner value to 2.
-    /// assert_eq!(*main_read_guard, 1, "`downgrade` was not atomic");
+    /// This is an associated function that needs to be used as
+    /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+    /// used through `Deref`.
     ///
-    /// // Clean up everything now
-    /// drop(main_read_guard);
-    /// handle.join().unwrap();
+    /// # Panics
     ///
-    /// let final_check = rw.read().unwrap();
-    /// assert_eq!(*final_check, 2);
-    /// ```
-    #[unstable(feature = "rwlock_downgrade", issue = "128203")]
-    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> {
-        let lock = s.lock;
-
-        // We don't want to call the destructor since that calls `write_unlock`.
-        forget(s);
-
-        // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
-        // mode, satisfying the `downgrade` contract.
-        unsafe { lock.inner.downgrade() };
-
-        // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
-        unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) }
+    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+    /// poisoned.
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { orig.data.as_ref() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
+            }
+            None => Err(orig),
+        }
     }
 }
 
-impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
+impl<'rwlock, T: ?Sized> MappedRwLockWriteGuard<'rwlock, T> {
     /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data,
     /// e.g. an enum variant.
     ///
@@ -1151,22 +1034,22 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
     ///
     /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
     #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
+    pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
     where
         F: FnOnce(&mut T) -> &mut U,
         U: ?Sized,
     {
         // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
         // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
         let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
         let orig = ManuallyDrop::new(orig);
         MappedRwLockWriteGuard {
             data,
             inner_lock: orig.inner_lock,
             poison_flag: orig.poison_flag,
-            poison: orig.poison.clone(),
+            poison_guard: orig.poison_guard.clone(),
             _variance: PhantomData,
         }
     }
@@ -1186,15 +1069,18 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
     ///
     /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
     #[unstable(feature = "mapped_lock_guards", issue = "117108")]
-    pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
+    pub fn filter_map<U, F>(
+        mut orig: Self,
+        f: F,
+    ) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
     where
         F: FnOnce(&mut T) -> Option<&mut U>,
         U: ?Sized,
     {
         // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
         // was created, and have been upheld throughout `map` and/or `filter_map`.
-        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
-        // passed to it. If the closure panics, the guard will be dropped.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the
+        // reference passed to it. If the closure panics, the guard will be dropped.
         match f(unsafe { orig.data.as_mut() }) {
             Some(data) => {
                 let data = NonNull::from(data);
@@ -1203,7 +1089,7 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
                     data,
                     inner_lock: orig.inner_lock,
                     poison_flag: orig.poison_flag,
-                    poison: orig.poison.clone(),
+                    poison_guard: orig.poison_guard.clone(),
                     _variance: PhantomData,
                 })
             }
@@ -1211,3 +1097,162 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
         }
     }
 }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+        unsafe {
+            self.inner_lock.read_unlock();
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
+    fn drop(&mut self) {
+        self.lock.poison.done(&self.poison);
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+        unsafe {
+            self.lock.inner.write_unlock();
+        }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
+    fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe {
+            self.inner_lock.read_unlock();
+        }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
+    fn drop(&mut self) {
+        self.poison_flag.done(&self.poison_guard);
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe {
+            self.inner_lock.write_unlock();
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        unsafe { self.data.as_mut() }
+    }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
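The `Drop` impls above are where poisoning is recorded: dropping a write guard during a panic marks the lock as poisoned, while dropping a read guard never does. A small sketch of that behaviour against the stable `std::sync::RwLock` surface (requires unwinding panics):

```rust
use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let lock = Arc::new(RwLock::new(0));

    // Panic while a write guard is alive; dropping it during the unwind poisons the lock.
    let writer = Arc::clone(&lock);
    let _ = thread::spawn(move || {
        let _guard = writer.write().unwrap();
        panic!("poison the lock");
    })
    .join();

    assert!(lock.is_poisoned());
    assert!(lock.read().is_err());

    // The data is still reachable by acknowledging the poison explicitly.
    let value = *lock.read().unwrap_or_else(|e| e.into_inner());
    assert_eq!(value, 0);
}
```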
diff --git a/library/std/tests/sync/lib.rs b/library/std/tests/sync/lib.rs
index 94f1fe96b6a..f874c2ba389 100644
--- a/library/std/tests/sync/lib.rs
+++ b/library/std/tests/sync/lib.rs
@@ -8,6 +8,7 @@
 #![feature(std_internals)]
 #![feature(sync_nonpoison)]
 #![feature(nonpoison_mutex)]
+#![feature(nonpoison_rwlock)]
 #![allow(internal_features)]
 #![feature(macro_metavar_expr_concat)] // For concatenating identifiers in macros.
 
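The rewritten tests below drive both lock flavours through one macro; the API difference that the shared `maybe_unwrap` helper smooths over is roughly the following (a sketch assuming a nightly toolchain with the feature gates enabled above; gating details may differ):

```rust
#![feature(sync_nonpoison, nonpoison_rwlock)]

fn main() {
    // Poisoning variant: locking returns a `LockResult`, so callers unwrap (or handle) it.
    let poisoning = std::sync::RwLock::new(1);
    assert_eq!(*poisoning.read().unwrap(), 1);

    // Nonpoison variant: locking hands back the guard directly; only the `try_*`
    // methods return a `Result`, with `WouldBlock` as the error type.
    let nonpoison = std::sync::nonpoison::RwLock::new(1);
    let guard = nonpoison.read();
    assert_eq!(*guard, 1);
    assert!(nonpoison.try_write().is_err()); // a read guard is still held
}
```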
diff --git a/library/std/tests/sync/rwlock.rs b/library/std/tests/sync/rwlock.rs
index 1d55a176948..eca15d2a4ad 100644
--- a/library/std/tests/sync/rwlock.rs
+++ b/library/std/tests/sync/rwlock.rs
@@ -29,239 +29,457 @@ fn test_needs_drop() {
     assert!(mem::needs_drop::<NonCopyNeedsDrop>());
 }
 
-#[derive(Clone, Eq, PartialEq, Debug)]
-struct Cloneable(i32);
-
-#[test]
-fn smoke() {
-    let l = RwLock::new(());
-    drop(l.read().unwrap());
-    drop(l.write().unwrap());
-    drop((l.read().unwrap(), l.read().unwrap()));
-    drop(l.write().unwrap());
-}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Non-poison & Poison Tests
+////////////////////////////////////////////////////////////////////////////////////////////////////
+use super::nonpoison_and_poison_unwrap_test;
+
+nonpoison_and_poison_unwrap_test!(
+    name: smoke,
+    test_body: {
+        use locks::RwLock;
+
+        let l = RwLock::new(());
+        drop(maybe_unwrap(l.read()));
+        drop(maybe_unwrap(l.write()));
+        drop((maybe_unwrap(l.read()), maybe_unwrap(l.read())));
+        drop(maybe_unwrap(l.write()));
+    }
+);
 
-#[test]
 // FIXME: On macOS we use a provenance-incorrect implementation and Miri
 // catches that issue with a chance of around 1/1000.
 // See <https://github.com/rust-lang/rust/issues/121950> for details.
 #[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn frob() {
-    const N: u32 = 10;
-    const M: usize = if cfg!(miri) { 100 } else { 1000 };
+nonpoison_and_poison_unwrap_test!(
+    name: frob,
+    test_body: {
+        use locks::RwLock;
 
-    let r = Arc::new(RwLock::new(()));
+        const N: u32 = 10;
+        const M: usize = if cfg!(miri) { 100 } else { 1000 };
 
-    let (tx, rx) = channel::<()>();
-    for _ in 0..N {
-        let tx = tx.clone();
-        let r = r.clone();
-        thread::spawn(move || {
-            let mut rng = crate::common::test_rng();
-            for _ in 0..M {
-                if rng.random_bool(1.0 / (N as f64)) {
-                    drop(r.write().unwrap());
-                } else {
-                    drop(r.read().unwrap());
+        let r = Arc::new(RwLock::new(()));
+
+        let (tx, rx) = channel::<()>();
+        for _ in 0..N {
+            let tx = tx.clone();
+            let r = r.clone();
+            thread::spawn(move || {
+                let mut rng = crate::common::test_rng();
+                for _ in 0..M {
+                    if rng.random_bool(1.0 / (N as f64)) {
+                        drop(maybe_unwrap(r.write()));
+                    } else {
+                        drop(maybe_unwrap(r.read()));
+                    }
                 }
+                drop(tx);
+            });
+        }
+        drop(tx);
+        let _ = rx.recv();
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_rw_arc,
+    test_body: {
+        use locks::RwLock;
+
+        let arc = Arc::new(RwLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        thread::spawn(move || {
+            let mut lock = maybe_unwrap(arc2.write());
+            for _ in 0..10 {
+                let tmp = *lock;
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
             }
-            drop(tx);
+            tx.send(()).unwrap();
         });
+
+        // Readers try to catch the writer in the act
+        let mut children = Vec::new();
+        for _ in 0..5 {
+            let arc3 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = maybe_unwrap(arc3.read());
+                assert!(*lock >= 0);
+            }));
+        }
+
+        // Wait for children to pass their asserts
+        for r in children {
+            assert!(r.join().is_ok());
+        }
+
+        // Wait for writer to finish
+        rx.recv().unwrap();
+        let lock = maybe_unwrap(arc.read());
+        assert_eq!(*lock, 10);
     }
-    drop(tx);
-    let _ = rx.recv();
-}
+);
 
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_wr() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let _lock = arc2.write().unwrap();
-        panic!();
-    })
-    .join();
-    assert!(arc.read().is_err());
-}
+nonpoison_and_poison_unwrap_test!(
+    name: test_rw_arc_access_in_unwind,
+    test_body: {
+        use locks::RwLock;
+
+        let arc = Arc::new(RwLock::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<RwLock<isize>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = maybe_unwrap(self.i.write());
+                    *lock += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = maybe_unwrap(arc.read());
+        assert_eq!(*lock, 2);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_rwlock_unsized,
+    test_body: {
+        use locks::RwLock;
+
+        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+        {
+            let b = &mut *maybe_unwrap(rw.write());
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*maybe_unwrap(rw.read()), comp);
+    }
+);
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_mapped_w_r() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let lock = arc2.write().unwrap();
-        let _lock = RwLockWriteGuard::map(lock, |val| val);
-        panic!();
-    })
-    .join();
-    assert!(arc.read().is_err());
-}
+nonpoison_and_poison_unwrap_test!(
+    name: test_into_inner,
+    test_body: {
+        use locks::RwLock;
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_ww() {
-    let arc = Arc::new(RwLock::new(1));
-    assert!(!arc.is_poisoned());
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let _lock = arc2.write().unwrap();
-        panic!();
-    })
-    .join();
-    assert!(arc.write().is_err());
-    assert!(arc.is_poisoned());
-}
+        let m = RwLock::new(NonCopy(10));
+        assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(10));
+    }
+);
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_mapped_w_w() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let lock = arc2.write().unwrap();
-        let _lock = RwLockWriteGuard::map(lock, |val| val);
-        panic!();
-    })
-    .join();
-    assert!(arc.write().is_err());
-    assert!(arc.is_poisoned());
-}
+nonpoison_and_poison_unwrap_test!(
+    name: test_into_inner_drop,
+    test_body: {
+        use locks::RwLock;
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_rr() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let _lock = arc2.read().unwrap();
-        panic!();
-    })
-    .join();
-    let lock = arc.read().unwrap();
-    assert_eq!(*lock, 1);
-}
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_mapped_r_r() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let lock = arc2.read().unwrap();
-        let _lock = RwLockReadGuard::map(lock, |val| val);
-        panic!();
-    })
-    .join();
-    let lock = arc.read().unwrap();
-    assert_eq!(*lock, 1);
-}
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = RwLock::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = maybe_unwrap(m.into_inner());
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+);
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_rw() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let _lock = arc2.read().unwrap();
-        panic!()
-    })
-    .join();
-    let lock = arc.write().unwrap();
-    assert_eq!(*lock, 1);
-}
+nonpoison_and_poison_unwrap_test!(
+    name: test_get_cloned,
+    test_body: {
+        use locks::RwLock;
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_mapped_r_w() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _: Result<(), _> = thread::spawn(move || {
-        let lock = arc2.read().unwrap();
-        let _lock = RwLockReadGuard::map(lock, |val| val);
-        panic!();
-    })
-    .join();
-    let lock = arc.write().unwrap();
-    assert_eq!(*lock, 1);
-}
+        #[derive(Clone, Eq, PartialEq, Debug)]
+        struct Cloneable(i32);
 
-#[test]
-fn test_rw_arc() {
-    let arc = Arc::new(RwLock::new(0));
-    let arc2 = arc.clone();
-    let (tx, rx) = channel();
-
-    thread::spawn(move || {
-        let mut lock = arc2.write().unwrap();
-        for _ in 0..10 {
-            let tmp = *lock;
-            *lock = -1;
-            thread::yield_now();
-            *lock = tmp + 1;
+        let m = RwLock::new(Cloneable(10));
+
+        assert_eq!(maybe_unwrap(m.get_cloned()), Cloneable(10));
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_get_mut,
+    test_body: {
+        use locks::RwLock;
+
+        let mut m = RwLock::new(NonCopy(10));
+        *maybe_unwrap(m.get_mut()) = NonCopy(20);
+        assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(20));
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_set,
+    test_body: {
+        use locks::RwLock;
+
+        fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+        where
+            T: Debug + Eq,
+        {
+            let m = RwLock::new(init());
+
+            assert_eq!(*maybe_unwrap(m.read()), init());
+            maybe_unwrap(m.set(value()));
+            assert_eq!(*maybe_unwrap(m.read()), value());
+        }
+
+        inner(|| NonCopy(10), || NonCopy(20));
+        inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_replace,
+    test_body: {
+        use locks::RwLock;
+
+        fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+        where
+            T: Debug + Eq,
+        {
+            let m = RwLock::new(init());
+
+            assert_eq!(*maybe_unwrap(m.read()), init());
+            assert_eq!(maybe_unwrap(m.replace(value())), init());
+            assert_eq!(*maybe_unwrap(m.read()), value());
         }
-        tx.send(()).unwrap();
-    });
 
-    // Readers try to catch the writer in the act
-    let mut children = Vec::new();
-    for _ in 0..5 {
-        let arc3 = arc.clone();
-        children.push(thread::spawn(move || {
-            let lock = arc3.read().unwrap();
-            assert!(*lock >= 0);
-        }));
+        inner(|| NonCopy(10), || NonCopy(20));
+        inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_read_guard_covariance,
+    test_body: {
+        use locks::{RwLock, RwLockReadGuard};
+
+        fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+        let j: i32 = 5;
+        let lock = RwLock::new(&j);
+        {
+            let i = 6;
+            do_stuff(maybe_unwrap(lock.read()), &i);
+        }
+        drop(lock);
     }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_mapped_read_guard_covariance,
+    test_body: {
+        use locks::{RwLock, RwLockReadGuard, MappedRwLockReadGuard};
+
+        fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+        let j: i32 = 5;
+        let lock = RwLock::new((&j, &j));
+        {
+            let i = 6;
+            let guard = maybe_unwrap(lock.read());
+            let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
+            do_stuff(guard, &i);
+        }
+        drop(lock);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_downgrade_basic,
+    test_body: {
+        use locks::{RwLock, RwLockWriteGuard};
+
+        let r = RwLock::new(());
 
-    // Wait for children to pass their asserts
-    for r in children {
-        assert!(r.join().is_ok());
+        let write_guard = maybe_unwrap(r.write());
+        let _read_guard = RwLockWriteGuard::downgrade(write_guard);
     }
+);
 
-    // Wait for writer to finish
-    rx.recv().unwrap();
-    let lock = arc.read().unwrap();
-    assert_eq!(*lock, 10);
-}
+// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
+// See <https://github.com/rust-lang/rust/issues/121950> for details.
+#[cfg_attr(all(miri, target_os = "macos"), ignore)]
+nonpoison_and_poison_unwrap_test!(
+    name: test_downgrade_observe,
+    test_body: {
+        use locks::{RwLock, RwLockWriteGuard};
+
+        // Inspired by the test `test_rwlock_downgrade` from:
+        // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs
+
+        const W: usize = 20;
+        const N: usize = if cfg!(miri) { 40 } else { 100 };
+
+        // This test spawns `W` writer threads, where each will increment a counter `N` times,
+        // ensuring that the value they wrote has not changed after downgrading.
+
+        let rw = Arc::new(RwLock::new(0));
+
+        // Spawn the writers that will do `W * N` operations and checks.
+        let handles: Vec<_> = (0..W)
+            .map(|_| {
+                let rw = rw.clone();
+                thread::spawn(move || {
+                    for _ in 0..N {
+                        // Increment the counter.
+                        let mut write_guard = maybe_unwrap(rw.write());
+                        *write_guard += 1;
+                        let cur_val = *write_guard;
+
+                        // Downgrade the lock to read mode, where the value protected cannot be
+                        // modified.
+                        let read_guard = RwLockWriteGuard::downgrade(write_guard);
+                        assert_eq!(cur_val, *read_guard);
+                    }
+                })
+            })
+            .collect();
 
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_access_in_unwind() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _ = thread::spawn(move || -> () {
-        struct Unwinder {
-            i: Arc<RwLock<isize>>,
+        for handle in handles {
+            handle.join().unwrap();
         }
-        impl Drop for Unwinder {
-            fn drop(&mut self) {
-                let mut lock = self.i.write().unwrap();
-                *lock += 1;
-            }
+
+        assert_eq!(*maybe_unwrap(rw.read()), W * N);
+    }
+);
+
+// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
+// See <https://github.com/rust-lang/rust/issues/121950> for details.
+#[cfg_attr(all(miri, target_os = "macos"), ignore)]
+nonpoison_and_poison_unwrap_test!(
+    name: test_downgrade_atomic,
+    test_body: {
+        use locks::{RwLock, RwLockWriteGuard};
+
+        const NEW_VALUE: i32 = -1;
+
+        // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
+        // downgraded, the lock must be in read mode and no other threads can take the write lock to
+        // modify the protected value.
+
+        // `W` is the number of evil writer threads.
+        const W: usize = 20;
+        let rwlock = Arc::new(RwLock::new(0));
+
+        // Spawns many evil writer threads that will try to write to the locked value before the
+        // initial writer (which holds the exclusive lock) can read after it downgrades.
+        // If the `RwLock` behaves correctly, then the initial writer should read back the value it
+        // wrote itself, as no other thread should be able to mutate the protected value.
+
+        // Put the lock in write mode, causing all future threads that try to access it to go to sleep.
+        let mut main_write_guard = maybe_unwrap(rwlock.write());
+
+        // Spawn all of the evil writer threads. They will each increment the protected value by 1.
+        let handles: Vec<_> = (0..W)
+            .map(|_| {
+                let rwlock = rwlock.clone();
+                thread::spawn(move || {
+                    // Will go to sleep since the main thread initially has the write lock.
+                    let mut evil_guard = maybe_unwrap(rwlock.write());
+                    *evil_guard += 1;
+                })
+            })
+            .collect();
+
+        // Wait for a good amount of time so that evil threads go to sleep.
+        // Note: this is not strictly necessary...
+        let eternity = std::time::Duration::from_millis(42);
+        thread::sleep(eternity);
+
+        // Once everyone is asleep, set the value to `NEW_VALUE`.
+        *main_write_guard = NEW_VALUE;
+
+        // Atomically downgrade the write guard into a read guard.
+        let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+
+        // If the above is not atomic, then it would be possible for an evil thread to get in front
+        // of this read and change the value to be non-negative.
+        assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
+
+        // Drop the main read guard and allow the evil writer threads to start incrementing.
+        drop(main_read_guard);
+
+        for handle in handles {
+            handle.join().unwrap();
         }
-        let _u = Unwinder { i: arc2 };
-        panic!();
-    })
-    .join();
-    let lock = arc.read().unwrap();
-    assert_eq!(*lock, 2);
-}
+
+        let final_check = maybe_unwrap(rwlock.read());
+        assert_eq!(*final_check, W as i32 + NEW_VALUE);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_mapping_mapped_guard,
+    test_body: {
+        use locks::{
+            RwLock, RwLockReadGuard, RwLockWriteGuard, MappedRwLockReadGuard, MappedRwLockWriteGuard
+        };
+
+        let arr = [0; 4];
+        let mut lock = RwLock::new(arr);
+        let guard = maybe_unwrap(lock.write());
+        let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
+        let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
+        assert_eq!(guard.len(), 1);
+        guard[0] = 42;
+        drop(guard);
+        assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]);
+
+        let guard = maybe_unwrap(lock.read());
+        let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
+        let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
+        assert_eq!(*guard, [42]);
+        drop(guard);
+        assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]);
+    }
+);
 
 #[test]
-fn test_rwlock_unsized() {
-    let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
-    {
-        let b = &mut *rw.write().unwrap();
-        b[0] = 4;
-        b[2] = 5;
+fn nonpoison_test_rwlock_try_write() {
+    use std::sync::nonpoison::{RwLock, RwLockReadGuard, WouldBlock};
+
+    let lock = RwLock::new(0isize);
+    let read_guard = lock.read();
+
+    let write_result = lock.try_write();
+    match write_result {
+        Err(WouldBlock) => (),
+        Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
+    }
+
+    drop(read_guard);
+    let mapped_read_guard = RwLockReadGuard::map(lock.read(), |_| &());
+
+    let write_result = lock.try_write();
+    match write_result {
+        Err(WouldBlock) => (),
+        Ok(_) => assert!(false, "try_write should not succeed while mapped_read_guard is in scope"),
     }
-    let comp: &[i32] = &[4, 2, 5];
-    assert_eq!(&*rw.read().unwrap(), comp);
+
+    drop(mapped_read_guard);
 }
 
 #[test]
-fn test_rwlock_try_write() {
+fn poison_test_rwlock_try_write() {
+    use std::sync::poison::{RwLock, RwLockReadGuard, TryLockError};
+
     let lock = RwLock::new(0isize);
     let read_guard = lock.read().unwrap();
 
@@ -285,6 +503,11 @@ fn test_rwlock_try_write() {
     drop(mapped_read_guard);
 }
 
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Poison Tests
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Creates an `RwLock` that is immediately poisoned.
 fn new_poisoned_rwlock<T>(value: T) -> RwLock<T> {
     let lock = RwLock::new(value);
 
@@ -301,30 +524,6 @@ fn new_poisoned_rwlock<T>(value: T) -> RwLock<T> {
 }
 
 #[test]
-fn test_into_inner() {
-    let m = RwLock::new(NonCopy(10));
-    assert_eq!(m.into_inner().unwrap(), NonCopy(10));
-}
-
-#[test]
-fn test_into_inner_drop() {
-    struct Foo(Arc<AtomicUsize>);
-    impl Drop for Foo {
-        fn drop(&mut self) {
-            self.0.fetch_add(1, Ordering::SeqCst);
-        }
-    }
-    let num_drops = Arc::new(AtomicUsize::new(0));
-    let m = RwLock::new(Foo(num_drops.clone()));
-    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-    {
-        let _inner = m.into_inner().unwrap();
-        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-    }
-    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_into_inner_poison() {
     let m = new_poisoned_rwlock(NonCopy(10));
@@ -336,15 +535,11 @@ fn test_into_inner_poison() {
 }
 
 #[test]
-fn test_get_cloned() {
-    let m = RwLock::new(Cloneable(10));
-
-    assert_eq!(m.get_cloned().unwrap(), Cloneable(10));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_get_cloned_poison() {
+    #[derive(Clone, Eq, PartialEq, Debug)]
+    struct Cloneable(i32);
+
     let m = new_poisoned_rwlock(Cloneable(10));
 
     match m.get_cloned() {
@@ -354,13 +549,6 @@ fn test_get_cloned_poison() {
 }
 
 #[test]
-fn test_get_mut() {
-    let mut m = RwLock::new(NonCopy(10));
-    *m.get_mut().unwrap() = NonCopy(20);
-    assert_eq!(m.into_inner().unwrap(), NonCopy(20));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_get_mut_poison() {
     let mut m = new_poisoned_rwlock(NonCopy(10));
@@ -372,23 +560,6 @@ fn test_get_mut_poison() {
 }
 
 #[test]
-fn test_set() {
-    fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
-    where
-        T: Debug + Eq,
-    {
-        let m = RwLock::new(init());
-
-        assert_eq!(*m.read().unwrap(), init());
-        m.set(value()).unwrap();
-        assert_eq!(*m.read().unwrap(), value());
-    }
-
-    inner(|| NonCopy(10), || NonCopy(20));
-    inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_set_poison() {
     fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
@@ -411,23 +582,6 @@ fn test_set_poison() {
 }
 
 #[test]
-fn test_replace() {
-    fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
-    where
-        T: Debug + Eq,
-    {
-        let m = RwLock::new(init());
-
-        assert_eq!(*m.read().unwrap(), init());
-        assert_eq!(m.replace(value()).unwrap(), init());
-        assert_eq!(*m.read().unwrap(), value());
-    }
-
-    inner(|| NonCopy(10), || NonCopy(20));
-    inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_replace_poison() {
     fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
@@ -450,49 +604,118 @@ fn test_replace_poison() {
 }
 
 #[test]
-fn test_read_guard_covariance() {
-    fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
-    let j: i32 = 5;
-    let lock = RwLock::new(&j);
-    {
-        let i = 6;
-        do_stuff(lock.read().unwrap(), &i);
-    }
-    drop(lock);
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_wr() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let _lock = arc2.write().unwrap();
+        panic!();
+    })
+    .join();
+    assert!(arc.read().is_err());
 }
 
 #[test]
-fn test_mapped_read_guard_covariance() {
-    fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
-    let j: i32 = 5;
-    let lock = RwLock::new((&j, &j));
-    {
-        let i = 6;
-        let guard = lock.read().unwrap();
-        let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
-        do_stuff(guard, &i);
-    }
-    drop(lock);
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_mapped_w_r() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let lock = arc2.write().unwrap();
+        let _lock = RwLockWriteGuard::map(lock, |val| val);
+        panic!();
+    })
+    .join();
+    assert!(arc.read().is_err());
 }
 
 #[test]
-fn test_mapping_mapped_guard() {
-    let arr = [0; 4];
-    let mut lock = RwLock::new(arr);
-    let guard = lock.write().unwrap();
-    let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
-    let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
-    assert_eq!(guard.len(), 1);
-    guard[0] = 42;
-    drop(guard);
-    assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
-
-    let guard = lock.read().unwrap();
-    let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
-    let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
-    assert_eq!(*guard, [42]);
-    drop(guard);
-    assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_ww() {
+    let arc = Arc::new(RwLock::new(1));
+    assert!(!arc.is_poisoned());
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let _lock = arc2.write().unwrap();
+        panic!();
+    })
+    .join();
+    assert!(arc.write().is_err());
+    assert!(arc.is_poisoned());
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_mapped_w_w() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let lock = arc2.write().unwrap();
+        let _lock = RwLockWriteGuard::map(lock, |val| val);
+        panic!();
+    })
+    .join();
+    assert!(arc.write().is_err());
+    assert!(arc.is_poisoned());
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_rr() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let _lock = arc2.read().unwrap();
+        panic!();
+    })
+    .join();
+    let lock = arc.read().unwrap();
+    assert_eq!(*lock, 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_mapped_r_r() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let lock = arc2.read().unwrap();
+        let _lock = RwLockReadGuard::map(lock, |val| val);
+        panic!();
+    })
+    .join();
+    let lock = arc.read().unwrap();
+    assert_eq!(*lock, 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_rw() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let _lock = arc2.read().unwrap();
+        panic!()
+    })
+    .join();
+    let lock = arc.write().unwrap();
+    assert_eq!(*lock, 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_mapped_r_w() {
+    let arc = Arc::new(RwLock::new(1));
+    let arc2 = arc.clone();
+    let _: Result<(), _> = thread::spawn(move || {
+        let lock = arc2.read().unwrap();
+        let _lock = RwLockReadGuard::map(lock, |val| val);
+        panic!();
+    })
+    .join();
+    let lock = arc.write().unwrap();
+    assert_eq!(*lock, 1);
 }
 
 #[test]
@@ -638,114 +861,3 @@ fn panic_while_mapping_write_unlocked_poison() {
 
     drop(lock);
 }
-
-#[test]
-fn test_downgrade_basic() {
-    let r = RwLock::new(());
-
-    let write_guard = r.write().unwrap();
-    let _read_guard = RwLockWriteGuard::downgrade(write_guard);
-}
-
-#[test]
-// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
-// See <https://github.com/rust-lang/rust/issues/121950> for details.
-#[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn test_downgrade_observe() {
-    // Taken from the test `test_rwlock_downgrade` from:
-    // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs
-
-    const W: usize = 20;
-    const N: usize = if cfg!(miri) { 40 } else { 100 };
-
-    // This test spawns `W` writer threads, where each will increment a counter `N` times, ensuring
-    // that the value they wrote has not changed after downgrading.
-
-    let rw = Arc::new(RwLock::new(0));
-
-    // Spawn the writers that will do `W * N` operations and checks.
-    let handles: Vec<_> = (0..W)
-        .map(|_| {
-            let rw = rw.clone();
-            thread::spawn(move || {
-                for _ in 0..N {
-                    // Increment the counter.
-                    let mut write_guard = rw.write().unwrap();
-                    *write_guard += 1;
-                    let cur_val = *write_guard;
-
-                    // Downgrade the lock to read mode, where the value protected cannot be modified.
-                    let read_guard = RwLockWriteGuard::downgrade(write_guard);
-                    assert_eq!(cur_val, *read_guard);
-                }
-            })
-        })
-        .collect();
-
-    for handle in handles {
-        handle.join().unwrap();
-    }
-
-    assert_eq!(*rw.read().unwrap(), W * N);
-}
-
-#[test]
-// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
-// See <https://github.com/rust-lang/rust/issues/121950> for details.
-#[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn test_downgrade_atomic() {
-    const NEW_VALUE: i32 = -1;
-
-    // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
-    // downgraded, the lock must be in read mode and no other threads can take the write lock to
-    // modify the protected value.
-
-    // `W` is the number of evil writer threads.
-    const W: usize = 20;
-    let rwlock = Arc::new(RwLock::new(0));
-
-    // Spawns many evil writer threads that will try and write to the locked value before the
-    // initial writer (who has the exclusive lock) can read after it downgrades.
-    // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote
-    // itself as no other thread should be able to mutate the protected value.
-
-    // Put the lock in write mode, causing all future threads trying to access this to go to sleep.
-    let mut main_write_guard = rwlock.write().unwrap();
-
-    // Spawn all of the evil writer threads. They will each increment the protected value by 1.
-    let handles: Vec<_> = (0..W)
-        .map(|_| {
-            let rwlock = rwlock.clone();
-            thread::spawn(move || {
-                // Will go to sleep since the main thread initially has the write lock.
-                let mut evil_guard = rwlock.write().unwrap();
-                *evil_guard += 1;
-            })
-        })
-        .collect();
-
-    // Wait for a good amount of time so that evil threads go to sleep.
-    // Note: this is not strictly necessary...
-    let eternity = std::time::Duration::from_millis(42);
-    thread::sleep(eternity);
-
-    // Once everyone is asleep, set the value to `NEW_VALUE`.
-    *main_write_guard = NEW_VALUE;
-
-    // Atomically downgrade the write guard into a read guard.
-    let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
-
-    // If the above is not atomic, then it would be possible for an evil thread to get in front of
-    // this read and change the value to be non-negative.
-    assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
-
-    // Drop the main read guard and allow the evil writer threads to start incrementing.
-    drop(main_read_guard);
-
-    for handle in handles {
-        handle.join().unwrap();
-    }
-
-    let final_check = rwlock.read().unwrap();
-    assert_eq!(*final_check, W as i32 + NEW_VALUE);
-}
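
The `test_rw_arc_poison_*` / `test_rw_arc_no_poison_*` tests added above exercise the poisoning contract that only the `poison` variant upholds: a panic while a write guard is held poisons the lock, while a panic under a read guard does not. A minimal standalone sketch of that behavior, using only the stable `std::sync::RwLock` (illustrative only, not part of the patch):

```rust
use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let lock = Arc::new(RwLock::new(1));

    // A panic while a *write* guard is held poisons the lock.
    let writer = Arc::clone(&lock);
    let _ = thread::spawn(move || {
        let _guard = writer.write().unwrap();
        panic!("poison the lock");
    })
    .join();

    assert!(lock.is_poisoned());
    assert!(lock.read().is_err());

    // The data is still reachable by recovering the guard from the error.
    let value = *lock.read().unwrap_or_else(|poisoned| poisoned.into_inner());
    assert_eq!(value, 1);
}
```

The `nonpoison::RwLock` introduced by this PR has no such state to assert against: its `read()` and `write()` hand back guards directly rather than a `LockResult`, so there is no poison state for equivalent tests to check.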
diff --git a/tests/ui/suggestions/inner_type.fixed b/tests/ui/suggestions/inner_type.fixed
index 8174f8e204e..8671c43226c 100644
--- a/tests/ui/suggestions/inner_type.fixed
+++ b/tests/ui/suggestions/inner_type.fixed
@@ -31,10 +31,10 @@ fn main() {
     let another_item = std::sync::RwLock::new(Struct { p: 42_u32 });
 
     another_item.read().unwrap().method();
-    //~^ ERROR no method named `method` found for struct `RwLock<T>` in the current scope [E0599]
+    //~^ ERROR no method named `method` found for struct `std::sync::RwLock<T>` in the current scope [E0599]
     //~| HELP  use `.read().unwrap()` to borrow the `Struct<u32>`, blocking the current thread until it can be acquired
 
     another_item.write().unwrap().some_mutable_method();
-    //~^ ERROR no method named `some_mutable_method` found for struct `RwLock<T>` in the current scope [E0599]
+    //~^ ERROR no method named `some_mutable_method` found for struct `std::sync::RwLock<T>` in the current scope [E0599]
     //~| HELP use `.write().unwrap()` to mutably borrow the `Struct<u32>`, blocking the current thread until it can be acquired
 }
diff --git a/tests/ui/suggestions/inner_type.rs b/tests/ui/suggestions/inner_type.rs
index e4eaf07ca8b..793372c7deb 100644
--- a/tests/ui/suggestions/inner_type.rs
+++ b/tests/ui/suggestions/inner_type.rs
@@ -31,10 +31,10 @@ fn main() {
     let another_item = std::sync::RwLock::new(Struct { p: 42_u32 });
 
     another_item.method();
-    //~^ ERROR no method named `method` found for struct `RwLock<T>` in the current scope [E0599]
+    //~^ ERROR no method named `method` found for struct `std::sync::RwLock<T>` in the current scope [E0599]
     //~| HELP  use `.read().unwrap()` to borrow the `Struct<u32>`, blocking the current thread until it can be acquired
 
     another_item.some_mutable_method();
-    //~^ ERROR no method named `some_mutable_method` found for struct `RwLock<T>` in the current scope [E0599]
+    //~^ ERROR no method named `some_mutable_method` found for struct `std::sync::RwLock<T>` in the current scope [E0599]
     //~| HELP use `.write().unwrap()` to mutably borrow the `Struct<u32>`, blocking the current thread until it can be acquired
 }
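
The two UI test updates above only change the expected type name in the E0599 diagnostics from `RwLock<T>` to the fully qualified `std::sync::RwLock<T>` (presumably because a second `RwLock` now exists under `std::sync::nonpoison`, so the short name is no longer unambiguous); the scenario they cover is unchanged. A standalone reproduction of that scenario, with the `Struct`/`method` names mirroring the test for illustration:

```rust
use std::sync::RwLock;

#[allow(dead_code)]
struct Struct<T> {
    p: T,
}

impl<T> Struct<T> {
    fn method(&self) {}
    fn some_mutable_method(&mut self) {}
}

fn main() {
    let another_item = RwLock::new(Struct { p: 42_u32 });

    // Calling the inner type's methods on the lock itself does not compile:
    // another_item.method();                // error[E0599]
    // another_item.some_mutable_method();   // error[E0599]

    // The compiler's suggested fixes borrow the inner value through a guard:
    another_item.read().unwrap().method();
    another_item.write().unwrap().some_mutable_method();
}
```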
diff --git a/tests/ui/suggestions/inner_type.stderr b/tests/ui/suggestions/inner_type.stderr
index 017ddb5ad6d..d2f7fa92bc6 100644
--- a/tests/ui/suggestions/inner_type.stderr
+++ b/tests/ui/suggestions/inner_type.stderr
@@ -46,11 +46,11 @@ help: use `.lock().unwrap()` to borrow the `Struct<u32>`, blocking the current t
 LL |     another_item.lock().unwrap().method();
    |                 ++++++++++++++++
 
-error[E0599]: no method named `method` found for struct `RwLock<T>` in the current scope
+error[E0599]: no method named `method` found for struct `std::sync::RwLock<T>` in the current scope
   --> $DIR/inner_type.rs:33:18
    |
 LL |     another_item.method();
-   |                  ^^^^^^ method not found in `RwLock<Struct<u32>>`
+   |                  ^^^^^^ method not found in `std::sync::RwLock<Struct<u32>>`
    |
 note: the method `method` exists on the type `Struct<u32>`
   --> $DIR/inner_type.rs:9:5
@@ -62,11 +62,11 @@ help: use `.read().unwrap()` to borrow the `Struct<u32>`, blocking the current t
 LL |     another_item.read().unwrap().method();
    |                 ++++++++++++++++
 
-error[E0599]: no method named `some_mutable_method` found for struct `RwLock<T>` in the current scope
+error[E0599]: no method named `some_mutable_method` found for struct `std::sync::RwLock<T>` in the current scope
   --> $DIR/inner_type.rs:37:18
    |
 LL |     another_item.some_mutable_method();
-   |                  ^^^^^^^^^^^^^^^^^^^ method not found in `RwLock<Struct<u32>>`
+   |                  ^^^^^^^^^^^^^^^^^^^ method not found in `std::sync::RwLock<Struct<u32>>`
    |
 note: the method `some_mutable_method` exists on the type `Struct<u32>`
   --> $DIR/inner_type.rs:11:5