author    Connor Tsui <connor.tsui20@gmail.com>  2025-07-29 19:27:24 +0200
committer Connor Tsui <connor.tsui20@gmail.com>  2025-08-09 08:41:27 +0200
commit    ad499d076aa71ac16de4d08fa232446d39125df3 (patch)
tree      aa7ed14d31f9711115a96e49af793d5dc2999982
parent    d8a0df075d312448bfdb4cbf4d87bf5d1c1508b7 (diff)
add nonpoison and poison rwlock tests
Adds tests for the `nonpoison::RwLock` variant by using a macro to
duplicate the existing `poison` tests.

Note that all of the tests here are adapted from the existing `poison`
tests.
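
The duplication macro named above is defined elsewhere in the test crate and is
not part of this patch. A minimal sketch of the shape implied by the call sites
below, assuming `macro_metavar_expr_concat` (enabled in lib.rs) for the
generated test names; the real macro must also accept the `#[cfg_attr]`
attributes that some call sites place in front of it, which this sketch omits:

    macro_rules! nonpoison_and_poison_unwrap_test {
        (name: $name:ident, test_body: $body:block) => {
            // Poison variant: lock operations return `Result`, so
            // `maybe_unwrap` really unwraps.
            #[test]
            fn ${concat(poison_, $name)}() {
                use ::std::sync::poison as locks;
                fn maybe_unwrap<T, E: ::core::fmt::Debug>(r: Result<T, E>) -> T {
                    r.unwrap()
                }
                $body
            }

            // Nonpoison variant: lock operations return guards/values
            // directly, so `maybe_unwrap` is the identity function.
            #[test]
            fn ${concat(nonpoison_, $name)}() {
                use ::std::sync::nonpoison as locks;
                fn maybe_unwrap<T>(t: T) -> T {
                    t
                }
                $body
            }
        };
    }

The two hand-written tests at the bottom of the diff,
`nonpoison_test_rwlock_try_write` and `poison_test_rwlock_try_write`, follow
the same naming scheme but stay separate, since `try_write` returns different
error types in the two modules (`WouldBlock` vs. `TryLockError`).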
-rw-r--r--  library/std/tests/sync/lib.rs    |   1
-rw-r--r--  library/std/tests/sync/rwlock.rs | 660
2 files changed, 381 insertions, 280 deletions
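
The lib.rs hunk just adds the unstable feature gate that the new tests require.
For orientation, a hedged sketch of the behavioral difference the duplicated
tests exercise (nightly-only; the exact set of feature gates and the unstable
`nonpoison` API may change):

    #![feature(sync_nonpoison, nonpoison_rwlock)]

    fn main() {
        // Poison variant (`std::sync::RwLock`, re-exported from
        // `std::sync::poison`): locking returns a `Result` that is `Err`
        // if a previous holder panicked while holding the lock.
        let poisoning = std::sync::RwLock::new(1);
        assert_eq!(*poisoning.read().unwrap(), 1);

        // Nonpoison variant: locking returns the guard directly, which is
        // why the shared test bodies route every lock call through
        // `maybe_unwrap`.
        let nonpoisoning = std::sync::nonpoison::RwLock::new(1);
        assert_eq!(*nonpoisoning.read(), 1);
    }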
diff --git a/library/std/tests/sync/lib.rs b/library/std/tests/sync/lib.rs
index 94f1fe96b6a..f874c2ba389 100644
--- a/library/std/tests/sync/lib.rs
+++ b/library/std/tests/sync/lib.rs
@@ -8,6 +8,7 @@
 #![feature(std_internals)]
 #![feature(sync_nonpoison)]
 #![feature(nonpoison_mutex)]
+#![feature(nonpoison_rwlock)]
 #![allow(internal_features)]
 #![feature(macro_metavar_expr_concat)] // For concatenating identifiers in macros.
 
diff --git a/library/std/tests/sync/rwlock.rs b/library/std/tests/sync/rwlock.rs
index ddf2f78dff1..eca15d2a4ad 100644
--- a/library/std/tests/sync/rwlock.rs
+++ b/library/std/tests/sync/rwlock.rs
@@ -32,354 +32,454 @@ fn test_needs_drop() {
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Non-poison & Poison Tests
 ////////////////////////////////////////////////////////////////////////////////////////////////////
+use super::nonpoison_and_poison_unwrap_test;
+
+nonpoison_and_poison_unwrap_test!(
+    name: smoke,
+    test_body: {
+        use locks::RwLock;
+
+        let l = RwLock::new(());
+        drop(maybe_unwrap(l.read()));
+        drop(maybe_unwrap(l.write()));
+        drop((maybe_unwrap(l.read()), maybe_unwrap(l.read())));
+        drop(maybe_unwrap(l.write()));
+    }
+);
 
-#[test]
-fn smoke() {
-    let l = RwLock::new(());
-    drop(l.read().unwrap());
-    drop(l.write().unwrap());
-    drop((l.read().unwrap(), l.read().unwrap()));
-    drop(l.write().unwrap());
-}
-
-#[test]
 // FIXME: On macOS we use a provenance-incorrect implementation and Miri
 // catches that issue with a chance of around 1/1000.
 // See <https://github.com/rust-lang/rust/issues/121950> for details.
 #[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn frob() {
-    const N: u32 = 10;
-    const M: usize = if cfg!(miri) { 100 } else { 1000 };
+nonpoison_and_poison_unwrap_test!(
+    name: frob,
+    test_body: {
+        use locks::RwLock;
 
-    let r = Arc::new(RwLock::new(()));
+        const N: u32 = 10;
+        const M: usize = if cfg!(miri) { 100 } else { 1000 };
 
-    let (tx, rx) = channel::<()>();
-    for _ in 0..N {
-        let tx = tx.clone();
-        let r = r.clone();
-        thread::spawn(move || {
-            let mut rng = crate::common::test_rng();
-            for _ in 0..M {
-                if rng.random_bool(1.0 / (N as f64)) {
-                    drop(r.write().unwrap());
-                } else {
-                    drop(r.read().unwrap());
+        let r = Arc::new(RwLock::new(()));
+
+        let (tx, rx) = channel::<()>();
+        for _ in 0..N {
+            let tx = tx.clone();
+            let r = r.clone();
+            thread::spawn(move || {
+                let mut rng = crate::common::test_rng();
+                for _ in 0..M {
+                    if rng.random_bool(1.0 / (N as f64)) {
+                        drop(maybe_unwrap(r.write()));
+                    } else {
+                        drop(maybe_unwrap(r.read()));
+                    }
                 }
+                drop(tx);
+            });
+        }
+        drop(tx);
+        let _ = rx.recv();
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_rw_arc,
+    test_body: {
+        use locks::RwLock;
+
+        let arc = Arc::new(RwLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        thread::spawn(move || {
+            let mut lock = maybe_unwrap(arc2.write());
+            for _ in 0..10 {
+                let tmp = *lock;
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
             }
-            drop(tx);
+            tx.send(()).unwrap();
         });
-    }
-    drop(tx);
-    let _ = rx.recv();
-}
 
-#[test]
-fn test_rw_arc() {
-    let arc = Arc::new(RwLock::new(0));
-    let arc2 = arc.clone();
-    let (tx, rx) = channel();
-
-    thread::spawn(move || {
-        let mut lock = arc2.write().unwrap();
-        for _ in 0..10 {
-            let tmp = *lock;
-            *lock = -1;
-            thread::yield_now();
-            *lock = tmp + 1;
+        // Readers try to catch the writer in the act
+        let mut children = Vec::new();
+        for _ in 0..5 {
+            let arc3 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = maybe_unwrap(arc3.read());
+                assert!(*lock >= 0);
+            }));
         }
-        tx.send(()).unwrap();
-    });
 
-    // Readers try to catch the writer in the act
-    let mut children = Vec::new();
-    for _ in 0..5 {
-        let arc3 = arc.clone();
-        children.push(thread::spawn(move || {
-            let lock = arc3.read().unwrap();
-            assert!(*lock >= 0);
-        }));
-    }
+        // Wait for children to pass their asserts
+        for r in children {
+            assert!(r.join().is_ok());
+        }
 
-    // Wait for children to pass their asserts
-    for r in children {
-        assert!(r.join().is_ok());
+        // Wait for writer to finish
+        rx.recv().unwrap();
+        let lock = maybe_unwrap(arc.read());
+        assert_eq!(*lock, 10);
     }
+);
 
-    // Wait for writer to finish
-    rx.recv().unwrap();
-    let lock = arc.read().unwrap();
-    assert_eq!(*lock, 10);
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_access_in_unwind() {
-    let arc = Arc::new(RwLock::new(1));
-    let arc2 = arc.clone();
-    let _ = thread::spawn(move || -> () {
-        struct Unwinder {
-            i: Arc<RwLock<isize>>,
-        }
-        impl Drop for Unwinder {
-            fn drop(&mut self) {
-                let mut lock = self.i.write().unwrap();
-                *lock += 1;
+nonpoison_and_poison_unwrap_test!(
+    name: test_rw_arc_access_in_unwind,
+    test_body: {
+        use locks::RwLock;
+
+        let arc = Arc::new(RwLock::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<RwLock<isize>>,
             }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = maybe_unwrap(self.i.write());
+                    *lock += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = maybe_unwrap(arc.read());
+        assert_eq!(*lock, 2);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_rwlock_unsized,
+    test_body: {
+        use locks::RwLock;
+
+        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+        {
+            let b = &mut *maybe_unwrap(rw.write());
+            b[0] = 4;
+            b[2] = 5;
         }
-        let _u = Unwinder { i: arc2 };
-        panic!();
-    })
-    .join();
-    let lock = arc.read().unwrap();
-    assert_eq!(*lock, 2);
-}
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*maybe_unwrap(rw.read()), comp);
+    }
+);
 
-#[test]
-fn test_rwlock_unsized() {
-    let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
-    {
-        let b = &mut *rw.write().unwrap();
-        b[0] = 4;
-        b[2] = 5;
+nonpoison_and_poison_unwrap_test!(
+    name: test_into_inner,
+    test_body: {
+        use locks::RwLock;
+
+        let m = RwLock::new(NonCopy(10));
+        assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(10));
     }
-    let comp: &[i32] = &[4, 2, 5];
-    assert_eq!(&*rw.read().unwrap(), comp);
-}
+);
 
-#[test]
-fn test_into_inner() {
-    let m = RwLock::new(NonCopy(10));
-    assert_eq!(m.into_inner().unwrap(), NonCopy(10));
-}
+nonpoison_and_poison_unwrap_test!(
+    name: test_into_inner_drop,
+    test_body: {
+        use locks::RwLock;
 
-#[test]
-fn test_into_inner_drop() {
-    struct Foo(Arc<AtomicUsize>);
-    impl Drop for Foo {
-        fn drop(&mut self) {
-            self.0.fetch_add(1, Ordering::SeqCst);
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
         }
-    }
-    let num_drops = Arc::new(AtomicUsize::new(0));
-    let m = RwLock::new(Foo(num_drops.clone()));
-    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-    {
-        let _inner = m.into_inner().unwrap();
+
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = RwLock::new(Foo(num_drops.clone()));
         assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = maybe_unwrap(m.into_inner());
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
     }
-    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
-}
-
-#[test]
-fn test_get_cloned() {
-    #[derive(Clone, Eq, PartialEq, Debug)]
-    struct Cloneable(i32);
-
-    let m = RwLock::new(Cloneable(10));
+);
 
-    assert_eq!(m.get_cloned().unwrap(), Cloneable(10));
-}
+nonpoison_and_poison_unwrap_test!(
+    name: test_get_cloned,
+    test_body: {
+        use locks::RwLock;
 
-#[test]
-fn test_get_mut() {
-    let mut m = RwLock::new(NonCopy(10));
-    *m.get_mut().unwrap() = NonCopy(20);
-    assert_eq!(m.into_inner().unwrap(), NonCopy(20));
-}
+        #[derive(Clone, Eq, PartialEq, Debug)]
+        struct Cloneable(i32);
 
-#[test]
-fn test_set() {
-    fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
-    where
-        T: Debug + Eq,
-    {
-        let m = RwLock::new(init());
+        let m = RwLock::new(Cloneable(10));
 
-        assert_eq!(*m.read().unwrap(), init());
-        m.set(value()).unwrap();
-        assert_eq!(*m.read().unwrap(), value());
+        assert_eq!(maybe_unwrap(m.get_cloned()), Cloneable(10));
     }
+);
 
-    inner(|| NonCopy(10), || NonCopy(20));
-    inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
-#[test]
-fn test_replace() {
-    fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
-    where
-        T: Debug + Eq,
-    {
-        let m = RwLock::new(init());
+nonpoison_and_poison_unwrap_test!(
+    name: test_get_mut,
+    test_body: {
+        use locks::RwLock;
 
-        assert_eq!(*m.read().unwrap(), init());
-        assert_eq!(m.replace(value()).unwrap(), init());
-        assert_eq!(*m.read().unwrap(), value());
+        let mut m = RwLock::new(NonCopy(10));
+        *maybe_unwrap(m.get_mut()) = NonCopy(20);
+        assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(20));
     }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_set,
+    test_body: {
+        use locks::RwLock;
+
+        fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+        where
+            T: Debug + Eq,
+        {
+            let m = RwLock::new(init());
+
+            assert_eq!(*maybe_unwrap(m.read()), init());
+            maybe_unwrap(m.set(value()));
+            assert_eq!(*maybe_unwrap(m.read()), value());
+        }
 
-    inner(|| NonCopy(10), || NonCopy(20));
-    inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
-#[test]
-fn test_read_guard_covariance() {
-    fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
-    let j: i32 = 5;
-    let lock = RwLock::new(&j);
-    {
-        let i = 6;
-        do_stuff(lock.read().unwrap(), &i);
+        inner(|| NonCopy(10), || NonCopy(20));
+        inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
     }
-    drop(lock);
-}
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_replace,
+    test_body: {
+        use locks::RwLock;
+
+        fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+        where
+            T: Debug + Eq,
+        {
+            let m = RwLock::new(init());
+
+            assert_eq!(*maybe_unwrap(m.read()), init());
+            assert_eq!(maybe_unwrap(m.replace(value())), init());
+            assert_eq!(*maybe_unwrap(m.read()), value());
+        }
 
-#[test]
-fn test_mapped_read_guard_covariance() {
-    fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
-    let j: i32 = 5;
-    let lock = RwLock::new((&j, &j));
-    {
-        let i = 6;
-        let guard = lock.read().unwrap();
-        let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
-        do_stuff(guard, &i);
+        inner(|| NonCopy(10), || NonCopy(20));
+        inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
     }
-    drop(lock);
-}
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_read_guard_covariance,
+    test_body: {
+        use locks::{RwLock, RwLockReadGuard};
+
+        fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+        let j: i32 = 5;
+        let lock = RwLock::new(&j);
+        {
+            let i = 6;
+            do_stuff(maybe_unwrap(lock.read()), &i);
+        }
+        drop(lock);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_mapped_read_guard_covariance,
+    test_body: {
+        use locks::{RwLock, RwLockReadGuard, MappedRwLockReadGuard};
+
+        fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+        let j: i32 = 5;
+        let lock = RwLock::new((&j, &j));
+        {
+            let i = 6;
+            let guard = maybe_unwrap(lock.read());
+            let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
+            do_stuff(guard, &i);
+        }
+        drop(lock);
+    }
+);
 
-#[test]
-fn test_downgrade_basic() {
-    let r = RwLock::new(());
+nonpoison_and_poison_unwrap_test!(
+    name: test_downgrade_basic,
+    test_body: {
+        use locks::{RwLock, RwLockWriteGuard};
 
-    let write_guard = r.write().unwrap();
-    let _read_guard = RwLockWriteGuard::downgrade(write_guard);
-}
+        let r = RwLock::new(());
+
+        let write_guard = maybe_unwrap(r.write());
+        let _read_guard = RwLockWriteGuard::downgrade(write_guard);
+    }
+);
 
-#[test]
 // FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
 // See <https://github.com/rust-lang/rust/issues/121950> for details.
 #[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn test_downgrade_observe() {
-    // Inspired by the test `test_rwlock_downgrade` from:
-    // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs
-
-    const W: usize = 20;
-    const N: usize = if cfg!(miri) { 40 } else { 100 };
-
-    // This test spawns `W` writer threads, where each will increment a counter `N` times, ensuring
-    // that the value they wrote has not changed after downgrading.
-
-    let rw = Arc::new(RwLock::new(0));
-
-    // Spawn the writers that will do `W * N` operations and checks.
-    let handles: Vec<_> = (0..W)
-        .map(|_| {
-            let rw = rw.clone();
-            thread::spawn(move || {
-                for _ in 0..N {
-                    // Increment the counter.
-                    let mut write_guard = rw.write().unwrap();
-                    *write_guard += 1;
-                    let cur_val = *write_guard;
-
-                    // Downgrade the lock to read mode, where the value protected cannot be modified.
-                    let read_guard = RwLockWriteGuard::downgrade(write_guard);
-                    assert_eq!(cur_val, *read_guard);
-                }
+nonpoison_and_poison_unwrap_test!(
+    name: test_downgrade_observe,
+    test_body: {
+        use locks::{RwLock, RwLockWriteGuard};
+
+        // Inspired by the test `test_rwlock_downgrade` from:
+        // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs
+
+        const W: usize = 20;
+        const N: usize = if cfg!(miri) { 40 } else { 100 };
+
+        // This test spawns `W` writer threads, where each will increment a counter `N` times,
+        // ensuring that the value they wrote has not changed after downgrading.
+
+        let rw = Arc::new(RwLock::new(0));
+
+        // Spawn the writers that will do `W * N` operations and checks.
+        let handles: Vec<_> = (0..W)
+            .map(|_| {
+                let rw = rw.clone();
+                thread::spawn(move || {
+                    for _ in 0..N {
+                        // Increment the counter.
+                        let mut write_guard = maybe_unwrap(rw.write());
+                        *write_guard += 1;
+                        let cur_val = *write_guard;
+
+                        // Downgrade the lock to read mode, where the value protected cannot be
+                        // modified.
+                        let read_guard = RwLockWriteGuard::downgrade(write_guard);
+                        assert_eq!(cur_val, *read_guard);
+                    }
+                })
             })
-        })
-        .collect();
+            .collect();
 
-    for handle in handles {
-        handle.join().unwrap();
-    }
+        for handle in handles {
+            handle.join().unwrap();
+        }
 
-    assert_eq!(*rw.read().unwrap(), W * N);
-}
+        assert_eq!(*maybe_unwrap(rw.read()), W * N);
+    }
+);
 
-#[test]
 // FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
 // See <https://github.com/rust-lang/rust/issues/121950> for details.
 #[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn test_downgrade_atomic() {
-    const NEW_VALUE: i32 = -1;
+nonpoison_and_poison_unwrap_test!(
+    name: test_downgrade_atomic,
+    test_body: {
+        use locks::{RwLock, RwLockWriteGuard};
+
+        const NEW_VALUE: i32 = -1;
+
+        // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
+        // downgraded, the lock must be in read mode and no other threads can take the write lock to
+        // modify the protected value.
+
+        // `W` is the number of evil writer threads.
+        const W: usize = 20;
+        let rwlock = Arc::new(RwLock::new(0));
+
+        // Spawns many evil writer threads that will try and write to the locked value before the
+        // initial writer (who has the exclusive lock) can read after it downgrades.
+        // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote
+        // itself as no other thread should be able to mutate the protected value.
+
+        // Put the lock in write mode, causing all future threads that try to access it to go to sleep.
+        let mut main_write_guard = maybe_unwrap(rwlock.write());
+
+        // Spawn all of the evil writer threads. They will each increment the protected value by 1.
+        let handles: Vec<_> = (0..W)
+            .map(|_| {
+                let rwlock = rwlock.clone();
+                thread::spawn(move || {
+                    // Will go to sleep since the main thread initially has the write lock.
+                    let mut evil_guard = maybe_unwrap(rwlock.write());
+                    *evil_guard += 1;
+                })
+            })
+            .collect();
 
-    // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
-    // downgraded, the lock must be in read mode and no other threads can take the write lock to
-    // modify the protected value.
+        // Wait for a good amount of time so that evil threads go to sleep.
+        // Note: this is not strictly necessary...
+        let eternity = std::time::Duration::from_millis(42);
+        thread::sleep(eternity);
 
-    // `W` is the number of evil writer threads.
-    const W: usize = 20;
-    let rwlock = Arc::new(RwLock::new(0));
+        // Once everyone is asleep, set the value to `NEW_VALUE`.
+        *main_write_guard = NEW_VALUE;
 
-    // Spawns many evil writer threads that will try and write to the locked value before the
-    // initial writer (who has the exclusive lock) can read after it downgrades.
-    // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote
-    // itself as no other thread should be able to mutate the protected value.
+        // Atomically downgrade the write guard into a read guard.
+        let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
 
-    // Put the lock in write mode, causing all future threads trying to access this go to sleep.
-    let mut main_write_guard = rwlock.write().unwrap();
+        // If the above is not atomic, then it would be possible for an evil thread to get in front
+        // of this read and change the value to be non-negative.
+        assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
 
-    // Spawn all of the evil writer threads. They will each increment the protected value by 1.
-    let handles: Vec<_> = (0..W)
-        .map(|_| {
-            let rwlock = rwlock.clone();
-            thread::spawn(move || {
-                // Will go to sleep since the main thread initially has the write lock.
-                let mut evil_guard = rwlock.write().unwrap();
-                *evil_guard += 1;
-            })
-        })
-        .collect();
+        // Drop the main read guard and allow the evil writer threads to start incrementing.
+        drop(main_read_guard);
 
-    // Wait for a good amount of time so that evil threads go to sleep.
-    // Note: this is not strictly necessary...
-    let eternity = std::time::Duration::from_millis(42);
-    thread::sleep(eternity);
+        for handle in handles {
+            handle.join().unwrap();
+        }
 
-    // Once everyone is asleep, set the value to `NEW_VALUE`.
-    *main_write_guard = NEW_VALUE;
+        let final_check = maybe_unwrap(rwlock.read());
+        assert_eq!(*final_check, W as i32 + NEW_VALUE);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_mapping_mapped_guard,
+    test_body: {
+        use locks::{
+            RwLock, RwLockReadGuard, RwLockWriteGuard, MappedRwLockReadGuard, MappedRwLockWriteGuard
+        };
+
+        let arr = [0; 4];
+        let mut lock = RwLock::new(arr);
+        let guard = maybe_unwrap(lock.write());
+        let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
+        let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
+        assert_eq!(guard.len(), 1);
+        guard[0] = 42;
+        drop(guard);
+        assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]);
+
+        let guard = maybe_unwrap(lock.read());
+        let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
+        let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
+        assert_eq!(*guard, [42]);
+        drop(guard);
+        assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]);
+    }
+);
+
+#[test]
+fn nonpoison_test_rwlock_try_write() {
+    use std::sync::nonpoison::{RwLock, RwLockReadGuard, WouldBlock};
 
-    // Atomically downgrade the write guard into a read guard.
-    let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+    let lock = RwLock::new(0isize);
+    let read_guard = lock.read();
 
-    // If the above is not atomic, then it would be possible for an evil thread to get in front of
-    // this read and change the value to be non-negative.
-    assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
+    let write_result = lock.try_write();
+    match write_result {
+        Err(WouldBlock) => (),
+        Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
+    }
 
-    // Drop the main read guard and allow the evil writer threads to start incrementing.
-    drop(main_read_guard);
+    drop(read_guard);
+    let mapped_read_guard = RwLockReadGuard::map(lock.read(), |_| &());
 
-    for handle in handles {
-        handle.join().unwrap();
+    let write_result = lock.try_write();
+    match write_result {
+        Err(WouldBlock) => (),
+        Ok(_) => assert!(false, "try_write should not succeed while mapped_read_guard is in scope"),
     }
 
-    let final_check = rwlock.read().unwrap();
-    assert_eq!(*final_check, W as i32 + NEW_VALUE);
+    drop(mapped_read_guard);
 }
 
 #[test]
-fn test_mapping_mapped_guard() {
-    let arr = [0; 4];
-    let mut lock = RwLock::new(arr);
-    let guard = lock.write().unwrap();
-    let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
-    let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
-    assert_eq!(guard.len(), 1);
-    guard[0] = 42;
-    drop(guard);
-    assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
-
-    let guard = lock.read().unwrap();
-    let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
-    let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
-    assert_eq!(*guard, [42]);
-    drop(guard);
-    assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
-}
+fn poison_test_rwlock_try_write() {
+    use std::sync::poison::{RwLock, RwLockReadGuard, TryLockError};
 
-#[test]
-fn test_rwlock_try_write() {
     let lock = RwLock::new(0isize);
     let read_guard = lock.read().unwrap();