| author | Amanieu d'Antras <amanieu@gmail.com> | 2016-05-26 06:32:15 +0100 |
|---|---|---|
| committer | Amanieu d'Antras <amanieu@gmail.com> | 2016-06-02 14:34:00 +0100 |
| commit | fc4b35612550d833cefcd586cb13ebc0dc5a51e1 (patch) | |
| tree | 85e4c23d9928c4c6512d726acd1c719535701ef5 /src/libstd/sys | |
| parent | f3c68a0fdf119a3b285c33c38f9f7eebd053c853 (diff) | |
| download | rust-fc4b35612550d833cefcd586cb13ebc0dc5a51e1.tar.gz, rust-fc4b35612550d833cefcd586cb13ebc0dc5a51e1.zip | |
Fix rwlock successfully acquiring a write lock after a read lock
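
POSIX leaves the behaviour of `pthread_rwlock_wrlock`/`pthread_rwlock_trywrlock` unspecified when the calling thread already holds the lock for reading: on some platforms the call can actually succeed, which would let safe Rust code obtain a write guard while read guards are still alive. The patch below tracks the number of outstanding readers in an `AtomicUsize` so the wrapper can detect this case itself, panicking in `write` and returning `false` from `try_write`. As a minimal, illustrative sketch of the user-visible effect (written against the public `std::sync::RwLock` API, since the internal `sys` types are not directly accessible), a `try_write` attempted while the same thread still holds a read guard is reported as an error rather than handing out an aliasing write guard:

```rust
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(0);

    // Keep a read guard alive on this thread.
    let read_guard = lock.read().unwrap();

    // Before this fix, pthread_rwlock_trywrlock could return success here on
    // some platforms even though a read lock is still held; the num_readers
    // check makes the wrapper release the raw lock and report failure instead.
    assert!(lock.try_write().is_err());

    drop(read_guard);
    // With no readers left, a write lock is granted normally.
    *lock.try_write().unwrap() = 1;
    assert_eq!(*lock.read().unwrap(), 1);
}
```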
Diffstat (limited to 'src/libstd/sys')
| -rw-r--r-- | src/libstd/sys/unix/rwlock.rs | 39 |
1 file changed, 27 insertions, 12 deletions
```diff
diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 72ab70aeac4..fbd4e1d1208 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
@@ -10,10 +10,12 @@
 
 use libc;
 use cell::UnsafeCell;
+use sync::atomic::{AtomicUsize, Ordering};
 
 pub struct RWLock {
     inner: UnsafeCell<libc::pthread_rwlock_t>,
     write_locked: UnsafeCell<bool>,
+    num_readers: AtomicUsize,
 }
 
 unsafe impl Send for RWLock {}
@@ -24,6 +26,7 @@ impl RWLock {
         RWLock {
             inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
             write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
         }
     }
     #[inline]
@@ -54,23 +57,31 @@ impl RWLock {
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
+            self.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
         let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
-        if r == 0 && *self.write_locked.get() {
-            self.raw_unlock();
-            false
+        if r == 0 {
+            if *self.write_locked.get() {
+                self.raw_unlock();
+                false
+            } else {
+                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                true
+            }
         } else {
-            r == 0
+            false
         }
     }
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK and write_locked
-        if r == libc::EDEADLK || *self.write_locked.get() {
+        // See comments above for why we check for EDEADLK and write_locked. We
+        // also need to check that num_readers is 0.
+        if r == libc::EDEADLK || *self.write_locked.get() ||
+           self.num_readers.load(Ordering::Relaxed) != 0 {
             if r == 0 {
                 self.raw_unlock();
             }
@@ -83,12 +94,14 @@ impl RWLock {
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
         let r = libc::pthread_rwlock_trywrlock(self.inner.get());
-        if r == 0 && *self.write_locked.get() {
-            self.raw_unlock();
-            false
-        } else if r == 0 {
-            *self.write_locked.get() = true;
-            true
+        if r == 0 {
+            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                self.raw_unlock();
+                false
+            } else {
+                *self.write_locked.get() = true;
+                true
+            }
         } else {
             false
         }
@@ -101,10 +114,12 @@ impl RWLock {
     #[inline]
     pub unsafe fn read_unlock(&self) {
         debug_assert!(!*self.write_locked.get());
+        self.num_readers.fetch_sub(1, Ordering::Relaxed);
         self.raw_unlock();
     }
     #[inline]
     pub unsafe fn write_unlock(&self) {
+        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
         debug_assert!(*self.write_locked.get());
         *self.write_locked.get() = false;
         self.raw_unlock();
```
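
A note on the `Ordering::Relaxed` accesses added above: the counter is never used for synchronization on its own. A reader increments it only after the raw read lock has been acquired and decrements it before the raw lock is released, and a writer only inspects it after `pthread_rwlock_wrlock`/`pthread_rwlock_trywrlock` has returned, so the pthread lock's own acquire/release semantics already order the counter updates relative to the load. In particular, a writer on a different thread is not spuriously rejected: by the time the raw write lock is granted, every read guard has been dropped and the counter is back to zero. A small cross-thread sketch of that case, again using the public `std::sync::RwLock` API (the sleep only makes it likely that the writer blocks first; it is not required for correctness):

```rust
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;

fn main() {
    let lock = Arc::new(RwLock::new(0u32));
    let read_guard = lock.read().unwrap();

    // A writer on another thread simply blocks inside the raw write lock
    // until the read guard is dropped; once it is granted the lock, the
    // reader count has already been decremented, so no deadlock is reported.
    let writer = {
        let lock = Arc::clone(&lock);
        thread::spawn(move || {
            *lock.write().unwrap() = 42;
        })
    };

    thread::sleep(Duration::from_millis(50)); // let the writer reach the lock
    drop(read_guard);

    writer.join().unwrap();
    assert_eq!(*lock.read().unwrap(), 42);
}
```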
