| author | Amanieu d'Antras <amanieu@gmail.com> | 2016-05-25 06:07:54 +0100 |
|---|---|---|
| committer | Amanieu d'Antras <amanieu@gmail.com> | 2016-06-02 13:31:01 +0100 |
| commit | 960d1b74c5b8db59b871af50d948168f64b1be8a (patch) | |
| tree | cad7c2677c445b28a4d974e84eac926ee4916204 /src/libstd/sys | |
| parent | d73f5e65ecbcb6a0acb908b54226edfccf47eccc (diff) | |
| download | rust-960d1b74c5b8db59b871af50d948168f64b1be8a.tar.gz rust-960d1b74c5b8db59b871af50d948168f64b1be8a.zip | |
Don't allow pthread_rwlock_t to recursively lock itself
This is allowed by POSIX and can happen on glibc with processors that support hardware lock elision.
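To illustrate the hazard this guards against, here is a minimal sketch (not part of the commit) using the public `std::sync::RwLock`, which is built on this primitive. The exact outcome is platform-dependent; the point is that a recursive read taken while the same thread holds the write lock must never hand out an aliasing reference.

```rust
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(0u32);

    // Exclusive access: the underlying pthread_rwlock_t is write locked.
    let _writer = lock.write().unwrap();

    // Recursive read attempt from the same thread. POSIX leaves the result
    // unspecified, and glibc with hardware lock elision may let it succeed,
    // which would alias the data behind the live write guard. With the
    // write_locked flag added in this patch, the sys-level wrapper detects
    // the recursion, releases the raw lock if it was acquired, and panics
    // ("rwlock read lock would result in deadlock") instead.
    let _reader = lock.read();
}
```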
Diffstat (limited to 'src/libstd/sys')
| -rw-r--r-- | src/libstd/sys/unix/rwlock.rs | 61 |
|---|---|---|

1 file changed, 52 insertions, 9 deletions
diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 44bd5d895f2..72ab70aeac4 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
@@ -11,14 +11,20 @@
 use libc;
 use cell::UnsafeCell;
 
-pub struct RWLock { inner: UnsafeCell<libc::pthread_rwlock_t> }
+pub struct RWLock {
+    inner: UnsafeCell<libc::pthread_rwlock_t>,
+    write_locked: UnsafeCell<bool>,
+}
 
 unsafe impl Send for RWLock {}
 unsafe impl Sync for RWLock {}
 
 impl RWLock {
     pub const fn new() -> RWLock {
-        RWLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER) }
+        RWLock {
+            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+            write_locked: UnsafeCell::new(false),
+        }
     }
     #[inline]
     pub unsafe fn read(&self) {
@@ -35,7 +41,16 @@ impl RWLock {
         //
         // We roughly maintain the deadlocking behavior by panicking to ensure
         // that this lock acquisition does not succeed.
-        if r == libc::EDEADLK {
+        //
+        // We also check whether this lock is already write locked. This
+        // is only possible if it was write locked by the current thread and
+        // the implementation allows recursive locking. The POSIX standard
+        // doesn't require recursively locking a rwlock to deadlock, but we can't
+        // allow that because it could lead to aliasing issues.
+        if r == libc::EDEADLK || *self.write_locked.get() {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
@@ -43,29 +58,57 @@ impl RWLock {
     }
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
-        libc::pthread_rwlock_tryrdlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+        if r == 0 && *self.write_locked.get() {
+            self.raw_unlock();
+            false
+        } else {
+            r == 0
+        }
     }
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK
-        if r == libc::EDEADLK {
+        // see comments above for why we check for EDEADLK and write_locked
+        if r == libc::EDEADLK || *self.write_locked.get() {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock write lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
         }
+        *self.write_locked.get() = true;
     }
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
-        libc::pthread_rwlock_trywrlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+        if r == 0 && *self.write_locked.get() {
+            self.raw_unlock();
+            false
+        } else if r == 0 {
+            *self.write_locked.get() = true;
+            true
+        } else {
+            false
+        }
     }
     #[inline]
-    pub unsafe fn read_unlock(&self) {
+    unsafe fn raw_unlock(&self) {
         let r = libc::pthread_rwlock_unlock(self.inner.get());
         debug_assert_eq!(r, 0);
     }
     #[inline]
-    pub unsafe fn write_unlock(&self) { self.read_unlock() }
+    pub unsafe fn read_unlock(&self) {
+        debug_assert!(!*self.write_locked.get());
+        self.raw_unlock();
+    }
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        debug_assert!(*self.write_locked.get());
+        *self.write_locked.get() = false;
+        self.raw_unlock();
+    }
     #[inline]
     pub unsafe fn destroy(&self) {
         let r = libc::pthread_rwlock_destroy(self.inner.get());
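As a usage-level sketch of the non-blocking paths (again illustrative, not from the commit): with the `write_locked` flag, `try_read` and `try_write` release the raw lock again if an elided acquisition sneaks through and report failure, so the public API returns an error rather than an aliasing guard.

```rust
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(0u32);
    let _writer = lock.write().unwrap();

    // Even if glibc's elided tryrdlock/trywrlock returned 0 here, the sys
    // wrapper sees write_locked == true, calls raw_unlock() and reports
    // failure, so both calls yield Err(TryLockError::WouldBlock).
    assert!(lock.try_read().is_err());
    assert!(lock.try_write().is_err());
}
```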
