author     bors <bors@rust-lang.org>  2016-06-03 04:09:31 -0700
committer  bors <bors@rust-lang.org>  2016-06-03 04:09:31 -0700
commit     9552bcdd92dfd09049ce9dd299b4bfc513ac075d (patch)
tree       30e4ca00974aaf0c10b6a8ac546cd5e63026fda0 /src/libstd/sys
parent     95206f438f1573e95601f06b315a151de010e92f (diff)
parent     fc4b35612550d833cefcd586cb13ebc0dc5a51e1 (diff)
Auto merge of #33861 - Amanieu:lock_elision_fix, r=alexcrichton
Make sure Mutex and RwLock can't be re-locked on the same thread

Fixes #33770

r? @alexcrichton
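
For context, the user-visible hazard this patch closes looks roughly like the sketch below (a hypothetical example, not code from the patch): re-acquiring a lock on the thread that already holds it. Under glibc's hardware lock elision the second acquisition could transiently succeed, handing out two aliasing mutable borrows; with this change it reliably deadlocks (Mutex) or panics (RwLock) instead.

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0);
        let _w = lock.write().unwrap();
        // Re-acquiring on the thread that holds the write guard: after this
        // patch the sys layer detects it and panics with "rwlock read lock
        // would result in deadlock" rather than risking undefined behavior.
        // let _r = lock.read().unwrap(); // would panic if uncommented
    }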
Diffstat (limited to 'src/libstd/sys')
-rw-r--r--  src/libstd/sys/common/mutex.rs  |  6
-rw-r--r--  src/libstd/sys/unix/mutex.rs    | 33
-rw-r--r--  src/libstd/sys/unix/rwlock.rs   | 76
-rw-r--r--  src/libstd/sys/windows/mutex.rs |  2
4 files changed, 108 insertions(+), 9 deletions(-)
diff --git a/src/libstd/sys/common/mutex.rs b/src/libstd/sys/common/mutex.rs
index 5a6dfe7fb1a..7a2183c522f 100644
--- a/src/libstd/sys/common/mutex.rs
+++ b/src/libstd/sys/common/mutex.rs
@@ -27,6 +27,12 @@ impl Mutex {
     /// first used with any of the functions below.
     pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
 
+    /// Prepare the mutex for use.
+    ///
+    /// This should be called once the mutex is at a stable memory address.
+    #[inline]
+    pub unsafe fn init(&mut self) { self.0.init() }
+
     /// Locks the mutex, blocking the current thread until it is available.
     ///
     /// Behavior is undefined if the mutex has been moved between this and any
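
As a sketch of the calling pattern the new `init` hook implies (the `SysMutex` stand-in and the `Box` are illustrative, not taken from the std sources), the owner moves the mutex to its final address first and only then initializes it:

    // Hypothetical stand-in for the platform mutex; illustrative only.
    struct SysMutex;
    impl SysMutex {
        const fn new() -> SysMutex { SysMutex }
        unsafe fn init(&mut self) { /* one-time setup, address now stable */ }
    }

    fn main() {
        // Heap-allocate so the mutex has a stable address, then initialize.
        let mut m = Box::new(SysMutex::new());
        unsafe { m.init(); }
    }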
diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs
index 4e4abcfbeee..52cf3f97c5c 100644
--- a/src/libstd/sys/unix/mutex.rs
+++ b/src/libstd/sys/unix/mutex.rs
@@ -30,6 +30,39 @@ impl Mutex {
         Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
     }
     #[inline]
+    pub unsafe fn init(&mut self) {
+        // Issue #33770
+        //
+        // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+        // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+        // try to re-lock it from the same thread when you already hold a lock.
+        //
+        // In practice, glibc takes advantage of this undefined behavior to
+        // implement hardware lock elision, which uses hardware transactional
+        // memory to avoid acquiring the lock. While a transaction is in
+        // progress, the lock appears to be unlocked. This isn't a problem for
+        // other threads since the transactional memory will abort if a conflict
+        // is detected; however, no abort is generated when re-locking from the
+        // same thread.
+        //
+        // Since locking the same mutex twice will result in two aliasing &mut
+        // references, we instead create the mutex with type
+        // PTHREAD_MUTEX_NORMAL, which is guaranteed to deadlock if we try to
+        // re-lock it from the same thread, thus avoiding undefined behavior.
+        //
+        // We can't do anything for StaticMutex, but that type is deprecated
+        // anyway.
+        let mut attr: libc::pthread_mutexattr_t = mem::uninitialized();
+        let r = libc::pthread_mutexattr_init(&mut attr);
+        debug_assert_eq!(r, 0);
+        let r = libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL);
+        debug_assert_eq!(r, 0);
+        let r = libc::pthread_mutex_init(self.inner.get(), &attr);
+        debug_assert_eq!(r, 0);
+        let r = libc::pthread_mutexattr_destroy(&mut attr);
+        debug_assert_eq!(r, 0);
+    }
+    #[inline]
     pub unsafe fn lock(&self) {
         let r = libc::pthread_mutex_lock(self.inner.get());
         debug_assert_eq!(r, 0);
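
To make the aliasing concern above concrete, here is a hedged sketch at the safe-API level (hypothetical; the compiler accepts it, so only the runtime deadlock stands between this code and undefined behavior):

    use std::sync::Mutex;

    fn main() {
        let m = Mutex::new(vec![1, 2, 3]);
        let mut g1 = m.lock().unwrap();
        g1.push(4);
        // If a second lock() on this thread could succeed, as it transiently
        // can under lock elision with PTHREAD_MUTEX_DEFAULT, we would get:
        //
        //     let mut g2 = m.lock().unwrap();
        //     let (a, b) = (&mut *g1, &mut *g2); // two aliasing &mut: UB
        //
        // With PTHREAD_MUTEX_NORMAL the second lock() deadlocks instead.
    }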
diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 44bd5d895f2..fbd4e1d1208 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
@@ -10,15 +10,24 @@
 
 use libc;
 use cell::UnsafeCell;
+use sync::atomic::{AtomicUsize, Ordering};
 
-pub struct RWLock { inner: UnsafeCell<libc::pthread_rwlock_t> }
+pub struct RWLock {
+    inner: UnsafeCell<libc::pthread_rwlock_t>,
+    write_locked: UnsafeCell<bool>,
+    num_readers: AtomicUsize,
+}
 
 unsafe impl Send for RWLock {}
 unsafe impl Sync for RWLock {}
 
 impl RWLock {
     pub const fn new() -> RWLock {
-        RWLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER) }
+        RWLock {
+            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+            write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
+        }
     }
     #[inline]
     pub unsafe fn read(&self) {
@@ -35,37 +44,86 @@ impl RWLock {
         //
         // We roughly maintain the deadlocking behavior by panicking to ensure
         // that this lock acquisition does not succeed.
-        if r == libc::EDEADLK {
+        //
+        // We also check whether this lock is already write locked. This
+        // is only possible if it was write locked by the current thread and
+        // the implementation allows recursive locking. The POSIX standard
+        // doesn't require recursively locking a rwlock to deadlock, but we can't
+        // allow that because it could lead to aliasing issues.
+        if r == libc::EDEADLK || *self.write_locked.get() {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
+            self.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
-        libc::pthread_rwlock_tryrdlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+        if r == 0 {
+            if *self.write_locked.get() {
+                self.raw_unlock();
+                false
+            } else {
+                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                true
+            }
+        } else {
+            false
+        }
     }
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK
-        if r == libc::EDEADLK {
+        // See comments above for why we check for EDEADLK and write_locked. We
+        // also need to check that num_readers is 0.
+        if r == libc::EDEADLK || *self.write_locked.get() ||
+           self.num_readers.load(Ordering::Relaxed) != 0 {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock write lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
         }
+        *self.write_locked.get() = true;
     }
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
-        libc::pthread_rwlock_trywrlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+        if r == 0 {
+            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                self.raw_unlock();
+                false
+            } else {
+                *self.write_locked.get() = true;
+                true
+            }
+        } else {
+            false
+        }
     }
     #[inline]
-    pub unsafe fn read_unlock(&self) {
+    unsafe fn raw_unlock(&self) {
         let r = libc::pthread_rwlock_unlock(self.inner.get());
         debug_assert_eq!(r, 0);
     }
     #[inline]
-    pub unsafe fn write_unlock(&self) { self.read_unlock() }
+    pub unsafe fn read_unlock(&self) {
+        debug_assert!(!*self.write_locked.get());
+        self.num_readers.fetch_sub(1, Ordering::Relaxed);
+        self.raw_unlock();
+    }
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+        debug_assert!(*self.write_locked.get());
+        *self.write_locked.get() = false;
+        self.raw_unlock();
+    }
     #[inline]
     pub unsafe fn destroy(&self) {
         let r = libc::pthread_rwlock_destroy(self.inner.get());
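
The bookkeeping added above boils down to two checks, modeled here as a standalone sketch (field names mirror the diff; `Checks` is a hypothetical stand-in, not the real sys type): `write_locked` rejects read-after-write and write-after-write from the holding thread, and `num_readers` rejects write-after-read.

    use std::cell::Cell;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Hypothetical model of the state layered over pthread_rwlock_t.
    struct Checks {
        write_locked: Cell<bool>, // stands in for UnsafeCell<bool>
        num_readers: AtomicUsize,
    }

    impl Checks {
        // A read acquisition must fail while a write lock is held; on the
        // same thread this can otherwise succeed if the OS allows recursion.
        fn read_ok(&self) -> bool {
            !self.write_locked.get()
        }
        // A write acquisition must additionally fail while readers exist.
        fn write_ok(&self) -> bool {
            !self.write_locked.get()
                && self.num_readers.load(Ordering::Relaxed) == 0
        }
    }

    fn main() {
        let c = Checks { write_locked: Cell::new(false),
                         num_readers: AtomicUsize::new(0) };
        assert!(c.write_ok());
        c.num_readers.fetch_add(1, Ordering::Relaxed); // a reader arrives
        assert!(!c.write_ok()); // write-after-read rejected, as in write()
    }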
diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index b770156582d..8762b34e3da 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -64,6 +64,8 @@ impl Mutex {
             held: UnsafeCell::new(false),
         }
     }
+    #[inline]
+    pub unsafe fn init(&mut self) {}
     pub unsafe fn lock(&self) {
         match kind() {
             Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),