Diffstat (limited to 'src/libstd/sys')
-rw-r--r--  src/libstd/sys/unix/fs.rs     |  4
-rw-r--r--  src/libstd/sys/unix/mutex.rs  | 10
-rw-r--r--  src/libstd/sys/unix/rwlock.rs | 48
3 files changed, 40 insertions, 22 deletions
diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs
index 2cfc63d9492..80cf6a5dbc2 100644
--- a/src/libstd/sys/unix/fs.rs
+++ b/src/libstd/sys/unix/fs.rs
@@ -703,6 +703,10 @@ impl File {
             | opts.get_access_mode()?
             | opts.get_creation_mode()?
             | (opts.custom_flags as c_int & !libc::O_ACCMODE);
+        // The third argument of `open64` is documented to have type `mode_t`. On
+        // some platforms (like macOS, where `open64` is actually `open`), `mode_t` is `u16`.
+        // However, since this is a variadic function, C integer promotion rules mean that on
+        // the ABI level, this still gets passed as `c_int` (aka `i32` on Unix platforms).
         let fd = cvt_r(|| unsafe { open64(path.as_ptr(), flags, opts.mode as c_int) })?;
         let fd = FileDesc::new(fd);
 
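To make the promotion point concrete, here is a minimal standalone sketch at the libc level (not part of the diff; `create_file` and the path are illustrative, and plain `open` stands in for `open64`): even though `mode_t` may be `u16`, the variadic call site widens the mode under C's default argument promotions, so the cast to `c_int` is written out explicitly.

    use libc::{c_int, mode_t};
    use std::ffi::CString;

    fn create_file(path: &str, mode: mode_t) -> c_int {
        let path = CString::new(path).unwrap();
        // `mode_t` can be narrower than `int` (e.g. `u16` on macOS). Because
        // `open` is variadic, the mode is passed as an `int` at the ABI level,
        // so the widening cast is made explicit here.
        unsafe { libc::open(path.as_ptr(), libc::O_WRONLY | libc::O_CREAT, mode as c_int) }
    }

    fn main() {
        let fd = create_file("/tmp/promotion-example", 0o644);
        assert!(fd >= 0);
        unsafe { libc::close(fd) };
    }
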
diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs
index 103d87e3d2f..45c600f75f5 100644
--- a/src/libstd/sys/unix/mutex.rs
+++ b/src/libstd/sys/unix/mutex.rs
@@ -28,14 +28,20 @@ impl Mutex {
         //
         // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
         // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
-        // try to re-lock it from the same thread when you already hold a lock.
+        // try to re-lock it from the same thread when you already hold a lock
+        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
+        // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+        // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
+        // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
+        // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
+        // a Mutex where re-locking is UB.
         //
         // In practice, glibc takes advantage of this undefined behavior to
         // implement hardware lock elision, which uses hardware transactional
         // memory to avoid acquiring the lock. While a transaction is in
         // progress, the lock appears to be unlocked. This isn't a problem for
         // other threads since the transactional memory will abort if a conflict
-        // is detected, however no abort is generated if re-locking from the
+        // is detected, however no abort is generated when re-locking from the
         // same thread.
         //
         // Since locking the same mutex twice will result in two aliasing &mut
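The remedy the comment points at is to set the mutex type explicitly instead of relying on `PTHREAD_MUTEX_INITIALIZER`. Below is a standalone sketch of that technique at the libc level (not copied from this file's `init` path): with `PTHREAD_MUTEX_NORMAL`, re-locking from the same thread is a guaranteed deadlock rather than undefined behavior.

    use std::mem::MaybeUninit;

    unsafe fn init_normal_mutex(m: *mut libc::pthread_mutex_t) {
        let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
        assert_eq!(libc::pthread_mutexattr_init(attr.as_mut_ptr()), 0);
        // Request PTHREAD_MUTEX_NORMAL explicitly instead of leaving the
        // implementation-defined PTHREAD_MUTEX_DEFAULT in place.
        assert_eq!(
            libc::pthread_mutexattr_settype(attr.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL),
            0
        );
        assert_eq!(libc::pthread_mutex_init(m, attr.as_ptr()), 0);
        assert_eq!(libc::pthread_mutexattr_destroy(attr.as_mut_ptr()), 0);
    }

    fn main() {
        let mut m = MaybeUninit::<libc::pthread_mutex_t>::uninit();
        unsafe {
            init_normal_mutex(m.as_mut_ptr());
            assert_eq!(libc::pthread_mutex_lock(m.as_mut_ptr()), 0);
            assert_eq!(libc::pthread_mutex_unlock(m.as_mut_ptr()), 0);
            libc::pthread_mutex_destroy(m.as_mut_ptr());
        }
    }
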
diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 079dea671ef..2b5067a34f6 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
@@ -22,32 +22,33 @@ impl RWLock {
     pub unsafe fn read(&self) {
         let r = libc::pthread_rwlock_rdlock(self.inner.get());
 
-        // According to the pthread_rwlock_rdlock spec, this function **may**
-        // fail with EDEADLK if a deadlock is detected. On the other hand
-        // pthread mutexes will *never* return EDEADLK if they are initialized
-        // as the "fast" kind (which ours always are). As a result, a deadlock
-        // situation may actually return from the call to pthread_rwlock_rdlock
-        // instead of blocking forever (as mutexes and Windows rwlocks do). Note
-        // that not all unix implementations, however, will return EDEADLK for
-        // their rwlocks.
+        // According to POSIX, when a thread tries to acquire this read lock
+        // while it already holds the write lock
+        // (or vice versa, or tries to acquire the write lock twice),
+        // "the call shall either deadlock or return [EDEADLK]"
+        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
+        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
+        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
+        // got the lock.
         //
-        // We roughly maintain the deadlocking behavior by panicking to ensure
-        // that this lock acquisition does not succeed.
-        //
-        // We also check whether this lock is already write locked. This
-        // is only possible if it was write locked by the current thread and
-        // the implementation allows recursive locking. The POSIX standard
-        // doesn't require recursively locking a rwlock to deadlock, but we can't
-        // allow that because it could lead to aliasing issues.
+        // However, (at least) glibc before version 2.25 does not conform to this spec,
+        // and can return `r == 0` even when this thread already holds the write lock.
+        // We thus check for this situation ourselves and panic when detecting that a thread
+        // got the write lock more than once, or got a read and a write lock.
         if r == libc::EAGAIN {
             panic!("rwlock maximum reader count exceeded");
         } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+            // data races.
             if r == 0 {
+                // `pthread_rwlock_rdlock` succeeded when it should not have.
                 self.raw_unlock();
             }
             panic!("rwlock read lock would result in deadlock");
         } else {
-            assert_eq!(r, 0);
+            // According to POSIX, for a properly initialized rwlock this can only
+            // return EAGAIN or EDEADLK or 0. We rely on that.
+            debug_assert_eq!(r, 0);
             self.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
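As an illustration of the scenario these checks target (not part of the diff, and intentionally a program that deadlocks or panics; it assumes std's RwLock bottoms out in this pthread-based implementation): re-acquiring as a reader while the same thread holds the writer must not succeed.

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0u32);
        let _w = lock.write().unwrap(); // this thread now holds the write lock
        // Per POSIX this call deadlocks or reports EDEADLK; on an old glibc a
        // bogus "success" is caught by the `write_locked` check and turned
        // into a panic instead of handing out a read guard that aliases `_w`.
        let _r = lock.read();
    }
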
@@ -56,6 +57,7 @@ impl RWLock {
         let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
         if r == 0 {
             if *self.write_locked.get() {
+                // `pthread_rwlock_tryrdlock` succeeded when it should not have.
                 self.raw_unlock();
                 false
             } else {
@@ -69,17 +71,22 @@ impl RWLock {
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // See comments above for why we check for EDEADLK and write_locked. We
-        // also need to check that num_readers is 0.
+        // See comments above for why we check for EDEADLK and write_locked. For the same reason,
+        // we also need to check that there are no readers (tracked in `num_readers`).
         if r == libc::EDEADLK
-            || *self.write_locked.get()
+            || (r == 0 && *self.write_locked.get())
             || self.num_readers.load(Ordering::Relaxed) != 0
         {
+            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+            // data races.
             if r == 0 {
+                // `pthread_rwlock_wrlock` succeeded when it should not have.
                 self.raw_unlock();
             }
             panic!("rwlock write lock would result in deadlock");
         } else {
+            // According to POSIX, for a properly initialized rwlock this can only
+            // return EDEADLK or 0. We rely on that.
             debug_assert_eq!(r, 0);
         }
         *self.write_locked.get() = true;
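The `num_readers` check corresponds to a thread trying to upgrade its own read lock, roughly as follows (illustrative only, not part of the diff; the program intentionally deadlocks or panics):

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0u32);
        let _r = lock.read().unwrap(); // the reader count is now nonzero
        // Either pthread_rwlock_wrlock deadlocks or reports EDEADLK here, or,
        // if it claims success anyway, the nonzero reader count makes the
        // call panic rather than hand out a write guard aliasing `_r`.
        let _w = lock.write();
    }
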
@@ -89,6 +96,7 @@ impl RWLock {
         let r = libc::pthread_rwlock_trywrlock(self.inner.get());
         if r == 0 {
             if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                // `pthread_rwlock_trywrlock` succeeded when it should not have.
                 self.raw_unlock();
                 false
             } else {
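For contrast, the non-blocking variant does not reach these checks while a reader is outstanding, because `pthread_rwlock_trywrlock` reports EBUSY in that case. A small sketch (not part of the diff):

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0u32);
        let _r = lock.read().unwrap();
        // The write lock cannot be granted while a read guard is alive, so
        // try_write fails cleanly instead of deadlocking or panicking.
        assert!(lock.try_write().is_err());
    }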