Diffstat (limited to 'src/libstd/sys')
-rw-r--r--  src/libstd/sys/wasm/condvar_atomics.rs        104
-rw-r--r--  src/libstd/sys/wasm/mod.rs                      22
-rw-r--r--  src/libstd/sys/wasm/mutex_atomics.rs           163
-rw-r--r--  src/libstd/sys/wasm/rwlock_atomics.rs          161
-rw-r--r--  src/libstd/sys/wasm/thread.rs                   21
-rw-r--r--  src/libstd/sys/wasm/thread_local_atomics.rs     32
6 files changed, 499 insertions, 4 deletions
diff --git a/src/libstd/sys/wasm/condvar_atomics.rs b/src/libstd/sys/wasm/condvar_atomics.rs
new file mode 100644
index 00000000000..5c55fd0a618
--- /dev/null
+++ b/src/libstd/sys/wasm/condvar_atomics.rs
@@ -0,0 +1,104 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use arch::wasm32::atomic;
+use cmp;
+use mem;
+use sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use sys::mutex::Mutex;
+use time::Duration;
+
+pub struct Condvar {
+    cnt: AtomicUsize,
+}
+
+// Condition variables are implemented internally with a simple counter that
+// is likely to cause spurious wakeups. Blocking on a condition variable will
+// read the value of the internal counter, unlock the given mutex, and then
+// block if and only if the counter's value is still the same. Notifying a
+// condition variable will modify the counter (add one for now) and then wake up
+// a thread waiting on the address of the counter.
+//
+// As a result, a thread waiting on the condition variable avoids going to
+// sleep if it's notified after the lock is unlocked but before it fully goes
+// to sleep, while a thread that does fall asleep is guaranteed to be woken
+// eventually, since every notification bumps the counter and calls `wake`.
+//
+// Note that it's possible for 2 or more threads to be woken up by a call to
+// `notify_one` with this implementation. That can happen when the modification
+// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
+// and the subsequent `wake` may wake up a thread that's actually blocking. We
+// consider this a spurious wakeup, though, which all users of condition
+// variables must already be prepared to handle. As a result, this source of
+// spurious wakeups is currently thought to be ok, although it may be
+// problematic later on if it causes too many spurious wakeups.
+
+impl Condvar {
+    pub const fn new() -> Condvar {
+        Condvar { cnt: AtomicUsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // nothing to do
+    }
+
+    pub unsafe fn notify_one(&self) {
+        self.cnt.fetch_add(1, SeqCst);
+        atomic::wake(self.ptr(), 1);
+    }
+
+    #[inline]
+    pub unsafe fn notify_all(&self) {
+        self.cnt.fetch_add(1, SeqCst);
+        atomic::wake(self.ptr(), -1); // -1 == "wake everyone"
+    }
+
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        // "atomically block and unlock" implemented by loading our current
+        // counter's value, unlocking the mutex, and blocking if the counter
+        // still has the same value.
+        //
+        // Notifications happen by incrementing the counter and then waking a
+        // thread. If the increment lands after we unlock the mutex but before
+        // we block, the comparison below fails and we never sleep; otherwise
+        // the call to `wake` will wake us up once we're asleep.
+        let ticket = self.cnt.load(SeqCst) as i32;
+        mutex.unlock();
+        let val = atomic::wait_i32(self.ptr(), ticket, -1);
+        // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
+        debug_assert!(val == 0 || val == 1);
+        mutex.lock();
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        let ticket = self.cnt.load(SeqCst) as i32;
+        mutex.unlock();
+        let nanos = dur.as_nanos();
+        let nanos = cmp::min(i64::max_value() as u128, nanos);
+
+        // If the return value is 2 then a timeout happened, so we return
+        // `false` as we weren't actually notified.
+        let ret = atomic::wait_i32(self.ptr(), ticket, nanos as i64) != 2;
+        mutex.lock();
+        return ret
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // nothing to do
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
+        &self.cnt as *const AtomicUsize as *mut i32
+    }
+}
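The extra wakeups described above are one more reason user code must re-check its predicate in a loop. A minimal user-level sketch with `std::sync::Condvar` (illustration only, not part of this diff):

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = Arc::clone(&pair);
        thread::spawn(move || {
            let (ref lock, ref cvar) = *pair2;
            *lock.lock().unwrap() = true;
            cvar.notify_one();
        });
        let (ref lock, ref cvar) = *pair;
        let mut ready = lock.lock().unwrap();
        // A spurious wakeup (including the "2+ threads" case described in
        // the diff above) simply re-runs this check.
        while !*ready {
            ready = cvar.wait(ready).unwrap();
        }
    }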
diff --git a/src/libstd/sys/wasm/mod.rs b/src/libstd/sys/wasm/mod.rs
index c02e5e809c8..e11b4d71aae 100644
--- a/src/libstd/sys/wasm/mod.rs
+++ b/src/libstd/sys/wasm/mod.rs
@@ -36,24 +36,38 @@ pub mod args;
 #[cfg(feature = "backtrace")]
 pub mod backtrace;
 pub mod cmath;
-pub mod condvar;
 pub mod env;
 pub mod fs;
 pub mod memchr;
-pub mod mutex;
 pub mod net;
 pub mod os;
 pub mod os_str;
 pub mod path;
 pub mod pipe;
 pub mod process;
-pub mod rwlock;
 pub mod stack_overflow;
 pub mod thread;
-pub mod thread_local;
 pub mod time;
 pub mod stdio;
 
+cfg_if! {
+    if #[cfg(target_feature = "atomics")] {
+        #[path = "condvar_atomics.rs"]
+        pub mod condvar;
+        #[path = "mutex_atomics.rs"]
+        pub mod mutex;
+        #[path = "rwlock_atomics.rs"]
+        pub mod rwlock;
+        #[path = "thread_local_atomics.rs"]
+        pub mod thread_local;
+    } else {
+        pub mod condvar;
+        pub mod mutex;
+        pub mod rwlock;
+        pub mod thread_local;
+    }
+}
+
 #[cfg(not(test))]
 pub fn init() {
 }
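For reference, the `cfg_if!` block above expands to pairs of `#[cfg]`-gated module declarations; roughly, for one of the modules (a sketch of the expansion, not literal compiler output):

    #[cfg(target_feature = "atomics")]
    #[path = "condvar_atomics.rs"]
    pub mod condvar;
    #[cfg(not(target_feature = "atomics"))]
    pub mod condvar;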
diff --git a/src/libstd/sys/wasm/mutex_atomics.rs b/src/libstd/sys/wasm/mutex_atomics.rs
new file mode 100644
index 00000000000..ced6c17ef96
--- /dev/null
+++ b/src/libstd/sys/wasm/mutex_atomics.rs
@@ -0,0 +1,163 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use arch::wasm32::atomic;
+use cell::UnsafeCell;
+use mem;
+use sync::atomic::{AtomicUsize, AtomicU64, Ordering::SeqCst};
+
+pub struct Mutex {
+    locked: AtomicUsize,
+}
+
+// Mutexes have a pretty simple implementation: they internally contain an
+// `i32` that is 0 when unlocked and 1 when the mutex is locked.
+// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
+// if it fails it then waits for a notification. Releasing a lock is then done
+// by swapping in 0 and then notifying any waiters, if present.
+
+impl Mutex {
+    pub const fn new() -> Mutex {
+        Mutex { locked: AtomicUsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // nothing to do
+    }
+
+    pub unsafe fn lock(&self) {
+        while !self.try_lock() {
+            let val = atomic::wait_i32(
+                self.ptr(),
+                1,  // we expect our mutex to be locked
+                -1, // wait infinitely
+            );
+            // we should have either woken up (0) or gotten a not-equal due
+            // to a race (1). We should never time out (2).
+            debug_assert!(val == 0 || val == 1);
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        let prev = self.locked.swap(0, SeqCst);
+        debug_assert_eq!(prev, 1);
+        atomic::wake(self.ptr(), 1); // wake up one waiter, if any
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // nothing to do
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
+        &self.locked as *const AtomicUsize as *mut i32
+    }
+}
+
+pub struct ReentrantMutex {
+    owner: AtomicU64,
+    recursions: UnsafeCell<u32>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+// Reentrant mutexes are implemented similarly to the mutexes above, except
+// that instead of "1" meaning locked we use the id of a thread to represent
+// whether it has locked a mutex. That way we have an atomic counter which
+// always holds the id of the thread that currently holds the lock (or 0 if
+// the lock is unlocked).
+//
+// Once a thread acquires a lock recursively, which it detects by looking at
+// the value that's already there, it will update a local `recursions` counter
+// in a nonatomic fashion (as we hold the lock). The lock is then fully
+// released when this recursion counter reaches 0.
+
+impl ReentrantMutex {
+    pub unsafe fn uninitialized() -> ReentrantMutex {
+        ReentrantMutex {
+            owner: AtomicU64::new(0),
+            recursions: UnsafeCell::new(0),
+        }
+    }
+
+    pub unsafe fn init(&mut self) {
+        // nothing to do...
+    }
+
+    pub unsafe fn lock(&self) {
+        let me = thread_id();
+        while let Err(owner) = self._try_lock(me) {
+            let val = atomic::wait_i64(self.ptr(), owner as i64, -1);
+            debug_assert!(val == 0 || val == 1);
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self._try_lock(thread_id()).is_ok()
+    }
+
+    #[inline]
+    unsafe fn _try_lock(&self, id: u64) -> Result<(), u64> {
+        let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0
+        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
+            // we transitioned from unlocked to locked
+            Ok(_) => {
+                debug_assert_eq!(*self.recursions.get(), 0);
+                Ok(())
+            }
+
+            // we currently own this lock, so let's update our count and
+            // return success.
+            Err(n) if n == id => {
+                *self.recursions.get() += 1;
+                Ok(())
+            }
+
+            // Someone else owns the lock, let our caller take care of it
+            Err(other) => Err(other),
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        // If we didn't ever recursively lock the lock then we fully unlock
+        // the mutex and wake up a waiter, if any. Otherwise we decrement our
+        // recursion counter; a later `unlock` will release the mutex for real.
+        match *self.recursions.get() {
+            0 => {
+                self.owner.swap(0, SeqCst);
+                atomic::wake(self.ptr() as *mut i32, 1); // wake up one waiter, if any
+            }
+            ref mut n => *n -= 1,
+        }
+    }
+
+    pub unsafe fn destroy(&self) {
+        // nothing to do...
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i64 {
+        &self.owner as *const AtomicU64 as *mut i64
+    }
+}
+
+fn thread_id() -> u64 {
+    panic!("thread ids not implemented on wasm with atomics yet")
+}
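The locking protocol above doesn't depend on wasm's `wait`/`wake` intrinsics for its correctness, only for efficient blocking. A portable sketch of the same cmpxchg protocol, substituting spinning for the blocking wait (`SpinMutex` is a hypothetical name for illustration, not how libstd blocks on wasm):

    use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

    pub struct SpinMutex {
        locked: AtomicUsize, // 0 == unlocked, 1 == locked
    }

    impl SpinMutex {
        pub const fn new() -> SpinMutex {
            SpinMutex { locked: AtomicUsize::new(0) }
        }

        pub fn lock(&self) {
            // Fast path: cmpxchg 0 -> 1. Where wasm would block with
            // `atomic::wait_i32`, this sketch spins instead.
            while self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_err() {
                std::hint::spin_loop();
            }
        }

        pub fn unlock(&self) {
            let prev = self.locked.swap(0, SeqCst);
            debug_assert_eq!(prev, 1);
            // The wasm version additionally `wake`s one waiter here.
        }
    }

    fn main() {
        let m = SpinMutex::new();
        m.lock();
        m.unlock();
    }

The reentrant variant is the same shape: the cmpxchg installs the caller's (nonzero) thread id instead of 1, so a repeated lock by the owner is recognized by the `Err(n) if n == id` arm and counted rather than blocked.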
diff --git a/src/libstd/sys/wasm/rwlock_atomics.rs b/src/libstd/sys/wasm/rwlock_atomics.rs
new file mode 100644
index 00000000000..3623333cc86
--- /dev/null
+++ b/src/libstd/sys/wasm/rwlock_atomics.rs
@@ -0,0 +1,161 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use cell::UnsafeCell;
+use sys::mutex::Mutex;
+use sys::condvar::Condvar;
+
+pub struct RWLock {
+    lock: Mutex,
+    cond: Condvar,
+    state: UnsafeCell<State>,
+}
+
+enum State {
+    Unlocked,
+    Reading(usize),
+    Writing,
+}
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {}
+
+// This rwlock implementation is a relatively simple one with a condition
+// variable for readers/writers as well as a mutex protecting the internal
+// state of the lock. A current downside of the implementation is that
+// unlocking the lock will notify *all* waiters rather than just readers or
+// just writers, which can cause lots of "thundering herd" problems. While
+// hopefully correct, this implementation is very likely to be changed in the
+// future.
+
+impl RWLock {
+    pub const fn new() -> RWLock {
+        RWLock {
+            lock: Mutex::new(),
+            cond: Condvar::new(),
+            state: UnsafeCell::new(State::Unlocked),
+        }
+    }
+
+    #[inline]
+    pub unsafe fn read(&self) {
+        self.lock.lock();
+        while !(*self.state.get()).inc_readers() {
+            self.cond.wait(&self.lock);
+        }
+        self.lock.unlock();
+    }
+
+    #[inline]
+    pub unsafe fn try_read(&self) -> bool {
+        self.lock.lock();
+        let ok = (*self.state.get()).inc_readers();
+        self.lock.unlock();
+        return ok
+    }
+
+    #[inline]
+    pub unsafe fn write(&self) {
+        self.lock.lock();
+        while !(*self.state.get()).inc_writers() {
+            self.cond.wait(&self.lock);
+        }
+        self.lock.unlock();
+    }
+
+    #[inline]
+    pub unsafe fn try_write(&self) -> bool {
+        self.lock.lock();
+        let ok = (*self.state.get()).inc_writers();
+        self.lock.unlock();
+        return ok
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        self.lock.lock();
+        let notify = (*self.state.get()).dec_readers();
+        self.lock.unlock();
+        if notify {
+            // FIXME: should only wake up one of these some of the time
+            self.cond.notify_all();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        self.lock.lock();
+        (*self.state.get()).dec_writers();
+        self.lock.unlock();
+        // FIXME: should only wake up one of these some of the time
+        self.cond.notify_all();
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        self.lock.destroy();
+        self.cond.destroy();
+    }
+}
+
+impl State {
+    fn inc_readers(&mut self) -> bool {
+        match *self {
+            State::Unlocked => {
+                *self = State::Reading(1);
+                true
+            }
+            State::Reading(ref mut cnt) => {
+                *cnt += 1;
+                true
+            }
+            State::Writing => false
+        }
+    }
+
+    fn inc_writers(&mut self) -> bool {
+        match *self {
+            State::Unlocked => {
+                *self = State::Writing;
+                true
+            }
+            State::Reading(_) |
+            State::Writing => false
+        }
+    }
+
+    fn dec_readers(&mut self) -> bool {
+        let zero = match *self {
+            State::Reading(ref mut cnt) => {
+                *cnt -= 1;
+                *cnt == 0
+            }
+            State::Unlocked |
+            State::Writing => invalid(),
+        };
+        if zero {
+            *self = State::Unlocked;
+        }
+        zero
+    }
+
+    fn dec_writers(&mut self) {
+        match *self {
+            State::Writing => {}
+            State::Unlocked |
+            State::Reading(_) => invalid(),
+        }
+        *self = State::Unlocked;
+    }
+}
+
+fn invalid() -> ! {
+    panic!("inconsistent rwlock");
+}
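The reader/writer accounting lives entirely in the `State` machine; here is a standalone sketch exercising those transitions outside any lock (illustration only, with the methods copied in minimally):

    enum State { Unlocked, Reading(usize), Writing }

    impl State {
        fn inc_readers(&mut self) -> bool {
            match *self {
                State::Unlocked => { *self = State::Reading(1); true }
                State::Reading(ref mut cnt) => { *cnt += 1; true }
                State::Writing => false,
            }
        }
        fn inc_writers(&mut self) -> bool {
            match *self {
                State::Unlocked => { *self = State::Writing; true }
                _ => false,
            }
        }
        fn dec_readers(&mut self) -> bool {
            let zero = match *self {
                State::Reading(ref mut cnt) => { *cnt -= 1; *cnt == 0 }
                _ => panic!("inconsistent rwlock"),
            };
            if zero { *self = State::Unlocked; }
            zero
        }
    }

    fn main() {
        let mut s = State::Unlocked;
        assert!(s.inc_readers());  // Unlocked -> Reading(1)
        assert!(s.inc_readers());  // Reading(1) -> Reading(2)
        assert!(!s.inc_writers()); // writers excluded while readers hold it
        assert!(!s.dec_readers()); // Reading(2) -> Reading(1): no notification
        assert!(s.dec_readers());  // Reading(1) -> Unlocked: notify waiters
        assert!(s.inc_writers());  // Unlocked -> Writing
    }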
diff --git a/src/libstd/sys/wasm/thread.rs b/src/libstd/sys/wasm/thread.rs
index 8173a624211..bef6c1f3490 100644
--- a/src/libstd/sys/wasm/thread.rs
+++ b/src/libstd/sys/wasm/thread.rs
@@ -33,10 +33,31 @@ impl Thread {
         // nope
     }
 
+    #[cfg(not(target_feature = "atomics"))]
     pub fn sleep(_dur: Duration) {
         panic!("can't sleep");
     }
 
+    #[cfg(target_feature = "atomics")]
+    pub fn sleep(dur: Duration) {
+        use arch::wasm32::atomic;
+        use cmp;
+
+        // Use an atomic wait to artificially block the current thread with
+        // the given timeout. Note that we should never be notified (return
+        // value of 0) and our comparison should never fail (return value of
+        // 1), so we should always resume execution through a timeout (return
+        // value 2).
+        let mut nanos = dur.as_nanos();
+        while nanos > 0 {
+            let amt = cmp::min(i64::max_value() as u128, nanos);
+            let mut x = 0;
+            let val = unsafe { atomic::wait_i32(&mut x, 0, amt as i64) };
+            debug_assert_eq!(val, 2);
+            nanos -= amt;
+        }
+    }
+
     pub fn join(self) {
         match self.0 {}
     }
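The sleep loop above consumes a `u128` nanosecond count in `i64`-sized chunks because `wait_i32` takes an `i64` timeout. The same chunking, sketched portably with `std::thread::sleep` standing in for the atomic wait (`sleep_chunked` is a hypothetical name for illustration):

    use std::time::Duration;

    fn sleep_chunked(dur: Duration) {
        let mut nanos = dur.as_nanos();
        while nanos > 0 {
            // Clamp each chunk to what an i64 timeout can express.
            let amt = std::cmp::min(i64::max_value() as u128, nanos);
            std::thread::sleep(Duration::from_nanos(amt as u64));
            nanos -= amt;
        }
    }

    fn main() {
        sleep_chunked(Duration::from_millis(1));
    }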
diff --git a/src/libstd/sys/wasm/thread_local_atomics.rs b/src/libstd/sys/wasm/thread_local_atomics.rs
new file mode 100644
index 00000000000..1394013b4a3
--- /dev/null
+++ b/src/libstd/sys/wasm/thread_local_atomics.rs
@@ -0,0 +1,32 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub type Key = usize;
+
+pub unsafe fn create(_dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+pub unsafe fn set(_key: Key, _value: *mut u8) {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+pub unsafe fn get(_key: Key) -> *mut u8 {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+pub unsafe fn destroy(_key: Key) {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+    false
+}