-rw-r--r--  library/std/src/os/xous/services/ticktimer.rs |   4 +
-rw-r--r--  library/std/src/sys/xous/locks/condvar.rs     | 111 +++++++
-rw-r--r--  library/std/src/sys/xous/locks/mod.rs         |   7 +
-rw-r--r--  library/std/src/sys/xous/locks/mutex.rs       | 116 +++++++
-rw-r--r--  library/std/src/sys/xous/locks/rwlock.rs      |  72 ++++
-rw-r--r--  library/std/src/sys/xous/mod.rs               |   1 -
6 files changed, 310 insertions(+), 1 deletion(-)
diff --git a/library/std/src/os/xous/services/ticktimer.rs b/library/std/src/os/xous/services/ticktimer.rs
index 447f8f9304a..7759303fdbe 100644
--- a/library/std/src/os/xous/services/ticktimer.rs
+++ b/library/std/src/os/xous/services/ticktimer.rs
@@ -8,6 +8,8 @@ pub(crate) enum TicktimerScalar {
     UnlockMutex(usize /* cookie */),
     WaitForCondition(usize /* cookie */, usize /* timeout (ms) */),
     NotifyCondition(usize /* cookie */, usize /* count */),
+    FreeMutex(usize /* cookie */),
+    FreeCondition(usize /* cookie */),
 }
 
 impl Into<[usize; 5]> for TicktimerScalar {
@@ -19,6 +21,8 @@ impl Into<[usize; 5]> for TicktimerScalar {
             TicktimerScalar::UnlockMutex(cookie) => [7, cookie, 0, 0, 0],
             TicktimerScalar::WaitForCondition(cookie, timeout_ms) => [8, cookie, timeout_ms, 0, 0],
             TicktimerScalar::NotifyCondition(cookie, count) => [9, cookie, count, 0, 0],
+            TicktimerScalar::FreeMutex(cookie) => [10, cookie, 0, 0, 0],
+            TicktimerScalar::FreeCondition(cookie) => [11, cookie, 0, 0, 0],
         }
     }
 }
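
The two new variants follow the existing encoding: the opcode goes in the first
slot of the `[usize; 5]` scalar, the cookie in the second, and the remaining
slots are zero. A minimal sketch of sending one of the new messages, mirroring
how the Drop impls later in this patch use them (the `free_mutex` helper is
illustrative, not part of the patch):

    use crate::os::xous::ffi::scalar;
    use crate::os::xous::services::{ticktimer_server, TicktimerScalar};

    // Ask the ticktimer server to free its state for a mutex, identified by
    // its address cookie. Encodes to [10, cookie, 0, 0, 0] per the impl above.
    fn free_mutex(cookie: usize) {
        scalar(ticktimer_server(), TicktimerScalar::FreeMutex(cookie).into()).ok();
    }
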
diff --git a/library/std/src/sys/xous/locks/condvar.rs b/library/std/src/sys/xous/locks/condvar.rs
new file mode 100644
index 00000000000..1bb38dfa341
--- /dev/null
+++ b/library/std/src/sys/xous/locks/condvar.rs
@@ -0,0 +1,111 @@
+use super::mutex::Mutex;
+use crate::os::xous::ffi::{blocking_scalar, scalar};
+use crate::os::xous::services::ticktimer_server;
+use crate::sync::Mutex as StdMutex;
+use crate::time::Duration;
+
+// The implementation is inspired by Andrew D. Birrell's paper
+// "Implementing Condition Variables with Semaphores"
+
+pub struct Condvar {
+    counter: StdMutex<usize>,
+}
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+impl Condvar {
+    #[inline]
+    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+    pub const fn new() -> Condvar {
+        Condvar { counter: StdMutex::new(0) }
+    }
+
+    pub fn notify_one(&self) {
+        let mut counter = self.counter.lock().unwrap();
+        if *counter == 0 {
+            return;
+        }
+
+        *counter -= 1;
+        let result = blocking_scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(),
+        );
+        drop(counter);
+        result.expect("failure to send NotifyCondition command");
+    }
+
+    pub fn notify_all(&self) {
+        let mut counter = self.counter.lock().unwrap();
+        if *counter == 0 {
+            return;
+        }
+        let result = blocking_scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter)
+                .into(),
+        );
+        *counter = 0;
+        drop(counter);
+
+        result.expect("failure to send NotifyCondition command");
+    }
+
+    fn index(&self) -> usize {
+        self as *const Condvar as usize
+    }
+
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        let mut counter = self.counter.lock().unwrap();
+        *counter += 1;
+        unsafe { mutex.unlock() };
+        drop(counter);
+
+        let result = blocking_scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(),
+        );
+        unsafe { mutex.lock() };
+
+        result.expect("Ticktimer: failure to send WaitForCondition command");
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        let mut counter = self.counter.lock().unwrap();
+        *counter += 1;
+        unsafe { mutex.unlock() };
+        drop(counter);
+
+        let mut millis = dur.as_millis() as usize; // a timeout of 0 means "wait forever"
+        if millis == 0 {
+            millis = 1;
+        }
+
+        let result = blocking_scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis)
+                .into(),
+        );
+        unsafe { mutex.lock() };
+
+        let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0;
+
+        // If we awoke due to a timeout, decrement the wait counter ourselves,
+        // since no `notify()` call will have done it for us.
+        if !result {
+            *self.counter.lock().unwrap() -= 1;
+        }
+        result
+    }
+}
+
+impl Drop for Condvar {
+    fn drop(&mut self) {
+        scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::FreeCondition(self.index()).into(),
+        )
+        .ok();
+    }
+}
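
At this layer the counter tracks how many threads are blocked in `wait` or
`wait_timeout`: waiters increment it before blocking, `notify_one`/`notify_all`
decrement or zero it before messaging the server, and a waiter that times out
decrements it itself. A hedged usage sketch of the protocol, with a hypothetical
`READY` flag standing in for whatever state a higher-level wrapper would guard:

    use crate::sync::atomic::{AtomicBool, Ordering::Relaxed};

    static MUTEX: Mutex = Mutex::new();
    static CONDVAR: Condvar = Condvar::new();
    static READY: AtomicBool = AtomicBool::new(false);

    unsafe fn wait_until_ready() {
        MUTEX.lock();
        while !READY.load(Relaxed) {
            // Releases MUTEX, blocks in the ticktimer server until notified,
            // then re-acquires MUTEX before returning. The loop guards
            // against spurious wakeups.
            CONDVAR.wait(&MUTEX);
        }
        MUTEX.unlock();
    }

    unsafe fn mark_ready() {
        MUTEX.lock();
        READY.store(true, Relaxed);
        MUTEX.unlock();
        CONDVAR.notify_one(); // returns immediately if no thread is waiting
    }
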
diff --git a/library/std/src/sys/xous/locks/mod.rs b/library/std/src/sys/xous/locks/mod.rs
new file mode 100644
index 00000000000..f3c5c5d9fb0
--- /dev/null
+++ b/library/std/src/sys/xous/locks/mod.rs
@@ -0,0 +1,7 @@
+mod condvar;
+mod mutex;
+mod rwlock;
+
+pub use condvar::*;
+pub use mutex::*;
+pub use rwlock::*;
diff --git a/library/std/src/sys/xous/locks/mutex.rs b/library/std/src/sys/xous/locks/mutex.rs
new file mode 100644
index 00000000000..ea51776d54e
--- /dev/null
+++ b/library/std/src/sys/xous/locks/mutex.rs
@@ -0,0 +1,116 @@
+use crate::os::xous::ffi::{blocking_scalar, do_yield, scalar};
+use crate::os::xous::services::ticktimer_server;
+use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
+
+pub struct Mutex {
+    /// The "locked" value indicates how many threads are waiting on this
+    /// Mutex. Possible values are:
+    ///     0: The lock is unlocked
+    ///     1: The lock is locked and uncontended
+    ///   >=2: The lock is locked and contended
+    ///
+    /// A lock is "contended" when there is more than one thread waiting
+    /// for a lock, or it is locked for long periods of time. Rather than
+    /// spinning, these locks send a Message to the ticktimer server
+    /// requesting that they be woken up when a lock is unlocked.
+    locked: AtomicUsize,
+
+    /// Whether this Mutex ever was contended, and therefore made a trip
+    /// to the ticktimer server. If this was never set, then we were never
+    /// on the slow path and can skip deregistering the mutex.
+    contended: AtomicBool,
+}
+
+impl Mutex {
+    #[inline]
+    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+    pub const fn new() -> Mutex {
+        Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) }
+    }
+
+    fn index(&self) -> usize {
+        self as *const Mutex as usize
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        // Try multiple times to acquire the lock without resorting to the ticktimer
+        // server. For locks that are held for a short amount of time, this will
+        // result in the ticktimer server never getting invoked. The `locked` value
+        // will be either 0 or 1.
+        for _attempts in 0..3 {
+            if unsafe { self.try_lock() } {
+                return;
+            }
+            do_yield();
+        }
+
+        // Try one more time to lock. If the lock was released between the attempts
+        // above and here, the `locked` value will be 1 afterwards and we own the lock.
+        // If the lock is still held, the value will be greater than 1, for example
+        // when multiple other threads are waiting on it.
+        if unsafe { self.try_lock_or_poison() } {
+            return;
+        }
+
+        // When this mutex is dropped, we will need to deregister it with the server.
+        self.contended.store(true, Relaxed);
+
+        // The lock is now "contended". When it is released, a message will be sent to
+        // the ticktimer server to wake up a waiting thread. Note that this may already
+        // have happened, so the actual value of `locked` may be anything (0, 1, 2, ...).
+        blocking_scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(),
+        )
+        .expect("failure to send LockMutex command");
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        let prev = self.locked.fetch_sub(1, SeqCst);
+
+        // If the previous value was 1, this was a "fast path" unlock and there is
+        // no need to involve the ticktimer server.
+        if prev == 1 {
+            return;
+        }
+
+        // If it was 0, then something has gone seriously wrong and the counter
+        // has just wrapped around.
+        if prev == 0 {
+            panic!("mutex lock count underflowed");
+        }
+
+        // Unblock one thread that is waiting on this mutex.
+        scalar(
+            ticktimer_server(),
+            crate::os::xous::services::TicktimerScalar::UnlockMutex(self.index()).into(),
+        )
+        .expect("failure to send UnlockMutex command");
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn try_lock_or_poison(&self) -> bool {
+        self.locked.fetch_add(1, SeqCst) == 0
+    }
+}
+
+impl Drop for Mutex {
+    fn drop(&mut self) {
+        // If there was Mutex contention, then we involved the ticktimer. Free
+        // the resources associated with this Mutex as it is deallocated.
+        if self.contended.load(Relaxed) {
+            scalar(
+                ticktimer_server(),
+                crate::os::xous::services::TicktimerScalar::FreeMutex(self.index()).into(),
+            )
+            .ok();
+        }
+    }
+}
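
The counter keeps the common case message-free: a `compare_exchange(0, 1)` and
a `fetch_sub(1)` that observes 1 never touch the ticktimer server; only a
`fetch_add` that observes a nonzero value (contention) falls back to the
`LockMutex`/`UnlockMutex` messages. A hypothetical single-threaded walk through
the fast path:

    // All three operations stay in user space: no message is sent to the
    // ticktimer server, and `contended` remains false throughout.
    unsafe fn fast_path_demo(m: &Mutex) {
        assert!(m.try_lock());  // locked: 0 -> 1
        assert!(!m.try_lock()); // already held: compare_exchange(0, 1) fails
        m.unlock();             // fetch_sub(1) returns 1: fast-path unlock
    }
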
diff --git a/library/std/src/sys/xous/locks/rwlock.rs b/library/std/src/sys/xous/locks/rwlock.rs
new file mode 100644
index 00000000000..618da758adf
--- /dev/null
+++ b/library/std/src/sys/xous/locks/rwlock.rs
@@ -0,0 +1,72 @@
+use crate::os::xous::ffi::do_yield;
+use crate::sync::atomic::{AtomicIsize, Ordering::SeqCst};
+
+pub struct RwLock {
+    /// The "mode" value indicates how many threads are waiting on this
+    /// Mutex. Possible values are:
+    ///    -1: The lock is locked for writing
+    ///     0: The lock is unlocked
+    ///   >=1: The lock is locked for reading
+    ///
+    /// This currently spins waiting for the lock to be freed. An
+    /// optimization would be to involve the ticktimer server to
+    /// coordinate unlocks.
+    mode: AtomicIsize,
+}
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+impl RwLock {
+    #[inline]
+    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+    pub const fn new() -> RwLock {
+        RwLock { mode: AtomicIsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn read(&self) {
+        while !unsafe { self.try_read() } {
+            do_yield();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_read(&self) -> bool {
+        // Read the current value; the check and update below are not one atomic step.
+        let current = self.mode.load(SeqCst);
+
+        // If it's currently locked for writing, then we cannot read.
+        if current < 0 {
+            return false;
+        }
+
+        // Attempt to take a read lock. If `mode` has changed since the read above,
+        // this compare_exchange will fail and we will not obtain the lock, even
+        // though it might still have been possible to acquire it.
+        let new = current + 1;
+        self.mode.compare_exchange(current, new, SeqCst, SeqCst).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn write(&self) {
+        while !unsafe { self.try_write() } {
+            do_yield();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_write(&self) -> bool {
+        self.mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        self.mode.fetch_sub(1, SeqCst);
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        assert_eq!(self.mode.compare_exchange(-1, 0, SeqCst, SeqCst), Ok(-1));
+    }
+}
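
Because readers simply increment `mode` and a writer requires it to be exactly
0, the whole lock fits in a single `AtomicIsize`. A hypothetical single-threaded
walk through the mode transitions:

    unsafe fn rwlock_demo(l: &RwLock) {
        assert!(l.try_read());   // mode: 0 -> 1
        assert!(l.try_read());   // mode: 1 -> 2 (readers share the lock)
        assert!(!l.try_write()); // fails: writers need mode == 0
        l.read_unlock();         // mode: 2 -> 1
        l.read_unlock();         // mode: 1 -> 0
        assert!(l.try_write());  // mode: 0 -> -1
        l.write_unlock();        // mode: -1 -> 0
    }
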
diff --git a/library/std/src/sys/xous/mod.rs b/library/std/src/sys/xous/mod.rs
index 79d9c00f64a..6d5c218d195 100644
--- a/library/std/src/sys/xous/mod.rs
+++ b/library/std/src/sys/xous/mod.rs
@@ -11,7 +11,6 @@ pub mod env;
 pub mod fs;
 #[path = "../unsupported/io.rs"]
 pub mod io;
-#[path = "../unsupported/locks/mod.rs"]
 pub mod locks;
 #[path = "../unsupported/net.rs"]
 pub mod net;