Diffstat (limited to 'library/std/src/sys/itron')
-rw-r--r--  library/std/src/sys/itron/abi.rs         | 155
-rw-r--r--  library/std/src/sys/itron/condvar.rs     | 294
-rw-r--r--  library/std/src/sys/itron/error.rs       | 159
-rw-r--r--  library/std/src/sys/itron/mutex.rs       | 183
-rw-r--r--  library/std/src/sys/itron/spin.rs        | 164
-rw-r--r--  library/std/src/sys/itron/task.rs        | 44
-rw-r--r--  library/std/src/sys/itron/thread.rs      | 352
-rw-r--r--  library/std/src/sys/itron/time.rs        | 123
-rw-r--r--  library/std/src/sys/itron/time/tests.rs  | 33
9 files changed, 1507 insertions, 0 deletions
diff --git a/library/std/src/sys/itron/abi.rs b/library/std/src/sys/itron/abi.rs
new file mode 100644
index 00000000000..f99ee4fa897
--- /dev/null
+++ b/library/std/src/sys/itron/abi.rs
@@ -0,0 +1,155 @@
+//! ABI for μITRON derivatives
+pub type int_t = crate::os::raw::c_int;
+pub type uint_t = crate::os::raw::c_uint;
+pub type bool_t = int_t;
+
+/// Kernel object ID
+pub type ID = int_t;
+
+/// The current task.
+pub const TSK_SELF: ID = 0;
+
+/// Relative time
+pub type RELTIM = u32;
+
+/// Timeout (a valid `RELTIM` value or `TMO_FEVR`)
+pub type TMO = u32;
+
+/// The infinite timeout value
+pub const TMO_FEVR: TMO = TMO::MAX;
+
+/// The maximum valid value of `RELTIM`
+pub const TMAX_RELTIM: RELTIM = 4_000_000_000;
+
+/// System time
+pub type SYSTIM = u64;
+
+/// Error code type
+pub type ER = int_t;
+
+/// Error code type, `ID` on success
+pub type ER_ID = int_t;
+
+/// Task or interrupt priority
+pub type PRI = int_t;
+
+/// The special value of `PRI` representing the current task's priority.
+pub const TPRI_SELF: PRI = 0;
+
+/// Object attributes
+pub type ATR = uint_t;
+
+/// Use the priority inheritance protocol
+#[cfg(target_os = "solid_asp3")]
+pub const TA_INHERIT: ATR = 0x02;
+
+/// Activate the task on creation
+pub const TA_ACT: ATR = 0x01;
+
+/// The maximum count of a semaphore
+pub const TMAX_MAXSEM: uint_t = uint_t::MAX;
+
+/// Callback parameter
+pub type EXINF = isize;
+
+/// Task entrypoint
+pub type TASK = Option<unsafe extern "C" fn(EXINF)>;
+
+// Error codes
+pub const E_OK: ER = 0;
+pub const E_SYS: ER = -5;
+pub const E_NOSPT: ER = -9;
+pub const E_RSFN: ER = -10;
+pub const E_RSATR: ER = -11;
+pub const E_PAR: ER = -17;
+pub const E_ID: ER = -18;
+pub const E_CTX: ER = -25;
+pub const E_MACV: ER = -26;
+pub const E_OACV: ER = -27;
+pub const E_ILUSE: ER = -28;
+pub const E_NOMEM: ER = -33;
+pub const E_NOID: ER = -34;
+pub const E_NORES: ER = -35;
+pub const E_OBJ: ER = -41;
+pub const E_NOEXS: ER = -42;
+pub const E_QOVR: ER = -43;
+pub const E_RLWAI: ER = -49;
+pub const E_TMOUT: ER = -50;
+pub const E_DLT: ER = -51;
+pub const E_CLS: ER = -52;
+pub const E_RASTER: ER = -53;
+pub const E_WBLK: ER = -57;
+pub const E_BOVR: ER = -58;
+pub const E_COMM: ER = -65;
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CSEM {
+    pub sematr: ATR,
+    pub isemcnt: uint_t,
+    pub maxsem: uint_t,
+}
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CMTX {
+    pub mtxatr: ATR,
+    pub ceilpri: PRI,
+}
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CTSK {
+    pub tskatr: ATR,
+    pub exinf: EXINF,
+    pub task: TASK,
+    pub itskpri: PRI,
+    pub stksz: usize,
+    pub stk: *mut u8,
+}
+
+extern "C" {
+    #[link_name = "__asp3_acre_tsk"]
+    pub fn acre_tsk(pk_ctsk: *const T_CTSK) -> ER_ID;
+    #[link_name = "__asp3_get_tid"]
+    pub fn get_tid(p_tskid: *mut ID) -> ER;
+    #[link_name = "__asp3_dly_tsk"]
+    pub fn dly_tsk(dlytim: RELTIM) -> ER;
+    #[link_name = "__asp3_ter_tsk"]
+    pub fn ter_tsk(tskid: ID) -> ER;
+    #[link_name = "__asp3_del_tsk"]
+    pub fn del_tsk(tskid: ID) -> ER;
+    #[link_name = "__asp3_get_pri"]
+    pub fn get_pri(tskid: ID, p_tskpri: *mut PRI) -> ER;
+    #[link_name = "__asp3_rot_rdq"]
+    pub fn rot_rdq(tskpri: PRI) -> ER;
+    #[link_name = "__asp3_slp_tsk"]
+    pub fn slp_tsk() -> ER;
+    #[link_name = "__asp3_tslp_tsk"]
+    pub fn tslp_tsk(tmout: TMO) -> ER;
+    #[link_name = "__asp3_wup_tsk"]
+    pub fn wup_tsk(tskid: ID) -> ER;
+    #[link_name = "__asp3_unl_cpu"]
+    pub fn unl_cpu() -> ER;
+    #[link_name = "__asp3_dis_dsp"]
+    pub fn dis_dsp() -> ER;
+    #[link_name = "__asp3_ena_dsp"]
+    pub fn ena_dsp() -> ER;
+    #[link_name = "__asp3_sns_dsp"]
+    pub fn sns_dsp() -> bool_t;
+    #[link_name = "__asp3_get_tim"]
+    pub fn get_tim(p_systim: *mut SYSTIM) -> ER;
+    #[link_name = "__asp3_acre_mtx"]
+    pub fn acre_mtx(pk_cmtx: *const T_CMTX) -> ER_ID;
+    #[link_name = "__asp3_del_mtx"]
+    pub fn del_mtx(mtxid: ID) -> ER;
+    #[link_name = "__asp3_loc_mtx"]
+    pub fn loc_mtx(mtxid: ID) -> ER;
+    #[link_name = "__asp3_ploc_mtx"]
+    pub fn ploc_mtx(mtxid: ID) -> ER;
+    #[link_name = "__asp3_tloc_mtx"]
+    pub fn tloc_mtx(mtxid: ID, tmout: TMO) -> ER;
+    #[link_name = "__asp3_unl_mtx"]
+    pub fn unl_mtx(mtxid: ID) -> ER;
+    pub fn exd_tsk() -> ER;
+}
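
The bindings above are thin `extern "C"` declarations, so every caller is responsible for the μITRON calling convention: pass an out-parameter pointer, check the returned `ER` for a negative value, and only then read the result. A minimal, host-runnable sketch of that pattern follows; `get_tim` is mocked here, since the real `__asp3_get_tim` service call requires the kernel:

```rust
// Hypothetical sketch of the out-parameter convention implied by the ABI above.
type ER = i32;
type SYSTIM = u64;
const E_OK: ER = 0;

// Stand-in for the kernel service call `__asp3_get_tim`.
unsafe fn get_tim(p_systim: *mut SYSTIM) -> ER {
    *p_systim = 1_234_567;
    E_OK
}

fn main() {
    use std::mem::MaybeUninit;
    // Services report failure with a negative `ER`, so callers pair
    // `MaybeUninit` with an error check before `assume_init`.
    let mut out = MaybeUninit::<SYSTIM>::uninit();
    let er = unsafe { get_tim(out.as_mut_ptr()) };
    assert!(er >= 0, "get_tim failed: {er}");
    let now = unsafe { out.assume_init() };
    println!("system time: {now} µs");
}
```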
diff --git a/library/std/src/sys/itron/condvar.rs b/library/std/src/sys/itron/condvar.rs
new file mode 100644
index 00000000000..dac4b8abfc4
--- /dev/null
+++ b/library/std/src/sys/itron/condvar.rs
@@ -0,0 +1,294 @@
+//! Condition variable implementation based on user-space wait queues.
+use super::{abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong};
+use crate::{mem::replace, ptr::NonNull, sys::mutex::Mutex, time::Duration};
+
+// The implementation is inspired by the queue-based implementation shown in
+// Andrew D. Birrell's paper "Implementing Condition Variables with Semaphores"
+
+pub struct Condvar {
+    waiters: SpinMutex<waiter_queue::WaiterQueue>,
+}
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+pub type MovableCondvar = Condvar;
+
+impl Condvar {
+    pub const fn new() -> Condvar {
+        Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) }
+    }
+
+    pub unsafe fn init(&mut self) {}
+
+    pub unsafe fn notify_one(&self) {
+        self.waiters.with_locked(|waiters| {
+            if let Some(task) = waiters.pop_front() {
+                // Unpark the task
+                match unsafe { abi::wup_tsk(task) } {
+                    // The task already has a token.
+                    abi::E_QOVR => {}
+                    // Can't undo the effect; abort the program on failure
+                    er => {
+                        expect_success_aborting(er, &"wup_tsk");
+                    }
+                }
+            }
+        });
+    }
+
+    pub unsafe fn notify_all(&self) {
+        self.waiters.with_locked(|waiters| {
+            while let Some(task) = waiters.pop_front() {
+                // Unpark the task
+                match unsafe { abi::wup_tsk(task) } {
+                    // The task already has a token.
+                    abi::E_QOVR => {}
+                    // Can't undo the effect; abort the program on failure
+                    er => {
+                        expect_success_aborting(er, &"wup_tsk");
+                    }
+                }
+            }
+        });
+    }
+
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        // Construct `Waiter`.
+        let mut waiter = waiter_queue::Waiter::new();
+        let waiter = NonNull::from(&mut waiter);
+
+        self.waiters.with_locked(|waiters| unsafe {
+            waiters.insert(waiter);
+        });
+
+        unsafe { mutex.unlock() };
+
+        // Wait until `waiter` is removed from the queue
+        loop {
+            // Park the current task
+            expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk");
+
+            if !self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) {
+                break;
+            }
+        }
+
+        unsafe { mutex.lock() };
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        // Construct and pin `Waiter`
+        let mut waiter = waiter_queue::Waiter::new();
+        let waiter = NonNull::from(&mut waiter);
+
+        self.waiters.with_locked(|waiters| unsafe {
+            waiters.insert(waiter);
+        });
+
+        unsafe { mutex.unlock() };
+
+        // Park the current task and do not wake up until the timeout elapses
+        // or the task gets woken up by `notify_*`
+        match with_tmos_strong(dur, |tmo| {
+            let er = unsafe { abi::tslp_tsk(tmo) };
+            if er == 0 {
+                // We were unparked. Are we really dequeued?
+                if self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) {
+                    // No we are not. Continue waiting.
+                    return abi::E_TMOUT;
+                }
+            }
+            er
+        }) {
+            abi::E_TMOUT => {}
+            er => {
+                expect_success_aborting(er, &"tslp_tsk");
+            }
+        }
+
+        // Remove `waiter` from `self.waiters`. If `waiter` is still in
+        // `waiters`, it means we woke up because of a timeout. Otherwise,
+        // we woke up because of `notify_*`.
+        let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) });
+
+        unsafe { mutex.lock() };
+        success
+    }
+
+    pub unsafe fn destroy(&self) {}
+}
+
+mod waiter_queue {
+    use super::*;
+
+    pub struct WaiterQueue {
+        head: Option<ListHead>,
+    }
+
+    #[derive(Copy, Clone)]
+    struct ListHead {
+        first: NonNull<Waiter>,
+        last: NonNull<Waiter>,
+    }
+
+    unsafe impl Send for ListHead {}
+    unsafe impl Sync for ListHead {}
+
+    pub struct Waiter {
+        // These fields are only accessed through `&[mut] WaiterQueue`.
+        /// The waiting task's ID. Will be zeroed when the task is woken up
+        /// and removed from a queue.
+        task: abi::ID,
+        priority: abi::PRI,
+        prev: Option<NonNull<Waiter>>,
+        next: Option<NonNull<Waiter>>,
+    }
+
+    unsafe impl Send for Waiter {}
+    unsafe impl Sync for Waiter {}
+
+    impl Waiter {
+        #[inline]
+        pub fn new() -> Self {
+            let task = task::current_task_id();
+            let priority = task::task_priority(abi::TSK_SELF);
+
+            // Zeroness of `Waiter::task` indicates whether the `Waiter` is
+            // linked to a queue or not. This invariant is important for
+            // correctness.
+            debug_assert_ne!(task, 0);
+
+            Self { task, priority, prev: None, next: None }
+        }
+    }
+
+    impl WaiterQueue {
+        #[inline]
+        pub const fn new() -> Self {
+            Self { head: None }
+        }
+
+        /// # Safety
+        ///
+        ///  - The caller must own `*waiter_ptr`. The caller will lose
+        ///    ownership of it until `*waiter_ptr` is removed from `self`.
+        ///
+        ///  - `*waiter_ptr` must be valid until it's removed from the queue.
+        ///
+        ///  - `*waiter_ptr` must not have been previously inserted to a `WaiterQueue`.
+        ///
+        pub unsafe fn insert(&mut self, mut waiter_ptr: NonNull<Waiter>) {
+            unsafe {
+                let waiter = waiter_ptr.as_mut();
+
+                debug_assert!(waiter.prev.is_none());
+                debug_assert!(waiter.next.is_none());
+
+                if let Some(head) = &mut self.head {
+                    // Find the insertion position and insert `waiter`
+                    let insert_after = {
+                        let mut cursor = head.last;
+                        loop {
+                            if waiter.priority >= cursor.as_ref().priority {
+                                // `cursor` and all previous waiters have the same or higher
+                                // priority than `current_task_priority`. Insert the new
+                                // waiter right after `cursor`.
+                                break Some(cursor);
+                            }
+                            cursor = if let Some(prev) = cursor.as_ref().prev {
+                                prev
+                            } else {
+                                break None;
+                            };
+                        }
+                    };
+
+                    if let Some(mut insert_after) = insert_after {
+                        // Insert `waiter` after `insert_after`
+                        let insert_before = insert_after.as_ref().next;
+
+                        waiter.prev = Some(insert_after);
+                        insert_after.as_mut().next = Some(waiter_ptr);
+
+                        waiter.next = insert_before;
+                        if let Some(mut insert_before) = insert_before {
+                            insert_before.as_mut().prev = Some(waiter_ptr);
+                        } else {
+                            head.last = waiter_ptr;
+                        }
+                    } else {
+                        // Insert `waiter` to the front
+                        waiter.next = Some(head.first);
+                        head.first.as_mut().prev = Some(waiter_ptr);
+                        head.first = waiter_ptr;
+                    }
+                } else {
+                    // `waiter` is the only element
+                    self.head = Some(ListHead { first: waiter_ptr, last: waiter_ptr });
+                }
+            }
+        }
+
+        /// Given a `Waiter` that was previously inserted to `self`, remove
+        /// it from `self` if it's still there.
+        #[inline]
+        pub unsafe fn remove(&mut self, mut waiter_ptr: NonNull<Waiter>) -> bool {
+            unsafe {
+                let waiter = waiter_ptr.as_mut();
+                if waiter.task != 0 {
+                    let head = self.head.as_mut().unwrap();
+
+                    match (waiter.prev, waiter.next) {
+                        (Some(mut prev), Some(mut next)) => {
+                            prev.as_mut().next = Some(next);
+                            next.as_mut().prev = Some(prev);
+                        }
+                        (None, Some(mut next)) => {
+                            head.first = next;
+                            next.as_mut().prev = None;
+                        }
+                        (Some(mut prev), None) => {
+                            prev.as_mut().next = None;
+                            head.last = prev;
+                        }
+                        (None, None) => {
+                            self.head = None;
+                        }
+                    }
+
+                    waiter.task = 0;
+
+                    true
+                } else {
+                    false
+                }
+            }
+        }
+
+        /// Given a `Waiter` that was previously inserted to `self`, return a
+        /// flag indicating whether it's still in `self`.
+        #[inline]
+        pub unsafe fn is_queued(&self, waiter: NonNull<Waiter>) -> bool {
+            unsafe { waiter.as_ref().task != 0 }
+        }
+
+        pub fn pop_front(&mut self) -> Option<abi::ID> {
+            unsafe {
+                let head = self.head.as_mut()?;
+                let waiter = head.first.as_mut();
+
+                // Get the ID
+                let id = replace(&mut waiter.task, 0);
+
+                // Unlink the waiter
+                if let Some(mut next) = waiter.next {
+                    head.first = next;
+                    next.as_mut().prev = None;
+                } else {
+                    self.head = None;
+                }
+
+                Some(id)
+            }
+        }
+    }
+}
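
A key detail of the queue above is that a waiter returning from `slp_tsk` must confirm it was actually dequeued before proceeding, because a wakeup token can arrive for unrelated reasons. Below is a minimal, host-runnable model of that handshake, using `std::thread::park`/`unpark` as stand-ins for `slp_tsk`/`wup_tsk` (an analogy, not the target code):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let queued = Arc::new(AtomicBool::new(true)); // "still in the wait queue"
    let waiter = {
        let queued = Arc::clone(&queued);
        thread::spawn(move || {
            // Mirrors the `loop { slp_tsk(); if !is_queued { break } }` shape:
            // keep parking until the notifier has dequeued us.
            while queued.load(Ordering::Acquire) {
                thread::park(); // stand-in for `slp_tsk`
            }
        })
    };
    // Notifier side: dequeue first, then wake (mirrors `pop_front` + `wup_tsk`).
    queued.store(false, Ordering::Release);
    waiter.thread().unpark(); // stand-in for `wup_tsk`
    waiter.join().unwrap();
}
```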
diff --git a/library/std/src/sys/itron/error.rs b/library/std/src/sys/itron/error.rs
new file mode 100644
index 00000000000..830c60d329e
--- /dev/null
+++ b/library/std/src/sys/itron/error.rs
@@ -0,0 +1,159 @@
+use crate::{fmt, io::ErrorKind};
+
+use super::abi;
+
+/// Wraps a μITRON error code.
+#[derive(Debug, Copy, Clone)]
+pub struct ItronError {
+    er: abi::ER,
+}
+
+impl ItronError {
+    /// Construct `ItronError` from the specified error code. Returns `None` if the
+    /// error code does not represent a failure or warning.
+    #[inline]
+    pub fn new(er: abi::ER) -> Option<Self> {
+        if er < 0 { Some(Self { er }) } else { None }
+    }
+
+    /// Returns `Ok(er)` if `er` represents a success or `Err(_)` otherwise.
+    #[inline]
+    pub fn err_if_negative(er: abi::ER) -> Result<abi::ER, Self> {
+        if let Some(error) = Self::new(er) { Err(error) } else { Ok(er) }
+    }
+
+    /// Get the raw error code.
+    #[inline]
+    pub fn as_raw(&self) -> abi::ER {
+        self.er
+    }
+}
+
+impl fmt::Display for ItronError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Allow the platforms to extend `error_name`
+        if let Some(name) = crate::sys::error::error_name(self.er) {
+            write!(f, "{} ({})", name, self.er)
+        } else {
+            write!(f, "{}", self.er)
+        }
+    }
+}
+
+/// Describe the specified μITRON error code. Returns `None` if it's an
+/// undefined error code.
+pub fn error_name(er: abi::ER) -> Option<&'static str> {
+    match er {
+        // Success
+        er if er >= 0 => None,
+
+        // μITRON 4.0
+        abi::E_SYS => Some("system error"),
+        abi::E_NOSPT => Some("unsupported function"),
+        abi::E_RSFN => Some("reserved function code"),
+        abi::E_RSATR => Some("reserved attribute"),
+        abi::E_PAR => Some("parameter error"),
+        abi::E_ID => Some("invalid ID number"),
+        abi::E_CTX => Some("context error"),
+        abi::E_MACV => Some("memory access violation"),
+        abi::E_OACV => Some("object access violation"),
+        abi::E_ILUSE => Some("illegal service call use"),
+        abi::E_NOMEM => Some("insufficient memory"),
+        abi::E_NOID => Some("no ID number available"),
+        abi::E_OBJ => Some("object state error"),
+        abi::E_NOEXS => Some("non-existent object"),
+        abi::E_QOVR => Some("queue overflow"),
+        abi::E_RLWAI => Some("forced release from waiting"),
+        abi::E_TMOUT => Some("polling failure or timeout"),
+        abi::E_DLT => Some("waiting object deleted"),
+        abi::E_CLS => Some("waiting object state changed"),
+        abi::E_WBLK => Some("non-blocking code accepted"),
+        abi::E_BOVR => Some("buffer overflow"),
+
+        // The TOPPERS third generation kernels
+        abi::E_NORES => Some("insufficient system resources"),
+        abi::E_RASTER => Some("termination request raised"),
+        abi::E_COMM => Some("communication failure"),
+
+        _ => None,
+    }
+}
+
+pub fn decode_error_kind(er: abi::ER) -> ErrorKind {
+    match er {
+        // Success
+        er if er >= 0 => ErrorKind::Uncategorized,
+
+        // μITRON 4.0
+        // abi::E_SYS
+        abi::E_NOSPT => ErrorKind::Unsupported, // Some("unsupported function"),
+        abi::E_RSFN => ErrorKind::InvalidInput, // Some("reserved function code"),
+        abi::E_RSATR => ErrorKind::InvalidInput, // Some("reserved attribute"),
+        abi::E_PAR => ErrorKind::InvalidInput,  // Some("parameter error"),
+        abi::E_ID => ErrorKind::NotFound,       // Some("invalid ID number"),
+        // abi::E_CTX
+        abi::E_MACV => ErrorKind::PermissionDenied, // Some("memory access violation"),
+        abi::E_OACV => ErrorKind::PermissionDenied, // Some("object access violation"),
+        // abi::E_ILUSE
+        abi::E_NOMEM => ErrorKind::OutOfMemory, // Some("insufficient memory"),
+        abi::E_NOID => ErrorKind::OutOfMemory,  // Some("no ID number available"),
+        // abi::E_OBJ
+        abi::E_NOEXS => ErrorKind::NotFound, // Some("non-existent object"),
+        // abi::E_QOVR
+        abi::E_RLWAI => ErrorKind::Interrupted, // Some("forced release from waiting"),
+        abi::E_TMOUT => ErrorKind::TimedOut,    // Some("polling failure or timeout"),
+        // abi::E_DLT
+        // abi::E_CLS
+        // abi::E_WBLK
+        // abi::E_BOVR
+
+        // The TOPPERS third generation kernels
+        abi::E_NORES => ErrorKind::OutOfMemory, // Some("insufficient system resources"),
+        // abi::E_RASTER
+        // abi::E_COMM
+        _ => ErrorKind::Uncategorized,
+    }
+}
+
+/// Similar to `ItronError::err_if_negative(er).expect()` except that, while
+/// panicking, it prints the message to `panic_output` and aborts the program
+/// instead. This ensures the error message is not obscured by double
+/// panicking.
+///
+/// This is useful for diagnosing creation failures of synchronization
+/// primitives that are used by `std`'s internal mechanisms. Such failures
+/// are common when the system is mis-configured to provide a too-small pool for
+/// kernel objects.
+#[inline]
+pub fn expect_success(er: abi::ER, msg: &&str) -> abi::ER {
+    match ItronError::err_if_negative(er) {
+        Ok(x) => x,
+        Err(e) => fail(e, msg),
+    }
+}
+
+/// Similar to `ItronError::err_if_negative(er).expect()` but aborts instead.
+///
+/// Use this where panicking is not allowed or the effect of the failure
+/// would be persistent.
+#[inline]
+pub fn expect_success_aborting(er: abi::ER, msg: &&str) -> abi::ER {
+    match ItronError::err_if_negative(er) {
+        Ok(x) => x,
+        Err(e) => fail_aborting(e, msg),
+    }
+}
+
+#[cold]
+pub fn fail(e: impl fmt::Display, msg: &&str) -> ! {
+    if crate::thread::panicking() {
+        fail_aborting(e, msg)
+    } else {
+        panic!("{} failed: {}", *msg, e)
+    }
+}
+
+#[cold]
+pub fn fail_aborting(e: impl fmt::Display, msg: &&str) -> ! {
+    rtabort!("{} failed: {}", *msg, e)
+}
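
The whole module hinges on the μITRON convention that any negative `ER` is a failure, while non-negative values, including object IDs returned through `ER_ID`, are successes. A self-contained sketch of `err_if_negative` outside of `std` (the names mirror the code above, but this is purely illustrative):

```rust
type ER = i32;

#[derive(Debug)]
struct ItronError {
    er: ER,
}

// Negative codes become errors; everything else passes through, so the
// same helper works for plain `ER` and for IDs returned via `ER_ID`.
fn err_if_negative(er: ER) -> Result<ER, ItronError> {
    if er < 0 { Err(ItronError { er }) } else { Ok(er) }
}

fn main() {
    assert_eq!(err_if_negative(42).unwrap(), 42); // e.g. a freshly created object ID
    assert_eq!(err_if_negative(-50).unwrap_err().er, -50); // E_TMOUT
}
```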
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
new file mode 100644
index 00000000000..e01f595ac54
--- /dev/null
+++ b/library/std/src/sys/itron/mutex.rs
@@ -0,0 +1,183 @@
+//! Mutex implementation backed by μITRON mutexes. Assumes `acre_mtx` and
+//! `TA_INHERIT` are available.
+use super::{
+    abi,
+    error::{expect_success, expect_success_aborting, fail, ItronError},
+    spin::SpinIdOnceCell,
+};
+use crate::cell::UnsafeCell;
+
+pub struct Mutex {
+    /// The ID of the underlying mutex object
+    mtx: SpinIdOnceCell<()>,
+}
+
+pub type MovableMutex = Mutex;
+
+/// Create a mutex object. This function never panics.
+fn new_mtx() -> Result<abi::ID, ItronError> {
+    ItronError::err_if_negative(unsafe {
+        abi::acre_mtx(&abi::T_CMTX {
+            // Priority inheritance mutex
+            mtxatr: abi::TA_INHERIT,
+            // Unused
+            ceilpri: 0,
+        })
+    })
+}
+
+impl Mutex {
+    pub const fn new() -> Mutex {
+        Mutex { mtx: SpinIdOnceCell::new() }
+    }
+
+    pub unsafe fn init(&mut self) {
+        // Initialize `self.mtx` eagerly
+        let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
+        unsafe { self.mtx.set_unchecked((id, ())) };
+    }
+
+    /// Get the inner mutex's ID, which is lazily created.
+    fn raw(&self) -> abi::ID {
+        match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {
+            Ok((id, ())) => id,
+            Err(e) => fail(e, &"acre_mtx"),
+        }
+    }
+
+    pub unsafe fn lock(&self) {
+        let mtx = self.raw();
+        expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx");
+    }
+
+    pub unsafe fn unlock(&self) {
+        let mtx = unsafe { self.mtx.get_unchecked().0 };
+        expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx");
+    }
+
+    pub unsafe fn try_lock(&self) -> bool {
+        let mtx = self.raw();
+        match unsafe { abi::ploc_mtx(mtx) } {
+            abi::E_TMOUT => false,
+            er => {
+                expect_success(er, &"ploc_mtx");
+                true
+            }
+        }
+    }
+
+    pub unsafe fn destroy(&self) {
+        if let Some(mtx) = self.mtx.get().map(|x| x.0) {
+            expect_success_aborting(unsafe { abi::del_mtx(mtx) }, &"del_mtx");
+        }
+    }
+}
+
+pub(super) struct MutexGuard<'a>(&'a Mutex);
+
+impl<'a> MutexGuard<'a> {
+    #[inline]
+    pub(super) fn lock(x: &'a Mutex) -> Self {
+        unsafe { x.lock() };
+        Self(x)
+    }
+}
+
+impl Drop for MutexGuard<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { self.0.unlock() };
+    }
+}
+
+// A reentrant mutex backed by a μITRON mutex and a separate lock count.
+// The kernel mutex reports `E_OBJ` when the owning task relocks it, so
+// nested acquisitions by the owner are tracked in `count` instead.
+pub struct ReentrantMutex {
+    /// The ID of the underlying mutex object
+    mtx: abi::ID,
+    /// The lock count.
+    count: UnsafeCell<usize>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+impl ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
+        ReentrantMutex { mtx: 0, count: UnsafeCell::new(0) }
+    }
+
+    pub unsafe fn init(&mut self) {
+        self.mtx = expect_success(
+            unsafe {
+                abi::acre_mtx(&abi::T_CMTX {
+                    // Priority inheritance mutex
+                    mtxatr: abi::TA_INHERIT,
+                    // Unused
+                    ceilpri: 0,
+                })
+            },
+            &"acre_mtx",
+        );
+    }
+
+    pub unsafe fn lock(&self) {
+        match unsafe { abi::loc_mtx(self.mtx) } {
+            abi::E_OBJ => {
+                // Recursive lock
+                unsafe {
+                    let count = &mut *self.count.get();
+                    if let Some(new_count) = count.checked_add(1) {
+                        *count = new_count;
+                    } else {
+                        // counter overflow
+                        rtabort!("lock count overflow");
+                    }
+                }
+            }
+            er => {
+                expect_success(er, &"loc_mtx");
+            }
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        unsafe {
+            let count = &mut *self.count.get();
+            if *count > 0 {
+                *count -= 1;
+                return;
+            }
+        }
+
+        expect_success_aborting(unsafe { abi::unl_mtx(self.mtx) }, &"unl_mtx");
+    }
+
+    pub unsafe fn try_lock(&self) -> bool {
+        let er = unsafe { abi::ploc_mtx(self.mtx) };
+        if er == abi::E_OBJ {
+            // Recursive lock
+            unsafe {
+                let count = &mut *self.count.get();
+                if let Some(new_count) = count.checked_add(1) {
+                    *count = new_count;
+                } else {
+                    // counter overflow
+                    rtabort!("lock count overflow");
+                }
+            }
+            true
+        } else if er == abi::E_TMOUT {
+            // Locked by another thread
+            false
+        } else {
+            expect_success(er, &"ploc_mtx");
+            // Top-level lock by the current thread
+            true
+        }
+    }
+
+    pub unsafe fn destroy(&self) {
+        expect_success_aborting(unsafe { abi::del_mtx(self.mtx) }, &"del_mtx");
+    }
+}
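
The `ReentrantMutex` logic above leans on the kernel returning `E_OBJ` when the owning task relocks its own mutex. Here is a host-runnable model of the counting scheme (an assumption-level sketch, with the kernel mutex reduced to a boolean):

```rust
struct ModelReentrant {
    owned: bool,  // stands in for the kernel-side "locked by me" state
    count: usize, // extra acquisitions beyond the first
}

impl ModelReentrant {
    fn lock(&mut self) {
        if self.owned {
            // The kernel would return E_OBJ; the wrapper bumps the count instead.
            self.count = self.count.checked_add(1).expect("lock count overflow");
        } else {
            self.owned = true; // loc_mtx succeeded
        }
    }
    fn unlock(&mut self) {
        if self.count > 0 {
            self.count -= 1; // undo one nested lock, keep ownership
        } else {
            self.owned = false; // final unlock: unl_mtx
        }
    }
}

fn main() {
    let mut m = ModelReentrant { owned: false, count: 0 };
    m.lock();
    m.lock(); // nested acquisition
    m.unlock();
    assert!(m.owned); // still held after one unlock
    m.unlock();
    assert!(!m.owned);
}
```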
diff --git a/library/std/src/sys/itron/spin.rs b/library/std/src/sys/itron/spin.rs
new file mode 100644
index 00000000000..d0149d1f037
--- /dev/null
+++ b/library/std/src/sys/itron/spin.rs
@@ -0,0 +1,164 @@
+use super::abi;
+use crate::{
+    cell::UnsafeCell,
+    convert::TryFrom,
+    mem::MaybeUninit,
+    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+/// A mutex implemented by `dis_dsp` (for intra-core synchronization) and a
+/// spinlock (for inter-core synchronization).
+pub struct SpinMutex<T = ()> {
+    locked: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+impl<T> SpinMutex<T> {
+    #[inline]
+    pub const fn new(x: T) -> Self {
+        Self { locked: AtomicBool::new(false), data: UnsafeCell::new(x) }
+    }
+
+    /// Acquire a lock.
+    #[inline]
+    pub fn with_locked<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
+        struct SpinMutexGuard<'a>(&'a AtomicBool);
+
+        impl Drop for SpinMutexGuard<'_> {
+            #[inline]
+            fn drop(&mut self) {
+                self.0.store(false, Ordering::Release);
+                unsafe { abi::ena_dsp() };
+            }
+        }
+
+        let _guard;
+        if unsafe { abi::sns_dsp() } == 0 {
+            let er = unsafe { abi::dis_dsp() };
+            debug_assert!(er >= 0);
+
+            // Wait until the current processor acquires a lock.
+            while self.locked.swap(true, Ordering::Acquire) {}
+
+            _guard = SpinMutexGuard(&self.locked);
+        }
+
+        f(unsafe { &mut *self.data.get() })
+    }
+}
+
+/// `OnceCell<(abi::ID, T)>` implemented by `dis_dsp` (for intra-core
+/// synchronization) and a spinlock (for inter-core synchronization).
+///
+/// It's assumed that `0` is not a valid ID, and all kernel
+/// object IDs fall into range `1..=usize::MAX`.
+pub struct SpinIdOnceCell<T = ()> {
+    id: AtomicUsize,
+    spin: SpinMutex<()>,
+    extra: UnsafeCell<MaybeUninit<T>>,
+}
+
+const ID_UNINIT: usize = 0;
+
+impl<T> SpinIdOnceCell<T> {
+    #[inline]
+    pub const fn new() -> Self {
+        Self {
+            id: AtomicUsize::new(ID_UNINIT),
+            extra: UnsafeCell::new(MaybeUninit::uninit()),
+            spin: SpinMutex::new(()),
+        }
+    }
+
+    #[inline]
+    pub fn get(&self) -> Option<(abi::ID, &T)> {
+        match self.id.load(Ordering::Acquire) {
+            ID_UNINIT => None,
+            id => Some((id as abi::ID, unsafe { (&*self.extra.get()).assume_init_ref() })),
+        }
+    }
+
+    #[inline]
+    pub fn get_mut(&mut self) -> Option<(abi::ID, &mut T)> {
+        match *self.id.get_mut() {
+            ID_UNINIT => None,
+            id => Some((id as abi::ID, unsafe { (&mut *self.extra.get()).assume_init_mut() })),
+        }
+    }
+
+    #[inline]
+    pub unsafe fn get_unchecked(&self) -> (abi::ID, &T) {
+        (self.id.load(Ordering::Acquire) as abi::ID, unsafe {
+            (&*self.extra.get()).assume_init_ref()
+        })
+    }
+
+    /// Assign the content without checking if it's already initialized or
+    /// being initialized.
+    pub unsafe fn set_unchecked(&self, (id, extra): (abi::ID, T)) {
+        debug_assert!(self.get().is_none());
+
+        // Assumption: A positive `abi::ID` fits in `usize`.
+        debug_assert!(id >= 0);
+        debug_assert!(usize::try_from(id).is_ok());
+        let id = id as usize;
+
+        unsafe { *self.extra.get() = MaybeUninit::new(extra) };
+        self.id.store(id, Ordering::Release);
+    }
+
+    /// Gets the contents of the cell, initializing it with `f` if
+    /// the cell was empty. If the cell was empty and `f` failed, an
+    /// error is returned.
+    ///
+    /// Warning: `f` must not perform a blocking operation, which
+    /// includes panicking.
+    #[inline]
+    pub fn get_or_try_init<F, E>(&self, f: F) -> Result<(abi::ID, &T), E>
+    where
+        F: FnOnce() -> Result<(abi::ID, T), E>,
+    {
+        // Fast path
+        if let Some(x) = self.get() {
+            return Ok(x);
+        }
+
+        self.initialize(f)?;
+
+        debug_assert!(self.get().is_some());
+
+        // Safety: The inner value has been initialized
+        Ok(unsafe { self.get_unchecked() })
+    }
+
+    fn initialize<F, E>(&self, f: F) -> Result<(), E>
+    where
+        F: FnOnce() -> Result<(abi::ID, T), E>,
+    {
+        self.spin.with_locked(|_| {
+            if self.id.load(Ordering::Relaxed) == ID_UNINIT {
+                let (initialized_id, initialized_extra) = f()?;
+
+                // Assumption: A positive `abi::ID` fits in `usize`.
+                debug_assert!(initialized_id >= 0);
+                debug_assert!(usize::try_from(initialized_id).is_ok());
+                let initialized_id = initialized_id as usize;
+
+                // Store the initialized contents. Use the release ordering to
+                // make sure the write is visible to the callers of `get`.
+                unsafe { *self.extra.get() = MaybeUninit::new(initialized_extra) };
+                self.id.store(initialized_id, Ordering::Release);
+            }
+            Ok(())
+        })
+    }
+}
+
+impl<T> Drop for SpinIdOnceCell<T> {
+    #[inline]
+    fn drop(&mut self) {
+        if self.get_mut().is_some() {
+            unsafe { (&mut *self.extra.get()).assume_init_drop() };
+        }
+    }
+}
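
`SpinIdOnceCell::get_or_try_init` provides the same once-only initialization contract as a once-cell keyed by a nonzero ID. For intuition, the equivalent behavior expressed with the standard library's `OnceLock` (a stand-in for illustration, not how the cell above is implemented):

```rust
use std::sync::OnceLock;

fn main() {
    static CELL: OnceLock<(i32, ())> = OnceLock::new();

    // Imagine `create_kernel_object` wrapping `acre_mtx`; here it is a stub.
    let create_kernel_object = || -> Result<(i32, ()), i32> { Ok((7, ())) };

    // The first successful initializer wins and its ID is cached.
    let (id, ()) = *CELL.get_or_init(|| create_kernel_object().unwrap());
    assert_eq!(id, 7);
    // A second lookup hits the fast path and observes the same ID.
    assert_eq!(CELL.get().unwrap().0, 7);
}
```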
diff --git a/library/std/src/sys/itron/task.rs b/library/std/src/sys/itron/task.rs
new file mode 100644
index 00000000000..94beb50a254
--- /dev/null
+++ b/library/std/src/sys/itron/task.rs
@@ -0,0 +1,44 @@
+use super::{
+    abi,
+    error::{fail, fail_aborting, ItronError},
+};
+
+use crate::mem::MaybeUninit;
+
+/// Get the ID of the task in Running state. Panics on failure.
+#[inline]
+pub fn current_task_id() -> abi::ID {
+    try_current_task_id().unwrap_or_else(|e| fail(e, &"get_tid"))
+}
+
+/// Get the ID of the task in Running state. Aborts on failure.
+#[inline]
+pub fn current_task_id_aborting() -> abi::ID {
+    try_current_task_id().unwrap_or_else(|e| fail_aborting(e, &"get_tid"))
+}
+
+/// Get the ID of the task in Running state.
+#[inline]
+pub fn try_current_task_id() -> Result<abi::ID, ItronError> {
+    unsafe {
+        let mut out = MaybeUninit::uninit();
+        ItronError::err_if_negative(abi::get_tid(out.as_mut_ptr()))?;
+        Ok(out.assume_init())
+    }
+}
+
+/// Get the specified task's priority. Panics on failure.
+#[inline]
+pub fn task_priority(task: abi::ID) -> abi::PRI {
+    try_task_priority(task).unwrap_or_else(|e| fail(e, &"get_pri"))
+}
+
+/// Get the specified task's priority.
+#[inline]
+pub fn try_task_priority(task: abi::ID) -> Result<abi::PRI, ItronError> {
+    unsafe {
+        let mut out = MaybeUninit::uninit();
+        ItronError::err_if_negative(abi::get_pri(task, out.as_mut_ptr()))?;
+        Ok(out.assume_init())
+    }
+}
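
These helpers follow a two-tier pattern: a fallible `try_*` primitive returning `Result`, plus thin wrappers that panic (or abort) with the name of the failing service call. A trivial, self-contained sketch of that shape (names are illustrative, not part of the module above):

```rust
// Fallible primitive: imagine `get_tid`/`get_pri` behind this.
fn try_get_value() -> Result<i32, String> {
    Ok(42)
}

// Convenience wrapper: attaches the call name to the failure.
fn get_value() -> i32 {
    try_get_value().unwrap_or_else(|e| panic!("get_value failed: {e}"))
}

fn main() {
    assert_eq!(get_value(), 42);
}
```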
diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs
new file mode 100644
index 00000000000..4feb9c5a6d7
--- /dev/null
+++ b/library/std/src/sys/itron/thread.rs
@@ -0,0 +1,352 @@
+//! Thread implementation backed by μITRON tasks. Assumes `acre_tsk` and
+//! `exd_tsk` are available.
+use super::{
+    abi,
+    error::{expect_success, expect_success_aborting, ItronError},
+    task,
+    time::dur2reltims,
+};
+use crate::{
+    cell::UnsafeCell,
+    convert::TryFrom,
+    ffi::CStr,
+    hint, io,
+    mem::ManuallyDrop,
+    sync::atomic::{AtomicUsize, Ordering},
+    sys::thread_local_dtor::run_dtors,
+    time::Duration,
+};
+
+pub struct Thread {
+    inner: ManuallyDrop<Box<ThreadInner>>,
+
+    /// The ID of the underlying task.
+    task: abi::ID,
+}
+
+/// State data shared between a parent thread and child thread. It's dropped on
+/// a transition to one of the final states.
+struct ThreadInner {
+    /// This field is used on thread creation to pass a closure from
+    /// `Thread::new` to the created task.
+    start: UnsafeCell<ManuallyDrop<Box<dyn FnOnce()>>>,
+
+    /// A state machine. Each transition is annotated with `[...]` in the
+    /// source code.
+    ///
+    /// ```text
+    ///
+    ///    <P>: parent, <C>: child, (?): don't-care
+    ///
+    ///       DETACHED (-1)  -------------------->  EXITED (?)
+    ///                        <C>finish/exd_tsk
+    ///          ^
+    ///          |
+    ///          | <P>detach
+    ///          |
+    ///
+    ///       INIT (0)  ----------------------->  FINISHED (-1)
+    ///                        <C>finish
+    ///          |                                    |
+    ///          | <P>join/slp_tsk                    | <P>join/del_tsk
+    ///          |                                    | <P>detach/del_tsk
+    ///          v                                    v
+    ///
+    ///       JOINING                              JOINED (?)
+    ///     (parent_tid)
+    ///                                            ^
+    ///             \                             /
+    ///              \  <C>finish/wup_tsk        / <P>slp_tsk-complete/ter_tsk
+    ///               \                         /                      & del_tsk
+    ///                \                       /
+    ///                 '--> JOIN_FINALIZE ---'
+    ///                          (-1)
+    ///
+    /// ```
+    lifecycle: AtomicUsize,
+}
+
+// Safety: The only `!Sync` field, `ThreadInner::start`, is only touched by
+//         the task represented by `ThreadInner`.
+unsafe impl Sync for ThreadInner {}
+
+const LIFECYCLE_INIT: usize = 0;
+const LIFECYCLE_FINISHED: usize = usize::MAX;
+const LIFECYCLE_DETACHED: usize = usize::MAX;
+const LIFECYCLE_JOIN_FINALIZE: usize = usize::MAX;
+const LIFECYCLE_DETACHED_OR_JOINED: usize = usize::MAX;
+const LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE: usize = usize::MAX;
+// there's no single value for `JOINING`
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * crate::mem::size_of::<usize>();
+
+impl Thread {
+    /// # Safety
+    ///
+    /// See `thread::Builder::spawn_unchecked` for safety requirements.
+    pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+        // Inherit the current task's priority
+        let current_task = task::try_current_task_id().map_err(|e| e.as_io_error())?;
+        let priority = task::try_task_priority(current_task).map_err(|e| e.as_io_error())?;
+
+        let inner = Box::new(ThreadInner {
+            start: UnsafeCell::new(ManuallyDrop::new(p)),
+            lifecycle: AtomicUsize::new(LIFECYCLE_INIT),
+        });
+
+        unsafe extern "C" fn trampoline(exinf: isize) {
+            // Safety: `ThreadInner` is alive at this point
+            let inner = unsafe { &*(exinf as *const ThreadInner) };
+
+            // Safety: Since `trampoline` is called only once for each
+            //         `ThreadInner` and only `trampoline` touches `start`,
+            //         `start` contains contents and is safe to mutably borrow.
+            let p = unsafe { ManuallyDrop::take(&mut *inner.start.get()) };
+            p();
+
+            // Fix the current thread's state just in case, so that the
+            // destructors won't abort
+            // Safety: Not really unsafe
+            let _ = unsafe { abi::unl_cpu() };
+            let _ = unsafe { abi::ena_dsp() };
+
+            // Run TLS destructors now because they are not
+            // called automatically for terminated tasks.
+            unsafe { run_dtors() };
+
+            let old_lifecycle = inner
+                .lifecycle
+                .swap(LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE, Ordering::Release);
+
+            match old_lifecycle {
+                LIFECYCLE_DETACHED => {
+                    // [DETACHED → EXITED]
+                    // No one will ever join, so we'll ask the collector task to
+                    // delete the task.
+
+                    // In this case, `inner`'s ownership has been moved to us,
+                    // and we are responsible for dropping it. The acquire
+                    // ordering is not necessary because the parent thread made
+                    // no memory access needing synchronization since the call
+                    // to `acre_tsk`.
+                    // Safety: See above.
+                    let _ = unsafe { Box::from_raw(inner as *const _ as *mut ThreadInner) };
+
+                    // Safety: There are no pinned references to the stack
+                    unsafe { terminate_and_delete_current_task() };
+                }
+                LIFECYCLE_INIT => {
+                    // [INIT → FINISHED]
+                    // The parent hasn't decided whether to join or detach this
+                    // thread yet. Whichever option the parent chooses,
+                    // it'll have to delete this task.
+                    // Since the parent might drop `*inner` as soon as it sees
+                    // `FINISHED`, the release ordering must be used in the
+                    // above `swap` call.
+                }
+                parent_tid => {
+                    // Since the parent might drop `*inner` and terminate us as
+                    // soon as it sees `JOIN_FINALIZE`, the release ordering
+                    // must be used in the above `swap` call.
+
+                    // [JOINING → JOIN_FINALIZE]
+                    // Wake up the parent task.
+                    expect_success(
+                        unsafe {
+                            let mut er = abi::wup_tsk(parent_tid as _);
+                            if er == abi::E_QOVR {
+                                // `E_QOVR` indicates there's already
+                                // a parking token
+                                er = abi::E_OK;
+                            }
+                            er
+                        },
+                        &"wup_tsk",
+                    );
+                }
+            }
+        }
+
+        let inner_ptr = (&*inner) as *const ThreadInner;
+
+        let new_task = ItronError::err_if_negative(unsafe {
+            abi::acre_tsk(&abi::T_CTSK {
+                // Activate this task immediately
+                tskatr: abi::TA_ACT,
+                exinf: inner_ptr as abi::EXINF,
+                // The entry point
+                task: Some(trampoline),
+                itskpri: priority,
+                stksz: stack,
+                // Let the kernel allocate the stack,
+                stk: crate::ptr::null_mut(),
+            })
+        })
+        .map_err(|e| e.as_io_error())?;
+
+        Ok(Self { inner: ManuallyDrop::new(inner), task: new_task })
+    }
+
+    pub fn yield_now() {
+        expect_success(unsafe { abi::rot_rdq(abi::TPRI_SELF) }, &"rot_rdq");
+    }
+
+    pub fn set_name(_name: &CStr) {
+        // nope
+    }
+
+    pub fn sleep(dur: Duration) {
+        for timeout in dur2reltims(dur) {
+            expect_success(unsafe { abi::dly_tsk(timeout) }, &"dly_tsk");
+        }
+    }
+
+    pub fn join(mut self) {
+        let inner = &*self.inner;
+        // Get the current task ID. Panicking here would cause a resource leak,
+        // so just abort on failure.
+        let current_task = task::current_task_id_aborting();
+        debug_assert!(usize::try_from(current_task).is_ok());
+        debug_assert_ne!(current_task as usize, LIFECYCLE_INIT);
+        debug_assert_ne!(current_task as usize, LIFECYCLE_DETACHED);
+
+        let current_task = current_task as usize;
+
+        match inner.lifecycle.swap(current_task, Ordering::Acquire) {
+            LIFECYCLE_INIT => {
+                // [INIT → JOINING]
+                // The child task will transition the state to `JOIN_FINALIZE`
+                // and wake us up.
+                loop {
+                    expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk");
+                    // To synchronize with the child task's memory accesses to
+                    // `inner` up to the point of the assignment of
+                    // `JOIN_FINALIZE`, `Ordering::Acquire` must be used for the
+                    // `load`.
+                    if inner.lifecycle.load(Ordering::Acquire) == LIFECYCLE_JOIN_FINALIZE {
+                        break;
+                    }
+                }
+
+                // [JOIN_FINALIZE → JOINED]
+            }
+            LIFECYCLE_FINISHED => {
+                // [FINISHED → JOINED]
+                // To synchronize with the child task's memory accesses to
+                // `inner` up to the point of the assignment of `FINISHED`,
+                // `Ordering::Acquire` must be used for the above `swap` call`.
+            }
+            _ => unsafe { hint::unreachable_unchecked() },
+        }
+
+        // Terminate and delete the task
+        // Safety: `self.task` still represents a task we own (because this
+        //         method or `detach_inner` is called only once for each
+        //         `Thread`). The task indicated that it's safe to delete by
+        //         entering the `FINISHED` or `JOIN_FINALIZE` state.
+        unsafe { terminate_and_delete_task(self.task) };
+
+        // In either case, we are responsible for dropping `inner`.
+        // Safety: The contents of `self.inner` will not be accessed hereafter
+        let _inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+
+        // Skip the destructor (because it would attempt to detach the thread)
+        crate::mem::forget(self);
+    }
+}
+
+impl Drop for Thread {
+    fn drop(&mut self) {
+        // Detach the thread.
+        match self.inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
+            LIFECYCLE_INIT => {
+                // [INIT → DETACHED]
+                // When the time comes, the child will figure out that no
+                // one will ever join it.
+                // The ownership of `self.inner` is moved to the child thread.
+                // However, the release ordering is not necessary because we
+                // made no memory access needing synchronization since the call
+                // to `acre_tsk`.
+            }
+            LIFECYCLE_FINISHED => {
+                // [FINISHED → JOINED]
+                // The task has already decided that we should delete the task.
+                // To synchronize with the child task's memory accesses to
+                // `inner` up to the point of the assignment of `FINISHED`,
+                // the acquire ordering is required for the above `swap` call.
+
+                // Terminate and delete the task
+                // Safety: `self.task` still represents a task we own (because
+                //         this method or `join_inner` is called only once for
+                //         each `Thread`). The task indicated that it's safe to
+                //         delete by entering the `FINISHED` state.
+                unsafe { terminate_and_delete_task(self.task) };
+
+                // We are responsible for dropping `inner`.
+                // Safety: The contents of `self.inner` will not be accessed
+                //         hereafter
+                unsafe { ManuallyDrop::drop(&mut self.inner) };
+            }
+            _ => unsafe { hint::unreachable_unchecked() },
+        }
+    }
+}
+
+pub mod guard {
+    pub type Guard = !;
+    pub unsafe fn current() -> Option<Guard> {
+        None
+    }
+    pub unsafe fn init() -> Option<Guard> {
+        None
+    }
+}
+
+/// Terminate and delete the specified task.
+///
+/// This function will abort if `deleted_task` refers to the calling task.
+///
+/// It is assumed that the specified task is solely managed by the caller -
+/// i.e., other threads must not "resuscitate" the specified task or delete it
+/// prematurely while this function is still in progress. It is allowed for the
+/// specified task to exit on its own.
+///
+/// # Safety
+///
+/// The task must be safe to terminate. This is in general not true
+/// because there might be pinned references to the task's stack.
+unsafe fn terminate_and_delete_task(deleted_task: abi::ID) {
+    // Terminate the task
+    // Safety: Upheld by the caller
+    match unsafe { abi::ter_tsk(deleted_task) } {
+        // Indicates the task is already dormant, ignore it
+        abi::E_OBJ => {}
+        er => {
+            expect_success_aborting(er, &"ter_tsk");
+        }
+    }
+
+    // Delete the task
+    // Safety: Upheld by the caller
+    expect_success_aborting(unsafe { abi::del_tsk(deleted_task) }, &"del_tsk");
+}
+
+/// Terminate and delete the calling task.
+///
+/// Atomicity is not required - i.e., it can be assumed that other threads won't
+/// `ter_tsk` the calling task while this function is still in progress. (This
+/// property makes it easy to implement this operation on μITRON-derived kernels
+/// that don't support `exd_tsk`.)
+///
+/// # Safety
+///
+/// The task must be safe to terminate. This is in general not true
+/// because there might be pinned references to the task's stack.
+unsafe fn terminate_and_delete_current_task() -> ! {
+    expect_success_aborting(unsafe { abi::exd_tsk() }, &"exd_tsk");
+    // Safety: `exd_tsk` never returns on success
+    unsafe { crate::hint::unreachable_unchecked() };
+}
+
+pub fn available_concurrency() -> io::Result<crate::num::NonZeroUsize> {
+    super::unsupported()
+}
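
The heart of `thread.rs` is the `lifecycle` handoff: parent and child each `swap` in their token, and the old value tells exactly one side to perform cleanup. A minimal host-runnable model of that race (an assumed simplification; `DONE` collapses the `FINISHED`/`DETACHED` roles that both map to `-1` in the state diagram above):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

const INIT: usize = 0;
const DONE: usize = usize::MAX; // plays FINISHED / DETACHED

fn main() {
    let lifecycle = Arc::new(AtomicUsize::new(INIT));
    let child = {
        let lifecycle = Arc::clone(&lifecycle);
        thread::spawn(move || {
            // Child finishes: INIT -> FINISHED, or DETACHED -> EXITED.
            let old = lifecycle.swap(DONE, Ordering::Release);
            old == DONE // true => parent detached first, child cleans up
        })
    };
    // Parent detaches: INIT -> DETACHED, or FINISHED -> JOINED.
    let old = lifecycle.swap(DONE, Ordering::Acquire);
    let parent_cleans = old == DONE; // true => child finished first
    let child_cleans = child.join().unwrap();
    // Whichever order the swaps ran in, exactly one side wins cleanup duty.
    assert!(parent_cleans ^ child_cleans);
}
```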
diff --git a/library/std/src/sys/itron/time.rs b/library/std/src/sys/itron/time.rs
new file mode 100644
index 00000000000..6a992ad1d3c
--- /dev/null
+++ b/library/std/src/sys/itron/time.rs
@@ -0,0 +1,123 @@
+use super::{abi, error::expect_success};
+use crate::{convert::TryInto, mem::MaybeUninit, time::Duration};
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant(abi::SYSTIM);
+
+impl Instant {
+    pub fn now() -> Instant {
+        // Safety: The provided pointer is valid
+        unsafe {
+            let mut out = MaybeUninit::uninit();
+            expect_success(abi::get_tim(out.as_mut_ptr()), &"get_tim");
+            Instant(out.assume_init())
+        }
+    }
+
+    pub const fn zero() -> Instant {
+        Instant(0)
+    }
+
+    pub fn actually_monotonic() -> bool {
+        // There are ways to change the system time
+        false
+    }
+
+    pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+        self.0.checked_sub(other.0).map(|ticks| {
+            // `SYSTIM` is measured in microseconds
+            Duration::from_micros(ticks)
+        })
+    }
+
+    pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+        // `SYSTIM` is measured in microseconds
+        let ticks = other.as_micros();
+
+        Some(Instant(self.0.checked_add(ticks.try_into().ok()?)?))
+    }
+
+    pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+        // `SYSTIM` is measured in microseconds
+        let ticks = other.as_micros();
+
+        Some(Instant(self.0.checked_sub(ticks.try_into().ok()?)?))
+    }
+}
+
+/// Split `Duration` into zero or more `RELTIM`s.
+#[inline]
+pub fn dur2reltims(dur: Duration) -> impl Iterator<Item = abi::RELTIM> {
+    // `RELTIM` is microseconds
+    let mut ticks = dur.as_micros();
+
+    crate::iter::from_fn(move || {
+        if ticks == 0 {
+            None
+        } else if ticks <= abi::TMAX_RELTIM as u128 {
+            Some(crate::mem::replace(&mut ticks, 0) as abi::RELTIM)
+        } else {
+            ticks -= abi::TMAX_RELTIM as u128;
+            Some(abi::TMAX_RELTIM)
+        }
+    })
+}
+
+/// Split `Duration` into one or more `TMO`s.
+#[inline]
+fn dur2tmos(dur: Duration) -> impl Iterator<Item = abi::TMO> {
+    // `TMO` is microseconds
+    let mut ticks = dur.as_micros();
+    let mut end = false;
+
+    crate::iter::from_fn(move || {
+        if end {
+            None
+        } else if ticks <= abi::TMAX_RELTIM as u128 {
+            end = true;
+            Some(crate::mem::replace(&mut ticks, 0) as abi::TMO)
+        } else {
+            ticks -= abi::TMAX_RELTIM as u128;
+            Some(abi::TMAX_RELTIM)
+        }
+    })
+}
+
+/// Split `Duration` into one or more API calls with timeout.
+#[inline]
+pub fn with_tmos(dur: Duration, mut f: impl FnMut(abi::TMO) -> abi::ER) -> abi::ER {
+    let mut er = abi::E_TMOUT;
+    for tmo in dur2tmos(dur) {
+        er = f(tmo);
+        if er != abi::E_TMOUT {
+            break;
+        }
+    }
+    er
+}
+
+/// Split `Duration` into one or more API calls with timeout. This function can
+/// handle spurious wakeups.
+#[inline]
+pub fn with_tmos_strong(dur: Duration, mut f: impl FnMut(abi::TMO) -> abi::ER) -> abi::ER {
+    // `TMO` and `SYSTIM` are microseconds.
+    // Clamp at `SYSTIM::MAX` for performance reasons. This shouldn't cause
+    // a problem in practice. (`u64::MAX` μs ≈ 584942 years)
+    let ticks = dur.as_micros().min(abi::SYSTIM::MAX as u128) as abi::SYSTIM;
+
+    let start = Instant::now().0;
+    let mut elapsed = 0;
+    let mut er = abi::E_TMOUT;
+    while elapsed <= ticks {
+        er = f(ticks.saturating_sub(elapsed).min(abi::TMAX_RELTIM as abi::SYSTIM) as abi::TMO);
+        if er != abi::E_TMOUT {
+            break;
+        }
+        elapsed = Instant::now().0.wrapping_sub(start);
+    }
+
+    er
+}
+
+#[cfg(test)]
+mod tests;
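
The retry arithmetic in `with_tmos_strong` is worth spelling out: each iteration waits for the time *remaining*, clamped to `TMAX_RELTIM`, and a wait whose deadline has already passed degenerates into a zero-timeout poll. A small standalone sketch of just that computation (the constant is copied from `abi.rs`):

```rust
const TMAX_RELTIM: u64 = 4_000_000_000;

// Remaining wait time, clamped to the kernel's maximum relative time.
fn next_timeout(ticks: u64, elapsed: u64) -> u64 {
    ticks.saturating_sub(elapsed).min(TMAX_RELTIM)
}

fn main() {
    // A 10-second wait spuriously woken after 4 seconds retries with the
    // 6 seconds that remain.
    assert_eq!(next_timeout(10_000_000, 4_000_000), 6_000_000);
    // A wait longer than TMAX_RELTIM is issued in clamped chunks.
    assert_eq!(next_timeout(5_000_000_000, 0), TMAX_RELTIM);
    // Past the deadline, the timeout saturates at zero (a polling call).
    assert_eq!(next_timeout(1_000, 2_000), 0);
}
```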
diff --git a/library/std/src/sys/itron/time/tests.rs b/library/std/src/sys/itron/time/tests.rs
new file mode 100644
index 00000000000..d14035d9da4
--- /dev/null
+++ b/library/std/src/sys/itron/time/tests.rs
@@ -0,0 +1,33 @@
+use super::*;
+
+fn reltim2dur(t: u64) -> Duration {
+    Duration::from_micros(t)
+}
+
+#[test]
+fn test_dur2reltims() {
+    assert_eq!(dur2reltims(reltim2dur(0)).collect::<Vec<_>>(), vec![]);
+    assert_eq!(dur2reltims(reltim2dur(42)).collect::<Vec<_>>(), vec![42]);
+    assert_eq!(
+        dur2reltims(reltim2dur(abi::TMAX_RELTIM as u64)).collect::<Vec<_>>(),
+        vec![abi::TMAX_RELTIM]
+    );
+    assert_eq!(
+        dur2reltims(reltim2dur(abi::TMAX_RELTIM as u64 + 10000)).collect::<Vec<_>>(),
+        vec![abi::TMAX_RELTIM, 10000]
+    );
+}
+
+#[test]
+fn test_dur2tmos() {
+    assert_eq!(dur2tmos(reltim2dur(0)).collect::<Vec<_>>(), vec![0]);
+    assert_eq!(dur2tmos(reltim2dur(42)).collect::<Vec<_>>(), vec![42]);
+    assert_eq!(
+        dur2tmos(reltim2dur(abi::TMAX_RELTIM as u64)).collect::<Vec<_>>(),
+        vec![abi::TMAX_RELTIM]
+    );
+    assert_eq!(
+        dur2tmos(reltim2dur(abi::TMAX_RELTIM as u64 + 10000)).collect::<Vec<_>>(),
+        vec![abi::TMAX_RELTIM, 10000]
+    );
+}