Diffstat (limited to 'src/libstd/sys')
-rw-r--r--  src/libstd/sys/wasm/mutex_atomics.rs         | 23
-rw-r--r--  src/libstd/sys/wasm/thread.rs                | 46
-rw-r--r--  src/libstd/sys/wasm/thread_local_atomics.rs  | 53
3 files changed, 102 insertions, 20 deletions
diff --git a/src/libstd/sys/wasm/mutex_atomics.rs b/src/libstd/sys/wasm/mutex_atomics.rs
index ced6c17ef96..762e807096f 100644
--- a/src/libstd/sys/wasm/mutex_atomics.rs
+++ b/src/libstd/sys/wasm/mutex_atomics.rs
@@ -11,7 +11,8 @@
 use arch::wasm32::atomic;
 use cell::UnsafeCell;
 use mem;
-use sync::atomic::{AtomicUsize, AtomicU64, Ordering::SeqCst};
+use sync::atomic::{AtomicUsize, AtomicU32, Ordering::SeqCst};
+use sys::thread;
 
 pub struct Mutex {
     locked: AtomicUsize,
@@ -70,7 +71,7 @@ impl Mutex {
 }
 
 pub struct ReentrantMutex {
-    owner: AtomicU64,
+    owner: AtomicU32,
     recursions: UnsafeCell<u32>,
 }
 
@@ -91,7 +92,7 @@ unsafe impl Sync for ReentrantMutex {}
 impl ReentrantMutex {
     pub unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex {
-            owner: AtomicU64::new(0),
+            owner: AtomicU32::new(0),
             recursions: UnsafeCell::new(0),
         }
     }
@@ -101,20 +102,20 @@ impl ReentrantMutex {
     }
 
     pub unsafe fn lock(&self) {
-        let me = thread_id();
+        let me = thread::my_id();
         while let Err(owner) = self._try_lock(me) {
-            let val = atomic::wait_i64(self.ptr(), owner as i64, -1);
+            let val = atomic::wait_i32(self.ptr(), owner as i32, -1);
             debug_assert!(val == 0 || val == 1);
         }
     }
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        self._try_lock(thread_id()).is_ok()
+        self._try_lock(thread::my_id()).is_ok()
     }
 
     #[inline]
-    unsafe fn _try_lock(&self, id: u64) -> Result<(), u64> {
+    unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
         let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0
         match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
             // we transitioned from unlocked to locked
@@ -153,11 +154,7 @@ impl ReentrantMutex {
     }
 
     #[inline]
-    fn ptr(&self) -> *mut i64 {
-        &self.owner as *const AtomicU64 as *mut i64
+    fn ptr(&self) -> *mut i32 {
+        &self.owner as *const AtomicU32 as *mut i32
     }
 }
-
-fn thread_id() -> u64 {
-    panic!("thread ids not implemented on wasm with atomics yet")
-}
diff --git a/src/libstd/sys/wasm/thread.rs b/src/libstd/sys/wasm/thread.rs
index bef6c1f3490..4ad89c42b92 100644
--- a/src/libstd/sys/wasm/thread.rs
+++ b/src/libstd/sys/wasm/thread.rs
@@ -69,3 +69,49 @@ pub mod guard {
     pub unsafe fn init() -> Option<Guard> { None }
     pub unsafe fn deinit() {}
 }
+
+cfg_if! {
+    if #[cfg(all(target_feature = "atomics", feature = "wasm-bindgen-threads"))] {
+        #[link(wasm_import_module = "__wbindgen_thread_xform__")]
+        extern {
+            fn __wbindgen_current_id() -> u32;
+            fn __wbindgen_tcb_get() -> u32;
+            fn __wbindgen_tcb_set(ptr: u32);
+        }
+        pub fn my_id() -> u32 {
+            unsafe { __wbindgen_current_id() }
+        }
+
+        // These are currently only ever used in `thread_local_atomics.rs`, if
+        // you'd like to use them be sure to update that and make sure everyone
+        // agrees what's what.
+        pub fn tcb_get() -> *mut u8 {
+            use mem;
+            assert_eq!(mem::size_of::<*mut u8>(), mem::size_of::<u32>());
+            unsafe { __wbindgen_tcb_get() as *mut u8 }
+        }
+
+        pub fn tcb_set(ptr: *mut u8) {
+            unsafe { __wbindgen_tcb_set(ptr as u32); }
+        }
+
+        // FIXME: still need something for hooking exiting a thread to free
+        // data...
+
+    } else if #[cfg(target_feature = "atomics")] {
+        pub fn my_id() -> u32 {
+            panic!("thread ids not implemented on wasm with atomics yet")
+        }
+
+        pub fn tcb_get() -> *mut u8 {
+            panic!("thread local data not implemented on wasm with atomics yet")
+        }
+
+        pub fn tcb_set(ptr: *mut u8) {
+            panic!("thread local data not implemented on wasm with atomics yet")
+        }
+    } else {
+        // stubbed out because no functions actually access these intrinsics
+        // unless atomics are enabled
+    }
+}
diff --git a/src/libstd/sys/wasm/thread_local_atomics.rs b/src/libstd/sys/wasm/thread_local_atomics.rs
index 1394013b4a3..acfe60719f2 100644
--- a/src/libstd/sys/wasm/thread_local_atomics.rs
+++ b/src/libstd/sys/wasm/thread_local_atomics.rs
@@ -8,22 +8,61 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use sys::thread;
+use sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+const MAX_KEYS: usize = 128;
+static NEXT_KEY: AtomicUsize = AtomicUsize::new(0);
+
+struct ThreadControlBlock {
+    keys: [*mut u8; MAX_KEYS],
+}
+
+impl ThreadControlBlock {
+    fn new() -> ThreadControlBlock {
+        ThreadControlBlock {
+            keys: [0 as *mut u8; MAX_KEYS],
+        }
+    }
+
+    fn get() -> *mut ThreadControlBlock {
+        let ptr = thread::tcb_get();
+        if !ptr.is_null() {
+            return ptr as *mut ThreadControlBlock
+        }
+        let tcb = Box::into_raw(Box::new(ThreadControlBlock::new()));
+        thread::tcb_set(tcb as *mut u8);
+        tcb
+    }
+}
+
 pub type Key = usize;
 
-pub unsafe fn create(_dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
-    panic!("TLS on wasm with atomics not implemented yet");
+pub unsafe fn create(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
+    drop(dtor); // FIXME: need to figure out how to hook thread exit to run this
+    let key = NEXT_KEY.fetch_add(1, SeqCst);
+    if key >= MAX_KEYS {
+        NEXT_KEY.store(MAX_KEYS, SeqCst);
+        panic!("cannot allocate space for more TLS keys");
+    }
+    // offset by 1 so we never hand out 0. This is currently required by
+    // `sys_common/thread_local.rs` where it can't cope with keys of value 0
+    // because it messes up the atomic management.
+    return key + 1
 }
 
-pub unsafe fn set(_key: Key, _value: *mut u8) {
-    panic!("TLS on wasm with atomics not implemented yet");
+pub unsafe fn set(key: Key, value: *mut u8) {
+    (*ThreadControlBlock::get()).keys[key - 1] = value;
 }
 
-pub unsafe fn get(_key: Key) -> *mut u8 {
-    panic!("TLS on wasm with atomics not implemented yet");
+pub unsafe fn get(key: Key) -> *mut u8 {
+    (*ThreadControlBlock::get()).keys[key - 1]
 }
 
 pub unsafe fn destroy(_key: Key) {
-    panic!("TLS on wasm with atomics not implemented yet");
+    // FIXME: should implement this somehow, this isn't typically called but it
+    // can be called if two threads race to initialize a TLS slot and one ends
+    // up not being needed.
 }
 
 #[inline]
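The ReentrantMutex change above shrinks the owner field from AtomicU64 to AtomicU32 so it can be driven by wasm's 32-bit atomic wait, and it reserves 0 to mean "unlocked" by storing the owner's thread id offset by 1 (`id.checked_add(1)`). Below is a minimal standalone sketch of that locking scheme, assuming a portable spin loop in place of `atomic::wait_i32`/wake and a caller-supplied thread id (the real code calls `thread::my_id()`); the `new`/`lock`/`unlock`/`try_lock_id` surface here is illustrative, not the libstd API.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering::SeqCst};

pub struct ReentrantMutex {
    owner: AtomicU32,            // 0 = unlocked, otherwise owner's id + 1
    recursions: UnsafeCell<u32>, // extra lock() calls by the owning thread
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
    pub fn new() -> ReentrantMutex {
        ReentrantMutex {
            owner: AtomicU32::new(0),
            recursions: UnsafeCell::new(0),
        }
    }

    // Ok(()) if we acquired (or re-acquired) the lock, Err(current owner)
    // otherwise. The single compare_exchange does double duty: it acquires
    // the lock when it sees 0, and the value it returns on failure tells us
    // whether the current owner is ourselves (recursion) or another thread.
    fn try_lock_id(&self, id: u32) -> Result<(), u32> {
        let id = id.checked_add(1).unwrap(); // keep 0 reserved for "unlocked"
        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
            Ok(_) => Ok(()), // unlocked -> locked by us
            Err(n) if n == id => {
                // we already hold the lock, so bumping the counter is safe
                unsafe { *self.recursions.get() += 1; }
                Ok(())
            }
            Err(owner) => Err(owner), // held by another thread
        }
    }

    pub fn lock(&self, my_id: u32) {
        // The wasm implementation blocks in atomic::wait_i32 until the owner
        // word changes; a spin loop stands in for that here.
        while self.try_lock_id(my_id).is_err() {
            std::hint::spin_loop();
        }
    }

    // Caller must be the current owner.
    pub fn unlock(&self) {
        unsafe {
            if *self.recursions.get() > 0 {
                *self.recursions.get() -= 1; // still held by us
            } else {
                self.owner.store(0, SeqCst); // release; wasm would wake a waiter
            }
        }
    }
}

fn main() {
    let m = ReentrantMutex::new();
    m.lock(7);
    m.lock(7); // same id: recursion rather than deadlock
    m.unlock();
    m.unlock();
    assert!(m.try_lock_id(9).is_ok()); // fully released, another id may lock
}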
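On the TLS side, keys come from a global bump counter capped at MAX_KEYS, values live in a fixed array inside a lazily allocated per-thread ThreadControlBlock reached through the wasm-bindgen tcb_get/tcb_set imports, and keys are handed out offset by 1 so that 0 is never a valid key. The sketch below is a compilable single-threaded model of that scheme; the process-global TCB pointer is an assumption standing in for the real per-thread `__wbindgen_tcb_get`/`__wbindgen_tcb_set` storage.

use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering::SeqCst};

const MAX_KEYS: usize = 128;
static NEXT_KEY: AtomicUsize = AtomicUsize::new(0);

struct ThreadControlBlock {
    keys: [*mut u8; MAX_KEYS], // one slot per TLS key, null until set
}

// Stand-in for the per-thread pointer the real code keeps via tcb_set();
// a true implementation holds one block per thread, not one per process.
static TCB: AtomicPtr<ThreadControlBlock> = AtomicPtr::new(ptr::null_mut());

fn tcb() -> *mut ThreadControlBlock {
    let existing = TCB.load(SeqCst);
    if !existing.is_null() {
        return existing;
    }
    // First use: allocate the block lazily, as ThreadControlBlock::get()
    // does in the diff above.
    let new = Box::into_raw(Box::new(ThreadControlBlock {
        keys: [ptr::null_mut(); MAX_KEYS],
    }));
    TCB.store(new, SeqCst);
    new
}

type Key = usize;

fn create() -> Key {
    let key = NEXT_KEY.fetch_add(1, SeqCst);
    if key >= MAX_KEYS {
        NEXT_KEY.store(MAX_KEYS, SeqCst); // clamp so repeated failures can't wrap
        panic!("cannot allocate space for more TLS keys");
    }
    key + 1 // offset by 1: key 0 stays reserved as "no key"
}

fn set(key: Key, value: *mut u8) {
    unsafe { (*tcb()).keys[key - 1] = value; }
}

fn get(key: Key) -> *mut u8 {
    unsafe { (*tcb()).keys[key - 1] }
}

fn main() {
    let k = create();
    assert!(get(k).is_null()); // slots start out null
    set(k, 1 as *mut u8);
    assert_eq!(get(k), 1 as *mut u8);
}

The key-0 reservation mirrors the comment in create(): sys_common/thread_local.rs treats 0 as "no key", so the first real key handed out must be 1, and every array access subtracts 1 to recover the slot index.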
