Diffstat (limited to 'library/std/src/sys/unix')
-rw-r--r--  library/std/src/sys/unix/fd.rs                       |  19
-rw-r--r--  library/std/src/sys/unix/fs.rs                       |   5
-rw-r--r--  library/std/src/sys/unix/futex.rs                    |  47
-rw-r--r--  library/std/src/sys/unix/locks/mod.rs                |   1
-rw-r--r--  library/std/src/sys/unix/mod.rs                      |   1
-rw-r--r--  library/std/src/sys/unix/os.rs                       |  22
-rw-r--r--  library/std/src/sys/unix/process/process_common.rs   |   3
-rw-r--r--  library/std/src/sys/unix/thread_parker.rs            | 265
-rw-r--r--  library/std/src/sys/unix/time.rs                     |   4
-rw-r--r--  library/std/src/sys/unix/weak.rs                     |  32
10 files changed, 352 insertions, 47 deletions
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index 3de7c68a686..40a64585802 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -11,6 +11,21 @@ use crate::sys_common::{AsInner, FromInner, IntoInner};
 
 use libc::{c_int, c_void};
 
+#[cfg(any(
+    target_os = "android",
+    target_os = "linux",
+    target_os = "emscripten",
+    target_os = "l4re"
+))]
+use libc::off64_t;
+#[cfg(not(any(
+    target_os = "linux",
+    target_os = "emscripten",
+    target_os = "l4re",
+    target_os = "android"
+)))]
+use libc::off_t as off64_t;
+
 #[derive(Debug)]
 pub struct FileDesc(OwnedFd);
 
@@ -109,7 +124,7 @@ impl FileDesc {
                 self.as_raw_fd(),
                 buf.as_mut_ptr() as *mut c_void,
                 cmp::min(buf.len(), READ_LIMIT),
-                offset as i64,
+                offset as off64_t,
             ))
             .map(|n| n as usize)
         }
@@ -176,7 +191,7 @@ impl FileDesc {
                 self.as_raw_fd(),
                 buf.as_ptr() as *const c_void,
                 cmp::min(buf.len(), READ_LIMIT),
-                offset as i64,
+                offset as off64_t,
             ))
            .map(|n| n as usize)
         }
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index 7181451de57..27fc7accdae 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -966,7 +966,7 @@ impl File {
             SeekFrom::End(off) => (libc::SEEK_END, off),
             SeekFrom::Current(off) => (libc::SEEK_CUR, off),
         };
-        let n = cvt(unsafe { lseek64(self.as_raw_fd(), pos, whence) })?;
+        let n = cvt(unsafe { lseek64(self.as_raw_fd(), pos as off64_t, whence) })?;
         Ok(n as u64)
     }
 
@@ -1647,8 +1647,9 @@ mod remove_dir_impl {
     fn remove_dir_all_recursive(parent_fd: Option<RawFd>, path: &CStr) -> io::Result<()> {
         // try opening as directory
         let fd = match openat_nofollow_dironly(parent_fd, &path) {
-            Err(err) if err.raw_os_error() == Some(libc::ENOTDIR) => {
+            Err(err) if matches!(err.raw_os_error(), Some(libc::ENOTDIR | libc::ELOOP)) => {
                 // not a directory - don't traverse further
+                // (for symlinks, older Linux kernels may return ELOOP instead of ENOTDIR)
                 return match parent_fd {
                     // unlink...
                     Some(parent_fd) => {
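
Note: the fd.rs and fs.rs hunks above replace hard-coded `i64` casts with an `off64_t` alias, so the offset argument always matches whichever `pread64`/`pwrite64`/`lseek64` symbol the cfg selects. A standalone sketch of the same aliasing pattern follows; the `read_at` helper is illustrative only (not code from this patch) and assumes the `libc` crate:

    use std::io;
    use std::os::unix::io::AsRawFd;

    // On Linux-family targets libc has an explicit 64-bit offset type and a
    // matching pread64; elsewhere plain off_t/pread are already the widest
    // variants available, so alias them to the same names.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    use libc::{off64_t, pread64};
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    use libc::{off_t as off64_t, pread as pread64};

    /// Read into `buf` at `offset` without moving the file cursor.
    fn read_at(fd: &impl AsRawFd, buf: &mut [u8], offset: u64) -> io::Result<usize> {
        let n = unsafe {
            pread64(
                fd.as_raw_fd(),
                buf.as_mut_ptr().cast(),
                buf.len(),
                offset as off64_t, // the cast matches the libc signature on every target
            )
        };
        if n < 0 { Err(io::Error::last_os_error()) } else { Ok(n as usize) }
    }
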
diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs
index 62760373a6a..c12ee169e79 100644
--- a/library/std/src/sys/unix/futex.rs
+++ b/library/std/src/sys/unix/futex.rs
@@ -52,25 +52,6 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
     }
 }
 
-#[cfg(target_os = "emscripten")]
-pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) {
-    extern "C" {
-        fn emscripten_futex_wait(
-            addr: *const AtomicU32,
-            val: libc::c_uint,
-            max_wait_ms: libc::c_double,
-        ) -> libc::c_int;
-    }
-
-    unsafe {
-        emscripten_futex_wait(
-            futex,
-            expected,
-            timeout.map_or(crate::f64::INFINITY, |d| d.as_secs_f64() * 1000.0),
-        );
-    }
-}
-
 /// Wake up one thread that's blocked on futex_wait on this futex.
 ///
 /// Returns true if this actually woke up such a thread,
@@ -101,10 +82,32 @@ pub fn futex_wake_all(futex: &AtomicU32) {
 }
 
 #[cfg(target_os = "emscripten")]
-pub fn futex_wake(futex: &AtomicU32) -> bool {
-    extern "C" {
-        fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int;
+extern "C" {
+    fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int;
+    fn emscripten_futex_wait(
+        addr: *const AtomicU32,
+        val: libc::c_uint,
+        max_wait_ms: libc::c_double,
+    ) -> libc::c_int;
+}
+
+#[cfg(target_os = "emscripten")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+    unsafe {
+        emscripten_futex_wait(
+            futex,
+            expected,
+            timeout.map_or(f64::INFINITY, |d| d.as_secs_f64() * 1000.0),
+        ) != -libc::ETIMEDOUT
     }
+}
 
+#[cfg(target_os = "emscripten")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
     unsafe { emscripten_futex_wake(futex, 1) > 0 }
 }
+
+#[cfg(target_os = "emscripten")]
+pub fn futex_wake_all(futex: &AtomicU32) {
+    unsafe { emscripten_futex_wake(futex, i32::MAX) };
+}
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index 17796f8894b..3e39c8b9b23 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -2,6 +2,7 @@ cfg_if::cfg_if! {
     if #[cfg(any(
         target_os = "linux",
         target_os = "android",
+        all(target_os = "emscripten", target_feature = "atomics"),
     ))] {
         mod futex;
         mod futex_rwlock;
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index aedeb02e656..8e909aab7f0 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -39,6 +39,7 @@ pub mod stdio;
 pub mod thread;
 pub mod thread_local_dtor;
 pub mod thread_local_key;
+pub mod thread_parker;
 pub mod time;
 
 #[cfg(target_os = "espidf")]
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index 1be733ba106..92bea9346d8 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -427,7 +427,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
     crate::fs::read_to_string("sys:exe").map(PathBuf::from)
 }
 
-#[cfg(any(target_os = "fuchsia", target_os = "l4re"))]
+#[cfg(target_os = "l4re")]
 pub fn current_exe() -> io::Result<PathBuf> {
     use crate::io::ErrorKind;
     Err(io::const_io_error!(ErrorKind::Unsupported, "Not yet implemented!"))
@@ -451,6 +451,26 @@ pub fn current_exe() -> io::Result<PathBuf> {
     super::unsupported::unsupported()
 }
 
+#[cfg(target_os = "fuchsia")]
+pub fn current_exe() -> io::Result<PathBuf> {
+    use crate::io::ErrorKind;
+
+    #[cfg(test)]
+    use realstd::env;
+
+    #[cfg(not(test))]
+    use crate::env;
+
+    let exe_path = env::args().next().ok_or(io::const_io_error!(
+        ErrorKind::Uncategorized,
+        "an executable path was not found because no arguments were provided through argv"
+    ))?;
+    let path = PathBuf::from(exe_path);
+
+    // Prepend the current working directory to the path if it's not absolute.
+    if !path.is_absolute() { getcwd().map(|cwd| cwd.join(path)) } else { Ok(path) }
+}
+
 pub struct Env {
     iter: vec::IntoIter<(OsString, OsString)>,
 }
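
Note: the new Fuchsia `current_exe` above falls back to `argv[0]`, made absolute against the current working directory when needed. Roughly the same logic can be written in ordinary user code with the public std API (a sketch, not part of this patch; `exe_from_argv` is a made-up name):

    use std::env;
    use std::io;
    use std::path::PathBuf;

    // Sketch: resolve the running executable from argv[0].
    fn exe_from_argv() -> io::Result<PathBuf> {
        let arg0 = env::args().next().ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "no arguments were provided through argv")
        })?;
        let path = PathBuf::from(arg0);
        // Prepend the current working directory if argv[0] is relative.
        if path.is_absolute() { Ok(path) } else { Ok(env::current_dir()?.join(path)) }
    }
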
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index 27bee714f5b..bca1b65a7fc 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -35,7 +35,8 @@ cfg_if::cfg_if! {
 // Android with api less than 21 define sig* functions inline, so it is not
 // available for dynamic link. Implementing sigemptyset and sigaddset allow us
 // to support older Android version (independent of libc version).
-// The following implementations are based on https://git.io/vSkNf
+// The following implementations are based on
+// https://github.com/aosp-mirror/platform_bionic/blob/ad8dcd6023294b646e5a8288c0ed431b0845da49/libc/include/android/legacy_signal_inlines.h
 cfg_if::cfg_if! {
     if #[cfg(target_os = "android")] {
         pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int {
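
Note: the fallbacks described by that comment treat `sigset_t` as a plain bit array, one bit per signal. A sketch of that approach (not a verbatim copy of the std implementation; it assumes the `libc` crate):

    use std::{mem, slice};

    // Zero every byte of the set.
    pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int {
        set.write_bytes(0u8, 1);
        0
    }

    // Set the bit corresponding to `signum`; signals are numbered from 1.
    pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int {
        let raw = slice::from_raw_parts_mut(set as *mut u8, mem::size_of::<libc::sigset_t>());
        let bit = (signum - 1) as usize;
        raw[bit / 8] |= 1 << (bit % 8);
        0
    }
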
diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs
new file mode 100644
index 00000000000..fd83f2f73d6
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker.rs
@@ -0,0 +1,265 @@
+//! Thread parking without `futex` using the `pthread` synchronization primitives.
+
+#![cfg(not(any(
+    target_os = "linux",
+    target_os = "android",
+    all(target_os = "emscripten", target_feature = "atomics")
+)))]
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomPinned;
+use crate::pin::Pin;
+use crate::ptr::addr_of_mut;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+unsafe fn lock(lock: *mut libc::pthread_mutex_t) {
+    let r = libc::pthread_mutex_lock(lock);
+    debug_assert_eq!(r, 0);
+}
+
+unsafe fn unlock(lock: *mut libc::pthread_mutex_t) {
+    let r = libc::pthread_mutex_unlock(lock);
+    debug_assert_eq!(r, 0);
+}
+
+unsafe fn notify_one(cond: *mut libc::pthread_cond_t) {
+    let r = libc::pthread_cond_signal(cond);
+    debug_assert_eq!(r, 0);
+}
+
+unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) {
+    let r = libc::pthread_cond_wait(cond, lock);
+    debug_assert_eq!(r, 0);
+}
+
+const TIMESPEC_MAX: libc::timespec =
+    libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+unsafe fn wait_timeout(
+    cond: *mut libc::pthread_cond_t,
+    lock: *mut libc::pthread_mutex_t,
+    dur: Duration,
+) {
+    // Use the system clock on systems that do not support pthread_condattr_setclock.
+    // This unfortunately results in problems when the system time changes.
+    #[cfg(any(target_os = "macos", target_os = "ios", target_os = "espidf"))]
+    let (now, dur) = {
+        use super::time::SystemTime;
+        use crate::cmp::min;
+
+        // OSX implementation of `pthread_cond_timedwait` is buggy
+        // with super long durations. When duration is greater than
+        // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+        // in macOS Sierra return error 316.
+        //
+        // This program demonstrates the issue:
+        // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+        //
+        // To work around this issue, and possible bugs of other OSes, timeout
+        // is clamped to 1000 years, which is allowable per the API of `park_timeout`
+        // because of spurious wakeups.
+        let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
+        let now = SystemTime::now().t;
+        (now, dur)
+    };
+    // Use the monotonic clock on other systems.
+    #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "espidf")))]
+    let (now, dur) = {
+        use super::time::Timespec;
+
+        (Timespec::now(libc::CLOCK_MONOTONIC), dur)
+    };
+
+    let timeout = now.checked_add_duration(&dur).map(|t| t.t).unwrap_or(TIMESPEC_MAX);
+    let r = libc::pthread_cond_timedwait(cond, lock, &timeout);
+    debug_assert!(r == libc::ETIMEDOUT || r == 0);
+}
+
+pub struct Parker {
+    state: AtomicUsize,
+    lock: UnsafeCell<libc::pthread_mutex_t>,
+    cvar: UnsafeCell<libc::pthread_cond_t>,
+    // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
+    _pinned: PhantomPinned,
+}
+
+impl Parker {
+    /// Construct the UNIX parker in-place.
+    ///
+    /// # Safety
+    /// The constructed parker must never be moved.
+    pub unsafe fn new(parker: *mut Parker) {
+        // Use the default mutex implementation to allow for simpler initialization.
+        // This could lead to undefined behaviour when deadlocking. This is avoided
+        // by not deadlocking. Note in particular the unlocking operation before any
+        // panic, as code after the panic could try to park again.
+        addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
+        addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
+
+        cfg_if::cfg_if! {
+            if #[cfg(any(
+                target_os = "macos",
+                target_os = "ios",
+                target_os = "l4re",
+                target_os = "android",
+                target_os = "redox"
+            ))] {
+                addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
+            } else if #[cfg(target_os = "espidf")] {
+                let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
+                assert_eq!(r, 0);
+            } else {
+                use crate::mem::MaybeUninit;
+                let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+                let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+                assert_eq!(r, 0);
+                let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+                assert_eq!(r, 0);
+                let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
+                assert_eq!(r, 0);
+                let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+                assert_eq!(r, 0);
+            }
+        }
+    }
+
+    // This implementation doesn't require `unsafe`, but other implementations
+    // may assume this is only called by the thread that owns the Parker.
+    pub unsafe fn park(self: Pin<&Self>) {
+        // If we were previously notified then we consume this notification and
+        // return quickly.
+        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+            return;
+        }
+
+        // Otherwise we need to coordinate going to sleep
+        lock(self.lock.get());
+        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+            Ok(_) => {}
+            Err(NOTIFIED) => {
+                // We must read here, even though we know it will be `NOTIFIED`.
+                // This is because `unpark` may have been called again since we read
+                // `NOTIFIED` in the `compare_exchange` above. We must perform an
+                // acquire operation that synchronizes with that `unpark` to observe
+                // any writes it made before the call to unpark. To do that we must
+                // read from the write it made to `state`.
+                let old = self.state.swap(EMPTY, SeqCst);
+
+                unlock(self.lock.get());
+
+                assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+                return;
+            } // should consume this notification, so prohibit spurious wakeups in next park.
+            Err(_) => {
+                unlock(self.lock.get());
+
+                panic!("inconsistent park state")
+            }
+        }
+
+        loop {
+            wait(self.cvar.get(), self.lock.get());
+
+            match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+                Ok(_) => break, // got a notification
+                Err(_) => {}    // spurious wakeup, go back to sleep
+            }
+        }
+
+        unlock(self.lock.get());
+    }
+
+    // This implementation doesn't require `unsafe`, but other implementations
+    // may assume this is only called by the thread that owns the Parker. Use
+    // `Pin` to guarantee a stable address for the mutex and condition variable.
+    pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        // Like `park` above we have a fast path for an already-notified thread, and
+        // afterwards we start coordinating for a sleep.
+        // return quickly.
+        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+            return;
+        }
+
+        lock(self.lock.get());
+        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+            Ok(_) => {}
+            Err(NOTIFIED) => {
+                // We must read again here, see `park`.
+                let old = self.state.swap(EMPTY, SeqCst);
+                unlock(self.lock.get());
+
+                assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+                return;
+            } // should consume this notification, so prohibit spurious wakeups in next park.
+            Err(_) => {
+                unlock(self.lock.get());
+                panic!("inconsistent park_timeout state")
+            }
+        }
+
+        // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+        // from a notification we just want to unconditionally set the state back to
+        // empty, either consuming a notification or un-flagging ourselves as
+        // parked.
+        wait_timeout(self.cvar.get(), self.lock.get(), dur);
+
+        match self.state.swap(EMPTY, SeqCst) {
+            NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
+            PARKED => unlock(self.lock.get()),   // no notification, alas
+            n => {
+                unlock(self.lock.get());
+                panic!("inconsistent park_timeout state: {n}")
+            }
+        }
+    }
+
+    pub fn unpark(self: Pin<&Self>) {
+        // To ensure the unparked thread will observe any writes we made
+        // before this call, we must perform a release operation that `park`
+        // can synchronize with. To do that we must write `NOTIFIED` even if
+        // `state` is already `NOTIFIED`. That is why this must be a swap
+        // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+        // on failure.
+        match self.state.swap(NOTIFIED, SeqCst) {
+            EMPTY => return,    // no one was waiting
+            NOTIFIED => return, // already unparked
+            PARKED => {}        // gotta go wake someone up
+            _ => panic!("inconsistent state in unpark"),
+        }
+
+        // There is a period between when the parked thread sets `state` to
+        // `PARKED` (or last checked `state` in the case of a spurious wake
+        // up) and when it actually waits on `cvar`. If we were to notify
+        // during this period it would be ignored and then when the parked
+        // thread went to sleep it would never wake up. Fortunately, it has
+        // `lock` locked at this stage so we can acquire `lock` to wait until
+        // it is ready to receive the notification.
+        //
+        // Releasing `lock` before the call to `notify_one` means that when the
+        // parked thread wakes it doesn't get woken only to have to wait for us
+        // to release `lock`.
+        unsafe {
+            lock(self.lock.get());
+            unlock(self.lock.get());
+            notify_one(self.cvar.get());
+        }
+    }
+}
+
+impl Drop for Parker {
+    fn drop(&mut self) {
+        unsafe {
+            libc::pthread_cond_destroy(self.cvar.get_mut());
+            libc::pthread_mutex_destroy(self.lock.get_mut());
+        }
+    }
+}
+
+unsafe impl Sync for Parker {}
+unsafe impl Send for Parker {}
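
Note: the parker added above backs `std::thread::park`/`Thread::unpark` on targets without futexes. The user-visible contract it has to preserve (at most one pending token, spurious wakeups allowed, a token sent before parking is not lost) can be exercised with plain std code like this (example only, not part of the patch):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    fn main() {
        let ready = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&ready);

        let parked = thread::spawn(move || {
            // Park until the other thread both sets the flag and unparks us.
            // Spurious wakeups are allowed, hence the loop around park().
            while !flag.load(Ordering::Acquire) {
                thread::park();
            }
        });

        thread::sleep(Duration::from_millis(10));
        ready.store(true, Ordering::Release);
        parked.thread().unpark();
        parked.join().unwrap();
    }
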
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index 498c94d0cdc..d43ceec9c8a 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -132,7 +132,7 @@ mod inner {
 
     #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
     pub struct SystemTime {
-        t: Timespec,
+        pub(in crate::sys::unix) t: Timespec,
     }
 
     pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
@@ -279,7 +279,7 @@ mod inner {
 
     #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct SystemTime {
-        t: Timespec,
+        pub(in crate::sys::unix) t: Timespec,
     }
 
     pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs
index da63c068384..e4ff21b25bd 100644
--- a/library/std/src/sys/unix/weak.rs
+++ b/library/std/src/sys/unix/weak.rs
@@ -25,7 +25,8 @@
 use crate::ffi::CStr;
 use crate::marker::PhantomData;
 use crate::mem;
-use crate::sync::atomic::{self, AtomicUsize, Ordering};
+use crate::ptr;
+use crate::sync::atomic::{self, AtomicPtr, Ordering};
 
 // We can use true weak linkage on ELF targets.
 #[cfg(not(any(target_os = "macos", target_os = "ios")))]
@@ -83,13 +84,13 @@ pub(crate) macro dlsym {
 }
 pub(crate) struct DlsymWeak<F> {
     name: &'static str,
-    addr: AtomicUsize,
+    func: AtomicPtr<libc::c_void>,
     _marker: PhantomData<F>,
 }
 
 impl<F> DlsymWeak<F> {
     pub(crate) const fn new(name: &'static str) -> Self {
-        DlsymWeak { name, addr: AtomicUsize::new(1), _marker: PhantomData }
+        DlsymWeak { name, func: AtomicPtr::new(ptr::invalid_mut(1)), _marker: PhantomData }
     }
 
     #[inline]
@@ -97,11 +98,11 @@ impl<F> DlsymWeak<F> {
         unsafe {
             // Relaxed is fine here because we fence before reading through the
             // pointer (see the comment below).
-            match self.addr.load(Ordering::Relaxed) {
-                1 => self.initialize(),
-                0 => None,
-                addr => {
-                    let func = mem::transmute_copy::<usize, F>(&addr);
+            match self.func.load(Ordering::Relaxed) {
+                func if func.addr() == 1 => self.initialize(),
+                func if func.is_null() => None,
+                func => {
+                    let func = mem::transmute_copy::<*mut libc::c_void, F>(&func);
                     // The caller is presumably going to read through this value
                     // (by calling the function we've dlsymed). This means we'd
                     // need to have loaded it with at least C11's consume
@@ -129,25 +130,22 @@ impl<F> DlsymWeak<F> {
     // Cold because it should only happen during first-time initialization.
     #[cold]
     unsafe fn initialize(&self) -> Option<F> {
-        assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
+        assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut libc::c_void>());
 
         let val = fetch(self.name);
         // This synchronizes with the acquire fence in `get`.
-        self.addr.store(val, Ordering::Release);
+        self.func.store(val, Ordering::Release);
 
-        match val {
-            0 => None,
-            addr => Some(mem::transmute_copy::<usize, F>(&addr)),
-        }
+        if val.is_null() { None } else { Some(mem::transmute_copy::<*mut libc::c_void, F>(&val)) }
     }
 }
 
-unsafe fn fetch(name: &str) -> usize {
+unsafe fn fetch(name: &str) -> *mut libc::c_void {
     let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
         Ok(cstr) => cstr,
-        Err(..) => return 0,
+        Err(..) => return ptr::null_mut(),
     };
-    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
+    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr())
 }
 
 #[cfg(not(any(target_os = "linux", target_os = "android")))]
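
Note: the weak.rs change swaps the `AtomicUsize` cache for an `AtomicPtr<c_void>` so the stored `dlsym` result keeps pointer provenance (`ptr::invalid_mut(1)` serves as the "not yet looked up" sentinel). The same caching idea can be written on stable Rust with a null-pointer sentinel instead, as in this sketch (not the std implementation; `GETRANDOM` and `getrandom_addr` are made-up names, and the `libc` crate is assumed):

    use std::ffi::CStr;
    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    // Null means "not looked up yet or missing"; this is simpler than std's
    // three-state scheme, but a missing symbol is re-queried on every call.
    static GETRANDOM: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut());

    unsafe fn getrandom_addr() -> *mut libc::c_void {
        let cached = GETRANDOM.load(Ordering::Acquire);
        if !cached.is_null() {
            return cached;
        }
        let name = CStr::from_bytes_with_nul(b"getrandom\0").unwrap();
        let addr = libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr());
        GETRANDOM.store(addr, Ordering::Release);
        addr
    }
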
