Diffstat (limited to 'library/std/src/sys/unix')
| -rw-r--r-- | library/std/src/sys/unix/fd.rs | 19 |
| -rw-r--r-- | library/std/src/sys/unix/fs.rs | 114 |
| -rw-r--r-- | library/std/src/sys/unix/futex.rs | 230 |
| -rw-r--r-- | library/std/src/sys/unix/locks/futex.rs | 98 |
| -rw-r--r-- | library/std/src/sys/unix/locks/futex_rwlock.rs | 4 |
| -rw-r--r-- | library/std/src/sys/unix/locks/mod.rs | 8 |
| -rw-r--r-- | library/std/src/sys/unix/locks/pthread_remutex.rs | 46 |
| -rw-r--r-- | library/std/src/sys/unix/mod.rs | 1 |
| -rw-r--r-- | library/std/src/sys/unix/os.rs | 22 |
| -rw-r--r-- | library/std/src/sys/unix/process/process_common.rs | 3 |
| -rw-r--r-- | library/std/src/sys/unix/process/process_common/tests.rs | 6 |
| -rw-r--r-- | library/std/src/sys/unix/process/process_unix.rs | 3 |
| -rw-r--r-- | library/std/src/sys/unix/thread_parker.rs | 269 |
| -rw-r--r-- | library/std/src/sys/unix/time.rs | 231 |
| -rw-r--r-- | library/std/src/sys/unix/weak.rs | 32 |
15 files changed, 705 insertions(+), 381 deletions(-)
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs index 3de7c68a686..40a64585802 100644 --- a/library/std/src/sys/unix/fd.rs +++ b/library/std/src/sys/unix/fd.rs @@ -11,6 +11,21 @@ use crate::sys_common::{AsInner, FromInner, IntoInner}; use libc::{c_int, c_void}; +#[cfg(any( + target_os = "android", + target_os = "linux", + target_os = "emscripten", + target_os = "l4re" +))] +use libc::off64_t; +#[cfg(not(any( + target_os = "linux", + target_os = "emscripten", + target_os = "l4re", + target_os = "android" +)))] +use libc::off_t as off64_t; + #[derive(Debug)] pub struct FileDesc(OwnedFd); @@ -109,7 +124,7 @@ impl FileDesc { self.as_raw_fd(), buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT), - offset as i64, + offset as off64_t, )) .map(|n| n as usize) } @@ -176,7 +191,7 @@ impl FileDesc { self.as_raw_fd(), buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT), - offset as i64, + offset as off64_t, )) .map(|n| n as usize) } diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs index 7181451de57..61fb2814018 100644 --- a/library/std/src/sys/unix/fs.rs +++ b/library/std/src/sys/unix/fs.rs @@ -113,10 +113,19 @@ cfg_has_statx! {{ // This is needed to check if btime is supported by the filesystem. stx_mask: u32, stx_btime: libc::statx_timestamp, + // With statx, we can overcome 32-bit `time_t` too. + #[cfg(target_pointer_width = "32")] + stx_atime: libc::statx_timestamp, + #[cfg(target_pointer_width = "32")] + stx_ctime: libc::statx_timestamp, + #[cfg(target_pointer_width = "32")] + stx_mtime: libc::statx_timestamp, + } - // We prefer `statx` on Linux if available, which contains file creation time. - // Default `stat64` contains no creation time. + // We prefer `statx` on Linux if available, which contains file creation time, + // as well as 64-bit timestamps of all kinds. + // Default `stat64` contains no creation time and may have 32-bit `time_t`. unsafe fn try_statx( fd: c_int, path: *const c_char, @@ -192,6 +201,13 @@ cfg_has_statx! {{ let extra = StatxExtraFields { stx_mask: buf.stx_mask, stx_btime: buf.stx_btime, + // Store full times to avoid 32-bit `time_t` truncation. + #[cfg(target_pointer_width = "32")] + stx_atime: buf.stx_atime, + #[cfg(target_pointer_width = "32")] + stx_ctime: buf.stx_ctime, + #[cfg(target_pointer_width = "32")] + stx_mtime: buf.stx_mtime, }; Some(Ok(FileAttr { stat, statx_extra_fields: Some(extra) })) @@ -310,6 +326,36 @@ cfg_has_statx! 
{{ fn from_stat64(stat: stat64) -> Self { Self { stat, statx_extra_fields: None } } + + #[cfg(target_pointer_width = "32")] + pub fn stx_mtime(&self) -> Option<&libc::statx_timestamp> { + if let Some(ext) = &self.statx_extra_fields { + if (ext.stx_mask & libc::STATX_MTIME) != 0 { + return Some(&ext.stx_mtime); + } + } + None + } + + #[cfg(target_pointer_width = "32")] + pub fn stx_atime(&self) -> Option<&libc::statx_timestamp> { + if let Some(ext) = &self.statx_extra_fields { + if (ext.stx_mask & libc::STATX_ATIME) != 0 { + return Some(&ext.stx_atime); + } + } + None + } + + #[cfg(target_pointer_width = "32")] + pub fn stx_ctime(&self) -> Option<&libc::statx_timestamp> { + if let Some(ext) = &self.statx_extra_fields { + if (ext.stx_mask & libc::STATX_CTIME) != 0 { + return Some(&ext.stx_ctime); + } + } + None + } } } else { impl FileAttr { @@ -335,24 +381,15 @@ impl FileAttr { #[cfg(target_os = "netbsd")] impl FileAttr { pub fn modified(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_mtime as libc::time_t, - tv_nsec: self.stat.st_mtimensec as libc::c_long, - })) + Ok(SystemTime::new(self.stat.st_mtime as i64, self.stat.st_mtimensec as i64)) } pub fn accessed(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_atime as libc::time_t, - tv_nsec: self.stat.st_atimensec as libc::c_long, - })) + Ok(SystemTime::new(self.stat.st_atime as i64, self.stat.st_atimensec as i64)) } pub fn created(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_birthtime as libc::time_t, - tv_nsec: self.stat.st_birthtimensec as libc::c_long, - })) + Ok(SystemTime::new(self.stat.st_birthtime as i64, self.stat.st_birthtimensec as i64)) } } @@ -360,34 +397,36 @@ impl FileAttr { impl FileAttr { #[cfg(all(not(target_os = "vxworks"), not(target_os = "espidf")))] pub fn modified(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_mtime as libc::time_t, - tv_nsec: self.stat.st_mtime_nsec as _, - })) + #[cfg(target_pointer_width = "32")] + cfg_has_statx! { + if let Some(mtime) = self.stx_mtime() { + return Ok(SystemTime::new(mtime.tv_sec, mtime.tv_nsec as i64)); + } + } + + Ok(SystemTime::new(self.stat.st_mtime as i64, self.stat.st_mtime_nsec as i64)) } #[cfg(any(target_os = "vxworks", target_os = "espidf"))] pub fn modified(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_mtime as libc::time_t, - tv_nsec: 0, - })) + Ok(SystemTime::new(self.stat.st_mtime as i64, 0)) } #[cfg(all(not(target_os = "vxworks"), not(target_os = "espidf")))] pub fn accessed(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_atime as libc::time_t, - tv_nsec: self.stat.st_atime_nsec as _, - })) + #[cfg(target_pointer_width = "32")] + cfg_has_statx! 
{ + if let Some(atime) = self.stx_atime() { + return Ok(SystemTime::new(atime.tv_sec, atime.tv_nsec as i64)); + } + } + + Ok(SystemTime::new(self.stat.st_atime as i64, self.stat.st_atime_nsec as i64)) } #[cfg(any(target_os = "vxworks", target_os = "espidf"))] pub fn accessed(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_atime as libc::time_t, - tv_nsec: 0, - })) + Ok(SystemTime::new(self.stat.st_atime as i64, 0)) } #[cfg(any( @@ -397,10 +436,7 @@ impl FileAttr { target_os = "ios" ))] pub fn created(&self) -> io::Result<SystemTime> { - Ok(SystemTime::from(libc::timespec { - tv_sec: self.stat.st_birthtime as libc::time_t, - tv_nsec: self.stat.st_birthtime_nsec as libc::c_long, - })) + Ok(SystemTime::new(self.stat.st_birthtime as i64, self.stat.st_birthtime_nsec as i64)) } #[cfg(not(any( @@ -413,10 +449,7 @@ impl FileAttr { cfg_has_statx! { if let Some(ext) = &self.statx_extra_fields { return if (ext.stx_mask & libc::STATX_BTIME) != 0 { - Ok(SystemTime::from(libc::timespec { - tv_sec: ext.stx_btime.tv_sec as libc::time_t, - tv_nsec: ext.stx_btime.tv_nsec as _, - })) + Ok(SystemTime::new(ext.stx_btime.tv_sec, ext.stx_btime.tv_nsec as i64)) } else { Err(io::const_io_error!( io::ErrorKind::Uncategorized, @@ -966,7 +999,7 @@ impl File { SeekFrom::End(off) => (libc::SEEK_END, off), SeekFrom::Current(off) => (libc::SEEK_CUR, off), }; - let n = cvt(unsafe { lseek64(self.as_raw_fd(), pos, whence) })?; + let n = cvt(unsafe { lseek64(self.as_raw_fd(), pos as off64_t, whence) })?; Ok(n as u64) } @@ -1647,8 +1680,9 @@ mod remove_dir_impl { fn remove_dir_all_recursive(parent_fd: Option<RawFd>, path: &CStr) -> io::Result<()> { // try opening as directory let fd = match openat_nofollow_dironly(parent_fd, &path) { - Err(err) if err.raw_os_error() == Some(libc::ENOTDIR) => { + Err(err) if matches!(err.raw_os_error(), Some(libc::ENOTDIR | libc::ELOOP)) => { // not a directory - don't traverse further + // (for symlinks, older Linux kernels may return ELOOP instead of ENOTDIR) return match parent_fd { // unlink... Some(parent_fd) => { diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs index 62760373a6a..c1966d67078 100644 --- a/library/std/src/sys/unix/futex.rs +++ b/library/std/src/sys/unix/futex.rs @@ -1,7 +1,10 @@ #![cfg(any( target_os = "linux", target_os = "android", - all(target_os = "emscripten", target_feature = "atomics") + all(target_os = "emscripten", target_feature = "atomics"), + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", ))] use crate::sync::atomic::AtomicU32; @@ -12,7 +15,7 @@ use crate::time::Duration; /// Returns directly if the futex doesn't hold the expected value. /// /// Returns false on timeout, and true in all other cases. -#[cfg(any(target_os = "linux", target_os = "android"))] +#[cfg(any(target_os = "linux", target_os = "android", target_os = "freebsd"))] pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool { use super::time::Timespec; use crate::ptr::null; @@ -21,8 +24,9 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) - // Calculate the timeout as an absolute timespec. // // Overflows are rounded up to an infinite timeout (None). 
- let timespec = - timeout.and_then(|d| Some(Timespec::now(libc::CLOCK_MONOTONIC).checked_add_duration(&d)?)); + let timespec = timeout + .and_then(|d| Some(Timespec::now(libc::CLOCK_MONOTONIC).checked_add_duration(&d)?)) + .and_then(|t| t.to_timespec()); loop { // No need to wait if the value already changed. @@ -30,18 +34,43 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) - return true; } - // Use FUTEX_WAIT_BITSET rather than FUTEX_WAIT to be able to give an - // absolute time rather than a relative time. let r = unsafe { - libc::syscall( - libc::SYS_futex, - futex as *const AtomicU32, - libc::FUTEX_WAIT_BITSET | libc::FUTEX_PRIVATE_FLAG, - expected, - timespec.as_ref().map_or(null(), |t| &t.t as *const libc::timespec), - null::<u32>(), // This argument is unused for FUTEX_WAIT_BITSET. - !0u32, // A full bitmask, to make it behave like a regular FUTEX_WAIT. - ) + cfg_if::cfg_if! { + if #[cfg(target_os = "freebsd")] { + // FreeBSD doesn't have futex(), but it has + // _umtx_op(UMTX_OP_WAIT_UINT_PRIVATE), which is nearly + // identical. It supports absolute timeouts through a flag + // in the _umtx_time struct. + let umtx_timeout = timespec.map(|t| libc::_umtx_time { + _timeout: t, + _flags: libc::UMTX_ABSTIME, + _clockid: libc::CLOCK_MONOTONIC as u32, + }); + let umtx_timeout_ptr = umtx_timeout.as_ref().map_or(null(), |t| t as *const _); + let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| crate::mem::size_of_val(t)); + libc::_umtx_op( + futex as *const AtomicU32 as *mut _, + libc::UMTX_OP_WAIT_UINT_PRIVATE, + expected as libc::c_ulong, + crate::ptr::invalid_mut(umtx_timeout_size), + umtx_timeout_ptr as *mut _, + ) + } else if #[cfg(any(target_os = "linux", target_os = "android"))] { + // Use FUTEX_WAIT_BITSET rather than FUTEX_WAIT to be able to give an + // absolute time rather than a relative time. + libc::syscall( + libc::SYS_futex, + futex as *const AtomicU32, + libc::FUTEX_WAIT_BITSET | libc::FUTEX_PRIVATE_FLAG, + expected, + timespec.as_ref().map_or(null(), |t| t as *const libc::timespec), + null::<u32>(), // This argument is unused for FUTEX_WAIT_BITSET. + !0u32, // A full bitmask, to make it behave like a regular FUTEX_WAIT. + ) + } else { + compile_error!("unknown target_os"); + } + } }; match (r < 0).then(super::os::errno) { @@ -52,59 +81,164 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) - } } -#[cfg(target_os = "emscripten")] -pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) { - extern "C" { - fn emscripten_futex_wait( - addr: *const AtomicU32, - val: libc::c_uint, - max_wait_ms: libc::c_double, - ) -> libc::c_int; - } - - unsafe { - emscripten_futex_wait( - futex, - expected, - timeout.map_or(crate::f64::INFINITY, |d| d.as_secs_f64() * 1000.0), - ); - } -} - /// Wake up one thread that's blocked on futex_wait on this futex. /// /// Returns true if this actually woke up such a thread, /// or false if no thread was waiting on this futex. +/// +/// On some platforms, this always returns false. #[cfg(any(target_os = "linux", target_os = "android"))] pub fn futex_wake(futex: &AtomicU32) -> bool { + let ptr = futex as *const AtomicU32; + let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG; + unsafe { libc::syscall(libc::SYS_futex, ptr, op, 1) > 0 } +} + +/// Wake up all threads that are waiting on futex_wait on this futex. 
+#[cfg(any(target_os = "linux", target_os = "android"))] +pub fn futex_wake_all(futex: &AtomicU32) { + let ptr = futex as *const AtomicU32; + let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG; + unsafe { + libc::syscall(libc::SYS_futex, ptr, op, i32::MAX); + } +} + +// FreeBSD doesn't tell us how many threads are woken up, so this always returns false. +#[cfg(target_os = "freebsd")] +pub fn futex_wake(futex: &AtomicU32) -> bool { + use crate::ptr::null_mut; unsafe { - libc::syscall( - libc::SYS_futex, - futex as *const AtomicU32, - libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG, + libc::_umtx_op( + futex as *const AtomicU32 as *mut _, + libc::UMTX_OP_WAKE_PRIVATE, 1, - ) > 0 + null_mut(), + null_mut(), + ) + }; + false +} + +#[cfg(target_os = "freebsd")] +pub fn futex_wake_all(futex: &AtomicU32) { + use crate::ptr::null_mut; + unsafe { + libc::_umtx_op( + futex as *const AtomicU32 as *mut _, + libc::UMTX_OP_WAKE_PRIVATE, + i32::MAX as libc::c_ulong, + null_mut(), + null_mut(), + ) + }; +} + +#[cfg(target_os = "openbsd")] +pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool { + use crate::convert::TryInto; + use crate::ptr::{null, null_mut}; + let timespec = timeout.and_then(|d| { + Some(libc::timespec { + // Sleep forever if the timeout is longer than fits in a timespec. + tv_sec: d.as_secs().try_into().ok()?, + // This conversion never truncates, as subsec_nanos is always <1e9. + tv_nsec: d.subsec_nanos() as _, + }) + }); + + let r = unsafe { + libc::futex( + futex as *const AtomicU32 as *mut u32, + libc::FUTEX_WAIT, + expected as i32, + timespec.as_ref().map_or(null(), |t| t as *const libc::timespec), + null_mut(), + ) + }; + + r == 0 || super::os::errno() != libc::ETIMEDOUT +} + +#[cfg(target_os = "openbsd")] +pub fn futex_wake(futex: &AtomicU32) -> bool { + use crate::ptr::{null, null_mut}; + unsafe { + libc::futex(futex as *const AtomicU32 as *mut u32, libc::FUTEX_WAKE, 1, null(), null_mut()) + > 0 } } -/// Wake up all threads that are waiting on futex_wait on this futex. -#[cfg(any(target_os = "linux", target_os = "android"))] +#[cfg(target_os = "openbsd")] pub fn futex_wake_all(futex: &AtomicU32) { + use crate::ptr::{null, null_mut}; unsafe { - libc::syscall( - libc::SYS_futex, - futex as *const AtomicU32, - libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG, + libc::futex( + futex as *const AtomicU32 as *mut u32, + libc::FUTEX_WAKE, i32::MAX, + null(), + null_mut(), ); } } -#[cfg(target_os = "emscripten")] +#[cfg(target_os = "dragonfly")] +pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool { + use crate::convert::TryFrom; + + // A timeout of 0 means infinite. + // We round smaller timeouts up to 1 millisecond. + // Overflows are rounded up to an infinite timeout. + let timeout_ms = + timeout.and_then(|d| Some(i32::try_from(d.as_millis()).ok()?.max(1))).unwrap_or(0); + + let r = unsafe { + libc::umtx_sleep(futex as *const AtomicU32 as *const i32, expected as i32, timeout_ms) + }; + + r == 0 || super::os::errno() != libc::ETIMEDOUT +} + +// DragonflyBSD doesn't tell us how many threads are woken up, so this always returns false. 
+#[cfg(target_os = "dragonfly")] pub fn futex_wake(futex: &AtomicU32) -> bool { - extern "C" { - fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int; + unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, 1) }; + false +} + +#[cfg(target_os = "dragonfly")] +pub fn futex_wake_all(futex: &AtomicU32) { + unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, i32::MAX) }; +} + +#[cfg(target_os = "emscripten")] +extern "C" { + fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int; + fn emscripten_futex_wait( + addr: *const AtomicU32, + val: libc::c_uint, + max_wait_ms: libc::c_double, + ) -> libc::c_int; +} + +#[cfg(target_os = "emscripten")] +pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool { + unsafe { + emscripten_futex_wait( + futex, + expected, + timeout.map_or(f64::INFINITY, |d| d.as_secs_f64() * 1000.0), + ) != -libc::ETIMEDOUT } +} +#[cfg(target_os = "emscripten")] +pub fn futex_wake(futex: &AtomicU32) -> bool { unsafe { emscripten_futex_wake(futex, 1) > 0 } } + +#[cfg(target_os = "emscripten")] +pub fn futex_wake_all(futex: &AtomicU32) { + unsafe { emscripten_futex_wake(futex, i32::MAX) }; +} diff --git a/library/std/src/sys/unix/locks/futex.rs b/library/std/src/sys/unix/locks/futex.rs index b166e7c453c..7a63af1ad7c 100644 --- a/library/std/src/sys/unix/locks/futex.rs +++ b/library/std/src/sys/unix/locks/futex.rs @@ -1,6 +1,5 @@ -use crate::cell::UnsafeCell; use crate::sync::atomic::{ - AtomicU32, AtomicUsize, + AtomicU32, Ordering::{Acquire, Relaxed, Release}, }; use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all}; @@ -163,98 +162,3 @@ impl Condvar { r } } - -/// A reentrant mutex. Used by stdout().lock() and friends. -/// -/// The 'owner' field tracks which thread has locked the mutex. -/// -/// We use current_thread_unique_ptr() as the thread identifier, -/// which is just the address of a thread local variable. -/// -/// If `owner` is set to the identifier of the current thread, -/// we assume the mutex is already locked and instead of locking it again, -/// we increment `lock_count`. -/// -/// When unlocking, we decrement `lock_count`, and only unlock the mutex when -/// it reaches zero. -/// -/// `lock_count` is protected by the mutex and only accessed by the thread that has -/// locked the mutex, so needs no synchronization. -/// -/// `owner` can be checked by other threads that want to see if they already -/// hold the lock, so needs to be atomic. If it compares equal, we're on the -/// same thread that holds the mutex and memory access can use relaxed ordering -/// since we're not dealing with multiple threads. If it compares unequal, -/// synchronization is left to the mutex, making relaxed memory ordering for -/// the `owner` field fine in all cases. 
-pub struct ReentrantMutex { - mutex: Mutex, - owner: AtomicUsize, - lock_count: UnsafeCell<u32>, -} - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -impl ReentrantMutex { - #[inline] - pub const unsafe fn uninitialized() -> Self { - Self { mutex: Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0) } - } - - #[inline] - pub unsafe fn init(&self) {} - - #[inline] - pub unsafe fn destroy(&self) {} - - pub unsafe fn try_lock(&self) -> bool { - let this_thread = current_thread_unique_ptr(); - if self.owner.load(Relaxed) == this_thread { - self.increment_lock_count(); - true - } else if self.mutex.try_lock() { - self.owner.store(this_thread, Relaxed); - debug_assert_eq!(*self.lock_count.get(), 0); - *self.lock_count.get() = 1; - true - } else { - false - } - } - - pub unsafe fn lock(&self) { - let this_thread = current_thread_unique_ptr(); - if self.owner.load(Relaxed) == this_thread { - self.increment_lock_count(); - } else { - self.mutex.lock(); - self.owner.store(this_thread, Relaxed); - debug_assert_eq!(*self.lock_count.get(), 0); - *self.lock_count.get() = 1; - } - } - - unsafe fn increment_lock_count(&self) { - *self.lock_count.get() = (*self.lock_count.get()) - .checked_add(1) - .expect("lock count overflow in reentrant mutex"); - } - - pub unsafe fn unlock(&self) { - *self.lock_count.get() -= 1; - if *self.lock_count.get() == 0 { - self.owner.store(0, Relaxed); - self.mutex.unlock(); - } - } -} - -/// Get an address that is unique per running thread. -/// -/// This can be used as a non-null usize-sized ID. -pub fn current_thread_unique_ptr() -> usize { - // Use a non-drop type to make sure it's still available during thread destruction. - thread_local! { static X: u8 = const { 0 } } - X.with(|x| <*const _>::addr(x)) -} diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs index e42edb25858..57f6d58840d 100644 --- a/library/std/src/sys/unix/locks/futex_rwlock.rs +++ b/library/std/src/sys/unix/locks/futex_rwlock.rs @@ -284,6 +284,10 @@ impl RwLock { fn wake_writer(&self) -> bool { self.writer_notify.fetch_add(1, Release); futex_wake(&self.writer_notify) + // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke + // up any threads or not, and always return `false` here. That still + // results in correct behaviour: it just means readers get woken up as + // well in case both readers and writers were waiting. } /// Spin for a while, but stop directly at the given condition. diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs index e0404f40c69..04c5c489fc9 100644 --- a/library/std/src/sys/unix/locks/mod.rs +++ b/library/std/src/sys/unix/locks/mod.rs @@ -2,18 +2,20 @@ cfg_if::cfg_if! 
{ if #[cfg(any( target_os = "linux", target_os = "android", + all(target_os = "emscripten", target_feature = "atomics"), + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", ))] { mod futex; mod futex_rwlock; - pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar, ReentrantMutex}; + pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar}; pub use futex_rwlock::{RwLock, MovableRwLock}; } else { mod pthread_mutex; - mod pthread_remutex; mod pthread_rwlock; mod pthread_condvar; pub use pthread_mutex::{Mutex, MovableMutex}; - pub use pthread_remutex::ReentrantMutex; pub use pthread_rwlock::{RwLock, MovableRwLock}; pub use pthread_condvar::{Condvar, MovableCondvar}; } diff --git a/library/std/src/sys/unix/locks/pthread_remutex.rs b/library/std/src/sys/unix/locks/pthread_remutex.rs deleted file mode 100644 index b006181ee3a..00000000000 --- a/library/std/src/sys/unix/locks/pthread_remutex.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::pthread_mutex::PthreadMutexAttr; -use crate::cell::UnsafeCell; -use crate::mem::MaybeUninit; -use crate::sys::cvt_nz; - -pub struct ReentrantMutex { - inner: UnsafeCell<libc::pthread_mutex_t>, -} - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -impl ReentrantMutex { - pub const unsafe fn uninitialized() -> ReentrantMutex { - ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) } - } - - pub unsafe fn init(&self) { - let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit(); - cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap(); - let attr = PthreadMutexAttr(&mut attr); - cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_RECURSIVE)) - .unwrap(); - cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap(); - } - - pub unsafe fn lock(&self) { - let result = libc::pthread_mutex_lock(self.inner.get()); - debug_assert_eq!(result, 0); - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - libc::pthread_mutex_trylock(self.inner.get()) == 0 - } - - pub unsafe fn unlock(&self) { - let result = libc::pthread_mutex_unlock(self.inner.get()); - debug_assert_eq!(result, 0); - } - - pub unsafe fn destroy(&self) { - let result = libc::pthread_mutex_destroy(self.inner.get()); - debug_assert_eq!(result, 0); - } -} diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs index aedeb02e656..8e909aab7f0 100644 --- a/library/std/src/sys/unix/mod.rs +++ b/library/std/src/sys/unix/mod.rs @@ -39,6 +39,7 @@ pub mod stdio; pub mod thread; pub mod thread_local_dtor; pub mod thread_local_key; +pub mod thread_parker; pub mod time; #[cfg(target_os = "espidf")] diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs index 1be733ba106..92bea9346d8 100644 --- a/library/std/src/sys/unix/os.rs +++ b/library/std/src/sys/unix/os.rs @@ -427,7 +427,7 @@ pub fn current_exe() -> io::Result<PathBuf> { crate::fs::read_to_string("sys:exe").map(PathBuf::from) } -#[cfg(any(target_os = "fuchsia", target_os = "l4re"))] +#[cfg(target_os = "l4re")] pub fn current_exe() -> io::Result<PathBuf> { use crate::io::ErrorKind; Err(io::const_io_error!(ErrorKind::Unsupported, "Not yet implemented!")) @@ -451,6 +451,26 @@ pub fn current_exe() -> io::Result<PathBuf> { super::unsupported::unsupported() } +#[cfg(target_os = "fuchsia")] +pub fn current_exe() -> io::Result<PathBuf> { + use crate::io::ErrorKind; + + #[cfg(test)] + use realstd::env; + + #[cfg(not(test))] + use crate::env; + + let exe_path = 
env::args().next().ok_or(io::const_io_error!( + ErrorKind::Uncategorized, + "an executable path was not found because no arguments were provided through argv" + ))?; + let path = PathBuf::from(exe_path); + + // Prepend the current working directory to the path if it's not absolute. + if !path.is_absolute() { getcwd().map(|cwd| cwd.join(path)) } else { Ok(path) } +} + pub struct Env { iter: vec::IntoIter<(OsString, OsString)>, } diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs index 27bee714f5b..bca1b65a7fc 100644 --- a/library/std/src/sys/unix/process/process_common.rs +++ b/library/std/src/sys/unix/process/process_common.rs @@ -35,7 +35,8 @@ cfg_if::cfg_if! { // Android with api less than 21 define sig* functions inline, so it is not // available for dynamic link. Implementing sigemptyset and sigaddset allow us // to support older Android version (independent of libc version). -// The following implementations are based on https://git.io/vSkNf +// The following implementations are based on +// https://github.com/aosp-mirror/platform_bionic/blob/ad8dcd6023294b646e5a8288c0ed431b0845da49/libc/include/android/legacy_signal_inlines.h cfg_if::cfg_if! { if #[cfg(target_os = "android")] { pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int { diff --git a/library/std/src/sys/unix/process/process_common/tests.rs b/library/std/src/sys/unix/process/process_common/tests.rs index 9f1a645372f..1956b3692a7 100644 --- a/library/std/src/sys/unix/process/process_common/tests.rs +++ b/library/std/src/sys/unix/process/process_common/tests.rs @@ -3,7 +3,7 @@ use super::*; use crate::ffi::OsStr; use crate::mem; use crate::ptr; -use crate::sys::cvt; +use crate::sys::{cvt, cvt_nz}; macro_rules! t { ($e:expr) => { @@ -39,7 +39,7 @@ fn test_process_mask() { let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit(); t!(cvt(sigemptyset(set.as_mut_ptr()))); t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT))); - t!(cvt(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr()))); + t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr()))); cmd.stdin(Stdio::MakePipe); cmd.stdout(Stdio::MakePipe); @@ -48,7 +48,7 @@ fn test_process_mask() { let stdin_write = pipes.stdin.take().unwrap(); let stdout_read = pipes.stdout.take().unwrap(); - t!(cvt(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut()))); + t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut()))); t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT))); // We need to wait until SIGINT is definitely delivered. The diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs index 3d305cd7310..d48faaa88fb 100644 --- a/library/std/src/sys/unix/process/process_unix.rs +++ b/library/std/src/sys/unix/process/process_unix.rs @@ -328,6 +328,7 @@ impl Command { #[cfg(not(target_os = "emscripten"))] { use crate::mem::MaybeUninit; + use crate::sys::cvt_nz; // Reset signal handling so the child process starts in a // standardized state. libstd ignores SIGPIPE, and signal-handling // libraries often set a mask. Child processes inherit ignored @@ -337,7 +338,7 @@ impl Command { // we're about to run. 
let mut set = MaybeUninit::<libc::sigset_t>::uninit(); cvt(sigemptyset(set.as_mut_ptr()))?; - cvt(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?; + cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?; #[cfg(target_os = "android")] // see issue #88585 { diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs new file mode 100644 index 00000000000..30ed2ec7f54 --- /dev/null +++ b/library/std/src/sys/unix/thread_parker.rs @@ -0,0 +1,269 @@ +//! Thread parking without `futex` using the `pthread` synchronization primitives. + +#![cfg(not(any( + target_os = "linux", + target_os = "android", + all(target_os = "emscripten", target_feature = "atomics"), + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", +)))] + +use crate::cell::UnsafeCell; +use crate::marker::PhantomPinned; +use crate::pin::Pin; +use crate::ptr::addr_of_mut; +use crate::sync::atomic::AtomicUsize; +use crate::sync::atomic::Ordering::SeqCst; +use crate::time::Duration; + +const EMPTY: usize = 0; +const PARKED: usize = 1; +const NOTIFIED: usize = 2; + +unsafe fn lock(lock: *mut libc::pthread_mutex_t) { + let r = libc::pthread_mutex_lock(lock); + debug_assert_eq!(r, 0); +} + +unsafe fn unlock(lock: *mut libc::pthread_mutex_t) { + let r = libc::pthread_mutex_unlock(lock); + debug_assert_eq!(r, 0); +} + +unsafe fn notify_one(cond: *mut libc::pthread_cond_t) { + let r = libc::pthread_cond_signal(cond); + debug_assert_eq!(r, 0); +} + +unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) { + let r = libc::pthread_cond_wait(cond, lock); + debug_assert_eq!(r, 0); +} + +const TIMESPEC_MAX: libc::timespec = + libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 }; + +unsafe fn wait_timeout( + cond: *mut libc::pthread_cond_t, + lock: *mut libc::pthread_mutex_t, + dur: Duration, +) { + // Use the system clock on systems that do not support pthread_condattr_setclock. + // This unfortunately results in problems when the system time changes. + #[cfg(any(target_os = "macos", target_os = "ios", target_os = "espidf"))] + let (now, dur) = { + use super::time::SystemTime; + use crate::cmp::min; + + // OSX implementation of `pthread_cond_timedwait` is buggy + // with super long durations. When duration is greater than + // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` + // in macOS Sierra return error 316. + // + // This program demonstrates the issue: + // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c + // + // To work around this issue, and possible bugs of other OSes, timeout + // is clamped to 1000 years, which is allowable per the API of `park_timeout` + // because of spurious wakeups. + let dur = min(dur, Duration::from_secs(1000 * 365 * 86400)); + let now = SystemTime::now().t; + (now, dur) + }; + // Use the monotonic clock on other systems. 
+ #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "espidf")))] + let (now, dur) = { + use super::time::Timespec; + + (Timespec::now(libc::CLOCK_MONOTONIC), dur) + }; + + let timeout = + now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX); + let r = libc::pthread_cond_timedwait(cond, lock, &timeout); + debug_assert!(r == libc::ETIMEDOUT || r == 0); +} + +pub struct Parker { + state: AtomicUsize, + lock: UnsafeCell<libc::pthread_mutex_t>, + cvar: UnsafeCell<libc::pthread_cond_t>, + // The `pthread` primitives require a stable address, so make this struct `!Unpin`. + _pinned: PhantomPinned, +} + +impl Parker { + /// Construct the UNIX parker in-place. + /// + /// # Safety + /// The constructed parker must never be moved. + pub unsafe fn new(parker: *mut Parker) { + // Use the default mutex implementation to allow for simpler initialization. + // This could lead to undefined behaviour when deadlocking. This is avoided + // by not deadlocking. Note in particular the unlocking operation before any + // panic, as code after the panic could try to park again. + addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY)); + addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)); + + cfg_if::cfg_if! { + if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "l4re", + target_os = "android", + target_os = "redox" + ))] { + addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)); + } else if #[cfg(target_os = "espidf")] { + let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null()); + assert_eq!(r, 0); + } else { + use crate::mem::MaybeUninit; + let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit(); + let r = libc::pthread_condattr_init(attr.as_mut_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); + assert_eq!(r, 0); + let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); + assert_eq!(r, 0); + } + } + } + + // This implementation doesn't require `unsafe`, but other implementations + // may assume this is only called by the thread that owns the Parker. + pub unsafe fn park(self: Pin<&Self>) { + // If we were previously notified then we consume this notification and + // return quickly. + if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { + return; + } + + // Otherwise we need to coordinate going to sleep + lock(self.lock.get()); + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read here, even though we know it will be `NOTIFIED`. + // This is because `unpark` may have been called again since we read + // `NOTIFIED` in the `compare_exchange` above. We must perform an + // acquire operation that synchronizes with that `unpark` to observe + // any writes it made before the call to unpark. To do that we must + // read from the write it made to `state`. + let old = self.state.swap(EMPTY, SeqCst); + + unlock(self.lock.get()); + + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } // should consume this notification, so prohibit spurious wakeups in next park. 
+ Err(_) => { + unlock(self.lock.get()); + + panic!("inconsistent park state") + } + } + + loop { + wait(self.cvar.get(), self.lock.get()); + + match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) { + Ok(_) => break, // got a notification + Err(_) => {} // spurious wakeup, go back to sleep + } + } + + unlock(self.lock.get()); + } + + // This implementation doesn't require `unsafe`, but other implementations + // may assume this is only called by the thread that owns the Parker. Use + // `Pin` to guarantee a stable address for the mutex and condition variable. + pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { + // Like `park` above we have a fast path for an already-notified thread, and + // afterwards we start coordinating for a sleep. + // return quickly. + if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { + return; + } + + lock(self.lock.get()); + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read again here, see `park`. + let old = self.state.swap(EMPTY, SeqCst); + unlock(self.lock.get()); + + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } // should consume this notification, so prohibit spurious wakeups in next park. + Err(_) => { + unlock(self.lock.get()); + panic!("inconsistent park_timeout state") + } + } + + // Wait with a timeout, and if we spuriously wake up or otherwise wake up + // from a notification we just want to unconditionally set the state back to + // empty, either consuming a notification or un-flagging ourselves as + // parked. + wait_timeout(self.cvar.get(), self.lock.get(), dur); + + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED => unlock(self.lock.get()), // got a notification, hurray! + PARKED => unlock(self.lock.get()), // no notification, alas + n => { + unlock(self.lock.get()); + panic!("inconsistent park_timeout state: {n}") + } + } + } + + pub fn unpark(self: Pin<&Self>) { + // To ensure the unparked thread will observe any writes we made + // before this call, we must perform a release operation that `park` + // can synchronize with. To do that we must write `NOTIFIED` even if + // `state` is already `NOTIFIED`. That is why this must be a swap + // rather than a compare-and-swap that returns if it reads `NOTIFIED` + // on failure. + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => return, // no one was waiting + NOTIFIED => return, // already unparked + PARKED => {} // gotta go wake someone up + _ => panic!("inconsistent state in unpark"), + } + + // There is a period between when the parked thread sets `state` to + // `PARKED` (or last checked `state` in the case of a spurious wake + // up) and when it actually waits on `cvar`. If we were to notify + // during this period it would be ignored and then when the parked + // thread went to sleep it would never wake up. Fortunately, it has + // `lock` locked at this stage so we can acquire `lock` to wait until + // it is ready to receive the notification. + // + // Releasing `lock` before the call to `notify_one` means that when the + // parked thread wakes it doesn't get woken only to have to wait for us + // to release `lock`. 
+ unsafe { + lock(self.lock.get()); + unlock(self.lock.get()); + notify_one(self.cvar.get()); + } + } +} + +impl Drop for Parker { + fn drop(&mut self) { + unsafe { + libc::pthread_cond_destroy(self.cvar.get_mut()); + libc::pthread_mutex_destroy(self.lock.get_mut()); + } + } +} + +unsafe impl Sync for Parker {} +unsafe impl Send for Parker {} diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs index 498c94d0cdc..ac8355188bb 100644 --- a/library/std/src/sys/unix/time.rs +++ b/library/std/src/sys/unix/time.rs @@ -1,21 +1,63 @@ -use crate::cmp::Ordering; +use crate::fmt; use crate::time::Duration; -use core::hash::{Hash, Hasher}; - -pub use self::inner::{Instant, SystemTime, UNIX_EPOCH}; +pub use self::inner::Instant; use crate::convert::TryInto; const NSEC_PER_SEC: u64 = 1_000_000_000; +pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SystemTime { + pub(in crate::sys::unix) t: Timespec, +} -#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub(in crate::sys::unix) struct Timespec { - pub t: libc::timespec, + tv_sec: i64, + tv_nsec: i64, +} + +impl SystemTime { + pub fn new(tv_sec: i64, tv_nsec: i64) -> SystemTime { + SystemTime { t: Timespec::new(tv_sec, tv_nsec) } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.t.sub_timespec(&other.t) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_add_duration(other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_sub_duration(other)? }) + } +} + +impl From<libc::timespec> for SystemTime { + fn from(t: libc::timespec) -> SystemTime { + SystemTime { t: Timespec::from(t) } + } +} + +impl fmt::Debug for SystemTime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SystemTime") + .field("tv_sec", &self.t.tv_sec) + .field("tv_nsec", &self.t.tv_nsec) + .finish() + } } impl Timespec { const fn zero() -> Timespec { - Timespec { t: libc::timespec { tv_sec: 0, tv_nsec: 0 } } + Timespec { tv_sec: 0, tv_nsec: 0 } + } + + fn new(tv_sec: i64, tv_nsec: i64) -> Timespec { + Timespec { tv_sec, tv_nsec } } pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> { @@ -23,22 +65,22 @@ impl Timespec { // NOTE(eddyb) two aspects of this `if`-`else` are required for LLVM // to optimize it into a branchless form (see also #75545): // - // 1. `self.t.tv_sec - other.t.tv_sec` shows up as a common expression + // 1. `self.tv_sec - other.tv_sec` shows up as a common expression // in both branches, i.e. the `else` must have its `- 1` // subtraction after the common one, not interleaved with it - // (it used to be `self.t.tv_sec - 1 - other.t.tv_sec`) + // (it used to be `self.tv_sec - 1 - other.tv_sec`) // // 2. the `Duration::new` call (or any other additional complexity) // is outside of the `if`-`else`, not duplicated in both branches // // Ideally this code could be rearranged such that it more // directly expresses the lower-cost behavior we want from it. 
- let (secs, nsec) = if self.t.tv_nsec >= other.t.tv_nsec { - ((self.t.tv_sec - other.t.tv_sec) as u64, (self.t.tv_nsec - other.t.tv_nsec) as u32) + let (secs, nsec) = if self.tv_nsec >= other.tv_nsec { + ((self.tv_sec - other.tv_sec) as u64, (self.tv_nsec - other.tv_nsec) as u32) } else { ( - (self.t.tv_sec - other.t.tv_sec - 1) as u64, - self.t.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.t.tv_nsec as u32, + (self.tv_sec - other.tv_sec - 1) as u64, + self.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.tv_nsec as u32, ) }; @@ -54,89 +96,65 @@ impl Timespec { pub fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> { let mut secs = other .as_secs() - .try_into() // <- target type would be `libc::time_t` + .try_into() // <- target type would be `i64` .ok() - .and_then(|secs| self.t.tv_sec.checked_add(secs))?; + .and_then(|secs| self.tv_sec.checked_add(secs))?; // Nano calculations can't overflow because nanos are <1B which fit // in a u32. - let mut nsec = other.subsec_nanos() + self.t.tv_nsec as u32; + let mut nsec = other.subsec_nanos() + self.tv_nsec as u32; if nsec >= NSEC_PER_SEC as u32 { nsec -= NSEC_PER_SEC as u32; secs = secs.checked_add(1)?; } - Some(Timespec { t: libc::timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + Some(Timespec::new(secs, nsec as i64)) } pub fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> { let mut secs = other .as_secs() - .try_into() // <- target type would be `libc::time_t` + .try_into() // <- target type would be `i64` .ok() - .and_then(|secs| self.t.tv_sec.checked_sub(secs))?; + .and_then(|secs| self.tv_sec.checked_sub(secs))?; // Similar to above, nanos can't overflow. - let mut nsec = self.t.tv_nsec as i32 - other.subsec_nanos() as i32; + let mut nsec = self.tv_nsec as i32 - other.subsec_nanos() as i32; if nsec < 0 { nsec += NSEC_PER_SEC as i32; secs = secs.checked_sub(1)?; } - Some(Timespec { t: libc::timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + Some(Timespec::new(secs, nsec as i64)) } -} -impl PartialEq for Timespec { - fn eq(&self, other: &Timespec) -> bool { - self.t.tv_sec == other.t.tv_sec && self.t.tv_nsec == other.t.tv_nsec + pub fn to_timespec(&self) -> Option<libc::timespec> { + use crate::convert::TryInto; + Some(libc::timespec { + tv_sec: self.tv_sec.try_into().ok()?, + tv_nsec: self.tv_nsec.try_into().ok()?, + }) } } -impl Eq for Timespec {} - -impl PartialOrd for Timespec { - fn partial_cmp(&self, other: &Timespec) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -impl Ord for Timespec { - fn cmp(&self, other: &Timespec) -> Ordering { - let me = (self.t.tv_sec, self.t.tv_nsec); - let other = (other.t.tv_sec, other.t.tv_nsec); - me.cmp(&other) - } -} - -impl Hash for Timespec { - fn hash<H: Hasher>(&self, state: &mut H) { - self.t.tv_sec.hash(state); - self.t.tv_nsec.hash(state); +impl From<libc::timespec> for Timespec { + fn from(t: libc::timespec) -> Timespec { + Timespec::new(t.tv_sec as i64, t.tv_nsec as i64) } } #[cfg(any(target_os = "macos", target_os = "ios"))] mod inner { - use crate::fmt; use crate::sync::atomic::{AtomicU64, Ordering}; use crate::sys::cvt; use crate::sys_common::mul_div_u64; use crate::time::Duration; - use super::Timespec; - use super::NSEC_PER_SEC; + use super::{SystemTime, Timespec, NSEC_PER_SEC}; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] pub struct Instant { t: u64, } - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct SystemTime { - t: Timespec, - } - - pub const UNIX_EPOCH: SystemTime = SystemTime { t: 
Timespec::zero() }; - #[repr(C)] #[derive(Copy, Clone)] struct mach_timebase_info { @@ -178,41 +196,17 @@ mod inner { cvt(unsafe { libc::gettimeofday(&mut s, ptr::null_mut()) }).unwrap(); return SystemTime::from(s); } + } - pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { - self.t.sub_timespec(&other.t) - } - - pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { - Some(SystemTime { t: self.t.checked_add_duration(other)? }) - } - - pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { - Some(SystemTime { t: self.t.checked_sub_duration(other)? }) + impl From<libc::timeval> for Timespec { + fn from(t: libc::timeval) -> Timespec { + Timespec::new(t.tv_sec as i64, 1000 * t.tv_usec as i64) } } impl From<libc::timeval> for SystemTime { fn from(t: libc::timeval) -> SystemTime { - SystemTime::from(libc::timespec { - tv_sec: t.tv_sec, - tv_nsec: (t.tv_usec * 1000) as libc::c_long, - }) - } - } - - impl From<libc::timespec> for SystemTime { - fn from(t: libc::timespec) -> SystemTime { - SystemTime { t: Timespec { t } } - } - } - - impl fmt::Debug for SystemTime { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SystemTime") - .field("tv_sec", &self.t.t.tv_sec) - .field("tv_nsec", &self.t.t.tv_nsec) - .finish() + SystemTime { t: Timespec::from(t) } } } @@ -270,20 +264,13 @@ mod inner { use crate::sys::cvt; use crate::time::Duration; - use super::Timespec; + use super::{SystemTime, Timespec}; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Instant { t: Timespec, } - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct SystemTime { - t: Timespec, - } - - pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; - impl Instant { pub fn now() -> Instant { Instant { t: Timespec::now(libc::CLOCK_MONOTONIC) } @@ -305,8 +292,8 @@ mod inner { impl fmt::Debug for Instant { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Instant") - .field("tv_sec", &self.t.t.tv_sec) - .field("tv_nsec", &self.t.t.tv_nsec) + .field("tv_sec", &self.t.tv_sec) + .field("tv_nsec", &self.t.tv_nsec) .finish() } } @@ -315,33 +302,6 @@ mod inner { pub fn now() -> SystemTime { SystemTime { t: Timespec::now(libc::CLOCK_REALTIME) } } - - pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { - self.t.sub_timespec(&other.t) - } - - pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { - Some(SystemTime { t: self.t.checked_add_duration(other)? }) - } - - pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { - Some(SystemTime { t: self.t.checked_sub_duration(other)? }) - } - } - - impl From<libc::timespec> for SystemTime { - fn from(t: libc::timespec) -> SystemTime { - SystemTime { t: Timespec { t } } - } - } - - impl fmt::Debug for SystemTime { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SystemTime") - .field("tv_sec", &self.t.t.tv_sec) - .field("tv_nsec", &self.t.t.tv_nsec) - .finish() - } } #[cfg(not(any(target_os = "dragonfly", target_os = "espidf")))] @@ -351,9 +311,36 @@ mod inner { impl Timespec { pub fn now(clock: clock_t) -> Timespec { + // Try to use 64-bit time in preparation for Y2038. + #[cfg(all(target_os = "linux", target_env = "gnu", target_pointer_width = "32"))] + { + use crate::sys::weak::weak; + + // __clock_gettime64 was added to 32-bit arches in glibc 2.34, + // and it handles both vDSO calls and ENOSYS fallbacks itself. 
+ weak!(fn __clock_gettime64(libc::clockid_t, *mut __timespec64) -> libc::c_int); + + #[repr(C)] + struct __timespec64 { + tv_sec: i64, + #[cfg(target_endian = "big")] + _padding: i32, + tv_nsec: i32, + #[cfg(target_endian = "little")] + _padding: i32, + } + + if let Some(clock_gettime64) = __clock_gettime64.get() { + let mut t = MaybeUninit::uninit(); + cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap(); + let t = unsafe { t.assume_init() }; + return Timespec { tv_sec: t.tv_sec, tv_nsec: t.tv_nsec as i64 }; + } + } + let mut t = MaybeUninit::uninit(); cvt(unsafe { libc::clock_gettime(clock, t.as_mut_ptr()) }).unwrap(); - Timespec { t: unsafe { t.assume_init() } } + Timespec::from(unsafe { t.assume_init() }) } } } diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs index da63c068384..e4ff21b25bd 100644 --- a/library/std/src/sys/unix/weak.rs +++ b/library/std/src/sys/unix/weak.rs @@ -25,7 +25,8 @@ use crate::ffi::CStr; use crate::marker::PhantomData; use crate::mem; -use crate::sync::atomic::{self, AtomicUsize, Ordering}; +use crate::ptr; +use crate::sync::atomic::{self, AtomicPtr, Ordering}; // We can use true weak linkage on ELF targets. #[cfg(not(any(target_os = "macos", target_os = "ios")))] @@ -83,13 +84,13 @@ pub(crate) macro dlsym { } pub(crate) struct DlsymWeak<F> { name: &'static str, - addr: AtomicUsize, + func: AtomicPtr<libc::c_void>, _marker: PhantomData<F>, } impl<F> DlsymWeak<F> { pub(crate) const fn new(name: &'static str) -> Self { - DlsymWeak { name, addr: AtomicUsize::new(1), _marker: PhantomData } + DlsymWeak { name, func: AtomicPtr::new(ptr::invalid_mut(1)), _marker: PhantomData } } #[inline] @@ -97,11 +98,11 @@ impl<F> DlsymWeak<F> { unsafe { // Relaxed is fine here because we fence before reading through the // pointer (see the comment below). - match self.addr.load(Ordering::Relaxed) { - 1 => self.initialize(), - 0 => None, - addr => { - let func = mem::transmute_copy::<usize, F>(&addr); + match self.func.load(Ordering::Relaxed) { + func if func.addr() == 1 => self.initialize(), + func if func.is_null() => None, + func => { + let func = mem::transmute_copy::<*mut libc::c_void, F>(&func); // The caller is presumably going to read through this value // (by calling the function we've dlsymed). This means we'd // need to have loaded it with at least C11's consume @@ -129,25 +130,22 @@ impl<F> DlsymWeak<F> { // Cold because it should only happen during first-time initialization. #[cold] unsafe fn initialize(&self) -> Option<F> { - assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>()); + assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut libc::c_void>()); let val = fetch(self.name); // This synchronizes with the acquire fence in `get`. - self.addr.store(val, Ordering::Release); + self.func.store(val, Ordering::Release); - match val { - 0 => None, - addr => Some(mem::transmute_copy::<usize, F>(&addr)), - } + if val.is_null() { None } else { Some(mem::transmute_copy::<*mut libc::c_void, F>(&val)) } } } -unsafe fn fetch(name: &str) -> usize { +unsafe fn fetch(name: &str) -> *mut libc::c_void { let name = match CStr::from_bytes_with_nul(name.as_bytes()) { Ok(cstr) => cstr, - Err(..) => return 0, + Err(..) => return ptr::null_mut(), }; - libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize + libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) } #[cfg(not(any(target_os = "linux", target_os = "android")))] |
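The futex.rs changes above give every supported BSD a `futex_wait`/`futex_wake`/`futex_wake_all` implementation (via `_umtx_op` on FreeBSD, `futex(2)` on OpenBSD and `umtx_sleep`/`umtx_wakeup` on DragonFly) so the futex-based locks can be used beyond Linux. Below is a minimal, Linux-only sketch of the same wait/wake pattern, assuming the `libc` crate; it uses plain `FUTEX_WAIT` with a relative timeout for brevity, whereas the patch uses `FUTEX_WAIT_BITSET` with an absolute `CLOCK_MONOTONIC` deadline, and the one-shot event in `main` is only a hypothetical usage example, not code from the patch.

```rust
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

/// Block until `*futex != expected`, a wake-up, a timeout, or a spurious return.
fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) {
    // Relative timeout; truncation of very large durations is ignored here.
    let ts = timeout.map(|d| libc::timespec {
        tv_sec: d.as_secs() as libc::time_t,
        tv_nsec: d.subsec_nanos() as _,
    });
    unsafe {
        libc::syscall(
            libc::SYS_futex,
            futex as *const AtomicU32,
            libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
            expected,
            ts.as_ref().map_or(std::ptr::null(), |t| t as *const libc::timespec),
        );
    }
}

/// Wake every thread currently blocked in `futex_wait` on this word.
fn futex_wake_all(futex: &AtomicU32) {
    unsafe {
        libc::syscall(
            libc::SYS_futex,
            futex as *const AtomicU32,
            libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
            i32::MAX,
        );
    }
}

fn main() {
    // A one-shot event: 0 = not signalled, 1 = signalled.
    let event = Arc::new(AtomicU32::new(0));
    let waiter = {
        let event = Arc::clone(&event);
        thread::spawn(move || {
            // Re-check after every return to handle spurious wake-ups.
            while event.load(Ordering::Acquire) == 0 {
                futex_wait(&event, 0, Some(Duration::from_secs(1)));
            }
            println!("waiter: event observed");
        })
    };
    thread::sleep(Duration::from_millis(100));
    event.store(1, Ordering::Release);
    futex_wake_all(&event);
    waiter.join().unwrap();
}
```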
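In process_common/tests.rs and process_unix.rs the calls to `pthread_sigmask` move from `cvt` to `cvt_nz`, because the `pthread_*` family reports failure by returning the error number directly rather than by returning -1 and setting `errno`. A small sketch of that distinction, assuming the `libc` crate; `cvt` and `cvt_nz` here are simplified stand-ins for the private helpers in `sys::unix`.

```rust
use std::io;
use std::mem;
use std::ptr;

/// For classic libc calls: -1 means failure and the error is in errno.
fn cvt(ret: libc::c_int) -> io::Result<libc::c_int> {
    if ret == -1 { Err(io::Error::last_os_error()) } else { Ok(ret) }
}

/// For pthread_* calls: a non-zero return value *is* the error code.
fn cvt_nz(error: libc::c_int) -> io::Result<()> {
    if error == 0 { Ok(()) } else { Err(io::Error::from_raw_os_error(error)) }
}

fn main() -> io::Result<()> {
    unsafe {
        let mut set: libc::sigset_t = mem::zeroed();
        cvt(libc::sigemptyset(&mut set))?;
        cvt(libc::sigaddset(&mut set, libc::SIGINT))?;
        // Using cvt() here would misreport failures, since pthread_sigmask
        // never returns -1 and does not touch errno.
        cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, &set, ptr::null_mut()))?;
    }
    println!("SIGINT is now blocked in this thread");
    Ok(())
}
```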
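time.rs now keeps `Timespec` as a pair of `i64`s and only converts to a `libc::timespec` at the syscall boundary via `to_timespec()`, which lets thread_parker.rs clamp deadlines that do not fit the platform type (for example a 32-bit `time_t`) to `TIMESPEC_MAX`. A rough sketch of that deadline computation, assuming the `libc` crate on a common target; `deadline_for` is a hypothetical helper name and the struct mirrors the patch rather than any public API.

```rust
use std::time::Duration;

const NSEC_PER_SEC: i64 = 1_000_000_000;

/// 64-bit seconds/nanoseconds, independent of the width of libc::time_t.
#[derive(Copy, Clone)]
struct Timespec {
    tv_sec: i64,
    tv_nsec: i64, // always in 0..NSEC_PER_SEC
}

impl Timespec {
    fn checked_add_duration(&self, dur: &Duration) -> Option<Timespec> {
        let mut secs = self.tv_sec.checked_add(i64::try_from(dur.as_secs()).ok()?)?;
        let mut nsec = self.tv_nsec + dur.subsec_nanos() as i64;
        if nsec >= NSEC_PER_SEC {
            nsec -= NSEC_PER_SEC;
            secs = secs.checked_add(1)?;
        }
        Some(Timespec { tv_sec: secs, tv_nsec: nsec })
    }

    /// Fails if the value does not fit in the platform's libc::timespec.
    fn to_timespec(&self) -> Option<libc::timespec> {
        Some(libc::timespec {
            tv_sec: self.tv_sec.try_into().ok()?,
            tv_nsec: self.tv_nsec.try_into().ok()?,
        })
    }
}

/// Largest representable timespec, used when the deadline overflows.
const TIMESPEC_MAX: libc::timespec =
    libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 999_999_999 };

fn deadline_for(now: Timespec, dur: Duration) -> libc::timespec {
    now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX)
}

fn main() {
    let mut now = libc::timespec { tv_sec: 0, tv_nsec: 0 };
    unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now) };
    let now = Timespec { tv_sec: now.tv_sec as i64, tv_nsec: now.tv_nsec as i64 };

    let d = deadline_for(now, Duration::from_millis(500));
    println!("deadline: {}s {}ns", d.tv_sec, d.tv_nsec);
}
```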
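weak.rs changes `DlsymWeak` to cache the looked-up symbol in an `AtomicPtr<libc::c_void>` instead of an `AtomicUsize`, with `ptr::invalid_mut(1)` as the "not yet resolved" sentinel, in line with the strict-provenance APIs (`invalid_mut`, `addr`) it now uses. Below is a simplified standalone sketch of the same dlsym-and-cache pattern, assuming the `libc` crate; it uses null as its only sentinel (so a missing symbol is simply probed again on the next call) instead of std's two-sentinel scheme, and the `getrandom` lookup in `main` is just an illustrative example.

```rust
use std::ffi::CStr;
use std::mem;
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

/// Resolve an optional libc symbol at runtime and cache its address.
struct Weak {
    name: &'static str, // must include the trailing NUL
    func: AtomicPtr<libc::c_void>,
}

impl Weak {
    const fn new(name: &'static str) -> Weak {
        Weak { name, func: AtomicPtr::new(ptr::null_mut()) }
    }

    fn get(&self) -> Option<*mut libc::c_void> {
        let mut f = self.func.load(Ordering::Acquire);
        if f.is_null() {
            let cname = CStr::from_bytes_with_nul(self.name.as_bytes()).ok()?;
            f = unsafe { libc::dlsym(libc::RTLD_DEFAULT, cname.as_ptr()) };
            // The Release store pairs with the Acquire load above, so later
            // callers that see the cached pointer also see the resolved symbol.
            self.func.store(f, Ordering::Release);
        }
        if f.is_null() { None } else { Some(f) }
    }
}

// Illustrative only: glibc >= 2.25 exposes getrandom(2); older libcs do not.
static GETRANDOM: Weak = Weak::new("getrandom\0");

fn main() {
    match GETRANDOM.get() {
        Some(addr) => {
            // Cast the raw address to the known C signature before calling it.
            let getrandom: unsafe extern "C" fn(
                *mut libc::c_void,
                libc::size_t,
                libc::c_uint,
            ) -> libc::ssize_t = unsafe { mem::transmute(addr) };

            let mut buf = [0u8; 8];
            let n = unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) };
            println!("getrandom returned {n}: {buf:?}");
        }
        None => println!("getrandom is not available in this libc"),
    }
}
```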
