Diffstat (limited to 'library/std/src')
207 files changed, 4642 insertions, 1241 deletions
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs index bb786bd59dc..a834b36697c 100644 --- a/library/std/src/alloc.rs +++ b/library/std/src/alloc.rs @@ -56,7 +56,7 @@ #![deny(unsafe_op_in_unsafe_fn)] #![stable(feature = "alloc_module", since = "1.28.0")] -use core::intrinsics; +use core::hint; use core::ptr::NonNull; use core::sync::atomic::{AtomicPtr, Ordering}; use core::{mem, ptr}; @@ -172,7 +172,7 @@ impl System { let new_size = new_layout.size(); // `realloc` probably checks for `new_size >= old_layout.size()` or something similar. - intrinsics::assume(new_size >= old_layout.size()); + hint::assert_unchecked(new_size >= old_layout.size()); let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size); let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?; @@ -264,7 +264,7 @@ unsafe impl Allocator for System { // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller new_size if old_layout.align() == new_layout.align() => unsafe { // `realloc` probably checks for `new_size <= old_layout.size()` or something similar. - intrinsics::assume(new_size <= old_layout.size()); + hint::assert_unchecked(new_size <= old_layout.size()); let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size); let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?; diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs index 835e35eac34..475b3e7eb93 100644 --- a/library/std/src/backtrace.rs +++ b/library/std/src/backtrace.rs @@ -467,7 +467,7 @@ impl RawFrame { match self { RawFrame::Actual(frame) => frame.ip(), #[cfg(test)] - RawFrame::Fake => crate::ptr::invalid_mut(1), + RawFrame::Fake => crate::ptr::without_provenance_mut(1), } } } diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs index 39e94902cfe..0d4c1fa05cc 100644 --- a/library/std/src/collections/hash/map.rs +++ b/library/std/src/collections/hash/map.rs @@ -356,6 +356,7 @@ impl<K, V, S> HashMap<K, V, S> { /// /// In the current implementation, iterating over keys takes O(capacity) time /// instead of O(len) because it internally visits empty buckets too. + #[rustc_lint_query_instability] #[stable(feature = "rust1", since = "1.0.0")] pub fn keys(&self) -> Keys<'_, K, V> { Keys { inner: self.iter() } @@ -417,6 +418,7 @@ impl<K, V, S> HashMap<K, V, S> { /// /// In the current implementation, iterating over values takes O(capacity) time /// instead of O(len) because it internally visits empty buckets too. + #[rustc_lint_query_instability] #[stable(feature = "rust1", since = "1.0.0")] pub fn values(&self) -> Values<'_, K, V> { Values { inner: self.iter() } @@ -449,6 +451,7 @@ impl<K, V, S> HashMap<K, V, S> { /// /// In the current implementation, iterating over values takes O(capacity) time /// instead of O(len) because it internally visits empty buckets too. 
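For context on the `alloc.rs` hunk above: `core::hint::assert_unchecked` is the replacement for `intrinsics::assume`. A minimal sketch of how it can be used outside std, assuming a nightly toolchain with the `hint_assert_unchecked` feature enabled (the `get_assuming_in_bounds` helper is made up for illustration):

```
#![feature(hint_assert_unchecked)]
use core::hint;

/// Hypothetical helper; the caller must guarantee `index < slice.len()`.
unsafe fn get_assuming_in_bounds(slice: &[u8], index: usize) -> u8 {
    // SAFETY: the caller upholds `index < slice.len()`, so the hint is sound
    // and can let the optimizer elide the bounds check below.
    hint::assert_unchecked(index < slice.len());
    slice[index]
}
```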
+ #[rustc_lint_query_instability] #[stable(feature = "map_values_mut", since = "1.10.0")] pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { ValuesMut { inner: self.iter_mut() } @@ -2232,6 +2235,18 @@ impl<'a, K, V> Iterator for Iter<'a, K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn count(self) -> usize { + self.base.len() + } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for Iter<'_, K, V> { @@ -2256,6 +2271,18 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn count(self) -> usize { + self.base.len() + } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for IterMut<'_, K, V> { @@ -2290,6 +2317,18 @@ impl<K, V> Iterator for IntoIter<K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn count(self) -> usize { + self.base.len() + } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for IntoIter<K, V> { @@ -2320,6 +2359,18 @@ impl<'a, K, V> Iterator for Keys<'a, K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } + #[inline] + fn count(self) -> usize { + self.inner.len() + } + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (k, _)| f(acc, k)) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for Keys<'_, K, V> { @@ -2343,6 +2394,18 @@ impl<'a, K, V> Iterator for Values<'a, K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } + #[inline] + fn count(self) -> usize { + self.inner.len() + } + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (_, v)| f(acc, v)) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for Values<'_, K, V> { @@ -2366,6 +2429,18 @@ impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } + #[inline] + fn count(self) -> usize { + self.inner.len() + } + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (_, v)| f(acc, v)) + } } #[stable(feature = "map_values_mut", since = "1.10.0")] impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> { @@ -2396,6 +2471,18 @@ impl<K, V> Iterator for IntoKeys<K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } + #[inline] + fn count(self) -> usize { + self.inner.len() + } + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (k, _)| f(acc, k)) + } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> ExactSizeIterator for IntoKeys<K, V> { @@ -2426,6 +2513,18 @@ impl<K, V> Iterator for IntoValues<K, V> { fn size_hint(&self) -> (usize, Option<usize>) { 
self.inner.size_hint() } + #[inline] + fn count(self) -> usize { + self.inner.len() + } + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (_, v)| f(acc, v)) + } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> ExactSizeIterator for IntoValues<K, V> { @@ -2456,6 +2555,14 @@ impl<'a, K, V> Iterator for Drain<'a, K, V> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "drain", since = "1.6.0")] impl<K, V> ExactSizeIterator for Drain<'_, K, V> { diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs index 8bc59608290..dcb2fa0f771 100644 --- a/library/std/src/collections/hash/set.rs +++ b/library/std/src/collections/hash/set.rs @@ -1500,6 +1500,18 @@ impl<'a, K> Iterator for Iter<'a, K> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn count(self) -> usize { + self.base.len() + } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K> ExactSizeIterator for Iter<'_, K> { @@ -1530,6 +1542,18 @@ impl<K> Iterator for IntoIter<K> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn count(self) -> usize { + self.base.len() + } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K> ExactSizeIterator for IntoIter<K> { @@ -1560,6 +1584,14 @@ impl<'a, K> Iterator for Drain<'a, K> { fn size_hint(&self) -> (usize, Option<usize>) { self.base.size_hint() } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.base.fold(init, f) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<K> ExactSizeIterator for Drain<'_, K> { @@ -1639,6 +1671,15 @@ where let (_, upper) = self.iter.size_hint(); (0, upper) } + + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, |acc, elt| if self.other.contains(elt) { f(acc, elt) } else { acc }) + } } #[stable(feature = "std_debug", since = "1.16.0")] @@ -1691,6 +1732,15 @@ where let (_, upper) = self.iter.size_hint(); (0, upper) } + + #[inline] + fn fold<B, F>(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, |acc, elt| if self.other.contains(elt) { acc } else { f(acc, elt) }) + } } #[stable(feature = "fused", since = "1.26.0")] @@ -1736,6 +1786,14 @@ where fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } } #[stable(feature = "fused", since = "1.26.0")] @@ -1800,6 +1858,18 @@ where fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } + #[inline] + fn count(self) -> usize { + self.iter.count() + } + #[inline] + fn fold<B, F>(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } } #[allow(dead_code)] diff --git 
a/library/std/src/env.rs b/library/std/src/env.rs index 30ac0512348..5bd20ebe208 100644 --- a/library/std/src/env.rs +++ b/library/std/src/env.rs @@ -78,7 +78,7 @@ pub fn current_dir() -> io::Result<PathBuf> { /// assert!(env::set_current_dir(&root).is_ok()); /// println!("Successfully changed working directory to {}!", root.display()); /// ``` -#[doc(alias = "chdir")] +#[doc(alias = "chdir", alias = "SetCurrentDirectory", alias = "SetCurrentDirectoryW")] #[stable(feature = "env", since = "1.0.0")] pub fn set_current_dir<P: AsRef<Path>>(path: P) -> io::Result<()> { os_imp::chdir(path.as_ref()) @@ -655,6 +655,7 @@ pub fn home_dir() -> Option<PathBuf> { /// } /// ``` #[must_use] +#[doc(alias = "GetTempPath", alias = "GetTempPath2")] #[stable(feature = "env", since = "1.0.0")] pub fn temp_dir() -> PathBuf { os_imp::temp_dir() diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs index c3506175715..b60d7a72411 100644 --- a/library/std/src/f32.rs +++ b/library/std/src/f32.rs @@ -102,8 +102,6 @@ impl f32 { /// # Examples /// /// ``` - /// #![feature(round_ties_even)] - /// /// let f = 3.3_f32; /// let g = -3.3_f32; /// let h = 3.5_f32; @@ -116,7 +114,7 @@ impl f32 { /// ``` #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] - #[unstable(feature = "round_ties_even", issue = "96710")] + #[stable(feature = "round_ties_even", since = "1.77.0")] #[inline] pub fn round_ties_even(self) -> f32 { unsafe { intrinsics::rintf32(self) } diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs index e4b7bfeeb84..88f992b3957 100644 --- a/library/std/src/f64.rs +++ b/library/std/src/f64.rs @@ -102,8 +102,6 @@ impl f64 { /// # Examples /// /// ``` - /// #![feature(round_ties_even)] - /// /// let f = 3.3_f64; /// let g = -3.3_f64; /// let h = 3.5_f64; @@ -116,7 +114,7 @@ impl f64 { /// ``` #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] - #[unstable(feature = "round_ties_even", issue = "96710")] + #[stable(feature = "round_ties_even", since = "1.77.0")] #[inline] pub fn round_ties_even(self) -> f64 { unsafe { intrinsics::rintf64(self) } diff --git a/library/std/src/ffi/mod.rs b/library/std/src/ffi/mod.rs index 97e78d17786..818571ddaaa 100644 --- a/library/std/src/ffi/mod.rs +++ b/library/std/src/ffi/mod.rs @@ -127,6 +127,11 @@ //! trait, which provides a [`from_wide`] method to convert a native Windows //! string (without the terminating nul character) to an [`OsString`]. //! +//! ## Other platforms +//! +//! Many other platforms provide their own extension traits in a +//! `std::os::*::ffi` module. +//! //! ## On all platforms //! //! On all platforms, [`OsStr`] consists of a sequence of bytes that is encoded as a superset of @@ -135,6 +140,8 @@ //! For limited, inexpensive conversions from and to bytes, see [`OsStr::as_encoded_bytes`] and //! [`OsStr::from_encoded_bytes_unchecked`]. //! +//! For basic string processing, see [`OsStr::slice_encoded_bytes`]. +//! //! [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value //! [Unicode code point]: https://www.unicode.org/glossary/#code_point //! 
[`env::set_var()`]: crate::env::set_var "env::set_var" diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs index 81973182148..28747ad8f34 100644 --- a/library/std/src/ffi/os_str.rs +++ b/library/std/src/ffi/os_str.rs @@ -9,7 +9,7 @@ use crate::hash::{Hash, Hasher}; use crate::ops::{self, Range}; use crate::rc::Rc; use crate::slice; -use crate::str::{from_utf8 as str_from_utf8, FromStr}; +use crate::str::FromStr; use crate::sync::Arc; use crate::sys::os_str::{Buf, Slice}; @@ -997,42 +997,15 @@ impl OsStr { /// ``` #[unstable(feature = "os_str_slice", issue = "118485")] pub fn slice_encoded_bytes<R: ops::RangeBounds<usize>>(&self, range: R) -> &Self { - #[track_caller] - fn check_valid_boundary(bytes: &[u8], index: usize) { - if index == 0 || index == bytes.len() { - return; - } - - // Fast path - if bytes[index - 1].is_ascii() || bytes[index].is_ascii() { - return; - } - - let (before, after) = bytes.split_at(index); - - // UTF-8 takes at most 4 bytes per codepoint, so we don't - // need to check more than that. - let after = after.get(..4).unwrap_or(after); - match str_from_utf8(after) { - Ok(_) => return, - Err(err) if err.valid_up_to() != 0 => return, - Err(_) => (), - } - - for len in 2..=4.min(index) { - let before = &before[index - len..]; - if str_from_utf8(before).is_ok() { - return; - } - } - - panic!("byte index {index} is not an OsStr boundary"); - } - let encoded_bytes = self.as_encoded_bytes(); let Range { start, end } = slice::range(range, ..encoded_bytes.len()); - check_valid_boundary(encoded_bytes, start); - check_valid_boundary(encoded_bytes, end); + + // `check_public_boundary` should panic if the index does not lie on an + // `OsStr` boundary as described above. It's possible to do this in an + // encoding-agnostic way, but details of the internal encoding might + // permit a more efficient implementation. 
+ self.inner.check_public_boundary(start); + self.inner.check_public_boundary(end); // SAFETY: `slice::range` ensures that `start` and `end` are valid let slice = unsafe { encoded_bytes.get_unchecked(start..end) }; diff --git a/library/std/src/ffi/os_str/tests.rs b/library/std/src/ffi/os_str/tests.rs index 60cde376d32..b020e05eaab 100644 --- a/library/std/src/ffi/os_str/tests.rs +++ b/library/std/src/ffi/os_str/tests.rs @@ -194,15 +194,65 @@ fn slice_encoded_bytes() { } #[test] -#[should_panic(expected = "byte index 2 is not an OsStr boundary")] +#[should_panic] +fn slice_out_of_bounds() { + let crab = OsStr::new("🦀"); + let _ = crab.slice_encoded_bytes(..5); +} + +#[test] +#[should_panic] fn slice_mid_char() { let crab = OsStr::new("🦀"); let _ = crab.slice_encoded_bytes(..2); } +#[cfg(unix)] +#[test] +#[should_panic(expected = "byte index 1 is not an OsStr boundary")] +fn slice_invalid_data() { + use crate::os::unix::ffi::OsStrExt; + + let os_string = OsStr::from_bytes(b"\xFF\xFF"); + let _ = os_string.slice_encoded_bytes(1..); +} + +#[cfg(unix)] +#[test] +#[should_panic(expected = "byte index 1 is not an OsStr boundary")] +fn slice_partial_utf8() { + use crate::os::unix::ffi::{OsStrExt, OsStringExt}; + + let part_crab = OsStr::from_bytes(&"🦀".as_bytes()[..3]); + let mut os_string = OsString::from_vec(vec![0xFF]); + os_string.push(part_crab); + let _ = os_string.slice_encoded_bytes(1..); +} + +#[cfg(unix)] +#[test] +fn slice_invalid_edge() { + use crate::os::unix::ffi::{OsStrExt, OsStringExt}; + + let os_string = OsStr::from_bytes(b"a\xFFa"); + assert_eq!(os_string.slice_encoded_bytes(..1), "a"); + assert_eq!(os_string.slice_encoded_bytes(1..), OsStr::from_bytes(b"\xFFa")); + assert_eq!(os_string.slice_encoded_bytes(..2), OsStr::from_bytes(b"a\xFF")); + assert_eq!(os_string.slice_encoded_bytes(2..), "a"); + + let os_string = OsStr::from_bytes(&"abc🦀".as_bytes()[..6]); + assert_eq!(os_string.slice_encoded_bytes(..3), "abc"); + assert_eq!(os_string.slice_encoded_bytes(3..), OsStr::from_bytes(b"\xF0\x9F\xA6")); + + let mut os_string = OsString::from_vec(vec![0xFF]); + os_string.push("🦀"); + assert_eq!(os_string.slice_encoded_bytes(..1), OsStr::from_bytes(b"\xFF")); + assert_eq!(os_string.slice_encoded_bytes(1..), "🦀"); +} + #[cfg(windows)] #[test] -#[should_panic(expected = "byte index 3 is not an OsStr boundary")] +#[should_panic(expected = "byte index 3 lies between surrogate codepoints")] fn slice_between_surrogates() { use crate::os::windows::ffi::OsStringExt; @@ -216,10 +266,14 @@ fn slice_between_surrogates() { fn slice_surrogate_edge() { use crate::os::windows::ffi::OsStringExt; - let os_string = OsString::from_wide(&[0xD800]); - let mut with_crab = os_string.clone(); - with_crab.push("🦀"); + let surrogate = OsString::from_wide(&[0xD800]); + let mut pre_crab = surrogate.clone(); + pre_crab.push("🦀"); + assert_eq!(pre_crab.slice_encoded_bytes(..3), surrogate); + assert_eq!(pre_crab.slice_encoded_bytes(3..), "🦀"); - assert_eq!(with_crab.slice_encoded_bytes(..3), os_string); - assert_eq!(with_crab.slice_encoded_bytes(3..), "🦀"); + let mut post_crab = OsString::from("🦀"); + post_crab.push(&surrogate); + assert_eq!(post_crab.slice_encoded_bytes(..4), "🦀"); + assert_eq!(post_crab.slice_encoded_bytes(4..), surrogate); } diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index c4a92927937..6b1dd1b5af4 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -260,7 +260,8 @@ pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> { fn inner(path: &Path) -> 
io::Result<Vec<u8>> { let mut file = File::open(path)?; let size = file.metadata().map(|m| m.len() as usize).ok(); - let mut bytes = Vec::with_capacity(size.unwrap_or(0)); + let mut bytes = Vec::new(); + bytes.try_reserve_exact(size.unwrap_or(0)).map_err(|_| io::ErrorKind::OutOfMemory)?; io::default_read_to_end(&mut file, &mut bytes, size)?; Ok(bytes) } @@ -302,7 +303,8 @@ pub fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> { fn inner(path: &Path) -> io::Result<String> { let mut file = File::open(path)?; let size = file.metadata().map(|m| m.len() as usize).ok(); - let mut string = String::with_capacity(size.unwrap_or(0)); + let mut string = String::new(); + string.try_reserve_exact(size.unwrap_or(0)).map_err(|_| io::ErrorKind::OutOfMemory)?; io::default_read_to_string(&mut file, &mut string, size)?; Ok(string) } @@ -426,7 +428,7 @@ impl File { /// Ok(()) /// } /// ``` - #[stable(feature = "file_create_new", since = "CURRENT_RUSTC_VERSION")] + #[stable(feature = "file_create_new", since = "1.77.0")] pub fn create_new<P: AsRef<Path>>(path: P) -> io::Result<File> { OpenOptions::new().read(true).write(true).create_new(true).open(path.as_ref()) } @@ -654,6 +656,7 @@ impl File { /// /// Note that this method alters the permissions of the underlying file, /// even though it takes `&self` rather than `&mut self`. + #[doc(alias = "fchmod", alias = "SetFileInformationByHandle")] #[stable(feature = "set_permissions_atomic", since = "1.16.0")] pub fn set_permissions(&self, perm: Permissions) -> io::Result<()> { self.inner.set_permissions(perm.0) @@ -774,14 +777,14 @@ impl Read for &File { // Reserves space in the buffer based on the file size when available. fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { let size = buffer_capacity_required(self); - buf.reserve(size.unwrap_or(0)); + buf.try_reserve(size.unwrap_or(0)).map_err(|_| io::ErrorKind::OutOfMemory)?; io::default_read_to_end(self, buf, size) } // Reserves space in the buffer based on the file size when available. 
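The `fs.rs` hunks above replace infallible `with_capacity`/`reserve` calls with `try_reserve`/`try_reserve_exact`, so allocation failure surfaces as `ErrorKind::OutOfMemory` instead of aborting the process. A small sketch of the same pattern applied outside std (the `read_all` helper is hypothetical):

```
use std::fs::File;
use std::io::{self, Read};
use std::path::Path;

// Illustrative only: pre-size the buffer fallibly, mapping allocation
// failure to an I/O error rather than aborting.
fn read_all(path: &Path) -> io::Result<Vec<u8>> {
    let mut file = File::open(path)?;
    let size = file.metadata().map(|m| m.len() as usize).ok();
    let mut bytes = Vec::new();
    bytes.try_reserve_exact(size.unwrap_or(0)).map_err(|_| io::ErrorKind::OutOfMemory)?;
    file.read_to_end(&mut bytes)?;
    Ok(bytes)
}
```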
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> { let size = buffer_capacity_required(self); - buf.reserve(size.unwrap_or(0)); + buf.try_reserve(size.unwrap_or(0)).map_err(|_| io::ErrorKind::OutOfMemory)?; io::default_read_to_string(self, buf, size) } } @@ -1312,6 +1315,7 @@ impl Metadata { /// Ok(()) /// } /// ``` + #[doc(alias = "mtime", alias = "ftLastWriteTime")] #[stable(feature = "fs_time", since = "1.10.0")] pub fn modified(&self) -> io::Result<SystemTime> { self.0.modified().map(FromInner::from_inner) @@ -1347,6 +1351,7 @@ impl Metadata { /// Ok(()) /// } /// ``` + #[doc(alias = "atime", alias = "ftLastAccessTime")] #[stable(feature = "fs_time", since = "1.10.0")] pub fn accessed(&self) -> io::Result<SystemTime> { self.0.accessed().map(FromInner::from_inner) @@ -1379,6 +1384,7 @@ impl Metadata { /// Ok(()) /// } /// ``` + #[doc(alias = "btime", alias = "birthtime", alias = "ftCreationTime")] #[stable(feature = "fs_time", since = "1.10.0")] pub fn created(&self) -> io::Result<SystemTime> { self.0.created().map(FromInner::from_inner) @@ -1877,6 +1883,7 @@ impl AsInner<fs_imp::DirEntry> for DirEntry { /// Ok(()) /// } /// ``` +#[doc(alias = "rm", alias = "unlink", alias = "DeleteFile")] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove_file<P: AsRef<Path>>(path: P) -> io::Result<()> { fs_imp::unlink(path.as_ref()) @@ -1915,6 +1922,7 @@ pub fn remove_file<P: AsRef<Path>>(path: P) -> io::Result<()> { /// Ok(()) /// } /// ``` +#[doc(alias = "stat")] #[stable(feature = "rust1", since = "1.0.0")] pub fn metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> { fs_imp::stat(path.as_ref()).map(Metadata) @@ -1949,6 +1957,7 @@ pub fn metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> { /// Ok(()) /// } /// ``` +#[doc(alias = "lstat")] #[stable(feature = "symlink_metadata", since = "1.1.0")] pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> { fs_imp::lstat(path.as_ref()).map(Metadata) @@ -1992,6 +2001,7 @@ pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> { /// Ok(()) /// } /// ``` +#[doc(alias = "mv", alias = "MoveFile", alias = "MoveFileEx")] #[stable(feature = "rust1", since = "1.0.0")] pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> { fs_imp::rename(from.as_ref(), to.as_ref()) @@ -2050,6 +2060,9 @@ pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> /// Ok(()) /// } /// ``` +#[doc(alias = "cp")] +#[doc(alias = "CopyFile", alias = "CopyFileEx")] +#[doc(alias = "fclonefileat", alias = "fcopyfile")] #[stable(feature = "rust1", since = "1.0.0")] pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<u64> { fs_imp::copy(from.as_ref(), to.as_ref()) @@ -2094,6 +2107,7 @@ pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<u64> { /// Ok(()) /// } /// ``` +#[doc(alias = "CreateHardLink", alias = "linkat")] #[stable(feature = "rust1", since = "1.0.0")] pub fn hard_link<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> { fs_imp::link(original.as_ref(), link.as_ref()) @@ -2243,7 +2257,7 @@ pub fn canonicalize<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> { /// Ok(()) /// } /// ``` -#[doc(alias = "mkdir")] +#[doc(alias = "mkdir", alias = "CreateDirectory")] #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "fs_create_dir")] pub fn create_dir<P: AsRef<Path>>(path: P) -> io::Result<()> { @@ -2324,7 +2338,7 @@ pub fn create_dir_all<P: AsRef<Path>>(path: P) -> 
io::Result<()> { /// Ok(()) /// } /// ``` -#[doc(alias = "rmdir")] +#[doc(alias = "rmdir", alias = "RemoveDirectory")] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove_dir<P: AsRef<Path>>(path: P) -> io::Result<()> { fs_imp::rmdir(path.as_ref()) @@ -2447,6 +2461,7 @@ pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> { /// Ok(()) /// } /// ``` +#[doc(alias = "ls", alias = "opendir", alias = "FindFirstFile", alias = "FindNextFile")] #[stable(feature = "rust1", since = "1.0.0")] pub fn read_dir<P: AsRef<Path>>(path: P) -> io::Result<ReadDir> { fs_imp::readdir(path.as_ref()).map(ReadDir) @@ -2482,6 +2497,7 @@ pub fn read_dir<P: AsRef<Path>>(path: P) -> io::Result<ReadDir> { /// Ok(()) /// } /// ``` +#[doc(alias = "chmod", alias = "SetFileAttributes")] #[stable(feature = "set_permissions", since = "1.1.0")] pub fn set_permissions<P: AsRef<Path>>(path: P, perm: Permissions) -> io::Result<()> { fs_imp::set_perm(path.as_ref(), perm.0) diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs index 6c7494a6a6f..e0dc9f96ae9 100644 --- a/library/std/src/io/buffered/bufreader.rs +++ b/library/std/src/io/buffered/bufreader.rs @@ -26,8 +26,7 @@ use buffer::Buffer; /// unwrapping the `BufReader<R>` with [`BufReader::into_inner`] can also cause /// data loss. /// -// HACK(#78696): can't use `crate` for associated items -/// [`TcpStream::read`]: super::super::super::net::TcpStream::read +/// [`TcpStream::read`]: crate::net::TcpStream::read /// [`TcpStream`]: crate::net::TcpStream /// /// # Examples @@ -345,6 +344,7 @@ impl<R: ?Sized + Read> Read for BufReader<R> { // delegate to the inner implementation. fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { let inner_buf = self.buffer(); + buf.try_reserve(inner_buf.len()).map_err(|_| io::ErrorKind::OutOfMemory)?; buf.extend_from_slice(inner_buf); let nread = inner_buf.len(); self.discard_buffer(); diff --git a/library/std/src/io/buffered/bufwriter.rs b/library/std/src/io/buffered/bufwriter.rs index 95ba82e1e07..665d8602c08 100644 --- a/library/std/src/io/buffered/bufwriter.rs +++ b/library/std/src/io/buffered/bufwriter.rs @@ -62,8 +62,7 @@ use crate::ptr; /// together by the buffer and will all be written out in one system call when /// the `stream` is flushed. /// -// HACK(#78696): can't use `crate` for associated items -/// [`TcpStream::write`]: super::super::super::net::TcpStream::write +/// [`TcpStream::write`]: crate::net::TcpStream::write /// [`TcpStream`]: crate::net::TcpStream /// [`flush`]: BufWriter::flush #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs index b63091deac2..13cc0511e10 100644 --- a/library/std/src/io/error.rs +++ b/library/std/src/io/error.rs @@ -816,12 +816,12 @@ impl Error { } } - /// Attempt to downgrade the inner error to `E` if any. + /// Attempt to downcast the inner error to `E` if any. /// /// If this [`Error`] was constructed via [`new`] then this function will /// attempt to perform downgrade on it, otherwise it will return [`Err`]. /// - /// If downgrade succeeds, it will return [`Ok`], otherwise it will also + /// If the downcast succeeds, it will return [`Ok`], otherwise it will also /// return [`Err`]. 
/// /// [`new`]: Error::new @@ -852,13 +852,39 @@ impl Error { /// impl From<io::Error> for E { /// fn from(err: io::Error) -> E { /// err.downcast::<E>() - /// .map(|b| *b) /// .unwrap_or_else(E::Io) /// } /// } + /// + /// impl From<E> for io::Error { + /// fn from(err: E) -> io::Error { + /// match err { + /// E::Io(io_error) => io_error, + /// e => io::Error::new(io::ErrorKind::Other, e), + /// } + /// } + /// } + /// + /// # fn main() { + /// let e = E::SomeOtherVariant; + /// // Convert it to an io::Error + /// let io_error = io::Error::from(e); + /// // Cast it back to the original variant + /// let e = E::from(io_error); + /// assert!(matches!(e, E::SomeOtherVariant)); + /// + /// let io_error = io::Error::from(io::ErrorKind::AlreadyExists); + /// // Convert it to E + /// let e = E::from(io_error); + /// // Cast it back to the original variant + /// let io_error = io::Error::from(e); + /// assert_eq!(io_error.kind(), io::ErrorKind::AlreadyExists); + /// assert!(io_error.get_ref().is_none()); + /// assert!(io_error.raw_os_error().is_none()); + /// # } /// ``` #[unstable(feature = "io_error_downcast", issue = "99262")] - pub fn downcast<E>(self) -> result::Result<Box<E>, Self> + pub fn downcast<E>(self) -> result::Result<E, Self> where E: error::Error + Send + Sync + 'static, { @@ -872,7 +898,7 @@ impl Error { // And the compiler should be able to eliminate the branch // that produces `Err` here since b.error.is::<E>() // returns true. - Ok(res.unwrap()) + Ok(*res.unwrap()) } repr_data => Err(Self { repr: Repr::new(repr_data) }), } diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs index db175659770..c053e047b1a 100644 --- a/library/std/src/io/error/repr_bitpacked.rs +++ b/library/std/src/io/error/repr_bitpacked.rs @@ -174,7 +174,10 @@ impl Repr { pub(super) fn new_os(code: RawOsError) -> Self { let utagged = ((code as usize) << 32) | TAG_OS; // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0. - let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData); + let res = Self( + unsafe { NonNull::new_unchecked(ptr::without_provenance_mut(utagged)) }, + PhantomData, + ); // quickly smoke-check we encoded the right thing (This generally will // only run in std's tests, unless the user uses -Zbuild-std) debug_assert!( @@ -188,7 +191,10 @@ impl Repr { pub(super) fn new_simple(kind: ErrorKind) -> Self { let utagged = ((kind as usize) << 32) | TAG_SIMPLE; // Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0. 
- let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData); + let res = Self( + unsafe { NonNull::new_unchecked(ptr::without_provenance_mut(utagged)) }, + PhantomData, + ); // quickly smoke-check we encoded the right thing (This generally will // only run in std's tests, unless the user uses -Zbuild-std) debug_assert!( diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs index 36d52aef03c..fc6db2825e8 100644 --- a/library/std/src/io/error/tests.rs +++ b/library/std/src/io/error/tests.rs @@ -157,7 +157,7 @@ impl error::Error for E {} fn test_std_io_error_downcast() { // Case 1: custom error, downcast succeeds let io_error = Error::new(ErrorKind::Other, Bojji(true)); - let e: Box<Bojji> = io_error.downcast().unwrap(); + let e: Bojji = io_error.downcast().unwrap(); assert!(e.0); // Case 2: custom error, downcast fails @@ -166,7 +166,7 @@ fn test_std_io_error_downcast() { // ensures that the custom error is intact assert_eq!(ErrorKind::Other, io_error.kind()); - let e: Box<Bojji> = io_error.downcast().unwrap(); + let e: Bojji = io_error.downcast().unwrap(); assert!(e.0); // Case 3: os error diff --git a/library/std/src/io/impls.rs b/library/std/src/io/impls.rs index d8c8d933eb4..557e64dc867 100644 --- a/library/std/src/io/impls.rs +++ b/library/std/src/io/impls.rs @@ -303,8 +303,9 @@ impl Read for &[u8] { #[inline] fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { - buf.extend_from_slice(*self); let len = self.len(); + buf.try_reserve(len).map_err(|_| ErrorKind::OutOfMemory)?; + buf.extend_from_slice(*self); *self = &self[len..]; Ok(len) } @@ -451,7 +452,7 @@ impl<A: Allocator> Read for VecDeque<u8, A> { fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { // The total len is known upfront so we can reserve it in a single call. let len = self.len(); - buf.reserve(len); + buf.try_reserve(len).map_err(|_| ErrorKind::OutOfMemory)?; let (front, back) = self.as_slices(); buf.extend_from_slice(front); diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index e3aa973741f..102db62fced 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -430,6 +430,8 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>( loop { match r.read(&mut probe) { Ok(n) => { + // there is no way to recover from allocation failure here + // because the data has already been read. buf.extend_from_slice(&probe[..n]); return Ok(n); } @@ -462,7 +464,8 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>( } if buf.len() == buf.capacity() { - buf.reserve(PROBE_SIZE); // buf is full, need more space + // buf is full, need more space + buf.try_reserve(PROBE_SIZE).map_err(|_| ErrorKind::OutOfMemory)?; } let mut spare = buf.spare_capacity_mut(); @@ -575,10 +578,7 @@ where F: FnOnce(&mut [u8]) -> Result<usize>, { let n = read(cursor.ensure_init().init_mut())?; - unsafe { - // SAFETY: we initialised using `ensure_init` so there is no uninit data to advance to. - cursor.advance(n); - } + cursor.advance(n); Ok(()) } @@ -815,6 +815,39 @@ pub trait Read { /// file.) /// /// [`std::fs::read`]: crate::fs::read + /// + /// ## Implementing `read_to_end` + /// + /// When implementing the `io::Read` trait, it is recommended to allocate + /// memory using [`Vec::try_reserve`]. However, this behavior is not guaranteed + /// by all implementations, and `read_to_end` may not handle out-of-memory + /// situations gracefully. 
+ /// + /// ```no_run + /// # use std::io::{self, BufRead}; + /// # struct Example { example_datasource: io::Empty } impl Example { + /// # fn get_some_data_for_the_example(&self) -> &'static [u8] { &[] } + /// fn read_to_end(&mut self, dest_vec: &mut Vec<u8>) -> io::Result<usize> { + /// let initial_vec_len = dest_vec.len(); + /// loop { + /// let src_buf = self.example_datasource.fill_buf()?; + /// if src_buf.is_empty() { + /// break; + /// } + /// dest_vec.try_reserve(src_buf.len()).map_err(|_| io::ErrorKind::OutOfMemory)?; + /// dest_vec.extend_from_slice(src_buf); + /// + /// // Any irreversible side effects should happen after `try_reserve` succeeds, + /// // to avoid losing data on allocation error. + /// let read = src_buf.len(); + /// self.example_datasource.consume(read); + /// } + /// Ok(dest_vec.len() - initial_vec_len) + /// } + /// # } + /// ``` + /// + /// [`Vec::try_reserve`]: crate::vec::Vec::try_reserve #[stable(feature = "rust1", since = "1.0.0")] fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> { default_read_to_end(self, buf, None) @@ -958,7 +991,10 @@ pub trait Read { } if cursor.written() == prev_written { - return Err(Error::new(ErrorKind::UnexpectedEof, "failed to fill buffer")); + return Err(error::const_io_error!( + ErrorKind::UnexpectedEof, + "failed to fill whole buffer" + )); } } @@ -2636,6 +2672,42 @@ impl<T: Read, U: Read> Read for Chain<T, U> { } self.second.read_vectored(bufs) } + + #[inline] + fn is_read_vectored(&self) -> bool { + self.first.is_read_vectored() || self.second.is_read_vectored() + } + + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> { + let mut read = 0; + if !self.done_first { + read += self.first.read_to_end(buf)?; + self.done_first = true; + } + read += self.second.read_to_end(buf)?; + Ok(read) + } + + // We don't override `read_to_string` here because an UTF-8 sequence could + // be split between the two parts of the chain + + fn read_buf(&mut self, mut buf: BorrowedCursor<'_>) -> Result<()> { + if buf.capacity() == 0 { + return Ok(()); + } + + if !self.done_first { + let old_len = buf.written(); + self.first.read_buf(buf.reborrow())?; + + if buf.written() != old_len { + return Ok(()); + } else { + self.done_first = true; + } + } + self.second.read_buf(buf) + } } #[stable(feature = "chain_bufread", since = "1.9.0")] @@ -2643,9 +2715,7 @@ impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> { fn fill_buf(&mut self) -> Result<&[u8]> { if !self.done_first { match self.first.fill_buf()? 
{ - buf if buf.is_empty() => { - self.done_first = true; - } + buf if buf.is_empty() => self.done_first = true, buf => return Ok(buf), } } @@ -2655,6 +2725,24 @@ impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> { fn consume(&mut self, amt: usize) { if !self.done_first { self.first.consume(amt) } else { self.second.consume(amt) } } + + fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<usize> { + let mut read = 0; + if !self.done_first { + let n = self.first.read_until(byte, buf)?; + read += n; + + match buf.last() { + Some(b) if *b == byte && n != 0 => return Ok(read), + _ => self.done_first = true, + } + } + read += self.second.read_until(byte, buf)?; + Ok(read) + } + + // We don't override `read_line` here because an UTF-8 sequence could be + // split between the two parts of the chain } impl<T, U> SizeHint for Chain<T, U> { @@ -2871,7 +2959,7 @@ impl<T: Read> Read for Take<T> { unsafe { // SAFETY: filled bytes have been filled and therefore initialized - buf.advance(filled); + buf.advance_unchecked(filled); // SAFETY: new_init bytes of buf's unfilled buffer have been initialized buf.set_init(new_init); } diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs index 05b21eeb40f..261b570dee7 100644 --- a/library/std/src/io/stdio.rs +++ b/library/std/src/io/stdio.rs @@ -8,7 +8,9 @@ use crate::io::prelude::*; use crate::cell::{Cell, RefCell}; use crate::fmt; use crate::fs::File; -use crate::io::{self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines}; +use crate::io::{ + self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines, SpecReadByte, +}; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantMutex, ReentrantMutexGuard}; use crate::sys::stdio; @@ -483,6 +485,13 @@ impl Read for StdinLock<'_> { } } +impl SpecReadByte for StdinLock<'_> { + #[inline] + fn spec_read_byte(&mut self) -> Option<io::Result<u8>> { + BufReader::spec_read_byte(&mut *self.inner) + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl BufRead for StdinLock<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs index bda5b721adc..5396f7f6e21 100644 --- a/library/std/src/io/tests.rs +++ b/library/std/src/io/tests.rs @@ -1,6 +1,6 @@ use super::{repeat, BorrowedBuf, Cursor, SeekFrom}; use crate::cmp::{self, min}; -use crate::io::{self, IoSlice, IoSliceMut}; +use crate::io::{self, IoSlice, IoSliceMut, DEFAULT_BUF_SIZE}; use crate::io::{BufRead, BufReader, Read, Seek, Write}; use crate::mem::MaybeUninit; use crate::ops::Deref; @@ -262,6 +262,17 @@ fn chain_bufread() { } #[test] +fn chain_splitted_char() { + let chain = b"\xc3".chain(b"\xa9".as_slice()); + assert_eq!(crate::io::read_to_string(chain).unwrap(), "é"); + + let mut chain = b"\xc3".chain(b"\xa9\n".as_slice()); + let mut buf = String::new(); + assert_eq!(chain.read_line(&mut buf).unwrap(), 3); + assert_eq!(buf, "é\n"); +} + +#[test] fn bufreader_size_hint() { let testdata = b"ABCDEFGHIJKL"; let mut buf_reader = BufReader::new(&testdata[..]); @@ -652,3 +663,32 @@ fn bench_take_read_buf(b: &mut test::Bencher) { [255; 128].take(64).read_buf(buf.unfilled()).unwrap(); }); } + +// Issue #120603 +#[test] +#[should_panic] +fn read_buf_broken_read() { + struct MalformedRead; + + impl Read for MalformedRead { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + // broken length calculation + Ok(buf.len() + 1) + } + } + + let _ = BufReader::new(MalformedRead).fill_buf(); +} 
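The `Chain` hunks above forward `read_to_end`, `read_buf`, and `read_until` so a chained reader drains both halves (and, per the new `chain_splitted_char` test, copes with a UTF-8 sequence split across the boundary). A brief usage sketch:

```
use std::io::Read;

fn main() -> std::io::Result<()> {
    let first: &[u8] = b"hello ";
    let second: &[u8] = b"world";
    let mut buf = Vec::new();
    // With the forwarding above, this drains `first` and then `second`
    // in a single call.
    first.chain(second).read_to_end(&mut buf)?;
    assert_eq!(buf, b"hello world".to_vec());
    Ok(())
}
```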
+ +#[test] +fn read_buf_full_read() { + struct FullRead; + + impl Read for FullRead { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + Ok(buf.len()) + } + } + + assert_eq!(BufReader::new(FullRead).fill_buf().unwrap().len(), DEFAULT_BUF_SIZE); +} diff --git a/library/std/src/io/util.rs b/library/std/src/io/util.rs index 6bc8f181c90..16eaed15e72 100644 --- a/library/std/src/io/util.rs +++ b/library/std/src/io/util.rs @@ -198,12 +198,22 @@ impl Read for Repeat { // SAFETY: the entire unfilled portion of buf has been initialized unsafe { - buf.advance(remaining); + buf.advance_unchecked(remaining); } Ok(()) } + /// This function is not supported by `io::Repeat`, because there's no end of its data + fn read_to_end(&mut self, _: &mut Vec<u8>) -> io::Result<usize> { + Err(io::Error::from(io::ErrorKind::OutOfMemory)) + } + + /// This function is not supported by `io::Repeat`, because there's no end of its data + fn read_to_string(&mut self, _: &mut String) -> io::Result<usize> { + Err(io::Error::from(io::ErrorKind::OutOfMemory)) + } + #[inline] fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { let mut nwritten = 0; diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index 7a8d9d0ceec..c6cd2c6786a 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -262,15 +262,20 @@ )] #![cfg_attr(any(windows, target_os = "uefi"), feature(round_char_boundary))] #![cfg_attr(target_os = "xous", feature(slice_ptr_len))] +#![cfg_attr(target_family = "wasm", feature(stdarch_wasm_atomic_wait))] +#![cfg_attr( + all(any(target_arch = "x86_64", target_arch = "x86"), target_os = "uefi"), + feature(stdarch_x86_has_cpuid) +)] // // Language features: // tidy-alphabetical-start -#![cfg_attr(not(bootstrap), feature(cfg_sanitizer_cfi))] #![feature(alloc_error_handler)] #![feature(allocator_internals)] #![feature(allow_internal_unsafe)] #![feature(allow_internal_unstable)] #![feature(c_unwind)] +#![feature(cfg_sanitizer_cfi)] #![feature(cfg_target_thread_local)] #![feature(cfi_encoding)] #![feature(concat_idents)] @@ -323,13 +328,14 @@ #![feature(float_gamma)] #![feature(float_minimum_maximum)] #![feature(float_next_up_down)] +#![feature(generic_nonzero)] #![feature(hasher_prefixfree_extras)] #![feature(hashmap_internals)] +#![feature(hint_assert_unchecked)] #![feature(ip)] #![feature(maybe_uninit_slice)] #![feature(maybe_uninit_uninit_array)] #![feature(maybe_uninit_write_slice)] -#![feature(offset_of)] #![feature(panic_can_unwind)] #![feature(panic_info_message)] #![feature(panic_internals)] @@ -337,14 +343,14 @@ #![feature(portable_simd)] #![feature(prelude_2024)] #![feature(ptr_as_uninit)] -#![feature(raw_os_nonzero)] -#![feature(round_ties_even)] +#![feature(ptr_mask)] #![feature(slice_internals)] #![feature(slice_ptr_get)] #![feature(slice_range)] #![feature(std_internals)] #![feature(str_internals)] #![feature(strict_provenance)] +#![feature(strict_provenance_atomic_ptr)] // tidy-alphabetical-end // // Library features (alloc): @@ -365,6 +371,11 @@ #![feature(panic_unwind)] // tidy-alphabetical-end // +// Library features (std_detect): +// tidy-alphabetical-start +#![feature(stdarch_internal)] +// tidy-alphabetical-end +// // Only for re-exporting: // tidy-alphabetical-start #![feature(assert_matches)] @@ -380,7 +391,6 @@ #![feature(get_many_mut)] #![feature(lazy_cell)] #![feature(log_syntax)] -#![feature(stdsimd)] #![feature(test)] #![feature(trace_macros)] // tidy-alphabetical-end @@ -619,13 +629,16 @@ pub mod arch { #[stable(feature = "simd_aarch64", 
since = "1.60.0")] pub use std_detect::is_aarch64_feature_detected; + #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] + pub use std_detect::is_arm_feature_detected; + #[unstable(feature = "is_riscv_feature_detected", issue = "111192")] + pub use std_detect::is_riscv_feature_detected; #[stable(feature = "simd_x86", since = "1.27.0")] pub use std_detect::is_x86_feature_detected; - #[unstable(feature = "stdsimd", issue = "48556")] - pub use std_detect::{ - is_arm_feature_detected, is_mips64_feature_detected, is_mips_feature_detected, - is_powerpc64_feature_detected, is_powerpc_feature_detected, is_riscv_feature_detected, - }; + #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] + pub use std_detect::{is_mips64_feature_detected, is_mips_feature_detected}; + #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] + pub use std_detect::{is_powerpc64_feature_detected, is_powerpc_feature_detected}; } // This was stabilized in the crate root so we have to keep it there. diff --git a/library/std/src/num.rs b/library/std/src/num.rs index 55f6ddcf77f..1343fdfd1df 100644 --- a/library/std/src/num.rs +++ b/library/std/src/num.rs @@ -16,6 +16,16 @@ pub use core::num::Wrapping; #[stable(feature = "rust1", since = "1.0.0")] pub use core::num::{FpCategory, ParseFloatError, ParseIntError, TryFromIntError}; +#[unstable( + feature = "nonzero_internals", + reason = "implementation detail which may disappear or be replaced at any time", + issue = "none" +)] +pub use core::num::ZeroablePrimitive; + +#[unstable(feature = "generic_nonzero", issue = "120257")] +pub use core::num::NonZero; + #[stable(feature = "signed_nonzero", since = "1.34.0")] pub use core::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize}; #[stable(feature = "nonzero", since = "1.28.0")] diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs index 24f2bdcf421..a4c2dc8b1ed 100644 --- a/library/std/src/os/fd/owned.rs +++ b/library/std/src/os/fd/owned.rs @@ -288,6 +288,7 @@ impl AsFd for fs::File { #[stable(feature = "io_safety", since = "1.63.0")] impl From<fs::File> for OwnedFd { + /// Takes ownership of a [`File`](fs::File)'s underlying file descriptor. #[inline] fn from(file: fs::File) -> OwnedFd { file.into_inner().into_inner().into_inner() @@ -296,6 +297,8 @@ impl From<fs::File> for OwnedFd { #[stable(feature = "io_safety", since = "1.63.0")] impl From<OwnedFd> for fs::File { + /// Returns a [`File`](fs::File) that takes ownership of the given + /// file descriptor. #[inline] fn from(owned_fd: OwnedFd) -> Self { Self::from_inner(FromInner::from_inner(FromInner::from_inner(owned_fd))) @@ -312,6 +315,7 @@ impl AsFd for crate::net::TcpStream { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::net::TcpStream> for OwnedFd { + /// Takes ownership of a [`TcpStream`](crate::net::TcpStream)'s socket file descriptor. #[inline] fn from(tcp_stream: crate::net::TcpStream) -> OwnedFd { tcp_stream.into_inner().into_socket().into_inner().into_inner().into() @@ -338,6 +342,7 @@ impl AsFd for crate::net::TcpListener { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::net::TcpListener> for OwnedFd { + /// Takes ownership of a [`TcpListener`](crate::net::TcpListener)'s socket file descriptor. 
#[inline] fn from(tcp_listener: crate::net::TcpListener) -> OwnedFd { tcp_listener.into_inner().into_socket().into_inner().into_inner().into() @@ -364,6 +369,7 @@ impl AsFd for crate::net::UdpSocket { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::net::UdpSocket> for OwnedFd { + /// Takes ownership of a [`UdpSocket`](crate::net::UdpSocket)'s file descriptor. #[inline] fn from(udp_socket: crate::net::UdpSocket) -> OwnedFd { udp_socket.into_inner().into_socket().into_inner().into_inner().into() diff --git a/library/std/src/os/linux/process.rs b/library/std/src/os/linux/process.rs index 51af432d056..2ba67a6dd1a 100644 --- a/library/std/src/os/linux/process.rs +++ b/library/std/src/os/linux/process.rs @@ -149,8 +149,7 @@ pub trait CommandExt: Sealed { /// The pidfd can be retrieved from the child with [`pidfd`] or [`take_pidfd`]. /// /// A pidfd will only be created if it is possible to do so - /// in a guaranteed race-free manner (e.g. if the `clone3` system call - /// is supported). Otherwise, [`pidfd`] will return an error. + /// in a guaranteed race-free manner. Otherwise, [`pidfd`] will return an error. /// /// If a pidfd has been successfully created and not been taken from the `Child` /// then calls to `kill()`, `wait()` and `try_wait()` will use the pidfd diff --git a/library/std/src/os/unix/fs.rs b/library/std/src/os/unix/fs.rs index e995d5133f8..058e9b90cc7 100644 --- a/library/std/src/os/unix/fs.rs +++ b/library/std/src/os/unix/fs.rs @@ -397,7 +397,6 @@ pub trait OpenOptionsExt { /// /// ```no_run /// # #![feature(rustc_private)] - /// use libc; /// use std::fs::OpenOptions; /// use std::os::unix::fs::OpenOptionsExt; /// diff --git a/library/std/src/os/unix/net/datagram.rs b/library/std/src/os/unix/net/datagram.rs index 34db54235f1..3b7b610fdf9 100644 --- a/library/std/src/os/unix/net/datagram.rs +++ b/library/std/src/os/unix/net/datagram.rs @@ -1024,6 +1024,7 @@ impl AsFd for UnixDatagram { #[stable(feature = "io_safety", since = "1.63.0")] impl From<UnixDatagram> for OwnedFd { + /// Takes ownership of a [`UnixDatagram`]'s socket file descriptor. #[inline] fn from(unix_datagram: UnixDatagram) -> OwnedFd { unsafe { OwnedFd::from_raw_fd(unix_datagram.into_raw_fd()) } diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs index 8bf1e2dca6f..d64a43bc20b 100644 --- a/library/std/src/os/unix/net/listener.rs +++ b/library/std/src/os/unix/net/listener.rs @@ -80,7 +80,12 @@ impl UnixListener { target_os = "horizon" ))] const backlog: libc::c_int = 128; - #[cfg(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"))] + #[cfg(any( + target_os = "linux", + target_os = "freebsd", + target_os = "openbsd", + target_os = "macos" + ))] const backlog: libc::c_int = -1; #[cfg(not(any( target_os = "windows", @@ -88,6 +93,7 @@ impl UnixListener { target_os = "linux", target_os = "freebsd", target_os = "openbsd", + target_os = "macos", target_os = "espidf", target_os = "horizon" )))] @@ -340,6 +346,7 @@ impl From<OwnedFd> for UnixListener { #[stable(feature = "io_safety", since = "1.63.0")] impl From<UnixListener> for OwnedFd { + /// Takes ownership of a [`UnixListener`]'s socket file descriptor. 
#[inline] fn from(listener: UnixListener) -> OwnedFd { listener.0.into_inner().into_inner() diff --git a/library/std/src/os/unix/net/stream.rs b/library/std/src/os/unix/net/stream.rs index 41290e0017a..e117f616caf 100644 --- a/library/std/src/os/unix/net/stream.rs +++ b/library/std/src/os/unix/net/stream.rs @@ -752,6 +752,7 @@ impl AsFd for UnixStream { #[stable(feature = "io_safety", since = "1.63.0")] impl From<UnixStream> for OwnedFd { + /// Takes ownership of a [`UnixStream`]'s socket file descriptor. #[inline] fn from(unix_stream: UnixStream) -> OwnedFd { unsafe { OwnedFd::from_raw_fd(unix_stream.into_raw_fd()) } diff --git a/library/std/src/os/unix/process.rs b/library/std/src/os/unix/process.rs index ac551030492..e45457b2e42 100644 --- a/library/std/src/os/unix/process.rs +++ b/library/std/src/os/unix/process.rs @@ -362,6 +362,8 @@ impl FromRawFd for process::Stdio { #[stable(feature = "io_safety", since = "1.63.0")] impl From<OwnedFd> for process::Stdio { + /// Takes ownership of a file descriptor and returns a [`Stdio`](process::Stdio) + /// that can attach a stream to it. #[inline] fn from(fd: OwnedFd) -> process::Stdio { let fd = sys::fd::FileDesc::from_inner(fd); @@ -428,6 +430,7 @@ impl AsFd for crate::process::ChildStdin { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::process::ChildStdin> for OwnedFd { + /// Takes ownership of a [`ChildStdin`](crate::process::ChildStdin)'s file descriptor. #[inline] fn from(child_stdin: crate::process::ChildStdin) -> OwnedFd { child_stdin.into_inner().into_inner().into_inner() @@ -458,6 +461,7 @@ impl AsFd for crate::process::ChildStdout { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::process::ChildStdout> for OwnedFd { + /// Takes ownership of a [`ChildStdout`](crate::process::ChildStdout)'s file descriptor. #[inline] fn from(child_stdout: crate::process::ChildStdout) -> OwnedFd { child_stdout.into_inner().into_inner().into_inner() @@ -488,6 +492,7 @@ impl AsFd for crate::process::ChildStderr { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::process::ChildStderr> for OwnedFd { + /// Takes ownership of a [`ChildStderr`](crate::process::ChildStderr)'s file descriptor. #[inline] fn from(child_stderr: crate::process::ChildStderr) -> OwnedFd { child_stderr.into_inner().into_inner().into_inner() diff --git a/library/std/src/os/wasi/fs.rs b/library/std/src/os/wasi/fs.rs index 3da8c835511..4525c3aa914 100644 --- a/library/std/src/os/wasi/fs.rs +++ b/library/std/src/os/wasi/fs.rs @@ -173,51 +173,61 @@ pub trait FileExt { /// /// This corresponds to the `fd_tell` syscall and is similar to /// `seek` where you offset 0 bytes from the current position. + #[doc(alias = "fd_tell")] fn tell(&self) -> io::Result<u64>; /// Adjust the flags associated with this file. /// /// This corresponds to the `fd_fdstat_set_flags` syscall. + #[doc(alias = "fd_fdstat_set_flags")] fn fdstat_set_flags(&self, flags: u16) -> io::Result<()>; /// Adjust the rights associated with this file. /// /// This corresponds to the `fd_fdstat_set_rights` syscall. + #[doc(alias = "fd_fdstat_set_rights")] fn fdstat_set_rights(&self, rights: u64, inheriting: u64) -> io::Result<()>; /// Provide file advisory information on a file descriptor. /// /// This corresponds to the `fd_advise` syscall. + #[doc(alias = "fd_advise")] fn advise(&self, offset: u64, len: u64, advice: u8) -> io::Result<()>; /// Force the allocation of space in a file. /// /// This corresponds to the `fd_allocate` syscall. 
+ #[doc(alias = "fd_allocate")] fn allocate(&self, offset: u64, len: u64) -> io::Result<()>; /// Create a directory. /// /// This corresponds to the `path_create_directory` syscall. + #[doc(alias = "path_create_directory")] fn create_directory<P: AsRef<Path>>(&self, dir: P) -> io::Result<()>; /// Read the contents of a symbolic link. /// /// This corresponds to the `path_readlink` syscall. + #[doc(alias = "path_readlink")] fn read_link<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf>; /// Return the attributes of a file or directory. /// /// This corresponds to the `path_filestat_get` syscall. + #[doc(alias = "path_filestat_get")] fn metadata_at<P: AsRef<Path>>(&self, lookup_flags: u32, path: P) -> io::Result<Metadata>; /// Unlink a file. /// /// This corresponds to the `path_unlink_file` syscall. + #[doc(alias = "path_unlink_file")] fn remove_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()>; /// Remove a directory. /// /// This corresponds to the `path_remove_directory` syscall. + #[doc(alias = "path_remove_directory")] fn remove_directory<P: AsRef<Path>>(&self, path: P) -> io::Result<()>; } @@ -359,6 +369,7 @@ pub trait OpenOptionsExt { /// Open a file or directory. /// /// This corresponds to the `path_open` syscall. + #[doc(alias = "path_open")] fn open_at<P: AsRef<Path>>(&self, file: &File, path: P) -> io::Result<File>; } @@ -500,6 +511,7 @@ impl DirEntryExt for fs::DirEntry { /// Create a hard link. /// /// This corresponds to the `path_link` syscall. +#[doc(alias = "path_link")] pub fn link<P: AsRef<Path>, U: AsRef<Path>>( old_fd: &File, old_flags: u32, @@ -518,6 +530,7 @@ pub fn link<P: AsRef<Path>, U: AsRef<Path>>( /// Rename a file or directory. /// /// This corresponds to the `path_rename` syscall. +#[doc(alias = "path_rename")] pub fn rename<P: AsRef<Path>, U: AsRef<Path>>( old_fd: &File, old_path: P, @@ -534,6 +547,7 @@ pub fn rename<P: AsRef<Path>, U: AsRef<Path>>( /// Create a symbolic link. /// /// This corresponds to the `path_symlink` syscall. +#[doc(alias = "path_symlink")] pub fn symlink<P: AsRef<Path>, U: AsRef<Path>>( old_path: P, fd: &File, diff --git a/library/std/src/os/windows/fs.rs b/library/std/src/os/windows/fs.rs index 1b013d1c154..e9d7a13e9d5 100644 --- a/library/std/src/os/windows/fs.rs +++ b/library/std/src/os/windows/fs.rs @@ -59,7 +59,7 @@ pub trait FileExt { /// function, it is set to the end of the write. /// /// When writing beyond the end of the file, the file is appropriately - /// extended and the intermediate bytes are left uninitialized. + /// extended and the intermediate bytes are set to zero. /// /// Note that similar to `File::write`, it is not an error to return a /// short write. When returning from such a short write, the file pointer diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs index b0540872c0b..458c3bb036d 100644 --- a/library/std/src/os/windows/io/handle.rs +++ b/library/std/src/os/windows/io/handle.rs @@ -502,6 +502,7 @@ impl AsHandle for fs::File { #[stable(feature = "io_safety", since = "1.63.0")] impl From<fs::File> for OwnedHandle { + /// Takes ownership of a [`File`](fs::File)'s underlying file handle. #[inline] fn from(file: fs::File) -> OwnedHandle { file.into_inner().into_inner().into_inner() @@ -510,6 +511,7 @@ impl From<fs::File> for OwnedHandle { #[stable(feature = "io_safety", since = "1.63.0")] impl From<OwnedHandle> for fs::File { + /// Returns a [`File`](fs::File) that takes ownership of the given handle. 
#[inline] fn from(owned: OwnedHandle) -> Self { Self::from_inner(FromInner::from_inner(FromInner::from_inner(owned))) @@ -574,6 +576,7 @@ impl AsHandle for crate::process::ChildStdin { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::process::ChildStdin> for OwnedHandle { + /// Takes ownership of a [`ChildStdin`](crate::process::ChildStdin)'s file handle. #[inline] fn from(child_stdin: crate::process::ChildStdin) -> OwnedHandle { unsafe { OwnedHandle::from_raw_handle(child_stdin.into_raw_handle()) } @@ -590,6 +593,7 @@ impl AsHandle for crate::process::ChildStdout { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::process::ChildStdout> for OwnedHandle { + /// Takes ownership of a [`ChildStdout`](crate::process::ChildStdout)'s file handle. #[inline] fn from(child_stdout: crate::process::ChildStdout) -> OwnedHandle { unsafe { OwnedHandle::from_raw_handle(child_stdout.into_raw_handle()) } @@ -606,6 +610,7 @@ impl AsHandle for crate::process::ChildStderr { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::process::ChildStderr> for OwnedHandle { + /// Takes ownership of a [`ChildStderr`](crate::process::ChildStderr)'s file handle. #[inline] fn from(child_stderr: crate::process::ChildStderr) -> OwnedHandle { unsafe { OwnedHandle::from_raw_handle(child_stderr.into_raw_handle()) } diff --git a/library/std/src/os/windows/io/socket.rs b/library/std/src/os/windows/io/socket.rs index 65f161f32e7..6ffdf907c8e 100644 --- a/library/std/src/os/windows/io/socket.rs +++ b/library/std/src/os/windows/io/socket.rs @@ -319,6 +319,7 @@ impl AsSocket for crate::net::TcpStream { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::net::TcpStream> for OwnedSocket { + /// Takes ownership of a [`TcpStream`](crate::net::TcpStream)'s socket. #[inline] fn from(tcp_stream: crate::net::TcpStream) -> OwnedSocket { unsafe { OwnedSocket::from_raw_socket(tcp_stream.into_raw_socket()) } @@ -343,6 +344,7 @@ impl AsSocket for crate::net::TcpListener { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::net::TcpListener> for OwnedSocket { + /// Takes ownership of a [`TcpListener`](crate::net::TcpListener)'s socket. #[inline] fn from(tcp_listener: crate::net::TcpListener) -> OwnedSocket { unsafe { OwnedSocket::from_raw_socket(tcp_listener.into_raw_socket()) } @@ -367,6 +369,7 @@ impl AsSocket for crate::net::UdpSocket { #[stable(feature = "io_safety", since = "1.63.0")] impl From<crate::net::UdpSocket> for OwnedSocket { + /// Takes ownership of a [`UdpSocket`](crate::net::UdpSocket)'s underlying socket. #[inline] fn from(udp_socket: crate::net::UdpSocket) -> OwnedSocket { unsafe { OwnedSocket::from_raw_socket(udp_socket.into_raw_socket()) } diff --git a/library/std/src/os/windows/process.rs b/library/std/src/os/windows/process.rs index 5bf0154eae3..1be3acf5d43 100644 --- a/library/std/src/os/windows/process.rs +++ b/library/std/src/os/windows/process.rs @@ -24,6 +24,8 @@ impl FromRawHandle for process::Stdio { #[stable(feature = "io_safety", since = "1.63.0")] impl From<OwnedHandle> for process::Stdio { + /// Takes ownership of a handle and returns a [`Stdio`](process::Stdio) + /// that can attach a stream to it. 
fn from(handle: OwnedHandle) -> process::Stdio { let handle = sys::handle::Handle::from_inner(handle); let io = sys::process::Stdio::Handle(handle); @@ -56,6 +58,7 @@ impl IntoRawHandle for process::Child { #[stable(feature = "io_safety", since = "1.63.0")] impl From<process::Child> for OwnedHandle { + /// Takes ownership of a [`Child`](process::Child)'s process handle. fn from(child: process::Child) -> OwnedHandle { child.into_inner().into_handle().into_inner() } diff --git a/library/std/src/os/xous/ffi.rs b/library/std/src/os/xous/ffi.rs index 8be7fbb102f..7fe84db515c 100644 --- a/library/std/src/os/xous/ffi.rs +++ b/library/std/src/os/xous/ffi.rs @@ -88,29 +88,31 @@ fn lend_impl( let a3 = opcode; let a4 = data.as_ptr() as usize; let a5 = data.len(); - let mut a6 = arg1; - let mut a7 = arg2; + let a6 = arg1; + let a7 = arg2; + let mut ret1; + let mut ret2; unsafe { core::arch::asm!( "ecall", inlateout("a0") a0, - inlateout("a1") a1 => _, - inlateout("a2") a2 => _, + inlateout("a1") a1 => ret1, + inlateout("a2") a2 => ret2, inlateout("a3") a3 => _, inlateout("a4") a4 => _, inlateout("a5") a5 => _, - inlateout("a6") a6, - inlateout("a7") a7, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, ) }; let result = a0; if result == SyscallResult::MemoryReturned as usize { - Ok((a6, a7)) + Ok((ret1, ret2)) } else if result == SyscallResult::Error as usize { - Err(a1.into()) + Err(ret1.into()) } else { Err(Error::InternalError) } @@ -405,7 +407,7 @@ pub(crate) unsafe fn map_memory<T>( pub(crate) unsafe fn unmap_memory<T>(range: *mut [T]) -> Result<(), Error> { let mut a0 = Syscall::UnmapMemory as usize; let mut a1 = range.as_mut_ptr() as usize; - let a2 = range.len(); + let a2 = range.len() * core::mem::size_of::<T>(); let a3 = 0; let a4 = 0; let a5 = 0; @@ -450,7 +452,7 @@ pub(crate) unsafe fn update_memory_flags<T>( ) -> Result<(), Error> { let mut a0 = Syscall::UpdateMemoryFlags as usize; let mut a1 = range.as_mut_ptr() as usize; - let a2 = range.len(); + let a2 = range.len() * core::mem::size_of::<T>(); let a3 = new_flags.bits(); let a4 = 0; // Process ID is currently None let a5 = 0; diff --git a/library/std/src/os/xous/services.rs b/library/std/src/os/xous/services.rs index 5c219f1fbb9..a75be1b8570 100644 --- a/library/std/src/os/xous/services.rs +++ b/library/std/src/os/xous/services.rs @@ -1,9 +1,15 @@ use crate::os::xous::ffi::Connection; use core::sync::atomic::{AtomicU32, Ordering}; +mod dns; +pub(crate) use dns::*; + mod log; pub(crate) use log::*; +mod net; +pub(crate) use net::*; + mod systime; pub(crate) use systime::*; diff --git a/library/std/src/os/xous/services/dns.rs b/library/std/src/os/xous/services/dns.rs new file mode 100644 index 00000000000..a7d88f4892c --- /dev/null +++ b/library/std/src/os/xous/services/dns.rs @@ -0,0 +1,28 @@ +use crate::os::xous::ffi::Connection; +use crate::os::xous::services::connect; +use core::sync::atomic::{AtomicU32, Ordering}; + +#[repr(usize)] +pub(crate) enum DnsLendMut { + RawLookup = 6, +} + +impl Into<usize> for DnsLendMut { + fn into(self) -> usize { + self as usize + } +} + +/// Return a `Connection` to the DNS lookup server. This server is used for +/// querying domain name values. 
+pub(crate) fn dns_server() -> Connection { + static DNS_CONNECTION: AtomicU32 = AtomicU32::new(0); + let cid = DNS_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return cid.into(); + } + + let cid = connect("_DNS Resolver Middleware_").unwrap(); + DNS_CONNECTION.store(cid.into(), Ordering::Relaxed); + cid +} diff --git a/library/std/src/os/xous/services/log.rs b/library/std/src/os/xous/services/log.rs index e6bae929eac..55a501dc7d0 100644 --- a/library/std/src/os/xous/services/log.rs +++ b/library/std/src/os/xous/services/log.rs @@ -45,6 +45,17 @@ impl<'a> Into<[usize; 5]> for LogScalar<'a> { } } +pub(crate) enum LogLend { + StandardOutput = 1, + StandardError = 2, +} + +impl Into<usize> for LogLend { + fn into(self) -> usize { + self as usize + } +} + /// Return a `Connection` to the log server, which is used for printing messages to /// the console and reporting panics. If the log server has not yet started, this /// will block until the server is running. It is safe to call this multiple times, diff --git a/library/std/src/os/xous/services/net.rs b/library/std/src/os/xous/services/net.rs new file mode 100644 index 00000000000..26d337dcef1 --- /dev/null +++ b/library/std/src/os/xous/services/net.rs @@ -0,0 +1,95 @@ +use crate::os::xous::ffi::Connection; +use crate::os::xous::services::connect; +use core::sync::atomic::{AtomicU32, Ordering}; + +pub(crate) enum NetBlockingScalar { + StdGetTtlUdp(u16 /* fd */), /* 36 */ + StdSetTtlUdp(u16 /* fd */, u32 /* ttl */), /* 37 */ + StdGetTtlTcp(u16 /* fd */), /* 36 */ + StdSetTtlTcp(u16 /* fd */, u32 /* ttl */), /* 37 */ + StdGetNodelay(u16 /* fd */), /* 38 */ + StdSetNodelay(u16 /* fd */, bool), /* 39 */ + StdTcpClose(u16 /* fd */), /* 34 */ + StdUdpClose(u16 /* fd */), /* 41 */ + StdTcpStreamShutdown(u16 /* fd */, crate::net::Shutdown /* how */), /* 46 */ +} + +pub(crate) enum NetLendMut { + StdTcpConnect, /* 30 */ + StdTcpTx(u16 /* fd */), /* 31 */ + StdTcpPeek(u16 /* fd */, bool /* nonblocking */), /* 32 */ + StdTcpRx(u16 /* fd */, bool /* nonblocking */), /* 33 */ + StdGetAddress(u16 /* fd */), /* 35 */ + StdUdpBind, /* 40 */ + StdUdpRx(u16 /* fd */), /* 42 */ + StdUdpTx(u16 /* fd */), /* 43 */ + StdTcpListen, /* 44 */ + StdTcpAccept(u16 /* fd */), /* 45 */ +} + +impl Into<usize> for NetLendMut { + fn into(self) -> usize { + match self { + NetLendMut::StdTcpConnect => 30, + NetLendMut::StdTcpTx(fd) => 31 | ((fd as usize) << 16), + NetLendMut::StdTcpPeek(fd, blocking) => { + 32 | ((fd as usize) << 16) | if blocking { 0x8000 } else { 0 } + } + NetLendMut::StdTcpRx(fd, blocking) => { + 33 | ((fd as usize) << 16) | if blocking { 0x8000 } else { 0 } + } + NetLendMut::StdGetAddress(fd) => 35 | ((fd as usize) << 16), + NetLendMut::StdUdpBind => 40, + NetLendMut::StdUdpRx(fd) => 42 | ((fd as usize) << 16), + NetLendMut::StdUdpTx(fd) => 43 | ((fd as usize) << 16), + NetLendMut::StdTcpListen => 44, + NetLendMut::StdTcpAccept(fd) => 45 | ((fd as usize) << 16), + } + } +} + +impl<'a> Into<[usize; 5]> for NetBlockingScalar { + fn into(self) -> [usize; 5] { + match self { + NetBlockingScalar::StdGetTtlTcp(fd) => [36 | ((fd as usize) << 16), 0, 0, 0, 0], + NetBlockingScalar::StdGetTtlUdp(fd) => [36 | ((fd as usize) << 16), 0, 0, 0, 1], + NetBlockingScalar::StdSetTtlTcp(fd, ttl) => { + [37 | ((fd as usize) << 16), ttl as _, 0, 0, 0] + } + NetBlockingScalar::StdSetTtlUdp(fd, ttl) => { + [37 | ((fd as usize) << 16), ttl as _, 0, 0, 1] + } + NetBlockingScalar::StdGetNodelay(fd) => [38 | ((fd as usize) << 16), 0, 0, 0, 0], + 
NetBlockingScalar::StdSetNodelay(fd, enabled) => { + [39 | ((fd as usize) << 16), if enabled { 1 } else { 0 }, 0, 0, 1] + } + NetBlockingScalar::StdTcpClose(fd) => [34 | ((fd as usize) << 16), 0, 0, 0, 0], + NetBlockingScalar::StdUdpClose(fd) => [41 | ((fd as usize) << 16), 0, 0, 0, 0], + NetBlockingScalar::StdTcpStreamShutdown(fd, how) => [ + 46 | ((fd as usize) << 16), + match how { + crate::net::Shutdown::Read => 1, + crate::net::Shutdown::Write => 2, + crate::net::Shutdown::Both => 3, + }, + 0, + 0, + 0, + ], + } + } +} + +/// Return a `Connection` to the Network server. This server provides all +/// OS-level networking functions. +pub(crate) fn net_server() -> Connection { + static NET_CONNECTION: AtomicU32 = AtomicU32::new(0); + let cid = NET_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return cid.into(); + } + + let cid = connect("_Middleware Network Server_").unwrap(); + NET_CONNECTION.store(cid.into(), Ordering::Relaxed); + cid +} diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs index 7f6b563d729..3728d5b64b8 100644 --- a/library/std/src/panic.rs +++ b/library/std/src/panic.rs @@ -5,7 +5,7 @@ use crate::any::Any; use crate::collections; use crate::panicking; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{AtomicU8, Ordering}; use crate::sync::{Mutex, RwLock}; use crate::thread::Result; @@ -228,7 +228,7 @@ impl BacktraceStyle { if cfg!(feature = "backtrace") { Some(BacktraceStyle::Full) } else { None } } - fn as_usize(self) -> usize { + fn as_u8(self) -> u8 { match self { BacktraceStyle::Short => 1, BacktraceStyle::Full => 2, @@ -236,7 +236,7 @@ impl BacktraceStyle { } } - fn from_usize(s: usize) -> Option<Self> { + fn from_u8(s: u8) -> Option<Self> { Some(match s { 0 => return None, 1 => BacktraceStyle::Short, @@ -251,7 +251,7 @@ impl BacktraceStyle { // that backtrace. // // Internally stores equivalent of an Option<BacktraceStyle>. -static SHOULD_CAPTURE: AtomicUsize = AtomicUsize::new(0); +static SHOULD_CAPTURE: AtomicU8 = AtomicU8::new(0); /// Configure whether the default panic hook will capture and display a /// backtrace. @@ -264,7 +264,7 @@ pub fn set_backtrace_style(style: BacktraceStyle) { // If the `backtrace` feature of this crate isn't enabled, skip setting. return; } - SHOULD_CAPTURE.store(style.as_usize(), Ordering::Release); + SHOULD_CAPTURE.store(style.as_u8(), Ordering::Release); } /// Checks whether the standard library's panic hook will capture and print a @@ -296,7 +296,7 @@ pub fn get_backtrace_style() -> Option<BacktraceStyle> { // to optimize away callers. 
return None; } - if let Some(style) = BacktraceStyle::from_usize(SHOULD_CAPTURE.load(Ordering::Acquire)) { + if let Some(style) = BacktraceStyle::from_u8(SHOULD_CAPTURE.load(Ordering::Acquire)) { return Some(style); } diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index c80f15d8a61..c8306c1b597 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -337,8 +337,9 @@ pub mod panic_count { #[doc(hidden)] #[cfg(not(feature = "panic_immediate_abort"))] #[unstable(feature = "update_panic_count", issue = "none")] -// FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_ref` lint -#[cfg_attr(not(bootstrap), allow(static_mut_ref))] +// FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_refs` lint +#[cfg_attr(bootstrap, allow(static_mut_ref))] +#[cfg_attr(not(bootstrap), allow(static_mut_refs))] pub mod panic_count { use crate::cell::Cell; use crate::sync::atomic::{AtomicUsize, Ordering}; diff --git a/library/std/src/prelude/mod.rs b/library/std/src/prelude/mod.rs index 1b29c887d21..7d44d2e4b5d 100644 --- a/library/std/src/prelude/mod.rs +++ b/library/std/src/prelude/mod.rs @@ -132,13 +132,13 @@ pub mod rust_2021 { /// The 2024 version of the prelude of The Rust Standard Library. /// /// See the [module-level documentation](self) for more. -#[unstable(feature = "prelude_2024", issue = "none")] +#[unstable(feature = "prelude_2024", issue = "121042")] pub mod rust_2024 { - #[unstable(feature = "prelude_2024", issue = "none")] + #[unstable(feature = "prelude_2024", issue = "121042")] #[doc(no_inline)] pub use super::v1::*; - #[unstable(feature = "prelude_2024", issue = "none")] + #[unstable(feature = "prelude_2024", issue = "121042")] #[doc(no_inline)] pub use core::prelude::rust_2024::*; } diff --git a/library/std/src/process.rs b/library/std/src/process.rs index 4a7f5d8e0be..669affa266a 100644 --- a/library/std/src/process.rs +++ b/library/std/src/process.rs @@ -171,7 +171,7 @@ pub struct Child { /// The handle for writing to the child's standard input (stdin), if it /// has been captured. You might find it helpful to do /// - /// ```compile_fail,E0425 + /// ```ignore (incomplete) /// let stdin = child.stdin.take().unwrap(); /// ``` /// @@ -183,7 +183,7 @@ pub struct Child { /// The handle for reading from the child's standard output (stdout), if it /// has been captured. You might find it helpful to do /// - /// ```compile_fail,E0425 + /// ```ignore (incomplete) /// let stdout = child.stdout.take().unwrap(); /// ``` /// @@ -195,7 +195,7 @@ pub struct Child { /// The handle for reading from the child's standard error (stderr), if it /// has been captured. 
You might find it helpful to do /// - /// ```compile_fail,E0425 + /// ```ignore (incomplete) /// let stderr = child.stderr.take().unwrap(); /// ``` /// diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs index 184c406e326..920143b7ac7 100644 --- a/library/std/src/sync/mutex.rs +++ b/library/std/src/sync/mutex.rs @@ -404,7 +404,7 @@ impl<T: ?Sized> Mutex<T> { /// assert_eq!(*x, 1); /// ``` #[inline] - #[stable(feature = "mutex_unpoison", since = "CURRENT_RUSTC_VERSION")] + #[stable(feature = "mutex_unpoison", since = "1.77.0")] pub fn clear_poison(&self) { self.poison.clear(); } diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs index b8873a3b59a..6d068613f8f 100644 --- a/library/std/src/sync/once_lock.rs +++ b/library/std/src/sync/once_lock.rs @@ -13,7 +13,7 @@ use crate::sync::Once; /// /// # Examples /// -/// Using `OnceCell` to store a function’s previously computed value (a.k.a. +/// Using `OnceLock` to store a function’s previously computed value (a.k.a. /// ‘lazy static’ or ‘memoizing’): /// /// ``` diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs index 741312d5537..1e978bec4b4 100644 --- a/library/std/src/sync/poison.rs +++ b/library/std/src/sync/poison.rs @@ -1,9 +1,13 @@ use crate::error::Error; use crate::fmt; + +#[cfg(panic = "unwind")] use crate::sync::atomic::{AtomicBool, Ordering}; +#[cfg(panic = "unwind")] use crate::thread; pub struct Flag { + #[cfg(panic = "unwind")] failed: AtomicBool, } @@ -21,7 +25,10 @@ pub struct Flag { impl Flag { #[inline] pub const fn new() -> Flag { - Flag { failed: AtomicBool::new(false) } + Flag { + #[cfg(panic = "unwind")] + failed: AtomicBool::new(false), + } } /// Check the flag for an unguarded borrow, where we only care about existing poison. @@ -33,11 +40,15 @@ impl Flag { /// Check the flag for a guarded borrow, where we may also set poison when `done`. #[inline] pub fn guard(&self) -> LockResult<Guard> { - let ret = Guard { panicking: thread::panicking() }; + let ret = Guard { + #[cfg(panic = "unwind")] + panicking: thread::panicking(), + }; if self.get() { Err(PoisonError::new(ret)) } else { Ok(ret) } } #[inline] + #[cfg(panic = "unwind")] pub fn done(&self, guard: &Guard) { if !guard.panicking && thread::panicking() { self.failed.store(true, Ordering::Relaxed); @@ -45,17 +56,30 @@ impl Flag { } #[inline] + #[cfg(not(panic = "unwind"))] + pub fn done(&self, _guard: &Guard) {} + + #[inline] + #[cfg(panic = "unwind")] pub fn get(&self) -> bool { self.failed.load(Ordering::Relaxed) } + #[inline(always)] + #[cfg(not(panic = "unwind"))] + pub fn get(&self) -> bool { + false + } + #[inline] pub fn clear(&self) { + #[cfg(panic = "unwind")] self.failed.store(false, Ordering::Relaxed) } } pub struct Guard { + #[cfg(panic = "unwind")] panicking: bool, } @@ -95,6 +119,8 @@ pub struct Guard { #[stable(feature = "rust1", since = "1.0.0")] pub struct PoisonError<T> { guard: T, + #[cfg(not(panic = "unwind"))] + _never: !, } /// An enumeration of possible errors associated with a [`TryLockResult`] which @@ -165,11 +191,27 @@ impl<T> PoisonError<T> { /// /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock) /// or [`RwLock::read`](crate::sync::RwLock::read). + /// + /// This method may panic if std was built with `panic="abort"`. + #[cfg(panic = "unwind")] #[stable(feature = "sync_poison", since = "1.2.0")] pub fn new(guard: T) -> PoisonError<T> { PoisonError { guard } } + /// Creates a `PoisonError`. 
+ /// + /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock) + /// or [`RwLock::read`](crate::sync::RwLock::read). + /// + /// This method may panic if std was built with `panic="abort"`. + #[cfg(not(panic = "unwind"))] + #[stable(feature = "sync_poison", since = "1.2.0")] + #[track_caller] + pub fn new(_guard: T) -> PoisonError<T> { + panic!("PoisonError created in a libstd built with panic=\"abort\"") + } + /// Consumes this error indicating that a lock is poisoned, returning the /// underlying guard to allow access regardless. /// @@ -225,6 +267,7 @@ impl<T> From<PoisonError<T>> for TryLockError<T> { impl<T> fmt::Debug for TryLockError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { + #[cfg(panic = "unwind")] TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f), TryLockError::WouldBlock => "WouldBlock".fmt(f), } @@ -235,6 +278,7 @@ impl<T> fmt::Debug for TryLockError<T> { impl<T> fmt::Display for TryLockError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { + #[cfg(panic = "unwind")] TryLockError::Poisoned(..) => "poisoned lock: another task failed inside", TryLockError::WouldBlock => "try_lock failed because the operation would block", } @@ -247,6 +291,7 @@ impl<T> Error for TryLockError<T> { #[allow(deprecated, deprecated_in_future)] fn description(&self) -> &str { match *self { + #[cfg(panic = "unwind")] TryLockError::Poisoned(ref p) => p.description(), TryLockError::WouldBlock => "try_lock failed because the operation would block", } @@ -255,6 +300,7 @@ impl<T> Error for TryLockError<T> { #[allow(deprecated)] fn cause(&self) -> Option<&dyn Error> { match *self { + #[cfg(panic = "unwind")] TryLockError::Poisoned(ref p) => Some(p), _ => None, } @@ -267,6 +313,7 @@ where { match result { Ok(t) => Ok(f(t)), + #[cfg(panic = "unwind")] Err(PoisonError { guard }) => Err(PoisonError::new(f(guard))), } } diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs index 23d3dd0707a..c5d846b85aa 100644 --- a/library/std/src/sync/rwlock.rs +++ b/library/std/src/sync/rwlock.rs @@ -408,7 +408,7 @@ impl<T: ?Sized> RwLock<T> { /// assert_eq!(*guard, 1); /// ``` #[inline] - #[stable(feature = "mutex_unpoison", since = "CURRENT_RUSTC_VERSION")] + #[stable(feature = "mutex_unpoison", since = "1.77.0")] pub fn clear_poison(&self) { self.poison.clear(); } diff --git a/library/std/src/sys/cmath.rs b/library/std/src/sys/cmath.rs new file mode 100644 index 00000000000..99df503b82d --- /dev/null +++ b/library/std/src/sys/cmath.rs @@ -0,0 +1,88 @@ +#![cfg(not(test))] + +// These symbols are all defined by `libm`, +// or by `compiler-builtins` on unsupported platforms. 
+extern "C" { + pub fn acos(n: f64) -> f64; + pub fn asin(n: f64) -> f64; + pub fn atan(n: f64) -> f64; + pub fn atan2(a: f64, b: f64) -> f64; + pub fn cbrt(n: f64) -> f64; + pub fn cbrtf(n: f32) -> f32; + pub fn cosh(n: f64) -> f64; + pub fn expm1(n: f64) -> f64; + pub fn expm1f(n: f32) -> f32; + pub fn fdim(a: f64, b: f64) -> f64; + pub fn fdimf(a: f32, b: f32) -> f32; + #[cfg_attr(target_env = "msvc", link_name = "_hypot")] + pub fn hypot(x: f64, y: f64) -> f64; + #[cfg_attr(target_env = "msvc", link_name = "_hypotf")] + pub fn hypotf(x: f32, y: f32) -> f32; + pub fn log1p(n: f64) -> f64; + pub fn log1pf(n: f32) -> f32; + pub fn sinh(n: f64) -> f64; + pub fn tan(n: f64) -> f64; + pub fn tanh(n: f64) -> f64; + pub fn tgamma(n: f64) -> f64; + pub fn tgammaf(n: f32) -> f32; + pub fn lgamma_r(n: f64, s: &mut i32) -> f64; + pub fn lgammaf_r(n: f32, s: &mut i32) -> f32; + + cfg_if::cfg_if! { + if #[cfg(not(all(target_os = "windows", target_env = "msvc", target_arch = "x86")))] { + pub fn acosf(n: f32) -> f32; + pub fn asinf(n: f32) -> f32; + pub fn atan2f(a: f32, b: f32) -> f32; + pub fn atanf(n: f32) -> f32; + pub fn coshf(n: f32) -> f32; + pub fn sinhf(n: f32) -> f32; + pub fn tanf(n: f32) -> f32; + pub fn tanhf(n: f32) -> f32; + }} +} + +// On 32-bit x86 MSVC these functions aren't defined, so we just define shims +// which promote everything to f64, perform the calculation, and then demote +// back to f32. While not precisely correct should be "correct enough" for now. +cfg_if::cfg_if! { +if #[cfg(all(target_os = "windows", target_env = "msvc", target_arch = "x86"))] { + #[inline] + pub unsafe fn acosf(n: f32) -> f32 { + f64::acos(n as f64) as f32 + } + + #[inline] + pub unsafe fn asinf(n: f32) -> f32 { + f64::asin(n as f64) as f32 + } + + #[inline] + pub unsafe fn atan2f(n: f32, b: f32) -> f32 { + f64::atan2(n as f64, b as f64) as f32 + } + + #[inline] + pub unsafe fn atanf(n: f32) -> f32 { + f64::atan(n as f64) as f32 + } + + #[inline] + pub unsafe fn coshf(n: f32) -> f32 { + f64::cosh(n as f64) as f32 + } + + #[inline] + pub unsafe fn sinhf(n: f32) -> f32 { + f64::sinh(n as f64) as f32 + } + + #[inline] + pub unsafe fn tanf(n: f32) -> f32 { + f64::tan(n as f64) as f32 + } + + #[inline] + pub unsafe fn tanhf(n: f32) -> f32 { + f64::tanh(n as f64) as f32 + } +}} diff --git a/library/std/src/sys/pal/unix/locks/futex_condvar.rs b/library/std/src/sys/locks/condvar/futex.rs index 4bd65dd25c2..3ad93ce07f7 100644 --- a/library/std/src/sys/pal/unix/locks/futex_condvar.rs +++ b/library/std/src/sys/locks/condvar/futex.rs @@ -1,6 +1,6 @@ -use super::Mutex; use crate::sync::atomic::{AtomicU32, Ordering::Relaxed}; use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all}; +use crate::sys::locks::Mutex; use crate::time::Duration; pub struct Condvar { diff --git a/library/std/src/sys/pal/itron/condvar.rs b/library/std/src/sys/locks/condvar/itron.rs index 7a47cc6696a..4c6f5e9dad2 100644 --- a/library/std/src/sys/pal/itron/condvar.rs +++ b/library/std/src/sys/locks/condvar/itron.rs @@ -1,5 +1,7 @@ //! POSIX conditional variable implementation based on user-space wait queues. 
-use super::{abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong}; +use crate::sys::pal::itron::{ + abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong, +}; use crate::{mem::replace, ptr::NonNull, sys::locks::Mutex, time::Duration}; // The implementation is inspired by the queue-based implementation shown in diff --git a/library/std/src/sys/locks/condvar/mod.rs b/library/std/src/sys/locks/condvar/mod.rs new file mode 100644 index 00000000000..126a42a2a4c --- /dev/null +++ b/library/std/src/sys/locks/condvar/mod.rs @@ -0,0 +1,36 @@ +cfg_if::cfg_if! { + if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", + all(target_family = "wasm", target_feature = "atomics"), + target_os = "hermit", + ))] { + mod futex; + pub use futex::Condvar; + } else if #[cfg(target_family = "unix")] { + mod pthread; + pub use pthread::Condvar; + } else if #[cfg(target_os = "windows")] { + mod windows; + pub use windows::Condvar; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::Condvar; + } else if #[cfg(target_os = "solid_asp3")] { + mod itron; + pub use itron::Condvar; + } else if #[cfg(target_os = "teeos")] { + mod teeos; + pub use teeos::Condvar; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use xous::Condvar; + } else { + mod no_threads; + pub use no_threads::Condvar; + } +} diff --git a/library/std/src/sys/pal/unsupported/locks/condvar.rs b/library/std/src/sys/locks/condvar/no_threads.rs index 3f0943b50ee..3f0943b50ee 100644 --- a/library/std/src/sys/pal/unsupported/locks/condvar.rs +++ b/library/std/src/sys/locks/condvar/no_threads.rs diff --git a/library/std/src/sys/pal/unix/locks/pthread_condvar.rs b/library/std/src/sys/locks/condvar/pthread.rs index 2dc1b0c601e..094738d5a3f 100644 --- a/library/std/src/sys/pal/unix/locks/pthread_condvar.rs +++ b/library/std/src/sys/locks/condvar/pthread.rs @@ -1,7 +1,7 @@ use crate::cell::UnsafeCell; use crate::ptr; use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed}; -use crate::sys::locks::{pthread_mutex, Mutex}; +use crate::sys::locks::{mutex, Mutex}; #[cfg(not(target_os = "nto"))] use crate::sys::time::TIMESPEC_MAX; #[cfg(target_os = "nto")] @@ -112,7 +112,7 @@ impl Condvar { #[inline] pub unsafe fn wait(&self, mutex: &Mutex) { - let mutex = pthread_mutex::raw(mutex); + let mutex = mutex::raw(mutex); self.verify(mutex); let r = libc::pthread_cond_wait(raw(self), mutex); debug_assert_eq!(r, 0); @@ -134,7 +134,7 @@ impl Condvar { pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { use crate::sys::time::Timespec; - let mutex = pthread_mutex::raw(mutex); + let mutex = mutex::raw(mutex); self.verify(mutex); #[cfg(not(target_os = "nto"))] @@ -170,7 +170,7 @@ impl Condvar { use crate::sys::time::SystemTime; use crate::time::Instant; - let mutex = pthread_mutex::raw(mutex); + let mutex = mutex::raw(mutex); self.verify(mutex); // OSX implementation of `pthread_cond_timedwait` is buggy diff --git a/library/std/src/sys/pal/sgx/condvar.rs b/library/std/src/sys/locks/condvar/sgx.rs index aa1174664ae..cabd3250275 100644 --- a/library/std/src/sys/pal/sgx/condvar.rs +++ b/library/std/src/sys/locks/condvar/sgx.rs @@ -1,9 +1,8 @@ use crate::sys::locks::Mutex; +use crate::sys::pal::waitqueue::{SpinMutex, WaitQueue, WaitVariable}; use crate::sys_common::lazy_box::{LazyBox, LazyInit}; use crate::time::Duration; -use 
super::waitqueue::{SpinMutex, WaitQueue, WaitVariable}; - /// FIXME: `UnsafeList` is not movable. struct AllocatedCondvar(SpinMutex<WaitVariable<()>>); diff --git a/library/std/src/sys/pal/teeos/locks/condvar.rs b/library/std/src/sys/locks/condvar/teeos.rs index c08e8145b8c..c08e8145b8c 100644 --- a/library/std/src/sys/pal/teeos/locks/condvar.rs +++ b/library/std/src/sys/locks/condvar/teeos.rs diff --git a/library/std/src/sys/pal/windows/locks/condvar.rs b/library/std/src/sys/locks/condvar/windows.rs index 953bcc27dee..28a288335d2 100644 --- a/library/std/src/sys/pal/windows/locks/condvar.rs +++ b/library/std/src/sys/locks/condvar/windows.rs @@ -27,7 +27,7 @@ impl Condvar { let r = c::SleepConditionVariableSRW( self.inner.get(), mutex::raw(mutex), - crate::sys::pal::windows::dur2timeout(dur), + crate::sys::pal::dur2timeout(dur), 0, ); if r == 0 { diff --git a/library/std/src/sys/locks/condvar/xous.rs b/library/std/src/sys/locks/condvar/xous.rs new file mode 100644 index 00000000000..0e51449e0af --- /dev/null +++ b/library/std/src/sys/locks/condvar/xous.rs @@ -0,0 +1,148 @@ +use crate::os::xous::ffi::{blocking_scalar, scalar}; +use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; +use crate::sys::locks::Mutex; +use crate::time::Duration; +use core::sync::atomic::{AtomicUsize, Ordering}; + +// The implementation is inspired by Andrew D. Birrell's paper +// "Implementing Condition Variables with Semaphores" + +const NOTIFY_TRIES: usize = 3; + +pub struct Condvar { + counter: AtomicUsize, + timed_out: AtomicUsize, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Condvar { + Condvar { counter: AtomicUsize::new(0), timed_out: AtomicUsize::new(0) } + } + + fn notify_some(&self, to_notify: usize) { + // Assumption: The Mutex protecting this condvar is locked throughout the + // entirety of this call, preventing calls to `wait` and `wait_timeout`. + + // Logic check: Ensure that there aren't any missing waiters. Remove any that + // timed-out, ensuring the counter doesn't underflow. + assert!(self.timed_out.load(Ordering::Relaxed) <= self.counter.load(Ordering::Relaxed)); + self.counter.fetch_sub(self.timed_out.swap(0, Ordering::Relaxed), Ordering::Relaxed); + + // Figure out how many threads to notify. Note that it is impossible for `counter` + // to increase during this operation because Mutex is locked. However, it is + // possible for `counter` to decrease due to a condvar timing out, in which + // case the corresponding `timed_out` will increase accordingly. + let Ok(waiter_count) = + self.counter.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |counter| { + if counter == 0 { + return None; + } else { + Some(counter - counter.min(to_notify)) + } + }) + else { + // No threads are waiting on this condvar + return; + }; + + let mut remaining_to_wake = waiter_count.min(to_notify); + if remaining_to_wake == 0 { + return; + } + for _wake_tries in 0..NOTIFY_TRIES { + let result = blocking_scalar( + ticktimer_server(), + TicktimerScalar::NotifyCondition(self.index(), remaining_to_wake).into(), + ) + .expect("failure to send NotifyCondition command"); + + // Remove the list of waiters that were notified + remaining_to_wake -= result[0]; + + // Also remove the number of waiters that timed out. Clamp it to 0 in order to + // ensure we don't wait forever in case the waiter woke up between the time + // we counted the remaining waiters and now. 
+ remaining_to_wake = + remaining_to_wake.saturating_sub(self.timed_out.swap(0, Ordering::Relaxed)); + if remaining_to_wake == 0 { + return; + } + crate::thread::yield_now(); + } + } + + pub fn notify_one(&self) { + self.notify_some(1) + } + + pub fn notify_all(&self) { + self.notify_some(self.counter.load(Ordering::Relaxed)) + } + + fn index(&self) -> usize { + core::ptr::from_ref(self).addr() + } + + /// Unlock the given Mutex and wait for the notification. Wait at most + /// `ms` milliseconds, or pass `0` to wait forever. + /// + /// Returns `true` if the condition was received, `false` if it timed out + fn wait_ms(&self, mutex: &Mutex, ms: usize) -> bool { + self.counter.fetch_add(1, Ordering::Relaxed); + unsafe { mutex.unlock() }; + + // Threading concern: There is a chance that the `notify` thread wakes up here before + // we have a chance to wait for the condition. This is fine because we've recorded + // the fact that we're waiting by incrementing the counter. + let result = blocking_scalar( + ticktimer_server(), + TicktimerScalar::WaitForCondition(self.index(), ms).into(), + ); + let awoken = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; + + // If we awoke due to a timeout, increment the `timed_out` counter so that the + // main loop of `notify` knows there's a timeout. + // + // This is done with the Mutex still unlocked, because the Mutex might still + // be locked by the `notify` process above. + if !awoken { + self.timed_out.fetch_add(1, Ordering::Relaxed); + } + + unsafe { mutex.lock() }; + awoken + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + // Wait for 0 ms, which is a special case to "wait forever" + self.wait_ms(mutex, 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let mut millis = dur.as_millis() as usize; + // Ensure we don't wait for 0 ms, which would cause us to wait forever + if millis == 0 { + millis = 1; + } + self.wait_ms(mutex, millis) + } +} + +impl Drop for Condvar { + fn drop(&mut self) { + let remaining_count = self.counter.load(Ordering::Relaxed); + let timed_out = self.timed_out.load(Ordering::Relaxed); + assert!( + remaining_count - timed_out == 0, + "counter was {} and timed_out was {} not 0", + remaining_count, + timed_out + ); + scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok(); + } +} diff --git a/library/std/src/sys/pal/unsupported/locks/mod.rs b/library/std/src/sys/locks/mod.rs index 0e0f9eccb21..0bdc4a1e1db 100644 --- a/library/std/src/sys/pal/unsupported/locks/mod.rs +++ b/library/std/src/sys/locks/mod.rs @@ -1,6 +1,7 @@ mod condvar; mod mutex; mod rwlock; + pub use condvar::Condvar; pub use mutex::Mutex; pub use rwlock::RwLock; diff --git a/library/std/src/sys/pal/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/locks/mutex/fuchsia.rs index 5d89e5a13fd..5d89e5a13fd 100644 --- a/library/std/src/sys/pal/unix/locks/fuchsia_mutex.rs +++ b/library/std/src/sys/locks/mutex/fuchsia.rs diff --git a/library/std/src/sys/pal/unix/locks/futex_mutex.rs b/library/std/src/sys/locks/mutex/futex.rs index c01229586c3..c01229586c3 100644 --- a/library/std/src/sys/pal/unix/locks/futex_mutex.rs +++ b/library/std/src/sys/locks/mutex/futex.rs diff --git a/library/std/src/sys/pal/itron/mutex.rs b/library/std/src/sys/locks/mutex/itron.rs index 1f6cc419476..a134eb2d1be 100644 --- a/library/std/src/sys/pal/itron/mutex.rs +++ b/library/std/src/sys/locks/mutex/itron.rs @@ -1,6 +1,6 @@ //! Mutex implementation backed by μITRON mutexes. Assumes `acre_mtx` and //! 
`TA_INHERIT` are available. -use super::{ +use crate::sys::pal::itron::{ abi, error::{expect_success, expect_success_aborting, fail, ItronError}, spin::SpinIdOnceCell, @@ -66,20 +66,3 @@ impl Drop for Mutex { } } } - -pub(super) struct MutexGuard<'a>(&'a Mutex); - -impl<'a> MutexGuard<'a> { - #[inline] - pub(super) fn lock(x: &'a Mutex) -> Self { - x.lock(); - Self(x) - } -} - -impl Drop for MutexGuard<'_> { - #[inline] - fn drop(&mut self) { - unsafe { self.0.unlock() }; - } -} diff --git a/library/std/src/sys/locks/mutex/mod.rs b/library/std/src/sys/locks/mutex/mod.rs new file mode 100644 index 00000000000..710cb91fb14 --- /dev/null +++ b/library/std/src/sys/locks/mutex/mod.rs @@ -0,0 +1,38 @@ +cfg_if::cfg_if! { + if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + all(target_family = "wasm", target_feature = "atomics"), + target_os = "hermit", + ))] { + mod futex; + pub use futex::Mutex; + } else if #[cfg(target_os = "fuchsia")] { + mod fuchsia; + pub use fuchsia::Mutex; + } else if #[cfg(any( + target_family = "unix", + target_os = "teeos", + ))] { + mod pthread; + pub use pthread::{Mutex, raw}; + } else if #[cfg(target_os = "windows")] { + mod windows; + pub use windows::{Mutex, raw}; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::Mutex; + } else if #[cfg(target_os = "solid_asp3")] { + mod itron; + pub use itron::Mutex; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use xous::Mutex; + } else { + mod no_threads; + pub use no_threads::Mutex; + } +} diff --git a/library/std/src/sys/pal/unsupported/locks/mutex.rs b/library/std/src/sys/locks/mutex/no_threads.rs index 4a13c55fb8b..4a13c55fb8b 100644 --- a/library/std/src/sys/pal/unsupported/locks/mutex.rs +++ b/library/std/src/sys/locks/mutex/no_threads.rs diff --git a/library/std/src/sys/pal/unix/locks/pthread_mutex.rs b/library/std/src/sys/locks/mutex/pthread.rs index 8a78bc1fd73..ee0794334fb 100644 --- a/library/std/src/sys/pal/unix/locks/pthread_mutex.rs +++ b/library/std/src/sys/locks/mutex/pthread.rs @@ -1,4 +1,5 @@ use crate::cell::UnsafeCell; +use crate::io::Error; use crate::mem::{forget, MaybeUninit}; use crate::sys::cvt_nz; use crate::sys_common::lazy_box::{LazyBox, LazyInit}; @@ -103,8 +104,24 @@ impl Mutex { #[inline] pub unsafe fn lock(&self) { + #[cold] + #[inline(never)] + fn fail(r: i32) -> ! { + let error = Error::from_raw_os_error(r); + panic!("failed to lock mutex: {error}"); + } + let r = libc::pthread_mutex_lock(raw(self)); - debug_assert_eq!(r, 0); + // As we set the mutex type to `PTHREAD_MUTEX_NORMAL` above, we expect + // the lock call to never fail. Unfortunately however, some platforms + // (Solaris) do not conform to the standard, and instead always provide + // deadlock detection. How kind of them! Unfortunately that means that + // we need to check the error code here. To save us from UB on other + // less well-behaved platforms in the future, we do it even on "good" + // platforms like macOS. See #120147 for more context. 
+ if r != 0 { + fail(r) + } } #[inline] diff --git a/library/std/src/sys/pal/sgx/mutex.rs b/library/std/src/sys/locks/mutex/sgx.rs index 0dbf020ebe0..d37bd02adf8 100644 --- a/library/std/src/sys/pal/sgx/mutex.rs +++ b/library/std/src/sys/locks/mutex/sgx.rs @@ -1,4 +1,4 @@ -use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable}; +use crate::sys::pal::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable}; use crate::sys_common::lazy_box::{LazyBox, LazyInit}; /// FIXME: `UnsafeList` is not movable. diff --git a/library/std/src/sys/pal/windows/locks/mutex.rs b/library/std/src/sys/locks/mutex/windows.rs index ef2f84082cd..ef2f84082cd 100644 --- a/library/std/src/sys/pal/windows/locks/mutex.rs +++ b/library/std/src/sys/locks/mutex/windows.rs diff --git a/library/std/src/sys/pal/xous/locks/mutex.rs b/library/std/src/sys/locks/mutex/xous.rs index ea51776d54e..a8c9518ff0b 100644 --- a/library/std/src/sys/pal/xous/locks/mutex.rs +++ b/library/std/src/sys/locks/mutex/xous.rs @@ -1,5 +1,5 @@ -use crate::os::xous::ffi::{blocking_scalar, do_yield, scalar}; -use crate::os::xous::services::ticktimer_server; +use crate::os::xous::ffi::{blocking_scalar, do_yield}; +use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst}; pub struct Mutex { @@ -29,7 +29,7 @@ impl Mutex { } fn index(&self) -> usize { - self as *const Mutex as usize + core::ptr::from_ref(self).addr() } #[inline] @@ -83,11 +83,8 @@ impl Mutex { } // Unblock one thread that is waiting on this message. - scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::UnlockMutex(self.index()).into(), - ) - .expect("failure to send UnlockMutex command"); + blocking_scalar(ticktimer_server(), TicktimerScalar::UnlockMutex(self.index()).into()) + .expect("failure to send UnlockMutex command"); } #[inline] @@ -106,11 +103,8 @@ impl Drop for Mutex { // If there was Mutex contention, then we involved the ticktimer. Free // the resources associated with this Mutex as it is deallocated. if self.contended.load(Relaxed) { - scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::FreeMutex(self.index()).into(), - ) - .ok(); + blocking_scalar(ticktimer_server(), TicktimerScalar::FreeMutex(self.index()).into()) + .ok(); } } } diff --git a/library/std/src/sys/pal/unix/locks/futex_rwlock.rs b/library/std/src/sys/locks/rwlock/futex.rs index aa0de900238..aa0de900238 100644 --- a/library/std/src/sys/pal/unix/locks/futex_rwlock.rs +++ b/library/std/src/sys/locks/rwlock/futex.rs diff --git a/library/std/src/sys/locks/rwlock/mod.rs b/library/std/src/sys/locks/rwlock/mod.rs new file mode 100644 index 00000000000..0564f1fe6fa --- /dev/null +++ b/library/std/src/sys/locks/rwlock/mod.rs @@ -0,0 +1,36 @@ +cfg_if::cfg_if! 
{ + if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", + all(target_family = "wasm", target_feature = "atomics"), + target_os = "hermit", + ))] { + mod futex; + pub use futex::RwLock; + } else if #[cfg(target_family = "unix")] { + mod queue; + pub use queue::RwLock; + } else if #[cfg(target_os = "windows")] { + mod windows; + pub use windows::RwLock; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::RwLock; + } else if #[cfg(target_os = "solid_asp3")] { + mod solid; + pub use solid::RwLock; + } else if #[cfg(target_os = "teeos")] { + mod teeos; + pub use teeos::RwLock; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use xous::RwLock; + } else { + mod no_threads; + pub use no_threads::RwLock; + } +} diff --git a/library/std/src/sys/pal/unsupported/locks/rwlock.rs b/library/std/src/sys/locks/rwlock/no_threads.rs index 789ef9b29e5..789ef9b29e5 100644 --- a/library/std/src/sys/pal/unsupported/locks/rwlock.rs +++ b/library/std/src/sys/locks/rwlock/no_threads.rs diff --git a/library/std/src/sys/locks/rwlock/queue.rs b/library/std/src/sys/locks/rwlock/queue.rs new file mode 100644 index 00000000000..dce966086b8 --- /dev/null +++ b/library/std/src/sys/locks/rwlock/queue.rs @@ -0,0 +1,557 @@ +//! Efficient read-write locking without `pthread_rwlock_t`. +//! +//! The readers-writer lock provided by the `pthread` library has a number of +//! problems which make it a suboptimal choice for `std`: +//! +//! * It is non-movable, so it needs to be allocated (lazily, to make the +//! constructor `const`). +//! * `pthread` is an external library, meaning the fast path of acquiring an +//! uncontended lock cannot be inlined. +//! * Some platforms (at least glibc before version 2.25) have buggy implementations +//! that can easily lead to undefined behaviour in safe Rust code when not properly +//! guarded against. +//! * On some platforms (e.g. macOS), the lock is very slow. +//! +//! Therefore, we implement our own `RwLock`! Naively, one might reach for a +//! spinlock, but those [can be quite problematic] when the lock is contended. +//! Instead, this readers-writer lock copies its implementation strategy from +//! the Windows [SRWLOCK] and the [usync] library. Spinning is still used for the +//! fast path, but it is bounded: after spinning fails, threads will locklessly +//! add an information structure containing a [`Thread`] handle into a queue of +//! waiters associated with the lock. The lock owner, upon releasing the lock, +//! will scan through the queue and wake up threads as appropriate, which will +//! then again try to acquire the lock. The resulting [`RwLock`] is: +//! +//! * adaptive, since it spins before doing any heavywheight parking operations +//! * allocation-free, modulo the per-thread [`Thread`] handle, which is +//! allocated regardless when using threads created by `std` +//! * writer-preferring, even if some readers may still slip through +//! * unfair, which reduces context-switching and thus drastically improves +//! performance +//! +//! and also quite fast in most cases. +//! +//! [can be quite problematic]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +//! [SRWLOCK]: https://learn.microsoft.com/en-us/windows/win32/sync/slim-reader-writer--srw--locks +//! [usync]: https://crates.io/crates/usync +//! +//! # Implementation +//! +//! ## State +//! +//! 
A single [`AtomicPtr`] is used as state variable. The lowest three bits are used +//! to indicate the meaning of the remaining bits: +//! +//! | [`LOCKED`] | [`QUEUED`] | [`QUEUE_LOCKED`] | Remaining | | +//! |:-----------|:-----------|:-----------------|:-------------|:----------------------------------------------------------------------------------------------------------------------------| +//! | 0 | 0 | 0 | 0 | The lock is unlocked, no threads are waiting | +//! | 1 | 0 | 0 | 0 | The lock is write-locked, no threads waiting | +//! | 1 | 0 | 0 | n > 0 | The lock is read-locked with n readers | +//! | 0 | 1 | * | `*mut Node` | The lock is unlocked, but some threads are waiting. Only writers may lock the lock | +//! | 1 | 1 | * | `*mut Node` | The lock is locked, but some threads are waiting. If the lock is read-locked, the last queue node contains the reader count | +//! +//! ## Waiter queue +//! +//! When threads are waiting on the lock (`QUEUE` is set), the lock state +//! points to a queue of waiters, which is implemented as a linked list of +//! nodes stored on the stack to avoid memory allocation. To enable lockless +//! enqueuing of new nodes to the queue, the linked list is single-linked upon +//! creation. Since when the lock is read-locked, the lock count is stored in +//! the last link of the queue, threads have to traverse the queue to find the +//! last element upon releasing the lock. To avoid having to traverse the whole +//! list again and again, a pointer to the found tail is cached in the (current) +//! first element of the queue. +//! +//! Also, while the lock is unfair for performance reasons, it is still best to +//! wake the tail node first, which requires backlinks to previous nodes to be +//! created. This is done at the same time as finding the tail, and thus a set +//! tail field indicates the remaining portion of the queue is initialized. +//! +//! TLDR: Here's a diagram of what the queue looks like: +//! +//! ```text +//! state +//! │ +//! ▼ +//! ╭───────╮ next ╭───────╮ next ╭───────╮ next ╭───────╮ +//! │ ├─────►│ ├─────►│ ├─────►│ count │ +//! │ │ │ │ │ │ │ │ +//! │ │ │ │◄─────┤ │◄─────┤ │ +//! ╰───────╯ ╰───────╯ prev ╰───────╯ prev ╰───────╯ +//! │ ▲ +//! └───────────────────────────┘ +//! tail +//! ``` +//! +//! Invariants: +//! 1. At least one node must contain a non-null, current `tail` field. +//! 2. The first non-null `tail` field must be valid and current. +//! 3. All nodes preceding this node must have a correct, non-null `next` field. +//! 4. All nodes following this node must have a correct, non-null `prev` field. +//! +//! Access to the queue is controlled by the `QUEUE_LOCKED` bit, which threads +//! try to set both after enqueuing themselves to eagerly add backlinks to the +//! queue, which drastically improves performance, and after unlocking the lock +//! to wake the next waiter(s). This is done atomically at the same time as the +//! enqueuing/unlocking operation. The thread releasing the `QUEUE_LOCK` bit +//! will check the state of the lock and wake up waiters as appropriate. This +//! guarantees forward-progress even if the unlocking thread could not acquire +//! the queue lock. +//! +//! ## Memory orderings +//! +//! To properly synchronize changes to the data protected by the lock, the lock +//! is acquired and released with [`Acquire`] and [`Release`] ordering, respectively. +//! To propagate the initialization of nodes, changes to the queue lock are also +//! performed using these orderings. 
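To make the state table above concrete, here is a standalone sketch of how the flag bits and the reader count combine in the state word when no queue is present. It uses plain `usize` arithmetic; the real implementation below keeps the state in an `AtomicPtr<()>`, and the constants simply mirror `LOCKED`, `QUEUED`, `QUEUE_LOCKED` and `SINGLE` defined there:

```rust
const LOCKED: usize = 1;       // some thread holds the lock
const QUEUED: usize = 2;       // the upper bits hold a queue-node pointer
const QUEUE_LOCKED: usize = 4; // a thread is currently editing the queue
const SINGLE: usize = 8;       // one reader, when no queue is present

fn describe(state: usize) -> String {
    if state & QUEUED == 0 {
        match state {
            0 => "unlocked, no waiters".to_string(),
            LOCKED => "write-locked, no waiters".to_string(),
            s => format!("read-locked by {} readers", s / SINGLE),
        }
    } else {
        format!(
            "{} with waiters queued{}",
            if state & LOCKED != 0 { "locked" } else { "unlocked" },
            if state & QUEUE_LOCKED != 0 { " (queue being edited)" } else { "" },
        )
    }
}

fn main() {
    assert_eq!(describe(0), "unlocked, no waiters");
    assert_eq!(describe(LOCKED), "write-locked, no waiters");
    assert_eq!(describe(3 * SINGLE | LOCKED), "read-locked by 3 readers");
}
```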
+ +#![forbid(unsafe_op_in_unsafe_fn)] + +use crate::cell::OnceCell; +use crate::hint::spin_loop; +use crate::mem; +use crate::ptr::{self, null_mut, without_provenance_mut, NonNull}; +use crate::sync::atomic::{ + AtomicBool, AtomicPtr, + Ordering::{AcqRel, Acquire, Relaxed, Release}, +}; +use crate::sys_common::thread_info; +use crate::thread::Thread; + +// Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the +// locking operation will be retried. +// `spin_loop` will be called `2.pow(SPIN_COUNT) - 1` times. +const SPIN_COUNT: usize = 7; + +type State = *mut (); +type AtomicState = AtomicPtr<()>; + +const UNLOCKED: State = without_provenance_mut(0); +const LOCKED: usize = 1; +const QUEUED: usize = 2; +const QUEUE_LOCKED: usize = 4; +const SINGLE: usize = 8; +const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED); + +/// Marks the state as write-locked, if possible. +#[inline] +fn write_lock(state: State) -> Option<State> { + let state = state.wrapping_byte_add(LOCKED); + if state.addr() & LOCKED == LOCKED { Some(state) } else { None } +} + +/// Marks the state as read-locked, if possible. +#[inline] +fn read_lock(state: State) -> Option<State> { + if state.addr() & QUEUED == 0 && state.addr() != LOCKED { + Some(without_provenance_mut(state.addr().checked_add(SINGLE)? | LOCKED)) + } else { + None + } +} + +/// Masks the state, assuming it points to a queue node. +/// +/// # Safety +/// The state must contain a valid pointer to a queue node. +#[inline] +unsafe fn to_node(state: State) -> NonNull<Node> { + unsafe { NonNull::new_unchecked(state.mask(MASK)).cast() } +} + +/// An atomic node pointer with relaxed operations. +struct AtomicLink(AtomicPtr<Node>); + +impl AtomicLink { + fn new(v: Option<NonNull<Node>>) -> AtomicLink { + AtomicLink(AtomicPtr::new(v.map_or(null_mut(), NonNull::as_ptr))) + } + + fn get(&self) -> Option<NonNull<Node>> { + NonNull::new(self.0.load(Relaxed)) + } + + fn set(&self, v: Option<NonNull<Node>>) { + self.0.store(v.map_or(null_mut(), NonNull::as_ptr), Relaxed); + } +} + +#[repr(align(8))] +struct Node { + next: AtomicLink, + prev: AtomicLink, + tail: AtomicLink, + write: bool, + thread: OnceCell<Thread>, + completed: AtomicBool, +} + +impl Node { + /// Create a new queue node. + fn new(write: bool) -> Node { + Node { + next: AtomicLink::new(None), + prev: AtomicLink::new(None), + tail: AtomicLink::new(None), + write, + thread: OnceCell::new(), + completed: AtomicBool::new(false), + } + } + + /// Prepare this node for waiting. + fn prepare(&mut self) { + // Fall back to creating an unnamed `Thread` handle to allow locking in + // TLS destructors. + self.thread + .get_or_init(|| thread_info::current_thread().unwrap_or_else(|| Thread::new(None))); + self.completed = AtomicBool::new(false); + } + + /// Wait until this node is marked as completed. + /// + /// # Safety + /// May only be called from the thread that created the node. + unsafe fn wait(&self) { + while !self.completed.load(Acquire) { + unsafe { + self.thread.get().unwrap().park(); + } + } + } + + /// Atomically mark this node as completed. The node may not outlive this call. + unsafe fn complete(this: NonNull<Node>) { + // Since the node may be destroyed immediately after the completed flag + // is set, clone the thread handle before that. 
+ let thread = unsafe { this.as_ref().thread.get().unwrap().clone() }; + unsafe { + this.as_ref().completed.store(true, Release); + } + thread.unpark(); + } +} + +struct PanicGuard; + +impl Drop for PanicGuard { + fn drop(&mut self) { + rtabort!("tried to drop node in intrusive list."); + } +} + +/// Add backlinks to the queue, returning the tail. +/// +/// May be called from multiple threads at the same time, while the queue is not +/// modified (this happens when unlocking multiple readers). +/// +/// # Safety +/// * `head` must point to a node in a valid queue. +/// * `head` must be or be in front of the head of the queue at the time of the +/// last removal. +/// * The part of the queue starting with `head` must not be modified during this +/// call. +unsafe fn add_backlinks_and_find_tail(head: NonNull<Node>) -> NonNull<Node> { + let mut current = head; + let tail = loop { + let c = unsafe { current.as_ref() }; + match c.tail.get() { + Some(tail) => break tail, + // SAFETY: + // All `next` fields before the first node with a `set` tail are + // non-null and valid (invariant 3). + None => unsafe { + let next = c.next.get().unwrap_unchecked(); + next.as_ref().prev.set(Some(current)); + current = next; + }, + } + }; + + unsafe { + head.as_ref().tail.set(Some(tail)); + tail + } +} + +pub struct RwLock { + state: AtomicState, +} + +impl RwLock { + #[inline] + pub const fn new() -> RwLock { + RwLock { state: AtomicPtr::new(UNLOCKED) } + } + + #[inline] + pub fn try_read(&self) -> bool { + self.state.fetch_update(Acquire, Relaxed, read_lock).is_ok() + } + + #[inline] + pub fn read(&self) { + if !self.try_read() { + self.lock_contended(false) + } + } + + #[inline] + pub fn try_write(&self) -> bool { + // Atomically set the `LOCKED` bit. This is lowered to a single atomic + // instruction on most modern processors (e.g. "lock bts" on x86 and + // "ldseta" on modern AArch64), and therefore is more efficient than + // `fetch_update(lock(true))`, which can spuriously fail if a new node + // is appended to the queue. + self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0 + } + + #[inline] + pub fn write(&self) { + if !self.try_write() { + self.lock_contended(true) + } + } + + #[cold] + fn lock_contended(&self, write: bool) { + let update = if write { write_lock } else { read_lock }; + let mut node = Node::new(write); + let mut state = self.state.load(Relaxed); + let mut count = 0; + loop { + if let Some(next) = update(state) { + // The lock is available, try locking it. + match self.state.compare_exchange_weak(state, next, Acquire, Relaxed) { + Ok(_) => return, + Err(new) => state = new, + } + } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT { + // If the lock is not available and no threads are queued, spin + // for a while, using exponential backoff to decrease cache + // contention. + for _ in 0..(1 << count) { + spin_loop(); + } + state = self.state.load(Relaxed); + count += 1; + } else { + // Fall back to parking. First, prepare the node. + node.prepare(); + + // If there are threads queued, set the `next` field to a + // pointer to the next node in the queue. Otherwise set it to + // the lock count if the state is read-locked or to zero if it + // is write-locked. 
+ node.next.0 = AtomicPtr::new(state.mask(MASK).cast()); + node.prev = AtomicLink::new(None); + let mut next = ptr::from_ref(&node) + .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED)) + as State; + + if state.addr() & QUEUED == 0 { + // If this is the first node in the queue, set the tail field to + // the node itself to ensure there is a current `tail` field in + // the queue (invariants 1 and 2). This needs to use `set` to + // avoid invalidating the new pointer. + node.tail.set(Some(NonNull::from(&node))); + } else { + // Otherwise, the tail of the queue is not known. + node.tail.set(None); + // Try locking the queue to eagerly add backlinks. + next = next.map_addr(|addr| addr | QUEUE_LOCKED); + } + + // Register the node, using release ordering to propagate our + // changes to the waking thread. + if let Err(new) = self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { + // The state has changed, just try again. + state = new; + continue; + } + + // The node is registered, so the structure must not be + // mutably accessed or destroyed while other threads may + // be accessing it. Guard against unwinds using a panic + // guard that aborts when dropped. + let guard = PanicGuard; + + // If the current thread locked the queue, unlock it again, + // linking it in the process. + if state.addr() & (QUEUE_LOCKED | QUEUED) == QUEUED { + unsafe { + self.unlock_queue(next); + } + } + + // Wait until the node is removed from the queue. + // SAFETY: the node was created by the current thread. + unsafe { + node.wait(); + } + + // The node was removed from the queue, disarm the guard. + mem::forget(guard); + + // Reload the state and try again. + state = self.state.load(Relaxed); + count = 0; + } + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + match self.state.fetch_update(Release, Acquire, |state| { + if state.addr() & QUEUED == 0 { + let count = state.addr() - (SINGLE | LOCKED); + Some(if count > 0 { without_provenance_mut(count | LOCKED) } else { UNLOCKED }) + } else { + None + } + }) { + Ok(_) => {} + // There are waiters queued and the lock count was moved to the + // tail of the queue. + Err(state) => unsafe { self.read_unlock_contended(state) }, + } + } + + #[cold] + unsafe fn read_unlock_contended(&self, state: State) { + // The state was observed with acquire ordering above, so the current + // thread will observe all node initializations. + + // SAFETY: + // Because new read-locks cannot be acquired while threads are queued, + // all queue-lock owners will observe the set `LOCKED` bit. Because they + // do not modify the queue while there is a lock owner, the queue will + // not be removed from here. + let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() }; + // The lock count is stored in the `next` field of `tail`. + // Decrement it, making sure to observe all changes made to the queue + // by the other lock owners by using acquire-release ordering. + let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0; + if was_last { + // SAFETY: + // Other threads cannot read-lock while threads are queued. Also, + // the `LOCKED` bit is still set, so there are no writers. Therefore, + // the current thread exclusively owns the lock. 
+ unsafe { self.unlock_contended(state) } + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + if let Err(state) = + self.state.compare_exchange(without_provenance_mut(LOCKED), UNLOCKED, Release, Relaxed) + { + // SAFETY: + // Since other threads cannot acquire the lock, the state can only + // have changed because there are threads queued on the lock. + unsafe { self.unlock_contended(state) } + } + } + + /// # Safety + /// * The lock must be exclusively owned by this thread. + /// * There must be threads queued on the lock. + #[cold] + unsafe fn unlock_contended(&self, mut state: State) { + loop { + // Atomically release the lock and try to acquire the queue lock. + let next = state.map_addr(|a| (a & !LOCKED) | QUEUE_LOCKED); + match self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { + // The queue lock was acquired. Release it, waking up the next + // waiter in the process. + Ok(_) if state.addr() & QUEUE_LOCKED == 0 => unsafe { + return self.unlock_queue(next); + }, + // Another thread already holds the queue lock, leave waking up + // waiters to it. + Ok(_) => return, + Err(new) => state = new, + } + } + } + + /// Unlocks the queue. If the lock is unlocked, wakes up the next eligible + /// thread(s). + /// + /// # Safety + /// The queue lock must be held by the current thread. + unsafe fn unlock_queue(&self, mut state: State) { + debug_assert_eq!(state.addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED); + + loop { + let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) }; + + if state.addr() & LOCKED == LOCKED { + // Another thread has locked the lock. Leave waking up waiters + // to them by releasing the queue lock. + match self.state.compare_exchange_weak( + state, + state.mask(!QUEUE_LOCKED), + Release, + Acquire, + ) { + Ok(_) => return, + Err(new) => { + state = new; + continue; + } + } + } + + let is_writer = unsafe { tail.as_ref().write }; + if is_writer && let Some(prev) = unsafe { tail.as_ref().prev.get() } { + // `tail` is a writer and there is a node before `tail`. + // Split off `tail`. + + // There are no set `tail` links before the node pointed to by + // `state`, so the first non-null tail field will be current + // (invariant 2). Invariant 4 is fullfilled since `find_tail` + // was called on this node, which ensures all backlinks are set. + unsafe { + to_node(state).as_ref().tail.set(Some(prev)); + } + + // Release the queue lock. Doing this by subtraction is more + // efficient on modern processors since it is a single instruction + // instead of an update loop, which will fail if new threads are + // added to the list. + self.state.fetch_byte_sub(QUEUE_LOCKED, Release); + + // The tail was split off and the lock released. Mark the node as + // completed. + unsafe { + return Node::complete(tail); + } + } else { + // The next waiter is a reader or the queue only consists of one + // waiter. Just wake all threads. + + // The lock cannot be locked (checked above), so mark it as + // unlocked to reset the queue. 
+ if let Err(new) = + self.state.compare_exchange_weak(state, UNLOCKED, Release, Acquire) + { + state = new; + continue; + } + + let mut current = tail; + loop { + let prev = unsafe { current.as_ref().prev.get() }; + unsafe { + Node::complete(current); + } + match prev { + Some(prev) => current = prev, + None => return, + } + } + } + } + } +} diff --git a/library/std/src/sys/pal/sgx/rwlock.rs b/library/std/src/sys/locks/rwlock/sgx.rs index d89de18ca5f..136dea597bb 100644 --- a/library/std/src/sys/pal/sgx/rwlock.rs +++ b/library/std/src/sys/locks/rwlock/sgx.rs @@ -1,16 +1,15 @@ #[cfg(test)] mod tests; -use crate::num::NonZeroUsize; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; - -use super::waitqueue::{ +use crate::alloc::Layout; +use crate::num::NonZero; +use crate::sys::pal::waitqueue::{ try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable, }; -use crate::alloc::Layout; +use crate::sys_common::lazy_box::{LazyBox, LazyInit}; struct AllocatedRwLock { - readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>, + readers: SpinMutex<WaitVariable<Option<NonZero<usize>>>>, writer: SpinMutex<WaitVariable<bool>>, } @@ -53,8 +52,7 @@ impl RwLock { // Another thread has passed the lock to us } else { // No waiting writers, acquire the read lock - *rguard.lock_var_mut() = - NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); + *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); } } @@ -68,8 +66,7 @@ impl RwLock { false } else { // No waiting writers, acquire the read lock - *rguard.lock_var_mut() = - NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); + *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); true } } @@ -108,10 +105,10 @@ impl RwLock { #[inline] unsafe fn __read_unlock( &self, - mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>, + mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZero<usize>>>>, wguard: SpinMutexGuard<'_, WaitVariable<bool>>, ) { - *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1); + *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().unwrap().get() - 1); if rguard.lock_var().is_some() { // There are other active readers } else { @@ -137,7 +134,7 @@ impl RwLock { #[inline] unsafe fn __write_unlock( &self, - rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>, + rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZero<usize>>>>, wguard: SpinMutexGuard<'_, WaitVariable<bool>>, ) { match WaitQueue::notify_one(wguard) { diff --git a/library/std/src/sys/pal/solid/rwlock.rs b/library/std/src/sys/locks/rwlock/solid.rs index ecb4eb83b9b..9bf6f5dbb73 100644 --- a/library/std/src/sys/pal/solid/rwlock.rs +++ b/library/std/src/sys/locks/rwlock/solid.rs @@ -1,5 +1,5 @@ //! A readers-writer lock implementation backed by the SOLID kernel extension. 
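The SGX hunk above is part of the wider `NonZeroUsize` → `NonZero<usize>` migration in this diff; the generic type keeps the same constructors, so call sites mostly just swap the type name. A sketch of the reader-count update used by `read`/`__read_unlock`, assuming a toolchain where generic `NonZero` is stable (Rust 1.79+):

```rust
use std::num::NonZero;

/// Mirrors `*rguard.lock_var_mut() = NonZero::new(... + 1)` from the diff:
/// `None` means "no readers", `Some(n)` means `n` readers.
fn add_reader(current: Option<NonZero<usize>>) -> Option<NonZero<usize>> {
    NonZero::new(current.map_or(0, |n| n.get()) + 1)
}

fn remove_reader(current: Option<NonZero<usize>>) -> Option<NonZero<usize>> {
    NonZero::new(current.map_or(0, |n| n.get()).saturating_sub(1))
}

fn main() {
    let one = add_reader(None);
    assert_eq!(one.map(|n| n.get()), Some(1));
    let two = add_reader(one);
    assert_eq!(two.map(|n| n.get()), Some(2));
    // Dropping the last reader goes back to `None`.
    assert_eq!(remove_reader(add_reader(None)), None);
}
```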
-use super::{ +use crate::sys::pal::{ abi, itron::{ error::{expect_success, expect_success_aborting, fail, ItronError}, diff --git a/library/std/src/sys/pal/teeos/locks/rwlock.rs b/library/std/src/sys/locks/rwlock/teeos.rs index 27cdb88788f..27cdb88788f 100644 --- a/library/std/src/sys/pal/teeos/locks/rwlock.rs +++ b/library/std/src/sys/locks/rwlock/teeos.rs diff --git a/library/std/src/sys/pal/windows/locks/rwlock.rs b/library/std/src/sys/locks/rwlock/windows.rs index e69415baac4..e69415baac4 100644 --- a/library/std/src/sys/pal/windows/locks/rwlock.rs +++ b/library/std/src/sys/locks/rwlock/windows.rs diff --git a/library/std/src/sys/pal/xous/locks/rwlock.rs b/library/std/src/sys/locks/rwlock/xous.rs index 618da758adf..ab45b33e1f6 100644 --- a/library/std/src/sys/pal/xous/locks/rwlock.rs +++ b/library/std/src/sys/locks/rwlock/xous.rs @@ -1,5 +1,5 @@ -use crate::os::xous::ffi::do_yield; -use crate::sync::atomic::{AtomicIsize, Ordering::SeqCst}; +use crate::sync::atomic::{AtomicIsize, Ordering::Acquire}; +use crate::thread::yield_now; pub struct RwLock { /// The "mode" value indicates how many threads are waiting on this @@ -14,6 +14,9 @@ pub struct RwLock { mode: AtomicIsize, } +const RWLOCK_WRITING: isize = -1; +const RWLOCK_FREE: isize = 0; + unsafe impl Send for RwLock {} unsafe impl Sync for RwLock {} @@ -21,52 +24,51 @@ impl RwLock { #[inline] #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> RwLock { - RwLock { mode: AtomicIsize::new(0) } + RwLock { mode: AtomicIsize::new(RWLOCK_FREE) } } #[inline] pub unsafe fn read(&self) { while !unsafe { self.try_read() } { - do_yield(); + yield_now(); } } #[inline] pub unsafe fn try_read(&self) -> bool { - // Non-atomically determine the current value. - let current = self.mode.load(SeqCst); - - // If it's currently locked for writing, then we cannot read. - if current < 0 { - return false; - } - - // Attempt to lock. If the `current` value has changed, then this - // operation will fail and we will not obtain the lock even if we - // could potentially keep it. - let new = current + 1; - self.mode.compare_exchange(current, new, SeqCst, SeqCst).is_ok() + self.mode + .fetch_update( + Acquire, + Acquire, + |v| if v == RWLOCK_WRITING { None } else { Some(v + 1) }, + ) + .is_ok() } #[inline] pub unsafe fn write(&self) { while !unsafe { self.try_write() } { - do_yield(); + yield_now(); } } #[inline] pub unsafe fn try_write(&self) -> bool { - self.mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok() + self.mode.compare_exchange(RWLOCK_FREE, RWLOCK_WRITING, Acquire, Acquire).is_ok() } #[inline] pub unsafe fn read_unlock(&self) { - self.mode.fetch_sub(1, SeqCst); + let previous = self.mode.fetch_sub(1, Acquire); + assert!(previous != RWLOCK_FREE); + assert!(previous != RWLOCK_WRITING); } #[inline] pub unsafe fn write_unlock(&self) { - assert_eq!(self.mode.compare_exchange(-1, 0, SeqCst, SeqCst), Ok(-1)); + assert_eq!( + self.mode.compare_exchange(RWLOCK_WRITING, RWLOCK_FREE, Acquire, Acquire), + Ok(RWLOCK_WRITING) + ); } } diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs index bbdcb32606c..d77ac7eb027 100644 --- a/library/std/src/sys/mod.rs +++ b/library/std/src/sys/mod.rs @@ -5,6 +5,11 @@ mod pal; mod personality; +pub mod cmath; +pub mod locks; +pub mod os_str; +pub mod path; + // FIXME(117276): remove this, move feature implementations into individual // submodules. 
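The rewritten xous lock above encodes its whole state in one `AtomicIsize`: `-1` (`RWLOCK_WRITING`) means write-locked, `0` (`RWLOCK_FREE`) means free, and `n > 0` means `n` readers. A single-threaded sketch of that protocol, using the same `fetch_update`/`compare_exchange` shape as the diff (illustrative only, without the yielding loops):

```rust
use std::sync::atomic::{AtomicIsize, Ordering::Acquire};

const RWLOCK_WRITING: isize = -1;
const RWLOCK_FREE: isize = 0;

fn try_read(mode: &AtomicIsize) -> bool {
    mode.fetch_update(Acquire, Acquire, |v| {
        if v == RWLOCK_WRITING { None } else { Some(v + 1) }
    })
    .is_ok()
}

fn try_write(mode: &AtomicIsize) -> bool {
    mode.compare_exchange(RWLOCK_FREE, RWLOCK_WRITING, Acquire, Acquire).is_ok()
}

fn main() {
    let mode = AtomicIsize::new(RWLOCK_FREE);
    assert!(try_read(&mode));   // first reader
    assert!(try_read(&mode));   // second reader
    assert!(!try_write(&mode)); // writer blocked while readers hold the lock
    mode.fetch_sub(2, Acquire); // both readers unlock
    assert!(try_write(&mode));  // now the writer succeeds
}
```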
pub use pal::*; diff --git a/library/std/src/sys/pal/unix/os_str.rs b/library/std/src/sys/os_str/bytes.rs index 7bd2f656a24..4ca3f1cd185 100644 --- a/library/std/src/sys/pal/unix/os_str.rs +++ b/library/std/src/sys/os_str/bytes.rs @@ -14,7 +14,6 @@ use crate::sys_common::{AsInner, IntoInner}; use core::str::Utf8Chunks; #[cfg(test)] -#[path = "../unix/os_str/tests.rs"] mod tests; #[derive(Hash)] @@ -212,6 +211,49 @@ impl Slice { unsafe { mem::transmute(s) } } + #[track_caller] + #[inline] + pub fn check_public_boundary(&self, index: usize) { + if index == 0 || index == self.inner.len() { + return; + } + if index < self.inner.len() + && (self.inner[index - 1].is_ascii() || self.inner[index].is_ascii()) + { + return; + } + + slow_path(&self.inner, index); + + /// We're betting that typical splits will involve an ASCII character. + /// + /// Putting the expensive checks in a separate function generates notably + /// better assembly. + #[track_caller] + #[inline(never)] + fn slow_path(bytes: &[u8], index: usize) { + let (before, after) = bytes.split_at(index); + + // UTF-8 takes at most 4 bytes per codepoint, so we don't + // need to check more than that. + let after = after.get(..4).unwrap_or(after); + match str::from_utf8(after) { + Ok(_) => return, + Err(err) if err.valid_up_to() != 0 => return, + Err(_) => (), + } + + for len in 2..=4.min(index) { + let before = &before[index - len..]; + if str::from_utf8(before).is_ok() { + return; + } + } + + panic!("byte index {index} is not an OsStr boundary"); + } + } + #[inline] pub fn from_str(s: &str) -> &Slice { unsafe { Slice::from_encoded_bytes_unchecked(s.as_bytes()) } diff --git a/library/std/src/sys/pal/unix/os_str/tests.rs b/library/std/src/sys/os_str/bytes/tests.rs index e2a99045e41..e2a99045e41 100644 --- a/library/std/src/sys/pal/unix/os_str/tests.rs +++ b/library/std/src/sys/os_str/bytes/tests.rs diff --git a/library/std/src/sys/os_str/mod.rs b/library/std/src/sys/os_str/mod.rs new file mode 100644 index 00000000000..b509729475b --- /dev/null +++ b/library/std/src/sys/os_str/mod.rs @@ -0,0 +1,12 @@ +cfg_if::cfg_if! 
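`check_public_boundary` above accepts an index at either end of the string, next to an ASCII byte, or at the start or end of a valid UTF-8 sequence, and only falls into the expensive path otherwise. A standalone re-derivation of that test as a boolean helper, checked on a two-character string (a sketch for illustration; the std code panics instead of returning `false`):

```rust
fn is_acceptable_boundary(bytes: &[u8], index: usize) -> bool {
    if index == 0 || index == bytes.len() {
        return true;
    }
    // Fast path: a split next to an ASCII byte is always fine.
    if index < bytes.len() && (bytes[index - 1].is_ascii() || bytes[index].is_ascii()) {
        return true;
    }

    // Slow path: does a valid UTF-8 sequence start at `index`...
    let after = &bytes[index..];
    let after = after.get(..4).unwrap_or(after);
    match std::str::from_utf8(after) {
        Ok(_) => return true,
        Err(e) if e.valid_up_to() != 0 => return true,
        Err(_) => {}
    }

    // ...or does one end at `index`?
    for len in 2..=4.min(index) {
        if std::str::from_utf8(&bytes[index - len..index]).is_ok() {
            return true;
        }
    }
    false
}

fn main() {
    let s = "aé".as_bytes(); // [0x61, 0xC3, 0xA9]
    assert!(is_acceptable_boundary(s, 1));  // before 'é'
    assert!(!is_acceptable_boundary(s, 2)); // inside the two-byte 'é' sequence
    assert!(is_acceptable_boundary(s, 3));  // end of the string
}
```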
{ + if #[cfg(any( + target_os = "windows", + target_os = "uefi", + ))] { + mod wtf8; + pub use wtf8::{Buf, Slice}; + } else { + mod bytes; + pub use bytes::{Buf, Slice}; + } +} diff --git a/library/std/src/sys/pal/windows/os_str.rs b/library/std/src/sys/os_str/wtf8.rs index 237854fac4e..352bd735903 100644 --- a/library/std/src/sys/pal/windows/os_str.rs +++ b/library/std/src/sys/os_str/wtf8.rs @@ -6,7 +6,7 @@ use crate::fmt; use crate::mem; use crate::rc::Rc; use crate::sync::Arc; -use crate::sys_common::wtf8::{Wtf8, Wtf8Buf}; +use crate::sys_common::wtf8::{check_utf8_boundary, Wtf8, Wtf8Buf}; use crate::sys_common::{AsInner, FromInner, IntoInner}; #[derive(Clone, Hash)] @@ -171,6 +171,11 @@ impl Slice { mem::transmute(Wtf8::from_bytes_unchecked(s)) } + #[track_caller] + pub fn check_public_boundary(&self, index: usize) { + check_utf8_boundary(&self.inner, index); + } + #[inline] pub fn from_str(s: &str) -> &Slice { unsafe { mem::transmute(Wtf8::from_str(s)) } diff --git a/library/std/src/sys/pal/common/alloc.rs b/library/std/src/sys/pal/common/alloc.rs index b7357460f39..8cf9ef68047 100644 --- a/library/std/src/sys/pal/common/alloc.rs +++ b/library/std/src/sys/pal/common/alloc.rs @@ -16,7 +16,7 @@ use crate::ptr; target_arch = "sparc", target_arch = "wasm32", target_arch = "hexagon", - all(target_arch = "riscv32", not(target_os = "espidf")), + all(target_arch = "riscv32", not(any(target_os = "espidf", target_os = "zkvm"))), all(target_arch = "xtensa", not(target_os = "espidf")), ))] pub const MIN_ALIGN: usize = 8; @@ -32,11 +32,11 @@ pub const MIN_ALIGN: usize = 8; target_arch = "wasm64", ))] pub const MIN_ALIGN: usize = 16; -// The allocator on the esp-idf platform guarantees 4 byte alignment. -#[cfg(any( - all(target_arch = "riscv32", target_os = "espidf"), +// The allocator on the esp-idf and zkvm platforms guarantee 4 byte alignment. +#[cfg(all(any( + all(target_arch = "riscv32", any(target_os = "espidf", target_os = "zkvm")), all(target_arch = "xtensa", target_os = "espidf"), -))] +)))] pub const MIN_ALIGN: usize = 4; pub unsafe fn realloc_fallback( diff --git a/library/std/src/sys/pal/common/small_c_string.rs b/library/std/src/sys/pal/common/small_c_string.rs index af9b18e372d..37812fc0659 100644 --- a/library/std/src/sys/pal/common/small_c_string.rs +++ b/library/std/src/sys/pal/common/small_c_string.rs @@ -15,22 +15,28 @@ const NUL_ERR: io::Error = io::const_io_error!(io::ErrorKind::InvalidInput, "file name contained an unexpected NUL byte"); #[inline] -pub fn run_path_with_cstr<T, F>(path: &Path, f: F) -> io::Result<T> -where - F: FnOnce(&CStr) -> io::Result<T>, -{ +pub fn run_path_with_cstr<T>(path: &Path, f: &dyn Fn(&CStr) -> io::Result<T>) -> io::Result<T> { run_with_cstr(path.as_os_str().as_encoded_bytes(), f) } #[inline] -pub fn run_with_cstr<T, F>(bytes: &[u8], f: F) -> io::Result<T> -where - F: FnOnce(&CStr) -> io::Result<T>, -{ +pub fn run_with_cstr<T>(bytes: &[u8], f: &dyn Fn(&CStr) -> io::Result<T>) -> io::Result<T> { + // Dispatch and dyn erase the closure type to prevent mono bloat. + // See https://github.com/rust-lang/rust/pull/121101. if bytes.len() >= MAX_STACK_ALLOCATION { - return run_with_cstr_allocating(bytes, f); + run_with_cstr_allocating(bytes, f) + } else { + unsafe { run_with_cstr_stack(bytes, f) } } +} +/// # Safety +/// +/// `bytes` must have a length less than `MAX_STACK_ALLOCATION`. 
+unsafe fn run_with_cstr_stack<T>( + bytes: &[u8], + f: &dyn Fn(&CStr) -> io::Result<T>, +) -> io::Result<T> { let mut buf = MaybeUninit::<[u8; MAX_STACK_ALLOCATION]>::uninit(); let buf_ptr = buf.as_mut_ptr() as *mut u8; @@ -47,10 +53,7 @@ where #[cold] #[inline(never)] -fn run_with_cstr_allocating<T, F>(bytes: &[u8], f: F) -> io::Result<T> -where - F: FnOnce(&CStr) -> io::Result<T>, -{ +fn run_with_cstr_allocating<T>(bytes: &[u8], f: &dyn Fn(&CStr) -> io::Result<T>) -> io::Result<T> { match CString::new(bytes) { Ok(s) => f(&s), Err(_) => Err(NUL_ERR), diff --git a/library/std/src/sys/pal/common/tests.rs b/library/std/src/sys/pal/common/tests.rs index 32dc18ee1cf..e72d02203da 100644 --- a/library/std/src/sys/pal/common/tests.rs +++ b/library/std/src/sys/pal/common/tests.rs @@ -7,7 +7,7 @@ use core::iter::repeat; #[test] fn stack_allocation_works() { let path = Path::new("abc"); - let result = run_path_with_cstr(path, |p| { + let result = run_path_with_cstr(path, &|p| { assert_eq!(p, &*CString::new(path.as_os_str().as_encoded_bytes()).unwrap()); Ok(42) }); @@ -17,14 +17,14 @@ fn stack_allocation_works() { #[test] fn stack_allocation_fails() { let path = Path::new("ab\0"); - assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err()); + assert!(run_path_with_cstr::<()>(path, &|_| unreachable!()).is_err()); } #[test] fn heap_allocation_works() { let path = repeat("a").take(384).collect::<String>(); let path = Path::new(&path); - let result = run_path_with_cstr(path, |p| { + let result = run_path_with_cstr(path, &|p| { assert_eq!(p, &*CString::new(path.as_os_str().as_encoded_bytes()).unwrap()); Ok(42) }); @@ -36,7 +36,7 @@ fn heap_allocation_fails() { let mut path = repeat("a").take(384).collect::<String>(); path.push('\0'); let path = Path::new(&path); - assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err()); + assert!(run_path_with_cstr::<()>(path, &|_| unreachable!()).is_err()); } #[bench] @@ -44,7 +44,7 @@ fn bench_stack_path_alloc(b: &mut test::Bencher) { let path = repeat("a").take(383).collect::<String>(); let p = Path::new(&path); b.iter(|| { - run_path_with_cstr(p, |cstr| { + run_path_with_cstr(p, &|cstr| { black_box(cstr); Ok(()) }) @@ -57,7 +57,7 @@ fn bench_heap_path_alloc(b: &mut test::Bencher) { let path = repeat("a").take(384).collect::<String>(); let p = Path::new(&path); b.iter(|| { - run_path_with_cstr(p, |cstr| { + run_path_with_cstr(p, &|cstr| { black_box(cstr); Ok(()) }) diff --git a/library/std/src/sys/pal/common/thread_local/fast_local.rs b/library/std/src/sys/pal/common/thread_local/fast_local.rs index 9206588be06..646dcd7f3a3 100644 --- a/library/std/src/sys/pal/common/thread_local/fast_local.rs +++ b/library/std/src/sys/pal/common/thread_local/fast_local.rs @@ -13,8 +13,9 @@ pub macro thread_local_inner { (@key $t:ty, const $init:expr) => {{ #[inline] #[deny(unsafe_op_in_unsafe_fn)] - // FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_ref` lint - #[cfg_attr(not(bootstrap), allow(static_mut_ref))] + // FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_refs` lint + #[cfg_attr(bootstrap, allow(static_mut_ref))] + #[cfg_attr(not(bootstrap), allow(static_mut_refs))] unsafe fn __getit( _init: $crate::option::Option<&mut $crate::option::Option<$t>>, ) -> $crate::option::Option<&'static $t> { @@ -94,7 +95,8 @@ pub macro thread_local_inner { if let $crate::option::Option::Some(init) = init { if let $crate::option::Option::Some(value) = init.take() { return value; - } else if $crate::cfg!(debug_assertions) { + } + if 
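The `run_with_cstr` rework above replaces the generic closure parameter with `&dyn Fn(&CStr) -> io::Result<T>`, so the body is compiled once per return type rather than once per closure, at the cost of an extra `&` at every call site. A sketch of the two shapes side by side (simplified stand-ins, not the std helpers):

```rust
use std::ffi::{CStr, CString};
use std::io;

// Generic version: one monomorphized copy per distinct closure type `F`.
fn with_cstr_generic<T, F: FnOnce(&CStr) -> io::Result<T>>(bytes: &[u8], f: F) -> io::Result<T> {
    let s = CString::new(bytes).map_err(|_| io::Error::from(io::ErrorKind::InvalidInput))?;
    f(&s)
}

// Dyn-erased version: a single copy per return type, shared by every caller.
fn with_cstr_dyn<T>(bytes: &[u8], f: &dyn Fn(&CStr) -> io::Result<T>) -> io::Result<T> {
    let s = CString::new(bytes).map_err(|_| io::Error::from(io::ErrorKind::InvalidInput))?;
    f(&s)
}

fn main() -> io::Result<()> {
    assert_eq!(with_cstr_generic(b"abc", |c| Ok(c.to_bytes().len()))?, 3);
    // Note the extra `&`, exactly as in the updated call sites above.
    assert_eq!(with_cstr_dyn(b"abc", &|c| Ok(c.to_bytes().len()))?, 3);
    Ok(())
}
```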
$crate::cfg!(debug_assertions) { $crate::unreachable!("missing default value"); } } diff --git a/library/std/src/sys/pal/common/thread_local/os_local.rs b/library/std/src/sys/pal/common/thread_local/os_local.rs index 7cf29192122..3edffd7e443 100644 --- a/library/std/src/sys/pal/common/thread_local/os_local.rs +++ b/library/std/src/sys/pal/common/thread_local/os_local.rs @@ -176,7 +176,7 @@ unsafe extern "C" fn destroy_value<T: 'static>(ptr: *mut u8) { if let Err(_) = panic::catch_unwind(|| unsafe { let ptr = Box::from_raw(ptr as *mut Value<T>); let key = ptr.key; - key.os.set(ptr::invalid_mut(1)); + key.os.set(ptr::without_provenance_mut(1)); drop(ptr); key.os.set(ptr::null_mut()); }) { diff --git a/library/std/src/sys/pal/common/thread_local/static_local.rs b/library/std/src/sys/pal/common/thread_local/static_local.rs index 51cba66fad7..4f2b6868962 100644 --- a/library/std/src/sys/pal/common/thread_local/static_local.rs +++ b/library/std/src/sys/pal/common/thread_local/static_local.rs @@ -11,8 +11,9 @@ pub macro thread_local_inner { (@key $t:ty, const $init:expr) => {{ #[inline] // see comments below #[deny(unsafe_op_in_unsafe_fn)] - // FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_ref` lint - #[cfg_attr(not(bootstrap), allow(static_mut_ref))] + // FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_refs` lint + #[cfg_attr(bootstrap, allow(static_mut_ref))] + #[cfg_attr(not(bootstrap), allow(static_mut_refs))] unsafe fn __getit( _init: $crate::option::Option<&mut $crate::option::Option<$t>>, ) -> $crate::option::Option<&'static $t> { diff --git a/library/std/src/sys/pal/hermit/fs.rs b/library/std/src/sys/pal/hermit/fs.rs index 694482a8a30..d4da53fd3df 100644 --- a/library/std/src/sys/pal/hermit/fs.rs +++ b/library/std/src/sys/pal/hermit/fs.rs @@ -269,7 +269,7 @@ impl OpenOptions { impl File { pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { - run_path_with_cstr(path, |path| File::open_c(&path, opts)) + run_path_with_cstr(path, &|path| File::open_c(&path, opts)) } pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { @@ -421,7 +421,7 @@ pub fn readdir(_p: &Path) -> io::Result<ReadDir> { } pub fn unlink(path: &Path) -> io::Result<()> { - run_path_with_cstr(path, |path| cvt(unsafe { abi::unlink(path.as_ptr()) }).map(|_| ())) + run_path_with_cstr(path, &|path| cvt(unsafe { abi::unlink(path.as_ptr()) }).map(|_| ())) } pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> { diff --git a/library/std/src/sys/pal/hermit/mod.rs b/library/std/src/sys/pal/hermit/mod.rs index 937603cfd8a..ada408107dc 100644 --- a/library/std/src/sys/pal/hermit/mod.rs +++ b/library/std/src/sys/pal/hermit/mod.rs @@ -19,8 +19,6 @@ use crate::os::raw::c_char; pub mod alloc; pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; pub mod fd; pub mod fs; @@ -30,10 +28,6 @@ pub mod io; pub mod memchr; pub mod net; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -#[path = "../unix/path.rs"] -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] @@ -45,16 +39,6 @@ pub mod thread_local_dtor; pub mod thread_local_key; pub mod time; -#[path = "../unix/locks"] -pub mod locks { - mod futex_condvar; - mod futex_mutex; - mod futex_rwlock; - pub(crate) use futex_condvar::Condvar; - pub(crate) use futex_mutex::Mutex; - pub(crate) use futex_rwlock::RwLock; -} - use crate::io::ErrorKind; use crate::os::hermit::abi; diff --git a/library/std/src/sys/pal/hermit/net.rs b/library/std/src/sys/pal/hermit/net.rs 
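`destroy_value` above stores a dummy pointer with address 1 in the TLS slot to mark "destructor running"; the change here is purely the rename from `ptr::invalid_mut` to `ptr::without_provenance_mut` as the strict-provenance API settled. A tiny sketch of that sentinel pattern (the strict-provenance helpers are stable on recent toolchains, Rust 1.84+; at the time of this diff they were nightly-only inside std):

```rust
use std::ptr;

fn main() {
    // Address 1, no provenance: never dereferenced, only compared against.
    // It is distinguishable both from null and from any real allocation.
    let sentinel: *mut u8 = ptr::without_provenance_mut(1);
    assert!(!sentinel.is_null());
    assert_eq!(sentinel.addr(), 1);
}
```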
index 3cf63fccf2e..871a2ccdfa4 100644 --- a/library/std/src/sys/pal/hermit/net.rs +++ b/library/std/src/sys/pal/hermit/net.rs @@ -156,7 +156,7 @@ impl Socket { ) })?; unsafe { - buf.advance(ret as usize); + buf.advance_unchecked(ret as usize); } Ok(()) } diff --git a/library/std/src/sys/pal/hermit/thread.rs b/library/std/src/sys/pal/hermit/thread.rs index 3384906a15e..fee80c02d4a 100644 --- a/library/std/src/sys/pal/hermit/thread.rs +++ b/library/std/src/sys/pal/hermit/thread.rs @@ -5,7 +5,7 @@ use super::thread_local_dtor::run_dtors; use crate::ffi::CStr; use crate::io; use crate::mem; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::ptr; use crate::time::Duration; @@ -97,8 +97,8 @@ impl Thread { } } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { - unsafe { Ok(NonZeroUsize::new_unchecked(abi::get_processor_count())) } +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + unsafe { Ok(NonZero::new_unchecked(abi::get_processor_count())) } } pub mod guard { diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/pal/itron/thread.rs index ae0f718535b..9c1387bf408 100644 --- a/library/std/src/sys/pal/itron/thread.rs +++ b/library/std/src/sys/pal/itron/thread.rs @@ -11,6 +11,7 @@ use crate::{ ffi::CStr, hint, io, mem::ManuallyDrop, + num::NonZero, ptr::NonNull, sync::atomic::{AtomicUsize, Ordering}, sys::thread_local_dtor::run_dtors, @@ -363,6 +364,6 @@ unsafe fn terminate_and_delete_current_task() -> ! { unsafe { crate::hint::unreachable_unchecked() }; } -pub fn available_parallelism() -> io::Result<crate::num::NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { super::unsupported() } diff --git a/library/std/src/sys/pal/mod.rs b/library/std/src/sys/pal/mod.rs index 66b2a4b8885..041b7c35582 100644 --- a/library/std/src/sys/pal/mod.rs +++ b/library/std/src/sys/pal/mod.rs @@ -55,6 +55,9 @@ cfg_if::cfg_if! 
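The hermit and itron hunks above switch the platform `available_parallelism` hooks to return `io::Result<NonZero<usize>>`. User code sees this through `std::thread::available_parallelism`; a small usage sketch (again assuming generic `NonZero` is stable, Rust 1.79+):

```rust
use std::num::NonZero;
use std::thread;

fn main() {
    // Fall back to a single worker when the platform cannot report a count.
    let fallback = NonZero::new(1).unwrap();
    let workers: NonZero<usize> = thread::available_parallelism().unwrap_or(fallback);
    println!("spawning {workers} worker thread(s)");
}
```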
{ } else if #[cfg(target_os = "teeos")] { mod teeos; pub use self::teeos::*; + } else if #[cfg(target_os = "zkvm")] { + mod zkvm; + pub use self::zkvm::*; } else { mod unsupported; pub use self::unsupported::*; diff --git a/library/std/src/sys/pal/sgx/abi/tls/mod.rs b/library/std/src/sys/pal/sgx/abi/tls/mod.rs index 09c4ab3d3e9..6762a43b483 100644 --- a/library/std/src/sys/pal/sgx/abi/tls/mod.rs +++ b/library/std/src/sys/pal/sgx/abi/tls/mod.rs @@ -3,7 +3,7 @@ mod sync_bitset; use self::sync_bitset::*; use crate::cell::Cell; use crate::mem; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::ptr; use crate::sync::atomic::{AtomicUsize, Ordering}; @@ -30,7 +30,7 @@ extern "C" { #[derive(Copy, Clone)] #[repr(C)] -pub struct Key(NonZeroUsize); +pub struct Key(NonZero<usize>); impl Key { fn to_index(self) -> usize { @@ -38,7 +38,7 @@ impl Key { } fn from_index(index: usize) -> Self { - Key(NonZeroUsize::new(index + 1).unwrap()) + Key(NonZero::new(index + 1).unwrap()) } pub fn as_usize(self) -> usize { @@ -46,7 +46,7 @@ impl Key { } pub fn from_usize(index: usize) -> Self { - Key(NonZeroUsize::new(index).unwrap()) + Key(NonZero::new(index).unwrap()) } } diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/raw.rs b/library/std/src/sys/pal/sgx/abi/usercalls/raw.rs index 10c1456d4fd..943b771498f 100644 --- a/library/std/src/sys/pal/sgx/abi/usercalls/raw.rs +++ b/library/std/src/sys/pal/sgx/abi/usercalls/raw.rs @@ -3,14 +3,15 @@ #[unstable(feature = "sgx_platform", issue = "56975")] pub use fortanix_sgx_abi::*; -use crate::num::NonZeroU64; +use crate::num::NonZero; use crate::ptr::NonNull; #[repr(C)] struct UsercallReturn(u64, u64); extern "C" { - fn usercall(nr: NonZeroU64, p1: u64, p2: u64, abort: u64, p3: u64, p4: u64) -> UsercallReturn; + fn usercall(nr: NonZero<u64>, p1: u64, p2: u64, abort: u64, p3: u64, p4: u64) + -> UsercallReturn; } /// Performs the raw usercall operation as defined in the ABI calling convention. @@ -26,7 +27,7 @@ extern "C" { #[unstable(feature = "sgx_platform", issue = "56975")] #[inline] pub unsafe fn do_usercall( - nr: NonZeroU64, + nr: NonZero<u64>, p1: u64, p2: u64, p3: u64, @@ -194,7 +195,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls { #[inline(always)] pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3, $n4: $t4) -> $r { ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( - rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)), RegisterArgument::into_register($n1), RegisterArgument::into_register($n2), RegisterArgument::into_register($n3), @@ -210,7 +211,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls { #[inline(always)] pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3) -> $r { ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( - rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)), RegisterArgument::into_register($n1), RegisterArgument::into_register($n2), RegisterArgument::into_register($n3), @@ -226,7 +227,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls { #[inline(always)] pub unsafe fn $f($n1: $t1, $n2: $t2) -> $r { ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( - rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)), RegisterArgument::into_register($n1), RegisterArgument::into_register($n2), 0,0, @@ -241,7 +242,7 @@ macro_rules! 
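The SGX TLS hunk above keeps storing keys as non-zero integers (now `NonZero<usize>`) by offsetting the zero-based index by one, which preserves the niche so `Option<Key>` stays the size of a `usize`. A sketch of that encoding:

```rust
use std::num::NonZero;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Key(NonZero<usize>);

impl Key {
    fn from_index(index: usize) -> Self {
        // `index + 1` is never zero (ignoring overflow), so the unwrap holds.
        Key(NonZero::new(index + 1).unwrap())
    }

    fn to_index(self) -> usize {
        self.0.get() - 1
    }
}

fn main() {
    assert_eq!(Key::from_index(0).to_index(), 0);
    assert_eq!(Key::from_index(7).to_index(), 7);
    // The niche: an optional key costs no extra space.
    assert_eq!(
        std::mem::size_of::<Option<Key>>(),
        std::mem::size_of::<usize>()
    );
}
```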
enclave_usercalls_internal_define_usercalls { #[inline(always)] pub unsafe fn $f($n1: $t1) -> $r { ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( - rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)), RegisterArgument::into_register($n1), 0,0,0, return_type_is_abort!($r) @@ -255,7 +256,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls { #[inline(always)] pub unsafe fn $f() -> $r { ReturnValue::from_registers(stringify!($f), unsafe { do_usercall( - rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)), 0,0,0,0, return_type_is_abort!($r) ) }) diff --git a/library/std/src/sys/pal/sgx/mod.rs b/library/std/src/sys/pal/sgx/mod.rs index 09d3f7638ca..8ef3495884f 100644 --- a/library/std/src/sys/pal/sgx/mod.rs +++ b/library/std/src/sys/pal/sgx/mod.rs @@ -9,12 +9,8 @@ use crate::io::ErrorKind; use crate::sync::atomic::{AtomicBool, Ordering}; pub mod abi; -mod waitqueue; - pub mod alloc; pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; pub mod fd; #[path = "../unsupported/fs.rs"] @@ -24,9 +20,6 @@ pub mod io; pub mod memchr; pub mod net; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] @@ -36,16 +29,7 @@ pub mod thread; pub mod thread_local_key; pub mod thread_parking; pub mod time; - -mod condvar; -mod mutex; -mod rwlock; - -pub mod locks { - pub use super::condvar::*; - pub use super::mutex::*; - pub use super::rwlock::*; -} +pub mod waitqueue; // SAFETY: must be called only once during runtime initialization. // NOTE: this is not guaranteed to run, for example when Rust code is called externally. 
diff --git a/library/std/src/sys/pal/sgx/net.rs b/library/std/src/sys/pal/sgx/net.rs index 03620a08f2c..c4d5da1627c 100644 --- a/library/std/src/sys/pal/sgx/net.rs +++ b/library/std/src/sys/pal/sgx/net.rs @@ -542,7 +542,4 @@ pub mod netc { pub sin6_flowinfo: u32, pub sin6_scope_id: u32, } - - #[derive(Copy, Clone)] - pub struct sockaddr {} } diff --git a/library/std/src/sys/pal/sgx/thread.rs b/library/std/src/sys/pal/sgx/thread.rs index 7ac9d1d64b4..c797fde7fbd 100644 --- a/library/std/src/sys/pal/sgx/thread.rs +++ b/library/std/src/sys/pal/sgx/thread.rs @@ -2,7 +2,7 @@ use super::unsupported; use crate::ffi::CStr; use crate::io; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::time::Duration; use super::abi::usercalls; @@ -142,7 +142,7 @@ impl Thread { } } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { unsupported() } diff --git a/library/std/src/sys/pal/sgx/waitqueue/mod.rs b/library/std/src/sys/pal/sgx/waitqueue/mod.rs index 25eca61d67b..2d952b7ebbc 100644 --- a/library/std/src/sys/pal/sgx/waitqueue/mod.rs +++ b/library/std/src/sys/pal/sgx/waitqueue/mod.rs @@ -16,7 +16,7 @@ mod tests; mod spin_mutex; mod unsafe_list; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::ops::{Deref, DerefMut}; use crate::panic::{self, AssertUnwindSafe}; use crate::time::Duration; @@ -68,7 +68,7 @@ impl<T> WaitVariable<T> { #[derive(Copy, Clone)] pub enum NotifiedTcs { Single(Tcs), - All { count: NonZeroUsize }, + All { count: NonZero<usize> }, } /// An RAII guard that will notify a set of target threads as well as unlock @@ -252,7 +252,7 @@ impl WaitQueue { entry_guard.wake = true; } - if let Some(count) = NonZeroUsize::new(count) { + if let Some(count) = NonZero::new(count) { Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } }) } else { Err(guard) diff --git a/library/std/src/sys/pal/solid/abi/fs.rs b/library/std/src/sys/pal/solid/abi/fs.rs index 32800bd9a9d..49526f4c9cd 100644 --- a/library/std/src/sys/pal/solid/abi/fs.rs +++ b/library/std/src/sys/pal/solid/abi/fs.rs @@ -1,9 +1,8 @@ //! 
`solid_fs.h` use crate::os::raw::{c_char, c_int, c_uchar}; pub use libc::{ - blksize_t, dev_t, ino_t, off_t, stat, time_t, O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, - O_TRUNC, O_WRONLY, SEEK_CUR, SEEK_END, SEEK_SET, S_IEXEC, S_IFBLK, S_IFCHR, S_IFDIR, S_IFIFO, - S_IFMT, S_IFREG, S_IREAD, S_IWRITE, + ino_t, off_t, stat, time_t, O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY, + SEEK_CUR, SEEK_END, SEEK_SET, S_IFBLK, S_IFCHR, S_IFDIR, S_IFIFO, S_IFMT, S_IFREG, S_IWRITE, }; pub const O_ACCMODE: c_int = 0x3; diff --git a/library/std/src/sys/pal/solid/abi/sockets.rs b/library/std/src/sys/pal/solid/abi/sockets.rs index eb06a6dd927..11c430360ce 100644 --- a/library/std/src/sys/pal/solid/abi/sockets.rs +++ b/library/std/src/sys/pal/solid/abi/sockets.rs @@ -1,5 +1,5 @@ use crate::os::raw::{c_char, c_uint, c_void}; -pub use libc::{c_int, c_long, size_t, ssize_t, suseconds_t, time_t, timeval}; +pub use libc::{c_int, c_long, size_t, ssize_t, timeval}; pub const SOLID_NET_ERR_BASE: c_int = -2000; pub const EINPROGRESS: c_int = SOLID_NET_ERR_BASE - libc::EINPROGRESS; diff --git a/library/std/src/sys/pal/solid/fs.rs b/library/std/src/sys/pal/solid/fs.rs index 6c66b93a3e1..a6c1336109a 100644 --- a/library/std/src/sys/pal/solid/fs.rs +++ b/library/std/src/sys/pal/solid/fs.rs @@ -388,7 +388,7 @@ impl File { // Safety: `num_bytes_read` bytes were written to the unfilled // portion of the buffer - cursor.advance(num_bytes_read); + cursor.advance_unchecked(num_bytes_read); Ok(()) } diff --git a/library/std/src/sys/pal/solid/mod.rs b/library/std/src/sys/pal/solid/mod.rs index 5af83653cf8..9ada7d130f0 100644 --- a/library/std/src/sys/pal/solid/mod.rs +++ b/library/std/src/sys/pal/solid/mod.rs @@ -2,27 +2,23 @@ #![allow(missing_docs, nonstandard_style)] #![deny(unsafe_op_in_unsafe_fn)] -mod abi; +pub mod abi; #[path = "../itron"] -mod itron { - pub(super) mod abi; - pub mod condvar; - pub(super) mod error; - pub mod mutex; - pub(super) mod spin; - pub(super) mod task; +pub mod itron { + pub mod abi; + pub mod error; + pub mod spin; + pub mod task; pub mod thread; pub mod thread_parking; - pub(super) mod time; + pub mod time; use super::unsupported; } pub mod alloc; #[path = "../unsupported/args.rs"] pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; // `error` is `pub(crate)` so that it can be accessed by `itron/error.rs` as // `crate::sys::error` @@ -31,9 +27,6 @@ pub mod fs; pub mod io; pub mod net; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] @@ -46,14 +39,6 @@ pub mod thread_local_key; pub use self::itron::thread_parking; pub mod time; -mod rwlock; - -pub mod locks { - pub use super::itron::condvar::*; - pub use super::itron::mutex::*; - pub use super::rwlock::*; -} - // SAFETY: must be called only once during runtime initialization. // NOTE: this is not guaranteed to run, for example when Rust code is called externally. 
pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {} diff --git a/library/std/src/sys/pal/solid/net.rs b/library/std/src/sys/pal/solid/net.rs index 1c310648a3d..6ea874e509e 100644 --- a/library/std/src/sys/pal/solid/net.rs +++ b/library/std/src/sys/pal/solid/net.rs @@ -209,7 +209,7 @@ impl Socket { netc::recv(self.as_raw_fd(), buf.as_mut().as_mut_ptr().cast(), buf.capacity(), flags) })?; unsafe { - buf.advance(ret as usize); + buf.advance_unchecked(ret as usize); } Ok(()) } diff --git a/library/std/src/sys/pal/solid/os.rs b/library/std/src/sys/pal/solid/os.rs index ff81544ba91..5ceab3b27e0 100644 --- a/library/std/src/sys/pal/solid/os.rs +++ b/library/std/src/sys/pal/solid/os.rs @@ -172,7 +172,7 @@ pub fn env() -> Env { pub fn getenv(k: &OsStr) -> Option<OsString> { // environment variables with a nul byte can't be set, so their value is // always None as well - run_with_cstr(k.as_bytes(), |k| { + run_with_cstr(k.as_bytes(), &|k| { let _guard = env_read_lock(); let v = unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char; @@ -190,8 +190,8 @@ pub fn getenv(k: &OsStr) -> Option<OsString> { } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { - run_with_cstr(k.as_bytes(), |k| { - run_with_cstr(v.as_bytes(), |v| { + run_with_cstr(k.as_bytes(), &|k| { + run_with_cstr(v.as_bytes(), &|v| { let _guard = ENV_LOCK.write(); cvt_env(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop) }) @@ -199,7 +199,7 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { } pub fn unsetenv(n: &OsStr) -> io::Result<()> { - run_with_cstr(n.as_bytes(), |nbuf| { + run_with_cstr(n.as_bytes(), &|nbuf| { let _guard = ENV_LOCK.write(); cvt_env(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop) }) diff --git a/library/std/src/sys/pal/teeos/locks/mod.rs b/library/std/src/sys/pal/teeos/locks/mod.rs deleted file mode 100644 index c58e9c7fd45..00000000000 --- a/library/std/src/sys/pal/teeos/locks/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -pub mod condvar; -#[path = "../../unix/locks/pthread_mutex.rs"] -pub mod mutex; -pub mod rwlock; - -pub(crate) use condvar::Condvar; -pub(crate) use mutex::Mutex; -pub(crate) use rwlock::RwLock; diff --git a/library/std/src/sys/pal/teeos/mod.rs b/library/std/src/sys/pal/teeos/mod.rs index ed8c54b2c36..51ef96a69a0 100644 --- a/library/std/src/sys/pal/teeos/mod.rs +++ b/library/std/src/sys/pal/teeos/mod.rs @@ -11,11 +11,8 @@ pub use self::rand::hashmap_random_keys; pub mod alloc; #[path = "../unsupported/args.rs"] pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; #[path = "../unsupported/env.rs"] pub mod env; -pub mod locks; //pub mod fd; #[path = "../unsupported/fs.rs"] pub mod fs; @@ -27,10 +24,6 @@ pub mod net; #[path = "../unsupported/once.rs"] pub mod once; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -#[path = "../unix/path.rs"] -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] diff --git a/library/std/src/sys/pal/teeos/net.rs b/library/std/src/sys/pal/teeos/net.rs index 0df681dbfa5..fed95205027 100644 --- a/library/std/src/sys/pal/teeos/net.rs +++ b/library/std/src/sys/pal/teeos/net.rs @@ -364,9 +364,6 @@ pub mod netc { pub sin6_flowinfo: u32, pub sin6_scope_id: u32, } - - #[derive(Copy, Clone)] - pub struct sockaddr {} } pub type Socket = UdpSocket; diff --git a/library/std/src/sys/pal/teeos/thread.rs b/library/std/src/sys/pal/teeos/thread.rs index 155f333f906..77f9040ead5 100644 --- a/library/std/src/sys/pal/teeos/thread.rs +++ b/library/std/src/sys/pal/teeos/thread.rs 
@@ -4,7 +4,7 @@ use crate::cmp; use crate::ffi::CStr; use crate::io; use crate::mem; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::ptr; use crate::sys::os; use crate::time::Duration; @@ -140,7 +140,7 @@ impl Drop for Thread { // Note: Both `sched_getaffinity` and `sysconf` are available but not functional on // teeos, so this function always returns an Error! -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { Err(io::Error::new( io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform", diff --git a/library/std/src/sys/pal/uefi/helpers.rs b/library/std/src/sys/pal/uefi/helpers.rs index 9837cc89f2d..ba53ed88f37 100644 --- a/library/std/src/sys/pal/uefi/helpers.rs +++ b/library/std/src/sys/pal/uefi/helpers.rs @@ -146,3 +146,11 @@ pub(crate) fn image_handle_protocol<T>(protocol_guid: Guid) -> Option<NonNull<T> let system_handle = uefi::env::try_image_handle()?; open_protocol(system_handle, protocol_guid).ok() } + +/// Get RuntimeServices +pub(crate) fn runtime_services() -> Option<NonNull<r_efi::efi::RuntimeServices>> { + let system_table: NonNull<r_efi::efi::SystemTable> = + crate::os::uefi::env::try_system_table()?.cast(); + let runtime_services = unsafe { (*system_table.as_ptr()).runtime_services }; + NonNull::new(runtime_services) +} diff --git a/library/std/src/sys/pal/uefi/mod.rs b/library/std/src/sys/pal/uefi/mod.rs index 4edc00e3ea0..ff8e3bd32ad 100644 --- a/library/std/src/sys/pal/uefi/mod.rs +++ b/library/std/src/sys/pal/uefi/mod.rs @@ -14,35 +14,26 @@ pub mod alloc; pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; #[path = "../unsupported/fs.rs"] pub mod fs; #[path = "../unsupported/io.rs"] pub mod io; -#[path = "../unsupported/locks/mod.rs"] -pub mod locks; #[path = "../unsupported/net.rs"] pub mod net; #[path = "../unsupported/once.rs"] pub mod once; pub mod os; -#[path = "../windows/os_str.rs"] -pub mod os_str; -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] pub mod process; pub mod stdio; -#[path = "../unsupported/thread.rs"] pub mod thread; #[path = "../unsupported/thread_local_key.rs"] pub mod thread_local_key; #[path = "../unsupported/thread_parking.rs"] pub mod thread_parking; -#[path = "../unsupported/time.rs"] pub mod time; mod helpers; diff --git a/library/std/src/sys/pal/uefi/path.rs b/library/std/src/sys/pal/uefi/path.rs deleted file mode 100644 index 106682eee56..00000000000 --- a/library/std/src/sys/pal/uefi/path.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::unsupported; -use crate::ffi::OsStr; -use crate::io; -use crate::path::{Path, PathBuf, Prefix}; - -pub const MAIN_SEP_STR: &str = "\\"; -pub const MAIN_SEP: char = '\\'; - -#[inline] -pub fn is_sep_byte(b: u8) -> bool { - b == b'\\' -} - -#[inline] -pub fn is_verbatim_sep(b: u8) -> bool { - b == b'\\' -} - -pub fn parse_prefix(_p: &OsStr) -> Option<Prefix<'_>> { - None -} - -pub(crate) fn absolute(_path: &Path) -> io::Result<PathBuf> { - unsupported() -} diff --git a/library/std/src/sys/pal/uefi/tests.rs b/library/std/src/sys/pal/uefi/tests.rs index 8806eda3ac0..5eb36da922b 100644 --- a/library/std/src/sys/pal/uefi/tests.rs +++ b/library/std/src/sys/pal/uefi/tests.rs @@ -1,4 +1,6 @@ use super::alloc::*; +use super::time::*; +use crate::time::Duration; #[test] fn align() { @@ -19,3 +21,21 @@ fn align() { } } } + +#[test] +fn epoch() { + let t = r_efi::system::Time { + year: 1970, + month: 1, + day: 1, + 
hour: 0, + minute: 0, + second: 0, + nanosecond: 0, + timezone: r_efi::efi::UNSPECIFIED_TIMEZONE, + daylight: 0, + pad1: 0, + pad2: 0, + }; + assert_eq!(system_time_internal::uefi_time_to_duration(t), Duration::new(0, 0)); +} diff --git a/library/std/src/sys/pal/uefi/thread.rs b/library/std/src/sys/pal/uefi/thread.rs new file mode 100644 index 00000000000..3d8fa27251f --- /dev/null +++ b/library/std/src/sys/pal/uefi/thread.rs @@ -0,0 +1,60 @@ +use super::unsupported; +use crate::ffi::CStr; +use crate::io; +use crate::num::NonZero; +use crate::ptr::NonNull; +use crate::time::Duration; + +pub struct Thread(!); + +pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { + unsupported() + } + + pub fn yield_now() { + // do nothing + } + + pub fn set_name(_name: &CStr) { + // nope + } + + pub fn sleep(dur: Duration) { + let boot_services: NonNull<r_efi::efi::BootServices> = + crate::os::uefi::env::boot_services().expect("can't sleep").cast(); + let mut dur_ms = dur.as_micros(); + // ceil up to the nearest microsecond + if dur.subsec_nanos() % 1000 > 0 { + dur_ms += 1; + } + + while dur_ms > 0 { + let ms = crate::cmp::min(dur_ms, usize::MAX as u128); + let _ = unsafe { ((*boot_services.as_ptr()).stall)(ms as usize) }; + dur_ms -= ms; + } + } + + pub fn join(self) { + self.0 + } +} + +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + // UEFI is single threaded + Ok(NonZero::new(1).unwrap()) +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} diff --git a/library/std/src/sys/pal/uefi/time.rs b/library/std/src/sys/pal/uefi/time.rs new file mode 100644 index 00000000000..76562cf9f51 --- /dev/null +++ b/library/std/src/sys/pal/uefi/time.rs @@ -0,0 +1,221 @@ +use crate::time::Duration; + +const SECS_IN_MINUTE: u64 = 60; +const SECS_IN_HOUR: u64 = SECS_IN_MINUTE * 60; +const SECS_IN_DAY: u64 = SECS_IN_HOUR * 24; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant(Duration); + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime(Duration); + +pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0)); + +impl Instant { + pub fn now() -> Instant { + // If we have a timestamp protocol, use it. 
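The new UEFI `Thread::sleep` above rounds the duration up to whole microseconds and feeds it to the firmware `Stall()` boot service in `usize::MAX`-sized chunks (note that the local variable is named `dur_ms` but holds microseconds). A portable sketch of that loop with the firmware call replaced by a closure, so it runs anywhere:

```rust
use std::time::Duration;

/// `stall_us` stands in for `(*boot_services).stall`, which blocks for the
/// given number of microseconds.
fn sleep_via_stall(dur: Duration, stall_us: &mut dyn FnMut(usize)) {
    let mut dur_us = dur.as_micros();
    // Round up to the nearest microsecond.
    if dur.subsec_nanos() % 1000 > 0 {
        dur_us += 1;
    }
    while dur_us > 0 {
        let chunk = dur_us.min(usize::MAX as u128);
        stall_us(chunk as usize);
        dur_us -= chunk;
    }
}

fn main() {
    let mut total_us: u128 = 0;
    sleep_via_stall(Duration::new(1, 1_500), &mut |us| total_us += us as u128);
    // 1 s + 1_500 ns rounds up to 1_000_002 µs.
    assert_eq!(total_us, 1_000_002);
}
```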
+ if let Some(x) = instant_internal::timestamp_protocol() { + return x; + } + + if let Some(x) = instant_internal::platform_specific() { + return x; + } + + panic!("time not implemented on this platform") + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.0.checked_sub(other.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_sub(*other)?)) + } +} + +impl SystemTime { + pub fn now() -> SystemTime { + system_time_internal::now() + .unwrap_or_else(|| panic!("time not implemented on this platform")) + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_sub(*other)?)) + } +} + +pub(crate) mod system_time_internal { + use super::super::helpers; + use super::*; + use crate::mem::MaybeUninit; + use crate::ptr::NonNull; + use r_efi::efi::{RuntimeServices, Time}; + + pub fn now() -> Option<SystemTime> { + let runtime_services: NonNull<RuntimeServices> = helpers::runtime_services()?; + let mut t: MaybeUninit<Time> = MaybeUninit::uninit(); + let r = unsafe { + ((*runtime_services.as_ptr()).get_time)(t.as_mut_ptr(), crate::ptr::null_mut()) + }; + + if r.is_error() { + return None; + } + + let t = unsafe { t.assume_init() }; + + Some(SystemTime(uefi_time_to_duration(t))) + } + + // This algorithm is based on the one described in the post + // https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html + pub const fn uefi_time_to_duration(t: r_efi::system::Time) -> Duration { + assert!(t.month <= 12); + assert!(t.month != 0); + + const YEAR_BASE: u32 = 4800; /* Before min year, multiple of 400. 
*/ + + // Calculate the number of days since 1/1/1970 + // Use 1 March as the start + let (m_adj, overflow): (u32, bool) = (t.month as u32).overflowing_sub(3); + let (carry, adjust): (u32, u32) = if overflow { (1, 12) } else { (0, 0) }; + let y_adj: u32 = (t.year as u32) + YEAR_BASE - carry; + let month_days: u32 = (m_adj.wrapping_add(adjust) * 62719 + 769) / 2048; + let leap_days: u32 = y_adj / 4 - y_adj / 100 + y_adj / 400; + let days: u32 = y_adj * 365 + leap_days + month_days + (t.day as u32 - 1) - 2472632; + + let localtime_epoch: u64 = (days as u64) * SECS_IN_DAY + + (t.second as u64) + + (t.minute as u64) * SECS_IN_MINUTE + + (t.hour as u64) * SECS_IN_HOUR; + + let utc_epoch: u64 = if t.timezone == r_efi::efi::UNSPECIFIED_TIMEZONE { + localtime_epoch + } else { + (localtime_epoch as i64 + (t.timezone as i64) * SECS_IN_MINUTE as i64) as u64 + }; + + Duration::new(utc_epoch, t.nanosecond) + } +} + +pub(crate) mod instant_internal { + use super::super::helpers; + use super::*; + use crate::mem::MaybeUninit; + use crate::ptr::NonNull; + use crate::sync::atomic::{AtomicPtr, Ordering}; + use crate::sys_common::mul_div_u64; + use r_efi::protocols::timestamp; + + const NS_PER_SEC: u64 = 1_000_000_000; + + pub fn timestamp_protocol() -> Option<Instant> { + fn try_handle(handle: NonNull<crate::ffi::c_void>) -> Option<u64> { + let protocol: NonNull<timestamp::Protocol> = + helpers::open_protocol(handle, timestamp::PROTOCOL_GUID).ok()?; + let mut properties: MaybeUninit<timestamp::Properties> = MaybeUninit::uninit(); + + let r = unsafe { ((*protocol.as_ptr()).get_properties)(properties.as_mut_ptr()) }; + if r.is_error() { + return None; + } + + let freq = unsafe { properties.assume_init().frequency }; + let ts = unsafe { ((*protocol.as_ptr()).get_timestamp)() }; + Some(mul_div_u64(ts, NS_PER_SEC, freq)) + } + + static LAST_VALID_HANDLE: AtomicPtr<crate::ffi::c_void> = + AtomicPtr::new(crate::ptr::null_mut()); + + if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) { + if let Some(ns) = try_handle(handle) { + return Some(Instant(Duration::from_nanos(ns))); + } + } + + if let Ok(handles) = helpers::locate_handles(timestamp::PROTOCOL_GUID) { + for handle in handles { + if let Some(ns) = try_handle(handle) { + LAST_VALID_HANDLE.store(handle.as_ptr(), Ordering::Release); + return Some(Instant(Duration::from_nanos(ns))); + } + } + } + + None + } + + pub fn platform_specific() -> Option<Instant> { + cfg_if::cfg_if! 
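The day count in `uefi_time_to_duration` above is the usual civil-date-to-days trick from the linked post: shift the year so it starts in March, approximate cumulative month lengths with `(m * 62719 + 769) / 2048`, add leap days, and subtract the fixed offset for 1970-01-01. A standalone re-derivation checked against two known dates (a sketch, not the std code):

```rust
fn days_since_unix_epoch(year: u32, month: u32, day: u32) -> u32 {
    const YEAR_BASE: u32 = 4800; // before the minimum year, multiple of 400

    // Treat March as the first month so leap days land at the end of the year.
    let (m_adj, overflow) = month.overflowing_sub(3);
    let (carry, adjust) = if overflow { (1, 12) } else { (0, 0) };
    let y_adj = year + YEAR_BASE - carry;

    let month_days = (m_adj.wrapping_add(adjust) * 62719 + 769) / 2048;
    let leap_days = y_adj / 4 - y_adj / 100 + y_adj / 400;
    y_adj * 365 + leap_days + month_days + (day - 1) - 2472632
}

fn main() {
    assert_eq!(days_since_unix_epoch(1970, 1, 1), 0);
    assert_eq!(days_since_unix_epoch(2000, 1, 1), 10_957);
    // 10_957 days * 86_400 s/day = 946_684_800 s, the Unix time of 2000-01-01.
    assert_eq!(10_957_u64 * 86_400, 946_684_800);
}
```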
{ + if #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] { + timestamp_rdtsc().map(Instant) + } else { + None + } + } + } + + #[cfg(target_arch = "x86_64")] + fn timestamp_rdtsc() -> Option<Duration> { + if !crate::arch::x86_64::has_cpuid() { + return None; + } + + static FREQUENCY: crate::sync::OnceLock<u64> = crate::sync::OnceLock::new(); + + // Get Frequency in Mhz + // Inspired by [`edk2/UefiCpuPkg/Library/CpuTimerLib/CpuTimerLib.c`](https://github.com/tianocore/edk2/blob/master/UefiCpuPkg/Library/CpuTimerLib/CpuTimerLib.c) + let freq = FREQUENCY + .get_or_try_init(|| { + let cpuid = unsafe { crate::arch::x86_64::__cpuid(0x15) }; + if cpuid.eax == 0 || cpuid.ebx == 0 || cpuid.ecx == 0 { + return Err(()); + } + Ok(mul_div_u64(cpuid.ecx as u64, cpuid.ebx as u64, cpuid.eax as u64)) + }) + .ok()?; + + let ts = unsafe { crate::arch::x86_64::_rdtsc() }; + let ns = mul_div_u64(ts, 1000, *freq); + Some(Duration::from_nanos(ns)) + } + + #[cfg(target_arch = "x86")] + fn timestamp_rdtsc() -> Option<Duration> { + if !crate::arch::x86::has_cpuid() { + return None; + } + + static FREQUENCY: crate::sync::OnceLock<u64> = crate::sync::OnceLock::new(); + + let freq = FREQUENCY + .get_or_try_init(|| { + let cpuid = unsafe { crate::arch::x86::__cpuid(0x15) }; + if cpuid.eax == 0 || cpuid.ebx == 0 || cpuid.ecx == 0 { + return Err(()); + } + Ok(mul_div_u64(cpuid.ecx as u64, cpuid.ebx as u64, cpuid.eax as u64)) + }) + .ok()?; + + let ts = unsafe { crate::arch::x86::_rdtsc() }; + let ns = mul_div_u64(ts, 1000, *freq); + Some(Duration::from_nanos(ns)) + } +} diff --git a/library/std/src/sys/pal/unix/args.rs b/library/std/src/sys/pal/unix/args.rs index 9f7dcc0416e..78e82d9c194 100644 --- a/library/std/src/sys/pal/unix/args.rs +++ b/library/std/src/sys/pal/unix/args.rs @@ -201,9 +201,9 @@ mod imp { // As _NSGetArgc and _NSGetArgv aren't mentioned in iOS docs // and use underscores in their names - they're most probably - // are considered private and therefore should be avoided - // Here is another way to get arguments using Objective C - // runtime + // are considered private and therefore should be avoided. + // Here is another way to get arguments using the Objective-C + // runtime. // // In general it looks like: // res = Vec::new() @@ -213,53 +213,60 @@ mod imp { // res #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos"))] pub fn args() -> Args { - use crate::ffi::OsString; + use crate::ffi::{c_char, c_void, OsString}; use crate::mem; use crate::str; - extern "C" { - fn sel_registerName(name: *const libc::c_uchar) -> Sel; - fn objc_getClass(class_name: *const libc::c_uchar) -> NsId; - } + type Sel = *const c_void; + type NsId = *const c_void; + type NSUInteger = usize; - #[cfg(target_arch = "aarch64")] extern "C" { - fn objc_msgSend(obj: NsId, sel: Sel) -> NsId; - #[allow(clashing_extern_declarations)] - #[link_name = "objc_msgSend"] - fn objc_msgSend_ul(obj: NsId, sel: Sel, i: libc::c_ulong) -> NsId; - } + fn sel_registerName(name: *const c_char) -> Sel; + fn objc_getClass(class_name: *const c_char) -> NsId; - #[cfg(not(target_arch = "aarch64"))] - extern "C" { - fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId; - #[allow(clashing_extern_declarations)] - #[link_name = "objc_msgSend"] - fn objc_msgSend_ul(obj: NsId, sel: Sel, ...) -> NsId; + // This must be transmuted to an appropriate function pointer type before being called. 
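Both `timestamp_rdtsc` variants above convert raw counter ticks to nanoseconds with `mul_div_u64`, an internal `sys_common` helper that computes `a * b / c` with a widened intermediate so the product cannot overflow. A sketch of the same conversion done directly in `u128`:

```rust
/// Convert raw timer ticks to nanoseconds without overflowing the
/// intermediate product, as `mul_div_u64(ticks, NS_PER_SEC, freq)` does.
fn ticks_to_nanos(ticks: u64, freq_hz: u64) -> u64 {
    const NS_PER_SEC: u128 = 1_000_000_000;
    ((ticks as u128 * NS_PER_SEC) / freq_hz as u128) as u64
}

fn main() {
    // A 3 GHz counter: 3_000 ticks correspond to 1_000 ns.
    assert_eq!(ticks_to_nanos(3_000, 3_000_000_000), 1_000);
    // Tick counts whose 64-bit product would overflow still convert correctly.
    assert_eq!(ticks_to_nanos(u64::MAX / 2, 1_000_000_000), u64::MAX / 2);
}
```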
+ fn objc_msgSend(); } - type Sel = *const libc::c_void; - type NsId = *const libc::c_void; + const MSG_SEND_PTR: unsafe extern "C" fn() = objc_msgSend; + const MSG_SEND_NO_ARGUMENTS_RETURN_PTR: unsafe extern "C" fn(NsId, Sel) -> *const c_void = + unsafe { mem::transmute(MSG_SEND_PTR) }; + const MSG_SEND_NO_ARGUMENTS_RETURN_NSUINTEGER: unsafe extern "C" fn( + NsId, + Sel, + ) -> NSUInteger = unsafe { mem::transmute(MSG_SEND_PTR) }; + const MSG_SEND_NSINTEGER_ARGUMENT_RETURN_PTR: unsafe extern "C" fn( + NsId, + Sel, + NSUInteger, + ) + -> *const c_void = unsafe { mem::transmute(MSG_SEND_PTR) }; let mut res = Vec::new(); unsafe { - let process_info_sel = - sel_registerName(c"processInfo".as_ptr() as *const libc::c_uchar); - let arguments_sel = sel_registerName(c"arguments".as_ptr() as *const libc::c_uchar); - let utf8_sel = sel_registerName(c"UTF8String".as_ptr() as *const libc::c_uchar); - let count_sel = sel_registerName(c"count".as_ptr() as *const libc::c_uchar); - let object_at_sel = - sel_registerName(c"objectAtIndex:".as_ptr() as *const libc::c_uchar); - - let klass = objc_getClass(c"NSProcessInfo".as_ptr() as *const libc::c_uchar); - let info = objc_msgSend(klass, process_info_sel); - let args = objc_msgSend(info, arguments_sel); - - let cnt: usize = mem::transmute(objc_msgSend(args, count_sel)); + let process_info_sel = sel_registerName(c"processInfo".as_ptr()); + let arguments_sel = sel_registerName(c"arguments".as_ptr()); + let count_sel = sel_registerName(c"count".as_ptr()); + let object_at_index_sel = sel_registerName(c"objectAtIndex:".as_ptr()); + let utf8string_sel = sel_registerName(c"UTF8String".as_ptr()); + + let klass = objc_getClass(c"NSProcessInfo".as_ptr()); + // `+[NSProcessInfo processInfo]` returns an object with +0 retain count, so no need to manually `retain/release`. + let info = MSG_SEND_NO_ARGUMENTS_RETURN_PTR(klass, process_info_sel); + + // `-[NSProcessInfo arguments]` returns an object with +0 retain count, so no need to manually `retain/release`. + let args = MSG_SEND_NO_ARGUMENTS_RETURN_PTR(info, arguments_sel); + + let cnt = MSG_SEND_NO_ARGUMENTS_RETURN_NSUINTEGER(args, count_sel); for i in 0..cnt { - let tmp = objc_msgSend_ul(args, object_at_sel, i as libc::c_ulong); - let utf_c_str: *const libc::c_char = mem::transmute(objc_msgSend(tmp, utf8_sel)); + // `-[NSArray objectAtIndex:]` returns an object whose lifetime is tied to the array, so no need to manually `retain/release`. + let ns_string = + MSG_SEND_NSINTEGER_ARGUMENT_RETURN_PTR(args, object_at_index_sel, i); + // The lifetime of this pointer is tied to the NSString, as well as the current autorelease pool, which is why we heap-allocate the string below. + let utf_c_str: *const c_char = + MSG_SEND_NO_ARGUMENTS_RETURN_PTR(ns_string, utf8string_sel).cast(); let bytes = CStr::from_ptr(utf_c_str).to_bytes(); res.push(OsString::from(str::from_utf8(bytes).unwrap())) } diff --git a/library/std/src/sys/pal/unix/cmath.rs b/library/std/src/sys/pal/unix/cmath.rs deleted file mode 100644 index 5346d229116..00000000000 --- a/library/std/src/sys/pal/unix/cmath.rs +++ /dev/null @@ -1,37 +0,0 @@ -#![cfg(not(test))] - -// These symbols are all defined by `libm`, -// or by `compiler-builtins` on unsupported platforms. 
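The rewritten argument-fetching code above passes selector names as C string literals (`c"..."`, stable since Rust 1.77), which yield a `&'static CStr` directly instead of casting a byte string. A minimal sketch of what those literals provide:

```rust
use std::ffi::CStr;

fn main() {
    let sel: &'static CStr = c"processInfo";
    assert_eq!(sel.to_bytes(), b"processInfo");
    // `.as_ptr()` is the NUL-terminated `*const c_char` handed to
    // `sel_registerName` in the diff above.
    let _raw = sel.as_ptr();
}
```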
- -extern "C" { - pub fn acos(n: f64) -> f64; - pub fn acosf(n: f32) -> f32; - pub fn asin(n: f64) -> f64; - pub fn asinf(n: f32) -> f32; - pub fn atan(n: f64) -> f64; - pub fn atan2(a: f64, b: f64) -> f64; - pub fn atan2f(a: f32, b: f32) -> f32; - pub fn atanf(n: f32) -> f32; - pub fn cbrt(n: f64) -> f64; - pub fn cbrtf(n: f32) -> f32; - pub fn cosh(n: f64) -> f64; - pub fn coshf(n: f32) -> f32; - pub fn expm1(n: f64) -> f64; - pub fn expm1f(n: f32) -> f32; - pub fn fdim(a: f64, b: f64) -> f64; - pub fn fdimf(a: f32, b: f32) -> f32; - pub fn hypot(x: f64, y: f64) -> f64; - pub fn hypotf(x: f32, y: f32) -> f32; - pub fn log1p(n: f64) -> f64; - pub fn log1pf(n: f32) -> f32; - pub fn sinh(n: f64) -> f64; - pub fn sinhf(n: f32) -> f32; - pub fn tan(n: f64) -> f64; - pub fn tanf(n: f32) -> f32; - pub fn tanh(n: f64) -> f64; - pub fn tanhf(n: f32) -> f32; - pub fn tgamma(n: f64) -> f64; - pub fn tgammaf(n: f32) -> f32; - pub fn lgamma_r(n: f64, s: &mut i32) -> f64; - pub fn lgammaf_r(n: f32, s: &mut i32) -> f32; -} diff --git a/library/std/src/sys/pal/unix/fd.rs b/library/std/src/sys/pal/unix/fd.rs index bf1fb3123c4..a1c0321876f 100644 --- a/library/std/src/sys/pal/unix/fd.rs +++ b/library/std/src/sys/pal/unix/fd.rs @@ -161,7 +161,7 @@ impl FileDesc { // Safety: `ret` bytes were written to the initialized portion of the buffer unsafe { - cursor.advance(ret as usize); + cursor.advance_unchecked(ret as usize); } Ok(()) } diff --git a/library/std/src/sys/pal/unix/fs.rs b/library/std/src/sys/pal/unix/fs.rs index 6d0b892ea2f..c75323ef775 100644 --- a/library/std/src/sys/pal/unix/fs.rs +++ b/library/std/src/sys/pal/unix/fs.rs @@ -1118,7 +1118,7 @@ impl OpenOptions { impl File { pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { - run_path_with_cstr(path, |path| File::open_c(path, opts)) + run_path_with_cstr(path, &|path| File::open_c(path, opts)) } pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { @@ -1394,7 +1394,7 @@ impl DirBuilder { } pub fn mkdir(&self, p: &Path) -> io::Result<()> { - run_path_with_cstr(p, |p| cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }).map(|_| ())) + run_path_with_cstr(p, &|p| cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }).map(|_| ())) } pub fn set_mode(&mut self, mode: u32) { @@ -1575,7 +1575,7 @@ impl fmt::Debug for File { } pub fn readdir(path: &Path) -> io::Result<ReadDir> { - let ptr = run_path_with_cstr(path, |p| unsafe { Ok(libc::opendir(p.as_ptr())) })?; + let ptr = run_path_with_cstr(path, &|p| unsafe { Ok(libc::opendir(p.as_ptr())) })?; if ptr.is_null() { Err(Error::last_os_error()) } else { @@ -1586,27 +1586,27 @@ pub fn readdir(path: &Path) -> io::Result<ReadDir> { } pub fn unlink(p: &Path) -> io::Result<()> { - run_path_with_cstr(p, |p| cvt(unsafe { libc::unlink(p.as_ptr()) }).map(|_| ())) + run_path_with_cstr(p, &|p| cvt(unsafe { libc::unlink(p.as_ptr()) }).map(|_| ())) } pub fn rename(old: &Path, new: &Path) -> io::Result<()> { - run_path_with_cstr(old, |old| { - run_path_with_cstr(new, |new| { + run_path_with_cstr(old, &|old| { + run_path_with_cstr(new, &|new| { cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }).map(|_| ()) }) }) } pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> { - run_path_with_cstr(p, |p| cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }).map(|_| ())) + run_path_with_cstr(p, &|p| cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }).map(|_| ())) } pub fn rmdir(p: &Path) -> io::Result<()> { - run_path_with_cstr(p, |p| cvt(unsafe { libc::rmdir(p.as_ptr()) }).map(|_| 
())) + run_path_with_cstr(p, &|p| cvt(unsafe { libc::rmdir(p.as_ptr()) }).map(|_| ())) } pub fn readlink(p: &Path) -> io::Result<PathBuf> { - run_path_with_cstr(p, |c_path| { + run_path_with_cstr(p, &|c_path| { let p = c_path.as_ptr(); let mut buf = Vec::with_capacity(256); @@ -1635,16 +1635,16 @@ pub fn readlink(p: &Path) -> io::Result<PathBuf> { } pub fn symlink(original: &Path, link: &Path) -> io::Result<()> { - run_path_with_cstr(original, |original| { - run_path_with_cstr(link, |link| { + run_path_with_cstr(original, &|original| { + run_path_with_cstr(link, &|link| { cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) }).map(|_| ()) }) }) } pub fn link(original: &Path, link: &Path) -> io::Result<()> { - run_path_with_cstr(original, |original| { - run_path_with_cstr(link, |link| { + run_path_with_cstr(original, &|original| { + run_path_with_cstr(link, &|link| { cfg_if::cfg_if! { if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon", target_os = "vita"))] { // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves @@ -1678,7 +1678,7 @@ pub fn link(original: &Path, link: &Path) -> io::Result<()> { } pub fn stat(p: &Path) -> io::Result<FileAttr> { - run_path_with_cstr(p, |p| { + run_path_with_cstr(p, &|p| { cfg_has_statx! { if let Some(ret) = unsafe { try_statx( libc::AT_FDCWD, @@ -1697,7 +1697,7 @@ pub fn stat(p: &Path) -> io::Result<FileAttr> { } pub fn lstat(p: &Path) -> io::Result<FileAttr> { - run_path_with_cstr(p, |p| { + run_path_with_cstr(p, &|p| { cfg_has_statx! { if let Some(ret) = unsafe { try_statx( libc::AT_FDCWD, @@ -1716,7 +1716,7 @@ pub fn lstat(p: &Path) -> io::Result<FileAttr> { } pub fn canonicalize(p: &Path) -> io::Result<PathBuf> { - let r = run_path_with_cstr(p, |path| unsafe { + let r = run_path_with_cstr(p, &|path| unsafe { Ok(libc::realpath(path.as_ptr(), ptr::null_mut())) })?; if r.is_null() { @@ -1879,7 +1879,7 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { // Opportunistically attempt to create a copy-on-write clone of `from` // using `fclonefileat`. 
if HAS_FCLONEFILEAT.load(Ordering::Relaxed) { - let clonefile_result = run_path_with_cstr(to, |to| { + let clonefile_result = run_path_with_cstr(to, &|to| { cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) }) }); match clonefile_result { @@ -1925,7 +1925,7 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { } pub fn chown(path: &Path, uid: u32, gid: u32) -> io::Result<()> { - run_path_with_cstr(path, |path| { + run_path_with_cstr(path, &|path| { cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) }) .map(|_| ()) }) @@ -1937,7 +1937,7 @@ pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> { } pub fn lchown(path: &Path, uid: u32, gid: u32) -> io::Result<()> { - run_path_with_cstr(path, |path| { + run_path_with_cstr(path, &|path| { cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) }) .map(|_| ()) }) @@ -1945,7 +1945,7 @@ pub fn lchown(path: &Path, uid: u32, gid: u32) -> io::Result<()> { #[cfg(not(any(target_os = "fuchsia", target_os = "vxworks")))] pub fn chroot(dir: &Path) -> io::Result<()> { - run_path_with_cstr(dir, |dir| cvt(unsafe { libc::chroot(dir.as_ptr()) }).map(|_| ())) + run_path_with_cstr(dir, &|dir| cvt(unsafe { libc::chroot(dir.as_ptr()) }).map(|_| ())) } pub use remove_dir_impl::remove_dir_all; @@ -2140,7 +2140,7 @@ mod remove_dir_impl { if attr.file_type().is_symlink() { crate::fs::remove_file(p) } else { - run_path_with_cstr(p, |p| remove_dir_all_recursive(None, &p)) + run_path_with_cstr(p, &|p| remove_dir_all_recursive(None, &p)) } } diff --git a/library/std/src/sys/pal/unix/futex.rs b/library/std/src/sys/pal/unix/futex.rs index d310be6c7a1..26161a9af79 100644 --- a/library/std/src/sys/pal/unix/futex.rs +++ b/library/std/src/sys/pal/unix/futex.rs @@ -53,7 +53,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) - futex as *const AtomicU32 as *mut _, libc::UMTX_OP_WAIT_UINT_PRIVATE, expected as libc::c_ulong, - crate::ptr::invalid_mut(umtx_timeout_size), + crate::ptr::without_provenance_mut(umtx_timeout_size), umtx_timeout_ptr as *mut _, ) } else if #[cfg(any(target_os = "linux", target_os = "android"))] { diff --git a/library/std/src/sys/pal/unix/locks/mod.rs b/library/std/src/sys/pal/unix/locks/mod.rs deleted file mode 100644 index b2e0e49ad73..00000000000 --- a/library/std/src/sys/pal/unix/locks/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -cfg_if::cfg_if! 
{ - if #[cfg(any( - target_os = "linux", - target_os = "android", - all(target_os = "emscripten", target_feature = "atomics"), - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - ))] { - mod futex_mutex; - mod futex_rwlock; - mod futex_condvar; - pub(crate) use futex_mutex::Mutex; - pub(crate) use futex_rwlock::RwLock; - pub(crate) use futex_condvar::Condvar; - } else if #[cfg(target_os = "fuchsia")] { - mod fuchsia_mutex; - mod futex_rwlock; - mod futex_condvar; - pub(crate) use fuchsia_mutex::Mutex; - pub(crate) use futex_rwlock::RwLock; - pub(crate) use futex_condvar::Condvar; - } else { - mod pthread_mutex; - mod pthread_rwlock; - mod pthread_condvar; - pub(crate) use pthread_mutex::Mutex; - pub(crate) use pthread_rwlock::RwLock; - pub(crate) use pthread_condvar::Condvar; - } -} diff --git a/library/std/src/sys/pal/unix/locks/pthread_rwlock.rs b/library/std/src/sys/pal/unix/locks/pthread_rwlock.rs deleted file mode 100644 index 04662be9d82..00000000000 --- a/library/std/src/sys/pal/unix/locks/pthread_rwlock.rs +++ /dev/null @@ -1,195 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::mem::forget; -use crate::sync::atomic::{AtomicUsize, Ordering}; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; - -struct AllocatedRwLock { - inner: UnsafeCell<libc::pthread_rwlock_t>, - write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock - num_readers: AtomicUsize, -} - -unsafe impl Send for AllocatedRwLock {} -unsafe impl Sync for AllocatedRwLock {} - -pub struct RwLock { - inner: LazyBox<AllocatedRwLock>, -} - -impl LazyInit for AllocatedRwLock { - fn init() -> Box<Self> { - Box::new(AllocatedRwLock { - inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), - write_locked: UnsafeCell::new(false), - num_readers: AtomicUsize::new(0), - }) - } - - fn destroy(mut rwlock: Box<Self>) { - // We're not allowed to pthread_rwlock_destroy a locked rwlock, - // so check first if it's unlocked. - if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 { - // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked. - // In this case, we just leak the RwLock too. - forget(rwlock); - } - } - - fn cancel_init(_: Box<Self>) { - // In this case, we can just drop it without any checks, - // since it cannot have been locked yet. - } -} - -impl AllocatedRwLock { - #[inline] - unsafe fn raw_unlock(&self) { - let r = libc::pthread_rwlock_unlock(self.inner.get()); - debug_assert_eq!(r, 0); - } -} - -impl Drop for AllocatedRwLock { - fn drop(&mut self) { - let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) }; - // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a - // rwlock that was just initialized with - // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked) - // or pthread_rwlock_init() is called, this behaviour no longer occurs. 
- if cfg!(target_os = "dragonfly") { - debug_assert!(r == 0 || r == libc::EINVAL); - } else { - debug_assert_eq!(r, 0); - } - } -} - -impl RwLock { - #[inline] - pub const fn new() -> RwLock { - RwLock { inner: LazyBox::new() } - } - - #[inline] - pub fn read(&self) { - let lock = &*self.inner; - let r = unsafe { libc::pthread_rwlock_rdlock(lock.inner.get()) }; - - // According to POSIX, when a thread tries to acquire this read lock - // while it already holds the write lock - // (or vice versa, or tries to acquire the write lock twice), - // "the call shall either deadlock or return [EDEADLK]" - // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html, - // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html). - // So, in principle, all we have to do here is check `r == 0` to be sure we properly - // got the lock. - // - // However, (at least) glibc before version 2.25 does not conform to this spec, - // and can return `r == 0` even when this thread already holds the write lock. - // We thus check for this situation ourselves and panic when detecting that a thread - // got the write lock more than once, or got a read and a write lock. - if r == libc::EAGAIN { - panic!("rwlock maximum reader count exceeded"); - } else if r == libc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) { - // Above, we make sure to only access `write_locked` when `r == 0` to avoid - // data races. - if r == 0 { - // `pthread_rwlock_rdlock` succeeded when it should not have. - unsafe { - lock.raw_unlock(); - } - } - panic!("rwlock read lock would result in deadlock"); - } else { - // POSIX does not make guarantees about all the errors that may be returned. - // See issue #94705 for more details. - assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r); - lock.num_readers.fetch_add(1, Ordering::Relaxed); - } - } - - #[inline] - pub fn try_read(&self) -> bool { - let lock = &*self.inner; - let r = unsafe { libc::pthread_rwlock_tryrdlock(lock.inner.get()) }; - if r == 0 { - if unsafe { *lock.write_locked.get() } { - // `pthread_rwlock_tryrdlock` succeeded when it should not have. - unsafe { - lock.raw_unlock(); - } - false - } else { - lock.num_readers.fetch_add(1, Ordering::Relaxed); - true - } - } else { - false - } - } - - #[inline] - pub fn write(&self) { - let lock = &*self.inner; - let r = unsafe { libc::pthread_rwlock_wrlock(lock.inner.get()) }; - // See comments above for why we check for EDEADLK and write_locked. For the same reason, - // we also need to check that there are no readers (tracked in `num_readers`). - if r == libc::EDEADLK - || (r == 0 && unsafe { *lock.write_locked.get() }) - || lock.num_readers.load(Ordering::Relaxed) != 0 - { - // Above, we make sure to only access `write_locked` when `r == 0` to avoid - // data races. - if r == 0 { - // `pthread_rwlock_wrlock` succeeded when it should not have. - unsafe { - lock.raw_unlock(); - } - } - panic!("rwlock write lock would result in deadlock"); - } else { - // According to POSIX, for a properly initialized rwlock this can only - // return EDEADLK or 0. We rely on that. - debug_assert_eq!(r, 0); - } - - unsafe { - *lock.write_locked.get() = true; - } - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - let lock = &*self.inner; - let r = libc::pthread_rwlock_trywrlock(lock.inner.get()); - if r == 0 { - if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 { - // `pthread_rwlock_trywrlock` succeeded when it should not have. 
- lock.raw_unlock(); - false - } else { - *lock.write_locked.get() = true; - true - } - } else { - false - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - let lock = &*self.inner; - debug_assert!(!*lock.write_locked.get()); - lock.num_readers.fetch_sub(1, Ordering::Relaxed); - lock.raw_unlock(); - } - - #[inline] - pub unsafe fn write_unlock(&self) { - let lock = &*self.inner; - debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0); - debug_assert!(*lock.write_locked.get()); - *lock.write_locked.get() = false; - lock.raw_unlock(); - } -} diff --git a/library/std/src/sys/pal/unix/mod.rs b/library/std/src/sys/pal/unix/mod.rs index b5da5f870ec..04b8c5ca916 100644 --- a/library/std/src/sys/pal/unix/mod.rs +++ b/library/std/src/sys/pal/unix/mod.rs @@ -11,8 +11,6 @@ pub mod weak; pub mod alloc; pub mod android; pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; pub mod fd; pub mod fs; @@ -22,15 +20,12 @@ pub mod io; pub mod kernel_copy; #[cfg(target_os = "l4re")] mod l4re; -pub mod locks; pub mod memchr; #[cfg(not(target_os = "l4re"))] pub mod net; #[cfg(target_os = "l4re")] pub use self::l4re::net; pub mod os; -pub mod os_str; -pub mod path; pub mod pipe; pub mod process; pub mod rand; diff --git a/library/std/src/sys/pal/unix/net.rs b/library/std/src/sys/pal/unix/net.rs index 8f537de7026..1b6a6bb2c5c 100644 --- a/library/std/src/sys/pal/unix/net.rs +++ b/library/std/src/sys/pal/unix/net.rs @@ -272,7 +272,7 @@ impl Socket { ) })?; unsafe { - buf.advance(ret as usize); + buf.advance_unchecked(ret as usize); } Ok(()) } diff --git a/library/std/src/sys/pal/unix/os.rs b/library/std/src/sys/pal/unix/os.rs index 881b3a25c51..af2b9db4685 100644 --- a/library/std/src/sys/pal/unix/os.rs +++ b/library/std/src/sys/pal/unix/os.rs @@ -186,7 +186,7 @@ pub fn chdir(_p: &path::Path) -> io::Result<()> { #[cfg(not(target_os = "espidf"))] pub fn chdir(p: &path::Path) -> io::Result<()> { - let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?; + let result = run_path_with_cstr(p, &|p| unsafe { Ok(libc::chdir(p.as_ptr())) })?; if result == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } @@ -643,7 +643,7 @@ pub fn env() -> Env { pub fn getenv(k: &OsStr) -> Option<OsString> { // environment variables with a nul byte can't be set, so their value is // always None as well - run_with_cstr(k.as_bytes(), |k| { + run_with_cstr(k.as_bytes(), &|k| { let _guard = env_read_lock(); let v = unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char; @@ -661,8 +661,8 @@ pub fn getenv(k: &OsStr) -> Option<OsString> { } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { - run_with_cstr(k.as_bytes(), |k| { - run_with_cstr(v.as_bytes(), |v| { + run_with_cstr(k.as_bytes(), &|k| { + run_with_cstr(v.as_bytes(), &|v| { let _guard = ENV_LOCK.write(); cvt(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop) }) @@ -670,7 +670,7 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { } pub fn unsetenv(n: &OsStr) -> io::Result<()> { - run_with_cstr(n.as_bytes(), |nbuf| { + run_with_cstr(n.as_bytes(), &|nbuf| { let _guard = ENV_LOCK.write(); cvt(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop) }) diff --git a/library/std/src/sys/pal/unix/process/process_common/tests.rs b/library/std/src/sys/pal/unix/process/process_common/tests.rs index 4e41efc9096..823b4a56336 100644 --- a/library/std/src/sys/pal/unix/process/process_common/tests.rs +++ b/library/std/src/sys/pal/unix/process/process_common/tests.rs @@ -170,7 +170,7 @@ fn test_program_kind() { )))] 
#[test] fn unix_exit_statuses() { - use crate::num::NonZeroI32; + use crate::num::NonZero; use crate::os::unix::process::ExitStatusExt; use crate::process::*; @@ -182,7 +182,7 @@ fn unix_exit_statuses() { assert_eq!(exit_status.code(), Some(exit_code)); - if let Ok(nz) = NonZeroI32::try_from(exit_code) { + if let Ok(nz) = NonZero::try_from(exit_code) { assert!(!exit_status.success()); let es_error = exit_status.exit_ok().unwrap_err(); assert_eq!(es_error.code().unwrap(), i32::from(nz)); diff --git a/library/std/src/sys/pal/unix/process/process_fuchsia.rs b/library/std/src/sys/pal/unix/process/process_fuchsia.rs index 9931c2af2f1..b6a74fb4831 100644 --- a/library/std/src/sys/pal/unix/process/process_fuchsia.rs +++ b/library/std/src/sys/pal/unix/process/process_fuchsia.rs @@ -1,7 +1,7 @@ use crate::fmt; use crate::io; use crate::mem; -use crate::num::{NonZeroI32, NonZeroI64}; +use crate::num::NonZero; use crate::ptr; use crate::sys::process::process_common::*; @@ -240,7 +240,7 @@ pub struct ExitStatus(i64); impl ExitStatus { pub fn exit_ok(&self) -> Result<(), ExitStatusError> { - match NonZeroI64::try_from(self.0) { + match NonZero::try_from(self.0) { /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)), /* was zero, couldn't convert */ Err(_) => Ok(()), } @@ -314,7 +314,7 @@ impl fmt::Display for ExitStatus { } #[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct ExitStatusError(NonZeroI64); +pub struct ExitStatusError(NonZero<i64>); impl Into<ExitStatus> for ExitStatusError { fn into(self) -> ExitStatus { @@ -323,7 +323,7 @@ impl Into<ExitStatus> for ExitStatusError { } impl ExitStatusError { - pub fn code(self) -> Option<NonZeroI32> { + pub fn code(self) -> Option<NonZero<i32>> { // fixme: affected by the same bug as ExitStatus::code() ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap()) } diff --git a/library/std/src/sys/pal/unix/process/process_unix.rs b/library/std/src/sys/pal/unix/process/process_unix.rs index fac6d92439e..d5a77085725 100644 --- a/library/std/src/sys/pal/unix/process/process_unix.rs +++ b/library/std/src/sys/pal/unix/process/process_unix.rs @@ -1,11 +1,10 @@ use crate::fmt; use crate::io::{self, Error, ErrorKind}; use crate::mem; -use crate::num::NonZeroI32; +use crate::num::NonZero; use crate::sys; use crate::sys::cvt; use crate::sys::process::process_common::*; -use core::ffi::NonZero_c_int; #[cfg(target_os = "linux")] use crate::os::linux::process::PidFd; @@ -147,8 +146,7 @@ impl Command { #[cfg(not(target_os = "linux"))] let pidfd = -1; - // Safety: We obtained the pidfd from calling `clone3` with - // `CLONE_PIDFD` so it's valid an otherwise unowned. + // Safety: We obtained the pidfd (on Linux) using SOCK_SEQPACKET, so it's valid. let mut p = unsafe { Process::new(pid, pidfd) }; let mut bytes = [0; 8]; @@ -936,7 +934,7 @@ impl ExitStatus { // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not // true for a platform pretending to be Unix, the tests (our doctests, and also // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. 
- match NonZero_c_int::try_from(self.0) { + match NonZero::try_from(self.0) { /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)), /* was zero, couldn't convert */ Err(_) => Ok(()), } @@ -1093,7 +1091,7 @@ impl fmt::Display for ExitStatus { } #[derive(PartialEq, Eq, Clone, Copy)] -pub struct ExitStatusError(NonZero_c_int); +pub struct ExitStatusError(NonZero<c_int>); impl Into<ExitStatus> for ExitStatusError { fn into(self) -> ExitStatus { @@ -1108,7 +1106,7 @@ impl fmt::Debug for ExitStatusError { } impl ExitStatusError { - pub fn code(self) -> Option<NonZeroI32> { + pub fn code(self) -> Option<NonZero<i32>> { ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap()) } } diff --git a/library/std/src/sys/pal/unix/process/process_unix/tests.rs b/library/std/src/sys/pal/unix/process/process_unix/tests.rs index 6e952ed7c42..0a6c6ec19fc 100644 --- a/library/std/src/sys/pal/unix/process/process_unix/tests.rs +++ b/library/std/src/sys/pal/unix/process/process_unix/tests.rs @@ -62,13 +62,14 @@ fn test_command_fork_no_unwind() { } #[test] -#[cfg(target_os = "linux")] +#[cfg(target_os = "linux")] // pidfds are a linux-specific concept fn test_command_pidfd() { use crate::assert_matches::assert_matches; use crate::os::fd::{AsRawFd, RawFd}; use crate::os::linux::process::{ChildExt, CommandExt}; use crate::process::Command; + // pidfds require the pidfd_open syscall let our_pid = crate::process::id(); let pidfd = unsafe { libc::syscall(libc::SYS_pidfd_open, our_pid, 0) }; let pidfd_open_available = if pidfd >= 0 { @@ -81,7 +82,9 @@ fn test_command_pidfd() { // always exercise creation attempts let mut child = Command::new("false").create_pidfd(true).spawn().unwrap(); - // but only check if we know that the kernel supports pidfds + // but only check if we know that the kernel supports pidfds. + // We don't assert the precise value, since the standard library + // might have opened other file descriptors before our code runs. 
if pidfd_open_available { assert!(child.pidfd().is_ok()); } @@ -97,4 +100,17 @@ fn test_command_pidfd() { child.kill().expect("failed to kill child"); let status = child.wait().expect("error waiting on pidfd"); assert_eq!(status.signal(), Some(libc::SIGKILL)); + + let _ = Command::new("echo") + .create_pidfd(false) + .spawn() + .unwrap() + .pidfd() + .expect_err("pidfd should not have been created when create_pid(false) is set"); + + let _ = Command::new("echo") + .spawn() + .unwrap() + .pidfd() + .expect_err("pidfd should not have been created"); } diff --git a/library/std/src/sys/pal/unix/process/process_unsupported.rs b/library/std/src/sys/pal/unix/process/process_unsupported.rs index 9453c8a384e..33d359d3f84 100644 --- a/library/std/src/sys/pal/unix/process/process_unsupported.rs +++ b/library/std/src/sys/pal/unix/process/process_unsupported.rs @@ -1,9 +1,8 @@ use crate::fmt; use crate::io; -use crate::num::NonZeroI32; +use crate::num::NonZero; use crate::sys::pal::unix::unsupported::*; use crate::sys::process::process_common::*; -use core::ffi::NonZero_c_int; use libc::{c_int, pid_t}; @@ -59,7 +58,7 @@ mod wait_status; pub use wait_status::ExitStatus; #[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct ExitStatusError(NonZero_c_int); +pub struct ExitStatusError(NonZero<c_int>); impl Into<ExitStatus> for ExitStatusError { fn into(self) -> ExitStatus { @@ -68,7 +67,7 @@ impl Into<ExitStatus> for ExitStatusError { } impl ExitStatusError { - pub fn code(self) -> Option<NonZeroI32> { + pub fn code(self) -> Option<NonZero<i32>> { ExitStatus::from(c_int::from(self.0)).code().map(|st| st.try_into().unwrap()) } } diff --git a/library/std/src/sys/pal/unix/process/process_unsupported/wait_status.rs b/library/std/src/sys/pal/unix/process/process_unsupported/wait_status.rs index 72b7ae18cff..e6dfadcf4a4 100644 --- a/library/std/src/sys/pal/unix/process/process_unsupported/wait_status.rs +++ b/library/std/src/sys/pal/unix/process/process_unsupported/wait_status.rs @@ -1,10 +1,9 @@ //! Emulated wait status for non-Unix #[cfg(unix) platforms //! //! Separate module to facilitate testing against a real Unix implementation. -use core::ffi::NonZero_c_int; - use crate::ffi::c_int; use crate::fmt; +use crate::num::NonZero; use super::ExitStatusError; @@ -50,7 +49,7 @@ impl ExitStatus { // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not // true for a platform pretending to be Unix, the tests (our doctests, and also // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. - match NonZero_c_int::try_from(self.wait_status) { + match NonZero::try_from(self.wait_status) { /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)), /* was zero, couldn't convert */ Err(_) => Ok(()), } diff --git a/library/std/src/sys/pal/unix/process/process_vxworks.rs b/library/std/src/sys/pal/unix/process/process_vxworks.rs index 1ff2b2fb383..76179e0910d 100644 --- a/library/std/src/sys/pal/unix/process/process_vxworks.rs +++ b/library/std/src/sys/pal/unix/process/process_vxworks.rs @@ -1,11 +1,10 @@ use crate::fmt; use crate::io::{self, Error, ErrorKind}; -use crate::num::NonZeroI32; +use crate::num::NonZero; use crate::sys; use crate::sys::cvt; use crate::sys::process::process_common::*; use crate::sys_common::thread; -use core::ffi::NonZero_c_int; use libc::RTP_ID; use libc::{self, c_char, c_int}; @@ -197,7 +196,7 @@ impl ExitStatus { // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. 
If it is not // true for a platform pretending to be Unix, the tests (our doctests, and also // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. - match NonZero_c_int::try_from(self.0) { + match NonZero::try_from(self.0) { Ok(failure) => Err(ExitStatusError(failure)), Err(_) => Ok(()), } @@ -249,7 +248,7 @@ impl fmt::Display for ExitStatus { } #[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct ExitStatusError(NonZero_c_int); +pub struct ExitStatusError(NonZero<c_int>); impl Into<ExitStatus> for ExitStatusError { fn into(self) -> ExitStatus { @@ -258,7 +257,7 @@ impl Into<ExitStatus> for ExitStatusError { } impl ExitStatusError { - pub fn code(self) -> Option<NonZeroI32> { + pub fn code(self) -> Option<NonZero<i32>> { ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap()) } } diff --git a/library/std/src/sys/pal/unix/rand.rs b/library/std/src/sys/pal/unix/rand.rs index cf0fe0f47c5..5c32957bc51 100644 --- a/library/std/src/sys/pal/unix/rand.rs +++ b/library/std/src/sys/pal/unix/rand.rs @@ -62,7 +62,7 @@ mod imp { unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), libc::GRND_NONBLOCK) } } - #[cfg(any(target_os = "espidf", target_os = "horizon", target_os = "freebsd"))] + #[cfg(any(target_os = "espidf", target_os = "horizon", target_os = "freebsd", netbsd10))] fn getrandom(buf: &mut [u8]) -> libc::ssize_t { unsafe { libc::getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) } } @@ -72,7 +72,8 @@ mod imp { target_os = "android", target_os = "espidf", target_os = "horizon", - target_os = "freebsd" + target_os = "freebsd", + netbsd10 )))] fn getrandom_fill_bytes(_buf: &mut [u8]) -> bool { false @@ -83,7 +84,8 @@ mod imp { target_os = "android", target_os = "espidf", target_os = "horizon", - target_os = "freebsd" + target_os = "freebsd", + netbsd10 ))] fn getrandom_fill_bytes(v: &mut [u8]) -> bool { use crate::sync::atomic::{AtomicBool, Ordering}; @@ -106,7 +108,18 @@ mod imp { // supported on the current kernel. // // Also fall back in case it is disabled by something like - // seccomp or inside of virtual machines. + // seccomp or inside of docker. + // + // If the `getrandom` syscall is not implemented in the current kernel version it should return an + // `ENOSYS` error. Docker also blocks the whole syscall inside unprivileged containers, and + // returns `EPERM` (instead of `ENOSYS`) when a program tries to invoke the syscall. Because of + // that we need to check for *both* `ENOSYS` and `EPERM`. + // + // Note that Docker's behavior is breaking other projects (notably glibc), so they're planning + // to update their filtering to return `ENOSYS` in a future release: + // + // https://github.com/moby/moby/issues/42680 + // GETRANDOM_UNAVAILABLE.store(true, Ordering::Relaxed); return false; } else if err == libc::EAGAIN { @@ -219,7 +232,7 @@ mod imp { } // FIXME: once the 10.x release becomes the minimum, this can be dropped for simplification. 
-#[cfg(target_os = "netbsd")] +#[cfg(all(target_os = "netbsd", not(netbsd10)))] mod imp { use crate::ptr; diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs index 7e4a01a5ecd..97976407bb4 100644 --- a/library/std/src/sys/pal/unix/thread.rs +++ b/library/std/src/sys/pal/unix/thread.rs @@ -2,7 +2,7 @@ use crate::cmp; use crate::ffi::CStr; use crate::io; use crate::mem; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::ptr; use crate::sys::{os, stack_overflow}; use crate::time::Duration; @@ -306,7 +306,7 @@ fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_W result } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { cfg_if::cfg_if! { if #[cfg(any( target_os = "android", @@ -338,7 +338,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { // some old MIPS kernels were buggy and zero-initialized the mask if // none was explicitly set. // In that case we use the sysconf fallback. - if let Some(count) = NonZeroUsize::new(count) { + if let Some(count) = NonZero::new(count) { return Ok(count) } } @@ -351,7 +351,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { let count = cpus as usize; // Cover the unusual situation where we were able to get the quota but not the affinity mask let count = count.min(quota); - Ok(unsafe { NonZeroUsize::new_unchecked(count) }) + Ok(unsafe { NonZero::new_unchecked(count) }) } } } else if #[cfg(any( @@ -375,7 +375,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { ) == 0 { let count = libc::CPU_COUNT(&set) as usize; if count > 0 { - return Ok(NonZeroUsize::new_unchecked(count)); + return Ok(NonZero::new_unchecked(count)); } } } @@ -397,7 +397,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { } } libc::_cpuset_destroy(set); - if let Some(count) = NonZeroUsize::new(count) { + if let Some(count) = NonZero::new(count) { return Ok(count); } } @@ -433,7 +433,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { } } - Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) }) + Ok(unsafe { NonZero::new_unchecked(cpus as usize) }) } else if #[cfg(target_os = "nto")] { unsafe { use libc::_syspage_ptr; @@ -441,7 +441,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { Err(io::const_io_error!(io::ErrorKind::NotFound, "No syspage available")) } else { let cpus = (*_syspage_ptr).num_cpu; - NonZeroUsize::new(cpus as usize) + NonZero::new(cpus as usize) .ok_or(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")) } } @@ -456,7 +456,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")); } - Ok(NonZeroUsize::new_unchecked(sinfo.cpu_count as usize)) + Ok(NonZero::new_unchecked(sinfo.cpu_count as usize)) } } else { // FIXME: implement on vxWorks, Redox, l4re @@ -847,11 +847,31 @@ pub mod guard { let stackptr = get_stack_start_aligned()?; let guardaddr = stackptr.addr(); // Technically the number of guard pages is tunable and controlled - // by the security.bsd.stack_guard_page sysctl, but there are - // few reasons to change it from the default. The default value has - // been 1 ever since FreeBSD 11.1 and 10.4. 
- const GUARD_PAGES: usize = 1; - let guard = guardaddr..guardaddr + GUARD_PAGES * page_size; + // by the security.bsd.stack_guard_page sysctl. + // By default it is 1, checking once is enough since it is + // a boot time config value. + static LOCK: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new(); + let guard = guardaddr + ..guardaddr + + *LOCK.get_or_init(|| { + use crate::sys::weak::dlsym; + dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int); + let mut guard: usize = 0; + let mut size = crate::mem::size_of_val(&guard); + let oid = crate::ffi::CStr::from_bytes_with_nul( + b"security.bsd.stack_guard_page\0", + ) + .unwrap(); + match sysctlbyname.get() { + Some(fcn) => { + if fcn(oid.as_ptr(), &mut guard as *mut _ as *mut _, &mut size as *mut _ as *mut _, crate::ptr::null_mut(), 0) == 0 { + return guard; + } + return 1; + }, + _ => { return 1; } + } + }) * page_size; Some(guard) } else if cfg!(target_os = "openbsd") { // OpenBSD stack already includes a guard page, and stack is diff --git a/library/std/src/sys/pal/unix/thread_local_dtor.rs b/library/std/src/sys/pal/unix/thread_local_dtor.rs index 58f7ab84101..8857f96501c 100644 --- a/library/std/src/sys/pal/unix/thread_local_dtor.rs +++ b/library/std/src/sys/pal/unix/thread_local_dtor.rs @@ -11,13 +11,14 @@ // Note, however, that we run on lots older linuxes, as well as cross // compiling from a newer linux to an older linux, so we also have a // fallback implementation to use as well. -#[cfg_attr(bootstrap, allow(unexpected_cfgs))] #[cfg(any( target_os = "linux", target_os = "android", target_os = "fuchsia", target_os = "redox", - target_os = "hurd" + target_os = "hurd", + target_os = "netbsd", + target_os = "dragonfly" ))] // FIXME: The Rust compiler currently omits weakly function definitions (i.e., // __cxa_thread_atexit_impl) and its metadata from LLVM IR. @@ -115,7 +116,8 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { target_os = "vxworks", target_os = "horizon", target_os = "emscripten", - target_os = "aix" + target_os = "aix", + target_os = "freebsd", ))] #[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. wasm32-unknown-emscripten) pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { diff --git a/library/std/src/sys/pal/unix/thread_parking/netbsd.rs b/library/std/src/sys/pal/unix/thread_parking/netbsd.rs index 3be08122138..5eeb37f8763 100644 --- a/library/std/src/sys/pal/unix/thread_parking/netbsd.rs +++ b/library/std/src/sys/pal/unix/thread_parking/netbsd.rs @@ -25,7 +25,7 @@ pub fn current() -> ThreadId { #[inline] pub fn park(hint: usize) { unsafe { - ___lwp_park60(0, 0, ptr::null_mut(), 0, ptr::invalid(hint), ptr::null()); + ___lwp_park60(0, 0, ptr::null_mut(), 0, ptr::without_provenance(hint), ptr::null()); } } @@ -40,13 +40,20 @@ pub fn park_timeout(dur: Duration, hint: usize) { // Timeout needs to be mutable since it is modified on NetBSD 9.0 and // above. 
unsafe { - ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, ptr::invalid(hint), ptr::null()); + ___lwp_park60( + CLOCK_MONOTONIC, + 0, + &mut timeout, + 0, + ptr::without_provenance(hint), + ptr::null(), + ); } } #[inline] pub fn unpark(tid: ThreadId, hint: usize) { unsafe { - _lwp_unpark(tid, ptr::invalid(hint)); + _lwp_unpark(tid, ptr::without_provenance(hint)); } } diff --git a/library/std/src/sys/pal/unix/weak.rs b/library/std/src/sys/pal/unix/weak.rs index 61088ff16ed..48cc8633e93 100644 --- a/library/std/src/sys/pal/unix/weak.rs +++ b/library/std/src/sys/pal/unix/weak.rs @@ -80,7 +80,11 @@ pub(crate) struct DlsymWeak<F> { impl<F> DlsymWeak<F> { pub(crate) const fn new(name: &'static str) -> Self { - DlsymWeak { name, func: AtomicPtr::new(ptr::invalid_mut(1)), _marker: PhantomData } + DlsymWeak { + name, + func: AtomicPtr::new(ptr::without_provenance_mut(1)), + _marker: PhantomData, + } } #[inline] diff --git a/library/std/src/sys/pal/unsupported/mod.rs b/library/std/src/sys/pal/unsupported/mod.rs index e1a38de6471..9ce275ee72d 100644 --- a/library/std/src/sys/pal/unsupported/mod.rs +++ b/library/std/src/sys/pal/unsupported/mod.rs @@ -2,19 +2,12 @@ pub mod alloc; pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; pub mod fs; pub mod io; -pub mod locks; pub mod net; pub mod once; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -#[path = "../unix/path.rs"] -pub mod path; pub mod pipe; pub mod process; pub mod stdio; diff --git a/library/std/src/sys/pal/unsupported/net.rs b/library/std/src/sys/pal/unsupported/net.rs index bbc52703f96..931fe9ba246 100644 --- a/library/std/src/sys/pal/unsupported/net.rs +++ b/library/std/src/sys/pal/unsupported/net.rs @@ -364,7 +364,4 @@ pub mod netc { pub sin6_flowinfo: u32, pub sin6_scope_id: u32, } - - #[derive(Copy, Clone)] - pub struct sockaddr {} } diff --git a/library/std/src/sys/pal/unsupported/process.rs b/library/std/src/sys/pal/unsupported/process.rs index a639afcc674..6a989dd3e76 100644 --- a/library/std/src/sys/pal/unsupported/process.rs +++ b/library/std/src/sys/pal/unsupported/process.rs @@ -2,7 +2,7 @@ use crate::ffi::OsStr; use crate::fmt; use crate::io; use crate::marker::PhantomData; -use crate::num::NonZeroI32; +use crate::num::NonZero; use crate::path::Path; use crate::sys::fs::File; use crate::sys::pipe::AnonPipe; @@ -170,7 +170,7 @@ impl Into<ExitStatus> for ExitStatusError { } impl ExitStatusError { - pub fn code(self) -> Option<NonZeroI32> { + pub fn code(self) -> Option<NonZero<i32>> { self.0 } } diff --git a/library/std/src/sys/pal/unsupported/thread.rs b/library/std/src/sys/pal/unsupported/thread.rs index a8db251de20..cd1ae7f7d11 100644 --- a/library/std/src/sys/pal/unsupported/thread.rs +++ b/library/std/src/sys/pal/unsupported/thread.rs @@ -1,7 +1,7 @@ use super::unsupported; use crate::ffi::CStr; use crate::io; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::time::Duration; pub struct Thread(!); @@ -31,7 +31,7 @@ impl Thread { } } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { unsupported() } diff --git a/library/std/src/sys/pal/wasi/fd.rs b/library/std/src/sys/pal/wasi/fd.rs index d7295a799da..8966e4b80ad 100644 --- a/library/std/src/sys/pal/wasi/fd.rs +++ b/library/std/src/sys/pal/wasi/fd.rs @@ -60,7 +60,7 @@ impl WasiFd { }]; match wasi::fd_read(self.as_raw_fd() as wasi::Fd, &bufs) { Ok(n) => { - buf.advance(n); + buf.advance_unchecked(n); Ok(()) } Err(e) => Err(err2io(e)), diff --git 
a/library/std/src/sys/pal/wasi/fs.rs b/library/std/src/sys/pal/wasi/fs.rs index e8238665452..529b82e0198 100644 --- a/library/std/src/sys/pal/wasi/fs.rs +++ b/library/std/src/sys/pal/wasi/fs.rs @@ -698,7 +698,7 @@ fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> { /// Note that this can fail if `p` doesn't look like it can be opened relative /// to any pre-opened file descriptor. fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> { - run_path_with_cstr(p, |p| { + run_path_with_cstr(p, &|p| { let mut buf = Vec::<u8>::with_capacity(512); loop { unsafe { diff --git a/library/std/src/sys/pal/wasi/mod.rs b/library/std/src/sys/pal/wasi/mod.rs index 5919cc506d9..084b8e0e216 100644 --- a/library/std/src/sys/pal/wasi/mod.rs +++ b/library/std/src/sys/pal/wasi/mod.rs @@ -20,8 +20,6 @@ use crate::mem; #[path = "../unix/alloc.rs"] pub mod alloc; pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; pub mod fd; pub mod fs; @@ -32,10 +30,6 @@ pub mod io; pub mod net; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -#[path = "../unix/path.rs"] -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] @@ -49,20 +43,7 @@ pub mod thread_local_key; pub mod time; cfg_if::cfg_if! { - if #[cfg(target_feature = "atomics")] { - #[path = "../unix/locks"] - pub mod locks { - #![allow(unsafe_op_in_unsafe_fn)] - mod futex_condvar; - mod futex_mutex; - mod futex_rwlock; - pub(crate) use futex_condvar::Condvar; - pub(crate) use futex_mutex::Mutex; - pub(crate) use futex_rwlock::RwLock; - } - } else { - #[path = "../unsupported/locks/mod.rs"] - pub mod locks; + if #[cfg(not(target_feature = "atomics"))] { #[path = "../unsupported/once.rs"] pub mod once; #[path = "../unsupported/thread_parking.rs"] diff --git a/library/std/src/sys/pal/wasi/net.rs b/library/std/src/sys/pal/wasi/net.rs index 2239880ffbe..2098d05db0b 100644 --- a/library/std/src/sys/pal/wasi/net.rs +++ b/library/std/src/sys/pal/wasi/net.rs @@ -538,7 +538,4 @@ pub mod netc { pub sin6_flowinfo: u32, pub sin6_scope_id: u32, } - - #[derive(Copy, Clone)] - pub struct sockaddr {} } diff --git a/library/std/src/sys/pal/wasi/os.rs b/library/std/src/sys/pal/wasi/os.rs index 530d3602172..d62ff8a2f18 100644 --- a/library/std/src/sys/pal/wasi/os.rs +++ b/library/std/src/sys/pal/wasi/os.rs @@ -95,7 +95,7 @@ pub fn getcwd() -> io::Result<PathBuf> { } pub fn chdir(p: &path::Path) -> io::Result<()> { - let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?; + let result = run_path_with_cstr(p, &|p| unsafe { Ok(libc::chdir(p.as_ptr())) })?; match result == (0 as libc::c_int) { true => Ok(()), false => Err(io::Error::last_os_error()), @@ -227,7 +227,7 @@ pub fn env() -> Env { pub fn getenv(k: &OsStr) -> Option<OsString> { // environment variables with a nul byte can't be set, so their value is // always None as well - run_with_cstr(k.as_bytes(), |k| { + run_with_cstr(k.as_bytes(), &|k| { let _guard = env_read_lock(); let v = unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char; @@ -245,8 +245,8 @@ pub fn getenv(k: &OsStr) -> Option<OsString> { } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { - run_with_cstr(k.as_bytes(), |k| { - run_with_cstr(v.as_bytes(), |v| unsafe { + run_with_cstr(k.as_bytes(), &|k| { + run_with_cstr(v.as_bytes(), &|v| unsafe { let _guard = env_write_lock(); cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) }) @@ -254,7 +254,7 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { } 
pub fn unsetenv(n: &OsStr) -> io::Result<()> { - run_with_cstr(n.as_bytes(), |nbuf| unsafe { + run_with_cstr(n.as_bytes(), &|nbuf| unsafe { let _guard = env_write_lock(); cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) }) diff --git a/library/std/src/sys/pal/wasi/thread.rs b/library/std/src/sys/pal/wasi/thread.rs index a0eefa8811a..77d8b4378e7 100644 --- a/library/std/src/sys/pal/wasi/thread.rs +++ b/library/std/src/sys/pal/wasi/thread.rs @@ -1,7 +1,7 @@ use crate::ffi::CStr; use crate::io; use crate::mem; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::sys::unsupported; use crate::time::Duration; @@ -186,7 +186,7 @@ impl Thread { } } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { unsupported() } diff --git a/library/std/src/sys/pal/wasm/atomics/thread.rs b/library/std/src/sys/pal/wasm/atomics/thread.rs index 714b7049227..49f936f1449 100644 --- a/library/std/src/sys/pal/wasm/atomics/thread.rs +++ b/library/std/src/sys/pal/wasm/atomics/thread.rs @@ -1,6 +1,6 @@ use crate::ffi::CStr; use crate::io; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::sys::unsupported; use crate::time::Duration; @@ -40,7 +40,7 @@ impl Thread { pub fn join(self) {} } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { unsupported() } diff --git a/library/std/src/sys/pal/wasm/mod.rs b/library/std/src/sys/pal/wasm/mod.rs index 6c05b56e1bf..40b15120e6d 100644 --- a/library/std/src/sys/pal/wasm/mod.rs +++ b/library/std/src/sys/pal/wasm/mod.rs @@ -19,8 +19,6 @@ pub mod alloc; #[path = "../unsupported/args.rs"] pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; pub mod env; #[path = "../unsupported/fs.rs"] pub mod fs; @@ -30,10 +28,6 @@ pub mod io; pub mod net; #[path = "../unsupported/os.rs"] pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -#[path = "../unix/path.rs"] -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] @@ -49,23 +43,11 @@ pub mod time; cfg_if::cfg_if! 
{ if #[cfg(target_feature = "atomics")] { - #[path = "../unix/locks"] - pub mod locks { - #![allow(unsafe_op_in_unsafe_fn)] - mod futex_condvar; - mod futex_mutex; - mod futex_rwlock; - pub(crate) use futex_condvar::Condvar; - pub(crate) use futex_mutex::Mutex; - pub(crate) use futex_rwlock::RwLock; - } #[path = "atomics/futex.rs"] pub mod futex; #[path = "atomics/thread.rs"] pub mod thread; } else { - #[path = "../unsupported/locks/mod.rs"] - pub mod locks; #[path = "../unsupported/once.rs"] pub mod once; #[path = "../unsupported/thread.rs"] diff --git a/library/std/src/sys/pal/windows/alloc.rs b/library/std/src/sys/pal/windows/alloc.rs index d53ea16005f..270eca37b14 100644 --- a/library/std/src/sys/pal/windows/alloc.rs +++ b/library/std/src/sys/pal/windows/alloc.rs @@ -95,7 +95,7 @@ static HEAP: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut()); #[inline] fn init_or_get_process_heap() -> c::HANDLE { let heap = HEAP.load(Ordering::Relaxed); - if heap.is_null() { + if core::intrinsics::unlikely(heap.is_null()) { // `HEAP` has not yet been successfully initialized let heap = unsafe { GetProcessHeap() }; if !heap.is_null() { @@ -115,6 +115,16 @@ fn init_or_get_process_heap() -> c::HANDLE { } } +#[inline(never)] +fn process_heap_alloc(flags: c::DWORD, dwBytes: c::SIZE_T) -> c::LPVOID { + let heap = init_or_get_process_heap(); + if core::intrinsics::unlikely(heap.is_null()) { + return ptr::null_mut(); + } + // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`. + unsafe { HeapAlloc(heap, flags, dwBytes) } +} + // Get a non-null handle to the default heap of the current process. // SAFETY: `HEAP` must have been successfully initialized. #[inline] @@ -133,25 +143,17 @@ struct Header(*mut u8); // initialized. #[inline] unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 { - let heap = init_or_get_process_heap(); - if heap.is_null() { - // Allocation has failed, could not get the current process heap. - return ptr::null_mut(); - } - // Allocated memory will be either zeroed or uninitialized. let flags = if zeroed { HEAP_ZERO_MEMORY } else { 0 }; if layout.align() <= MIN_ALIGN { - // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`. // The returned pointer points to the start of an allocated block. - unsafe { HeapAlloc(heap, flags, layout.size()) as *mut u8 } + process_heap_alloc(flags, layout.size()) as *mut u8 } else { // Allocate extra padding in order to be able to satisfy the alignment. let total = layout.align() + layout.size(); - // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`. - let ptr = unsafe { HeapAlloc(heap, flags, total) as *mut u8 }; + let ptr = process_heap_alloc(flags, total) as *mut u8; if ptr.is_null() { // Allocation has failed. return ptr::null_mut(); diff --git a/library/std/src/sys/pal/windows/args.rs b/library/std/src/sys/pal/windows/args.rs index fbbdbc21265..2ecfe088d10 100644 --- a/library/std/src/sys/pal/windows/args.rs +++ b/library/std/src/sys/pal/windows/args.rs @@ -10,7 +10,7 @@ use super::os::current_exe; use crate::ffi::OsString; use crate::fmt; use crate::io; -use crate::num::NonZeroU16; +use crate::num::NonZero; use crate::os::windows::prelude::*; use crate::path::{Path, PathBuf}; use crate::sys::path::get_long_path; @@ -21,12 +21,12 @@ use crate::vec; use crate::iter; -/// This is the const equivalent to `NonZeroU16::new(n).unwrap()` +/// This is the const equivalent to `NonZero::new(n).unwrap()` /// /// FIXME: This can be removed once `Option::unwrap` is stably const. 
/// See the `const_option` feature (#67441). -const fn non_zero_u16(n: u16) -> NonZeroU16 { - match NonZeroU16::new(n) { +const fn non_zero_u16(n: u16) -> NonZero<u16> { + match NonZero::new(n) { Some(n) => n, None => panic!("called `unwrap` on a `None` value"), } @@ -69,10 +69,10 @@ fn parse_lp_cmd_line<'a, F: Fn() -> OsString>( lp_cmd_line: Option<WStrUnits<'a>>, exe_name: F, ) -> Vec<OsString> { - const BACKSLASH: NonZeroU16 = non_zero_u16(b'\\' as u16); - const QUOTE: NonZeroU16 = non_zero_u16(b'"' as u16); - const TAB: NonZeroU16 = non_zero_u16(b'\t' as u16); - const SPACE: NonZeroU16 = non_zero_u16(b' ' as u16); + const BACKSLASH: NonZero<u16> = non_zero_u16(b'\\' as u16); + const QUOTE: NonZero<u16> = non_zero_u16(b'"' as u16); + const TAB: NonZero<u16> = non_zero_u16(b'\t' as u16); + const SPACE: NonZero<u16> = non_zero_u16(b' ' as u16); let mut ret_val = Vec::new(); // If the cmd line pointer is null or it points to an empty string then diff --git a/library/std/src/sys/pal/windows/c.rs b/library/std/src/sys/pal/windows/c.rs index d55d9bace81..6b12d7db8b0 100644 --- a/library/std/src/sys/pal/windows/c.rs +++ b/library/std/src/sys/pal/windows/c.rs @@ -7,17 +7,17 @@ use crate::ffi::CStr; use crate::mem; +use crate::num::NonZero; pub use crate::os::raw::c_int; use crate::os::raw::{c_char, c_long, c_longlong, c_uint, c_ulong, c_ushort, c_void}; use crate::os::windows::io::{AsRawHandle, BorrowedHandle}; use crate::ptr; -use core::ffi::NonZero_c_ulong; mod windows_sys; pub use windows_sys::*; pub type DWORD = c_ulong; -pub type NonZeroDWORD = NonZero_c_ulong; +pub type NonZeroDWORD = NonZero<c_ulong>; pub type LARGE_INTEGER = c_longlong; #[cfg_attr(target_vendor = "uwp", allow(unused))] pub type LONG = c_long; @@ -47,7 +47,7 @@ pub use FD_SET as fd_set; pub use LINGER as linger; pub use TIMEVAL as timeval; -pub const INVALID_HANDLE_VALUE: HANDLE = ::core::ptr::invalid_mut(-1i32 as _); +pub const INVALID_HANDLE_VALUE: HANDLE = ::core::ptr::without_provenance_mut(-1i32 as _); // https://learn.microsoft.com/en-us/cpp/c-runtime-library/exit-success-exit-failure?view=msvc-170 pub const EXIT_SUCCESS: u32 = 0; diff --git a/library/std/src/sys/pal/windows/c/README.md b/library/std/src/sys/pal/windows/c/README.md new file mode 100644 index 00000000000..d458e55efbc --- /dev/null +++ b/library/std/src/sys/pal/windows/c/README.md @@ -0,0 +1,9 @@ +The `windows_sys.rs` file is autogenerated from `bindings.txt` and must not +be edited manually. + +To add bindings, edit `bindings.txt` then regenerate using the following command: + + ./x run generate-windows-sys && ./x fmt library/std + +If you need to override generated functions or types then add them to +`library/std/src/sys/pal/windows/c.rs`. 
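A minimal sketch (not part of the diff) of the strict-provenance helpers this patch switches to: it assumes a toolchain where `ptr::without_provenance_mut` and `pointer::addr` are available, the renamed forms of the older `ptr::invalid_mut`/`ptr::invalid` used for sentinel values such as `INVALID_HANDLE_VALUE` above; the variable name `invalid_handle` is only illustrative.

    use core::ffi::c_void;
    use core::ptr;

    fn main() {
        // Build an address-only pointer for a sentinel value; it carries no
        // provenance and must never be dereferenced, only stored and compared.
        let invalid_handle: *mut c_void = ptr::without_provenance_mut(usize::MAX); // -1 as usize
        assert_eq!(invalid_handle.addr(), usize::MAX);
    }

The rename applied throughout this diff (`ptr::invalid`/`invalid_mut` to `ptr::without_provenance`/`without_provenance_mut`) is mechanical; the resulting pointers are still used only as sentinels and are never dereferenced.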
diff --git a/library/std/src/sys/pal/windows/c/windows_sys.lst b/library/std/src/sys/pal/windows/c/bindings.txt index f91e1054a04..726f1c3df82 100644 --- a/library/std/src/sys/pal/windows/c/windows_sys.lst +++ b/library/std/src/sys/pal/windows/c/bindings.txt @@ -1,7 +1,6 @@ --out windows_sys.rs --config flatten std --filter -// tidy-alphabetical-start !Windows.Win32.Foundation.INVALID_HANDLE_VALUE Windows.Wdk.Storage.FileSystem.FILE_COMPLETE_IF_OPLOCKED Windows.Wdk.Storage.FileSystem.FILE_CONTAINS_EXTENDED_CREATE_INFORMATION @@ -2592,5 +2591,3 @@ Windows.Win32.System.Threading.WakeAllConditionVariable Windows.Win32.System.Threading.WakeConditionVariable Windows.Win32.System.WindowsProgramming.PROGRESS_CONTINUE Windows.Win32.UI.Shell.GetUserProfileDirectoryW -// tidy-alphabetical-end - diff --git a/library/std/src/sys/pal/windows/c/windows_sys.rs b/library/std/src/sys/pal/windows/c/windows_sys.rs index b38b70c8983..c386b66a722 100644 --- a/library/std/src/sys/pal/windows/c/windows_sys.rs +++ b/library/std/src/sys/pal/windows/c/windows_sys.rs @@ -1,9 +1,3 @@ -// This file is autogenerated. -// -// To add bindings, edit windows_sys.lst then use `./x run generate-windows-sys` to -// regenerate the bindings. -// -// ignore-tidy-filelength // Bindings generated by `windows-bindgen` 0.52.0 #![allow(non_snake_case, non_upper_case_globals, non_camel_case_types, dead_code, clippy::all)] @@ -4351,3 +4345,4 @@ impl ::core::clone::Clone for XSAVE_FORMAT { *self } } +// ignore-tidy-filelength diff --git a/library/std/src/sys/pal/windows/cmath.rs b/library/std/src/sys/pal/windows/cmath.rs deleted file mode 100644 index 36578d5a34e..00000000000 --- a/library/std/src/sys/pal/windows/cmath.rs +++ /dev/null @@ -1,96 +0,0 @@ -#![cfg(not(test))] - -use core::ffi::{c_double, c_float, c_int}; - -extern "C" { - pub fn acos(n: c_double) -> c_double; - pub fn asin(n: c_double) -> c_double; - pub fn atan(n: c_double) -> c_double; - pub fn atan2(a: c_double, b: c_double) -> c_double; - pub fn cbrt(n: c_double) -> c_double; - pub fn cbrtf(n: c_float) -> c_float; - pub fn cosh(n: c_double) -> c_double; - pub fn expm1(n: c_double) -> c_double; - pub fn expm1f(n: c_float) -> c_float; - pub fn fdim(a: c_double, b: c_double) -> c_double; - pub fn fdimf(a: c_float, b: c_float) -> c_float; - #[cfg_attr(target_env = "msvc", link_name = "_hypot")] - pub fn hypot(x: c_double, y: c_double) -> c_double; - #[cfg_attr(target_env = "msvc", link_name = "_hypotf")] - pub fn hypotf(x: c_float, y: c_float) -> c_float; - pub fn log1p(n: c_double) -> c_double; - pub fn log1pf(n: c_float) -> c_float; - pub fn sinh(n: c_double) -> c_double; - pub fn tan(n: c_double) -> c_double; - pub fn tanh(n: c_double) -> c_double; - pub fn tgamma(n: c_double) -> c_double; - pub fn tgammaf(n: c_float) -> c_float; - pub fn lgamma_r(n: c_double, s: &mut c_int) -> c_double; - pub fn lgammaf_r(n: c_float, s: &mut c_int) -> c_float; -} - -pub use self::shims::*; - -#[cfg(not(all(target_env = "msvc", target_arch = "x86")))] -mod shims { - use core::ffi::c_float; - - extern "C" { - pub fn acosf(n: c_float) -> c_float; - pub fn asinf(n: c_float) -> c_float; - pub fn atan2f(a: c_float, b: c_float) -> c_float; - pub fn atanf(n: c_float) -> c_float; - pub fn coshf(n: c_float) -> c_float; - pub fn sinhf(n: c_float) -> c_float; - pub fn tanf(n: c_float) -> c_float; - pub fn tanhf(n: c_float) -> c_float; - } -} - -// On 32-bit x86 MSVC these functions aren't defined, so we just define shims -// which promote everything to f64, perform the calculation, and then demote 
-// back to f32. While not precisely correct should be "correct enough" for now. -#[cfg(all(target_env = "msvc", target_arch = "x86"))] -mod shims { - use core::ffi::c_float; - - #[inline] - pub unsafe fn acosf(n: c_float) -> c_float { - f64::acos(n as f64) as c_float - } - - #[inline] - pub unsafe fn asinf(n: c_float) -> c_float { - f64::asin(n as f64) as c_float - } - - #[inline] - pub unsafe fn atan2f(n: c_float, b: c_float) -> c_float { - f64::atan2(n as f64, b as f64) as c_float - } - - #[inline] - pub unsafe fn atanf(n: c_float) -> c_float { - f64::atan(n as f64) as c_float - } - - #[inline] - pub unsafe fn coshf(n: c_float) -> c_float { - f64::cosh(n as f64) as c_float - } - - #[inline] - pub unsafe fn sinhf(n: c_float) -> c_float { - f64::sinh(n as f64) as c_float - } - - #[inline] - pub unsafe fn tanf(n: c_float) -> c_float { - f64::tan(n as f64) as c_float - } - - #[inline] - pub unsafe fn tanhf(n: c_float) -> c_float { - f64::tanh(n as f64) as c_float - } -} diff --git a/library/std/src/sys/pal/windows/fs.rs b/library/std/src/sys/pal/windows/fs.rs index 42484543686..b82a83ae7a3 100644 --- a/library/std/src/sys/pal/windows/fs.rs +++ b/library/std/src/sys/pal/windows/fs.rs @@ -16,8 +16,8 @@ use crate::sys::{c, cvt, Align8}; use crate::sys_common::{AsInner, FromInner, IntoInner}; use crate::thread; -use super::path::maybe_verbatim; use super::{api, to_u16s, IoResult}; +use crate::sys::path::maybe_verbatim; pub struct File { handle: Handle, @@ -112,6 +112,13 @@ impl fmt::Debug for ReadDir { impl Iterator for ReadDir { type Item = io::Result<DirEntry>; fn next(&mut self) -> Option<io::Result<DirEntry>> { + if self.handle.0 == c::INVALID_HANDLE_VALUE { + // This iterator was initialized with an `INVALID_HANDLE_VALUE` as its handle. + // Simply return `None` because this is only the case when `FindFirstFileW` in + // the construction of this iterator returns `ERROR_FILE_NOT_FOUND` which means + // no matchhing files can be found. + return None; + } if let Some(first) = self.first.take() { if let Some(e) = DirEntry::new(&self.root, &first) { return Some(Ok(e)); @@ -1068,6 +1075,7 @@ pub fn readdir(p: &Path) -> io::Result<ReadDir> { unsafe { let mut wfd = mem::zeroed(); let find_handle = c::FindFirstFileW(path.as_ptr(), &mut wfd); + if find_handle != c::INVALID_HANDLE_VALUE { Ok(ReadDir { handle: FindNextFileHandle(find_handle), @@ -1075,7 +1083,31 @@ pub fn readdir(p: &Path) -> io::Result<ReadDir> { first: Some(wfd), }) } else { - Err(Error::last_os_error()) + // The status `ERROR_FILE_NOT_FOUND` is returned by the `FindFirstFileW` function + // if no matching files can be found, but not necessarily that the path to find the + // files in does not exist. + // + // Hence, a check for whether the path to search in exists is added when the last + // os error returned by Windows is `ERROR_FILE_NOT_FOUND` to handle this scenario. + // If that is the case, an empty `ReadDir` iterator is returned as it returns `None` + // in the initial `.next()` invocation because `ERROR_NO_MORE_FILES` would have been + // returned by the `FindNextFileW` function. + // + // See issue #120040: https://github.com/rust-lang/rust/issues/120040. + let last_error = api::get_last_error(); + if last_error.code == c::ERROR_FILE_NOT_FOUND { + return Ok(ReadDir { + handle: FindNextFileHandle(find_handle), + root: Arc::new(root), + first: None, + }); + } + + // Just return the error constructed from the raw OS error if the above is not the case. 
+ // + // Note: `ERROR_PATH_NOT_FOUND` would have been returned by the `FindFirstFileW` function + // when the path to search in does not exist in the first place. + Err(Error::from_raw_os_error(last_error.code as i32)) } } } diff --git a/library/std/src/sys/pal/windows/handle.rs b/library/std/src/sys/pal/windows/handle.rs index c4495f81a5a..3f85bb0a099 100644 --- a/library/std/src/sys/pal/windows/handle.rs +++ b/library/std/src/sys/pal/windows/handle.rs @@ -121,7 +121,7 @@ impl Handle { Ok(read) => { // Safety: `read` bytes were written to the initialized portion of the buffer unsafe { - cursor.advance(read); + cursor.advance_unchecked(read); } Ok(()) } diff --git a/library/std/src/sys/pal/windows/io.rs b/library/std/src/sys/pal/windows/io.rs index 649826d25ce..b73d9f3ff4c 100644 --- a/library/std/src/sys/pal/windows/io.rs +++ b/library/std/src/sys/pal/windows/io.rs @@ -97,20 +97,6 @@ unsafe fn handle_is_console(handle: BorrowedHandle<'_>) -> bool { return true; } - // At this point, we *could* have a false negative. We can determine that this is a true - // negative if we can detect the presence of a console on any of the standard I/O streams. If - // another stream has a console, then we know we're in a Windows console and can therefore - // trust the negative. - for std_handle in [c::STD_INPUT_HANDLE, c::STD_OUTPUT_HANDLE, c::STD_ERROR_HANDLE] { - let std_handle = c::GetStdHandle(std_handle); - if !std_handle.is_null() - && std_handle != handle - && c::GetConsoleMode(std_handle, &mut out) != 0 - { - return false; - } - } - // Otherwise, we fall back to an msys hack to see if we can detect the presence of a pty. msys_tty_on(handle) } diff --git a/library/std/src/sys/pal/windows/locks/mod.rs b/library/std/src/sys/pal/windows/locks/mod.rs deleted file mode 100644 index 0e0f9eccb21..00000000000 --- a/library/std/src/sys/pal/windows/locks/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod condvar; -mod mutex; -mod rwlock; -pub use condvar::Condvar; -pub use mutex::Mutex; -pub use rwlock::RwLock; diff --git a/library/std/src/sys/pal/windows/mod.rs b/library/std/src/sys/pal/windows/mod.rs index 8b722f01a5d..b47d213df34 100644 --- a/library/std/src/sys/pal/windows/mod.rs +++ b/library/std/src/sys/pal/windows/mod.rs @@ -15,17 +15,13 @@ pub mod compat; pub mod alloc; pub mod args; pub mod c; -pub mod cmath; pub mod env; pub mod fs; pub mod handle; pub mod io; -pub mod locks; pub mod memchr; pub mod net; pub mod os; -pub mod os_str; -pub mod path; pub mod pipe; pub mod process; pub mod rand; @@ -212,7 +208,7 @@ pub fn to_u16s<S: AsRef<OsStr>>(s: S) -> crate::io::Result<Vec<u16>> { // Once the syscall has completed (errors bail out early) the second closure is // yielded the data which has been read from the syscall. The return value // from this closure is then the return value of the function. 
-fn fill_utf16_buf<F1, F2, T>(mut f1: F1, f2: F2) -> crate::io::Result<T> +pub fn fill_utf16_buf<F1, F2, T>(mut f1: F1, f2: F2) -> crate::io::Result<T> where F1: FnMut(*mut u16, c::DWORD) -> c::DWORD, F2: FnOnce(&[u16]) -> T, @@ -276,7 +272,7 @@ where } } -fn os2path(s: &[u16]) -> PathBuf { +pub fn os2path(s: &[u16]) -> PathBuf { PathBuf::from(OsString::from_wide(s)) } diff --git a/library/std/src/sys/pal/windows/net.rs b/library/std/src/sys/pal/windows/net.rs index c34e01e000a..e37fbe9ef83 100644 --- a/library/std/src/sys/pal/windows/net.rs +++ b/library/std/src/sys/pal/windows/net.rs @@ -234,7 +234,7 @@ impl Socket { } } _ => { - unsafe { buf.advance(result as usize) }; + unsafe { buf.advance_unchecked(result as usize) }; Ok(()) } } diff --git a/library/std/src/sys/pal/windows/os.rs b/library/std/src/sys/pal/windows/os.rs index 829dd5eb97a..374c9845ea4 100644 --- a/library/std/src/sys/pal/windows/os.rs +++ b/library/std/src/sys/pal/windows/os.rs @@ -318,13 +318,33 @@ pub fn temp_dir() -> PathBuf { super::fill_utf16_buf(|buf, sz| unsafe { c::GetTempPath2W(sz, buf) }, super::os2path).unwrap() } -#[cfg(not(target_vendor = "uwp"))] +#[cfg(all(not(target_vendor = "uwp"), not(target_vendor = "win7")))] +fn home_dir_crt() -> Option<PathBuf> { + unsafe { + // Defined in processthreadsapi.h. + const CURRENT_PROCESS_TOKEN: usize = -4_isize as usize; + + super::fill_utf16_buf( + |buf, mut sz| { + match c::GetUserProfileDirectoryW( + ptr::without_provenance_mut(CURRENT_PROCESS_TOKEN), + buf, + &mut sz, + ) { + 0 if api::get_last_error().code != c::ERROR_INSUFFICIENT_BUFFER => 0, + 0 => sz, + _ => sz - 1, // sz includes the null terminator + } + }, + super::os2path, + ) + .ok() + } +} + +#[cfg(target_vendor = "win7")] fn home_dir_crt() -> Option<PathBuf> { unsafe { - // The magic constant -4 can be used as the token passed to GetUserProfileDirectoryW below - // instead of us having to go through these multiple steps to get a token. However this is - // not implemented on Windows 7, only Windows 8 and up. When we drop support for Windows 7 - // we can simplify this code. See #90144 for details. 
use crate::sys::handle::Handle; let me = c::GetCurrentProcess(); diff --git a/library/std/src/sys/pal/windows/pipe.rs b/library/std/src/sys/pal/windows/pipe.rs index 7624e746f5c..fd10df82d8b 100644 --- a/library/std/src/sys/pal/windows/pipe.rs +++ b/library/std/src/sys/pal/windows/pipe.rs @@ -273,7 +273,7 @@ impl AnonPipe { Err(e) => Err(e), Ok(n) => { unsafe { - buf.advance(n); + buf.advance_unchecked(n); } Ok(()) } diff --git a/library/std/src/sys/pal/windows/process.rs b/library/std/src/sys/pal/windows/process.rs index 9ec775959fd..6a94d377140 100644 --- a/library/std/src/sys/pal/windows/process.rs +++ b/library/std/src/sys/pal/windows/process.rs @@ -12,7 +12,7 @@ use crate::fmt; use crate::io::{self, Error, ErrorKind}; use crate::mem; use crate::mem::MaybeUninit; -use crate::num::NonZeroI32; +use crate::num::NonZero; use crate::os::windows::ffi::{OsStrExt, OsStringExt}; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle}; use crate::path::{Path, PathBuf}; @@ -747,7 +747,7 @@ impl Into<ExitStatus> for ExitStatusError { } impl ExitStatusError { - pub fn code(self) -> Option<NonZeroI32> { + pub fn code(self) -> Option<NonZero<i32>> { Some((u32::from(self.0) as i32).try_into().unwrap()) } } diff --git a/library/std/src/sys/pal/windows/thread.rs b/library/std/src/sys/pal/windows/thread.rs index 1fe74493519..0f709e2ec7b 100644 --- a/library/std/src/sys/pal/windows/thread.rs +++ b/library/std/src/sys/pal/windows/thread.rs @@ -1,6 +1,6 @@ use crate::ffi::CStr; use crate::io; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::os::windows::io::AsRawHandle; use crate::os::windows::io::HandleOrNull; use crate::ptr; @@ -110,7 +110,7 @@ impl Thread { } } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { let res = unsafe { let mut sysinfo: c::SYSTEM_INFO = crate::mem::zeroed(); c::GetSystemInfo(&mut sysinfo); @@ -121,7 +121,7 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform", )), - cpus => Ok(unsafe { NonZeroUsize::new_unchecked(cpus) }), + cpus => Ok(unsafe { NonZero::new_unchecked(cpus) }), } } diff --git a/library/std/src/sys/pal/windows/thread_local_key/tests.rs b/library/std/src/sys/pal/windows/thread_local_key/tests.rs index c739f0caf3e..4119f990968 100644 --- a/library/std/src/sys/pal/windows/thread_local_key/tests.rs +++ b/library/std/src/sys/pal/windows/thread_local_key/tests.rs @@ -13,8 +13,8 @@ fn smoke() { unsafe { assert!(K1.get().is_null()); assert!(K2.get().is_null()); - K1.set(ptr::invalid_mut(1)); - K2.set(ptr::invalid_mut(2)); + K1.set(ptr::without_provenance_mut(1)); + K2.set(ptr::without_provenance_mut(2)); assert_eq!(K1.get() as usize, 1); assert_eq!(K2.get() as usize, 2); } diff --git a/library/std/src/sys/pal/windows/thread_parking.rs b/library/std/src/sys/pal/windows/thread_parking.rs index eb9167cd855..343b530b15e 100644 --- a/library/std/src/sys/pal/windows/thread_parking.rs +++ b/library/std/src/sys/pal/windows/thread_parking.rs @@ -220,7 +220,7 @@ impl Parker { } fn keyed_event_handle() -> c::HANDLE { - const INVALID: c::HANDLE = ptr::invalid_mut(!0); + const INVALID: c::HANDLE = ptr::without_provenance_mut(!0); static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(INVALID); match HANDLE.load(Relaxed) { INVALID => { diff --git a/library/std/src/sys/pal/xous/alloc.rs b/library/std/src/sys/pal/xous/alloc.rs index 
b3a3e691e0d..0d540e95520 100644 --- a/library/std/src/sys/pal/xous/alloc.rs +++ b/library/std/src/sys/pal/xous/alloc.rs @@ -1,7 +1,15 @@ use crate::alloc::{GlobalAlloc, Layout, System}; +#[cfg(not(test))] +#[export_name = "_ZN16__rust_internals3std3sys4xous5alloc8DLMALLOCE"] static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new(); +#[cfg(test)] +extern "Rust" { + #[link_name = "_ZN16__rust_internals3std3sys4xous5alloc8DLMALLOCE"] + static mut DLMALLOC: dlmalloc::Dlmalloc; +} + #[stable(feature = "alloc_system_type", since = "1.28.0")] unsafe impl GlobalAlloc for System { #[inline] diff --git a/library/std/src/sys/pal/xous/locks/condvar.rs b/library/std/src/sys/pal/xous/locks/condvar.rs deleted file mode 100644 index 1bb38dfa341..00000000000 --- a/library/std/src/sys/pal/xous/locks/condvar.rs +++ /dev/null @@ -1,111 +0,0 @@ -use super::mutex::Mutex; -use crate::os::xous::ffi::{blocking_scalar, scalar}; -use crate::os::xous::services::ticktimer_server; -use crate::sync::Mutex as StdMutex; -use crate::time::Duration; - -// The implementation is inspired by Andrew D. Birrell's paper -// "Implementing Condition Variables with Semaphores" - -pub struct Condvar { - counter: StdMutex<usize>, -} - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -impl Condvar { - #[inline] - #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] - pub const fn new() -> Condvar { - Condvar { counter: StdMutex::new(0) } - } - - pub fn notify_one(&self) { - let mut counter = self.counter.lock().unwrap(); - if *counter <= 0 { - return; - } else { - *counter -= 1; - } - let result = blocking_scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(), - ); - drop(counter); - result.expect("failure to send NotifyCondition command"); - } - - pub fn notify_all(&self) { - let mut counter = self.counter.lock().unwrap(); - if *counter <= 0 { - return; - } - let result = blocking_scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter) - .into(), - ); - *counter = 0; - drop(counter); - - result.expect("failure to send NotifyCondition command"); - } - - fn index(&self) -> usize { - self as *const Condvar as usize - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - let mut counter = self.counter.lock().unwrap(); - *counter += 1; - unsafe { mutex.unlock() }; - drop(counter); - - let result = blocking_scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(), - ); - unsafe { mutex.lock() }; - - result.expect("Ticktimer: failure to send WaitForCondition command"); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let mut counter = self.counter.lock().unwrap(); - *counter += 1; - unsafe { mutex.unlock() }; - drop(counter); - - let mut millis = dur.as_millis() as usize; - if millis == 0 { - millis = 1; - } - - let result = blocking_scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis) - .into(), - ); - unsafe { mutex.lock() }; - - let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; - - // If we awoke due to a timeout, decrement the wake count, as that would not have - // been done in the `notify()` call. 
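The `export_name`/`link_name` pairing used for `DLMALLOC` above can be sketched on its own; the symbol string and static below are purely illustrative:

use core::sync::atomic::AtomicUsize;

// The non-test build defines the static under a fixed symbol name...
#[cfg(not(test))]
#[export_name = "_ZN7example6SHAREDE"]
static SHARED: AtomicUsize = AtomicUsize::new(0);

// ...and the test build of the same file imports that very symbol, so both
// builds observe a single instance instead of two independent copies.
#[cfg(test)]
extern "Rust" {
    #[link_name = "_ZN7example6SHAREDE"]
    static SHARED: AtomicUsize;
}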
- if !result { - *self.counter.lock().unwrap() -= 1; - } - result - } -} - -impl Drop for Condvar { - fn drop(&mut self) { - scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::FreeCondition(self.index()).into(), - ) - .ok(); - } -} diff --git a/library/std/src/sys/pal/xous/locks/mod.rs b/library/std/src/sys/pal/xous/locks/mod.rs deleted file mode 100644 index f3c5c5d9fb0..00000000000 --- a/library/std/src/sys/pal/xous/locks/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod condvar; -mod mutex; -mod rwlock; - -pub use condvar::*; -pub use mutex::*; -pub use rwlock::*; diff --git a/library/std/src/sys/pal/xous/mod.rs b/library/std/src/sys/pal/xous/mod.rs index c2550dcfd83..7914a255aea 100644 --- a/library/std/src/sys/pal/xous/mod.rs +++ b/library/std/src/sys/pal/xous/mod.rs @@ -3,24 +3,14 @@ pub mod alloc; #[path = "../unsupported/args.rs"] pub mod args; -#[path = "../unix/cmath.rs"] -pub mod cmath; #[path = "../unsupported/env.rs"] pub mod env; #[path = "../unsupported/fs.rs"] pub mod fs; #[path = "../unsupported/io.rs"] pub mod io; -pub mod locks; -#[path = "../unsupported/net.rs"] pub mod net; -#[path = "../unsupported/once.rs"] -pub mod once; pub mod os; -#[path = "../unix/os_str.rs"] -pub mod os_str; -#[path = "../unix/path.rs"] -pub mod path; #[path = "../unsupported/pipe.rs"] pub mod pipe; #[path = "../unsupported/process.rs"] diff --git a/library/std/src/sys/pal/xous/net/dns.rs b/library/std/src/sys/pal/xous/net/dns.rs new file mode 100644 index 00000000000..63056324bfb --- /dev/null +++ b/library/std/src/sys/pal/xous/net/dns.rs @@ -0,0 +1,127 @@ +use crate::io; +use crate::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use crate::os::xous::ffi::lend_mut; +use crate::os::xous::services::{dns_server, DnsLendMut}; +use core::convert::{TryFrom, TryInto}; + +pub struct DnsError { + pub code: u8, +} + +#[repr(C, align(4096))] +struct LookupHostQuery([u8; 4096]); + +pub struct LookupHost { + data: LookupHostQuery, + port: u16, + offset: usize, + count: usize, +} + +impl LookupHost { + pub fn port(&self) -> u16 { + self.port + } +} + +impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + if self.offset >= self.data.0.len() { + return None; + } + match self.data.0.get(self.offset) { + Some(&4) => { + self.offset += 1; + if self.offset + 4 > self.data.0.len() { + return None; + } + let result = Some(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new( + self.data.0[self.offset], + self.data.0[self.offset + 1], + self.data.0[self.offset + 2], + self.data.0[self.offset + 3], + ), + self.port, + ))); + self.offset += 4; + result + } + Some(&6) => { + self.offset += 1; + if self.offset + 16 > self.data.0.len() { + return None; + } + let mut new_addr = [0u8; 16]; + for (src, octet) in self.data.0[(self.offset + 1)..(self.offset + 16 + 1)] + .iter() + .zip(new_addr.iter_mut()) + { + *octet = *src; + } + let result = + Some(SocketAddr::V6(SocketAddrV6::new(new_addr.into(), self.port, 0, 0))); + self.offset += 16; + result + } + _ => None, + } + } +} + +pub fn lookup(query: &str, port: u16) -> Result<LookupHost, DnsError> { + let mut result = LookupHost { data: LookupHostQuery([0u8; 4096]), offset: 0, count: 0, port }; + + // Copy the query into the message that gets sent to the DNS server + for (query_byte, result_byte) in query.as_bytes().iter().zip(result.data.0.iter_mut()) { + *result_byte = *query_byte; + } + + lend_mut( + dns_server(), + DnsLendMut::RawLookup.into(), + &mut result.data.0, + 0, + query.as_bytes().len(), 
+ ) + .unwrap(); + if result.data.0[0] != 0 { + return Err(DnsError { code: result.data.0[1] }); + } + assert_eq!(result.offset, 0); + result.count = result.data.0[1] as usize; + + // Advance the offset to the first record + result.offset = 2; + Ok(result) +} + +impl TryFrom<&str> for LookupHost { + type Error = io::Error; + + fn try_from(s: &str) -> io::Result<LookupHost> { + macro_rules! try_opt { + ($e:expr, $msg:expr) => { + match $e { + Some(r) => r, + None => return Err(io::const_io_error!(io::ErrorKind::InvalidInput, &$msg)), + } + }; + } + + // split the string by ':' and convert the second part to u16 + let (host, port_str) = try_opt!(s.rsplit_once(':'), "invalid socket address"); + let port: u16 = try_opt!(port_str.parse().ok(), "invalid port value"); + (host, port).try_into() + } +} + +impl TryFrom<(&str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from(v: (&str, u16)) -> io::Result<LookupHost> { + lookup(v.0, v.1) + .map_err(|_e| io::const_io_error!(io::ErrorKind::InvalidInput, &"DNS failure")) + } +} diff --git a/library/std/src/sys/pal/xous/net/mod.rs b/library/std/src/sys/pal/xous/net/mod.rs new file mode 100644 index 00000000000..dd8b765aa74 --- /dev/null +++ b/library/std/src/sys/pal/xous/net/mod.rs @@ -0,0 +1,81 @@ +mod dns; + +mod tcpstream; +pub use tcpstream::*; + +mod tcplistener; +pub use tcplistener::*; + +mod udp; +pub use udp::*; + +// this structure needs to be synchronized with what's in net/src/api.rs +#[repr(C)] +#[derive(Debug)] +enum NetError { + // Ok = 0, + Unaddressable = 1, + SocketInUse = 2, + // AccessDenied = 3, + Invalid = 4, + // Finished = 5, + LibraryError = 6, + // AlreadyUsed = 7, + TimedOut = 8, + WouldBlock = 9, +} + +#[repr(C, align(4096))] +struct ConnectRequest { + raw: [u8; 4096], +} + +#[repr(C, align(4096))] +struct SendData { + raw: [u8; 4096], +} + +#[repr(C, align(4096))] +pub struct ReceiveData { + raw: [u8; 4096], +} + +#[repr(C, align(4096))] +pub struct GetAddress { + raw: [u8; 4096], +} + +pub use dns::LookupHost; + +#[allow(nonstandard_style)] +pub mod netc { + pub const AF_INET: u8 = 0; + pub const AF_INET6: u8 = 1; + pub type sa_family_t = u8; + + #[derive(Copy, Clone)] + pub struct in_addr { + pub s_addr: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: u16, + pub sin_addr: in_addr, + } + + #[derive(Copy, Clone)] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: u16, + pub sin6_addr: in6_addr, + pub sin6_flowinfo: u32, + pub sin6_scope_id: u32, + } +} diff --git a/library/std/src/sys/pal/xous/net/tcplistener.rs b/library/std/src/sys/pal/xous/net/tcplistener.rs new file mode 100644 index 00000000000..47305013083 --- /dev/null +++ b/library/std/src/sys/pal/xous/net/tcplistener.rs @@ -0,0 +1,247 @@ +use super::*; +use crate::fmt; +use crate::io; +use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use crate::os::xous::services; +use crate::sync::Arc; +use core::convert::TryInto; +use core::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering}; + +macro_rules! 
unimpl { + () => { + return Err(io::const_io_error!( + io::ErrorKind::Unsupported, + &"This function is not yet implemented", + )); + }; +} + +#[derive(Clone)] +pub struct TcpListener { + fd: Arc<AtomicU16>, + local: SocketAddr, + handle_count: Arc<AtomicUsize>, + nonblocking: Arc<AtomicBool>, +} + +impl TcpListener { + pub fn bind(socketaddr: io::Result<&SocketAddr>) -> io::Result<TcpListener> { + let mut addr = *socketaddr?; + + let fd = TcpListener::bind_inner(&mut addr)?; + return Ok(TcpListener { + fd: Arc::new(AtomicU16::new(fd)), + local: addr, + handle_count: Arc::new(AtomicUsize::new(1)), + nonblocking: Arc::new(AtomicBool::new(false)), + }); + } + + /// This returns the raw fd of a Listener, so that it can also be used by the + /// accept routine to replenish the Listener object after its handle has been converted into + /// a TcpStream object. + fn bind_inner(addr: &mut SocketAddr) -> io::Result<u16> { + // Construct the request + let mut connect_request = ConnectRequest { raw: [0u8; 4096] }; + + // Serialize the StdUdpBind structure. This is done "manually" because we don't want to + // make an auto-serdes (like bincode or rkyv) crate a dependency of Xous. + let port_bytes = addr.port().to_le_bytes(); + connect_request.raw[0] = port_bytes[0]; + connect_request.raw[1] = port_bytes[1]; + match addr.ip() { + IpAddr::V4(addr) => { + connect_request.raw[2] = 4; + for (dest, src) in connect_request.raw[3..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + IpAddr::V6(addr) => { + connect_request.raw[2] = 6; + for (dest, src) in connect_request.raw[3..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + } + + let Ok((_, valid)) = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdTcpListen.into(), + &mut connect_request.raw, + 0, + 4096, + ) else { + return Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Invalid response")); + }; + + // The first four bytes should be zero upon success, and will be nonzero + // for an error. 
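The reply layout described in that comment can be summarized as a small decoder; the offsets follow the parsing code below, and the helper name is illustrative:

// Status byte first; on failure byte 1 is the error code, on success byte 1 is
// the new file descriptor and bytes 2..4 are the port actually bound
// (little-endian, meaningful when port 0 was requested).
fn decode_bind_reply(raw: &[u8; 4096]) -> Result<(u16, u16), u8> {
    if raw[0] != 0 {
        return Err(raw[1]);
    }
    let fd = raw[1] as u16;
    let port = u16::from_le_bytes([raw[2], raw[3]]);
    Ok((fd, port))
}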
+ let response = connect_request.raw; + if response[0] != 0 || valid == 0 { + let errcode = response[1]; + if errcode == NetError::SocketInUse as u8 { + return Err(io::const_io_error!(io::ErrorKind::ResourceBusy, &"Socket in use")); + } else if errcode == NetError::Invalid as u8 { + return Err(io::const_io_error!( + io::ErrorKind::AddrNotAvailable, + &"Invalid address" + )); + } else if errcode == NetError::LibraryError as u8 { + return Err(io::const_io_error!(io::ErrorKind::Other, &"Library error")); + } else { + return Err(io::const_io_error!( + io::ErrorKind::Other, + &"Unable to connect or internal error" + )); + } + } + let fd = response[1] as usize; + if addr.port() == 0 { + // oddly enough, this is a valid port and it means "give me something valid, up to you what that is" + let assigned_port = u16::from_le_bytes(response[2..4].try_into().unwrap()); + addr.set_port(assigned_port); + } + // println!("TcpListening with file handle of {}\r\n", fd); + Ok(fd.try_into().unwrap()) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + Ok(self.local) + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + let mut receive_request = ReceiveData { raw: [0u8; 4096] }; + + if self.nonblocking.load(Ordering::Relaxed) { + // nonblocking + receive_request.raw[0] = 0; + } else { + // blocking + receive_request.raw[0] = 1; + } + + if let Ok((_offset, _valid)) = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdTcpAccept(self.fd.load(Ordering::Relaxed)).into(), + &mut receive_request.raw, + 0, + 0, + ) { + if receive_request.raw[0] != 0 { + // error case + if receive_request.raw[1] == NetError::TimedOut as u8 { + return Err(io::const_io_error!(io::ErrorKind::TimedOut, &"accept timed out",)); + } else if receive_request.raw[1] == NetError::WouldBlock as u8 { + return Err(io::const_io_error!( + io::ErrorKind::WouldBlock, + &"accept would block", + )); + } else if receive_request.raw[1] == NetError::LibraryError as u8 { + return Err(io::const_io_error!(io::ErrorKind::Other, &"Library error")); + } else { + return Err(io::const_io_error!(io::ErrorKind::Other, &"library error",)); + } + } else { + // accept successful + let rr = &receive_request.raw; + let stream_fd = u16::from_le_bytes(rr[1..3].try_into().unwrap()); + let port = u16::from_le_bytes(rr[20..22].try_into().unwrap()); + let addr = if rr[3] == 4 { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(rr[4], rr[5], rr[6], rr[7])), port) + } else if rr[3] == 6 { + SocketAddr::new( + IpAddr::V6(Ipv6Addr::new( + u16::from_be_bytes(rr[4..6].try_into().unwrap()), + u16::from_be_bytes(rr[6..8].try_into().unwrap()), + u16::from_be_bytes(rr[8..10].try_into().unwrap()), + u16::from_be_bytes(rr[10..12].try_into().unwrap()), + u16::from_be_bytes(rr[12..14].try_into().unwrap()), + u16::from_be_bytes(rr[14..16].try_into().unwrap()), + u16::from_be_bytes(rr[16..18].try_into().unwrap()), + u16::from_be_bytes(rr[18..20].try_into().unwrap()), + )), + port, + ) + } else { + return Err(io::const_io_error!(io::ErrorKind::Other, &"library error",)); + }; + + // replenish the listener + let mut local_copy = self.local.clone(); // port is non-0 by this time, but the method signature needs a mut + let new_fd = TcpListener::bind_inner(&mut local_copy)?; + self.fd.store(new_fd, Ordering::Relaxed); + + // now return a stream converted from the old stream's fd + Ok((TcpStream::from_listener(stream_fd, self.local.port(), port, addr), addr)) + } + } else { + Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unable to 
accept")) + } + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + self.handle_count.fetch_add(1, Ordering::Relaxed); + Ok(self.clone()) + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + if ttl > 255 { + return Err(io::Error::new(io::ErrorKind::InvalidInput, "TTL must be less than 256")); + } + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdSetTtlTcp(self.fd.load(Ordering::Relaxed), ttl).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|_| ()) + } + + pub fn ttl(&self) -> io::Result<u32> { + Ok(crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdGetTtlTcp(self.fd.load(Ordering::Relaxed)).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|res| res[0] as _)?) + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn only_v6(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + // this call doesn't have a meaning on our platform, but we can at least not panic if it's used. + Ok(None) + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + self.nonblocking.store(nonblocking, Ordering::Relaxed); + Ok(()) + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TCP listening on {:?}", self.local) + } +} + +impl Drop for TcpListener { + fn drop(&mut self) { + if self.handle_count.fetch_sub(1, Ordering::Relaxed) == 1 { + // only drop if we're the last clone + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + crate::os::xous::services::NetBlockingScalar::StdTcpClose( + self.fd.load(Ordering::Relaxed), + ) + .into(), + ) + .unwrap(); + } + } +} diff --git a/library/std/src/sys/pal/xous/net/tcpstream.rs b/library/std/src/sys/pal/xous/net/tcpstream.rs new file mode 100644 index 00000000000..7149678118a --- /dev/null +++ b/library/std/src/sys/pal/xous/net/tcpstream.rs @@ -0,0 +1,435 @@ +use super::*; +use crate::fmt; +use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut}; +use crate::net::{IpAddr, Ipv4Addr, Shutdown, SocketAddr, SocketAddrV4, SocketAddrV6}; +use crate::os::xous::services; +use crate::sync::Arc; +use crate::time::Duration; +use core::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering}; + +macro_rules! unimpl { + () => { + return Err(io::const_io_error!( + io::ErrorKind::Unsupported, + &"This function is not yet implemented", + )); + }; +} + +enum ReadOrPeek { + Read, + Peek, +} + +#[derive(Clone)] +pub struct TcpStream { + fd: u16, + local_port: u16, + remote_port: u16, + peer_addr: SocketAddr, + // milliseconds + read_timeout: Arc<AtomicU32>, + // milliseconds + write_timeout: Arc<AtomicU32>, + handle_count: Arc<AtomicUsize>, + nonblocking: Arc<AtomicBool>, +} + +fn sockaddr_to_buf(duration: Duration, addr: &SocketAddr, buf: &mut [u8]) { + // Construct the request. 
+ let port_bytes = addr.port().to_le_bytes(); + buf[0] = port_bytes[0]; + buf[1] = port_bytes[1]; + for (dest, src) in buf[2..].iter_mut().zip((duration.as_millis() as u64).to_le_bytes()) { + *dest = src; + } + match addr.ip() { + IpAddr::V4(addr) => { + buf[10] = 4; + for (dest, src) in buf[11..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + IpAddr::V6(addr) => { + buf[10] = 6; + for (dest, src) in buf[11..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + } +} + +impl TcpStream { + pub(crate) fn from_listener( + fd: u16, + local_port: u16, + remote_port: u16, + peer_addr: SocketAddr, + ) -> TcpStream { + TcpStream { + fd, + local_port, + remote_port, + peer_addr, + read_timeout: Arc::new(AtomicU32::new(0)), + write_timeout: Arc::new(AtomicU32::new(0)), + handle_count: Arc::new(AtomicUsize::new(1)), + nonblocking: Arc::new(AtomicBool::new(false)), + } + } + + pub fn connect(socketaddr: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + Self::connect_timeout(socketaddr?, Duration::ZERO) + } + + pub fn connect_timeout(addr: &SocketAddr, duration: Duration) -> io::Result<TcpStream> { + let mut connect_request = ConnectRequest { raw: [0u8; 4096] }; + + // Construct the request. + sockaddr_to_buf(duration, &addr, &mut connect_request.raw); + + let Ok((_, valid)) = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdTcpConnect.into(), + &mut connect_request.raw, + 0, + 4096, + ) else { + return Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Invalid response")); + }; + + // The first four bytes should be zero upon success, and will be nonzero + // for an error. + let response = connect_request.raw; + if response[0] != 0 || valid == 0 { + // errcode is a u8 but stuck in a u16 where the upper byte is invalid. Mask & decode accordingly. 
+ let errcode = response[0]; + if errcode == NetError::SocketInUse as u8 { + return Err(io::const_io_error!(io::ErrorKind::ResourceBusy, &"Socket in use",)); + } else if errcode == NetError::Unaddressable as u8 { + return Err(io::const_io_error!( + io::ErrorKind::AddrNotAvailable, + &"Invalid address", + )); + } else { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Unable to connect or internal error", + )); + } + } + let fd = u16::from_le_bytes([response[2], response[3]]); + let local_port = u16::from_le_bytes([response[4], response[5]]); + let remote_port = u16::from_le_bytes([response[6], response[7]]); + // println!( + // "Connected with local port of {}, remote port of {}, file handle of {}", + // local_port, remote_port, fd + // ); + Ok(TcpStream { + fd, + local_port, + remote_port, + peer_addr: *addr, + read_timeout: Arc::new(AtomicU32::new(0)), + write_timeout: Arc::new(AtomicU32::new(0)), + handle_count: Arc::new(AtomicUsize::new(1)), + nonblocking: Arc::new(AtomicBool::new(false)), + }) + } + + pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + if let Some(to) = timeout { + if to.is_zero() { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Zero is an invalid timeout", + )); + } + } + self.read_timeout.store( + timeout.map(|t| t.as_millis().min(u32::MAX as u128) as u32).unwrap_or_default(), + Ordering::Relaxed, + ); + Ok(()) + } + + pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + if let Some(to) = timeout { + if to.is_zero() { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Zero is an invalid timeout", + )); + } + } + self.write_timeout.store( + timeout.map(|t| t.as_millis().min(u32::MAX as u128) as u32).unwrap_or_default(), + Ordering::Relaxed, + ); + Ok(()) + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.read_timeout.load(Ordering::Relaxed) { + 0 => Ok(None), + t => Ok(Some(Duration::from_millis(t as u64))), + } + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.write_timeout.load(Ordering::Relaxed) { + 0 => Ok(None), + t => Ok(Some(Duration::from_millis(t as u64))), + } + } + + fn read_or_peek(&self, buf: &mut [u8], op: ReadOrPeek) -> io::Result<usize> { + let mut receive_request = ReceiveData { raw: [0u8; 4096] }; + let data_to_read = buf.len().min(receive_request.raw.len()); + + let opcode = match op { + ReadOrPeek::Read => { + services::NetLendMut::StdTcpRx(self.fd, self.nonblocking.load(Ordering::Relaxed)) + } + ReadOrPeek::Peek => { + services::NetLendMut::StdTcpPeek(self.fd, self.nonblocking.load(Ordering::Relaxed)) + } + }; + + let Ok((offset, length)) = crate::os::xous::ffi::lend_mut( + services::net_server(), + opcode.into(), + &mut receive_request.raw, + // Reuse the `offset` as the read timeout + self.read_timeout.load(Ordering::Relaxed) as usize, + data_to_read, + ) else { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Library failure: wrong message type or messaging error" + )); + }; + + if offset != 0 { + for (dest, src) in buf.iter_mut().zip(receive_request.raw[..length].iter()) { + *dest = *src; + } + Ok(length) + } else { + let result = receive_request.raw; + if result[0] != 0 { + if result[1] == 8 { + // timed out + return Err(io::const_io_error!(io::ErrorKind::TimedOut, &"Timeout",)); + } + if result[1] == 9 { + // would block + return Err(io::const_io_error!(io::ErrorKind::WouldBlock, &"Would block",)); + } + } + 
Err(io::const_io_error!(io::ErrorKind::Other, &"recv_slice failure")) + } + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.read_or_peek(buf, ReadOrPeek::Peek) + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.read_or_peek(buf, ReadOrPeek::Read) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + crate::io::default_read_vectored(|b| self.read(b), bufs) + } + + pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> { + crate::io::default_read_buf(|buf| self.read(buf), cursor) + } + + pub fn is_read_vectored(&self) -> bool { + false + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + let mut send_request = SendData { raw: [0u8; 4096] }; + for (dest, src) in send_request.raw.iter_mut().zip(buf) { + *dest = *src; + } + let buf_len = send_request.raw.len().min(buf.len()); + + let (_offset, _valid) = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdTcpTx(self.fd).into(), + &mut send_request.raw, + // Reuse the offset as the timeout + self.write_timeout.load(Ordering::Relaxed) as usize, + buf_len, + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Internal error")))?; + + if send_request.raw[0] != 0 { + if send_request.raw[4] == 8 { + // timed out + return Err(io::const_io_error!( + io::ErrorKind::BrokenPipe, + &"Timeout or connection closed", + )); + } else if send_request.raw[4] == 9 { + // would block + return Err(io::const_io_error!(io::ErrorKind::WouldBlock, &"Would block",)); + } else { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Error when sending", + )); + } + } + Ok(u32::from_le_bytes([ + send_request.raw[4], + send_request.raw[5], + send_request.raw[6], + send_request.raw[7], + ]) as usize) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + crate::io::default_write_vectored(|b| self.write(b), bufs) + } + + pub fn is_write_vectored(&self) -> bool { + false + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + Ok(self.peer_addr) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + let mut get_addr = GetAddress { raw: [0u8; 4096] }; + + let Ok((_offset, _valid)) = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdGetAddress(self.fd).into(), + &mut get_addr.raw, + 0, + 0, + ) else { + return Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Internal error")); + }; + let mut i = get_addr.raw.iter(); + match *i.next().unwrap() { + 4 => Ok(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new( + *i.next().unwrap(), + *i.next().unwrap(), + *i.next().unwrap(), + *i.next().unwrap(), + ), + self.local_port, + ))), + 6 => { + let mut new_addr = [0u8; 16]; + for (src, octet) in i.zip(new_addr.iter_mut()) { + *octet = *src; + } + Ok(SocketAddr::V6(SocketAddrV6::new(new_addr.into(), self.local_port, 0, 0))) + } + _ => Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Internal error")), + } + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdTcpStreamShutdown(self.fd, how).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|_| ()) + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + self.handle_count.fetch_add(1, Ordering::Relaxed); + Ok(self.clone()) + } + + pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> { + unimpl!(); + } + + pub fn 
linger(&self) -> io::Result<Option<Duration>> { + unimpl!(); + } + + pub fn set_nodelay(&self, enabled: bool) -> io::Result<()> { + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdSetNodelay(self.fd, enabled).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|_| ()) + } + + pub fn nodelay(&self) -> io::Result<bool> { + Ok(crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdGetNodelay(self.fd).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|res| res[0] != 0)?) + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + if ttl > 255 { + return Err(io::Error::new(io::ErrorKind::InvalidInput, "TTL must be less than 256")); + } + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdSetTtlTcp(self.fd, ttl).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|_| ()) + } + + pub fn ttl(&self) -> io::Result<u32> { + Ok(crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdGetTtlTcp(self.fd).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|res| res[0] as _)?) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + // this call doesn't have a meaning on our platform, but we can at least not panic if it's used. + Ok(None) + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + self.nonblocking.store(nonblocking, Ordering::SeqCst); + Ok(()) + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "TCP connection to {:?} port {} to local port {}", + self.peer_addr, self.remote_port, self.local_port + ) + } +} + +impl Drop for TcpStream { + fn drop(&mut self) { + if self.handle_count.fetch_sub(1, Ordering::Relaxed) == 1 { + // only drop if we're the last clone + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdTcpClose(self.fd).into(), + ) + .unwrap(); + } + } +} diff --git a/library/std/src/sys/pal/xous/net/udp.rs b/library/std/src/sys/pal/xous/net/udp.rs new file mode 100644 index 00000000000..cafa5b3bde8 --- /dev/null +++ b/library/std/src/sys/pal/xous/net/udp.rs @@ -0,0 +1,471 @@ +use super::*; +use crate::cell::Cell; +use crate::fmt; +use crate::io; +use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use crate::os::xous::services; +use crate::sync::Arc; +use crate::time::Duration; +use core::convert::TryInto; +use core::sync::atomic::{AtomicUsize, Ordering}; + +macro_rules! unimpl { + () => { + return Err(io::const_io_error!( + io::ErrorKind::Unsupported, + &"This function is not yet implemented", + )); + }; +} + +#[derive(Clone)] +pub struct UdpSocket { + fd: u16, + local: SocketAddr, + remote: Cell<Option<SocketAddr>>, + // in milliseconds. The setting applies only to `recv` calls after the timeout is set. + read_timeout: Cell<u64>, + // in milliseconds. The setting applies only to `send` calls after the timeout is set. 
+ write_timeout: Cell<u64>, + handle_count: Arc<AtomicUsize>, + nonblocking: Cell<bool>, +} + +impl UdpSocket { + pub fn bind(socketaddr: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + let addr = socketaddr?; + // Construct the request + let mut connect_request = ConnectRequest { raw: [0u8; 4096] }; + + // Serialize the StdUdpBind structure. This is done "manually" because we don't want to + // make an auto-serdes (like bincode or rkyv) crate a dependency of Xous. + let port_bytes = addr.port().to_le_bytes(); + connect_request.raw[0] = port_bytes[0]; + connect_request.raw[1] = port_bytes[1]; + match addr.ip() { + IpAddr::V4(addr) => { + connect_request.raw[2] = 4; + for (dest, src) in connect_request.raw[3..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + IpAddr::V6(addr) => { + connect_request.raw[2] = 6; + for (dest, src) in connect_request.raw[3..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + } + + let response = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdUdpBind.into(), + &mut connect_request.raw, + 0, + 4096, + ); + + if let Ok((_, valid)) = response { + // The first four bytes should be zero upon success, and will be nonzero + // for an error. + let response = connect_request.raw; + if response[0] != 0 || valid == 0 { + let errcode = response[1]; + if errcode == NetError::SocketInUse as u8 { + return Err(io::const_io_error!(io::ErrorKind::ResourceBusy, &"Socket in use")); + } else if errcode == NetError::Invalid as u8 { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Port can't be 0 or invalid address" + )); + } else if errcode == NetError::LibraryError as u8 { + return Err(io::const_io_error!(io::ErrorKind::Other, &"Library error")); + } else { + return Err(io::const_io_error!( + io::ErrorKind::Other, + &"Unable to connect or internal error" + )); + } + } + let fd = response[1] as u16; + return Ok(UdpSocket { + fd, + local: *addr, + remote: Cell::new(None), + read_timeout: Cell::new(0), + write_timeout: Cell::new(0), + handle_count: Arc::new(AtomicUsize::new(1)), + nonblocking: Cell::new(false), + }); + } + Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Invalid response")) + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + match self.remote.get() { + Some(dest) => Ok(dest), + None => Err(io::const_io_error!(io::ErrorKind::NotConnected, &"No peer specified")), + } + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + Ok(self.local) + } + + fn recv_inner(&self, buf: &mut [u8], do_peek: bool) -> io::Result<(usize, SocketAddr)> { + let mut receive_request = ReceiveData { raw: [0u8; 4096] }; + + if self.nonblocking.get() { + // nonblocking + receive_request.raw[0] = 0; + } else { + // blocking + receive_request.raw[0] = 1; + for (&s, d) in self + .read_timeout + .get() + .to_le_bytes() + .iter() + .zip(receive_request.raw[1..9].iter_mut()) + { + *d = s; + } + } + if let Ok((_offset, _valid)) = crate::os::xous::ffi::lend_mut( + services::net_server(), + services::NetLendMut::StdUdpRx(self.fd).into(), + &mut receive_request.raw, + if do_peek { 1 } else { 0 }, + 0, + ) { + if receive_request.raw[0] != 0 { + // error case + if receive_request.raw[1] == NetError::TimedOut as u8 { + return Err(io::const_io_error!(io::ErrorKind::TimedOut, &"recv timed out",)); + } else if receive_request.raw[1] == NetError::WouldBlock as u8 { + return Err(io::const_io_error!( + io::ErrorKind::WouldBlock, + &"recv would block", + )); + } else if receive_request.raw[1] == NetError::LibraryError 
as u8 { + return Err(io::const_io_error!(io::ErrorKind::Other, &"Library error")); + } else { + return Err(io::const_io_error!(io::ErrorKind::Other, &"library error",)); + } + } else { + let rr = &receive_request.raw; + let rxlen = u16::from_le_bytes(rr[1..3].try_into().unwrap()); + let port = u16::from_le_bytes(rr[20..22].try_into().unwrap()); + let addr = if rr[3] == 4 { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(rr[4], rr[5], rr[6], rr[7])), port) + } else if rr[3] == 6 { + SocketAddr::new( + IpAddr::V6(Ipv6Addr::new( + u16::from_be_bytes(rr[4..6].try_into().unwrap()), + u16::from_be_bytes(rr[6..8].try_into().unwrap()), + u16::from_be_bytes(rr[8..10].try_into().unwrap()), + u16::from_be_bytes(rr[10..12].try_into().unwrap()), + u16::from_be_bytes(rr[12..14].try_into().unwrap()), + u16::from_be_bytes(rr[14..16].try_into().unwrap()), + u16::from_be_bytes(rr[16..18].try_into().unwrap()), + u16::from_be_bytes(rr[18..20].try_into().unwrap()), + )), + port, + ) + } else { + return Err(io::const_io_error!(io::ErrorKind::Other, &"library error",)); + }; + for (&s, d) in rr[22..22 + rxlen as usize].iter().zip(buf.iter_mut()) { + *d = s; + } + Ok((rxlen as usize, addr)) + } + } else { + Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unable to recv")) + } + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_inner(buf, false) + } + + pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_from(buf).map(|(len, _addr)| len) + } + + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_inner(buf, true) + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.peek_from(buf).map(|(len, _addr)| len) + } + + pub fn connect(&self, maybe_addr: io::Result<&SocketAddr>) -> io::Result<()> { + let addr = maybe_addr?; + self.remote.set(Some(*addr)); + Ok(()) + } + + pub fn send(&self, buf: &[u8]) -> io::Result<usize> { + if let Some(addr) = self.remote.get() { + self.send_to(buf, &addr) + } else { + Err(io::const_io_error!(io::ErrorKind::NotConnected, &"No remote specified")) + } + } + + pub fn send_to(&self, buf: &[u8], addr: &SocketAddr) -> io::Result<usize> { + let mut tx_req = SendData { raw: [0u8; 4096] }; + + // Construct the request. + let port_bytes = addr.port().to_le_bytes(); + tx_req.raw[0] = port_bytes[0]; + tx_req.raw[1] = port_bytes[1]; + match addr.ip() { + IpAddr::V4(addr) => { + tx_req.raw[2] = 4; + for (dest, src) in tx_req.raw[3..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + IpAddr::V6(addr) => { + tx_req.raw[2] = 6; + for (dest, src) in tx_req.raw[3..].iter_mut().zip(addr.octets()) { + *dest = src; + } + } + } + let len = buf.len() as u16; + let len_bytes = len.to_le_bytes(); + tx_req.raw[19] = len_bytes[0]; + tx_req.raw[20] = len_bytes[1]; + for (&s, d) in buf.iter().zip(tx_req.raw[21..].iter_mut()) { + *d = s; + } + + // let buf = unsafe { + // xous::MemoryRange::new( + // &mut tx_req as *mut SendData as usize, + // core::mem::size_of::<SendData>(), + // ) + // .unwrap() + // }; + + // write time-outs are implemented on the caller side. Basically, if the Net crate server + // is too busy to take the call immediately: retry, until the timeout is reached. 
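On its own, that policy is a deadline loop around a fallible send; a minimal sketch with a hypothetical `try_send` closure:

use std::time::{Duration, Instant};

// Keep retrying a transient failure until the deadline passes, yielding to the
// scheduler between attempts so the server gets a chance to drain its queue.
fn send_with_deadline<E>(
    mut try_send: impl FnMut() -> Result<(), E>,
    timeout: Duration,
) -> Result<(), E> {
    let start = Instant::now();
    loop {
        match try_send() {
            Ok(()) => return Ok(()),
            Err(e) if start.elapsed() >= timeout => return Err(e),
            Err(_) => std::thread::yield_now(),
        }
    }
}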
+ let now = crate::time::Instant::now(); + let write_timeout = if self.nonblocking.get() { + // nonblocking + core::time::Duration::ZERO + } else { + // blocking + if self.write_timeout.get() == 0 { + // forever + core::time::Duration::from_millis(u64::MAX) + } else { + // or this amount of time + core::time::Duration::from_millis(self.write_timeout.get()) + } + }; + loop { + let response = crate::os::xous::ffi::try_lend_mut( + services::net_server(), + services::NetLendMut::StdUdpTx(self.fd).into(), + &mut tx_req.raw, + 0, + 4096, + ); + match response { + Ok((_, valid)) => { + let response = &tx_req.raw; + if response[0] != 0 || valid == 0 { + let errcode = response[1]; + if errcode == NetError::SocketInUse as u8 { + return Err(io::const_io_error!( + io::ErrorKind::ResourceBusy, + &"Socket in use" + )); + } else if errcode == NetError::Invalid as u8 { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Socket not valid" + )); + } else if errcode == NetError::LibraryError as u8 { + return Err(io::const_io_error!( + io::ErrorKind::Other, + &"Library error" + )); + } else { + return Err(io::const_io_error!( + io::ErrorKind::Other, + &"Unable to connect" + )); + } + } else { + // no error + return Ok(len as usize); + } + } + Err(crate::os::xous::ffi::Error::ServerQueueFull) => { + if now.elapsed() >= write_timeout { + return Err(io::const_io_error!( + io::ErrorKind::WouldBlock, + &"Write timed out" + )); + } else { + // question: do we want to do something a bit more gentle than immediately retrying? + crate::thread::yield_now(); + } + } + _ => return Err(io::const_io_error!(io::ErrorKind::Other, &"Library error")), + } + } + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + self.handle_count.fetch_add(1, Ordering::Relaxed); + Ok(self.clone()) + } + + pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + if let Some(d) = timeout { + if d.is_zero() { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Zero duration is invalid" + )); + } + } + self.read_timeout + .set(timeout.map(|t| t.as_millis().min(u64::MAX as u128) as u64).unwrap_or_default()); + Ok(()) + } + + pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + if let Some(d) = timeout { + if d.is_zero() { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + &"Zero duration is invalid" + )); + } + } + self.write_timeout + .set(timeout.map(|t| t.as_millis().min(u64::MAX as u128) as u64).unwrap_or_default()); + Ok(()) + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.read_timeout.get() { + 0 => Ok(None), + t => Ok(Some(Duration::from_millis(t as u64))), + } + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.write_timeout.get() { + 0 => Ok(None), + t => Ok(Some(Duration::from_millis(t as u64))), + } + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + if ttl > 255 { + return Err(io::Error::new(io::ErrorKind::InvalidInput, "TTL must be less than 256")); + } + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdSetTtlUdp(self.fd, ttl).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + .map(|_| ()) + } + + pub fn ttl(&self) -> io::Result<u32> { + Ok(crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdGetTtlUdp(self.fd).into(), + ) + .or(Err(io::const_io_error!(io::ErrorKind::InvalidInput, &"Unexpected return value"))) + 
.map(|res| res[0] as _)?) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + // this call doesn't have a meaning on our platform, but we can at least not panic if it's used. + Ok(None) + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + self.nonblocking.set(nonblocking); + Ok(()) + } + + // ------------- smoltcp base stack does not have multicast or broadcast support --------------- + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn broadcast(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + unimpl!(); + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + unimpl!(); + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + unimpl!(); + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + unimpl!(); + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "UDP listening on {:?} to {:?}", self.local, self.remote.get(),) + } +} + +impl Drop for UdpSocket { + fn drop(&mut self) { + if self.handle_count.fetch_sub(1, Ordering::Relaxed) == 1 { + // only drop if we're the last clone + crate::os::xous::ffi::blocking_scalar( + services::net_server(), + services::NetBlockingScalar::StdUdpClose(self.fd).into(), + ) + .unwrap(); + } + } +} diff --git a/library/std/src/sys/pal/xous/stdio.rs b/library/std/src/sys/pal/xous/stdio.rs index 2ac694641ba..11608964b52 100644 --- a/library/std/src/sys/pal/xous/stdio.rs +++ b/library/std/src/sys/pal/xous/stdio.rs @@ -5,7 +5,7 @@ pub struct Stdout {} pub struct Stderr; use crate::os::xous::ffi::{lend, try_lend, try_scalar, Connection}; -use crate::os::xous::services::{log_server, try_connect, LogScalar}; +use crate::os::xous::services::{log_server, try_connect, LogLend, LogScalar}; impl Stdin { pub const fn new() -> Stdin { @@ -27,7 +27,7 @@ impl Stdout { impl io::Write for Stdout { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { - #[repr(align(4096))] + #[repr(C, align(4096))] struct LendBuffer([u8; 4096]); let mut lend_buffer = LendBuffer([0u8; 4096]); let connection = log_server(); @@ -35,7 +35,8 @@ impl io::Write for Stdout { for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) { *dest = *src; } - lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap(); + lend(connection, LogLend::StandardOutput.into(), &lend_buffer.0, 0, chunk.len()) + .unwrap(); } Ok(buf.len()) } @@ -53,7 +54,7 @@ impl Stderr { impl io::Write for Stderr { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { - #[repr(align(4096))] + #[repr(C, align(4096))] struct LendBuffer([u8; 4096]); let mut lend_buffer = LendBuffer([0u8; 4096]); let connection = log_server(); @@ -61,7 +62,8 @@ impl io::Write for Stderr { for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) { *dest = *src; } - lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap(); + lend(connection, LogLend::StandardError.into(), 
&lend_buffer.0, 0, chunk.len()) + .unwrap(); } Ok(buf.len()) } diff --git a/library/std/src/sys/pal/xous/thread.rs b/library/std/src/sys/pal/xous/thread.rs index 78c68de7bf3..21f5954d6e2 100644 --- a/library/std/src/sys/pal/xous/thread.rs +++ b/library/std/src/sys/pal/xous/thread.rs @@ -1,6 +1,6 @@ use crate::ffi::CStr; use crate::io; -use crate::num::NonZeroUsize; +use crate::num::NonZero; use crate::os::xous::ffi::{ blocking_scalar, create_thread, do_yield, join_thread, map_memory, update_memory_flags, MemoryFlags, Syscall, ThreadId, @@ -68,14 +68,18 @@ impl Thread { ) .map_err(|code| io::Error::from_raw_os_error(code as i32))?; - extern "C" fn thread_start(main: *mut usize, guard_page_pre: usize, stack_size: usize) { + extern "C" fn thread_start( + main: *mut usize, + guard_page_pre: usize, + stack_size: usize, + ) -> ! { unsafe { - // Finally, let's run some code. + // Run the contents of the new thread. Box::from_raw(main as *mut Box<dyn FnOnce()>)(); } // Destroy TLS, which will free the TLS page and call the destructor for - // any thread local storage. + // any thread local storage (if any). unsafe { crate::sys::thread_local_key::destroy_tls(); } @@ -128,9 +132,9 @@ impl Thread { } } -pub fn available_parallelism() -> io::Result<NonZeroUsize> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { // We're unicore right now. - Ok(unsafe { NonZeroUsize::new_unchecked(1) }) + Ok(unsafe { NonZero::new_unchecked(1) }) } pub mod guard { diff --git a/library/std/src/sys/pal/xous/thread_local_key.rs b/library/std/src/sys/pal/xous/thread_local_key.rs index 3771ea65700..59a668c3df6 100644 --- a/library/std/src/sys/pal/xous/thread_local_key.rs +++ b/library/std/src/sys/pal/xous/thread_local_key.rs @@ -23,10 +23,25 @@ pub type Dtor = unsafe extern "C" fn(*mut u8); const TLS_MEMORY_SIZE: usize = 4096; -/// TLS keys start at `1` to mimic POSIX. +/// TLS keys start at `1`. Index `0` is unused +#[cfg(not(test))] +#[export_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key13TLS_KEY_INDEXE"] static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1); -fn tls_ptr_addr() -> *mut usize { +#[cfg(not(test))] +#[export_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key9DTORSE"] +static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut()); + +#[cfg(test)] +extern "Rust" { + #[link_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key13TLS_KEY_INDEXE"] + static TLS_KEY_INDEX: AtomicUsize; + + #[link_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key9DTORSE"] + static DTORS: AtomicPtr<Node>; +} + +fn tls_ptr_addr() -> *mut *mut u8 { let mut tp: usize; unsafe { asm!( @@ -34,50 +49,50 @@ fn tls_ptr_addr() -> *mut usize { out(reg) tp, ); } - core::ptr::from_exposed_addr_mut::<usize>(tp) + core::ptr::from_exposed_addr_mut::<*mut u8>(tp) } /// Create an area of memory that's unique per thread. This area will /// contain all thread local pointers. -fn tls_ptr() -> *mut usize { - let mut tp = tls_ptr_addr(); +fn tls_table() -> &'static mut [*mut u8] { + let tp = tls_ptr_addr(); + if !tp.is_null() { + return unsafe { + core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / core::mem::size_of::<*mut u8>()) + }; + } // If the TP register is `0`, then this thread hasn't initialized // its TLS yet. Allocate a new page to store this memory. 
- if tp.is_null() { - tp = unsafe { - map_memory( - None, - None, - TLS_MEMORY_SIZE / core::mem::size_of::<usize>(), - MemoryFlags::R | MemoryFlags::W, - ) - } + let tp = unsafe { + map_memory( + None, + None, + TLS_MEMORY_SIZE / core::mem::size_of::<*mut u8>(), + MemoryFlags::R | MemoryFlags::W, + ) .expect("Unable to allocate memory for thread local storage") - .as_mut_ptr(); + }; - unsafe { - // Key #0 is currently unused. - (tp).write_volatile(0); + for val in tp.iter() { + assert!(*val as usize == 0); + } - // Set the thread's `$tp` register - asm!( - "mv tp, {}", - in(reg) tp as usize, - ); - } + unsafe { + // Set the thread's `$tp` register + asm!( + "mv tp, {}", + in(reg) tp.as_mut_ptr() as usize, + ); } tp } -/// Allocate a new TLS key. These keys are shared among all threads. -fn tls_alloc() -> usize { - TLS_KEY_INDEX.fetch_add(1, SeqCst) -} - #[inline] pub unsafe fn create(dtor: Option<Dtor>) -> Key { - let key = tls_alloc(); + // Allocate a new TLS key. These keys are shared among all threads. + #[allow(unused_unsafe)] + let key = unsafe { TLS_KEY_INDEX.fetch_add(1, SeqCst) }; if let Some(f) = dtor { unsafe { register_dtor(key, f) }; } @@ -87,18 +102,20 @@ pub unsafe fn create(dtor: Option<Dtor>) -> Key { #[inline] pub unsafe fn set(key: Key, value: *mut u8) { assert!((key < 1022) && (key >= 1)); - unsafe { tls_ptr().add(key).write_volatile(value as usize) }; + let table = tls_table(); + table[key] = value; } #[inline] pub unsafe fn get(key: Key) -> *mut u8 { assert!((key < 1022) && (key >= 1)); - core::ptr::from_exposed_addr_mut::<u8>(unsafe { tls_ptr().add(key).read_volatile() }) + tls_table()[key] } #[inline] pub unsafe fn destroy(_key: Key) { - panic!("can't destroy keys on Xous"); + // Just leak the key. Probably not great on long-running systems that create + // lots of TLS variables, but in practice that's not an issue. } // ------------------------------------------------------------------------- @@ -127,8 +144,6 @@ pub unsafe fn destroy(_key: Key) { // key but also a slot for the destructor queue on windows. An optimization for // another day! -static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut()); - struct Node { dtor: Dtor, key: Key, @@ -138,10 +153,12 @@ struct Node { unsafe fn register_dtor(key: Key, dtor: Dtor) { let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() })); - let mut head = DTORS.load(SeqCst); + #[allow(unused_unsafe)] + let mut head = unsafe { DTORS.load(SeqCst) }; loop { node.next = head; - match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) { + #[allow(unused_unsafe)] + match unsafe { DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) } { Ok(_) => return, // nothing to drop, we successfully added the node to the list Err(cur) => head = cur, } @@ -155,6 +172,7 @@ pub unsafe fn destroy_tls() { if tp.is_null() { return; } + unsafe { run_dtors() }; // Finally, free the TLS array @@ -169,12 +187,19 @@ pub unsafe fn destroy_tls() { unsafe fn run_dtors() { let mut any_run = true; + + // Run the destructor "some" number of times. This is 5x on Windows, + // so we copy it here. This allows TLS variables to create new + // TLS variables upon destruction that will also get destroyed. + // Keep going until we run out of tries or until we have nothing + // left to destroy. 
for _ in 0..5 { if !any_run { break; } any_run = false; - let mut cur = DTORS.load(SeqCst); + #[allow(unused_unsafe)] + let mut cur = unsafe { DTORS.load(SeqCst) }; while !cur.is_null() { let ptr = unsafe { get((*cur).key) }; diff --git a/library/std/src/sys/pal/xous/thread_parking.rs b/library/std/src/sys/pal/xous/thread_parking.rs index aa39c6d2718..0bd0462d77d 100644 --- a/library/std/src/sys/pal/xous/thread_parking.rs +++ b/library/std/src/sys/pal/xous/thread_parking.rs @@ -29,31 +29,40 @@ impl Parker { // Change NOTIFIED to EMPTY and EMPTY to PARKED. let state = self.state.fetch_sub(1, Acquire); if state == NOTIFIED { + // The state has gone from NOTIFIED (1) to EMPTY (0) return; } + // The state has gone from EMPTY (0) to PARKED (-1) + assert!(state == EMPTY); - // The state was set to PARKED. Wait until the `unpark` wakes us up. + // The state is now PARKED (-1). Wait until the `unpark` wakes us up. blocking_scalar( ticktimer_server(), TicktimerScalar::WaitForCondition(self.index(), 0).into(), ) .expect("failed to send WaitForCondition command"); - self.state.swap(EMPTY, Acquire); + let state = self.state.swap(EMPTY, Acquire); + assert!(state == NOTIFIED || state == PARKED); } pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) { // Change NOTIFIED to EMPTY and EMPTY to PARKED. let state = self.state.fetch_sub(1, Acquire); if state == NOTIFIED { + // The state has gone from NOTIFIED (1) to EMPTY (0) return; } + // The state has gone from EMPTY (0) to PARKED (-1) + assert!(state == EMPTY); // A value of zero indicates an indefinite wait. Clamp the number of // milliseconds to the allowed range. let millis = usize::max(timeout.as_millis().try_into().unwrap_or(usize::MAX), 1); - let was_timeout = blocking_scalar( + // The state is now PARKED (-1). Wait until the `unpark` wakes us up, + // or things time out. + let _was_timeout = blocking_scalar( ticktimer_server(), TicktimerScalar::WaitForCondition(self.index(), millis).into(), ) @@ -61,28 +70,37 @@ impl Parker { != 0; let state = self.state.swap(EMPTY, Acquire); - if was_timeout && state == NOTIFIED { - // The state was set to NOTIFIED after we returned from the wait - // but before we reset the state. Therefore, a wakeup is on its - // way, which we need to consume here. - // NOTICE: this is a priority hole. - blocking_scalar( - ticktimer_server(), - TicktimerScalar::WaitForCondition(self.index(), 0).into(), - ) - .expect("failed to send WaitForCondition command"); - } + assert!(state == PARKED || state == NOTIFIED); } pub fn unpark(self: Pin<&Self>) { - let state = self.state.swap(NOTIFIED, Release); - if state == PARKED { - // The thread is parked, wake it up. - blocking_scalar( - ticktimer_server(), - TicktimerScalar::NotifyCondition(self.index(), 1).into(), - ) - .expect("failed to send NotifyCondition command"); + // If the state is already `NOTIFIED`, then another thread has + // indicated it wants to wake up the target thread. + // + // If the state is `EMPTY` then there is nothing to wake up, and + // the target thread will immediately exit from `park()` the + // next time that function is called. + if self.state.swap(NOTIFIED, Release) != PARKED { + return; + } + + // The thread is parked, wake it up. Keep trying until we wake something up. + // This will happen when the `NotifyCondition` call returns the fact that + // 1 condition was notified. + // Alternately, keep going until the state is seen as `EMPTY`, indicating + // the thread woke up and kept going. 
This can happen when the Park + // times out before we can send the NotifyCondition message. + while blocking_scalar( + ticktimer_server(), + TicktimerScalar::NotifyCondition(self.index(), 1).into(), + ) + .expect("failed to send NotifyCondition command")[0] + == 0 + && self.state.load(Acquire) != EMPTY + { + // The target thread hasn't yet hit the `WaitForCondition` call. + // Yield to let the target thread run some more. + crate::thread::yield_now(); } } } diff --git a/library/std/src/sys/pal/zkvm/abi.rs b/library/std/src/sys/pal/zkvm/abi.rs new file mode 100644 index 00000000000..53332d90e02 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/abi.rs @@ -0,0 +1,55 @@ +//! ABI definitions for symbols exported by risc0-zkvm-platform. + +// Included here so we don't have to depend on risc0-zkvm-platform. +// +// FIXME: Should we move this to the "libc" crate? It seems like other +// architectures put a lot of this kind of stuff there. But there's +// currently no risc0 fork of the libc crate, so we'd either have to +// fork it or upstream it. + +#![allow(dead_code)] +pub const DIGEST_WORDS: usize = 8; + +/// Standard IO file descriptors for use with sys_read and sys_write. +pub mod fileno { + pub const STDIN: u32 = 0; + pub const STDOUT: u32 = 1; + pub const STDERR: u32 = 2; + pub const JOURNAL: u32 = 3; +} + +extern "C" { + // Wrappers around syscalls provided by risc0-zkvm-platform: + pub fn sys_halt(); + pub fn sys_output(output_id: u32, output_value: u32); + pub fn sys_sha_compress( + out_state: *mut [u32; DIGEST_WORDS], + in_state: *const [u32; DIGEST_WORDS], + block1_ptr: *const [u32; DIGEST_WORDS], + block2_ptr: *const [u32; DIGEST_WORDS], + ); + pub fn sys_sha_buffer( + out_state: *mut [u32; DIGEST_WORDS], + in_state: *const [u32; DIGEST_WORDS], + buf: *const u8, + count: u32, + ); + pub fn sys_rand(recv_buf: *mut u32, words: usize); + pub fn sys_panic(msg_ptr: *const u8, len: usize) -> !; + pub fn sys_log(msg_ptr: *const u8, len: usize); + pub fn sys_cycle_count() -> usize; + pub fn sys_read(fd: u32, recv_buf: *mut u8, nrequested: usize) -> usize; + pub fn sys_write(fd: u32, write_buf: *const u8, nbytes: usize); + pub fn sys_getenv( + recv_buf: *mut u32, + words: usize, + varname: *const u8, + varname_len: usize, + ) -> usize; + pub fn sys_argc() -> usize; + pub fn sys_argv(out_words: *mut u32, out_nwords: usize, arg_index: usize) -> usize; + + // Allocate memory from global HEAP. 
+ pub fn sys_alloc_words(nwords: usize) -> *mut u32; + pub fn sys_alloc_aligned(nwords: usize, align: usize) -> *mut u8; +} diff --git a/library/std/src/sys/pal/zkvm/alloc.rs b/library/std/src/sys/pal/zkvm/alloc.rs new file mode 100644 index 00000000000..fd333f12151 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/alloc.rs @@ -0,0 +1,15 @@ +use super::abi; +use crate::alloc::{GlobalAlloc, Layout, System}; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + abi::sys_alloc_aligned(layout.size(), layout.align()) + } + + #[inline] + unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) { + // this allocator never deallocates memory + } +} diff --git a/library/std/src/sys/pal/zkvm/args.rs b/library/std/src/sys/pal/zkvm/args.rs new file mode 100644 index 00000000000..7753cf63840 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/args.rs @@ -0,0 +1,80 @@ +use super::{abi, WORD_SIZE}; +use crate::ffi::OsString; +use crate::fmt; +use crate::sys_common::FromInner; + +pub struct Args { + i_forward: usize, + i_back: usize, + count: usize, +} + +pub fn args() -> Args { + let count = unsafe { abi::sys_argc() }; + Args { i_forward: 0, i_back: 0, count } +} + +impl Args { + /// Use sys_argv to get the arg at the requested index. Does not check that i is less than argc + /// and will not return if the index is out of bounds. + fn argv(i: usize) -> OsString { + let arg_len = unsafe { abi::sys_argv(crate::ptr::null_mut(), 0, i) }; + + let arg_len_words = (arg_len + WORD_SIZE - 1) / WORD_SIZE; + let words = unsafe { abi::sys_alloc_words(arg_len_words) }; + + let arg_len2 = unsafe { abi::sys_argv(words, arg_len_words, i) }; + debug_assert_eq!(arg_len, arg_len2); + + // Convert to OsString. + // + // FIXME: We can probably get rid of the extra copy here if we + // reimplement "os_str" instead of just using the generic unix + // "os_str". 
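A sketch of the call-twice pattern `argv` uses above and `getenv` repeats later: the first call passes a null buffer and yields the byte length, the buffer is then sized up to whole 32-bit words, and a second call fills it. `sys_query` is a hypothetical stand-in for the `abi::sys_argv`/`abi::sys_getenv` pair of calls.

    const WORD_SIZE: usize = core::mem::size_of::<u32>();

    unsafe fn fetch(sys_query: unsafe fn(*mut u32, usize) -> usize) -> Vec<u8> {
        // First call: learn how many bytes the host wants to hand back.
        let nbytes = unsafe { sys_query(core::ptr::null_mut(), 0) };
        // Round up to whole words, as the word-oriented allocator requires.
        let nwords = (nbytes + WORD_SIZE - 1) / WORD_SIZE;
        let mut words = vec![0u32; nwords];
        // Second call: fill the buffer; the reported length must not change.
        let nbytes2 = unsafe { sys_query(words.as_mut_ptr(), nwords) };
        debug_assert_eq!(nbytes, nbytes2);
        // Reinterpret the word buffer as bytes, truncated to the real length,
        // and copy it out (the FIXME above notes this extra copy).
        let bytes: &[u8] = unsafe { core::slice::from_raw_parts(words.as_ptr().cast(), nbytes) };
        bytes.to_vec()
    }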
+ let arg_bytes: &[u8] = + unsafe { crate::slice::from_raw_parts(words.cast() as *const u8, arg_len) }; + OsString::from_inner(super::os_str::Buf { inner: arg_bytes.to_vec() }) + } +} + +impl fmt::Debug for Args { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().finish() + } +} + +impl Iterator for Args { + type Item = OsString; + + fn next(&mut self) -> Option<OsString> { + if self.i_forward >= self.count - self.i_back { + None + } else { + let arg = Self::argv(self.i_forward); + self.i_forward += 1; + Some(arg) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.count, Some(self.count)) + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.count + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + if self.i_back >= self.count - self.i_forward { + None + } else { + let arg = Self::argv(self.count - 1 - self.i_back); + self.i_back += 1; + Some(arg) + } + } +} diff --git a/library/std/src/sys/pal/zkvm/env.rs b/library/std/src/sys/pal/zkvm/env.rs new file mode 100644 index 00000000000..b85153642b1 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = ""; + pub const OS: &str = ""; + pub const DLL_PREFIX: &str = ""; + pub const DLL_SUFFIX: &str = ".elf"; + pub const DLL_EXTENSION: &str = "elf"; + pub const EXE_SUFFIX: &str = ".elf"; + pub const EXE_EXTENSION: &str = "elf"; +} diff --git a/library/std/src/sys/pal/zkvm/mod.rs b/library/std/src/sys/pal/zkvm/mod.rs new file mode 100644 index 00000000000..016c977dc33 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/mod.rs @@ -0,0 +1,87 @@ +//! System bindings for the risc0 zkvm platform +//! +//! This module contains the facade (aka platform-specific) implementations of +//! OS level functionality for zkvm. +//! +//! This is all super highly experimental and not actually intended for +//! wide/production use yet, it's still all in the experimental category. This +//! will likely change over time. + +const WORD_SIZE: usize = core::mem::size_of::<u32>(); + +pub mod alloc; +#[path = "../zkvm/args.rs"] +pub mod args; +#[path = "../unix/cmath.rs"] +pub mod cmath; +pub mod env; +#[path = "../unsupported/fs.rs"] +pub mod fs; +#[path = "../unsupported/io.rs"] +pub mod io; +#[path = "../unsupported/net.rs"] +pub mod net; +#[path = "../unsupported/once.rs"] +pub mod once; +pub mod os; +#[path = "../unsupported/pipe.rs"] +pub mod pipe; +#[path = "../unsupported/process.rs"] +pub mod process; +pub mod stdio; +pub mod thread_local_key; +#[path = "../unsupported/time.rs"] +pub mod time; + +#[path = "../unsupported/thread.rs"] +pub mod thread; + +#[path = "../unsupported/thread_parking.rs"] +pub mod thread_parking; + +mod abi; + +use crate::io as std_io; + +pub mod memchr { + pub use core::slice::memchr::{memchr, memrchr}; +} + +// SAFETY: must be called only once during runtime initialization. +// NOTE: this is not guaranteed to run, for example when Rust code is called externally. +pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {} + +// SAFETY: must be called only once during runtime cleanup. +// NOTE: this is not guaranteed to run, for example when the program aborts. 
+pub unsafe fn cleanup() {} + +pub fn unsupported<T>() -> std_io::Result<T> { + Err(unsupported_err()) +} + +pub fn unsupported_err() -> std_io::Error { + std_io::const_io_error!( + std_io::ErrorKind::Unsupported, + "operation not supported on this platform", + ) +} + +pub fn is_interrupted(_code: i32) -> bool { + false +} + +pub fn decode_error_kind(_code: i32) -> crate::io::ErrorKind { + crate::io::ErrorKind::Uncategorized +} + +pub fn abort_internal() -> ! { + core::intrinsics::abort(); +} + +pub fn hashmap_random_keys() -> (u64, u64) { + let mut buf = [0u32; 4]; + unsafe { + abi::sys_rand(buf.as_mut_ptr(), 4); + }; + ((buf[0] as u64) << 32 + buf[1] as u64, (buf[2] as u64) << 32 + buf[3] as u64) +} diff --git a/library/std/src/sys/pal/zkvm/os.rs b/library/std/src/sys/pal/zkvm/os.rs new file mode 100644 index 00000000000..d8739ee3824 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/os.rs @@ -0,0 +1,139 @@ +use super::{abi, unsupported, WORD_SIZE}; +use crate::error::Error as StdError; +use crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::marker::PhantomData; +use crate::path::{self, PathBuf}; +use crate::sys_common::FromInner; + +pub fn errno() -> i32 { + 0 +} + +pub fn error_string(_errno: i32) -> String { + "operation successful".to_string() +} + +pub fn getcwd() -> io::Result<PathBuf> { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + unsupported() +} + +pub struct SplitPaths<'a>(!, PhantomData<&'a ()>); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + self.0 + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported on this platform yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported on this platform yet" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + unsupported() +} + +pub struct Env(!); + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + self.0 + } +} + +pub fn env() -> Env { + panic!("not supported on this platform") +} + +impl Env { + pub fn str_debug(&self) -> impl fmt::Debug + '_ { + let Self(inner) = self; + match *inner {} + } +} + +impl fmt::Debug for Env { + fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { + let Self(inner) = self; + match *inner {} + } +} + +pub fn getenv(varname: &OsStr) -> Option<OsString> { + let varname = varname.as_encoded_bytes(); + let nbytes = + unsafe { abi::sys_getenv(crate::ptr::null_mut(), 0, varname.as_ptr(), varname.len()) }; + if nbytes == usize::MAX { + return None; + } + + let nwords = (nbytes + WORD_SIZE - 1) / WORD_SIZE; + let words = unsafe { abi::sys_alloc_words(nwords) }; + + let nbytes2 = unsafe { abi::sys_getenv(words, nwords, varname.as_ptr(), varname.len()) }; + debug_assert_eq!(nbytes, nbytes2); + + // Convert to OsString. + // + // FIXME: We can probably get rid of the extra copy here if we + // reimplement "os_str" instead of just using the generic unix + // "os_str". 
+ let u8s: &[u8] = unsafe { crate::slice::from_raw_parts(words.cast() as *const u8, nbytes) }; + Some(OsString::from_inner(super::os_str::Buf { inner: u8s.to_vec() })) +} + +pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> { + Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform")) +} + +pub fn unsetenv(_: &OsStr) -> io::Result<()> { + Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform")) +} + +pub fn temp_dir() -> PathBuf { + panic!("no filesystem on this platform") +} + +pub fn home_dir() -> Option<PathBuf> { + None +} + +pub fn exit(_code: i32) -> ! { + crate::intrinsics::abort() +} + +pub fn getpid() -> u32 { + panic!("no pids on this platform") +} diff --git a/library/std/src/sys/pal/zkvm/stdio.rs b/library/std/src/sys/pal/zkvm/stdio.rs new file mode 100644 index 00000000000..e771ed0de28 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/stdio.rs @@ -0,0 +1,64 @@ +use super::{abi, abi::fileno}; +use crate::io; + +pub struct Stdin; +pub struct Stdout; +pub struct Stderr; + +impl Stdin { + pub const fn new() -> Stdin { + Stdin + } +} + +impl io::Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + Ok(unsafe { abi::sys_read(fileno::STDIN, buf.as_mut_ptr(), buf.len()) }) + } +} + +impl Stdout { + pub const fn new() -> Stdout { + Stdout + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + unsafe { abi::sys_write(fileno::STDOUT, buf.as_ptr(), buf.len()) } + + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub const fn new() -> Stderr { + Stderr + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + unsafe { abi::sys_write(fileno::STDERR, buf.as_ptr(), buf.len()) } + + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; + +pub fn is_ebadf(_err: &io::Error) -> bool { + true +} + +pub fn panic_output() -> Option<impl io::Write> { + Some(Stderr::new()) +} diff --git a/library/std/src/sys/pal/zkvm/thread_local_key.rs b/library/std/src/sys/pal/zkvm/thread_local_key.rs new file mode 100644 index 00000000000..3ffe6247344 --- /dev/null +++ b/library/std/src/sys/pal/zkvm/thread_local_key.rs @@ -0,0 +1,23 @@ +use crate::alloc::{alloc, Layout}; + +pub type Key = usize; + +#[inline] +pub unsafe fn create(_dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + alloc(Layout::new::<*mut u8>()) as _ +} + +#[inline] +pub unsafe fn set(key: Key, value: *mut u8) { + let key: *mut *mut u8 = core::ptr::from_exposed_addr_mut(key); + *key = value; +} + +#[inline] +pub unsafe fn get(key: Key) -> *mut u8 { + let key: *mut *mut u8 = core::ptr::from_exposed_addr_mut(key); + *key +} + +#[inline] +pub unsafe fn destroy(_key: Key) {} diff --git a/library/std/src/sys/path/mod.rs b/library/std/src/sys/path/mod.rs new file mode 100644 index 00000000000..24a94ec7828 --- /dev/null +++ b/library/std/src/sys/path/mod.rs @@ -0,0 +1,18 @@ +cfg_if::cfg_if! 
{ + if #[cfg(target_os = "windows")] { + mod windows; + pub use windows::*; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use sgx::*; + } else if #[cfg(any( + target_os = "uefi", + target_os = "solid_asp3", + ))] { + mod unsupported_backslash; + pub use unsupported_backslash::*; + } else { + mod unix; + pub use unix::*; + } +} diff --git a/library/std/src/sys/pal/sgx/path.rs b/library/std/src/sys/path/sgx.rs index c805c15e702..c805c15e702 100644 --- a/library/std/src/sys/pal/sgx/path.rs +++ b/library/std/src/sys/path/sgx.rs diff --git a/library/std/src/sys/pal/unix/path.rs b/library/std/src/sys/path/unix.rs index 837f68d3eaf..837f68d3eaf 100644 --- a/library/std/src/sys/pal/unix/path.rs +++ b/library/std/src/sys/path/unix.rs diff --git a/library/std/src/sys/pal/solid/path.rs b/library/std/src/sys/path/unsupported_backslash.rs index 7045c9be25b..7045c9be25b 100644 --- a/library/std/src/sys/pal/solid/path.rs +++ b/library/std/src/sys/path/unsupported_backslash.rs diff --git a/library/std/src/sys/pal/windows/path.rs b/library/std/src/sys/path/windows.rs index d9684f21753..cebc7910231 100644 --- a/library/std/src/sys/pal/windows/path.rs +++ b/library/std/src/sys/path/windows.rs @@ -1,8 +1,8 @@ -use super::{c, fill_utf16_buf, to_u16s}; use crate::ffi::{OsStr, OsString}; use crate::io; use crate::path::{Path, PathBuf, Prefix}; use crate::ptr; +use crate::sys::pal::{c, fill_utf16_buf, os2path, to_u16s}; #[cfg(test)] mod tests; @@ -339,6 +339,6 @@ pub(crate) fn absolute(path: &Path) -> io::Result<PathBuf> { // `lpfilename` is a pointer to a null terminated string that is not // invalidated until after `GetFullPathNameW` returns successfully. |buffer, size| unsafe { c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()) }, - super::os2path, + os2path, ) } diff --git a/library/std/src/sys/pal/windows/path/tests.rs b/library/std/src/sys/path/windows/tests.rs index 623c6236166..623c6236166 100644 --- a/library/std/src/sys/pal/windows/path/tests.rs +++ b/library/std/src/sys/path/windows/tests.rs diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs index adfe721cfa9..67711dbd5bc 100644 --- a/library/std/src/sys_common/backtrace.rs +++ b/library/std/src/sys_common/backtrace.rs @@ -218,7 +218,7 @@ pub fn output_filename( #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] pub fn set_image_base() { let image_base = crate::os::fortanix_sgx::mem::image_base(); - backtrace_rs::set_image_base(crate::ptr::invalid_mut(image_base as _)); + backtrace_rs::set_image_base(crate::ptr::without_provenance_mut(image_base as _)); } #[cfg(not(all(target_vendor = "fortanix", target_env = "sgx")))] diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index 851832a377c..01f83ecb414 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -59,12 +59,14 @@ cfg_if::cfg_if! 
{ /// A trait for viewing representations from std types #[doc(hidden)] +#[allow(dead_code)] // not used on all platforms pub trait AsInner<Inner: ?Sized> { fn as_inner(&self) -> &Inner; } /// A trait for viewing representations from std types #[doc(hidden)] +#[allow(dead_code)] // not used on all platforms pub trait AsInnerMut<Inner: ?Sized> { fn as_inner_mut(&mut self) -> &mut Inner; } diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs index 8712bd2eca7..de7d31baaaf 100644 --- a/library/std/src/sys_common/net.rs +++ b/library/std/src/sys_common/net.rs @@ -199,7 +199,7 @@ impl<'a> TryFrom<(&'a str, u16)> for LookupHost { fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> { init(); - run_with_cstr(host.as_bytes(), |c_host| { + run_with_cstr(host.as_bytes(), &|c_host| { let mut hints: c::addrinfo = unsafe { mem::zeroed() }; hints.ai_socktype = c::SOCK_STREAM; let mut res = ptr::null_mut(); diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs index 359697d8313..ec57568c54c 100644 --- a/library/std/src/sys_common/once/mod.rs +++ b/library/std/src/sys_common/once/mod.rs @@ -25,6 +25,7 @@ cfg_if::cfg_if! { target_family = "unix", all(target_vendor = "fortanix", target_env = "sgx"), target_os = "solid_asp3", + target_os = "xous", ))] { mod queue; pub use queue::{Once, OnceState}; diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs index def0bcd6fac..3cc1df113e3 100644 --- a/library/std/src/sys_common/once/queue.rs +++ b/library/std/src/sys_common/once/queue.rs @@ -110,7 +110,7 @@ impl Once { #[inline] #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] pub const fn new() -> Once { - Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) } + Once { state_and_queue: AtomicPtr::new(ptr::without_provenance_mut(INCOMPLETE)) } } #[inline] @@ -158,7 +158,7 @@ impl Once { // Try to register this thread as the one RUNNING. let exchange_result = self.state_and_queue.compare_exchange( state_and_queue, - ptr::invalid_mut(RUNNING), + ptr::without_provenance_mut(RUNNING), Ordering::Acquire, Ordering::Acquire, ); @@ -170,14 +170,14 @@ impl Once { // wake them up on drop. let mut waiter_queue = WaiterQueue { state_and_queue: &self.state_and_queue, - set_state_on_drop_to: ptr::invalid_mut(POISONED), + set_state_on_drop_to: ptr::without_provenance_mut(POISONED), }; // Run the initialization function, letting it know if we're // poisoned or not. 
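The `ptr::without_provenance_mut`/`addr` pairing used throughout this hunk stores a small state integer in the `AtomicPtr` without ever creating a dereferenceable pointer. A minimal sketch of that round trip, with illustrative state values rather than the real `Once` constants, and without the waiter-queue pointer that the real `state_and_queue` word also carries; the `OnceState` construction that follows reads the state back the same way, via `addr()`.

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    const INCOMPLETE: usize = 0;
    const RUNNING: usize = 1;

    // The state lives purely in the pointer's address; it is never dereferenced,
    // so a provenance-free pointer is exactly the right tool.
    static STATE: AtomicPtr<()> = AtomicPtr::new(ptr::without_provenance_mut(INCOMPLETE));

    fn try_begin() -> bool {
        STATE
            .compare_exchange(
                ptr::without_provenance_mut(INCOMPLETE),
                ptr::without_provenance_mut(RUNNING),
                Ordering::Acquire,
                Ordering::Acquire,
            )
            .is_ok()
    }

    fn is_running() -> bool {
        STATE.load(Ordering::Acquire).addr() == RUNNING
    }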
let init_state = public::OnceState { inner: OnceState { poisoned: state_and_queue.addr() == POISONED, - set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)), + set_state_on_drop_to: Cell::new(ptr::without_provenance_mut(COMPLETE)), }, }; init(&init_state); @@ -289,6 +289,6 @@ impl OnceState { #[inline] pub fn poison(&self) { - self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED)); + self.set_state_on_drop_to.set(ptr::without_provenance_mut(POISONED)); } } diff --git a/library/std/src/sys_common/thread_local_key/tests.rs b/library/std/src/sys_common/thread_local_key/tests.rs index 6a44c65d918..48bed31af51 100644 --- a/library/std/src/sys_common/thread_local_key/tests.rs +++ b/library/std/src/sys_common/thread_local_key/tests.rs @@ -9,8 +9,8 @@ fn statik() { unsafe { assert!(K1.get().is_null()); assert!(K2.get().is_null()); - K1.set(ptr::invalid_mut(1)); - K2.set(ptr::invalid_mut(2)); + K1.set(ptr::without_provenance_mut(1)); + K2.set(ptr::without_provenance_mut(2)); assert_eq!(K1.get() as usize, 1); assert_eq!(K2.get() as usize, 2); } diff --git a/library/std/src/sys_common/wstr.rs b/library/std/src/sys_common/wstr.rs index b230fd1a829..8eae1606485 100644 --- a/library/std/src/sys_common/wstr.rs +++ b/library/std/src/sys_common/wstr.rs @@ -2,7 +2,7 @@ #![allow(dead_code)] use crate::marker::PhantomData; -use crate::num::NonZeroU16; +use crate::num::NonZero; use crate::ptr::NonNull; /// A safe iterator over a LPWSTR @@ -23,15 +23,15 @@ impl WStrUnits<'_> { Some(Self { lpwstr: NonNull::new(lpwstr as _)?, lifetime: PhantomData }) } - pub fn peek(&self) -> Option<NonZeroU16> { + pub fn peek(&self) -> Option<NonZero<u16>> { // SAFETY: It's always safe to read the current item because we don't // ever move out of the array's bounds. - unsafe { NonZeroU16::new(*self.lpwstr.as_ptr()) } + unsafe { NonZero::new(*self.lpwstr.as_ptr()) } } /// Advance the iterator while `predicate` returns true. /// Returns the number of items it advanced by. - pub fn advance_while<P: FnMut(NonZeroU16) -> bool>(&mut self, mut predicate: P) -> usize { + pub fn advance_while<P: FnMut(NonZero<u16>) -> bool>(&mut self, mut predicate: P) -> usize { let mut counter = 0; while let Some(w) = self.peek() { if !predicate(w) { @@ -46,8 +46,9 @@ impl WStrUnits<'_> { impl Iterator for WStrUnits<'_> { // This can never return zero as that marks the end of the string. - type Item = NonZeroU16; - fn next(&mut self) -> Option<NonZeroU16> { + type Item = NonZero<u16>; + + fn next(&mut self) -> Option<Self::Item> { // SAFETY: If NULL is reached we immediately return. // Therefore it's safe to advance the pointer after that. unsafe { diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs index 67db5ebd89c..2dbd19d7171 100644 --- a/library/std/src/sys_common/wtf8.rs +++ b/library/std/src/sys_common/wtf8.rs @@ -885,15 +885,43 @@ fn decode_surrogate_pair(lead: u16, trail: u16) -> char { unsafe { char::from_u32_unchecked(code_point) } } -/// Copied from core::str::StrPrelude::is_char_boundary +/// Copied from str::is_char_boundary #[inline] pub fn is_code_point_boundary(slice: &Wtf8, index: usize) -> bool { - if index == slice.len() { + if index == 0 { return true; } match slice.bytes.get(index) { - None => false, - Some(&b) => b < 128 || b >= 192, + None => index == slice.len(), + Some(&b) => (b as i8) >= -0x40, + } +} + +/// Verify that `index` is at the edge of either a valid UTF-8 codepoint +/// (i.e. a codepoint that's not a surrogate) or of the whole string. 
+/// +/// These are the cases currently permitted by `OsStr::slice_encoded_bytes`. +/// Splitting between surrogates is valid as far as WTF-8 is concerned, but +/// we do not permit it in the public API because WTF-8 is considered an +/// implementation detail. +#[track_caller] +#[inline] +pub fn check_utf8_boundary(slice: &Wtf8, index: usize) { + if index == 0 { + return; + } + match slice.bytes.get(index) { + Some(0xED) => (), // Might be a surrogate + Some(&b) if (b as i8) >= -0x40 => return, + Some(_) => panic!("byte index {index} is not a codepoint boundary"), + None if index == slice.len() => return, + None => panic!("byte index {index} is out of bounds"), + } + if slice.bytes[index + 1] >= 0xA0 { + // There's a surrogate after index. Now check before index. + if index >= 3 && slice.bytes[index - 3] == 0xED && slice.bytes[index - 2] >= 0xA0 { + panic!("byte index {index} lies between surrogate codepoints"); + } } } diff --git a/library/std/src/sys_common/wtf8/tests.rs b/library/std/src/sys_common/wtf8/tests.rs index 28a426648e5..6a1cc41a8fb 100644 --- a/library/std/src/sys_common/wtf8/tests.rs +++ b/library/std/src/sys_common/wtf8/tests.rs @@ -663,3 +663,65 @@ fn wtf8_to_owned() { assert_eq!(string.bytes, b"\xED\xA0\x80"); assert!(!string.is_known_utf8); } + +#[test] +fn wtf8_valid_utf8_boundaries() { + let mut string = Wtf8Buf::from_str("aé 💩"); + string.push(CodePoint::from_u32(0xD800).unwrap()); + string.push(CodePoint::from_u32(0xD800).unwrap()); + check_utf8_boundary(&string, 0); + check_utf8_boundary(&string, 1); + check_utf8_boundary(&string, 3); + check_utf8_boundary(&string, 4); + check_utf8_boundary(&string, 8); + check_utf8_boundary(&string, 14); + assert_eq!(string.len(), 14); + + string.push_char('a'); + check_utf8_boundary(&string, 14); + check_utf8_boundary(&string, 15); + + let mut string = Wtf8Buf::from_str("a"); + string.push(CodePoint::from_u32(0xD800).unwrap()); + check_utf8_boundary(&string, 1); + + let mut string = Wtf8Buf::from_str("\u{D7FF}"); + string.push(CodePoint::from_u32(0xD800).unwrap()); + check_utf8_boundary(&string, 3); + + let mut string = Wtf8Buf::new(); + string.push(CodePoint::from_u32(0xD800).unwrap()); + string.push_char('\u{D7FF}'); + check_utf8_boundary(&string, 3); +} + +#[test] +#[should_panic(expected = "byte index 4 is out of bounds")] +fn wtf8_utf8_boundary_out_of_bounds() { + let string = Wtf8::from_str("aé"); + check_utf8_boundary(&string, 4); +} + +#[test] +#[should_panic(expected = "byte index 1 is not a codepoint boundary")] +fn wtf8_utf8_boundary_inside_codepoint() { + let string = Wtf8::from_str("é"); + check_utf8_boundary(&string, 1); +} + +#[test] +#[should_panic(expected = "byte index 1 is not a codepoint boundary")] +fn wtf8_utf8_boundary_inside_surrogate() { + let mut string = Wtf8Buf::new(); + string.push(CodePoint::from_u32(0xD800).unwrap()); + check_utf8_boundary(&string, 1); +} + +#[test] +#[should_panic(expected = "byte index 3 lies between surrogate codepoints")] +fn wtf8_utf8_boundary_between_surrogates() { + let mut string = Wtf8Buf::new(); + string.push(CodePoint::from_u32(0xD800).unwrap()); + string.push(CodePoint::from_u32(0xD800).unwrap()); + check_utf8_boundary(&string, 3); +} diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs index 338567777f7..d1213e2f166 100644 --- a/library/std/src/thread/local.rs +++ b/library/std/src/thread/local.rs @@ -180,18 +180,18 @@ impl<T: 'static> fmt::Debug for LocalKey<T> { #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), 
rustc_diagnostic_item = "thread_local_macro")] #[allow_internal_unstable(thread_local_internals)] -// FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_ref` lint -#[cfg_attr(not(bootstrap), allow(static_mut_ref))] +// FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_refs` lint +#[cfg_attr(not(bootstrap), allow(static_mut_refs))] macro_rules! thread_local { // empty (base case for the recursion) () => {}; - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = const { $init:expr }; $($rest:tt)*) => ( + ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = const $init:block; $($rest:tt)*) => ( $crate::thread::local_impl::thread_local_inner!($(#[$attr])* $vis $name, $t, const $init); $crate::thread_local!($($rest)*); ); - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = const { $init:expr }) => ( + ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = const $init:block) => ( $crate::thread::local_impl::thread_local_inner!($(#[$attr])* $vis $name, $t, const $init); ); diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 8498937809e..4f0f010984a 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -165,8 +165,7 @@ use crate::fmt; use crate::io; use crate::marker::PhantomData; use crate::mem::{self, forget}; -use crate::num::NonZeroU64; -use crate::num::NonZeroUsize; +use crate::num::{NonZero, NonZeroU64, NonZeroUsize}; use crate::panic; use crate::panicking; use crate::pin::Pin; @@ -1063,7 +1062,7 @@ pub fn park() { let guard = PanicGuard; // SAFETY: park_timeout is called on the parker owned by this thread. unsafe { - current().inner.as_ref().parker().park(); + current().park(); } // No panic occurred, do not abort. forget(guard); @@ -1166,7 +1165,7 @@ pub fn park_timeout(dur: Duration) { /// [`id`]: Thread::id #[stable(feature = "thread_id", since = "1.19.0")] #[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] -pub struct ThreadId(NonZeroU64); +pub struct ThreadId(NonZero<u64>); impl ThreadId { // Generate a new unique thread ID. @@ -1189,7 +1188,7 @@ impl ThreadId { }; match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) { - Ok(_) => return ThreadId(NonZeroU64::new(id).unwrap()), + Ok(_) => return ThreadId(NonZero::new(id).unwrap()), Err(id) => last = id, } } @@ -1208,7 +1207,7 @@ impl ThreadId { *counter = id; drop(counter); - ThreadId(NonZeroU64::new(id).unwrap()) + ThreadId(NonZero::new(id).unwrap()) } } } @@ -1290,6 +1289,15 @@ impl Thread { Thread { inner } } + /// Like the public [`park`], but callable on any handle. This is used to + /// allow parking in TLS destructors. + /// + /// # Safety + /// May only be called from the thread to which this handle belongs. + pub(crate) unsafe fn park(&self) { + unsafe { self.inner.as_ref().parker().park() } + } + /// Atomically makes the handle's token available if it is not already. /// /// Every thread is equipped with some basic low-level blocking support, via |

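The final hunks switch `ThreadId` from `NonZeroU64` to the generic `NonZero<u64>`; the ID itself still comes from a plain compare-and-swap counter, as the `compare_exchange_weak` loop above shows. A minimal sketch of that allocation scheme, with overflow handling simplified to a panic:

    use std::num::NonZero;
    use std::sync::atomic::{AtomicU64, Ordering::Relaxed};

    static COUNTER: AtomicU64 = AtomicU64::new(0);

    fn next_id() -> NonZero<u64> {
        let mut last = COUNTER.load(Relaxed);
        loop {
            let id = last.checked_add(1).expect("thread ID counter overflowed");
            match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) {
                // IDs start at 1, so the NonZero conversion cannot fail here.
                Ok(_) => return NonZero::new(id).unwrap(),
                Err(observed) => last = observed,
            }
        }
    }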