Diffstat (limited to 'library/std/src')
47 files changed, 894 insertions(+), 373 deletions(-)
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs index 05e9b2eb6bc..5cf6ec81789 100644 --- a/library/std/src/backtrace.rs +++ b/library/std/src/backtrace.rs @@ -9,12 +9,6 @@ //! implementing `std::error::Error`) to get a causal chain of where an error //! was generated. //! -//! > **Note**: this module is unstable and is designed in [RFC 2504], and you -//! > can learn more about its status in the [tracking issue]. -//! -//! [RFC 2504]: https://github.com/rust-lang/rfcs/blob/master/text/2504-fix-error.md -//! [tracking issue]: https://github.com/rust-lang/rust/issues/53487 -//! //! ## Accuracy //! //! Backtraces are attempted to be as accurate as possible, but no guarantees @@ -64,7 +58,7 @@ //! `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` at runtime might not actually change //! how backtraces are captured. -#![unstable(feature = "backtrace", issue = "53487")] +#![stable(feature = "backtrace", since = "1.65.0")] #[cfg(test)] mod tests; @@ -110,6 +104,7 @@ use crate::vec::Vec; /// previous point in time. In some instances the `Backtrace` type may /// internally be empty due to configuration. For more information see /// `Backtrace::capture`. +#[stable(feature = "backtrace", since = "1.65.0")] #[must_use] pub struct Backtrace { inner: Inner, @@ -117,17 +112,21 @@ pub struct Backtrace { /// The current status of a backtrace, indicating whether it was captured or /// whether it is empty for some other reason. +#[stable(feature = "backtrace", since = "1.65.0")] #[non_exhaustive] #[derive(Debug, PartialEq, Eq)] pub enum BacktraceStatus { /// Capturing a backtrace is not supported, likely because it's not /// implemented for the current platform. + #[stable(feature = "backtrace", since = "1.65.0")] Unsupported, /// Capturing a backtrace has been disabled through either the /// `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` environment variables. + #[stable(feature = "backtrace", since = "1.65.0")] Disabled, /// A backtrace has been captured and the `Backtrace` should print /// reasonable information when rendered. + #[stable(feature = "backtrace", since = "1.65.0")] Captured, } @@ -174,6 +173,7 @@ enum BytesOrWide { Wide(Vec<u16>), } +#[stable(feature = "backtrace", since = "1.65.0")] impl fmt::Debug for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let capture = match &self.inner { @@ -200,6 +200,7 @@ impl fmt::Debug for Backtrace { } } +#[unstable(feature = "backtrace_frames", issue = "79676")] impl fmt::Debug for BacktraceFrame { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut dbg = fmt.debug_list(); @@ -288,6 +289,7 @@ impl Backtrace { /// /// To forcibly capture a backtrace regardless of environment variables, use /// the `Backtrace::force_capture` function. + #[stable(feature = "backtrace", since = "1.65.0")] #[inline(never)] // want to make sure there's a frame here to remove pub fn capture() -> Backtrace { if !Backtrace::enabled() { @@ -306,6 +308,7 @@ impl Backtrace { /// Note that capturing a backtrace can be an expensive operation on some /// platforms, so this should be used with caution in performance-sensitive /// parts of code. + #[stable(feature = "backtrace", since = "1.65.0")] #[inline(never)] // want to make sure there's a frame here to remove pub fn force_capture() -> Backtrace { Backtrace::create(Backtrace::force_capture as usize) @@ -313,6 +316,8 @@ impl Backtrace { /// Forcibly captures a disabled backtrace, regardless of environment /// variable configuration. 
+ #[stable(feature = "backtrace", since = "1.65.0")] + #[rustc_const_stable(feature = "backtrace", since = "1.65.0")] pub const fn disabled() -> Backtrace { Backtrace { inner: Inner::Disabled } } @@ -356,6 +361,7 @@ impl Backtrace { /// Returns the status of this backtrace, indicating whether this backtrace /// request was unsupported, disabled, or a stack trace was actually /// captured. + #[stable(feature = "backtrace", since = "1.65.0")] #[must_use] pub fn status(&self) -> BacktraceStatus { match self.inner { @@ -375,6 +381,7 @@ impl<'a> Backtrace { } } +#[stable(feature = "backtrace", since = "1.65.0")] impl fmt::Display for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let capture = match &self.inner { diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs index 7ebc41588b3..cb3032719fa 100644 --- a/library/std/src/collections/hash/map/tests.rs +++ b/library/std/src/collections/hash/map/tests.rs @@ -268,10 +268,13 @@ fn test_lots_of_insertions() { // Try this a few times to make sure we never screw up the hashmap's // internal state. - for _ in 0..10 { + let loops = if cfg!(miri) { 2 } else { 10 }; + for _ in 0..loops { assert!(m.is_empty()); - for i in 1..1001 { + let count = if cfg!(miri) { 101 } else { 1001 }; + + for i in 1..count { assert!(m.insert(i, i).is_none()); for j in 1..=i { @@ -279,42 +282,42 @@ fn test_lots_of_insertions() { assert_eq!(r, Some(&j)); } - for j in i + 1..1001 { + for j in i + 1..count { let r = m.get(&j); assert_eq!(r, None); } } - for i in 1001..2001 { + for i in count..(2 * count) { assert!(!m.contains_key(&i)); } // remove forwards - for i in 1..1001 { + for i in 1..count { assert!(m.remove(&i).is_some()); for j in 1..=i { assert!(!m.contains_key(&j)); } - for j in i + 1..1001 { + for j in i + 1..count { assert!(m.contains_key(&j)); } } - for i in 1..1001 { + for i in 1..count { assert!(!m.contains_key(&i)); } - for i in 1..1001 { + for i in 1..count { assert!(m.insert(i, i).is_none()); } // remove backwards - for i in (1..1001).rev() { + for i in (1..count).rev() { assert!(m.remove(&i).is_some()); - for j in i..1001 { + for j in i..count { assert!(!m.contains_key(&j)); } @@ -817,6 +820,7 @@ fn test_retain() { } #[test] +#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM #[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc fn test_try_reserve() { let mut empty_bytes: HashMap<u8, u8> = HashMap::new(); diff --git a/library/std/src/error.rs b/library/std/src/error.rs index 722df119d22..4fbcfd85d7c 100644 --- a/library/std/src/error.rs +++ b/library/std/src/error.rs @@ -1,4 +1,4 @@ -//! Interfaces for working with Errors. +//! The `Error` trait provides common functionality for errors. //! //! # Error Handling In Rust //! @@ -1454,7 +1454,6 @@ impl<E> Report<E> { /// /// ```rust /// #![feature(error_reporter)] - /// #![feature(backtrace)] /// #![feature(provide_any)] /// #![feature(error_generic_member_access)] /// # use std::error::Error; diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs index 933b52b4dcc..3dd5b12507f 100644 --- a/library/std/src/f32.rs +++ b/library/std/src/f32.rs @@ -1,4 +1,4 @@ -//! Constants specific to the `f32` single-precision floating point type. +//! Constants for the `f32` single-precision floating point type. //! //! *[See also the `f32` primitive type](primitive@f32).* //! 
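Editor's note: several test files in this commit scale their workloads down when running under Miri, using `cfg!(miri)` to pick a smaller iteration count. The following is a minimal, self-contained sketch of that pattern; the test name and the specific counts are illustrative and not taken from the diff.

```rust
// Illustrative test (not from the diff) showing the cfg!(miri) scaling
// pattern adopted by the hash map and mpsc tests in this commit: keep the
// full workload in native runs, but shrink it so Miri finishes quickly.
#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    #[test]
    fn lots_of_insertions_scaled() {
        // Miri is orders of magnitude slower than native execution.
        let count = if cfg!(miri) { 101 } else { 1001 };

        let mut m = HashMap::new();
        for i in 1..count {
            assert!(m.insert(i, i).is_none());
        }
        assert_eq!(m.len(), count - 1);
    }
}
```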
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs index a9aa84f70d1..31351a87978 100644 --- a/library/std/src/f64.rs +++ b/library/std/src/f64.rs @@ -1,4 +1,4 @@ -//! Constants specific to the `f64` double-precision floating point type. +//! Constants for the `f64` double-precision floating point type. //! //! *[See also the `f64` primitive type](primitive@f64).* //! diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs index f357f33ec52..68a19eccc0e 100644 --- a/library/std/src/io/tests.rs +++ b/library/std/src/io/tests.rs @@ -94,7 +94,7 @@ fn read_to_end() { assert_eq!(c.read_to_end(&mut v).unwrap(), 1); assert_eq!(v, b"1"); - let cap = 1024 * 1024; + let cap = if cfg!(miri) { 1024 } else { 1024 * 1024 }; let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>(); let mut v = Vec::new(); let (a, b) = data.split_at(data.len() / 2); @@ -309,6 +309,7 @@ fn chain_zero_length_read_is_not_eof() { #[bench] #[cfg_attr(target_os = "emscripten", ignore)] +#[cfg_attr(miri, ignore)] // Miri isn't fast... fn bench_read_to_end(b: &mut test::Bencher) { b.iter(|| { let mut lr = repeat(1).take(10000000); diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index 6b0c0ad7c21..5029023121f 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -187,6 +187,7 @@ //! [rust-discord]: https://discord.gg/rust-lang //! [array]: prim@array //! [slice]: prim@slice + #![cfg_attr(not(feature = "restricted-std"), stable(feature = "rust1", since = "1.0.0"))] #![cfg_attr(feature = "restricted-std", unstable(feature = "restricted_std", issue = "none"))] #![doc( @@ -201,25 +202,35 @@ no_global_oom_handling, not(no_global_oom_handling) ))] +// To run libstd tests without x.py without ending up with two copies of libstd, Miri needs to be +// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>. +// rustc itself never sets the feature, so this line has no affect there. +#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))] +// miri-test-libstd also prefers to make std use the sysroot versions of the dependencies. +#![cfg_attr(feature = "miri-test-libstd", feature(rustc_private))] // Don't link to std. We are std. 
#![no_std] +// Tell the compiler to link to either panic_abort or panic_unwind +#![needs_panic_runtime] +// +// Lints: #![warn(deprecated_in_future)] #![warn(missing_docs)] #![warn(missing_debug_implementations)] #![allow(explicit_outlives_requirements)] #![allow(unused_lifetimes)] -// Tell the compiler to link to either panic_abort or panic_unwind -#![needs_panic_runtime] +#![deny(rustc::existing_doc_keyword)] // Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind` -#![cfg_attr(not(bootstrap), deny(ffi_unwind_calls))] +#![deny(ffi_unwind_calls)] // std may use features in a platform-specific way #![allow(unused_features)] +// +// Features: #![cfg_attr(test, feature(internal_output_capture, print_internals, update_panic_count, rt))] #![cfg_attr( all(target_vendor = "fortanix", target_env = "sgx"), feature(slice_index_methods, coerce_unsized, sgx_platform) )] -#![deny(rustc::existing_doc_keyword)] // // Language features: #![feature(alloc_error_handler)] @@ -243,9 +254,9 @@ #![feature(intra_doc_pointers)] #![feature(label_break_value)] #![feature(lang_items)] -#![cfg_attr(bootstrap, feature(let_chains))] #![feature(let_else)] #![feature(linkage)] +#![feature(link_cfg)] #![feature(min_specialization)] #![feature(must_not_suspend)] #![feature(needs_panic_runtime)] @@ -258,6 +269,7 @@ #![feature(staged_api)] #![feature(thread_local)] #![feature(try_blocks)] +#![feature(utf8_chunks)] // // Library features (core): #![feature(array_error_internals)] @@ -294,6 +306,8 @@ #![feature(std_internals)] #![feature(str_internals)] #![feature(strict_provenance)] +#![feature(maybe_uninit_uninit_array)] +#![feature(const_maybe_uninit_uninit_array)] // // Library features (alloc): #![feature(alloc_layout_extra)] diff --git a/library/std/src/net/ip.rs b/library/std/src/net/ip.rs index 41ca9ba8425..6004810655e 100644 --- a/library/std/src/net/ip.rs +++ b/library/std/src/net/ip.rs @@ -3,12 +3,14 @@ mod tests; use crate::cmp::Ordering; -use crate::fmt::{self, Write as FmtWrite}; -use crate::io::Write as IoWrite; +use crate::fmt::{self, Write}; use crate::mem::transmute; use crate::sys::net::netc as c; use crate::sys_common::{FromInner, IntoInner}; +mod display_buffer; +use display_buffer::IpDisplayBuffer; + /// An IP address, either IPv4 or IPv6. /// /// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their @@ -618,25 +620,31 @@ impl Ipv4Addr { matches!(self.octets(), [169, 254, ..]) } - /// Returns [`true`] if the address appears to be globally routable. - /// See [iana-ipv4-special-registry][ipv4-sr]. + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. + /// + /// Most IPv4 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. 
/// - /// The following return [`false`]: + /// Non-exhaustive list of notable addresses that are not globally reachable: /// - /// - private addresses (see [`Ipv4Addr::is_private()`]) - /// - the loopback address (see [`Ipv4Addr::is_loopback()`]) - /// - the link-local address (see [`Ipv4Addr::is_link_local()`]) - /// - the broadcast address (see [`Ipv4Addr::is_broadcast()`]) - /// - addresses used for documentation (see [`Ipv4Addr::is_documentation()`]) - /// - the unspecified address (see [`Ipv4Addr::is_unspecified()`]), and the whole - /// `0.0.0.0/8` block - /// - addresses reserved for future protocols, except - /// `192.0.0.9/32` and `192.0.0.10/32` which are globally routable - /// - addresses reserved for future use (see [`Ipv4Addr::is_reserved()`] - /// - addresses reserved for networking devices benchmarking (see - /// [`Ipv4Addr::is_benchmarking()`]) + /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified)) + /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private)) + /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared)) + /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback)) + /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local)) + /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation)) + /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) + /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) + /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) /// - /// [ipv4-sr]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// + /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + /// [unspecified address]: Ipv4Addr::UNSPECIFIED + /// [broadcast address]: Ipv4Addr::BROADCAST + /// /// # Examples /// @@ -645,71 +653,61 @@ impl Ipv4Addr { /// /// use std::net::Ipv4Addr; /// - /// // private addresses are not global + /// // Most IPv4 addresses are globally reachable: + /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true); + /// + /// // However some addresses have been assigned a special meaning + /// // that makes them not globally reachable. 
Some examples are: + /// + /// // The unspecified address (`0.0.0.0`) + /// assert_eq!(Ipv4Addr::UNSPECIFIED.is_global(), false); + /// + /// // Addresses reserved for private use (`10.0.0.0/8`, `172.16.0.0/12`, 192.168.0.0/16) /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false); /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false); /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false); /// - /// // the 0.0.0.0/8 block is not global - /// assert_eq!(Ipv4Addr::new(0, 1, 2, 3).is_global(), false); - /// // in particular, the unspecified address is not global - /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_global(), false); + /// // Addresses in the shared address space (`100.64.0.0/10`) + /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false); /// - /// // the loopback address is not global - /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_global(), false); + /// // The loopback addresses (`127.0.0.0/8`) + /// assert_eq!(Ipv4Addr::LOCALHOST.is_global(), false); /// - /// // link local addresses are not global + /// // Link-local addresses (`169.254.0.0/16`) /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false); /// - /// // the broadcast address is not global - /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_global(), false); - /// - /// // the address space designated for documentation is not global + /// // Addresses reserved for documentation (`192.0.2.0/24`, `198.51.100.0/24`, `203.0.113.0/24`) /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false); /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false); /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false); /// - /// // shared addresses are not global - /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false); - /// - /// // addresses reserved for protocol assignment are not global - /// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_global(), false); - /// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_global(), false); + /// // Addresses reserved for benchmarking (`198.18.0.0/15`) + /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false); /// - /// // addresses reserved for future use are not global + /// // Reserved addresses (`240.0.0.0/4`) /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false); /// - /// // addresses reserved for network devices benchmarking are not global - /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false); + /// // The broadcast address (`255.255.255.255`) + /// assert_eq!(Ipv4Addr::BROADCAST.is_global(), false); /// - /// // All the other addresses are global - /// assert_eq!(Ipv4Addr::new(1, 1, 1, 1).is_global(), true); - /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true); + /// // For a complete overview see the IANA IPv4 Special-Purpose Address Registry. /// ``` #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] #[unstable(feature = "ip", issue = "27709")] #[must_use] #[inline] pub const fn is_global(&self) -> bool { - // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two - // globally routable addresses in the 192.0.0.0/24 range. 
- if u32::from_be_bytes(self.octets()) == 0xc0000009 - || u32::from_be_bytes(self.octets()) == 0xc000000a - { - return true; - } - !self.is_private() - && !self.is_loopback() - && !self.is_link_local() - && !self.is_broadcast() - && !self.is_documentation() - && !self.is_shared() + !(self.octets()[0] == 0 // "This network" + || self.is_private() + || self.is_shared() + || self.is_loopback() + || self.is_link_local() // addresses reserved for future protocols (`192.0.0.0/24`) - && !(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) - && !self.is_reserved() - && !self.is_benchmarking() - // Make sure the address is not in 0.0.0.0/8 - && self.octets()[0] != 0 + ||(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) + || self.is_documentation() + || self.is_benchmarking() + || self.is_reserved() + || self.is_broadcast()) } /// Returns [`true`] if this address is part of the Shared Address Space defined in @@ -991,21 +989,19 @@ impl From<Ipv6Addr> for IpAddr { impl fmt::Display for Ipv4Addr { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let octets = self.octets(); - // Fast Path: if there's no alignment stuff, write directly to the buffer + + // If there are no alignment requirements, write the IP address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. if fmt.precision().is_none() && fmt.width().is_none() { write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) } else { - const IPV4_BUF_LEN: usize = 15; // Long enough for the longest possible IPv4 address - let mut buf = [0u8; IPV4_BUF_LEN]; - let mut buf_slice = &mut buf[..]; + const LONGEST_IPV4_ADDR: &str = "255.255.255.255"; - // Note: The call to write should never fail, hence the unwrap - write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap(); - let len = IPV4_BUF_LEN - buf_slice.len(); + let mut buf = IpDisplayBuffer::<{ LONGEST_IPV4_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv4 address, so this should never fail. + write!(buf, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap(); - // This unsafe is OK because we know what is being written to the buffer - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - fmt.pad(buf) + fmt.pad(buf.as_str()) } } } @@ -1300,13 +1296,33 @@ impl Ipv6Addr { u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) } - /// Returns [`true`] if the address appears to be globally routable. + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. /// - /// The following return [`false`]: + /// Most IPv6 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. 
/// - /// - the loopback address - /// - link-local and unique local unicast addresses - /// - interface-, link-, realm-, admin- and site-local multicast addresses + /// Non-exhaustive list of notable addresses that are not globally reachable: + /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified)) + /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback)) + /// - IPv4-mapped addresses + /// - Addresses reserved for benchmarking + /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) + /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) + /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) + /// + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. + /// + /// Note that an address having global scope is not the same as being globally reachable, + /// and there is no direct relation between the two concepts: There exist addresses with global scope + /// that are not globally reachable (for example unique local addresses), + /// and addresses that are globally reachable without having global scope + /// (multicast addresses with non-global scope). + /// + /// [IANA IPv6 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + /// [unspecified address]: Ipv6Addr::UNSPECIFIED + /// [loopback address]: Ipv6Addr::LOCALHOST /// /// # Examples /// @@ -1315,20 +1331,65 @@ impl Ipv6Addr { /// /// use std::net::Ipv6Addr; /// - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), true); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_global(), false); - /// assert_eq!(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1).is_global(), true); + /// // Most IPv6 addresses are globally reachable: + /// assert_eq!(Ipv6Addr::new(0x26, 0, 0x1c9, 0, 0, 0xafc8, 0x10, 0x1).is_global(), true); + /// + /// // However some addresses have been assigned a special meaning + /// // that makes them not globally reachable. Some examples are: + /// + /// // The unspecified address (`::`) + /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_global(), false); + /// + /// // The loopback address (`::1`) + /// assert_eq!(Ipv6Addr::LOCALHOST.is_global(), false); + /// + /// // IPv4-mapped addresses (`::ffff:0:0/96`) + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), false); + /// + /// // Addresses reserved for benchmarking (`2001:2::/48`) + /// assert_eq!(Ipv6Addr::new(0x2001, 2, 0, 0, 0, 0, 0, 1,).is_global(), false); + /// + /// // Addresses reserved for documentation (`2001:db8::/32`) + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // Unique local addresses (`fc00::/7`) + /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // Unicast addresses with link-local scope (`fe80::/10`) + /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // For a complete overview see the IANA IPv6 Special-Purpose Address Registry. 
/// ``` #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] #[unstable(feature = "ip", issue = "27709")] #[must_use] #[inline] pub const fn is_global(&self) -> bool { - match self.multicast_scope() { - Some(Ipv6MulticastScope::Global) => true, - None => self.is_unicast_global(), - _ => false, - } + !(self.is_unspecified() + || self.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(self.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(self.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(self.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(self.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(self.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F) + )) + || self.is_documentation() + || self.is_unique_local() + || self.is_unicast_link_local()) } /// Returns [`true`] if this is a unique local address (`fc00::/7`). @@ -1525,6 +1586,7 @@ impl Ipv6Addr { && !self.is_unique_local() && !self.is_unspecified() && !self.is_documentation() + && !self.is_benchmarking() } /// Returns the address's multicast scope if the address is multicast. @@ -1708,8 +1770,8 @@ impl Ipv6Addr { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for Ipv6Addr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // If there are no alignment requirements, write out the IP address to - // f. Otherwise, write it to a local buffer, then use f.pad. + // If there are no alignment requirements, write the IP address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. if f.precision().is_none() && f.width().is_none() { let segments = self.segments(); @@ -1780,22 +1842,13 @@ impl fmt::Display for Ipv6Addr { } } } else { - // Slow path: write the address to a local buffer, then use f.pad. - // Defined recursively by using the fast path to write to the - // buffer. - - // This is the largest possible size of an IPv6 address - const IPV6_BUF_LEN: usize = (4 * 8) + 7; - let mut buf = [0u8; IPV6_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - - // Note: This call to write should never fail, so unwrap is okay. - write!(buf_slice, "{}", self).unwrap(); - let len = IPV6_BUF_LEN - buf_slice.len(); - - // This is safe because we know exactly what can be in this buffer - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - f.pad(buf) + const LONGEST_IPV6_ADDR: &str = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"; + + let mut buf = IpDisplayBuffer::<{ LONGEST_IPV6_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv6 address, so this should never fail. 
+ write!(buf, "{}", self).unwrap(); + + f.pad(buf.as_str()) } } } diff --git a/library/std/src/net/ip/display_buffer.rs b/library/std/src/net/ip/display_buffer.rs new file mode 100644 index 00000000000..bd852d5da8e --- /dev/null +++ b/library/std/src/net/ip/display_buffer.rs @@ -0,0 +1,40 @@ +use crate::fmt; +use crate::mem::MaybeUninit; +use crate::str; + +/// Used for slow path in `Display` implementations when alignment is required. +pub struct IpDisplayBuffer<const SIZE: usize> { + buf: [MaybeUninit<u8>; SIZE], + len: usize, +} + +impl<const SIZE: usize> IpDisplayBuffer<SIZE> { + #[inline] + pub const fn new() -> Self { + Self { buf: MaybeUninit::uninit_array(), len: 0 } + } + + #[inline] + pub fn as_str(&self) -> &str { + // SAFETY: `buf` is only written to by the `fmt::Write::write_str` implementation + // which writes a valid UTF-8 string to `buf` and correctly sets `len`. + unsafe { + let s = MaybeUninit::slice_assume_init_ref(&self.buf[..self.len]); + str::from_utf8_unchecked(s) + } + } +} + +impl<const SIZE: usize> fmt::Write for IpDisplayBuffer<SIZE> { + fn write_str(&mut self, s: &str) -> fmt::Result { + let bytes = s.as_bytes(); + + if let Some(buf) = self.buf.get_mut(self.len..(self.len + bytes.len())) { + MaybeUninit::write_slice(buf, bytes); + self.len += bytes.len(); + Ok(()) + } else { + Err(fmt::Error) + } + } +} diff --git a/library/std/src/net/ip/tests.rs b/library/std/src/net/ip/tests.rs index c29509331d7..7c3430b2b21 100644 --- a/library/std/src/net/ip/tests.rs +++ b/library/std/src/net/ip/tests.rs @@ -321,15 +321,15 @@ fn ip_properties() { check!("fe80:ffff::"); check!("febf:ffff::"); check!("fec0::", global); - check!("ff01::", multicast); - check!("ff02::", multicast); - check!("ff03::", multicast); - check!("ff04::", multicast); - check!("ff05::", multicast); - check!("ff08::", multicast); + check!("ff01::", global | multicast); + check!("ff02::", global | multicast); + check!("ff03::", global | multicast); + check!("ff04::", global | multicast); + check!("ff05::", global | multicast); + check!("ff08::", global | multicast); check!("ff0e::", global | multicast); check!("2001:db8:85a3::8a2e:370:7334", doc); - check!("2001:2::ac32:23ff:21", global | benchmarking); + check!("2001:2::ac32:23ff:21", benchmarking); check!("102:304:506:708:90a:b0c:d0e:f10", global); } @@ -609,6 +609,60 @@ fn ipv6_properties() { check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global); + check!( + "::ffff:127.0.0.1", + &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x7f, 0, 0, 1], + unicast_global + ); + + check!( + "64:ff9b:1::", + &[0, 0x64, 0xff, 0x9b, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_global + ); + + check!("100::", &[0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!("2001::", &[0x20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!( + "2001:1::1", + &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + global | unicast_global + ); + + check!( + "2001:1::2", + &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], + global | unicast_global + ); + + check!( + "2001:3::", + &[0x20, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!( + "2001:4:112::", + &[0x20, 1, 0, 4, 1, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!( + "2001:20::", + &[0x20, 1, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!("2001:30::", &[0x20, 1, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + 
+ check!( + "2001:200::", + &[0x20, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local); check!( @@ -666,21 +720,37 @@ fn ipv6_properties() { check!( "ff01::", &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_interface_local + multicast_interface_local | global ); - check!("ff02::", &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_link_local); + check!( + "ff02::", + &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_link_local | global + ); - check!("ff03::", &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_realm_local); + check!( + "ff03::", + &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_realm_local | global + ); - check!("ff04::", &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_admin_local); + check!( + "ff04::", + &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_admin_local | global + ); - check!("ff05::", &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_site_local); + check!( + "ff05::", + &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_site_local | global + ); check!( "ff08::", &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_organization_local + multicast_organization_local | global ); check!( @@ -698,7 +768,7 @@ fn ipv6_properties() { check!( "2001:2::ac32:23ff:21", &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21], - global | unicast_global | benchmarking + benchmarking ); check!( diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs index 081915ed148..1b3d110426f 100644 --- a/library/std/src/os/fd/raw.rs +++ b/library/std/src/os/fd/raw.rs @@ -14,7 +14,7 @@ use crate::os::wasi::io::OwnedFd; use crate::sys_common::{AsInner, IntoInner}; /// Raw file descriptors. -#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)] +#[rustc_allowed_through_unstable_modules] #[stable(feature = "rust1", since = "1.0.0")] pub type RawFd = raw::c_int; @@ -23,7 +23,7 @@ pub type RawFd = raw::c_int; /// This is only available on unix and WASI platforms and must be imported in /// order to call the method. Windows platforms have a corresponding /// `AsRawHandle` and `AsRawSocket` set of traits. -#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)] +#[rustc_allowed_through_unstable_modules] #[stable(feature = "rust1", since = "1.0.0")] pub trait AsRawFd { /// Extracts the raw file descriptor. @@ -59,7 +59,7 @@ pub trait AsRawFd { /// A trait to express the ability to construct an object from a raw file /// descriptor. -#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)] +#[rustc_allowed_through_unstable_modules] #[stable(feature = "from_raw_os", since = "1.1.0")] pub trait FromRawFd { /// Constructs a new instance of `Self` from the given raw file @@ -103,7 +103,7 @@ pub trait FromRawFd { /// A trait to express the ability to consume an object and acquire ownership of /// its raw file descriptor. -#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)] +#[rustc_allowed_through_unstable_modules] #[stable(feature = "into_raw_os", since = "1.4.0")] pub trait IntoRawFd { /// Consumes this object, returning the raw underlying file descriptor. 
diff --git a/library/std/src/os/fortanix_sgx/mod.rs b/library/std/src/os/fortanix_sgx/mod.rs index a40dabe190a..da100b689db 100644 --- a/library/std/src/os/fortanix_sgx/mod.rs +++ b/library/std/src/os/fortanix_sgx/mod.rs @@ -26,6 +26,7 @@ pub mod usercalls { free, insecure_time, launch_thread, read, read_alloc, send, wait, write, }; pub use crate::sys::abi::usercalls::raw::{do_usercall, Usercalls as UsercallNrs}; + pub use crate::sys::abi::usercalls::raw::{Register, RegisterArgument, ReturnValue}; // fortanix-sgx-abi re-exports pub use crate::sys::abi::usercalls::raw::Error; diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs index 9aeae4b2cae..bb313c7597b 100644 --- a/library/std/src/os/unix/net/addr.rs +++ b/library/std/src/os/unix/net/addr.rs @@ -329,7 +329,7 @@ impl SocketAddr { crate::ptr::copy_nonoverlapping( namespace.as_ptr(), - addr.sun_path.as_mut_ptr().offset(1) as *mut u8, + addr.sun_path.as_mut_ptr().add(1) as *mut u8, namespace.len(), ); let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t; diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs index 7c0d539504d..3b2601e755a 100644 --- a/library/std/src/os/unix/net/listener.rs +++ b/library/std/src/os/unix/net/listener.rs @@ -73,9 +73,13 @@ impl UnixListener { unsafe { let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?; let (addr, len) = sockaddr_un(path.as_ref())?; + #[cfg(target_os = "linux")] + const backlog: libc::c_int = -1; + #[cfg(not(target_os = "linux"))] + const backlog: libc::c_int = 128; cvt(libc::bind(inner.as_inner().as_raw_fd(), &addr as *const _ as *const _, len as _))?; - cvt(libc::listen(inner.as_inner().as_raw_fd(), 128))?; + cvt(libc::listen(inner.as_inner().as_raw_fd(), backlog))?; Ok(UnixListener(inner)) } @@ -109,12 +113,16 @@ impl UnixListener { pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> { unsafe { let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?; + #[cfg(target_os = "linux")] + const backlog: libc::c_int = -1; + #[cfg(not(target_os = "linux"))] + const backlog: libc::c_int = 128; cvt(libc::bind( inner.as_raw_fd(), &socket_addr.addr as *const _ as *const _, socket_addr.len as _, ))?; - cvt(libc::listen(inner.as_raw_fd(), 128))?; + cvt(libc::listen(inner.as_raw_fd(), backlog))?; Ok(UnixListener(inner)) } } diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs index 351cf698810..dd307022c6d 100644 --- a/library/std/src/path/tests.rs +++ b/library/std/src/path/tests.rs @@ -1768,6 +1768,7 @@ fn test_windows_absolute() { } #[bench] +#[cfg_attr(miri, ignore)] // Miri isn't fast... fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) { let prefix = "my/home"; let mut paths: Vec<_> = @@ -1781,6 +1782,7 @@ fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) { } #[bench] +#[cfg_attr(miri, ignore)] // Miri isn't fast... fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) { let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/"; let paths: Vec<_> = @@ -1799,6 +1801,7 @@ fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) { } #[bench] +#[cfg_attr(miri, ignore)] // Miri isn't fast... fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) { let prefix = "my/home"; let paths: Vec<_> = @@ -1817,6 +1820,7 @@ fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) { } #[bench] +#[cfg_attr(miri, ignore)] // Miri isn't fast... 
fn bench_path_hashset(b: &mut test::Bencher) { let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/"; let paths: Vec<_> = @@ -1835,6 +1839,7 @@ fn bench_path_hashset(b: &mut test::Bencher) { } #[bench] +#[cfg_attr(miri, ignore)] // Miri isn't fast... fn bench_path_hashset_miss(b: &mut test::Bencher) { let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/"; let paths: Vec<_> = diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs index b8e5461640c..2b2ef64fdb1 100644 --- a/library/std/src/primitive_docs.rs +++ b/library/std/src/primitive_docs.rs @@ -801,11 +801,53 @@ mod prim_array {} /// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>()); /// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>()); /// ``` +/// +/// ## Trait Implementations +/// +/// Some traits are implemented for slices if the element type implements +/// that trait. This includes [`Eq`], [`Hash`] and [`Ord`]. +/// +/// ## Iteration +/// +/// The slices implement `IntoIterator`. The iterator yields references to the +/// slice elements. +/// +/// ``` +/// let numbers: &[i32] = &[0, 1, 2]; +/// for n in numbers { +/// println!("{n} is a number!"); +/// } +/// ``` +/// +/// The mutable slice yields mutable references to the elements: +/// +/// ``` +/// let mut scores: &mut [i32] = &mut [7, 8, 9]; +/// for score in scores { +/// *score += 1; +/// } +/// ``` +/// +/// This iterator yields mutable references to the slice's elements, so while +/// the element type of the slice is `i32`, the element type of the iterator is +/// `&mut i32`. +/// +/// * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default +/// iterators. +/// * Further methods that return iterators are [`.split`], [`.splitn`], +/// [`.chunks`], [`.windows`] and more. +/// +/// [`Hash`]: core::hash::Hash +/// [`.iter`]: slice::iter +/// [`.iter_mut`]: slice::iter_mut +/// [`.split`]: slice::split +/// [`.splitn`]: slice::splitn +/// [`.chunks`]: slice::chunks +/// [`.windows`]: slice::windows #[stable(feature = "rust1", since = "1.0.0")] mod prim_slice {} #[doc(primitive = "str")] -// /// String slices. /// /// *[See also the `std::str` module](crate::str).* @@ -816,19 +858,22 @@ mod prim_slice {} /// /// String slices are always valid UTF-8. /// -/// # Examples +/// # Basic Usage /// /// String literals are string slices: /// /// ``` -/// let hello = "Hello, world!"; -/// -/// // with an explicit type annotation -/// let hello: &'static str = "Hello, world!"; +/// let hello_world = "Hello, World!"; /// ``` /// -/// They are `'static` because they're stored directly in the final binary, and -/// so will be valid for the `'static` duration. +/// Here we have declared a string slice initialized with a string literal. +/// String literals have a static lifetime, which means the string `hello_world` +/// is guaranteed to be valid for the duration of the entire program. +/// We can explicitly specify `hello_world`'s lifetime as well: +/// +/// ``` +/// let hello_world: &'static str = "Hello, world!"; +/// ``` /// /// # Representation /// @@ -996,7 +1041,7 @@ impl<T> (T,) {} // Fake impl that's only really used for docs. #[cfg(doc)] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(bootstrap), doc(fake_variadic))] +#[doc(fake_variadic)] /// This trait is implemented on arbitrary-length tuples. impl<T: Clone> Clone for (T,) { fn clone(&self) -> Self { @@ -1007,7 +1052,7 @@ impl<T: Clone> Clone for (T,) { // Fake impl that's only really used for docs. 
#[cfg(doc)] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(bootstrap), doc(fake_variadic))] +#[doc(fake_variadic)] /// This trait is implemented on arbitrary-length tuples. impl<T: Copy> Copy for (T,) { // empty @@ -1484,13 +1529,12 @@ mod prim_fn {} // Required to make auto trait impls render. // See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls #[doc(hidden)] -#[cfg(not(bootstrap))] impl<Ret, T> fn(T) -> Ret {} // Fake impl that's only really used for docs. #[cfg(doc)] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(bootstrap), doc(fake_variadic))] +#[doc(fake_variadic)] /// This trait is implemented on function pointers with any number of arguments. impl<Ret, T> Clone for fn(T) -> Ret { fn clone(&self) -> Self { @@ -1501,7 +1545,7 @@ impl<Ret, T> Clone for fn(T) -> Ret { // Fake impl that's only really used for docs. #[cfg(doc)] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(bootstrap), doc(fake_variadic))] +#[doc(fake_variadic)] /// This trait is implemented on function pointers with any number of arguments. impl<Ret, T> Copy for fn(T) -> Ret { // empty diff --git a/library/std/src/process.rs b/library/std/src/process.rs index d6cba7e7598..d91d4fa64ca 100644 --- a/library/std/src/process.rs +++ b/library/std/src/process.rs @@ -169,15 +169,15 @@ use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; pub struct Child { pub(crate) handle: imp::Process, - /// The handle for writing to the child's standard input (stdin), if it has - /// been captured. To avoid partially moving - /// the `child` and thus blocking yourself from calling - /// functions on `child` while using `stdin`, - /// you might find it helpful: + /// The handle for writing to the child's standard input (stdin), if it + /// has been captured. You might find it helpful to do /// /// ```compile_fail,E0425 /// let stdin = child.stdin.take().unwrap(); /// ``` + /// + /// to avoid partially moving the `child` and thus blocking yourself from calling + /// functions on `child` while using `stdin`. 
#[stable(feature = "process", since = "1.0.0")] pub stdin: Option<ChildStdin>, diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs index 9f4f31ed051..34b2a9a98ac 100644 --- a/library/std/src/sync/mpsc/mpsc_queue/tests.rs +++ b/library/std/src/sync/mpsc/mpsc_queue/tests.rs @@ -13,7 +13,7 @@ fn test_full() { #[test] fn test() { let nthreads = 8; - let nmsgs = 1000; + let nmsgs = if cfg!(miri) { 100 } else { 1000 }; let q = Queue::new(); match q.pop() { Empty => {} diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs index 467ef3dbdcb..eb6d5c2cf66 100644 --- a/library/std/src/sync/mpsc/spsc_queue/tests.rs +++ b/library/std/src/sync/mpsc/spsc_queue/tests.rs @@ -77,12 +77,13 @@ fn stress() { } unsafe fn stress_bound(bound: usize) { + let count = if cfg!(miri) { 1000 } else { 100000 }; let q = Arc::new(Queue::with_additions(bound, (), ())); let (tx, rx) = channel(); let q2 = q.clone(); let _t = thread::spawn(move || { - for _ in 0..100000 { + for _ in 0..count { loop { match q2.pop() { Some(1) => break, @@ -93,7 +94,7 @@ fn stress() { } tx.send(()).unwrap(); }); - for _ in 0..100000 { + for _ in 0..count { q.push(1); } rx.recv().unwrap(); diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs index e58649bab6e..63c79436974 100644 --- a/library/std/src/sync/mpsc/sync_tests.rs +++ b/library/std/src/sync/mpsc/sync_tests.rs @@ -113,23 +113,25 @@ fn chan_gone_concurrent() { #[test] fn stress() { + let count = if cfg!(miri) { 100 } else { 10000 }; let (tx, rx) = sync_channel::<i32>(0); thread::spawn(move || { - for _ in 0..10000 { + for _ in 0..count { tx.send(1).unwrap(); } }); - for _ in 0..10000 { + for _ in 0..count { assert_eq!(rx.recv().unwrap(), 1); } } #[test] fn stress_recv_timeout_two_threads() { + let count = if cfg!(miri) { 100 } else { 10000 }; let (tx, rx) = sync_channel::<i32>(0); thread::spawn(move || { - for _ in 0..10000 { + for _ in 0..count { tx.send(1).unwrap(); } }); @@ -146,12 +148,12 @@ fn stress_recv_timeout_two_threads() { } } - assert_eq!(recv_count, 10000); + assert_eq!(recv_count, count); } #[test] fn stress_recv_timeout_shared() { - const AMT: u32 = 1000; + const AMT: u32 = if cfg!(miri) { 100 } else { 1000 }; const NTHREADS: u32 = 8; let (tx, rx) = sync_channel::<i32>(0); let (dtx, drx) = sync_channel::<()>(0); @@ -191,7 +193,7 @@ fn stress_recv_timeout_shared() { #[test] fn stress_shared() { - const AMT: u32 = 1000; + const AMT: u32 = if cfg!(miri) { 100 } else { 1000 }; const NTHREADS: u32 = 8; let (tx, rx) = sync_channel::<i32>(0); let (dtx, drx) = sync_channel::<()>(0); @@ -438,12 +440,13 @@ fn stream_send_recv_stress() { #[test] fn recv_a_lot() { + let count = if cfg!(miri) { 1000 } else { 10000 }; // Regression test that we don't run out of stack in scheduler context - let (tx, rx) = sync_channel(10000); - for _ in 0..10000 { + let (tx, rx) = sync_channel(count); + for _ in 0..count { tx.send(()).unwrap(); } - for _ in 0..10000 { + for _ in 0..count { rx.recv().unwrap(); } } diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs index 4deb3e59615..f6d0796f604 100644 --- a/library/std/src/sync/mpsc/tests.rs +++ b/library/std/src/sync/mpsc/tests.rs @@ -120,13 +120,14 @@ fn chan_gone_concurrent() { #[test] fn stress() { + let count = if cfg!(miri) { 100 } else { 10000 }; let (tx, rx) = channel::<i32>(); let t = thread::spawn(move || { - for _ in 0..10000 { + for _ in 0..count { 
tx.send(1).unwrap(); } }); - for _ in 0..10000 { + for _ in 0..count { assert_eq!(rx.recv().unwrap(), 1); } t.join().ok().expect("thread panicked"); @@ -134,7 +135,7 @@ fn stress() { #[test] fn stress_shared() { - const AMT: u32 = 10000; + const AMT: u32 = if cfg!(miri) { 100 } else { 10000 }; const NTHREADS: u32 = 8; let (tx, rx) = channel::<i32>(); @@ -504,12 +505,13 @@ fn very_long_recv_timeout_wont_panic() { #[test] fn recv_a_lot() { + let count = if cfg!(miri) { 1000 } else { 10000 }; // Regression test that we don't run out of stack in scheduler context let (tx, rx) = channel(); - for _ in 0..10000 { + for _ in 0..count { tx.send(()).unwrap(); } - for _ in 0..10000 { + for _ in 0..count { rx.recv().unwrap(); } } diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs index 08255c985f5..b5b3ad9898e 100644 --- a/library/std/src/sync/rwlock/tests.rs +++ b/library/std/src/sync/rwlock/tests.rs @@ -19,7 +19,7 @@ fn smoke() { #[test] fn frob() { const N: u32 = 10; - const M: usize = 1000; + const M: usize = if cfg!(miri) { 100 } else { 1000 }; let r = Arc::new(RwLock::new(())); diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs index ea24fedd0eb..fe8392f78cd 100644 --- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs +++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs @@ -56,6 +56,8 @@ unsafe impl UserSafeSized for Usercall {} #[unstable(feature = "sgx_platform", issue = "56975")] unsafe impl UserSafeSized for Return {} #[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for Cancel {} +#[unstable(feature = "sgx_platform", issue = "56975")] unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {} /// A type that can be represented in memory as one or more `UserSafeSized`s. @@ -115,7 +117,7 @@ pub unsafe trait UserSafe { /// * the pointer is null. /// * the pointed-to range is not in user memory. 
unsafe fn check_ptr(ptr: *const Self) { - let is_aligned = |p| -> bool { 0 == (p as usize) & (Self::align_of() - 1) }; + let is_aligned = |p: *const u8| -> bool { 0 == p.addr() & (Self::align_of() - 1) }; assert!(is_aligned(ptr as *const u8)); assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr }))); @@ -305,6 +307,34 @@ where } } +// Split a memory region ptr..ptr + len into three parts: +// +--------+ +// | small0 | Chunk smaller than 8 bytes +// +--------+ +// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes +// +--------+ +// | small1 | Chunk smaller than 8 bytes +// +--------+ +fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) { + let small0_size = if ptr as usize % 8 == 0 { 0 } else { 8 - ptr as usize % 8 }; + let small1_size = (len - small0_size as usize) % 8; + let big_size = len - small0_size as usize - small1_size as usize; + + (small0_size, big_size, small1_size) +} + +unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) { + unsafe { + asm!( + "rep movsq (%rsi), (%rdi)", + inout("rcx") len / 8 => _, + inout("rdi") dst => _, + inout("rsi") src => _, + options(att_syntax, nostack, preserves_flags) + ); + } +} + /// Copies `len` bytes of data from enclave pointer `src` to userspace `dst` /// /// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either: @@ -343,17 +373,6 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) } } - unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) { - unsafe { - asm!( - "rep movsq (%rsi), (%rdi)", - inout("rcx") len / 8 => _, - inout("rdi") dst => _, - inout("rsi") src => _, - options(att_syntax, nostack, preserves_flags) - ); - } - } assert!(!src.is_null()); assert!(!dst.is_null()); assert!(is_enclave_range(src, len)); @@ -370,7 +389,7 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) } else if len % 8 == 0 && dst as usize % 8 == 0 { // Copying 8-byte aligned quadwords: copy quad word per quad word unsafe { - copy_aligned_quadwords_to_userspace(src, dst, len); + copy_quadwords(src, dst, len); } } else { // Split copies into three parts: @@ -381,20 +400,16 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) // +--------+ // | small1 | Chunk smaller than 8 bytes // +--------+ + let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len); unsafe { // Copy small0 - let small0_size = (8 - dst as usize % 8) as u8; - let small0_src = src; - let small0_dst = dst; - copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _); + copy_bytewise_to_userspace(src, dst, small0_size as _); // Copy big - let small1_size = ((len - small0_size as usize) % 8) as u8; - let big_size = len - small0_size as usize - small1_size as usize; let big_src = src.offset(small0_size as _); let big_dst = dst.offset(small0_size as _); - copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size); + copy_quadwords(big_src as _, big_dst, big_size); // Copy small1 let small1_src = src.offset(big_size as isize + small0_size as isize); @@ -404,6 +419,106 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) } } +/// Copies `len` bytes of data from userspace pointer `src` to enclave pointer `dst` +/// +/// This function mitigates AEPIC leak vulnerabilities by ensuring all reads from untrusted memory are 8-byte aligned +/// +/// # Panics +/// This function panics if: +/// +/// 
* The `src` pointer is null +/// * The `dst` pointer is null +/// * The `src` memory range is not in user memory +/// * The `dst` memory range is not in enclave memory +/// +/// # References +/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html +/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html +pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) { + // Copies memory region `src..src + len` to the enclave at `dst`. The source memory region + // is: + // - strictly less than 8 bytes in size and may be + // - located at a misaligned memory location + fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) { + let mut tmp_buff = [0u8; 16]; + + unsafe { + // Compute an aligned memory region to read from + // +--------+ <-- aligned_src + aligned_len (8B-aligned) + // | pad1 | + // +--------+ <-- src + len (misaligned) + // | | + // | | + // | | + // +--------+ <-- src (misaligned) + // | pad0 | + // +--------+ <-- aligned_src (8B-aligned) + let pad0_size = src as usize % 8; + let aligned_src = src.sub(pad0_size); + + let pad1_size = 8 - (src.add(len) as usize % 8); + let aligned_len = pad0_size + len + pad1_size; + + debug_assert!(len < 8); + debug_assert_eq!(aligned_src as usize % 8, 0); + debug_assert_eq!(aligned_len % 8, 0); + debug_assert!(aligned_len <= 16); + + // Copy the aligned buffer to a temporary buffer + // Note: copying from a slightly different memory location is a bit odd. In this case it + // can't lead to page faults or inadvertent copying from the enclave as we only ensured + // that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes + // aligned, `aligned_src` must be on the same page as `src`. 
A similar argument can be made + // for `src + len` + copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len); + + // Copy the correct parts of the temporary buffer to the destination + ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len); + } + } + + assert!(!src.is_null()); + assert!(!dst.is_null()); + assert!(is_user_range(src, len)); + assert!(is_enclave_range(dst, len)); + assert!(!(src as usize).overflowing_add(len + 8).1); + assert!(!(dst as usize).overflowing_add(len + 8).1); + + if len < 8 { + copy_misaligned_chunk_to_enclave(src, dst, len); + } else if len % 8 == 0 && src as usize % 8 == 0 { + // Copying 8-byte aligned quadwords: copy quad word per quad word + unsafe { + copy_quadwords(src, dst, len); + } + } else { + // Split copies into three parts: + // +--------+ + // | small0 | Chunk smaller than 8 bytes + // +--------+ + // | big | Chunk 8-byte aligned, and size a multiple of 8 bytes + // +--------+ + // | small1 | Chunk smaller than 8 bytes + // +--------+ + let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len); + + unsafe { + // Copy small0 + copy_misaligned_chunk_to_enclave(src, dst, small0_size); + + // Copy big + let big_src = src.add(small0_size); + let big_dst = dst.add(small0_size); + copy_quadwords(big_src, big_dst, big_size); + + // Copy small1 + let small1_src = src.add(big_size + small0_size); + let small1_dst = dst.add(big_size + small0_size); + copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size); + } + } +} + #[unstable(feature = "sgx_platform", issue = "56975")] impl<T: ?Sized> UserRef<T> where @@ -468,7 +583,7 @@ where pub fn copy_to_enclave(&self, dest: &mut T) { unsafe { assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get())); - ptr::copy( + copy_from_userspace( self.0.get() as *const T as *const u8, dest as *mut T as *mut u8, mem::size_of_val(dest), @@ -494,7 +609,11 @@ where { /// Copies the value from user memory into enclave memory. pub fn to_enclave(&self) -> T { - unsafe { ptr::read(self.0.get()) } + unsafe { + let mut data: T = mem::MaybeUninit::uninit().assume_init(); + copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>()); + data + } } } diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs index 79d1db5e1c5..e19e843267a 100644 --- a/library/std/src/sys/sgx/abi/usercalls/mod.rs +++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs @@ -292,12 +292,17 @@ fn check_os_error(err: Result) -> i32 { } } -trait FromSgxResult { +/// Translate the raw result of an SGX usercall. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub trait FromSgxResult { + /// Return type type Return; + /// Translate the raw result of an SGX usercall. fn from_sgx_result(self) -> IoResult<Self::Return>; } +#[unstable(feature = "sgx_platform", issue = "56975")] impl<T> FromSgxResult for (Result, T) { type Return = T; @@ -310,6 +315,7 @@ impl<T> FromSgxResult for (Result, T) { } } +#[unstable(feature = "sgx_platform", issue = "56975")] impl FromSgxResult for Result { type Return = (); diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs index 4267b96ccd5..10c1456d4fd 100644 --- a/library/std/src/sys/sgx/abi/usercalls/raw.rs +++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs @@ -37,14 +37,23 @@ pub unsafe fn do_usercall( (a, b) } -type Register = u64; +/// A value passed or returned in a CPU register. 
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub type Register = u64; -trait RegisterArgument { +/// Translate a type from/to Register to be used as an argument. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub trait RegisterArgument { + /// Translate a Register to Self. fn from_register(_: Register) -> Self; + /// Translate self to a Register. fn into_register(self) -> Register; } -trait ReturnValue { +/// Translate a pair of Registers to the raw usercall return value. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub trait ReturnValue { + /// Translate a pair of Registers to the raw usercall return value. fn from_registers(call: &'static str, regs: (Register, Register)) -> Self; } @@ -68,6 +77,7 @@ macro_rules! define_usercalls { macro_rules! define_ra { (< $i:ident > $t:ty) => { + #[unstable(feature = "sgx_platform", issue = "56975")] impl<$i> RegisterArgument for $t { fn from_register(a: Register) -> Self { a as _ @@ -78,6 +88,7 @@ macro_rules! define_ra { } }; ($i:ty as $t:ty) => { + #[unstable(feature = "sgx_platform", issue = "56975")] impl RegisterArgument for $t { fn from_register(a: Register) -> Self { a as $i as _ @@ -88,6 +99,7 @@ macro_rules! define_ra { } }; ($t:ty) => { + #[unstable(feature = "sgx_platform", issue = "56975")] impl RegisterArgument for $t { fn from_register(a: Register) -> Self { a as _ @@ -112,6 +124,7 @@ define_ra!(usize as isize); define_ra!(<T> *const T); define_ra!(<T> *mut T); +#[unstable(feature = "sgx_platform", issue = "56975")] impl RegisterArgument for bool { fn from_register(a: Register) -> bool { if a != 0 { true } else { false } @@ -121,6 +134,7 @@ impl RegisterArgument for bool { } } +#[unstable(feature = "sgx_platform", issue = "56975")] impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> { fn from_register(a: Register) -> Option<NonNull<T>> { NonNull::new(a as _) @@ -130,12 +144,14 @@ impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> { } } +#[unstable(feature = "sgx_platform", issue = "56975")] impl ReturnValue for ! 
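The `RegisterArgument`/`ReturnValue` traits above only shuttle values through the two `u64` usercall registers. A self-contained toy version (not the std definitions) shows the round trip that `define_ra!(usize as isize)` generates:

type Register = u64;

trait RegisterArgument {
    fn from_register(r: Register) -> Self;
    fn into_register(self) -> Register;
}

// Written-out form of what define_ra!(usize as isize) expands to:
// go through usize, then reinterpret as the register type.
impl RegisterArgument for isize {
    fn from_register(r: Register) -> Self {
        r as usize as _
    }
    fn into_register(self) -> Register {
        self as usize as _
    }
}

fn main() {
    // On a 64-bit target the round trip is lossless, even for negative values.
    let x: isize = -1;
    let r = x.into_register();
    assert_eq!(r, u64::MAX);
    assert_eq!(isize::from_register(r), x);
}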
{ fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self { rtabort!("Usercall {call}: did not expect to be re-entered"); } } +#[unstable(feature = "sgx_platform", issue = "56975")] impl ReturnValue for () { fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self { rtassert!(usercall_retval.0 == 0); @@ -144,6 +160,7 @@ impl ReturnValue for () { } } +#[unstable(feature = "sgx_platform", issue = "56975")] impl<T: RegisterArgument> ReturnValue for T { fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self { rtassert!(usercall_retval.1 == 0); @@ -151,6 +168,7 @@ impl<T: RegisterArgument> ReturnValue for T { } } +#[unstable(feature = "sgx_platform", issue = "56975")] impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) { fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self { (T::from_register(regs.0), U::from_register(regs.1)) diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs index cbf7d7d54f7..58b8eb215d7 100644 --- a/library/std/src/sys/sgx/abi/usercalls/tests.rs +++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs @@ -1,8 +1,8 @@ -use super::alloc::copy_to_userspace; use super::alloc::User; +use super::alloc::{copy_from_userspace, copy_to_userspace}; #[test] -fn test_copy_function() { +fn test_copy_to_userspace_function() { let mut src = [0u8; 100]; let mut dst = User::<[u8]>::uninitialized(100); @@ -17,12 +17,38 @@ fn test_copy_function() { dst.copy_from_enclave(&[0u8; 100]); // Copy src[0..size] to dst + offset - unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) }; + unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) }; // Verify copy for byte in 0..size { unsafe { - assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]); + assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]); + } + } + } + } +} + +#[test] +fn test_copy_from_userspace_function() { + let mut dst = [0u8; 100]; + let mut src = User::<[u8]>::uninitialized(100); + + src.copy_from_enclave(&[0u8; 100]); + + for size in 0..48 { + // For all possible alignment + for offset in 0..8 { + // overwrite complete dst + dst = [0u8; 100]; + + // Copy src[0..size] to dst + offset + unsafe { copy_from_userspace(src.as_ptr().offset(offset), dst.as_mut_ptr(), size) }; + + // Verify copy + for byte in 0..size { + unsafe { + assert_eq!(dst[byte as usize], *src.as_ptr().offset(offset + byte as isize)); } } } diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs index b5cc8038ca4..f38d2fd3d70 100644 --- a/library/std/src/sys/unix/fs.rs +++ b/library/std/src/sys/unix/fs.rs @@ -544,11 +544,11 @@ impl Default for FileTimes { fn default() -> Self { // Redox doesn't appear to support `UTIME_OMIT`, so we stub it out here, and always return // an error in `set_times`. - // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore + // ESP-IDF and HorizonOS do not support `futimens` at all and the behavior for those OS is therefore // the same as for Redox. 
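Several hunks in this patch (the tests above, and later `fs.rs`, `os.rs` and `alloc.rs` on Windows) replace `offset`/`offset(-1)` with `add`/`sub`, which take an unsigned count and drop the `as isize` casts. A trivial sketch of the equivalence:

fn main() {
    let bytes = [10u8, 20, 30, 40];
    let p = bytes.as_ptr();
    unsafe {
        // add(2) is documented as offset(2 as isize): same pointer, same read,
        // but the unsigned count cannot silently go negative.
        assert_eq!(*p.offset(2), 30);
        assert_eq!(*p.add(2), 30);
    }
}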
- #[cfg(any(target_os = "redox", target_os = "espidf"))] + #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] let omit = libc::timespec { tv_sec: 0, tv_nsec: 0 }; - #[cfg(not(any(target_os = "redox", target_os = "espidf")))] + #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))] let omit = libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }; Self([omit; 2]) } @@ -687,7 +687,11 @@ impl Iterator for ReadDir { impl Drop for Dir { fn drop(&mut self) { let r = unsafe { libc::closedir(self.0) }; - debug_assert_eq!(r, 0); + assert!( + r == 0 || crate::io::Error::last_os_error().kind() == crate::io::ErrorKind::Interrupted, + "unexpected error during closedir: {:?}", + crate::io::Error::last_os_error() + ); } } @@ -825,6 +829,7 @@ impl DirEntry { target_os = "fuchsia", target_os = "redox" )))] + #[cfg_attr(miri, allow(unused))] fn name_cstr(&self) -> &CStr { unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) } } @@ -836,6 +841,7 @@ impl DirEntry { target_os = "fuchsia", target_os = "redox" ))] + #[cfg_attr(miri, allow(unused))] fn name_cstr(&self) -> &CStr { &self.name } @@ -1079,9 +1085,9 @@ impl File { pub fn set_times(&self, times: FileTimes) -> io::Result<()> { cfg_if::cfg_if! { - if #[cfg(any(target_os = "redox", target_os = "espidf"))] { + if #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] { // Redox doesn't appear to support `UTIME_OMIT`. - // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore + // ESP-IDF and HorizonOS do not support `futimens` at all and the behavior for those OS is therefore // the same as for Redox. drop(times); Err(io::const_io_error!( diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs index abf27e7db78..4741c0c6736 100644 --- a/library/std/src/sys/unix/locks/pthread_condvar.rs +++ b/library/std/src/sys/unix/locks/pthread_condvar.rs @@ -172,7 +172,7 @@ impl Condvar { let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 }; let stable_now = Instant::now(); let r = libc::gettimeofday(&mut sys_now, ptr::null_mut()); - debug_assert_eq!(r, 0); + assert_eq!(r, 0, "unexpected error: {:?}", crate::io::Error::last_os_error()); let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long; let extra = (nsec / 1_000_000_000) as libc::time_t; diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs index 3d0d91460f7..3a375093099 100644 --- a/library/std/src/sys/unix/mod.rs +++ b/library/std/src/sys/unix/mod.rs @@ -295,8 +295,10 @@ pub fn abort_internal() -> ! { cfg_if::cfg_if! 
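The `closedir` change above keeps the success check but tolerates `EINTR` instead of tripping a debug assertion. A sketch of the same pattern for a plain file descriptor, assuming the `libc` crate; `close_checked` is a made-up helper, not std code:

use std::io::{Error, ErrorKind};
use std::os::unix::io::RawFd;

// Close a descriptor, treating an interrupted call as non-fatal,
// in the same spirit as the new assert in Dir's Drop impl.
fn close_checked(fd: RawFd) {
    let r = unsafe { libc::close(fd) };
    assert!(
        r == 0 || Error::last_os_error().kind() == ErrorKind::Interrupted,
        "unexpected error during close: {:?}",
        Error::last_os_error()
    );
}

fn main() {
    let fd = unsafe { libc::dup(1) }; // duplicate stdout just to have something to close
    assert!(fd >= 0);
    close_checked(fd);
}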
{ if #[cfg(target_os = "android")] { - #[link(name = "dl")] - #[link(name = "log")] + #[link(name = "dl", kind = "static", modifiers = "-bundle", + cfg(target_feature = "crt-static"))] + #[link(name = "dl", cfg(not(target_feature = "crt-static")))] + #[link(name = "log", cfg(not(target_feature = "crt-static")))] extern "C" {} } else if #[cfg(target_os = "freebsd")] { #[link(name = "execinfo")] diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs index ccbc182240c..017e2af29d4 100644 --- a/library/std/src/sys/unix/os_str.rs +++ b/library/std/src/sys/unix/os_str.rs @@ -11,7 +11,7 @@ use crate::str; use crate::sync::Arc; use crate::sys_common::{AsInner, IntoInner}; -use core::str::lossy::{Utf8Lossy, Utf8LossyChunk}; +use core::str::Utf8Chunks; #[cfg(test)] #[path = "../unix/os_str/tests.rs"] @@ -29,26 +29,32 @@ pub struct Slice { } impl fmt::Debug for Slice { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - // Writes out a valid unicode string with the correct escape sequences - - formatter.write_str("\"")?; - for Utf8LossyChunk { valid, broken } in Utf8Lossy::from_bytes(&self.inner).chunks() { - for c in valid.chars().flat_map(|c| c.escape_debug()) { - formatter.write_char(c)? - } - - for b in broken { - write!(formatter, "\\x{:02X}", b)?; - } - } - formatter.write_str("\"") + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&Utf8Chunks::new(&self.inner).debug(), f) } } impl fmt::Display for Slice { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&Utf8Lossy::from_bytes(&self.inner), formatter) + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // If we're the empty string then our iterator won't actually yield + // anything, so perform the formatting manually + if self.inner.is_empty() { + return "".fmt(f); + } + + for chunk in Utf8Chunks::new(&self.inner) { + let valid = chunk.valid(); + // If we successfully decoded the whole chunk as a valid string then + // we can return a direct formatting of the string which will also + // respect various formatting flags if possible. 
+ if chunk.invalid().is_empty() { + return valid.fmt(f); + } + + f.write_str(valid)?; + f.write_char(char::REPLACEMENT_CHARACTER)?; + } + Ok(()) } } diff --git a/library/std/src/sys/unix/os_str/tests.rs b/library/std/src/sys/unix/os_str/tests.rs index 213277f01f2..22ba0c92350 100644 --- a/library/std/src/sys/unix/os_str/tests.rs +++ b/library/std/src/sys/unix/os_str/tests.rs @@ -8,3 +8,11 @@ fn slice_debug_output() { assert_eq!(output, expected); } + +#[test] +fn display() { + assert_eq!( + "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye", + Slice::from_u8_slice(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string(), + ); +} diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs index bf49204881d..a6fe07873d7 100644 --- a/library/std/src/sys/unix/rand.rs +++ b/library/std/src/sys/unix/rand.rs @@ -1,13 +1,13 @@ -use crate::mem; -use crate::slice; - pub fn hashmap_random_keys() -> (u64, u64) { - let mut v = (0, 0); - unsafe { - let view = slice::from_raw_parts_mut(&mut v as *mut _ as *mut u8, mem::size_of_val(&v)); - imp::fill_bytes(view); - } - v + const KEY_LEN: usize = core::mem::size_of::<u64>(); + + let mut v = [0u8; KEY_LEN * 2]; + imp::fill_bytes(&mut v); + + let key1 = v[0..KEY_LEN].try_into().unwrap(); + let key2 = v[KEY_LEN..].try_into().unwrap(); + + (u64::from_ne_bytes(key1), u64::from_ne_bytes(key2)) } #[cfg(all( diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs index 36a3fa6023b..7db3065dee0 100644 --- a/library/std/src/sys/unix/thread.rs +++ b/library/std/src/sys/unix/thread.rs @@ -116,11 +116,9 @@ impl Thread { debug_assert_eq!(ret, 0); } - #[cfg(any(target_os = "linux", target_os = "android"))] + #[cfg(target_os = "android")] pub fn set_name(name: &CStr) { const PR_SET_NAME: libc::c_int = 15; - // pthread wrapper only appeared in glibc 2.12, so we use syscall - // directly. unsafe { libc::prctl( PR_SET_NAME, @@ -132,6 +130,14 @@ impl Thread { } } + #[cfg(target_os = "linux")] + pub fn set_name(name: &CStr) { + unsafe { + // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20. 
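The new `Display` impl for the unix `Slice` above walks `Utf8Chunks`, printing each valid chunk and one U+FFFD per invalid sequence. The same walk can be written against the long-stable `Utf8Error` accessors; a rough standalone equivalent (not the std code, `LossyBytes` is made up, and it ignores formatter width/precision flags):

use std::fmt::{self, Write};

struct LossyBytes<'a>(&'a [u8]);

impl fmt::Display for LossyBytes<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut rest = self.0;
        loop {
            match std::str::from_utf8(rest) {
                Ok(valid) => return f.write_str(valid),
                Err(e) => {
                    let (valid, after) = rest.split_at(e.valid_up_to());
                    f.write_str(std::str::from_utf8(valid).unwrap())?;
                    f.write_char(char::REPLACEMENT_CHARACTER)?;
                    // Skip the invalid sequence (or the truncated tail at the end).
                    let invalid_len = e.error_len().unwrap_or(after.len());
                    rest = &after[invalid_len..];
                }
            }
        }
    }
}

fn main() {
    // Same input and expected output as the new `display` test above.
    let s = LossyBytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string();
    assert_eq!(s, "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye");
}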
+ libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()); + } + } + #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))] pub fn set_name(name: &CStr) { unsafe { diff --git a/library/std/src/sys/unsupported/alloc.rs b/library/std/src/sys/unsupported/alloc.rs index 8d5d0a2f5cc..d715ae45401 100644 --- a/library/std/src/sys/unsupported/alloc.rs +++ b/library/std/src/sys/unsupported/alloc.rs @@ -1,15 +1,16 @@ use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::ptr::null_mut; #[stable(feature = "alloc_system_type", since = "1.28.0")] unsafe impl GlobalAlloc for System { #[inline] unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { - 0 as *mut u8 + null_mut() } #[inline] unsafe fn alloc_zeroed(&self, _layout: Layout) -> *mut u8 { - 0 as *mut u8 + null_mut() } #[inline] @@ -17,6 +18,6 @@ unsafe impl GlobalAlloc for System { #[inline] unsafe fn realloc(&self, _ptr: *mut u8, _layout: Layout, _new_size: usize) -> *mut u8 { - 0 as *mut u8 + null_mut() } } diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs index 42a1ff730e3..633f17c054b 100644 --- a/library/std/src/sys/unsupported/process.rs +++ b/library/std/src/sys/unsupported/process.rs @@ -200,6 +200,9 @@ impl<'a> Iterator for CommandArgs<'a> { fn next(&mut self) -> Option<&'a OsStr> { None } + fn size_hint(&self) -> (usize, Option<usize>) { + (0, Some(0)) + } } impl<'a> ExactSizeIterator for CommandArgs<'a> {} diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs index fdc81cdea7d..fe00c08aa6a 100644 --- a/library/std/src/sys/windows/alloc.rs +++ b/library/std/src/sys/windows/alloc.rs @@ -168,7 +168,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 { // SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned` // is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before // it, it is safe to write a header directly before it. - unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) }; + unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) }; // SAFETY: The returned pointer does not point to the to the start of an allocated block, // but there is a header readable directly before it containing the location of the start @@ -213,7 +213,7 @@ unsafe impl GlobalAlloc for System { // SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null // and have a header readable directly before it. 
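The `hashmap_random_keys` rewrite earlier in this patch fills one plain byte array and then splits it, instead of writing through a raw view of a `(u64, u64)`. The conversion step on its own (`split_keys` is a made-up helper used only for illustration):

// Split a 16-byte buffer into two native-endian u64 keys,
// as the new hashmap_random_keys does after imp::fill_bytes.
fn split_keys(v: &[u8; 16]) -> (u64, u64) {
    let k1 = u64::from_ne_bytes(v[..8].try_into().unwrap());
    let k2 = u64::from_ne_bytes(v[8..].try_into().unwrap());
    (k1, k2)
}

fn main() {
    let mut v = [0u8; 16];
    v[..8].copy_from_slice(&0x0123_4567_89ab_cdef_u64.to_ne_bytes());
    v[8..].copy_from_slice(&42u64.to_ne_bytes());
    assert_eq!(split_keys(&v), (0x0123_4567_89ab_cdef, 42));
}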
- unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 } + unsafe { ptr::read((ptr as *mut Header).sub(1)).0 } } }; diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs index c5a30f8bac8..ef3f6a9ba17 100644 --- a/library/std/src/sys/windows/c.rs +++ b/library/std/src/sys/windows/c.rs @@ -228,6 +228,8 @@ pub const IPV6_ADD_MEMBERSHIP: c_int = 12; pub const IPV6_DROP_MEMBERSHIP: c_int = 13; pub const MSG_PEEK: c_int = 0x2; +pub const LOAD_LIBRARY_SEARCH_SYSTEM32: u32 = 0x800; + #[repr(C)] #[derive(Copy, Clone)] pub struct linger { @@ -1030,6 +1032,7 @@ extern "system" { pub fn GetProcAddress(handle: HMODULE, name: LPCSTR) -> *mut c_void; pub fn GetModuleHandleA(lpModuleName: LPCSTR) -> HMODULE; pub fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE; + pub fn LoadLibraryExA(lplibfilename: *const i8, hfile: HANDLE, dwflags: u32) -> HINSTANCE; pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME); pub fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO); @@ -1250,21 +1253,16 @@ compat_fn_with_fallback! { } } -compat_fn_with_fallback! { - pub static SYNCH_API: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0"); - #[allow(unused)] - fn WakeByAddressSingle(Address: LPVOID) -> () { - // This fallback is currently tightly coupled to its use in Parker::unpark. - // - // FIXME: If `WakeByAddressSingle` needs to be used anywhere other than - // Parker::unpark then this fallback will be wrong and will need to be decoupled. - crate::sys::windows::thread_parker::unpark_keyed_event(Address) - } +compat_fn_optional! { + crate::sys::compat::load_synch_functions(); + pub fn WaitOnAddress( + Address: LPVOID, + CompareAddress: LPVOID, + AddressSize: SIZE_T, + dwMilliseconds: DWORD + ); + pub fn WakeByAddressSingle(Address: LPVOID); } -pub use crate::sys::compat::WaitOnAddress; -// Change exported name of `WakeByAddressSingle` to make the strange fallback -// behaviour clear. -pub use WakeByAddressSingle::call as wake_by_address_single_or_unpark_keyed_event; compat_fn_with_fallback! { pub static NTDLL: &CStr = ansi_str!("ntdll"); diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs index 473544c4d4f..9c8ddc3aa1d 100644 --- a/library/std/src/sys/windows/compat.rs +++ b/library/std/src/sys/windows/compat.rs @@ -21,6 +21,7 @@ use crate::ffi::{c_void, CStr}; use crate::ptr::NonNull; +use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sys::c; /// Helper macro for creating CStrs from literals and symbol names. @@ -74,6 +75,20 @@ impl Module { NonNull::new(module).map(Self) } + /// Load the library (if not already loaded) + /// + /// # Safety + /// + /// The module must not be unloaded. + pub unsafe fn load_system_library(name: &CStr) -> Option<Self> { + let module = c::LoadLibraryExA( + name.as_ptr(), + crate::ptr::null_mut(), + c::LOAD_LIBRARY_SEARCH_SYSTEM32, + ); + NonNull::new(module).map(Self) + } + // Try to get the address of a function. pub fn proc_address(self, name: &CStr) -> Option<NonNull<c_void>> { // SAFETY: @@ -144,61 +159,63 @@ macro_rules! compat_fn_with_fallback { )*) } -/// Optionally load `WaitOnAddress`. -/// Unlike the dynamic loading described above, this does not have a fallback. +/// Optionally loaded functions. /// -/// This is rexported from sys::c. You should prefer to import -/// from there in case this changes again in the future. 
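The `Header` reads in `sys/windows/alloc.rs` above depend on a header being written immediately before the pointer handed out to callers. A rough, platform-independent illustration of that layout trick; the names are hypothetical, and real code would also handle allocation failure and larger alignments:

use std::alloc::{alloc, dealloc, Layout};
use std::mem::{align_of, size_of};

// Points back to the true start of the allocation, like Header in the diff.
#[repr(C)]
struct Header(*mut u8);

unsafe fn alloc_with_header(size: usize) -> *mut u8 {
    let layout =
        Layout::from_size_align(size + size_of::<Header>(), align_of::<Header>()).unwrap();
    let start = alloc(layout);
    assert!(!start.is_null());
    // Hand out a pointer *after* the header, and stash the header right before it.
    let user = start.add(size_of::<Header>());
    (user as *mut Header).sub(1).write(Header(start));
    user
}

unsafe fn dealloc_with_header(user: *mut u8, size: usize) {
    // Recover the real allocation start, as the diff does with
    // ptr::read((ptr as *mut Header).sub(1)).
    let Header(start) = (user as *mut Header).sub(1).read();
    let layout =
        Layout::from_size_align(size + size_of::<Header>(), align_of::<Header>()).unwrap();
    dealloc(start, layout);
}

fn main() {
    unsafe {
        let p = alloc_with_header(32);
        p.write_bytes(0xAB, 32);
        dealloc_with_header(p, 32);
    }
}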
-pub mod WaitOnAddress { - use super::*; - use crate::mem; - use crate::ptr; - use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; - use crate::sys::c; - - static MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0"); - static SYMBOL_NAME: &CStr = ansi_str!("WaitOnAddress"); - - // WaitOnAddress function signature. - type F = unsafe extern "system" fn( - Address: c::LPVOID, - CompareAddress: c::LPVOID, - AddressSize: c::SIZE_T, - dwMilliseconds: c::DWORD, - ); - - // A place to store the loaded function atomically. - static WAIT_ON_ADDRESS: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut()); - - // We can skip trying to load again if we already tried. - static LOAD_MODULE: AtomicBool = AtomicBool::new(true); - - #[inline(always)] - pub fn option() -> Option<F> { - let f = WAIT_ON_ADDRESS.load(Ordering::Acquire); - if !f.is_null() { Some(unsafe { mem::transmute(f) }) } else { try_load() } - } +/// Actual loading of the function defers to $load_functions. +macro_rules! compat_fn_optional { + ($load_functions:expr; + $( + $(#[$meta:meta])* + $vis:vis fn $symbol:ident($($argname:ident: $argtype:ty),*) $(-> $rettype:ty)?; + )+) => ( + $( + pub mod $symbol { + use super::*; + use crate::ffi::c_void; + use crate::mem; + use crate::ptr::{self, NonNull}; + use crate::sync::atomic::{AtomicPtr, Ordering}; + + pub(in crate::sys) static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut()); + + type F = unsafe extern "system" fn($($argtype),*) $(-> $rettype)?; + + #[inline(always)] + pub fn option() -> Option<F> { + let f = PTR.load(Ordering::Acquire); + if !f.is_null() { Some(unsafe { mem::transmute(f) }) } else { try_load() } + } - #[cold] - fn try_load() -> Option<F> { - if LOAD_MODULE.load(Ordering::Acquire) { - // load the module - let mut wait_on_address = None; - if let Some(func) = try_load_inner() { - WAIT_ON_ADDRESS.store(func.as_ptr(), Ordering::Release); - wait_on_address = Some(unsafe { mem::transmute(func) }); + #[cold] + fn try_load() -> Option<F> { + $load_functions; + NonNull::new(PTR.load(Ordering::Acquire)).map(|f| unsafe { mem::transmute(f) }) + } } - // Don't try to load the module again even if loading failed. - LOAD_MODULE.store(false, Ordering::Release); - wait_on_address - } else { - None - } - } + )+ + ) +} - // In the future this could be a `try` block but until then I think it's a - // little bit cleaner as a separate function. - fn try_load_inner() -> Option<NonNull<c_void>> { - unsafe { Module::new(MODULE_NAME)?.proc_address(SYMBOL_NAME) } +/// Load all needed functions from "api-ms-win-core-synch-l1-2-0". +pub(super) fn load_synch_functions() { + fn try_load() -> Option<()> { + const MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0"); + const WAIT_ON_ADDRESS: &CStr = ansi_str!("WaitOnAddress"); + const WAKE_BY_ADDRESS_SINGLE: &CStr = ansi_str!("WakeByAddressSingle"); + + // Try loading the library and all the required functions. + // If any step fails, then they all fail. + let library = unsafe { Module::load_system_library(MODULE_NAME) }?; + let wait_on_address = library.proc_address(WAIT_ON_ADDRESS)?; + let wake_by_address_single = library.proc_address(WAKE_BY_ADDRESS_SINGLE)?; + + c::WaitOnAddress::PTR.store(wait_on_address.as_ptr(), Ordering::Release); + c::WakeByAddressSingle::PTR.store(wake_by_address_single.as_ptr(), Ordering::Release); + Some(()) } + + // Try to load the module but skip loading if a previous attempt failed. 
+ static LOAD_MODULE: AtomicBool = AtomicBool::new(true); + let module_loaded = LOAD_MODULE.load(Ordering::Acquire) && try_load().is_some(); + LOAD_MODULE.store(module_loaded, Ordering::Release) } diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs index aed082b3e0a..1361b9c90c0 100644 --- a/library/std/src/sys/windows/fs.rs +++ b/library/std/src/sys/windows/fs.rs @@ -512,7 +512,7 @@ impl File { )); } }; - let subst_ptr = path_buffer.offset(subst_off as isize); + let subst_ptr = path_buffer.add(subst_off.into()); let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize); // Absolute paths start with an NT internal namespace prefix `\??\` // We should not let it leak through. @@ -1345,10 +1345,10 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> { let v = br"\??\"; let v = v.iter().map(|x| *x as u16); for c in v.chain(original.as_os_str().encode_wide()) { - *buf.offset(i) = c; + *buf.add(i) = c; i += 1; } - *buf.offset(i) = 0; + *buf.add(i) = 0; i += 1; (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT; (*db).ReparseTargetMaximumLength = (i * 2) as c::WORD; diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs index b3f6d2d0aae..a9846a48488 100644 --- a/library/std/src/sys/windows/mod.rs +++ b/library/std/src/sys/windows/mod.rs @@ -2,6 +2,7 @@ use crate::ffi::{CStr, OsStr, OsString}; use crate::io::ErrorKind; +use crate::mem::MaybeUninit; use crate::os::windows::ffi::{OsStrExt, OsStringExt}; use crate::path::PathBuf; use crate::time::Duration; @@ -204,8 +205,8 @@ where // This initial size also works around `GetFullPathNameW` returning // incorrect size hints for some short paths: // https://github.com/dylni/normpath/issues/5 - let mut stack_buf = [0u16; 512]; - let mut heap_buf = Vec::new(); + let mut stack_buf: [MaybeUninit<u16>; 512] = MaybeUninit::uninit_array(); + let mut heap_buf: Vec<MaybeUninit<u16>> = Vec::new(); unsafe { let mut n = stack_buf.len(); loop { @@ -214,6 +215,11 @@ where } else { let extra = n - heap_buf.len(); heap_buf.reserve(extra); + // We used `reserve` and not `reserve_exact`, so in theory we + // may have gotten more than requested. If so, we'd like to use + // it... so long as we won't cause overflow. + n = heap_buf.capacity().min(c::DWORD::MAX as usize); + // Safety: MaybeUninit<u16> does not need initialization heap_buf.set_len(n); &mut heap_buf[..] }; @@ -228,13 +234,13 @@ where // error" is still 0 then we interpret it as a 0 length buffer and // not an actual error. c::SetLastError(0); - let k = match f1(buf.as_mut_ptr(), n as c::DWORD) { + let k = match f1(buf.as_mut_ptr().cast::<u16>(), n as c::DWORD) { 0 if c::GetLastError() == 0 => 0, 0 => return Err(crate::io::Error::last_os_error()), n => n, } as usize; if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER { - n *= 2; + n = n.saturating_mul(2).min(c::DWORD::MAX as usize); } else if k > n { n = k; } else if k == n { @@ -244,7 +250,9 @@ where // Therefore k never equals n. unreachable!(); } else { - return Ok(f2(&buf[..k])); + // Safety: First `k` values are initialized. 
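The `compat_fn_optional!` machinery above caches each resolved symbol in an `AtomicPtr` and hands out a typed function pointer on demand. A portable toy of that caching scheme, with a stand-in loader in place of `LoadLibraryExA`/`GetProcAddress`; all names here are made up:

use std::mem;
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

type Func = fn(usize) -> usize;

// Cached address of the optional function; null until (and unless) it loads.
static PTR: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());

// Stand-in for Module::proc_address(): pretend the symbol was found.
fn stand_in_loader() -> Option<*mut ()> {
    fn resolved(x: usize) -> usize {
        x + 1
    }
    Some(resolved as Func as *const () as *mut ())
}

// Equivalent of the option() accessor the macro generates.
fn option() -> Option<Func> {
    let f = PTR.load(Ordering::Acquire);
    if !f.is_null() { Some(unsafe { mem::transmute::<*mut (), Func>(f) }) } else { try_load() }
}

#[cold]
fn try_load() -> Option<Func> {
    if let Some(f) = stand_in_loader() {
        PTR.store(f, Ordering::Release);
    }
    let f = PTR.load(Ordering::Acquire);
    if f.is_null() { None } else { Some(unsafe { mem::transmute::<*mut (), Func>(f) }) }
}

fn main() {
    match option() {
        Some(f) => assert_eq!(f(41), 42),
        None => println!("symbol not available, take the fallback path"),
    }
}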
+ let slice: &[u16] = MaybeUninit::slice_assume_init_ref(&buf[..k]); + return Ok(f2(slice)); } } } diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs index bcac996c024..352337ba322 100644 --- a/library/std/src/sys/windows/os.rs +++ b/library/std/src/sys/windows/os.rs @@ -99,11 +99,11 @@ impl Iterator for Env { } let p = self.cur as *const u16; let mut len = 0; - while *p.offset(len) != 0 { + while *p.add(len) != 0 { len += 1; } - let s = slice::from_raw_parts(p, len as usize); - self.cur = self.cur.offset(len + 1); + let s = slice::from_raw_parts(p, len); + self.cur = self.cur.add(len + 1); // Windows allows environment variables to start with an equals // symbol (in any other position, this is the separator between diff --git a/library/std/src/sys/windows/thread_local_dtor.rs b/library/std/src/sys/windows/thread_local_dtor.rs index 25d1c6e8e87..9707a95dff2 100644 --- a/library/std/src/sys/windows/thread_local_dtor.rs +++ b/library/std/src/sys/windows/thread_local_dtor.rs @@ -8,10 +8,14 @@ #[thread_local] static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new(); +// Ensure this can never be inlined because otherwise this may break in dylibs. +// See #44391. +#[inline(never)] pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { DESTRUCTORS.push((t, dtor)); } +#[inline(never)] // See comment above /// Runs destructors. This should not be called until thread exit. pub unsafe fn run_keyless_dtors() { // Drop all the destructors. diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs index 16863c9903a..2f7ae863b6a 100644 --- a/library/std/src/sys/windows/thread_parker.rs +++ b/library/std/src/sys/windows/thread_parker.rs @@ -198,8 +198,18 @@ impl Parker { // with park(). if self.state.swap(NOTIFIED, Release) == PARKED { unsafe { - // This calls either WakeByAddressSingle or unpark_keyed_event (see below). - c::wake_by_address_single_or_unpark_keyed_event(self.ptr()); + if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() { + wake_by_address_single(self.ptr()); + } else { + // If we run NtReleaseKeyedEvent before the waiting thread runs + // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up. + // If the waiting thread wakes up before we run NtReleaseKeyedEvent + // (e.g. due to a timeout), this blocks until we do wake up a thread. + // To prevent this thread from blocking indefinitely in that case, + // park_impl() will, after seeing the state set to NOTIFIED after + // waking up, call NtWaitForKeyedEvent again to unblock us. + c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut()); + } } } } @@ -209,19 +219,6 @@ impl Parker { } } -// This function signature makes it compatible with c::WakeByAddressSingle -// so that it can be used as a fallback for that function. -pub unsafe extern "C" fn unpark_keyed_event(address: c::LPVOID) { - // If we run NtReleaseKeyedEvent before the waiting thread runs - // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up. - // If the waiting thread wakes up before we run NtReleaseKeyedEvent - // (e.g. due to a timeout), this blocks until we do wake up a thread. - // To prevent this thread from blocking indefinitely in that case, - // park_impl() will, after seeing the state set to NOTIFIED after - // waking up, call NtWaitForKeyedEvent again to unblock us. 
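The `Env` iterator change above keeps the same NUL-terminated UTF-16 scan but switches to `add` and drops the casts. The scan itself, as a standalone sketch (`wide_len` and the sample block are illustrative only):

// Count the u16 units before the terminating NUL, like Env::next does
// before building the slice for each "KEY=value" entry.
unsafe fn wide_len(p: *const u16) -> usize {
    let mut len = 0;
    while *p.add(len) != 0 {
        len += 1;
    }
    len
}

fn main() {
    // A tiny two-entry environment block: "A=1\0B=2\0\0"
    let block: Vec<u16> = "A=1\0B=2\0\0".encode_utf16().collect();
    let first = unsafe { wide_len(block.as_ptr()) };
    assert_eq!(first, 3);
    assert_eq!(String::from_utf16_lossy(&block[..first]), "A=1");
}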
- c::NtReleaseKeyedEvent(keyed_event_handle(), address, 0, ptr::null_mut()); -} - fn keyed_event_handle() -> c::HANDLE { const INVALID: c::HANDLE = ptr::invalid_mut(!0); static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID); diff --git a/library/std/src/sys_common/thread_local_key/tests.rs b/library/std/src/sys_common/thread_local_key/tests.rs index 968738a4180..6f32b858f09 100644 --- a/library/std/src/sys_common/thread_local_key/tests.rs +++ b/library/std/src/sys_common/thread_local_key/tests.rs @@ -1,4 +1,5 @@ use super::{Key, StaticKey}; +use core::ptr; fn assert_sync<T: Sync>() {} fn assert_send<T: Send>() {} @@ -12,8 +13,8 @@ fn smoke() { let k2 = Key::new(None); assert!(k1.get().is_null()); assert!(k2.get().is_null()); - k1.set(1 as *mut _); - k2.set(2 as *mut _); + k1.set(ptr::invalid_mut(1)); + k2.set(ptr::invalid_mut(2)); assert_eq!(k1.get() as usize, 1); assert_eq!(k2.get() as usize, 2); } @@ -26,8 +27,8 @@ fn statik() { unsafe { assert!(K1.get().is_null()); assert!(K2.get().is_null()); - K1.set(1 as *mut _); - K2.set(2 as *mut _); + K1.set(ptr::invalid_mut(1)); + K2.set(ptr::invalid_mut(2)); assert_eq!(K1.get() as usize, 1); assert_eq!(K2.get() as usize, 2); } diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 44c8a50fd86..479669647c1 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -170,7 +170,6 @@ use crate::ptr::addr_of_mut; use crate::str; use crate::sync::Arc; use crate::sys::thread as imp; -use crate::sys_common::mutex; use crate::sys_common::thread; use crate::sys_common::thread_info; use crate::sys_common::thread_parker::Parker; @@ -1033,24 +1032,48 @@ pub struct ThreadId(NonZeroU64); impl ThreadId { // Generate a new unique thread ID. fn new() -> ThreadId { - // It is UB to attempt to acquire this mutex reentrantly! - static GUARD: mutex::StaticMutex = mutex::StaticMutex::new(); - static mut COUNTER: u64 = 1; - - unsafe { - let guard = GUARD.lock(); - - // If we somehow use up all our bits, panic so that we're not - // covering up subtle bugs of IDs being reused. - if COUNTER == u64::MAX { - drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire. - panic!("failed to generate unique thread ID: bitspace exhausted"); - } - - let id = COUNTER; - COUNTER += 1; + #[cold] + fn exhausted() -> ! { + panic!("failed to generate unique thread ID: bitspace exhausted") + } - ThreadId(NonZeroU64::new(id).unwrap()) + cfg_if::cfg_if! { + if #[cfg(target_has_atomic = "64")] { + use crate::sync::atomic::{AtomicU64, Ordering::Relaxed}; + + static COUNTER: AtomicU64 = AtomicU64::new(0); + + let mut last = COUNTER.load(Relaxed); + loop { + let Some(id) = last.checked_add(1) else { + exhausted(); + }; + + match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) { + Ok(_) => return ThreadId(NonZeroU64::new(id).unwrap()), + Err(id) => last = id, + } + } + } else { + use crate::sys_common::mutex::StaticMutex; + + // It is UB to attempt to acquire this mutex reentrantly! + static GUARD: StaticMutex = StaticMutex::new(); + static mut COUNTER: u64 = 0; + + unsafe { + let guard = GUARD.lock(); + + let Some(id) = COUNTER.checked_add(1) else { + drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire. 
+ exhausted(); + }; + + COUNTER = id; + drop(guard); + ThreadId(NonZeroU64::new(id).unwrap()) + } + } } } diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs index ec68b529188..130e47c8d44 100644 --- a/library/std/src/thread/tests.rs +++ b/library/std/src/thread/tests.rs @@ -329,3 +329,22 @@ fn test_scoped_threads_nll() { let x = 42_u8; foo(&x); } + +// Regression test for https://github.com/rust-lang/rust/issues/98498. +#[test] +#[cfg(miri)] // relies on Miri's data race detector +fn scope_join_race() { + for _ in 0..100 { + let a_bool = AtomicBool::new(false); + + thread::scope(|s| { + for _ in 0..5 { + s.spawn(|| a_bool.load(Ordering::Relaxed)); + } + + for _ in 0..5 { + s.spawn(|| a_bool.load(Ordering::Relaxed)); + } + }); + } +} diff --git a/library/std/src/time/tests.rs b/library/std/src/time/tests.rs index d710a574465..6229556c85f 100644 --- a/library/std/src/time/tests.rs +++ b/library/std/src/time/tests.rs @@ -31,7 +31,8 @@ fn instant_monotonic_concurrent() -> crate::thread::Result<()> { .map(|_| { crate::thread::spawn(|| { let mut old = Instant::now(); - for _ in 0..5_000_000 { + let count = if cfg!(miri) { 1_000 } else { 5_000_000 }; + for _ in 0..count { let new = Instant::now(); assert!(new >= old); old = new; |
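The lock-free `ThreadId::new` above is a checked, monotonically increasing counter built on `compare_exchange_weak`. The same shape, extracted into a standalone generator (a sketch with a made-up `next_id`, not the std type):

use std::num::NonZeroU64;
use std::sync::atomic::{AtomicU64, Ordering::Relaxed};

static COUNTER: AtomicU64 = AtomicU64::new(0);

// Returns a unique non-zero id, panicking instead of wrapping around.
fn next_id() -> NonZeroU64 {
    let mut last = COUNTER.load(Relaxed);
    loop {
        let id = last.checked_add(1).expect("id space exhausted");
        // On contention, compare_exchange_weak hands back the fresher value,
        // so the loop retries from there instead of reloading.
        match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) {
            Ok(_) => return NonZeroU64::new(id).unwrap(),
            Err(newer) => last = newer,
        }
    }
}

fn main() {
    let a = next_id();
    let b = next_id();
    assert_ne!(a, b);
    assert_eq!(b.get(), a.get() + 1); // single-threaded here, so strictly sequential
}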
