Diffstat (limited to 'library')
51 files changed, 1259 insertions, 1394 deletions
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index c5f0cb8016e..45109270944 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -146,28 +146,7 @@ impl<T: PointeeSized> *const T { self as _ } - /// Gets the "address" portion of the pointer. - /// - /// This is similar to `self as usize`, except that the [provenance][crate::ptr#provenance] of - /// the pointer is discarded and not [exposed][crate::ptr#exposed-provenance]. This means that - /// casting the returned address back to a pointer yields a [pointer without - /// provenance][without_provenance], which is undefined behavior to dereference. To properly - /// restore the lost information and obtain a dereferenceable pointer, use - /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr]. - /// - /// If using those APIs is not possible because there is no way to preserve a pointer with the - /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts - /// or [`expose_provenance`][pointer::expose_provenance] and [`with_exposed_provenance`][with_exposed_provenance] - /// instead. However, note that this makes your code less portable and less amenable to tools - /// that check for compliance with the Rust memory model. - /// - /// On most platforms this will produce a value with the same bytes as the original - /// pointer, because all the bytes are dedicated to describing the address. - /// Platforms which need to store additional information in the pointer may - /// perform a change of representation to produce a value containing only the address - /// portion of the pointer. What that means is up to the platform to define. - /// - /// This is a [Strict Provenance][crate::ptr#strict-provenance] API. + #[doc = include_str!("./docs/addr.md")] #[must_use] #[inline(always)] #[stable(feature = "strict_provenance", since = "1.84.0")] @@ -254,23 +233,16 @@ impl<T: PointeeSized> *const T { (self.cast(), metadata(self)) } - /// Returns `None` if the pointer is null, or else returns a shared reference to - /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`] - /// must be used instead. - /// - /// [`as_uninit_ref`]: #method.as_uninit_ref - /// - /// # Safety - /// - /// When calling this method, you have to ensure that *either* the pointer is null *or* - /// the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion). + #[doc = include_str!("./docs/as_ref.md")] /// - /// # Panics during const evaluation - /// - /// This method will panic during const evaluation if the pointer cannot be - /// determined to be null or not. See [`is_null`] for more information. + /// ``` + /// let ptr: *const u8 = &10u8 as *const u8; /// - /// [`is_null`]: #method.is_null + /// unsafe { + /// let val_back = &*ptr; + /// assert_eq!(val_back, &10); + /// } + /// ``` /// /// # Examples /// @@ -284,20 +256,9 @@ impl<T: PointeeSized> *const T { /// } /// ``` /// - /// # Null-unchecked version /// - /// If you are sure the pointer can never be null and are looking for some kind of - /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can - /// dereference the pointer directly. 
- /// - /// ``` - /// let ptr: *const u8 = &10u8 as *const u8; - /// - /// unsafe { - /// let val_back = &*ptr; - /// assert_eq!(val_back, &10); - /// } - /// ``` + /// [`is_null`]: #method.is_null + /// [`as_uninit_ref`]: #method.as_uninit_ref #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[rustc_const_stable(feature = "const_ptr_is_null", since = "1.84.0")] #[inline] @@ -338,23 +299,10 @@ impl<T: PointeeSized> *const T { unsafe { &*self } } - /// Returns `None` if the pointer is null, or else returns a shared reference to - /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require - /// that the value has to be initialized. - /// - /// [`as_ref`]: #method.as_ref - /// - /// # Safety - /// - /// When calling this method, you have to ensure that *either* the pointer is null *or* - /// the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion). - /// - /// # Panics during const evaluation - /// - /// This method will panic during const evaluation if the pointer cannot be - /// determined to be null or not. See [`is_null`] for more information. + #[doc = include_str!("./docs/as_uninit_ref.md")] /// /// [`is_null`]: #method.is_null + /// [`as_ref`]: #method.as_ref /// /// # Examples /// diff --git a/library/core/src/ptr/docs/INFO.md b/library/core/src/ptr/docs/INFO.md new file mode 100644 index 00000000000..28a0da4926a --- /dev/null +++ b/library/core/src/ptr/docs/INFO.md @@ -0,0 +1,21 @@ +This directory holds method documentation that otherwise +would be duplicated across mutable and immutable pointers. + +Note that most of the docs here are not the complete docs +for their corresponding method. This is for a few reasons: + +1. Examples need to be different for mutable/immutable + pointers, in order to actually call the correct method. +2. Link reference definitions are frequently different + between mutable/immutable pointers, in order to link to + the correct method. + For example, `<*const T>::as_ref` links to + `<*const T>::is_null`, while `<*mut T>::as_ref` links to + `<*mut T>::is_null`. +3. Many methods on mutable pointers link to an alternate + version that returns a mutable reference instead of + a shared reference. + +Always review the rendered docs manually when making +changes to these files to make sure you're not accidentally +splitting up a section. diff --git a/library/core/src/ptr/docs/addr.md b/library/core/src/ptr/docs/addr.md new file mode 100644 index 00000000000..785b88a9987 --- /dev/null +++ b/library/core/src/ptr/docs/addr.md @@ -0,0 +1,22 @@ +Gets the "address" portion of the pointer. + +This is similar to `self as usize`, except that the [provenance][crate::ptr#provenance] of +the pointer is discarded and not [exposed][crate::ptr#exposed-provenance]. This means that +casting the returned address back to a pointer yields a [pointer without +provenance][without_provenance], which is undefined behavior to dereference. To properly +restore the lost information and obtain a dereferenceable pointer, use +[`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr]. + +If using those APIs is not possible because there is no way to preserve a pointer with the +required provenance, then Strict Provenance might not be for you. Use pointer-integer casts +or [`expose_provenance`][pointer::expose_provenance] and [`with_exposed_provenance`][with_exposed_provenance] +instead. However, note that this makes your code less portable and less amenable to tools +that check for compliance with the Rust memory model. 
+ +On most platforms this will produce a value with the same bytes as the original +pointer, because all the bytes are dedicated to describing the address. +Platforms which need to store additional information in the pointer may +perform a change of representation to produce a value containing only the address +portion of the pointer. What that means is up to the platform to define. + +This is a [Strict Provenance][crate::ptr#strict-provenance] API. diff --git a/library/core/src/ptr/docs/as_ref.md b/library/core/src/ptr/docs/as_ref.md new file mode 100644 index 00000000000..0c0d2768c74 --- /dev/null +++ b/library/core/src/ptr/docs/as_ref.md @@ -0,0 +1,19 @@ +Returns `None` if the pointer is null, or else returns a shared reference to +the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`] +must be used instead. + +# Safety + +When calling this method, you have to ensure that *either* the pointer is null *or* +the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion). + +# Panics during const evaluation + +This method will panic during const evaluation if the pointer cannot be +determined to be null or not. See [`is_null`] for more information. + +# Null-unchecked version + +If you are sure the pointer can never be null and are looking for some kind of +`as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can +dereference the pointer directly. diff --git a/library/core/src/ptr/docs/as_uninit_ref.md b/library/core/src/ptr/docs/as_uninit_ref.md new file mode 100644 index 00000000000..5b9a1ecb85b --- /dev/null +++ b/library/core/src/ptr/docs/as_uninit_ref.md @@ -0,0 +1,15 @@ +Returns `None` if the pointer is null, or else returns a shared reference to +the value wrapped in `Some`. In contrast to [`as_ref`], this does not require +that the value has to be initialized. + +# Safety + +When calling this method, you have to ensure that *either* the pointer is null *or* +the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion). +Note that because the created reference is to `MaybeUninit<T>`, the +source pointer can point to uninitialized memory. + +# Panics during const evaluation + +This method will panic during const evaluation if the pointer cannot be +determined to be null or not. See [`is_null`] for more information. diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index ce6eee4f911..ba78afc7ea1 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -135,28 +135,9 @@ impl<T: PointeeSized> *mut T { self as _ } - /// Gets the "address" portion of the pointer. - /// - /// This is similar to `self as usize`, except that the [provenance][crate::ptr#provenance] of - /// the pointer is discarded and not [exposed][crate::ptr#exposed-provenance]. This means that - /// casting the returned address back to a pointer yields a [pointer without - /// provenance][without_provenance_mut], which is undefined behavior to dereference. To properly - /// restore the lost information and obtain a dereferenceable pointer, use - /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr]. - /// - /// If using those APIs is not possible because there is no way to preserve a pointer with the - /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts - /// or [`expose_provenance`][pointer::expose_provenance] and [`with_exposed_provenance`][with_exposed_provenance] - /// instead. 
However, note that this makes your code less portable and less amenable to tools - /// that check for compliance with the Rust memory model. - /// - /// On most platforms this will produce a value with the same bytes as the original - /// pointer, because all the bytes are dedicated to describing the address. - /// Platforms which need to store additional information in the pointer may - /// perform a change of representation to produce a value containing only the address - /// portion of the pointer. What that means is up to the platform to define. + #[doc = include_str!("./docs/addr.md")] /// - /// This is a [Strict Provenance][crate::ptr#strict-provenance] API. + /// [without_provenance]: without_provenance_mut #[must_use] #[inline(always)] #[stable(feature = "strict_provenance", since = "1.84.0")] @@ -243,26 +224,16 @@ impl<T: PointeeSized> *mut T { (self.cast(), super::metadata(self)) } - /// Returns `None` if the pointer is null, or else returns a shared reference to - /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`] - /// must be used instead. - /// - /// For the mutable counterpart see [`as_mut`]. + #[doc = include_str!("./docs/as_ref.md")] /// - /// [`as_uninit_ref`]: pointer#method.as_uninit_ref-1 - /// [`as_mut`]: #method.as_mut - /// - /// # Safety - /// - /// When calling this method, you have to ensure that *either* the pointer is null *or* - /// the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion). - /// - /// # Panics during const evaluation - /// - /// This method will panic during const evaluation if the pointer cannot be - /// determined to be null or not. See [`is_null`] for more information. + /// ``` + /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// - /// [`is_null`]: #method.is_null-1 + /// unsafe { + /// let val_back = &*ptr; + /// println!("We got back the value: {val_back}!"); + /// } + /// ``` /// /// # Examples /// @@ -276,20 +247,14 @@ impl<T: PointeeSized> *mut T { /// } /// ``` /// - /// # Null-unchecked version - /// - /// If you are sure the pointer can never be null and are looking for some kind of - /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can - /// dereference the pointer directly. + /// # See Also /// - /// ``` - /// let ptr: *mut u8 = &mut 10u8 as *mut u8; + /// For the mutable counterpart see [`as_mut`]. /// - /// unsafe { - /// let val_back = &*ptr; - /// println!("We got back the value: {val_back}!"); - /// } - /// ``` + /// [`is_null`]: #method.is_null-1 + /// [`as_uninit_ref`]: pointer#method.as_uninit_ref-1 + /// [`as_mut`]: #method.as_mut + #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[rustc_const_stable(feature = "const_ptr_is_null", since = "1.84.0")] #[inline] @@ -332,28 +297,15 @@ impl<T: PointeeSized> *mut T { unsafe { &*self } } - /// Returns `None` if the pointer is null, or else returns a shared reference to - /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require - /// that the value has to be initialized. - /// - /// For the mutable counterpart see [`as_uninit_mut`]. + #[doc = include_str!("./docs/as_uninit_ref.md")] /// + /// [`is_null`]: #method.is_null-1 /// [`as_ref`]: pointer#method.as_ref-1 - /// [`as_uninit_mut`]: #method.as_uninit_mut - /// - /// # Safety - /// - /// When calling this method, you have to ensure that *either* the pointer is null *or* - /// the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion). 
- /// Note that because the created reference is to `MaybeUninit<T>`, the - /// source pointer can point to uninitialized memory. - /// - /// # Panics during const evaluation /// - /// This method will panic during const evaluation if the pointer cannot be - /// determined to be null or not. See [`is_null`] for more information. + /// # See Also + /// For the mutable counterpart see [`as_uninit_mut`]. /// - /// [`is_null`]: #method.is_null-1 + /// [`as_uninit_mut`]: #method.as_uninit_mut /// /// # Examples /// diff --git a/library/coretests/tests/array.rs b/library/coretests/tests/array.rs index 30ccbbc3203..c4a8fc74fec 100644 --- a/library/coretests/tests/array.rs +++ b/library/coretests/tests/array.rs @@ -717,3 +717,10 @@ fn array_map_drops_unmapped_elements_on_panic() { assert_eq!(counter.load(Ordering::SeqCst), MAX); } } + +// This covers the `PartialEq::<[T]>::eq` impl for `[T; N]` when it returns false. +#[test] +fn array_eq() { + let not_true = [0u8] == [].as_slice(); + assert!(!not_true); +} diff --git a/library/coretests/tests/floats/f128.rs b/library/coretests/tests/floats/f128.rs index 4267fef50f8..d31eba863f5 100644 --- a/library/coretests/tests/floats/f128.rs +++ b/library/coretests/tests/floats/f128.rs @@ -17,12 +17,6 @@ const TOL: f128 = 1e-12; #[allow(unused)] const TOL_PRECISE: f128 = 1e-28; -/// First pattern over the mantissa -const NAN_MASK1: u128 = 0x0000aaaaaaaaaaaaaaaaaaaaaaaaaaaa; - -/// Second pattern over the mantissa -const NAN_MASK2: u128 = 0x00005555555555555555555555555555; - // FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support // the intrinsics. @@ -55,28 +49,6 @@ fn test_max_recip() { } #[test] -fn test_float_bits_conv() { - assert_eq!((1f128).to_bits(), 0x3fff0000000000000000000000000000); - assert_eq!((12.5f128).to_bits(), 0x40029000000000000000000000000000); - assert_eq!((1337f128).to_bits(), 0x40094e40000000000000000000000000); - assert_eq!((-14.25f128).to_bits(), 0xc002c800000000000000000000000000); - assert_biteq!(f128::from_bits(0x3fff0000000000000000000000000000), 1.0); - assert_biteq!(f128::from_bits(0x40029000000000000000000000000000), 12.5); - assert_biteq!(f128::from_bits(0x40094e40000000000000000000000000), 1337.0); - assert_biteq!(f128::from_bits(0xc002c800000000000000000000000000), -14.25); - - // Check that NaNs roundtrip their bits regardless of signaling-ness - // 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits - let masked_nan1 = f128::NAN.to_bits() ^ NAN_MASK1; - let masked_nan2 = f128::NAN.to_bits() ^ NAN_MASK2; - assert!(f128::from_bits(masked_nan1).is_nan()); - assert!(f128::from_bits(masked_nan2).is_nan()); - - assert_eq!(f128::from_bits(masked_nan1).to_bits(), masked_nan1); - assert_eq!(f128::from_bits(masked_nan2).to_bits(), masked_nan2); -} - -#[test] fn test_from() { assert_biteq!(f128::from(false), 0.0); assert_biteq!(f128::from(true), 1.0); diff --git a/library/coretests/tests/floats/f16.rs b/library/coretests/tests/floats/f16.rs index d2818a6d768..302fd0861d7 100644 --- a/library/coretests/tests/floats/f16.rs +++ b/library/coretests/tests/floats/f16.rs @@ -19,12 +19,6 @@ const TOL_P2: f16 = 0.5; #[allow(unused)] const TOL_P4: f16 = 10.0; -/// First pattern over the mantissa -const NAN_MASK1: u16 = 0x02aa; - -/// Second pattern over the mantissa -const NAN_MASK2: u16 = 0x0155; - // FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support // the intrinsics. 
@@ -53,27 +47,6 @@ fn test_max_recip() { } #[test] -fn test_float_bits_conv() { - assert_eq!((1f16).to_bits(), 0x3c00); - assert_eq!((12.5f16).to_bits(), 0x4a40); - assert_eq!((1337f16).to_bits(), 0x6539); - assert_eq!((-14.25f16).to_bits(), 0xcb20); - assert_biteq!(f16::from_bits(0x3c00), 1.0); - assert_biteq!(f16::from_bits(0x4a40), 12.5); - assert_biteq!(f16::from_bits(0x6539), 1337.0); - assert_biteq!(f16::from_bits(0xcb20), -14.25); - - // Check that NaNs roundtrip their bits regardless of signaling-ness - let masked_nan1 = f16::NAN.to_bits() ^ NAN_MASK1; - let masked_nan2 = f16::NAN.to_bits() ^ NAN_MASK2; - assert!(f16::from_bits(masked_nan1).is_nan()); - assert!(f16::from_bits(masked_nan2).is_nan()); - - assert_eq!(f16::from_bits(masked_nan1).to_bits(), masked_nan1); - assert_eq!(f16::from_bits(masked_nan2).to_bits(), masked_nan2); -} - -#[test] fn test_from() { assert_biteq!(f16::from(false), 0.0); assert_biteq!(f16::from(true), 1.0); diff --git a/library/coretests/tests/floats/f32.rs b/library/coretests/tests/floats/f32.rs index 7b25f354da4..a1fe8b07650 100644 --- a/library/coretests/tests/floats/f32.rs +++ b/library/coretests/tests/floats/f32.rs @@ -2,12 +2,6 @@ use core::f32; use super::assert_biteq; -/// First pattern over the mantissa -const NAN_MASK1: u32 = 0x002a_aaaa; - -/// Second pattern over the mantissa -const NAN_MASK2: u32 = 0x0055_5555; - // FIXME(#140515): mingw has an incorrect fma https://sourceforge.net/p/mingw-w64/bugs/848/ #[cfg_attr(all(target_os = "windows", target_env = "gnu", not(target_abi = "llvm")), ignore)] #[test] @@ -25,25 +19,3 @@ fn test_mul_add() { assert_biteq!(f32::math::mul_add(8.9f32, inf, 3.2), inf); assert_biteq!(f32::math::mul_add(-3.2f32, 2.4, neg_inf), neg_inf); } - -#[test] -fn test_float_bits_conv() { - assert_eq!((1f32).to_bits(), 0x3f800000); - assert_eq!((12.5f32).to_bits(), 0x41480000); - assert_eq!((1337f32).to_bits(), 0x44a72000); - assert_eq!((-14.25f32).to_bits(), 0xc1640000); - assert_biteq!(f32::from_bits(0x3f800000), 1.0); - assert_biteq!(f32::from_bits(0x41480000), 12.5); - assert_biteq!(f32::from_bits(0x44a72000), 1337.0); - assert_biteq!(f32::from_bits(0xc1640000), -14.25); - - // Check that NaNs roundtrip their bits regardless of signaling-ness - // 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits - let masked_nan1 = f32::NAN.to_bits() ^ NAN_MASK1; - let masked_nan2 = f32::NAN.to_bits() ^ NAN_MASK2; - assert!(f32::from_bits(masked_nan1).is_nan()); - assert!(f32::from_bits(masked_nan2).is_nan()); - - assert_eq!(f32::from_bits(masked_nan1).to_bits(), masked_nan1); - assert_eq!(f32::from_bits(masked_nan2).to_bits(), masked_nan2); -} diff --git a/library/coretests/tests/floats/f64.rs b/library/coretests/tests/floats/f64.rs index 099d85627a3..4c5a3d68d1f 100644 --- a/library/coretests/tests/floats/f64.rs +++ b/library/coretests/tests/floats/f64.rs @@ -2,12 +2,6 @@ use core::f64; use super::assert_biteq; -/// First pattern over the mantissa -const NAN_MASK1: u64 = 0x000a_aaaa_aaaa_aaaa; - -/// Second pattern over the mantissa -const NAN_MASK2: u64 = 0x0005_5555_5555_5555; - // FIXME(#140515): mingw has an incorrect fma https://sourceforge.net/p/mingw-w64/bugs/848/ #[cfg_attr(all(target_os = "windows", target_env = "gnu", not(target_abi = "llvm")), ignore)] #[test] @@ -25,24 +19,3 @@ fn test_mul_add() { assert_biteq!(8.9f64.mul_add(inf, 3.2), inf); assert_biteq!((-3.2f64).mul_add(2.4, neg_inf), neg_inf); } - -#[test] -fn test_float_bits_conv() { - assert_eq!((1f64).to_bits(), 
0x3ff0000000000000); - assert_eq!((12.5f64).to_bits(), 0x4029000000000000); - assert_eq!((1337f64).to_bits(), 0x4094e40000000000); - assert_eq!((-14.25f64).to_bits(), 0xc02c800000000000); - assert_biteq!(f64::from_bits(0x3ff0000000000000), 1.0); - assert_biteq!(f64::from_bits(0x4029000000000000), 12.5); - assert_biteq!(f64::from_bits(0x4094e40000000000), 1337.0); - assert_biteq!(f64::from_bits(0xc02c800000000000), -14.25); - - // Check that NaNs roundtrip their bits regardless of signaling-ness - let masked_nan1 = f64::NAN.to_bits() ^ NAN_MASK1; - let masked_nan2 = f64::NAN.to_bits() ^ NAN_MASK2; - assert!(f64::from_bits(masked_nan1).is_nan()); - assert!(f64::from_bits(masked_nan2).is_nan()); - - assert_eq!(f64::from_bits(masked_nan1).to_bits(), masked_nan1); - assert_eq!(f64::from_bits(masked_nan2).to_bits(), masked_nan2); -} diff --git a/library/coretests/tests/floats/mod.rs b/library/coretests/tests/floats/mod.rs index c0439845a4a..31515561c63 100644 --- a/library/coretests/tests/floats/mod.rs +++ b/library/coretests/tests/floats/mod.rs @@ -30,6 +30,10 @@ trait TestableFloat: Sized { const EPS_ADD: Self; const EPS_MUL: Self; const EPS_DIV: Self; + const RAW_1: Self; + const RAW_12_DOT_5: Self; + const RAW_1337: Self; + const RAW_MINUS_14_DOT_25: Self; } impl TestableFloat for f16 { @@ -50,6 +54,10 @@ impl TestableFloat for f16 { const EPS_ADD: Self = if cfg!(miri) { 1e1 } else { 0.0 }; const EPS_MUL: Self = if cfg!(miri) { 1e3 } else { 0.0 }; const EPS_DIV: Self = if cfg!(miri) { 1e0 } else { 0.0 }; + const RAW_1: Self = Self::from_bits(0x3c00); + const RAW_12_DOT_5: Self = Self::from_bits(0x4a40); + const RAW_1337: Self = Self::from_bits(0x6539); + const RAW_MINUS_14_DOT_25: Self = Self::from_bits(0xcb20); } impl TestableFloat for f32 { @@ -72,6 +80,10 @@ impl TestableFloat for f32 { const EPS_ADD: Self = if cfg!(miri) { 1e-3 } else { 0.0 }; const EPS_MUL: Self = if cfg!(miri) { 1e-1 } else { 0.0 }; const EPS_DIV: Self = if cfg!(miri) { 1e-4 } else { 0.0 }; + const RAW_1: Self = Self::from_bits(0x3f800000); + const RAW_12_DOT_5: Self = Self::from_bits(0x41480000); + const RAW_1337: Self = Self::from_bits(0x44a72000); + const RAW_MINUS_14_DOT_25: Self = Self::from_bits(0xc1640000); } impl TestableFloat for f64 { @@ -90,6 +102,10 @@ impl TestableFloat for f64 { const EPS_ADD: Self = if cfg!(miri) { 1e-6 } else { 0.0 }; const EPS_MUL: Self = if cfg!(miri) { 1e-6 } else { 0.0 }; const EPS_DIV: Self = if cfg!(miri) { 1e-6 } else { 0.0 }; + const RAW_1: Self = Self::from_bits(0x3ff0000000000000); + const RAW_12_DOT_5: Self = Self::from_bits(0x4029000000000000); + const RAW_1337: Self = Self::from_bits(0x4094e40000000000); + const RAW_MINUS_14_DOT_25: Self = Self::from_bits(0xc02c800000000000); } impl TestableFloat for f128 { @@ -108,6 +124,10 @@ impl TestableFloat for f128 { const EPS_ADD: Self = if cfg!(miri) { 1e-6 } else { 0.0 }; const EPS_MUL: Self = if cfg!(miri) { 1e-6 } else { 0.0 }; const EPS_DIV: Self = if cfg!(miri) { 1e-6 } else { 0.0 }; + const RAW_1: Self = Self::from_bits(0x3fff0000000000000000000000000000); + const RAW_12_DOT_5: Self = Self::from_bits(0x40029000000000000000000000000000); + const RAW_1337: Self = Self::from_bits(0x40094e40000000000000000000000000); + const RAW_MINUS_14_DOT_25: Self = Self::from_bits(0xc002c800000000000000000000000000); } /// Determine the tolerance for values of the argument type. @@ -250,6 +270,8 @@ macro_rules! float_test { $( $( #[$f16_meta] )+ )? 
fn test_f16() { type $fty = f16; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } $test } @@ -257,6 +279,8 @@ macro_rules! float_test { $( $( #[$f32_meta] )+ )? fn test_f32() { type $fty = f32; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } $test } @@ -264,6 +288,8 @@ macro_rules! float_test { $( $( #[$f64_meta] )+ )? fn test_f64() { type $fty = f64; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } $test } @@ -271,6 +297,8 @@ macro_rules! float_test { $( $( #[$f128_meta] )+ )? fn test_f128() { type $fty = f128; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } $test } @@ -293,6 +321,8 @@ macro_rules! float_test { $( $( #[$f16_const_meta] )+ )? fn test_f16() { type $fty = f16; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } const { $test } } @@ -300,6 +330,8 @@ macro_rules! float_test { $( $( #[$f32_const_meta] )+ )? fn test_f32() { type $fty = f32; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } const { $test } } @@ -307,6 +339,8 @@ macro_rules! float_test { $( $( #[$f64_const_meta] )+ )? fn test_f64() { type $fty = f64; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } const { $test } } @@ -314,6 +348,8 @@ macro_rules! float_test { $( $( #[$f128_const_meta] )+ )? fn test_f128() { type $fty = f128; + #[allow(unused)] + const fn flt (x: $fty) -> $fty { x } const { $test } } } @@ -1479,3 +1515,30 @@ float_test! { assert_approx_eq!(a.algebraic_rem(b), a % b, Float::EPS_DIV); } } + +float_test! { + name: to_bits_conv, + attrs: { + f16: #[cfg(target_has_reliable_f16)], + f128: #[cfg(target_has_reliable_f128)], + }, + test<Float> { + assert_biteq!(flt(1.0), Float::RAW_1); + assert_biteq!(flt(12.5), Float::RAW_12_DOT_5); + assert_biteq!(flt(1337.0), Float::RAW_1337); + assert_biteq!(flt(-14.25), Float::RAW_MINUS_14_DOT_25); + assert_biteq!(Float::RAW_1, 1.0); + assert_biteq!(Float::RAW_12_DOT_5, 12.5); + assert_biteq!(Float::RAW_1337, 1337.0); + assert_biteq!(Float::RAW_MINUS_14_DOT_25, -14.25); + + // Check that NaNs roundtrip their bits regardless of signaling-ness + let masked_nan1 = Float::NAN.to_bits() ^ Float::NAN_MASK1; + let masked_nan2 = Float::NAN.to_bits() ^ Float::NAN_MASK2; + assert!(Float::from_bits(masked_nan1).is_nan()); + assert!(Float::from_bits(masked_nan2).is_nan()); + + assert_biteq!(Float::from_bits(masked_nan1), Float::from_bits(masked_nan1)); + assert_biteq!(Float::from_bits(masked_nan2), Float::from_bits(masked_nan2)); + } +} diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs index b5658a9970f..1bdaa6965f6 100644 --- a/library/coretests/tests/lib.rs +++ b/library/coretests/tests/lib.rs @@ -81,6 +81,7 @@ #![feature(next_index)] #![feature(non_exhaustive_omitted_patterns_lint)] #![feature(numfmt)] +#![feature(one_sided_range)] #![feature(option_reduce)] #![feature(pattern)] #![feature(peekable_next_if_map)] diff --git a/library/coretests/tests/ops.rs b/library/coretests/tests/ops.rs index 501e0f33fe4..121718f2167 100644 --- a/library/coretests/tests/ops.rs +++ b/library/coretests/tests/ops.rs @@ -2,7 +2,8 @@ mod control_flow; mod from_residual; use core::ops::{ - Bound, Deref, DerefMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, + Bound, Deref, DerefMut, OneSidedRange, OneSidedRangeBound, Range, RangeBounds, RangeFrom, + RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }; // Test the Range structs and syntax. 
@@ -71,6 +72,36 @@ fn test_range_to_inclusive() { } #[test] +fn test_range_contains() { + assert!(!(1u32..5).contains(&0u32)); + assert!((1u32..5).contains(&1u32)); + assert!((1u32..5).contains(&4u32)); + assert!(!(1u32..5).contains(&5u32)); + assert!(!(1u32..5).contains(&6u32)); +} + +#[test] +fn test_range_to_contains() { + assert!(!(1u32..=5).contains(&0)); + assert!((1u32..=5).contains(&1)); + assert!((1u32..=5).contains(&4)); + assert!((1u32..=5).contains(&5)); + assert!(!(1u32..=5).contains(&6)); +} + +// This test covers `RangeBounds::contains` when the start is excluded, +// which cannot be directly expressed by Rust's built-in range syntax. +#[test] +fn test_range_bounds_contains() { + let r = (Bound::Excluded(1u32), Bound::Included(5u32)); + assert!(!r.contains(&0)); + assert!(!r.contains(&1)); + assert!(r.contains(&3)); + assert!(r.contains(&5)); + assert!(!r.contains(&6)); +} + +#[test] fn test_range_is_empty() { assert!(!(0.0..10.0).is_empty()); assert!((-0.0..0.0).is_empty()); @@ -92,6 +123,34 @@ fn test_range_is_empty() { } #[test] +fn test_range_inclusive_end_bound() { + let mut r = 1u32..=1; + r.next().unwrap(); + assert!(!r.contains(&1)); +} + +#[test] +fn test_range_bounds() { + let r = (Bound::Included(1u32), Bound::Excluded(5u32)); + assert!(!r.contains(&0)); + assert!(r.contains(&1)); + assert!(r.contains(&3)); + assert!(!r.contains(&5)); + assert!(!r.contains(&6)); + + let r = (Bound::<u32>::Unbounded, Bound::Unbounded); + assert!(r.contains(&0)); + assert!(r.contains(&u32::MAX)); +} + +#[test] +fn test_one_sided_range_bound() { + assert!(matches!((..1u32).bound(), (OneSidedRangeBound::End, 1))); + assert!(matches!((1u32..).bound(), (OneSidedRangeBound::StartInclusive, 1))); + assert!(matches!((..=1u32).bound(), (OneSidedRangeBound::EndInclusive, 1))); +} + +#[test] fn test_bound_cloned_unbounded() { assert_eq!(Bound::<&u32>::Unbounded.cloned(), Bound::Unbounded); } @@ -240,3 +299,17 @@ fn deref_on_ref() { fn test_not_never() { if !return () {} } + +#[test] +fn test_fmt() { + let mut r = 1..=1; + assert_eq!(format!("{:?}", r), "1..=1"); + r.next().unwrap(); + assert_eq!(format!("{:?}", r), "1..=1 (exhausted)"); + + assert_eq!(format!("{:?}", 1..1), "1..1"); + assert_eq!(format!("{:?}", 1..), "1.."); + assert_eq!(format!("{:?}", ..1), "..1"); + assert_eq!(format!("{:?}", ..=1), "..=1"); + assert_eq!(format!("{:?}", ..), ".."); +} diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs index 8c115015580..2dbdc8a4e02 100644 --- a/library/std/src/sys/mod.rs +++ b/library/std/src/sys/mod.rs @@ -31,6 +31,7 @@ pub mod process; pub mod random; pub mod stdio; pub mod sync; +pub mod thread; pub mod thread_local; // FIXME(117276): remove this, move feature implementations into individual diff --git a/library/std/src/sys/pal/hermit/mod.rs b/library/std/src/sys/pal/hermit/mod.rs index fb8d69b7375..3ddf6e5acb0 100644 --- a/library/std/src/sys/pal/hermit/mod.rs +++ b/library/std/src/sys/pal/hermit/mod.rs @@ -25,7 +25,6 @@ pub mod futex; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; pub mod time; pub fn unsupported<T>() -> crate::io::Result<T> { diff --git a/library/std/src/sys/pal/sgx/abi/mod.rs b/library/std/src/sys/pal/sgx/abi/mod.rs index 57247cffad3..b8c4d7740c4 100644 --- a/library/std/src/sys/pal/sgx/abi/mod.rs +++ b/library/std/src/sys/pal/sgx/abi/mod.rs @@ -67,7 +67,7 @@ extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64 let tls_guard = unsafe { tls.activate() }; if secondary { - let join_notifier = 
super::thread::Thread::entry(); + let join_notifier = crate::sys::thread::Thread::entry(); drop(tls_guard); drop(join_notifier); diff --git a/library/std/src/sys/pal/sgx/mod.rs b/library/std/src/sys/pal/sgx/mod.rs index 4a297b6823f..9a33873af58 100644 --- a/library/std/src/sys/pal/sgx/mod.rs +++ b/library/std/src/sys/pal/sgx/mod.rs @@ -13,7 +13,6 @@ mod libunwind_integration; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; pub mod thread_parking; pub mod time; pub mod waitqueue; diff --git a/library/std/src/sys/pal/solid/mod.rs b/library/std/src/sys/pal/solid/mod.rs index 0011cf256df..9ca6dc58118 100644 --- a/library/std/src/sys/pal/solid/mod.rs +++ b/library/std/src/sys/pal/solid/mod.rs @@ -10,10 +10,8 @@ pub mod itron { pub mod error; pub mod spin; pub mod task; - pub mod thread; pub mod thread_parking; pub mod time; - use super::unsupported; } // `error` is `pub(crate)` so that it can be accessed by `itron/error.rs` as @@ -22,7 +20,7 @@ pub(crate) mod error; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub use self::itron::{thread, thread_parking}; +pub use self::itron::thread_parking; pub mod time; // SAFETY: must be called only once during runtime initialization. diff --git a/library/std/src/sys/pal/teeos/mod.rs b/library/std/src/sys/pal/teeos/mod.rs index c7b17777258..dd0155265da 100644 --- a/library/std/src/sys/pal/teeos/mod.rs +++ b/library/std/src/sys/pal/teeos/mod.rs @@ -9,7 +9,6 @@ pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; #[allow(non_upper_case_globals)] #[path = "../unix/time.rs"] pub mod time; diff --git a/library/std/src/sys/pal/uefi/mod.rs b/library/std/src/sys/pal/uefi/mod.rs index 8911a2ee519..ebd311db1e1 100644 --- a/library/std/src/sys/pal/uefi/mod.rs +++ b/library/std/src/sys/pal/uefi/mod.rs @@ -17,7 +17,6 @@ pub mod helpers; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; pub mod time; #[cfg(test)] diff --git a/library/std/src/sys/pal/uefi/thread.rs b/library/std/src/sys/pal/uefi/thread.rs deleted file mode 100644 index 47a48008c76..00000000000 --- a/library/std/src/sys/pal/uefi/thread.rs +++ /dev/null @@ -1,66 +0,0 @@ -use super::unsupported; -use crate::ffi::CStr; -use crate::io; -use crate::num::NonZero; -use crate::ptr::NonNull; -use crate::time::{Duration, Instant}; - -pub struct Thread(!); - -pub const DEFAULT_MIN_STACK_SIZE: usize = 64 * 1024; - -impl Thread { - // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new( - _stack: usize, - _name: Option<&str>, - _p: Box<dyn FnOnce()>, - ) -> io::Result<Thread> { - unsupported() - } - - pub fn yield_now() { - // do nothing - } - - pub fn set_name(_name: &CStr) { - // nope - } - - pub fn sleep(dur: Duration) { - let boot_services: NonNull<r_efi::efi::BootServices> = - crate::os::uefi::env::boot_services().expect("can't sleep").cast(); - let mut dur_ms = dur.as_micros(); - // ceil up to the nearest microsecond - if dur.subsec_nanos() % 1000 > 0 { - dur_ms += 1; - } - - while dur_ms > 0 { - let ms = crate::cmp::min(dur_ms, usize::MAX as u128); - let _ = unsafe { ((*boot_services.as_ptr()).stall)(ms as usize) }; - dur_ms -= ms; - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - - pub fn join(self) { - self.0 - } -} - -pub(crate) fn current_os_id() -> Option<u64> { - None -} - -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - // UEFI 
is single threaded - Ok(NonZero::new(1).unwrap()) -} diff --git a/library/std/src/sys/pal/unix/mod.rs b/library/std/src/sys/pal/unix/mod.rs index ac5c823a1bf..dd1059fe04a 100644 --- a/library/std/src/sys/pal/unix/mod.rs +++ b/library/std/src/sys/pal/unix/mod.rs @@ -17,7 +17,6 @@ pub mod os; pub mod pipe; pub mod stack_overflow; pub mod sync; -pub mod thread; pub mod thread_parking; pub mod time; @@ -55,7 +54,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { // thread-id for the main thread and so renaming the main thread will rename the // process and we only want to enable this on platforms we've tested. if cfg!(target_vendor = "apple") { - thread::Thread::set_name(&c"main"); + crate::sys::thread::set_name(c"main"); } unsafe fn sanitize_standard_fds() { diff --git a/library/std/src/sys/pal/wasip1/mod.rs b/library/std/src/sys/pal/wasip1/mod.rs index 61dd1c3f98b..ae5da3c1f77 100644 --- a/library/std/src/sys/pal/wasip1/mod.rs +++ b/library/std/src/sys/pal/wasip1/mod.rs @@ -20,7 +20,6 @@ pub mod futex; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; pub mod time; #[path = "../unsupported/common.rs"] diff --git a/library/std/src/sys/pal/wasip1/thread.rs b/library/std/src/sys/pal/wasip1/thread.rs deleted file mode 100644 index e062b49bd7a..00000000000 --- a/library/std/src/sys/pal/wasip1/thread.rs +++ /dev/null @@ -1,214 +0,0 @@ -#![forbid(unsafe_op_in_unsafe_fn)] - -use crate::ffi::CStr; -use crate::num::NonZero; -use crate::time::{Duration, Instant}; -use crate::{io, mem}; - -cfg_select! { - target_feature = "atomics" => { - use crate::cmp; - use crate::ptr; - use crate::sys::os; - // Add a few symbols not in upstream `libc` just yet. - mod libc { - pub use crate::ffi; - pub use libc::*; - - // defined in wasi-libc - // https://github.com/WebAssembly/wasi-libc/blob/a6f871343313220b76009827ed0153586361c0d5/libc-top-half/musl/include/alltypes.h.in#L108 - #[repr(C)] - union pthread_attr_union { - __i: [ffi::c_int; if size_of::<ffi::c_long>() == 8 { 14 } else { 9 }], - __vi: [ffi::c_int; if size_of::<ffi::c_long>() == 8 { 14 } else { 9 }], - __s: [ffi::c_ulong; if size_of::<ffi::c_long>() == 8 { 7 } else { 9 }], - } - - #[repr(C)] - pub struct pthread_attr_t { - __u: pthread_attr_union, - } - - #[allow(non_camel_case_types)] - pub type pthread_t = *mut ffi::c_void; - - pub const _SC_NPROCESSORS_ONLN: ffi::c_int = 84; - - unsafe extern "C" { - pub fn pthread_create( - native: *mut pthread_t, - attr: *const pthread_attr_t, - f: extern "C" fn(*mut ffi::c_void) -> *mut ffi::c_void, - value: *mut ffi::c_void, - ) -> ffi::c_int; - pub fn pthread_join(native: pthread_t, value: *mut *mut ffi::c_void) -> ffi::c_int; - pub fn pthread_attr_init(attrp: *mut pthread_attr_t) -> ffi::c_int; - pub fn pthread_attr_setstacksize( - attr: *mut pthread_attr_t, - stack_size: libc::size_t, - ) -> ffi::c_int; - pub fn pthread_attr_destroy(attr: *mut pthread_attr_t) -> ffi::c_int; - pub fn pthread_detach(thread: pthread_t) -> ffi::c_int; - } - } - - pub struct Thread { - id: libc::pthread_t, - } - - impl Drop for Thread { - fn drop(&mut self) { - let ret = unsafe { libc::pthread_detach(self.id) }; - debug_assert_eq!(ret, 0); - } - } - } - _ => { - pub struct Thread(!); - } -} - -pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024; - -impl Thread { - // unsafe: see thread::Builder::spawn_unchecked for safety requirements - cfg_select! 
{ - target_feature = "atomics" => { - pub unsafe fn new(stack: usize, _name: Option<&str>, p: Box<dyn FnOnce()>) -> io::Result<Thread> { - let p = Box::into_raw(Box::new(p)); - let mut native: libc::pthread_t = unsafe { mem::zeroed() }; - let mut attr: libc::pthread_attr_t = unsafe { mem::zeroed() }; - assert_eq!(unsafe { libc::pthread_attr_init(&mut attr) }, 0); - - let stack_size = cmp::max(stack, DEFAULT_MIN_STACK_SIZE); - - match unsafe { libc::pthread_attr_setstacksize(&mut attr, stack_size) } { - 0 => {} - n => { - assert_eq!(n, libc::EINVAL); - // EINVAL means |stack_size| is either too small or not a - // multiple of the system page size. Because it's definitely - // >= PTHREAD_STACK_MIN, it must be an alignment issue. - // Round up to the nearest page and try again. - let page_size = os::page_size(); - let stack_size = - (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1); - assert_eq!(unsafe { libc::pthread_attr_setstacksize(&mut attr, stack_size) }, 0); - } - }; - - let ret = unsafe { libc::pthread_create(&mut native, &attr, thread_start, p as *mut _) }; - // Note: if the thread creation fails and this assert fails, then p will - // be leaked. However, an alternative design could cause double-free - // which is clearly worse. - assert_eq!(unsafe {libc::pthread_attr_destroy(&mut attr) }, 0); - - return if ret != 0 { - // The thread failed to start and as a result p was not consumed. Therefore, it is - // safe to reconstruct the box so that it gets deallocated. - unsafe { drop(Box::from_raw(p)); } - Err(io::Error::from_raw_os_error(ret)) - } else { - Ok(Thread { id: native }) - }; - - extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void { - unsafe { - // Finally, let's run some code. - Box::from_raw(main as *mut Box<dyn FnOnce()>)(); - } - ptr::null_mut() - } - } - } - _ => { - pub unsafe fn new(_stack: usize, _name: Option<&str>, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { - crate::sys::unsupported() - } - } - } - - pub fn yield_now() { - let ret = unsafe { wasi::sched_yield() }; - debug_assert_eq!(ret, Ok(())); - } - - pub fn set_name(_name: &CStr) { - // nope - } - - pub fn sleep(dur: Duration) { - let mut nanos = dur.as_nanos(); - while nanos > 0 { - const USERDATA: wasi::Userdata = 0x0123_45678; - - let clock = wasi::SubscriptionClock { - id: wasi::CLOCKID_MONOTONIC, - timeout: u64::try_from(nanos).unwrap_or(u64::MAX), - precision: 0, - flags: 0, - }; - nanos -= u128::from(clock.timeout); - - let in_ = wasi::Subscription { - userdata: USERDATA, - u: wasi::SubscriptionU { tag: 0, u: wasi::SubscriptionUU { clock } }, - }; - unsafe { - let mut event: wasi::Event = mem::zeroed(); - let res = wasi::poll_oneoff(&in_, &mut event, 1); - match (res, event) { - ( - Ok(1), - wasi::Event { - userdata: USERDATA, - error: wasi::ERRNO_SUCCESS, - type_: wasi::EVENTTYPE_CLOCK, - .. - }, - ) => {} - _ => panic!("thread::sleep(): unexpected result of poll_oneoff"), - } - } - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - - pub fn join(self) { - cfg_select! 
{ - target_feature = "atomics" => { - let id = mem::ManuallyDrop::new(self).id; - let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; - if ret != 0 { - rtabort!("failed to join thread: {}", io::Error::from_raw_os_error(ret)); - } - } - _ => { - self.0 - } - } - } -} - -pub(crate) fn current_os_id() -> Option<u64> { - None -} - -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - cfg_select! { - target_feature = "atomics" => { - match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } { - -1 => Err(io::Error::last_os_error()), - cpus => NonZero::new(cpus as usize).ok_or(io::Error::UNKNOWN_THREAD_COUNT), - } - } - _ => crate::sys::unsupported(), - } -} diff --git a/library/std/src/sys/pal/wasip2/mod.rs b/library/std/src/sys/pal/wasip2/mod.rs index 5f3fb6d6ddf..c1d89da2677 100644 --- a/library/std/src/sys/pal/wasip2/mod.rs +++ b/library/std/src/sys/pal/wasip2/mod.rs @@ -14,7 +14,6 @@ pub mod futex; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; pub mod time; #[path = "../unsupported/common.rs"] diff --git a/library/std/src/sys/pal/wasip2/thread.rs b/library/std/src/sys/pal/wasip2/thread.rs deleted file mode 100644 index ad52918f15a..00000000000 --- a/library/std/src/sys/pal/wasip2/thread.rs +++ /dev/null @@ -1,73 +0,0 @@ -use crate::ffi::CStr; -use crate::io; -use crate::num::NonZero; -use crate::time::{Duration, Instant}; - -pub struct Thread(!); - -pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024; - -impl Thread { - pub unsafe fn new( - _stack: usize, - _name: Option<&str>, - _p: Box<dyn FnOnce()>, - ) -> io::Result<Thread> { - // Note that unlike WASIp1 even if the wasm `atomics` feature is enabled - // there is no support for threads, not even experimentally, not even in - // wasi-libc. Thus this is unconditionally unsupported. - crate::sys::unsupported() - } - - pub fn yield_now() { - // no API for this in WASIp2, but there's also no threads, so that's - // sort of expected. - } - - pub fn set_name(_name: &CStr) { - // nope - } - - pub fn sleep(dur: Duration) { - // Sleep in increments of `u64::MAX` nanoseconds until the `dur` is - // entirely drained. - let mut remaining = dur.as_nanos(); - while remaining > 0 { - let amt = u64::try_from(remaining).unwrap_or(u64::MAX); - wasip2::clocks::monotonic_clock::subscribe_duration(amt).block(); - remaining -= u128::from(amt); - } - } - - pub fn sleep_until(deadline: Instant) { - match u64::try_from(deadline.into_inner().as_duration().as_nanos()) { - // If the point in time we're sleeping to fits within a 64-bit - // number of nanoseconds then directly use `subscribe_instant`. - Ok(deadline) => { - wasip2::clocks::monotonic_clock::subscribe_instant(deadline).block(); - } - // ... otherwise we're sleeping for 500+ years relative to the - // "start" of what the system is using as a clock so speed/accuracy - // is not so much of a concern. Use `sleep` instead. 
- Err(_) => { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - } - } - - pub fn join(self) { - self.0 - } -} - -pub(crate) fn current_os_id() -> Option<u64> { - None -} - -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - crate::sys::unsupported() -} diff --git a/library/std/src/sys/pal/wasip2/time.rs b/library/std/src/sys/pal/wasip2/time.rs index f1f6839774b..980070e7b85 100644 --- a/library/std/src/sys/pal/wasip2/time.rs +++ b/library/std/src/sys/pal/wasip2/time.rs @@ -25,7 +25,7 @@ impl Instant { Some(Instant(self.0.checked_sub(*other)?)) } - pub(super) fn as_duration(&self) -> &Duration { + pub(crate) fn as_duration(&self) -> &Duration { &self.0 } } diff --git a/library/std/src/sys/pal/wasm/atomics/thread.rs b/library/std/src/sys/pal/wasm/atomics/thread.rs deleted file mode 100644 index 42a7dbdf8b8..00000000000 --- a/library/std/src/sys/pal/wasm/atomics/thread.rs +++ /dev/null @@ -1,75 +0,0 @@ -use crate::ffi::CStr; -use crate::io; -use crate::num::NonZero; -use crate::sys::unsupported; -use crate::time::{Duration, Instant}; - -pub struct Thread(!); - -pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024; - -impl Thread { - // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new( - _stack: usize, - _name: Option<&str>, - _p: Box<dyn FnOnce()>, - ) -> io::Result<Thread> { - unsupported() - } - - pub fn yield_now() {} - - pub fn set_name(_name: &CStr) {} - - pub fn sleep(dur: Duration) { - #[cfg(target_arch = "wasm32")] - use core::arch::wasm32 as wasm; - #[cfg(target_arch = "wasm64")] - use core::arch::wasm64 as wasm; - - use crate::cmp; - - // Use an atomic wait to block the current thread artificially with a - // timeout listed. Note that we should never be notified (return value - // of 0) or our comparison should never fail (return value of 1) so we - // should always only resume execution through a timeout (return value - // 2). - let mut nanos = dur.as_nanos(); - while nanos > 0 { - let amt = cmp::min(i64::MAX as u128, nanos); - let mut x = 0; - let val = unsafe { wasm::memory_atomic_wait32(&mut x, 0, amt as i64) }; - debug_assert_eq!(val, 2); - nanos -= amt; - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - - pub fn join(self) {} -} - -pub(crate) fn current_os_id() -> Option<u64> { - None -} - -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - unsupported() -} - -pub mod guard { - pub type Guard = !; - pub unsafe fn current() -> Option<Guard> { - None - } - pub unsafe fn init() -> Option<Guard> { - None - } -} diff --git a/library/std/src/sys/pal/wasm/mod.rs b/library/std/src/sys/pal/wasm/mod.rs index 346c9ff88c9..a20cd0e9ac7 100644 --- a/library/std/src/sys/pal/wasm/mod.rs +++ b/library/std/src/sys/pal/wasm/mod.rs @@ -23,18 +23,9 @@ pub mod pipe; #[path = "../unsupported/time.rs"] pub mod time; -cfg_select! 
{ - target_feature = "atomics" => { - #[path = "atomics/futex.rs"] - pub mod futex; - #[path = "atomics/thread.rs"] - pub mod thread; - } - _ => { - #[path = "../unsupported/thread.rs"] - pub mod thread; - } -} +#[cfg(target_feature = "atomics")] +#[path = "atomics/futex.rs"] +pub mod futex; #[path = "../unsupported/common.rs"] #[deny(unsafe_op_in_unsafe_fn)] diff --git a/library/std/src/sys/pal/windows/mod.rs b/library/std/src/sys/pal/windows/mod.rs index 3b6a86cbc8f..3357946b8f7 100644 --- a/library/std/src/sys/pal/windows/mod.rs +++ b/library/std/src/sys/pal/windows/mod.rs @@ -20,7 +20,6 @@ pub mod futex; pub mod handle; pub mod os; pub mod pipe; -pub mod thread; pub mod time; cfg_select! { not(target_vendor = "uwp") => { @@ -48,9 +47,9 @@ pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) { unsafe { stack_overflow::init(); - // Normally, `thread::spawn` will call `Thread::set_name` but since this thread already + // Normally, `thread::spawn` will call `set_name` but since this thread already // exists, we have to call it ourselves. - thread::Thread::set_name_wide(wide_str!("main")); + crate::sys::thread::set_name_wide(wide_str!("main")); } } diff --git a/library/std/src/sys/pal/windows/time.rs b/library/std/src/sys/pal/windows/time.rs index a948c07e0a3..f8f9a9fd818 100644 --- a/library/std/src/sys/pal/windows/time.rs +++ b/library/std/src/sys/pal/windows/time.rs @@ -232,7 +232,7 @@ mod perf_counter { } /// A timer you can wait on. -pub(super) struct WaitableTimer { +pub(crate) struct WaitableTimer { handle: c::HANDLE, } impl WaitableTimer { diff --git a/library/std/src/sys/pal/xous/mod.rs b/library/std/src/sys/pal/xous/mod.rs index 042c4ff862f..e673157e0eb 100644 --- a/library/std/src/sys/pal/xous/mod.rs +++ b/library/std/src/sys/pal/xous/mod.rs @@ -5,7 +5,6 @@ use crate::os::xous::ffi::exit; pub mod os; #[path = "../unsupported/pipe.rs"] pub mod pipe; -pub mod thread; pub mod time; #[path = "../unsupported/common.rs"] diff --git a/library/std/src/sys/platform_version/darwin/tests.rs b/library/std/src/sys/platform_version/darwin/tests.rs index eecd58ec79e..17b2cc18ec0 100644 --- a/library/std/src/sys/platform_version/darwin/tests.rs +++ b/library/std/src/sys/platform_version/darwin/tests.rs @@ -35,9 +35,9 @@ fn compare_against_sw_vers() { assert_eq!(__isOSVersionAtLeast(major, minor, subminor), 1); // One lower is available - assert_eq!(__isOSVersionAtLeast(major, minor, subminor.saturating_sub(1)), 1); - assert_eq!(__isOSVersionAtLeast(major, minor.saturating_sub(1), subminor), 1); - assert_eq!(__isOSVersionAtLeast(major.saturating_sub(1), minor, subminor), 1); + assert_eq!(__isOSVersionAtLeast(major, minor, (subminor as u32).saturating_sub(1) as i32), 1); + assert_eq!(__isOSVersionAtLeast(major, (minor as u32).saturating_sub(1) as i32, subminor), 1); + assert_eq!(__isOSVersionAtLeast((major as u32).saturating_sub(1) as i32, minor, subminor), 1); // One higher isn't available assert_eq!(__isOSVersionAtLeast(major, minor, subminor + 1), 0); diff --git a/library/std/src/sys/sync/once/queue.rs b/library/std/src/sys/sync/once/queue.rs index 49e15d65f25..17d99cdb385 100644 --- a/library/std/src/sys/sync/once/queue.rs +++ b/library/std/src/sys/sync/once/queue.rs @@ -276,7 +276,9 @@ fn wait( // If the managing thread happens to signal and unpark us before we // can park ourselves, the result could be this thread never gets // unparked. Luckily `park` comes with the guarantee that if it got - // an `unpark` just before on an unparked thread it does not park. 
+ // an `unpark` just before on an unparked thread it does not park. Crucially, we know + // the `unpark` must have happened between the `compare_exchange_weak` above and here, + // and there's no other `park` in that code that could steal our token. // SAFETY: we retrieved this handle on the current thread above. unsafe { node.thread.park() } } diff --git a/library/std/src/sys/pal/hermit/thread.rs b/library/std/src/sys/thread/hermit.rs index cc4734b6819..4d9f3b114c2 100644 --- a/library/std/src/sys/pal/hermit/thread.rs +++ b/library/std/src/sys/thread/hermit.rs @@ -1,10 +1,5 @@ -#![allow(dead_code)] - -use super::hermit_abi; -use crate::ffi::CStr; -use crate::mem::ManuallyDrop; use crate::num::NonZero; -use crate::time::{Duration, Instant}; +use crate::time::Duration; use crate::{io, ptr}; pub type Tid = hermit_abi::Tid; @@ -68,57 +63,30 @@ impl Thread { } } - #[inline] - pub fn yield_now() { - unsafe { - hermit_abi::yield_now(); - } - } - - #[inline] - pub fn set_name(_name: &CStr) { - // nope - } - - #[inline] - pub fn sleep(dur: Duration) { - let micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 }; - let micros = u64::try_from(micros).unwrap_or(u64::MAX); - - unsafe { - hermit_abi::usleep(micros); - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - pub fn join(self) { unsafe { let _ = hermit_abi::join(self.tid); } } +} - #[inline] - pub fn id(&self) -> Tid { - self.tid - } - - #[inline] - pub fn into_id(self) -> Tid { - ManuallyDrop::new(self).tid - } +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + unsafe { Ok(NonZero::new_unchecked(hermit_abi::available_parallelism())) } } -pub(crate) fn current_os_id() -> Option<u64> { - None +#[inline] +pub fn sleep(dur: Duration) { + let micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 }; + let micros = u64::try_from(micros).unwrap_or(u64::MAX); + + unsafe { + hermit_abi::usleep(micros); + } } -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - unsafe { Ok(NonZero::new_unchecked(hermit_abi::available_parallelism())) } +#[inline] +pub fn yield_now() { + unsafe { + hermit_abi::yield_now(); + } } diff --git a/library/std/src/sys/thread/mod.rs b/library/std/src/sys/thread/mod.rs new file mode 100644 index 00000000000..6bb7fc1a20e --- /dev/null +++ b/library/std/src/sys/thread/mod.rs @@ -0,0 +1,152 @@ +cfg_select! { + target_os = "hermit" => { + mod hermit; + pub use hermit::{Thread, available_parallelism, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{current_os_id, set_name}; + } + all(target_vendor = "fortanix", target_env = "sgx") => { + mod sgx; + pub use sgx::{Thread, current_os_id, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + + // SGX should protect in-enclave data from outside attackers, so there + // must not be any data leakage to the OS, particularly no 1-1 mapping + // between SGX thread names and OS thread names. Hence `set_name` is + // intentionally a no-op. + // + // Note that the internally visible SGX thread name is already provided + // by the platform-agnostic Rust thread code. This can be observed in + // the [`std::thread::tests::test_named_thread`] test, which succeeds + // as-is with the SGX target. 
+ #[expect(dead_code)] + mod unsupported; + pub use unsupported::{available_parallelism, set_name}; + } + target_os = "solid_asp3" => { + mod solid; + pub use solid::{Thread, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{available_parallelism, current_os_id, set_name}; + } + target_os = "teeos" => { + mod teeos; + pub use teeos::{Thread, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{available_parallelism, current_os_id, set_name}; + } + target_os = "uefi" => { + mod uefi; + pub use uefi::{available_parallelism, sleep}; + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{Thread, current_os_id, set_name, yield_now, DEFAULT_MIN_STACK_SIZE}; + } + target_family = "unix" => { + mod unix; + pub use unix::{Thread, available_parallelism, current_os_id, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + #[cfg(not(any( + target_env = "newlib", + target_os = "l4re", + target_os = "emscripten", + target_os = "redox", + target_os = "hurd", + target_os = "aix", + )))] + pub use unix::set_name; + #[cfg(any( + target_os = "freebsd", + target_os = "netbsd", + target_os = "linux", + target_os = "android", + target_os = "solaris", + target_os = "illumos", + target_os = "dragonfly", + target_os = "hurd", + target_os = "fuchsia", + target_os = "vxworks", + ))] + pub use unix::sleep_until; + #[expect(dead_code)] + mod unsupported; + #[cfg(any( + target_env = "newlib", + target_os = "l4re", + target_os = "emscripten", + target_os = "redox", + target_os = "hurd", + target_os = "aix", + ))] + pub use unsupported::set_name; + } + all(target_os = "wasi", target_env = "p1") => { + mod wasip1; + pub use wasip1::{DEFAULT_MIN_STACK_SIZE, sleep, yield_now}; + #[cfg(target_feature = "atomics")] + pub use wasip1::{Thread, available_parallelism}; + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{current_os_id, set_name}; + #[cfg(not(target_feature = "atomics"))] + pub use unsupported::{Thread, available_parallelism}; + } + all(target_os = "wasi", target_env = "p2") => { + mod wasip2; + pub use wasip2::{sleep, sleep_until}; + #[expect(dead_code)] + mod unsupported; + // Note that unlike WASIp1 even if the wasm `atomics` feature is enabled + // there is no support for threads, not even experimentally, not even in + // wasi-libc. Thus this is unconditionally unsupported. 
+ pub use unsupported::{Thread, available_parallelism, current_os_id, set_name, yield_now, DEFAULT_MIN_STACK_SIZE}; + } + all(target_family = "wasm", target_feature = "atomics") => { + mod wasm; + pub use wasm::sleep; + + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{Thread, available_parallelism, current_os_id, set_name, yield_now, DEFAULT_MIN_STACK_SIZE}; + } + target_os = "windows" => { + mod windows; + pub use windows::{Thread, available_parallelism, current_os_id, set_name, set_name_wide, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + } + target_os = "xous" => { + mod xous; + pub use xous::{Thread, available_parallelism, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + + #[expect(dead_code)] + mod unsupported; + pub use unsupported::{current_os_id, set_name}; + } + _ => { + mod unsupported; + pub use unsupported::{Thread, available_parallelism, current_os_id, set_name, sleep, yield_now, DEFAULT_MIN_STACK_SIZE}; + } +} + +#[cfg(not(any( + target_os = "freebsd", + target_os = "netbsd", + target_os = "linux", + target_os = "android", + target_os = "solaris", + target_os = "illumos", + target_os = "dragonfly", + target_os = "hurd", + target_os = "fuchsia", + target_os = "vxworks", + all(target_os = "wasi", target_env = "p2"), +)))] +pub fn sleep_until(deadline: crate::time::Instant) { + use crate::time::Instant; + + let now = Instant::now(); + + if let Some(delay) = deadline.checked_duration_since(now) { + sleep(delay); + } +} diff --git a/library/std/src/sys/pal/sgx/thread.rs b/library/std/src/sys/thread/sgx.rs index 1f613badcd7..f20ef7d86b9 100644 --- a/library/std/src/sys/pal/sgx/thread.rs +++ b/library/std/src/sys/thread/sgx.rs @@ -1,11 +1,8 @@ #![cfg_attr(test, allow(dead_code))] // why is this necessary? -use super::abi::{thread, usercalls}; -use super::unsupported; -use crate::ffi::CStr; use crate::io; -use crate::num::NonZero; -use crate::time::{Duration, Instant}; +use crate::sys::pal::abi::{thread, usercalls}; +use crate::time::Duration; pub struct Thread(task_queue::JoinHandle); @@ -108,51 +105,27 @@ impl Thread { Ok(Thread(handle)) } - pub(super) fn entry() -> JoinNotifier { + pub(crate) fn entry() -> JoinNotifier { let mut pending_tasks = task_queue::lock(); let task = rtunwrap!(Some, pending_tasks.pop()); drop(pending_tasks); // make sure to not hold the task queue lock longer than necessary task.run() } - pub fn yield_now() { - let wait_error = rtunwrap!(Err, usercalls::wait(0, usercalls::raw::WAIT_NO)); - rtassert!(wait_error.kind() == io::ErrorKind::WouldBlock); - } - - /// SGX should protect in-enclave data from the outside (attacker), - /// so there should be no data leakage to the OS, - /// and therefore also no 1-1 mapping between SGX thread names and OS thread names. - /// - /// This is why the method is intentionally No-Op. - pub fn set_name(_name: &CStr) { - // Note that the internally visible SGX thread name is already provided - // by the platform-agnostic (target-agnostic) Rust thread code. - // This can be observed in the [`std::thread::tests::test_named_thread`] test, - // which succeeds as-is with the SGX target. 
- } - - pub fn sleep(dur: Duration) { - usercalls::wait_timeout(0, dur, || true); - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - pub fn join(self) { self.0.wait(); } } -pub(crate) fn current_os_id() -> Option<u64> { +pub fn current_os_id() -> Option<u64> { Some(thread::current().addr().get() as u64) } -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - unsupported() +pub fn sleep(dur: Duration) { + usercalls::wait_timeout(0, dur, || true); +} + +pub fn yield_now() { + let wait_error = rtunwrap!(Err, usercalls::wait(0, usercalls::raw::WAIT_NO)); + rtassert!(wait_error.kind() == io::ErrorKind::WouldBlock); } diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/thread/solid.rs index 4e14cb3cbca..46a84faa802 100644 --- a/library/std/src/sys/pal/itron/thread.rs +++ b/library/std/src/sys/thread/solid.rs @@ -1,16 +1,14 @@ //! Thread implementation backed by μITRON tasks. Assumes `acre_tsk` and //! `exd_tsk` are available. -use super::error::{ItronError, expect_success, expect_success_aborting}; -use super::time::dur2reltims; -use super::{abi, task}; use crate::cell::UnsafeCell; -use crate::ffi::CStr; use crate::mem::ManuallyDrop; -use crate::num::NonZero; use crate::ptr::NonNull; use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; -use crate::time::{Duration, Instant}; +use crate::sys::pal::itron::error::{ItronError, expect_success, expect_success_aborting}; +use crate::sys::pal::itron::time::dur2reltims; +use crate::sys::pal::itron::{abi, task}; +use crate::time::Duration; use crate::{hint, io}; pub struct Thread { @@ -195,28 +193,6 @@ impl Thread { Ok(Self { p_inner, task: new_task }) } - pub fn yield_now() { - expect_success(unsafe { abi::rot_rdq(abi::TPRI_SELF) }, &"rot_rdq"); - } - - pub fn set_name(_name: &CStr) { - // nope - } - - pub fn sleep(dur: Duration) { - for timeout in dur2reltims(dur) { - expect_success(unsafe { abi::dly_tsk(timeout) }, &"dly_tsk"); - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - pub fn join(self) { // Safety: `ThreadInner` is alive at this point let inner = unsafe { self.p_inner.as_ref() }; @@ -361,10 +337,12 @@ unsafe fn terminate_and_delete_current_task() -> ! 
{ unsafe { crate::hint::unreachable_unchecked() }; } -pub(crate) fn current_os_id() -> Option<u64> { - None +pub fn yield_now() { + expect_success(unsafe { abi::rot_rdq(abi::TPRI_SELF) }, &"rot_rdq"); } -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - super::unsupported() +pub fn sleep(dur: Duration) { + for timeout in dur2reltims(dur) { + expect_success(unsafe { abi::dly_tsk(timeout) }, &"dly_tsk"); + } } diff --git a/library/std/src/sys/pal/teeos/thread.rs b/library/std/src/sys/thread/teeos.rs index 1812d11e692..cad100395c9 100644 --- a/library/std/src/sys/pal/teeos/thread.rs +++ b/library/std/src/sys/thread/teeos.rs @@ -1,12 +1,18 @@ -use crate::ffi::CStr; use crate::mem::{self, ManuallyDrop}; -use crate::num::NonZero; use crate::sys::os; -use crate::time::{Duration, Instant}; +use crate::time::Duration; use crate::{cmp, io, ptr}; pub const DEFAULT_MIN_STACK_SIZE: usize = 8 * 1024; +unsafe extern "C" { + safe fn TEE_Wait(timeout: u32) -> u32; +} + +fn min_stack_size(_: *const libc::pthread_attr_t) -> usize { + libc::PTHREAD_STACK_MIN.try_into().expect("Infallible") +} + pub struct Thread { id: libc::pthread_t, } @@ -16,10 +22,6 @@ pub struct Thread { unsafe impl Send for Thread {} unsafe impl Sync for Thread {} -unsafe extern "C" { - pub fn TEE_Wait(timeout: u32) -> u32; -} - impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new( @@ -74,7 +76,7 @@ impl Thread { } else { // The new thread will start running earliest after the next yield. // We add a yield here, so that the user does not have to. - Thread::yield_now(); + yield_now(); Ok(Thread { id: native }) }; @@ -91,36 +93,6 @@ impl Thread { } } - pub fn yield_now() { - let ret = unsafe { libc::sched_yield() }; - debug_assert_eq!(ret, 0); - } - - /// This does not do anything on teeos - pub fn set_name(_name: &CStr) { - // Both pthread_setname_np and prctl are not available to the TA, - // so we can't implement this currently. If the need arises please - // contact the teeos rustzone team. - } - - /// only main thread could wait for sometime in teeos - pub fn sleep(dur: Duration) { - let sleep_millis = dur.as_millis(); - let final_sleep: u32 = - if sleep_millis >= u32::MAX as u128 { u32::MAX } else { sleep_millis as u32 }; - unsafe { - let _ = TEE_Wait(final_sleep); - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - /// must join, because no pthread_detach supported pub fn join(self) { let id = self.into_id(); @@ -128,10 +100,6 @@ impl Thread { assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret)); } - pub fn id(&self) -> libc::pthread_t { - self.id - } - pub fn into_id(self) -> libc::pthread_t { ManuallyDrop::new(self).id } @@ -144,16 +112,15 @@ impl Drop for Thread { } } -pub(crate) fn current_os_id() -> Option<u64> { - None -} - -// Note: Both `sched_getaffinity` and `sysconf` are available but not functional on -// teeos, so this function always returns an Error! 
-pub fn available_parallelism() -> io::Result<NonZero<usize>> { - Err(io::Error::UNKNOWN_THREAD_COUNT) +pub fn yield_now() { + let ret = unsafe { libc::sched_yield() }; + debug_assert_eq!(ret, 0); } -fn min_stack_size(_: *const libc::pthread_attr_t) -> usize { - libc::PTHREAD_STACK_MIN.try_into().expect("Infallible") +/// only main thread could wait for sometime in teeos +pub fn sleep(dur: Duration) { + let sleep_millis = dur.as_millis(); + let final_sleep: u32 = + if sleep_millis >= u32::MAX as u128 { u32::MAX } else { sleep_millis as u32 }; + TEE_Wait(final_sleep); } diff --git a/library/std/src/sys/thread/uefi.rs b/library/std/src/sys/thread/uefi.rs new file mode 100644 index 00000000000..94f67d7ace2 --- /dev/null +++ b/library/std/src/sys/thread/uefi.rs @@ -0,0 +1,25 @@ +use crate::io; +use crate::num::NonZero; +use crate::ptr::NonNull; +use crate::time::Duration; + +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + // UEFI is single threaded + Ok(NonZero::new(1).unwrap()) +} + +pub fn sleep(dur: Duration) { + let boot_services: NonNull<r_efi::efi::BootServices> = + crate::os::uefi::env::boot_services().expect("can't sleep").cast(); + let mut dur_ms = dur.as_micros(); + // ceil up to the nearest microsecond + if dur.subsec_nanos() % 1000 > 0 { + dur_ms += 1; + } + + while dur_ms > 0 { + let ms = crate::cmp::min(dur_ms, usize::MAX as u128); + let _ = unsafe { ((*boot_services.as_ptr()).stall)(ms as usize) }; + dur_ms -= ms; + } +} diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/thread/unix.rs index 3389b8c0c8a..2d2c4f90212 100644 --- a/library/std/src/sys/pal/unix/thread.rs +++ b/library/std/src/sys/thread/unix.rs @@ -1,3 +1,11 @@ +#[cfg(not(any( + target_env = "newlib", + target_os = "l4re", + target_os = "emscripten", + target_os = "redox", + target_os = "hurd", + target_os = "aix", +)))] use crate::ffi::CStr; use crate::mem::{self, ManuallyDrop}; use crate::num::NonZero; @@ -6,7 +14,7 @@ use crate::sys::weak::dlsym; #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto",))] use crate::sys::weak::weak; use crate::sys::{os, stack_overflow}; -use crate::time::{Duration, Instant}; +use crate::time::Duration; use crate::{cmp, io, ptr}; #[cfg(not(any( target_os = "l4re", @@ -121,273 +129,6 @@ impl Thread { } } - pub fn yield_now() { - let ret = unsafe { libc::sched_yield() }; - debug_assert_eq!(ret, 0); - } - - #[cfg(target_os = "android")] - pub fn set_name(name: &CStr) { - const PR_SET_NAME: libc::c_int = 15; - unsafe { - let res = libc::prctl( - PR_SET_NAME, - name.as_ptr(), - 0 as libc::c_ulong, - 0 as libc::c_ulong, - 0 as libc::c_ulong, - ); - // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. - debug_assert_eq!(res, 0); - } - } - - #[cfg(any( - target_os = "linux", - target_os = "freebsd", - target_os = "dragonfly", - target_os = "nuttx", - target_os = "cygwin" - ))] - pub fn set_name(name: &CStr) { - unsafe { - cfg_select! { - any(target_os = "linux", target_os = "cygwin") => { - // Linux and Cygwin limits the allowed length of the name. - const TASK_COMM_LEN: usize = 16; - let name = truncate_cstr::<{ TASK_COMM_LEN }>(name); - } - _ => { - // FreeBSD, DragonFly BSD and NuttX do not enforce length limits. - } - }; - // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20 for Linux, - // FreeBSD 12.2 and 13.0, and DragonFly BSD 6.0. 
- let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()); - // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. - debug_assert_eq!(res, 0); - } - } - - #[cfg(target_os = "openbsd")] - pub fn set_name(name: &CStr) { - unsafe { - libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr()); - } - } - - #[cfg(target_vendor = "apple")] - pub fn set_name(name: &CStr) { - unsafe { - let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name); - let res = libc::pthread_setname_np(name.as_ptr()); - // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. - debug_assert_eq!(res, 0); - } - } - - #[cfg(target_os = "netbsd")] - pub fn set_name(name: &CStr) { - unsafe { - let res = libc::pthread_setname_np( - libc::pthread_self(), - c"%s".as_ptr(), - name.as_ptr() as *mut libc::c_void, - ); - debug_assert_eq!(res, 0); - } - } - - #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))] - pub fn set_name(name: &CStr) { - weak!( - fn pthread_setname_np( - thread: libc::pthread_t, - name: *const libc::c_char, - ) -> libc::c_int; - ); - - if let Some(f) = pthread_setname_np.get() { - #[cfg(target_os = "nto")] - const THREAD_NAME_MAX: usize = libc::_NTO_THREAD_NAME_MAX as usize; - #[cfg(any(target_os = "solaris", target_os = "illumos"))] - const THREAD_NAME_MAX: usize = 32; - - let name = truncate_cstr::<{ THREAD_NAME_MAX }>(name); - let res = unsafe { f(libc::pthread_self(), name.as_ptr()) }; - debug_assert_eq!(res, 0); - } - } - - #[cfg(target_os = "fuchsia")] - pub fn set_name(name: &CStr) { - use super::fuchsia::*; - unsafe { - zx_object_set_property( - zx_thread_self(), - ZX_PROP_NAME, - name.as_ptr() as *const libc::c_void, - name.to_bytes().len(), - ); - } - } - - #[cfg(target_os = "haiku")] - pub fn set_name(name: &CStr) { - unsafe { - let thread_self = libc::find_thread(ptr::null_mut()); - let res = libc::rename_thread(thread_self, name.as_ptr()); - // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. - debug_assert_eq!(res, libc::B_OK); - } - } - - #[cfg(target_os = "vxworks")] - pub fn set_name(name: &CStr) { - let mut name = truncate_cstr::<{ (libc::VX_TASK_RENAME_LENGTH - 1) as usize }>(name); - let res = unsafe { libc::taskNameSet(libc::taskIdSelf(), name.as_mut_ptr()) }; - debug_assert_eq!(res, libc::OK); - } - - #[cfg(any( - target_env = "newlib", - target_os = "l4re", - target_os = "emscripten", - target_os = "redox", - target_os = "hurd", - target_os = "aix", - ))] - pub fn set_name(_name: &CStr) { - // Newlib and Emscripten have no way to set a thread name. - } - - #[cfg(not(target_os = "espidf"))] - pub fn sleep(dur: Duration) { - let mut secs = dur.as_secs(); - let mut nsecs = dur.subsec_nanos() as _; - - // If we're awoken with a signal then the return value will be -1 and - // nanosleep will fill in `ts` with the remaining time. - unsafe { - while secs > 0 || nsecs > 0 { - let mut ts = libc::timespec { - tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t, - tv_nsec: nsecs, - }; - secs -= ts.tv_sec as u64; - let ts_ptr = &raw mut ts; - if libc::nanosleep(ts_ptr, ts_ptr) == -1 { - assert_eq!(os::errno(), libc::EINTR); - secs += ts.tv_sec as u64; - nsecs = ts.tv_nsec; - } else { - nsecs = 0; - } - } - } - } - - #[cfg(target_os = "espidf")] - pub fn sleep(dur: Duration) { - // ESP-IDF does not have `nanosleep`, so we use `usleep` instead. 
- // As per the documentation of `usleep`, it is expected to support - // sleep times as big as at least up to 1 second. - // - // ESP-IDF does support almost up to `u32::MAX`, but due to a potential integer overflow in its - // `usleep` implementation - // (https://github.com/espressif/esp-idf/blob/d7ca8b94c852052e3bc33292287ef4dd62c9eeb1/components/newlib/time.c#L210), - // we limit the sleep time to the maximum one that would not cause the underlying `usleep` implementation to overflow - // (`portTICK_PERIOD_MS` can be anything between 1 to 1000, and is 10 by default). - const MAX_MICROS: u32 = u32::MAX - 1_000_000 - 1; - - // Add any nanoseconds smaller than a microsecond as an extra microsecond - // so as to comply with the `std::thread::sleep` contract which mandates - // implementations to sleep for _at least_ the provided `dur`. - // We can't overflow `micros` as it is a `u128`, while `Duration` is a pair of - // (`u64` secs, `u32` nanos), where the nanos are strictly smaller than 1 second - // (i.e. < 1_000_000_000) - let mut micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 }; - - while micros > 0 { - let st = if micros > MAX_MICROS as u128 { MAX_MICROS } else { micros as u32 }; - unsafe { - libc::usleep(st); - } - - micros -= st as u128; - } - } - - // Any unix that has clock_nanosleep - // If this list changes update the MIRI chock_nanosleep shim - #[cfg(any( - target_os = "freebsd", - target_os = "netbsd", - target_os = "linux", - target_os = "android", - target_os = "solaris", - target_os = "illumos", - target_os = "dragonfly", - target_os = "hurd", - target_os = "fuchsia", - target_os = "vxworks", - ))] - pub fn sleep_until(deadline: Instant) { - let Some(ts) = deadline.into_inner().into_timespec().to_timespec() else { - // The deadline is further in the future then can be passed to - // clock_nanosleep. We have to use Self::sleep instead. This might - // happen on 32 bit platforms, especially closer to 2038. - let now = Instant::now(); - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - return; - }; - - unsafe { - // When we get interrupted (res = EINTR) call clock_nanosleep again - loop { - let res = libc::clock_nanosleep( - super::time::Instant::CLOCK_ID, - libc::TIMER_ABSTIME, - &ts, - core::ptr::null_mut(), // not required with TIMER_ABSTIME - ); - - if res == 0 { - break; - } else { - assert_eq!( - res, - libc::EINTR, - "timespec is in range, - clockid is valid and kernel should support it" - ); - } - } - } - } - - // Any unix that does not have clock_nanosleep - #[cfg(not(any( - target_os = "freebsd", - target_os = "netbsd", - target_os = "linux", - target_os = "android", - target_os = "solaris", - target_os = "illumos", - target_os = "dragonfly", - target_os = "hurd", - target_os = "fuchsia", - target_os = "vxworks", - )))] - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - pub fn join(self) { let id = self.into_id(); let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; @@ -410,84 +151,6 @@ impl Drop for Thread { } } -pub(crate) fn current_os_id() -> Option<u64> { - // Most Unix platforms have a way to query an integer ID of the current thread, all with - // slightly different spellings. - // - // The OS thread ID is used rather than `pthread_self` so as to match what will be displayed - // for process inspection (debuggers, trace, `top`, etc.). - cfg_select! 
{ - // Most platforms have a function returning a `pid_t` or int, which is an `i32`. - any(target_os = "android", target_os = "linux") => { - use crate::sys::weak::syscall; - - // `libc::gettid` is only available on glibc 2.30+, but the syscall is available - // since Linux 2.4.11. - syscall!(fn gettid() -> libc::pid_t;); - - // SAFETY: FFI call with no preconditions. - let id: libc::pid_t = unsafe { gettid() }; - Some(id as u64) - } - target_os = "nto" => { - // SAFETY: FFI call with no preconditions. - let id: libc::pid_t = unsafe { libc::gettid() }; - Some(id as u64) - } - target_os = "openbsd" => { - // SAFETY: FFI call with no preconditions. - let id: libc::pid_t = unsafe { libc::getthrid() }; - Some(id as u64) - } - target_os = "freebsd" => { - // SAFETY: FFI call with no preconditions. - let id: libc::c_int = unsafe { libc::pthread_getthreadid_np() }; - Some(id as u64) - } - target_os = "netbsd" => { - // SAFETY: FFI call with no preconditions. - let id: libc::lwpid_t = unsafe { libc::_lwp_self() }; - Some(id as u64) - } - any(target_os = "illumos", target_os = "solaris") => { - // On Illumos and Solaris, the `pthread_t` is the same as the OS thread ID. - // SAFETY: FFI call with no preconditions. - let id: libc::pthread_t = unsafe { libc::pthread_self() }; - Some(id as u64) - } - target_vendor = "apple" => { - // Apple allows querying arbitrary thread IDs, `thread=NULL` queries the current thread. - let mut id = 0u64; - // SAFETY: `thread_id` is a valid pointer, no other preconditions. - let status: libc::c_int = unsafe { libc::pthread_threadid_np(0, &mut id) }; - if status == 0 { - Some(id) - } else { - None - } - } - // Other platforms don't have an OS thread ID or don't have a way to access it. - _ => None, - } -} - -#[cfg(any( - target_os = "linux", - target_os = "nto", - target_os = "solaris", - target_os = "illumos", - target_os = "vxworks", - target_os = "cygwin", - target_vendor = "apple", -))] -fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] { - let mut result = [0; MAX_WITH_NUL]; - for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) { - *dst = *src as libc::c_char; - } - result -} - pub fn available_parallelism() -> io::Result<NonZero<usize>> { cfg_select! { any( @@ -668,6 +331,318 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> { } } +pub fn current_os_id() -> Option<u64> { + // Most Unix platforms have a way to query an integer ID of the current thread, all with + // slightly different spellings. + // + // The OS thread ID is used rather than `pthread_self` so as to match what will be displayed + // for process inspection (debuggers, trace, `top`, etc.). + cfg_select! { + // Most platforms have a function returning a `pid_t` or int, which is an `i32`. + any(target_os = "android", target_os = "linux") => { + use crate::sys::pal::weak::syscall; + + // `libc::gettid` is only available on glibc 2.30+, but the syscall is available + // since Linux 2.4.11. + syscall!(fn gettid() -> libc::pid_t;); + + // SAFETY: FFI call with no preconditions. + let id: libc::pid_t = unsafe { gettid() }; + Some(id as u64) + } + target_os = "nto" => { + // SAFETY: FFI call with no preconditions. + let id: libc::pid_t = unsafe { libc::gettid() }; + Some(id as u64) + } + target_os = "openbsd" => { + // SAFETY: FFI call with no preconditions. + let id: libc::pid_t = unsafe { libc::getthrid() }; + Some(id as u64) + } + target_os = "freebsd" => { + // SAFETY: FFI call with no preconditions. 
+ let id: libc::c_int = unsafe { libc::pthread_getthreadid_np() }; + Some(id as u64) + } + target_os = "netbsd" => { + // SAFETY: FFI call with no preconditions. + let id: libc::lwpid_t = unsafe { libc::_lwp_self() }; + Some(id as u64) + } + any(target_os = "illumos", target_os = "solaris") => { + // On Illumos and Solaris, the `pthread_t` is the same as the OS thread ID. + // SAFETY: FFI call with no preconditions. + let id: libc::pthread_t = unsafe { libc::pthread_self() }; + Some(id as u64) + } + target_vendor = "apple" => { + // Apple allows querying arbitrary thread IDs, `thread=NULL` queries the current thread. + let mut id = 0u64; + // SAFETY: `thread_id` is a valid pointer, no other preconditions. + let status: libc::c_int = unsafe { libc::pthread_threadid_np(0, &mut id) }; + if status == 0 { + Some(id) + } else { + None + } + } + // Other platforms don't have an OS thread ID or don't have a way to access it. + _ => None, + } +} + +#[cfg(any( + target_os = "linux", + target_os = "nto", + target_os = "solaris", + target_os = "illumos", + target_os = "vxworks", + target_os = "cygwin", + target_vendor = "apple", +))] +fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] { + let mut result = [0; MAX_WITH_NUL]; + for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) { + *dst = *src as libc::c_char; + } + result +} + +#[cfg(target_os = "android")] +pub fn set_name(name: &CStr) { + const PR_SET_NAME: libc::c_int = 15; + unsafe { + let res = libc::prctl( + PR_SET_NAME, + name.as_ptr(), + 0 as libc::c_ulong, + 0 as libc::c_ulong, + 0 as libc::c_ulong, + ); + // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. + debug_assert_eq!(res, 0); + } +} + +#[cfg(any( + target_os = "linux", + target_os = "freebsd", + target_os = "dragonfly", + target_os = "nuttx", + target_os = "cygwin" +))] +pub fn set_name(name: &CStr) { + unsafe { + cfg_select! { + any(target_os = "linux", target_os = "cygwin") => { + // Linux and Cygwin limits the allowed length of the name. + const TASK_COMM_LEN: usize = 16; + let name = truncate_cstr::<{ TASK_COMM_LEN }>(name); + } + _ => { + // FreeBSD, DragonFly BSD and NuttX do not enforce length limits. + } + }; + // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20 for Linux, + // FreeBSD 12.2 and 13.0, and DragonFly BSD 6.0. + let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()); + // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. + debug_assert_eq!(res, 0); + } +} + +#[cfg(target_os = "openbsd")] +pub fn set_name(name: &CStr) { + unsafe { + libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr()); + } +} + +#[cfg(target_vendor = "apple")] +pub fn set_name(name: &CStr) { + unsafe { + let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name); + let res = libc::pthread_setname_np(name.as_ptr()); + // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. 
+ debug_assert_eq!(res, 0); + } +} + +#[cfg(target_os = "netbsd")] +pub fn set_name(name: &CStr) { + unsafe { + let res = libc::pthread_setname_np( + libc::pthread_self(), + c"%s".as_ptr(), + name.as_ptr() as *mut libc::c_void, + ); + debug_assert_eq!(res, 0); + } +} + +#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))] +pub fn set_name(name: &CStr) { + weak!( + fn pthread_setname_np(thread: libc::pthread_t, name: *const libc::c_char) -> libc::c_int; + ); + + if let Some(f) = pthread_setname_np.get() { + #[cfg(target_os = "nto")] + const THREAD_NAME_MAX: usize = libc::_NTO_THREAD_NAME_MAX as usize; + #[cfg(any(target_os = "solaris", target_os = "illumos"))] + const THREAD_NAME_MAX: usize = 32; + + let name = truncate_cstr::<{ THREAD_NAME_MAX }>(name); + let res = unsafe { f(libc::pthread_self(), name.as_ptr()) }; + debug_assert_eq!(res, 0); + } +} + +#[cfg(target_os = "fuchsia")] +pub fn set_name(name: &CStr) { + use crate::sys::pal::fuchsia::*; + unsafe { + zx_object_set_property( + zx_thread_self(), + ZX_PROP_NAME, + name.as_ptr() as *const libc::c_void, + name.to_bytes().len(), + ); + } +} + +#[cfg(target_os = "haiku")] +pub fn set_name(name: &CStr) { + unsafe { + let thread_self = libc::find_thread(ptr::null_mut()); + let res = libc::rename_thread(thread_self, name.as_ptr()); + // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. + debug_assert_eq!(res, libc::B_OK); + } +} + +#[cfg(target_os = "vxworks")] +pub fn set_name(name: &CStr) { + let mut name = truncate_cstr::<{ (libc::VX_TASK_RENAME_LENGTH - 1) as usize }>(name); + let res = unsafe { libc::taskNameSet(libc::taskIdSelf(), name.as_mut_ptr()) }; + debug_assert_eq!(res, libc::OK); +} + +#[cfg(not(target_os = "espidf"))] +pub fn sleep(dur: Duration) { + let mut secs = dur.as_secs(); + let mut nsecs = dur.subsec_nanos() as _; + + // If we're awoken with a signal then the return value will be -1 and + // nanosleep will fill in `ts` with the remaining time. + unsafe { + while secs > 0 || nsecs > 0 { + let mut ts = libc::timespec { + tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t, + tv_nsec: nsecs, + }; + secs -= ts.tv_sec as u64; + let ts_ptr = &raw mut ts; + if libc::nanosleep(ts_ptr, ts_ptr) == -1 { + assert_eq!(os::errno(), libc::EINTR); + secs += ts.tv_sec as u64; + nsecs = ts.tv_nsec; + } else { + nsecs = 0; + } + } + } +} + +#[cfg(target_os = "espidf")] +pub fn sleep(dur: Duration) { + // ESP-IDF does not have `nanosleep`, so we use `usleep` instead. + // As per the documentation of `usleep`, it is expected to support + // sleep times as big as at least up to 1 second. + // + // ESP-IDF does support almost up to `u32::MAX`, but due to a potential integer overflow in its + // `usleep` implementation + // (https://github.com/espressif/esp-idf/blob/d7ca8b94c852052e3bc33292287ef4dd62c9eeb1/components/newlib/time.c#L210), + // we limit the sleep time to the maximum one that would not cause the underlying `usleep` implementation to overflow + // (`portTICK_PERIOD_MS` can be anything between 1 to 1000, and is 10 by default). + const MAX_MICROS: u32 = u32::MAX - 1_000_000 - 1; + + // Add any nanoseconds smaller than a microsecond as an extra microsecond + // so as to comply with the `std::thread::sleep` contract which mandates + // implementations to sleep for _at least_ the provided `dur`. 
+ // We can't overflow `micros` as it is a `u128`, while `Duration` is a pair of + // (`u64` secs, `u32` nanos), where the nanos are strictly smaller than 1 second + // (i.e. < 1_000_000_000) + let mut micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 }; + + while micros > 0 { + let st = if micros > MAX_MICROS as u128 { MAX_MICROS } else { micros as u32 }; + unsafe { + libc::usleep(st); + } + + micros -= st as u128; + } +} + +// Any unix that has clock_nanosleep +// If this list changes update the MIRI clock_nanosleep shim +#[cfg(any( + target_os = "freebsd", + target_os = "netbsd", + target_os = "linux", + target_os = "android", + target_os = "solaris", + target_os = "illumos", + target_os = "dragonfly", + target_os = "hurd", + target_os = "fuchsia", + target_os = "vxworks", +))] +pub fn sleep_until(deadline: crate::time::Instant) { + use crate::time::Instant; + + let Some(ts) = deadline.into_inner().into_timespec().to_timespec() else { + // The deadline is further in the future than can be passed to + // clock_nanosleep. We have to use `sleep` instead. This might + // happen on 32 bit platforms, especially closer to 2038. + let now = Instant::now(); + if let Some(delay) = deadline.checked_duration_since(now) { + sleep(delay); + } + return; + }; + + unsafe { + // When we get interrupted (res = EINTR) call clock_nanosleep again + loop { + let res = libc::clock_nanosleep( + crate::sys::time::Instant::CLOCK_ID, + libc::TIMER_ABSTIME, + &ts, + core::ptr::null_mut(), // not required with TIMER_ABSTIME + ); + + if res == 0 { + break; + } else { + assert_eq!( + res, + libc::EINTR, + "timespec is in range, + clockid is valid and kernel should support it" + ); + } + } + } +} + +pub fn yield_now() { + let ret = unsafe { libc::sched_yield() }; + debug_assert_eq!(ret, 0); +} + #[cfg(any(target_os = "android", target_os = "linux"))] mod cgroups { //!
Currently not covered diff --git a/library/std/src/sys/pal/unsupported/thread.rs b/library/std/src/sys/thread/unsupported.rs index 34d9b5ec70c..a5001efa3b4 100644 --- a/library/std/src/sys/pal/unsupported/thread.rs +++ b/library/std/src/sys/thread/unsupported.rs @@ -1,8 +1,7 @@ -use super::unsupported; use crate::ffi::CStr; use crate::io; use crate::num::NonZero; -use crate::time::{Duration, Instant}; +use crate::time::Duration; pub struct Thread(!); @@ -15,23 +14,7 @@ impl Thread { _name: Option<&str>, _p: Box<dyn FnOnce()>, ) -> io::Result<Thread> { - unsupported() - } - - pub fn yield_now() { - // do nothing - } - - pub fn set_name(_name: &CStr) { - // nope - } - - pub fn sleep(_dur: Duration) { - panic!("can't sleep"); - } - - pub fn sleep_until(_deadline: Instant) { - panic!("can't sleep"); + Err(io::Error::UNSUPPORTED_PLATFORM) } pub fn join(self) { @@ -39,10 +22,22 @@ impl Thread { } } -pub(crate) fn current_os_id() -> Option<u64> { +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + Err(io::Error::UNKNOWN_THREAD_COUNT) +} + +pub fn current_os_id() -> Option<u64> { None } -pub fn available_parallelism() -> io::Result<NonZero<usize>> { - unsupported() +pub fn yield_now() { + // do nothing +} + +pub fn set_name(_name: &CStr) { + // nope +} + +pub fn sleep(_dur: Duration) { + panic!("can't sleep"); } diff --git a/library/std/src/sys/thread/wasip1.rs b/library/std/src/sys/thread/wasip1.rs new file mode 100644 index 00000000000..83001fad49c --- /dev/null +++ b/library/std/src/sys/thread/wasip1.rs @@ -0,0 +1,185 @@ +#![forbid(unsafe_op_in_unsafe_fn)] + +#[cfg(target_feature = "atomics")] +use crate::io; +use crate::mem; +#[cfg(target_feature = "atomics")] +use crate::num::NonZero; +#[cfg(target_feature = "atomics")] +use crate::sys::os; +use crate::time::Duration; +#[cfg(target_feature = "atomics")] +use crate::{cmp, ptr}; + +// Add a few symbols not in upstream `libc` just yet. 
+#[cfg(target_feature = "atomics")] +mod libc { + pub use libc::*; + + pub use crate::ffi; + + // defined in wasi-libc + // https://github.com/WebAssembly/wasi-libc/blob/a6f871343313220b76009827ed0153586361c0d5/libc-top-half/musl/include/alltypes.h.in#L108 + #[repr(C)] + union pthread_attr_union { + __i: [ffi::c_int; if size_of::<ffi::c_long>() == 8 { 14 } else { 9 }], + __vi: [ffi::c_int; if size_of::<ffi::c_long>() == 8 { 14 } else { 9 }], + __s: [ffi::c_ulong; if size_of::<ffi::c_long>() == 8 { 7 } else { 9 }], + } + + #[repr(C)] + pub struct pthread_attr_t { + __u: pthread_attr_union, + } + + #[allow(non_camel_case_types)] + pub type pthread_t = *mut ffi::c_void; + + pub const _SC_NPROCESSORS_ONLN: ffi::c_int = 84; + + unsafe extern "C" { + pub fn pthread_create( + native: *mut pthread_t, + attr: *const pthread_attr_t, + f: extern "C" fn(*mut ffi::c_void) -> *mut ffi::c_void, + value: *mut ffi::c_void, + ) -> ffi::c_int; + pub fn pthread_join(native: pthread_t, value: *mut *mut ffi::c_void) -> ffi::c_int; + pub fn pthread_attr_init(attrp: *mut pthread_attr_t) -> ffi::c_int; + pub fn pthread_attr_setstacksize( + attr: *mut pthread_attr_t, + stack_size: libc::size_t, + ) -> ffi::c_int; + pub fn pthread_attr_destroy(attr: *mut pthread_attr_t) -> ffi::c_int; + pub fn pthread_detach(thread: pthread_t) -> ffi::c_int; + } +} + +#[cfg(target_feature = "atomics")] +pub struct Thread { + id: libc::pthread_t, +} + +#[cfg(target_feature = "atomics")] +impl Drop for Thread { + fn drop(&mut self) { + let ret = unsafe { libc::pthread_detach(self.id) }; + debug_assert_eq!(ret, 0); + } +} + +pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024; + +#[cfg(target_feature = "atomics")] +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new( + stack: usize, + _name: Option<&str>, + p: Box<dyn FnOnce()>, + ) -> io::Result<Thread> { + let p = Box::into_raw(Box::new(p)); + let mut native: libc::pthread_t = unsafe { mem::zeroed() }; + let mut attr: libc::pthread_attr_t = unsafe { mem::zeroed() }; + assert_eq!(unsafe { libc::pthread_attr_init(&mut attr) }, 0); + + let stack_size = cmp::max(stack, DEFAULT_MIN_STACK_SIZE); + + match unsafe { libc::pthread_attr_setstacksize(&mut attr, stack_size) } { + 0 => {} + n => { + assert_eq!(n, libc::EINVAL); + // EINVAL means |stack_size| is either too small or not a + // multiple of the system page size. Because it's definitely + // >= PTHREAD_STACK_MIN, it must be an alignment issue. + // Round up to the nearest page and try again. + let page_size = os::page_size(); + let stack_size = + (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1); + assert_eq!(unsafe { libc::pthread_attr_setstacksize(&mut attr, stack_size) }, 0); + } + }; + + let ret = unsafe { libc::pthread_create(&mut native, &attr, thread_start, p as *mut _) }; + // Note: if the thread creation fails and this assert fails, then p will + // be leaked. However, an alternative design could cause double-free + // which is clearly worse. + assert_eq!(unsafe { libc::pthread_attr_destroy(&mut attr) }, 0); + + return if ret != 0 { + // The thread failed to start and as a result p was not consumed. Therefore, it is + // safe to reconstruct the box so that it gets deallocated. + unsafe { + drop(Box::from_raw(p)); + } + Err(io::Error::from_raw_os_error(ret)) + } else { + Ok(Thread { id: native }) + }; + + extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void { + unsafe { + // Finally, let's run some code. 
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)(); + } + ptr::null_mut() + } + } + + pub fn join(self) { + let id = mem::ManuallyDrop::new(self).id; + let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; + if ret != 0 { + rtabort!("failed to join thread: {}", io::Error::from_raw_os_error(ret)); + } + } +} + +#[cfg(target_feature = "atomics")] +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } { + -1 => Err(io::Error::last_os_error()), + cpus => NonZero::new(cpus as usize).ok_or(io::Error::UNKNOWN_THREAD_COUNT), + } +} + +pub fn yield_now() { + let ret = unsafe { wasi::sched_yield() }; + debug_assert_eq!(ret, Ok(())); +} + +pub fn sleep(dur: Duration) { + let mut nanos = dur.as_nanos(); + while nanos > 0 { + const USERDATA: wasi::Userdata = 0x0123_45678; + + let clock = wasi::SubscriptionClock { + id: wasi::CLOCKID_MONOTONIC, + timeout: u64::try_from(nanos).unwrap_or(u64::MAX), + precision: 0, + flags: 0, + }; + nanos -= u128::from(clock.timeout); + + let in_ = wasi::Subscription { + userdata: USERDATA, + u: wasi::SubscriptionU { tag: 0, u: wasi::SubscriptionUU { clock } }, + }; + unsafe { + let mut event: wasi::Event = mem::zeroed(); + let res = wasi::poll_oneoff(&in_, &mut event, 1); + match (res, event) { + ( + Ok(1), + wasi::Event { + userdata: USERDATA, + error: wasi::ERRNO_SUCCESS, + type_: wasi::EVENTTYPE_CLOCK, + .. + }, + ) => {} + _ => panic!("thread::sleep(): unexpected result of poll_oneoff"), + } + } + } +} diff --git a/library/std/src/sys/thread/wasip2.rs b/library/std/src/sys/thread/wasip2.rs new file mode 100644 index 00000000000..420cad2a5e4 --- /dev/null +++ b/library/std/src/sys/thread/wasip2.rs @@ -0,0 +1,32 @@ +use crate::time::{Duration, Instant}; + +pub fn sleep(dur: Duration) { + // Sleep in increments of `u64::MAX` nanoseconds until the `dur` is + // entirely drained. + let mut remaining = dur.as_nanos(); + while remaining > 0 { + let amt = u64::try_from(remaining).unwrap_or(u64::MAX); + wasip2::clocks::monotonic_clock::subscribe_duration(amt).block(); + remaining -= u128::from(amt); + } +} + +pub fn sleep_until(deadline: Instant) { + match u64::try_from(deadline.into_inner().as_duration().as_nanos()) { + // If the point in time we're sleeping to fits within a 64-bit + // number of nanoseconds then directly use `subscribe_instant`. + Ok(deadline) => { + wasip2::clocks::monotonic_clock::subscribe_instant(deadline).block(); + } + // ... otherwise we're sleeping for 500+ years relative to the + // "start" of what the system is using as a clock so speed/accuracy + // is not so much of a concern. Use `sleep` instead. + Err(_) => { + let now = Instant::now(); + + if let Some(delay) = deadline.checked_duration_since(now) { + sleep(delay); + } + } + } +} diff --git a/library/std/src/sys/thread/wasm.rs b/library/std/src/sys/thread/wasm.rs new file mode 100644 index 00000000000..e843bc992ba --- /dev/null +++ b/library/std/src/sys/thread/wasm.rs @@ -0,0 +1,23 @@ +use crate::cmp; +use crate::time::Duration; + +pub fn sleep(dur: Duration) { + #[cfg(target_arch = "wasm32")] + use core::arch::wasm32 as wasm; + #[cfg(target_arch = "wasm64")] + use core::arch::wasm64 as wasm; + + // Use an atomic wait to block the current thread artificially with a + // timeout listed. Note that we should never be notified (return value + // of 0) or our comparison should never fail (return value of 1) so we + // should always only resume execution through a timeout (return value + // 2). 
+ let mut nanos = dur.as_nanos(); + while nanos > 0 { + let amt = cmp::min(i64::MAX as u128, nanos); + let mut x = 0; + let val = unsafe { wasm::memory_atomic_wait32(&mut x, 0, amt as i64) }; + debug_assert_eq!(val, 2); + nanos -= amt; + } +} diff --git a/library/std/src/sys/pal/windows/thread.rs b/library/std/src/sys/thread/windows.rs index b0e38220a2d..a5640c51c4a 100644 --- a/library/std/src/sys/pal/windows/thread.rs +++ b/library/std/src/sys/thread/windows.rs @@ -1,14 +1,14 @@ use core::ffi::c_void; -use super::time::WaitableTimer; -use super::to_u16s; use crate::ffi::CStr; use crate::num::NonZero; use crate::os::windows::io::{AsRawHandle, HandleOrNull}; use crate::sys::handle::Handle; +use crate::sys::pal::time::WaitableTimer; +use crate::sys::pal::{dur2timeout, to_u16s}; use crate::sys::{c, stack_overflow}; use crate::sys_common::FromInner; -use crate::time::{Duration, Instant}; +use crate::time::Duration; use crate::{io, ptr}; pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024; @@ -62,24 +62,6 @@ impl Thread { } } - pub fn set_name(name: &CStr) { - if let Ok(utf8) = name.to_str() { - if let Ok(utf16) = to_u16s(utf8) { - unsafe { - // SAFETY: the vec returned by `to_u16s` ends with a zero value - Self::set_name_wide(&utf16) - } - }; - }; - } - - /// # Safety - /// - /// `name` must end with a zero value - pub unsafe fn set_name_wide(name: &[u16]) { - unsafe { c::SetThreadDescription(c::GetCurrentThread(), name.as_ptr()) }; - } - pub fn join(self) { let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE) }; if rc == c::WAIT_FAILED { @@ -87,37 +69,6 @@ impl Thread { } } - pub fn yield_now() { - // This function will return 0 if there are no other threads to execute, - // but this also means that the yield was useless so this isn't really a - // case that needs to be worried about. - unsafe { - c::SwitchToThread(); - } - } - - pub fn sleep(dur: Duration) { - fn high_precision_sleep(dur: Duration) -> Result<(), ()> { - let timer = WaitableTimer::high_resolution()?; - timer.set(dur)?; - timer.wait() - } - // Attempt to use high-precision sleep (Windows 10, version 1803+). - // On error fallback to the standard `Sleep` function. - // Also preserves the zero duration behavior of `Sleep`. - if dur.is_zero() || high_precision_sleep(dur).is_err() { - unsafe { c::Sleep(super::dur2timeout(dur)) } - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - pub fn handle(&self) -> &Handle { &self.handle } @@ -127,14 +78,6 @@ impl Thread { } } -pub(crate) fn current_os_id() -> Option<u64> { - // SAFETY: FFI call with no preconditions. - let id: u32 = unsafe { c::GetCurrentThreadId() }; - - // A return value of 0 indicates failed lookup. - if id == 0 { None } else { Some(id.into()) } -} - pub fn available_parallelism() -> io::Result<NonZero<usize>> { let res = unsafe { let mut sysinfo: c::SYSTEM_INFO = crate::mem::zeroed(); @@ -146,3 +89,52 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> { cpus => Ok(unsafe { NonZero::new_unchecked(cpus) }), } } + +pub fn current_os_id() -> Option<u64> { + // SAFETY: FFI call with no preconditions. + let id: u32 = unsafe { c::GetCurrentThreadId() }; + + // A return value of 0 indicates failed lookup. 
+ if id == 0 { None } else { Some(id.into()) } +} + +pub fn set_name(name: &CStr) { + if let Ok(utf8) = name.to_str() { + if let Ok(utf16) = to_u16s(utf8) { + unsafe { + // SAFETY: the vec returned by `to_u16s` ends with a zero value + set_name_wide(&utf16) + } + }; + }; +} + +/// # Safety +/// +/// `name` must end with a zero value +pub unsafe fn set_name_wide(name: &[u16]) { + unsafe { c::SetThreadDescription(c::GetCurrentThread(), name.as_ptr()) }; +} + +pub fn sleep(dur: Duration) { + fn high_precision_sleep(dur: Duration) -> Result<(), ()> { + let timer = WaitableTimer::high_resolution()?; + timer.set(dur)?; + timer.wait() + } + // Attempt to use high-precision sleep (Windows 10, version 1803+). + // On error fallback to the standard `Sleep` function. + // Also preserves the zero duration behavior of `Sleep`. + if dur.is_zero() || high_precision_sleep(dur).is_err() { + unsafe { c::Sleep(dur2timeout(dur)) } + } +} + +pub fn yield_now() { + // This function will return 0 if there are no other threads to execute, + // but this also means that the yield was useless so this isn't really a + // case that needs to be worried about. + unsafe { + c::SwitchToThread(); + } +} diff --git a/library/std/src/sys/pal/xous/thread.rs b/library/std/src/sys/thread/xous.rs index 92803c94c6e..133e15a0928 100644 --- a/library/std/src/sys/pal/xous/thread.rs +++ b/library/std/src/sys/thread/xous.rs @@ -1,6 +1,5 @@ use core::arch::asm; -use crate::ffi::CStr; use crate::io; use crate::num::NonZero; use crate::os::xous::ffi::{ @@ -8,7 +7,7 @@ use crate::os::xous::ffi::{ map_memory, update_memory_flags, }; use crate::os::xous::services::{TicktimerScalar, ticktimer_server}; -use crate::time::{Duration, Instant}; +use crate::time::Duration; pub struct Thread { tid: ThreadId, @@ -110,46 +109,29 @@ impl Thread { Ok(Thread { tid }) } - pub fn yield_now() { - do_yield(); - } - - pub fn set_name(_name: &CStr) { - // nope - } - - pub fn sleep(dur: Duration) { - // Because the sleep server works on units of `usized milliseconds`, split - // the messages up into these chunks. This means we may run into issues - // if you try to sleep a thread for more than 49 days on a 32-bit system. - let mut millis = dur.as_millis(); - while millis > 0 { - let sleep_duration = - if millis > (usize::MAX as _) { usize::MAX } else { millis as usize }; - blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into()) - .expect("failed to send message to ticktimer server"); - millis -= sleep_duration as u128; - } - } - - pub fn sleep_until(deadline: Instant) { - let now = Instant::now(); - - if let Some(delay) = deadline.checked_duration_since(now) { - Self::sleep(delay); - } - } - pub fn join(self) { join_thread(self.tid).unwrap(); } } -pub(crate) fn current_os_id() -> Option<u64> { - None -} - pub fn available_parallelism() -> io::Result<NonZero<usize>> { // We're unicore right now. Ok(unsafe { NonZero::new_unchecked(1) }) } + +pub fn yield_now() { + do_yield(); +} + +pub fn sleep(dur: Duration) { + // Because the sleep server works on units of `usized milliseconds`, split + // the messages up into these chunks. This means we may run into issues + // if you try to sleep a thread for more than 49 days on a 32-bit system. 
+ let mut millis = dur.as_millis(); + while millis > 0 { + let sleep_duration = if millis > (usize::MAX as _) { usize::MAX } else { millis as usize }; + blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into()) + .expect("failed to send message to ticktimer server"); + millis -= sleep_duration as u128; + } +} diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index b6059c28cec..4d09b2b4e9d 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -550,7 +550,7 @@ impl Builder { } if let Some(name) = their_thread.cname() { - imp::Thread::set_name(name); + imp::set_name(name); } let f = f.into_inner(); @@ -763,7 +763,7 @@ where /// [`Mutex`]: crate::sync::Mutex #[stable(feature = "rust1", since = "1.0.0")] pub fn yield_now() { - imp::Thread::yield_now() + imp::yield_now() } /// Determines whether the current thread is unwinding because of panic. @@ -884,7 +884,7 @@ pub fn sleep_ms(ms: u32) { /// ``` #[stable(feature = "thread_sleep", since = "1.4.0")] pub fn sleep(dur: Duration) { - imp::Thread::sleep(dur) + imp::sleep(dur) } /// Puts the current thread to sleep until the specified deadline has passed. @@ -983,7 +983,7 @@ pub fn sleep(dur: Duration) { /// ``` #[unstable(feature = "thread_sleep_until", issue = "113752")] pub fn sleep_until(deadline: Instant) { - imp::Thread::sleep_until(deadline) + imp::sleep_until(deadline) } /// Used to ensure that `park` and `park_timeout` do not unwind, as that can @@ -1021,13 +1021,23 @@ impl Drop for PanicGuard { /// specifying a maximum time to block the thread for. /// /// * The [`unpark`] method on a [`Thread`] atomically makes the token available -/// if it wasn't already. Because the token is initially absent, [`unpark`] -/// followed by [`park`] will result in the second call returning immediately. -/// -/// The API is typically used by acquiring a handle to the current thread, -/// placing that handle in a shared data structure so that other threads can -/// find it, and then `park`ing in a loop. When some desired condition is met, another -/// thread calls [`unpark`] on the handle. +/// if it wasn't already. Because the token can be held by a thread even if it is currently not +/// parked, [`unpark`] followed by [`park`] will result in the second call returning immediately. +/// However, note that to rely on this guarantee, you need to make sure that your `unpark` happens +/// after all `park` that may be done by other data structures! +/// +/// The API is typically used by acquiring a handle to the current thread, placing that handle in a +/// shared data structure so that other threads can find it, and then `park`ing in a loop. When some +/// desired condition is met, another thread calls [`unpark`] on the handle. The last bullet point +/// above guarantees that even if the `unpark` occurs before the thread is finished `park`ing, it +/// will be woken up properly. +/// +/// Note that the coordination via the shared data structure is crucial: If you `unpark` a thread +/// without first establishing that it is about to be `park`ing within your code, that `unpark` may +/// get consumed by a *different* `park` in the same thread, leading to a deadlock. This also means +/// you must not call unknown code between setting up for parking and calling `park`; for instance, +/// if you invoke `println!`, that may itself call `park` and thus consume your `unpark` and cause a +/// deadlock. 
/// /// The motivation for this design is twofold: /// @@ -1058,21 +1068,24 @@ impl Drop for PanicGuard { /// /// ``` /// use std::thread; -/// use std::sync::{Arc, atomic::{Ordering, AtomicBool}}; +/// use std::sync::atomic::{Ordering, AtomicBool}; /// use std::time::Duration; /// -/// let flag = Arc::new(AtomicBool::new(false)); -/// let flag2 = Arc::clone(&flag); +/// static QUEUED: AtomicBool = AtomicBool::new(false); +/// static FLAG: AtomicBool = AtomicBool::new(false); /// /// let parked_thread = thread::spawn(move || { +/// println!("Thread spawned"); +/// // Signal that we are going to `park`. Between this store and our `park`, there may +/// // be no other `park`, or else that `park` could consume our `unpark` token! +/// QUEUED.store(true, Ordering::Release); /// // We want to wait until the flag is set. We *could* just spin, but using /// // park/unpark is more efficient. -/// while !flag2.load(Ordering::Relaxed) { -/// println!("Parking thread"); +/// while !FLAG.load(Ordering::Acquire) { +/// // We can *not* use `println!` here since that could use thread parking internally. /// thread::park(); /// // We *could* get here spuriously, i.e., way before the 10ms below are over! /// // But that is no problem, we are in a loop until the flag is set anyway. -/// println!("Thread unparked"); /// } /// println!("Flag received"); /// }); @@ -1080,11 +1093,22 @@ impl Drop for PanicGuard { /// // Let some time pass for the thread to be spawned. /// thread::sleep(Duration::from_millis(10)); /// +/// // Ensure the thread is about to park. +/// // This is crucial! It guarantees that the `unpark` below is not consumed +/// // by some other code in the parked thread (e.g. inside `println!`). +/// while !QUEUED.load(Ordering::Acquire) { +/// // Spinning is of course inefficient; in practice, this would more likely be +/// // a dequeue where we have no work to do if there's nobody queued. +/// std::hint::spin_loop(); +/// } +/// /// // Set the flag, and let the thread wake up. -/// // There is no race condition here, if `unpark` +/// // There is no race condition here: if `unpark` /// // happens first, `park` will return immediately. +/// // There is also no other `park` that could consume this token, +/// // since we waited until the other thread got queued. /// // Hence there is no risk of a deadlock. -/// flag.store(true, Ordering::Relaxed); +/// FLAG.store(true, Ordering::Release); /// println!("Unpark the thread"); /// parked_thread.thread().unpark(); /// @@ -1494,10 +1518,14 @@ impl Thread { /// ``` /// use std::thread; /// use std::time::Duration; + /// use std::sync::atomic::{AtomicBool, Ordering}; + /// + /// static QUEUED: AtomicBool = AtomicBool::new(false); /// /// let parked_thread = thread::Builder::new() /// .spawn(|| { /// println!("Parking thread"); + /// QUEUED.store(true, Ordering::Release); /// thread::park(); /// println!("Thread unparked"); /// }) @@ -1506,6 +1534,15 @@ impl Thread { /// // Let some time pass for the thread to be spawned. /// thread::sleep(Duration::from_millis(10)); /// + /// // Wait until the other thread is queued. + /// // This is crucial! It guarantees that the `unpark` below is not consumed + /// // by some other code in the parked thread (e.g. inside `println!`). + /// while !QUEUED.load(Ordering::Acquire) { + /// // Spinning is of course inefficient; in practice, this would more likely be + /// // a dequeue where we have no work to do if there's nobody queued. 
+ /// std::hint::spin_loop(); + /// } + /// /// println!("Unpark the thread"); /// parked_thread.thread().unpark(); /// diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs index ae889f1e778..2117f5f93ce 100644 --- a/library/std/src/thread/tests.rs +++ b/library/std/src/thread/tests.rs @@ -287,6 +287,8 @@ fn test_park_unpark_called_other_thread() { for _ in 0..10 { let th = thread::current(); + // Here we rely on `thread::spawn` (specifically the part that runs after spawning + // the thread) to not consume the parking token. let _guard = thread::spawn(move || { super::sleep(Duration::from_millis(50)); th.unpark(); @@ -316,6 +318,8 @@ fn test_park_timeout_unpark_called_other_thread() { for _ in 0..10 { let th = thread::current(); + // Here we rely on `thread::spawn` (specifically the part that runs after spawning + // the thread) to not consume the parking token. let _guard = thread::spawn(move || { super::sleep(Duration::from_millis(50)); th.unpark(); diff --git a/library/std_detect/src/detect/os/linux/aarch64.rs b/library/std_detect/src/detect/os/linux/aarch64.rs index 87a9d6ebb88..b733b8a9eb2 100644 --- a/library/std_detect/src/detect/os/linux/aarch64.rs +++ b/library/std_detect/src/detect/os/linux/aarch64.rs @@ -140,7 +140,7 @@ struct AtHwcap { impl From<auxvec::AuxVec> for AtHwcap { /// Reads AtHwcap from the auxiliary vector. fn from(auxv: auxvec::AuxVec) -> Self { - AtHwcap { + let mut cap = AtHwcap { fp: bit::test(auxv.hwcap, 0), asimd: bit::test(auxv.hwcap, 1), // evtstrm: bit::test(auxv.hwcap, 2), @@ -207,39 +207,50 @@ impl From<auxvec::AuxVec> for AtHwcap { // smef32f32: bit::test(auxv.hwcap2, 29), smefa64: bit::test(auxv.hwcap2, 30), wfxt: bit::test(auxv.hwcap2, 31), - // ebf16: bit::test(auxv.hwcap2, 32), - // sveebf16: bit::test(auxv.hwcap2, 33), - cssc: bit::test(auxv.hwcap2, 34), - // rprfm: bit::test(auxv.hwcap2, 35), - sve2p1: bit::test(auxv.hwcap2, 36), - sme2: bit::test(auxv.hwcap2, 37), - sme2p1: bit::test(auxv.hwcap2, 38), - // smei16i32: bit::test(auxv.hwcap2, 39), - // smebi32i32: bit::test(auxv.hwcap2, 40), - smeb16b16: bit::test(auxv.hwcap2, 41), - smef16f16: bit::test(auxv.hwcap2, 42), - mops: bit::test(auxv.hwcap2, 43), - hbc: bit::test(auxv.hwcap2, 44), - sveb16b16: bit::test(auxv.hwcap2, 45), - lrcpc3: bit::test(auxv.hwcap2, 46), - lse128: bit::test(auxv.hwcap2, 47), - fpmr: bit::test(auxv.hwcap2, 48), - lut: bit::test(auxv.hwcap2, 49), - faminmax: bit::test(auxv.hwcap2, 50), - f8cvt: bit::test(auxv.hwcap2, 51), - f8fma: bit::test(auxv.hwcap2, 52), - f8dp4: bit::test(auxv.hwcap2, 53), - f8dp2: bit::test(auxv.hwcap2, 54), - f8e4m3: bit::test(auxv.hwcap2, 55), - f8e5m2: bit::test(auxv.hwcap2, 56), - smelutv2: bit::test(auxv.hwcap2, 57), - smef8f16: bit::test(auxv.hwcap2, 58), - smef8f32: bit::test(auxv.hwcap2, 59), - smesf8fma: bit::test(auxv.hwcap2, 60), - smesf8dp4: bit::test(auxv.hwcap2, 61), - smesf8dp2: bit::test(auxv.hwcap2, 62), - // pauthlr: bit::test(auxv.hwcap2, ??), + ..Default::default() + }; + + // Hardware capabilities from bits 32 to 63 should only + // be tested on LP64 targets with 64 bits `usize`. + // On ILP32 targets like `aarch64-unknown-linux-gnu_ilp32`, + // these hardware capabilities will default to `false`. 
+ // https://github.com/rust-lang/rust/issues/146230 + #[cfg(target_pointer_width = "64")] + { + // cap.ebf16: bit::test(auxv.hwcap2, 32); + // cap.sveebf16: bit::test(auxv.hwcap2, 33); + cap.cssc = bit::test(auxv.hwcap2, 34); + // cap.rprfm: bit::test(auxv.hwcap2, 35); + cap.sve2p1 = bit::test(auxv.hwcap2, 36); + cap.sme2 = bit::test(auxv.hwcap2, 37); + cap.sme2p1 = bit::test(auxv.hwcap2, 38); + // cap.smei16i32 = bit::test(auxv.hwcap2, 39); + // cap.smebi32i32 = bit::test(auxv.hwcap2, 40); + cap.smeb16b16 = bit::test(auxv.hwcap2, 41); + cap.smef16f16 = bit::test(auxv.hwcap2, 42); + cap.mops = bit::test(auxv.hwcap2, 43); + cap.hbc = bit::test(auxv.hwcap2, 44); + cap.sveb16b16 = bit::test(auxv.hwcap2, 45); + cap.lrcpc3 = bit::test(auxv.hwcap2, 46); + cap.lse128 = bit::test(auxv.hwcap2, 47); + cap.fpmr = bit::test(auxv.hwcap2, 48); + cap.lut = bit::test(auxv.hwcap2, 49); + cap.faminmax = bit::test(auxv.hwcap2, 50); + cap.f8cvt = bit::test(auxv.hwcap2, 51); + cap.f8fma = bit::test(auxv.hwcap2, 52); + cap.f8dp4 = bit::test(auxv.hwcap2, 53); + cap.f8dp2 = bit::test(auxv.hwcap2, 54); + cap.f8e4m3 = bit::test(auxv.hwcap2, 55); + cap.f8e5m2 = bit::test(auxv.hwcap2, 56); + cap.smelutv2 = bit::test(auxv.hwcap2, 57); + cap.smef8f16 = bit::test(auxv.hwcap2, 58); + cap.smef8f32 = bit::test(auxv.hwcap2, 59); + cap.smesf8fma = bit::test(auxv.hwcap2, 60); + cap.smesf8dp4 = bit::test(auxv.hwcap2, 61); + cap.smesf8dp2 = bit::test(auxv.hwcap2, 62); + // cap.pauthlr = bit::test(auxv.hwcap2, ??); } + cap } } |
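The patch above converts the per-target thread primitives from associated functions on each platform's `Thread` type (`Thread::sleep`, `Thread::yield_now`, `Thread::set_name`, ...) into free functions that `sys/thread/mod.rs` re-exports per target, with a generic `sleep_until` fallback for targets that do not export a native one. A minimal sketch of that shape, assuming a hypothetical `platform` module and using the public `std::thread` API as a stand-in for the real syscalls:

```rust
use std::time::{Duration, Instant};

// Stand-in for one per-target module (unix.rs, windows.rs, ...): it exports
// plain free functions instead of methods on a `Thread` type.
mod platform {
    use std::time::Duration;

    pub fn sleep(dur: Duration) {
        std::thread::sleep(dur); // the real module would call nanosleep, Sleep, ...
    }

    pub fn yield_now() {
        std::thread::yield_now();
    }
}

// The facade re-exports whatever the platform provides...
pub use platform::{sleep, yield_now};

// ...and fills in `sleep_until` generically where there is no native
// absolute-deadline sleep. `checked_duration_since` returns `None` for a
// deadline already in the past, in which case we skip sleeping entirely.
pub fn sleep_until(deadline: Instant) {
    if let Some(delay) = deadline.checked_duration_since(Instant::now()) {
        sleep(delay);
    }
}

fn main() {
    sleep_until(Instant::now() + Duration::from_millis(1));
    yield_now();
}
```

The real fallback in mod.rs is additionally gated on a cfg list so that platforms exporting a native `sleep_until` (the clock_nanosleep family of Unix targets and WASIp2) keep theirs; the sketch omits that gating.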
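In wasip1.rs, when `pthread_attr_setstacksize` rejects the requested size, the stack size is rounded up to the next page with `(stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1)`. The mask is `!(page_size - 1)` written via a signed negation, so for a power-of-two page size this is the usual round-up-to-a-multiple idiom. A small self-contained check of that equivalence (the page size and sample values below are illustrative, not taken from the patch):

```rust
// Plain formulation of the same rounding (assumes `page_size` is a power of two).
fn round_up_to_page(stack_size: usize, page_size: usize) -> usize {
    (stack_size + page_size - 1) & !(page_size - 1)
}

fn main() {
    let page = 4096usize;

    // The mask used in the patch is exactly !(page - 1).
    assert_eq!((-(page as isize - 1) as usize) - 1, !(page - 1));

    // Both spellings agree, and sizes round up to the next page boundary.
    for stack in [1usize, 4095, 4096, 4097, 123_456] {
        let patch_style = (stack + page - 1) & ((-(page as isize - 1) as usize) - 1);
        assert_eq!(patch_style, round_up_to_page(stack, page));
    }
    assert_eq!(round_up_to_page(4097, page), 8192);
}
```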
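The xous `sleep` comment warns about sleeps of "more than 49 days on a 32-bit system": each `SleepMs` message to the ticktimer server carries at most `usize::MAX` milliseconds, which is about 49.7 days when `usize` is 32 bits, so longer requests are split across several messages by the loop. A quick check of that arithmetic (the 100-day request below is an arbitrary example):

```rust
fn main() {
    // usize::MAX milliseconds on a 32-bit target:
    let max_ms = u32::MAX as u64;
    let days = max_ms / 1_000 / 60 / 60 / 24;
    assert_eq!(days, 49); // roughly 49.7 days per message

    // A longer sleep gets chunked into multiple SleepMs messages.
    let requested_ms: u128 = 100 * 24 * 60 * 60 * 1_000; // 100 days in milliseconds
    let chunks = requested_ms.div_ceil(max_ms as u128);
    assert_eq!(chunks, 3);
}
```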
