Diffstat (limited to 'library')
67 files changed, 3331 insertions, 1384 deletions
diff --git a/library/Cargo.lock b/library/Cargo.lock index 656576d2d8e..8b860f69492 100644 --- a/library/Cargo.lock +++ b/library/Cargo.lock @@ -140,9 +140,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.174" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" dependencies = [ "rustc-std-workspace-core", ] @@ -169,9 +169,9 @@ dependencies = [ [[package]] name = "object" -version = "0.37.2" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e3d0a7419f081f4a808147e845310313a39f322d7ae1f996b7f001d6cbed04" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", "rustc-std-workspace-alloc", @@ -192,7 +192,6 @@ name = "panic_unwind" version = "0.0.0" dependencies = [ "alloc", - "cfg-if", "libc", "rustc-std-workspace-core", "unwind", @@ -336,9 +335,9 @@ dependencies = [ name = "std_detect" version = "0.1.5" dependencies = [ - "alloc", - "core", "libc", + "rustc-std-workspace-alloc", + "rustc-std-workspace-core", ] [[package]] diff --git a/library/Cargo.toml b/library/Cargo.toml index a79c17fc4f7..e30e6240942 100644 --- a/library/Cargo.toml +++ b/library/Cargo.toml @@ -59,4 +59,3 @@ rustflags = ["-Cpanic=abort"] rustc-std-workspace-core = { path = 'rustc-std-workspace-core' } rustc-std-workspace-alloc = { path = 'rustc-std-workspace-alloc' } rustc-std-workspace-std = { path = 'rustc-std-workspace-std' } -compiler_builtins = { path = "compiler-builtins/compiler-builtins" } diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index c9b98fa4e5a..76630a746dd 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -17,6 +17,7 @@ unsafe extern "Rust" { #[rustc_allocator] #[rustc_nounwind] #[rustc_std_internal_symbol] + #[rustc_allocator_zeroed_variant = "__rust_alloc_zeroed"] fn __rust_alloc(size: usize, align: usize) -> *mut u8; #[rustc_deallocator] #[rustc_nounwind] diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index c4e599222e5..8b6d86a2888 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -40,30 +40,15 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT; /// An ordered map based on a [B-Tree]. /// -/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing -/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal -/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of -/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this -/// is done is *very* inefficient for modern computer architectures. In particular, every element -/// is stored in its own individually heap-allocated node. This means that every single insertion -/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these -/// are both notably expensive things to do in practice, we are forced to, at the very least, -/// reconsider the BST strategy. -/// -/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing -/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in -/// searches. 
However, this does mean that searches will have to do *more* comparisons on average. -/// The precise number of comparisons depends on the node search strategy used. For optimal cache -/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search -/// the node using binary search. As a compromise, one could also perform a linear search -/// that initially only checks every i<sup>th</sup> element for some choice of i. +/// Given a key type with a [total order], an ordered map stores its entries in key order. +/// That means that keys must be of a type that implements the [`Ord`] trait, +/// such that two keys can always be compared to determine their [`Ordering`]. +/// Examples of keys with a total order are strings with lexicographical order, +/// and numbers with their natural order. /// -/// Currently, our implementation simply performs naive linear search. This provides excellent -/// performance on *small* nodes of elements which are cheap to compare. However in the future we -/// would like to further explore choosing the optimal search strategy based on the choice of B, -/// and possibly other factors. Using linear search, searching for a random element is expected -/// to take B * log(n) comparisons, which is generally worse than a BST. In practice, -/// however, performance is excellent. +/// Iterators obtained from functions such as [`BTreeMap::iter`], [`BTreeMap::into_iter`], [`BTreeMap::values`], or +/// [`BTreeMap::keys`] produce their items in key order, and take worst-case logarithmic and +/// amortized constant time per item returned. /// /// It is a logic error for a key to be modified in such a way that the key's ordering relative to /// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is @@ -72,14 +57,6 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT; /// `BTreeMap` that observed the logic error and not result in undefined behavior. This could /// include panics, incorrect results, aborts, memory leaks, and non-termination. /// -/// Iterators obtained from functions such as [`BTreeMap::iter`], [`BTreeMap::into_iter`], [`BTreeMap::values`], or -/// [`BTreeMap::keys`] produce their items in order by key, and take worst-case logarithmic and -/// amortized constant time per item returned. -/// -/// [B-Tree]: https://en.wikipedia.org/wiki/B-tree -/// [`Cell`]: core::cell::Cell -/// [`RefCell`]: core::cell::RefCell -/// /// # Examples /// /// ``` @@ -169,6 +146,43 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT; /// // modify an entry before an insert with in-place mutation /// player_stats.entry("mana").and_modify(|mana| *mana += 200).or_insert(100); /// ``` +/// +/// # Background +/// +/// A B-tree is (like) a [binary search tree], but adapted to the natural granularity that modern +/// machines like to consume data at. This means that each node contains an entire array of elements, +/// instead of just a single element. +/// +/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing +/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal +/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum number of +/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this +/// is done is *very* inefficient for modern computer architectures. In particular, every element +/// is stored in its own individually heap-allocated node. 
This means that every single insertion +/// triggers a heap-allocation, and every comparison is a potential cache-miss due to the indirection. +/// Since both heap-allocations and cache-misses are notably expensive in practice, we are forced to, +/// at the very least, reconsider the BST strategy. +/// +/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing +/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in +/// searches. However, this does mean that searches will have to do *more* comparisons on average. +/// The precise number of comparisons depends on the node search strategy used. For optimal cache +/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search +/// the node using binary search. As a compromise, one could also perform a linear search +/// that initially only checks every i<sup>th</sup> element for some choice of i. +/// +/// Currently, our implementation simply performs naive linear search. This provides excellent +/// performance on *small* nodes of elements which are cheap to compare. However in the future we +/// would like to further explore choosing the optimal search strategy based on the choice of B, +/// and possibly other factors. Using linear search, searching for a random element is expected +/// to take B * log(n) comparisons, which is generally worse than a BST. In practice, +/// however, performance is excellent. +/// +/// [B-Tree]: https://en.wikipedia.org/wiki/B-tree +/// [binary search tree]: https://en.wikipedia.org/wiki/Binary_search_tree +/// [total order]: https://en.wikipedia.org/wiki/Total_order +/// [`Cell`]: core::cell::Cell +/// [`RefCell`]: core::cell::RefCell #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "BTreeMap")] #[rustc_insignificant_dtor] diff --git a/library/compiler-builtins/compiler-builtins/README.md b/library/compiler-builtins/compiler-builtins/README.md index 387b70c0499..2d92b7651f9 100644 --- a/library/compiler-builtins/compiler-builtins/README.md +++ b/library/compiler-builtins/compiler-builtins/README.md @@ -10,6 +10,16 @@ to be added as an explicit dependency in `Cargo.toml`. [`compiler-rt`]: https://github.com/llvm/llvm-project/tree/1b1dc505057322f4fa1110ef4f53c44347f52986/compiler-rt +## Configuration + +`compiler-builtins` can be configured with the following environment variables when the `c` feature +is enabled: + +- `LLVM_COMPILER_RT_LIB` +- `RUST_COMPILER_RT_ROOT` + +See `build.rs` for details. + ## Contributing See [CONTRIBUTING.md](CONTRIBUTING.md). diff --git a/library/compiler-builtins/compiler-builtins/build.rs b/library/compiler-builtins/compiler-builtins/build.rs index 43b978606e5..6e1d230e3cd 100644 --- a/library/compiler-builtins/compiler-builtins/build.rs +++ b/library/compiler-builtins/compiler-builtins/build.rs @@ -540,12 +540,20 @@ mod c { sources.extend(&[("__emutls_get_address", "emutls.c")]); } + // Optionally, link against a prebuilt llvm compiler-rt containing the builtins + // library. Only the builtins library is required. On many platforms, this is + // available as a library named libclang_rt.builtins.a. + let link_against_prebuilt_rt = env::var_os("LLVM_COMPILER_RT_LIB").is_some(); + // When compiling the C code we require the user to tell us where the // source code is, and this is largely done so when we're compiling as // part of rust-lang/rust we can use the same llvm-project repository as // rust-lang/rust. 
let root = match env::var_os("RUST_COMPILER_RT_ROOT") { Some(s) => PathBuf::from(s), + // If a prebuilt libcompiler-rt is provided, set a valid + // path to simplify later logic. Nothing should be compiled. + None if link_against_prebuilt_rt => PathBuf::new(), None => { panic!( "RUST_COMPILER_RT_ROOT is not set. You may need to run \ @@ -553,7 +561,7 @@ mod c { ); } }; - if !root.exists() { + if !link_against_prebuilt_rt && !root.exists() { panic!("RUST_COMPILER_RT_ROOT={} does not exist", root.display()); } @@ -569,7 +577,7 @@ mod c { let src_dir = root.join("lib/builtins"); if target.arch == "aarch64" && target.env != "msvc" && target.os != "uefi" { // See below for why we're building these as separate libraries. - build_aarch64_out_of_line_atomics_libraries(&src_dir, cfg); + build_aarch64_out_of_line_atomics_libraries(&src_dir, cfg, link_against_prebuilt_rt); // Some run-time CPU feature detection is necessary, as well. let cpu_model_src = if src_dir.join("cpu_model.c").exists() { @@ -583,20 +591,45 @@ mod c { let mut added_sources = HashSet::new(); for (sym, src) in sources.map.iter() { let src = src_dir.join(src); - if added_sources.insert(src.clone()) { + if !link_against_prebuilt_rt && added_sources.insert(src.clone()) { cfg.file(&src); println!("cargo:rerun-if-changed={}", src.display()); } println!("cargo:rustc-cfg={}=\"optimized-c\"", sym); } - cfg.compile("libcompiler-rt.a"); + if link_against_prebuilt_rt { + let rt_builtins_ext = PathBuf::from(env::var_os("LLVM_COMPILER_RT_LIB").unwrap()); + if !rt_builtins_ext.exists() { + panic!( + "LLVM_COMPILER_RT_LIB={} does not exist", + rt_builtins_ext.display() + ); + } + if let Some(dir) = rt_builtins_ext.parent() { + println!("cargo::rustc-link-search=native={}", dir.display()); + } + if let Some(lib) = rt_builtins_ext.file_name() { + println!( + "cargo::rustc-link-lib=static:+verbatim={}", + lib.to_str().unwrap() + ); + } + } else { + cfg.compile("libcompiler-rt.a"); + } } - fn build_aarch64_out_of_line_atomics_libraries(builtins_dir: &Path, cfg: &mut cc::Build) { + fn build_aarch64_out_of_line_atomics_libraries( + builtins_dir: &Path, + cfg: &mut cc::Build, + link_against_prebuilt_rt: bool, + ) { let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); let outlined_atomics_file = builtins_dir.join("aarch64").join("lse.S"); - println!("cargo:rerun-if-changed={}", outlined_atomics_file.display()); + if !link_against_prebuilt_rt { + println!("cargo:rerun-if-changed={}", outlined_atomics_file.display()); + } cfg.include(&builtins_dir); @@ -609,6 +642,13 @@ mod c { for (model_number, model_name) in &[(1, "relax"), (2, "acq"), (3, "rel"), (4, "acq_rel")] { + let sym = format!("__aarch64_{}{}_{}", instruction_type, size, model_name); + println!("cargo:rustc-cfg={}=\"optimized-c\"", sym); + + if link_against_prebuilt_rt { + continue; + } + + // The original compiler-rt build system compiles the same + // source file multiple times with different compiler + // options.
Here we do something slightly different: we @@ -632,9 +672,6 @@ mod c { .unwrap(); drop(file); cfg.file(path); - - let sym = format!("__aarch64_{}{}_{}", instruction_type, size, model_name); - println!("cargo:rustc-cfg={}=\"optimized-c\"", sym); } } } diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index b3a498570f9..bb75ec74c81 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -621,11 +621,11 @@ impl<T, const N: usize> [T; N] { /// assert_eq!(strings.len(), 3); /// ``` #[stable(feature = "array_methods", since = "1.77.0")] - #[rustc_const_unstable(feature = "const_array_each_ref", issue = "133289")] + #[rustc_const_stable(feature = "const_array_each_ref", since = "CURRENT_RUSTC_VERSION")] pub const fn each_ref(&self) -> [&T; N] { let mut buf = [null::<T>(); N]; - // FIXME(const-hack): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions. + // FIXME(const_trait_impl): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions. let mut i = 0; while i < N { buf[i] = &raw const self[i]; @@ -652,11 +652,11 @@ impl<T, const N: usize> [T; N] { /// assert_eq!(floats, [0.0, 2.7, -1.0]); /// ``` #[stable(feature = "array_methods", since = "1.77.0")] - #[rustc_const_unstable(feature = "const_array_each_ref", issue = "133289")] + #[rustc_const_stable(feature = "const_array_each_ref", since = "CURRENT_RUSTC_VERSION")] pub const fn each_mut(&mut self) -> [&mut T; N] { let mut buf = [null_mut::<T>(); N]; - // FIXME(const-hack): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions. + // FIXME(const_trait_impl): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions. let mut i = 0; while i < N { buf[i] = &raw mut self[i]; diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs index 7ee0962721f..985e669c92d 100644 --- a/library/core/src/char/methods.rs +++ b/library/core/src/char/methods.rs @@ -1872,28 +1872,33 @@ pub const unsafe fn encode_utf8_raw_unchecked(code: u32, dst: *mut u8) { // SAFETY: The caller must guarantee that the buffer pointed to by `dst` // is at least `len` bytes long. unsafe { - match len { - 1 => { - *dst = code as u8; - } - 2 => { - *dst = (code >> 6 & 0x1F) as u8 | TAG_TWO_B; - *dst.add(1) = (code & 0x3F) as u8 | TAG_CONT; - } - 3 => { - *dst = (code >> 12 & 0x0F) as u8 | TAG_THREE_B; - *dst.add(1) = (code >> 6 & 0x3F) as u8 | TAG_CONT; - *dst.add(2) = (code & 0x3F) as u8 | TAG_CONT; - } - 4 => { - *dst = (code >> 18 & 0x07) as u8 | TAG_FOUR_B; - *dst.add(1) = (code >> 12 & 0x3F) as u8 | TAG_CONT; - *dst.add(2) = (code >> 6 & 0x3F) as u8 | TAG_CONT; - *dst.add(3) = (code & 0x3F) as u8 | TAG_CONT; - } - // SAFETY: `char` always takes between 1 and 4 bytes to encode in UTF-8. 
- _ => crate::hint::unreachable_unchecked(), + if len == 1 { + *dst = code as u8; + return; + } + + let last1 = (code >> 0 & 0x3F) as u8 | TAG_CONT; + let last2 = (code >> 6 & 0x3F) as u8 | TAG_CONT; + let last3 = (code >> 12 & 0x3F) as u8 | TAG_CONT; + let last4 = (code >> 18 & 0x3F) as u8 | TAG_FOUR_B; + + if len == 2 { + *dst = last2 | TAG_TWO_B; + *dst.add(1) = last1; + return; } + + if len == 3 { + *dst = last3 | TAG_THREE_B; + *dst.add(1) = last2; + *dst.add(2) = last1; + return; + } + + *dst = last4; + *dst.add(1) = last3; + *dst.add(2) = last2; + *dst.add(3) = last1; } } diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index a64fade285b..ab018fa2675 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -1554,6 +1554,9 @@ pub fn min<T: Ord>(v1: T, v2: T) -> T { /// /// Returns the first argument if the comparison determines them to be equal. /// +/// The parameter order is preserved when calling the `compare` function, i.e. `v1` is +/// always passed as the first argument and `v2` as the second. +/// /// # Examples /// /// ``` @@ -1574,7 +1577,7 @@ pub fn min<T: Ord>(v1: T, v2: T) -> T { #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { - if compare(&v2, &v1).is_lt() { v2 } else { v1 } + if compare(&v1, &v2).is_le() { v1 } else { v2 } } /// Returns the element that gives the minimum value from the specified function. @@ -1646,6 +1649,9 @@ pub fn max<T: Ord>(v1: T, v2: T) -> T { /// /// Returns the second argument if the comparison determines them to be equal. /// +/// The parameter order is preserved when calling the `compare` function, i.e. `v1` is +/// always passed as the first argument and `v2` as the second. +/// /// # Examples /// /// ``` @@ -1666,7 +1672,7 @@ pub fn max<T: Ord>(v1: T, v2: T) -> T { #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { - if compare(&v2, &v1).is_lt() { v1 } else { v2 } + if compare(&v1, &v2).is_gt() { v1 } else { v2 } } /// Returns the element that gives the maximum value from the specified function. @@ -1745,6 +1751,9 @@ where /// /// Returns `[v1, v2]` if the comparison determines them to be equal. /// +/// The parameter order is preserved when calling the `compare` function, i.e. `v1` is +/// always passed as the first argument and `v2` as the second. +/// /// # Examples /// /// ``` @@ -1769,7 +1778,7 @@ pub fn minmax_by<T, F>(v1: T, v2: T, compare: F) -> [T; 2] where F: FnOnce(&T, &T) -> Ordering, { - if compare(&v2, &v1).is_lt() { [v2, v1] } else { [v1, v2] } + if compare(&v1, &v2).is_le() { [v1, v2] } else { [v2, v1] } } /// Returns minimum and maximum values with respect to the specified key function. 
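The `cmp.rs` change above makes `min_by`, `max_by`, and `minmax_by` call the comparator as `compare(&v1, &v2)` rather than `compare(&v2, &v1)`, while preserving the documented tie-breaking: `min_by` returns the first argument on `Ordering::Equal`, `max_by` the second. A minimal sketch of the now-guaranteed behavior, using only the stable `core::cmp` API (the identifiers `a`, `b`, and `by_count` are illustrative, not from the patch):

```rust
use core::cmp;

fn main() {
    let a = ("first", 1);
    let b = ("second", 1);

    // The comparator now always receives (&v1, &v2):
    // here `x` is always `a` and `y` is always `b`.
    let by_count = |x: &(&str, i32), y: &(&str, i32)| x.1.cmp(&y.1);

    // On Ordering::Equal, `min_by` returns the first argument...
    assert_eq!(cmp::min_by(a, b, by_count).0, "first");
    // ...and `max_by` returns the second.
    assert_eq!(cmp::max_by(a, b, by_count).0, "second");
}
```

Switching the condition from `compare(&v2, &v1).is_lt()` to `compare(&v1, &v2).is_le()` (and `.is_gt()` for `max_by`) keeps these results identical; only the argument order observed by the comparator is now fixed.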
diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs index 7d41ae45093..605ba42c541 100644 --- a/library/core/src/fmt/num.rs +++ b/library/core/src/fmt/num.rs @@ -3,164 +3,78 @@ use crate::fmt::NumBuffer; use crate::mem::MaybeUninit; use crate::num::fmt as numfmt; -use crate::ops::{Div, Rem, Sub}; use crate::{fmt, ptr, slice, str}; -#[doc(hidden)] -trait DisplayInt: - PartialEq + PartialOrd + Div<Output = Self> + Rem<Output = Self> + Sub<Output = Self> + Copy -{ - fn zero() -> Self; - fn from_u8(u: u8) -> Self; - fn to_u8(&self) -> u8; - #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))] - fn to_u32(&self) -> u32; - fn to_u64(&self) -> u64; - fn to_u128(&self) -> u128; -} +/// Formatting of integers with a non-decimal radix. +macro_rules! radix_integer { + (fmt::$Trait:ident for $Signed:ident and $Unsigned:ident, $prefix:literal, $dig_tab:literal) => { + #[stable(feature = "rust1", since = "1.0.0")] + impl fmt::$Trait for $Unsigned { + /// Format unsigned integers in the radix. + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Check macro arguments at compile time. + const { + assert!($Unsigned::MIN == 0, "need unsigned"); + assert!($dig_tab.is_ascii(), "need single-byte entries"); + } -macro_rules! impl_int { - ($($t:ident)*) => ( - $(impl DisplayInt for $t { - fn zero() -> Self { 0 } - fn from_u8(u: u8) -> Self { u as Self } - fn to_u8(&self) -> u8 { *self as u8 } - #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))] - fn to_u32(&self) -> u32 { *self as u32 } - fn to_u64(&self) -> u64 { *self as u64 } - fn to_u128(&self) -> u128 { *self as u128 } - })* - ) -} + // ASCII digits in ascending order are used as a lookup table. + const DIG_TAB: &[u8] = $dig_tab; + const BASE: $Unsigned = DIG_TAB.len() as $Unsigned; + const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1; -impl_int! { - i8 i16 i32 i64 i128 isize - u8 u16 u32 u64 u128 usize -} + // Buffer digits of self with right alignment. + let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N]; + // Count the number of bytes in buf that are not initialized. + let mut offset = buf.len(); -/// A type that represents a specific radix -/// -/// # Safety -/// -/// `digit` must return an ASCII character. -#[doc(hidden)] -unsafe trait GenericRadix: Sized { - /// The number of digits. - const BASE: u8; - - /// A radix-specific prefix string. - const PREFIX: &'static str; - - /// Converts an integer to corresponding radix digit. - fn digit(x: u8) -> u8; - - /// Format an integer using the radix using a formatter. - fn fmt_int<T: DisplayInt>(&self, mut x: T, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // The radix can be as low as 2, so we need a buffer of at least 128 - // characters for a base 2 number. - let zero = T::zero(); - let is_nonnegative = x >= zero; - let mut buf = [MaybeUninit::<u8>::uninit(); 128]; - let mut offset = buf.len(); - let base = T::from_u8(Self::BASE); - if is_nonnegative { - // Accumulate each digit of the number from the least significant - // to the most significant figure. - loop { - let n = x % base; // Get the current place value. - x = x / base; // Deaccumulate the number. - offset -= 1; - buf[offset].write(Self::digit(n.to_u8())); // Store the digit in the buffer. - if x == zero { - // No more digits left to accumulate. - break; - }; - } - } else { - // Do the same as above, but accounting for two's complement. - loop { - let n = zero - (x % base); // Get the current place value. - x = x / base; // Deaccumulate the number. 
- offset -= 1; - buf[offset].write(Self::digit(n.to_u8())); // Store the digit in the buffer. - if x == zero { - // No more digits left to accumulate. - break; - }; - } - } - // SAFETY: Starting from `offset`, all elements of the slice have been set. - let buf_slice = unsafe { slice_buffer_to_str(&buf, offset) }; - f.pad_integral(is_nonnegative, Self::PREFIX, buf_slice) - } -} + // Accumulate each digit of the number from the least + // significant to the most significant figure. + let mut remain = *self; + loop { + let digit = remain % BASE; + remain /= BASE; -/// A binary (base 2) radix -#[derive(Clone, PartialEq)] -struct Binary; - -/// An octal (base 8) radix -#[derive(Clone, PartialEq)] -struct Octal; - -/// A hexadecimal (base 16) radix, formatted with lower-case characters -#[derive(Clone, PartialEq)] -struct LowerHex; - -/// A hexadecimal (base 16) radix, formatted with upper-case characters -#[derive(Clone, PartialEq)] -struct UpperHex; - -macro_rules! radix { - ($T:ident, $base:expr, $prefix:expr, $($x:pat => $conv:expr),+) => { - unsafe impl GenericRadix for $T { - const BASE: u8 = $base; - const PREFIX: &'static str = $prefix; - fn digit(x: u8) -> u8 { - match x { - $($x => $conv,)+ - x => panic!("number not in the range 0..={}: {}", Self::BASE - 1, x), + offset -= 1; + // SAFETY: `remain` will reach 0 and we will break before `offset` wraps + unsafe { core::hint::assert_unchecked(offset < buf.len()) } + buf[offset].write(DIG_TAB[digit as usize]); + if remain == 0 { + break; + } } + + // SAFETY: Starting from `offset`, all elements of the slice have been set. + let digits = unsafe { slice_buffer_to_str(&buf, offset) }; + f.pad_integral(true, $prefix, digits) } } - } -} -radix! { Binary, 2, "0b", x @ 0 ..= 1 => b'0' + x } -radix! { Octal, 8, "0o", x @ 0 ..= 7 => b'0' + x } -radix! { LowerHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, x @ 10 ..= 15 => b'a' + (x - 10) } -radix! { UpperHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, x @ 10 ..= 15 => b'A' + (x - 10) } - -macro_rules! int_base { - (fmt::$Trait:ident for $T:ident as $U:ident -> $Radix:ident) => { #[stable(feature = "rust1", since = "1.0.0")] - impl fmt::$Trait for $T { + impl fmt::$Trait for $Signed { + /// Format signed integers in the two’s-complement form. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - $Radix.fmt_int(*self as $U, f) + fmt::$Trait::fmt(&self.cast_unsigned(), f) } } }; } -macro_rules! integer { - ($Int:ident, $Uint:ident) => { - int_base! { fmt::Binary for $Int as $Uint -> Binary } - int_base! { fmt::Octal for $Int as $Uint -> Octal } - int_base! { fmt::LowerHex for $Int as $Uint -> LowerHex } - int_base! { fmt::UpperHex for $Int as $Uint -> UpperHex } - - int_base! { fmt::Binary for $Uint as $Uint -> Binary } - int_base! { fmt::Octal for $Uint as $Uint -> Octal } - int_base! { fmt::LowerHex for $Uint as $Uint -> LowerHex } - int_base! { fmt::UpperHex for $Uint as $Uint -> UpperHex } +/// Formatting of integers with a non-decimal radix. +macro_rules! radix_integers { + ($Signed:ident, $Unsigned:ident) => { + radix_integer! { fmt::Binary for $Signed and $Unsigned, "0b", b"01" } + radix_integer! { fmt::Octal for $Signed and $Unsigned, "0o", b"01234567" } + radix_integer! { fmt::LowerHex for $Signed and $Unsigned, "0x", b"0123456789abcdef" } + radix_integer! { fmt::UpperHex for $Signed and $Unsigned, "0x", b"0123456789ABCDEF" } }; } -integer! { isize, usize } -integer! { i8, u8 } -integer! { i16, u16 } -integer! { i32, u32 } -integer! { i64, u64 } -integer! { i128, u128 } +radix_integers! 
{ isize, usize } +radix_integers! { i8, u8 } +radix_integers! { i16, u16 } +radix_integers! { i32, u32 } +radix_integers! { i64, u64 } +radix_integers! { i128, u128 } macro_rules! impl_Debug { ($($T:ident)*) => { @@ -205,16 +119,21 @@ unsafe fn slice_buffer_to_str(buf: &[MaybeUninit<u8>], offset: usize) -> &str { } macro_rules! impl_Display { - ($($signed:ident, $unsigned:ident,)* ; as $u:ident via $conv_fn:ident named $gen_name:ident) => { + ($($Signed:ident, $Unsigned:ident),* ; as $T:ident into $fmt_fn:ident) => { $( + const _: () = { + assert!($Signed::BITS <= $T::BITS, "need lossless conversion"); + assert!($Unsigned::BITS <= $T::BITS, "need lossless conversion"); + }; + #[stable(feature = "rust1", since = "1.0.0")] - impl fmt::Display for $unsigned { + impl fmt::Display for $Unsigned { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(not(feature = "optimize_for_size"))] { - const MAX_DEC_N: usize = $unsigned::MAX.ilog10() as usize + 1; - // Buffer decimals for $unsigned with right alignment. + const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1; + // Buffer decimals for self with right alignment. let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N]; // SAFETY: `buf` is always big enough to contain all the digits. @@ -222,18 +141,20 @@ macro_rules! impl_Display { } #[cfg(feature = "optimize_for_size")] { - $gen_name(self.$conv_fn(), true, f) + // Lossless conversion (with as) is asserted at the top of + // this macro. + ${concat($fmt_fn, _small)}(*self as $T, true, f) } } } #[stable(feature = "rust1", since = "1.0.0")] - impl fmt::Display for $signed { + impl fmt::Display for $Signed { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(not(feature = "optimize_for_size"))] { - const MAX_DEC_N: usize = $unsigned::MAX.ilog10() as usize + 1; - // Buffer decimals for $unsigned with right alignment. + const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1; + // Buffer decimals for self with right alignment. let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N]; // SAFETY: `buf` is always big enough to contain all the digits. @@ -241,13 +162,15 @@ macro_rules! impl_Display { } #[cfg(feature = "optimize_for_size")] { - return $gen_name(self.unsigned_abs().$conv_fn(), *self >= 0, f); + // Lossless conversion (with as) is asserted at the top of + // this macro. + return ${concat($fmt_fn, _small)}(self.unsigned_abs() as $T, *self >= 0, f); } } } #[cfg(not(feature = "optimize_for_size"))] - impl $unsigned { + impl $Unsigned { #[doc(hidden)] #[unstable( feature = "fmt_internals", @@ -268,7 +191,7 @@ macro_rules! impl_Display { let mut remain = self; // Format per four digits from the lookup table. - // Four digits need a 16-bit $unsigned or wider. + // Four digits need a 16-bit $Unsigned or wider. while size_of::<Self>() > 1 && remain > 999.try_into().expect("branch is not hit for types that cannot fit 999 (u8)") { // SAFETY: All of the decimals fit in buf due to MAX_DEC_N // and the while condition ensures at least 4 more decimals. @@ -327,7 +250,7 @@ macro_rules! impl_Display { } } - impl $signed { + impl $Signed { /// Allows users to write an integer (in signed decimal format) into a variable `buf` of /// type [`NumBuffer`] that is passed by the caller by mutable reference. /// @@ -337,15 +260,15 @@ macro_rules! 
impl_Display { /// #![feature(int_format_into)] /// use core::fmt::NumBuffer; /// - #[doc = concat!("let n = 0", stringify!($signed), ";")] + #[doc = concat!("let n = 0", stringify!($Signed), ";")] /// let mut buf = NumBuffer::new(); /// assert_eq!(n.format_into(&mut buf), "0"); /// - #[doc = concat!("let n1 = 32", stringify!($signed), ";")] + #[doc = concat!("let n1 = 32", stringify!($Signed), ";")] /// assert_eq!(n1.format_into(&mut buf), "32"); /// - #[doc = concat!("let n2 = ", stringify!($signed::MAX), ";")] - #[doc = concat!("assert_eq!(n2.format_into(&mut buf), ", stringify!($signed::MAX), ".to_string());")] + #[doc = concat!("let n2 = ", stringify!($Signed::MAX), ";")] + #[doc = concat!("assert_eq!(n2.format_into(&mut buf), ", stringify!($Signed::MAX), ".to_string());")] /// ``` #[unstable(feature = "int_format_into", issue = "138215")] pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str { @@ -358,7 +281,9 @@ macro_rules! impl_Display { } #[cfg(feature = "optimize_for_size")] { - offset = ${concat(_inner_slow_integer_to_str, $gen_name)}(self.unsigned_abs().$conv_fn(), &mut buf.buf); + // Lossless conversion (with as) is asserted at the top of + // this macro. + offset = ${concat($fmt_fn, _in_buf_small)}(self.unsigned_abs() as $T, &mut buf.buf); } // Only difference between signed and unsigned are these 4 lines. if self < 0 { @@ -370,7 +295,7 @@ macro_rules! impl_Display { } } - impl $unsigned { + impl $Unsigned { /// Allows users to write an integer (in signed decimal format) into a variable `buf` of /// type [`NumBuffer`] that is passed by the caller by mutable reference. /// @@ -380,15 +305,15 @@ macro_rules! impl_Display { /// #![feature(int_format_into)] /// use core::fmt::NumBuffer; /// - #[doc = concat!("let n = 0", stringify!($unsigned), ";")] + #[doc = concat!("let n = 0", stringify!($Unsigned), ";")] /// let mut buf = NumBuffer::new(); /// assert_eq!(n.format_into(&mut buf), "0"); /// - #[doc = concat!("let n1 = 32", stringify!($unsigned), ";")] + #[doc = concat!("let n1 = 32", stringify!($Unsigned), ";")] /// assert_eq!(n1.format_into(&mut buf), "32"); /// - #[doc = concat!("let n2 = ", stringify!($unsigned::MAX), ";")] - #[doc = concat!("assert_eq!(n2.format_into(&mut buf), ", stringify!($unsigned::MAX), ".to_string());")] + #[doc = concat!("let n2 = ", stringify!($Unsigned::MAX), ";")] + #[doc = concat!("assert_eq!(n2.format_into(&mut buf), ", stringify!($Unsigned::MAX), ".to_string());")] /// ``` #[unstable(feature = "int_format_into", issue = "138215")] pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str { @@ -401,7 +326,9 @@ macro_rules! impl_Display { } #[cfg(feature = "optimize_for_size")] { - offset = ${concat(_inner_slow_integer_to_str, $gen_name)}(self.$conv_fn(), &mut buf.buf); + // Lossless conversion (with as) is asserted at the top of + // this macro. + offset = ${concat($fmt_fn, _in_buf_small)}(self as $T, &mut buf.buf); } // SAFETY: Starting from `offset`, all elements of the slice have been set. unsafe { slice_buffer_to_str(&buf.buf, offset) } @@ -412,7 +339,7 @@ macro_rules! impl_Display { )* #[cfg(feature = "optimize_for_size")] - fn ${concat(_inner_slow_integer_to_str, $gen_name)}(mut n: $u, buf: &mut [MaybeUninit::<u8>]) -> usize { + fn ${concat($fmt_fn, _in_buf_small)}(mut n: $T, buf: &mut [MaybeUninit::<u8>]) -> usize { let mut curr = buf.len(); // SAFETY: To show that it's OK to copy into `buf_ptr`, notice that at the beginning @@ -433,11 +360,11 @@ macro_rules! 
impl_Display { } #[cfg(feature = "optimize_for_size")] - fn $gen_name(n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result { - const MAX_DEC_N: usize = $u::MAX.ilog(10) as usize + 1; + fn ${concat($fmt_fn, _small)}(n: $T, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result { + const MAX_DEC_N: usize = $T::MAX.ilog(10) as usize + 1; let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N]; - let offset = ${concat(_inner_slow_integer_to_str, $gen_name)}(n, &mut buf); + let offset = ${concat($fmt_fn, _in_buf_small)}(n, &mut buf); // SAFETY: Starting from `offset`, all elements of the slice have been set. let buf_slice = unsafe { slice_buffer_to_str(&buf, offset) }; f.pad_integral(is_nonnegative, "", buf_slice) @@ -446,9 +373,9 @@ macro_rules! impl_Display { } macro_rules! impl_Exp { - ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => { - fn $name( - mut n: $u, + ($($Signed:ident, $Unsigned:ident),* ; as $T:ident into $fmt_fn:ident) => { + fn $fmt_fn( + mut n: $T, is_nonnegative: bool, upper: bool, f: &mut fmt::Formatter<'_> @@ -582,32 +509,41 @@ macro_rules! impl_Exp { $( #[stable(feature = "integer_exp_format", since = "1.42.0")] - impl fmt::LowerExp for $t { - #[allow(unused_comparisons)] + impl fmt::LowerExp for $Signed { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let is_nonnegative = *self >= 0; let n = if is_nonnegative { - self.$conv_fn() + *self as $T } else { - // convert the negative num to positive by summing 1 to its 2s complement - (!self.$conv_fn()).wrapping_add(1) + self.unsigned_abs() as $T }; - $name(n, is_nonnegative, false, f) + $fmt_fn(n, is_nonnegative, false, f) + } + } + #[stable(feature = "integer_exp_format", since = "1.42.0")] + impl fmt::LowerExp for $Unsigned { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + $fmt_fn(*self as $T, true, false, f) } })* + $( #[stable(feature = "integer_exp_format", since = "1.42.0")] - impl fmt::UpperExp for $t { - #[allow(unused_comparisons)] + impl fmt::UpperExp for $Signed { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let is_nonnegative = *self >= 0; let n = if is_nonnegative { - self.$conv_fn() + *self as $T } else { - // convert the negative num to positive by summing 1 to its 2s complement - (!self.$conv_fn()).wrapping_add(1) + self.unsigned_abs() as $T }; - $name(n, is_nonnegative, true, f) + $fmt_fn(n, is_nonnegative, true, f) + } + } + #[stable(feature = "integer_exp_format", since = "1.42.0")] + impl fmt::UpperExp for $Unsigned { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + $fmt_fn(*self as $T, true, true, f) } })* }; @@ -623,37 +559,20 @@ impl_Debug! 
{ #[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] mod imp { use super::*; - impl_Display!( - i8, u8, - i16, u16, - i32, u32, - i64, u64, - isize, usize, - ; as u64 via to_u64 named fmt_u64 - ); - impl_Exp!( - i8, u8, i16, u16, i32, u32, i64, u64, usize, isize - as u64 via to_u64 named exp_u64 - ); + impl_Display!(i8, u8, i16, u16, i32, u32, i64, u64, isize, usize; as u64 into display_u64); + impl_Exp!(i8, u8, i16, u16, i32, u32, i64, u64, isize, usize; as u64 into exp_u64); } #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))] mod imp { use super::*; - impl_Display!( - i8, u8, - i16, u16, - i32, u32, - isize, usize, - ; as u32 via to_u32 named fmt_u32); - impl_Display!( - i64, u64, - ; as u64 via to_u64 named fmt_u64); - - impl_Exp!(i8, u8, i16, u16, i32, u32, isize, usize as u32 via to_u32 named exp_u32); - impl_Exp!(i64, u64 as u64 via to_u64 named exp_u64); + impl_Display!(i8, u8, i16, u16, i32, u32, isize, usize; as u32 into display_u32); + impl_Display!(i64, u64; as u64 into display_u64); + + impl_Exp!(i8, u8, i16, u16, i32, u32, isize, usize; as u32 into exp_u32); + impl_Exp!(i64, u64; as u64 into exp_u64); } -impl_Exp!(i128, u128 as u128 via to_u128 named exp_u128); +impl_Exp!(i128, u128; as u128 into exp_u128); const U128_MAX_DEC_N: usize = u128::MAX.ilog10() as usize + 1; diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index dd838d494bc..904aa52c784 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -261,53 +261,72 @@ pub unsafe fn atomic_fence<const ORD: AtomicOrdering>(); pub unsafe fn atomic_singlethreadfence<const ORD: AtomicOrdering>(); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction -/// if supported; otherwise, it is a no-op. +/// for the given address if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// -/// The `locality` argument must be a constant integer and is a temporal locality specifier -/// ranging from (0) - no locality, to (3) - extremely local keep in cache. +/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality, +/// to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn prefetch_read_data<T>(data: *const T, locality: i32); +#[miri::intrinsic_fallback_is_spec] +pub const fn prefetch_read_data<T, const LOCALITY: i32>(data: *const T) { + // This operation is a no-op, unless it is overridden by the backend. + let _ = data; +} + /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction -/// if supported; otherwise, it is a no-op. +/// for the given address if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// -/// The `locality` argument must be a constant integer and is a temporal locality specifier -/// ranging from (0) - no locality, to (3) - extremely local keep in cache. +/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality, +/// to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn prefetch_write_data<T>(data: *const T, locality: i32); +#[miri::intrinsic_fallback_is_spec] +pub const fn prefetch_write_data<T, const LOCALITY: i32>(data: *const T) { + // This operation is a no-op, unless it is overridden by the backend. + let _ = data; +} + /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction -/// if supported; otherwise, it is a no-op. +/// for the given address if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// -/// The `locality` argument must be a constant integer and is a temporal locality specifier -/// ranging from (0) - no locality, to (3) - extremely local keep in cache. +/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality, +/// to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn prefetch_read_instruction<T>(data: *const T, locality: i32); +#[miri::intrinsic_fallback_is_spec] +pub const fn prefetch_read_instruction<T, const LOCALITY: i32>(data: *const T) { + // This operation is a no-op, unless it is overridden by the backend. + let _ = data; +} + /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction -/// if supported; otherwise, it is a no-op. +/// for the given address if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// -/// The `locality` argument must be a constant integer and is a temporal locality specifier -/// ranging from (0) - no locality, to (3) - extremely local keep in cache. +/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality, +/// to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn prefetch_write_instruction<T>(data: *const T, locality: i32); +#[miri::intrinsic_fallback_is_spec] +pub const fn prefetch_write_instruction<T, const LOCALITY: i32>(data: *const T) { + // This operation is a no-op, unless it is overridden by the backend. + let _ = data; +} /// Executes a breakpoint trap, for inspection by a debugger. /// diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 71abd707374..f69d6a98592 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -172,6 +172,7 @@ #![feature(no_core)] #![feature(optimize_attribute)] #![feature(prelude_import)] +#![feature(reborrow)] #![feature(repr_simd)] #![feature(rustc_allow_const_fn_unstable)] #![feature(rustc_attrs)] diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index ba00ee17b65..8ad58599c68 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -1365,3 +1365,11 @@ pub macro CoercePointee($item:item) { pub trait CoercePointeeValidated { /* compiler built-in */ } + +/// Allows value to be reborrowed as exclusive, creating a copy of the value +/// that disables the source for reads and writes for the lifetime of the copy. +#[lang = "reborrow"] +#[unstable(feature = "reborrow", issue = "145612")] +pub trait Reborrow { + // Empty. 
+} diff --git a/library/core/src/num/dec2flt/mod.rs b/library/core/src/num/dec2flt/mod.rs index 1844cd98082..3118a6e5ca6 100644 --- a/library/core/src/num/dec2flt/mod.rs +++ b/library/core/src/num/dec2flt/mod.rs @@ -124,6 +124,8 @@ macro_rules! from_str_float_impl { /// * '2.5E-10' /// * '5.' /// * '.5', or, equivalently, '0.5' + /// * '7' + /// * '007' /// * 'inf', '-inf', '+infinity', 'NaN' /// /// Note that alphabetical characters are not case-sensitive. diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index bd2f7445612..25864db5fea 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -209,6 +209,48 @@ macro_rules! int_impl { self & self.wrapping_neg() } + /// Returns the index of the highest bit set to one in `self`, or `None` + /// if `self` is `0`. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_lowest_highest_one)] + /// + #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".highest_one(), None);")] + #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".highest_one(), Some(0));")] + #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".highest_one(), Some(4));")] + #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".highest_one(), Some(4));")] + /// ``` + #[unstable(feature = "int_lowest_highest_one", issue = "145203")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + pub const fn highest_one(self) -> Option<u32> { + (self as $UnsignedT).highest_one() + } + + /// Returns the index of the lowest bit set to one in `self`, or `None` + /// if `self` is `0`. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_lowest_highest_one)] + /// + #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".lowest_one(), None);")] + #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".lowest_one(), Some(0));")] + #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".lowest_one(), Some(4));")] + #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".lowest_one(), Some(0));")] + /// ``` + #[unstable(feature = "int_lowest_highest_one", issue = "145203")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + pub const fn lowest_one(self) -> Option<u32> { + (self as $UnsignedT).lowest_one() + } + /// Returns the bit pattern of `self` reinterpreted as an unsigned integer of the same size. /// /// This produces the same result as an `as` cast, but ensures that the bit-width remains @@ -2494,8 +2536,7 @@ macro_rules! int_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `i32` is used here. + /// Please note that this example is shared among integer types, which is why `i32` is used. /// /// ``` /// #![feature(bigint_helper_methods)] @@ -2525,8 +2566,7 @@ macro_rules! int_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `i32` is used here. + /// Please note that this example is shared among integer types, which is why `i32` is used. /// /// ``` /// #![feature(bigint_helper_methods)] @@ -2563,8 +2603,7 @@ macro_rules! int_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `i32` is used here. + /// Please note that this example is shared among integer types, which is why `i32` is used. 
/// /// ``` /// #![feature(bigint_helper_methods)] diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index 308d722f5d5..e02d9260a16 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -681,6 +681,54 @@ macro_rules! nonzero_integer { unsafe { NonZero::new_unchecked(n) } } + /// Returns the index of the highest bit set to one in `self`. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_lowest_highest_one)] + /// + /// # use core::num::NonZero; + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1)?.highest_one(), 0);")] + #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x10)?.highest_one(), 4);")] + #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1f)?.highest_one(), 4);")] + /// # Some(()) + /// # } + /// ``` + #[unstable(feature = "int_lowest_highest_one", issue = "145203")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + pub const fn highest_one(self) -> u32 { + Self::BITS - 1 - self.leading_zeros() + } + + /// Returns the index of the lowest bit set to one in `self`. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_lowest_highest_one)] + /// + /// # use core::num::NonZero; + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1)?.lowest_one(), 0);")] + #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x10)?.lowest_one(), 4);")] + #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1f)?.lowest_one(), 0);")] + /// # Some(()) + /// # } + /// ``` + #[unstable(feature = "int_lowest_highest_one", issue = "145203")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + pub const fn lowest_one(self) -> u32 { + self.trailing_zeros() + } + /// Returns the number of ones in the binary representation of `self`. /// /// # Examples diff --git a/library/core/src/num/saturating.rs b/library/core/src/num/saturating.rs index c7040721b93..365a82a57e0 100644 --- a/library/core/src/num/saturating.rs +++ b/library/core/src/num/saturating.rs @@ -729,8 +729,8 @@ macro_rules! saturating_int_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `i16` is used here. + /// Please note that this example is shared among integer types, which is why `i16` + /// is used. /// /// ``` /// use std::num::Saturating; diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 0afad09bdc6..10d9498d15e 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -261,6 +261,54 @@ macro_rules! uint_impl { self & self.wrapping_neg() } + /// Returns the index of the highest bit set to one in `self`, or `None` + /// if `self` is `0`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(int_lowest_highest_one)] + /// + #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".highest_one(), None);")] + #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".highest_one(), Some(0));")] + #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".highest_one(), Some(4));")] + #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".highest_one(), Some(4));")] + /// ``` + #[unstable(feature = "int_lowest_highest_one", issue = "145203")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + pub const fn highest_one(self) -> Option<u32> { + match NonZero::new(self) { + Some(v) => Some(v.highest_one()), + None => None, + } + } + + /// Returns the index of the lowest bit set to one in `self`, or `None` + /// if `self` is `0`. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_lowest_highest_one)] + /// + #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".lowest_one(), None);")] + #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".lowest_one(), Some(0));")] + #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".lowest_one(), Some(4));")] + #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".lowest_one(), Some(0));")] + /// ``` + #[unstable(feature = "int_lowest_highest_one", issue = "145203")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + pub const fn lowest_one(self) -> Option<u32> { + match NonZero::new(self) { + Some(v) => Some(v.lowest_one()), + None => None, + } + } + /// Returns the bit pattern of `self` reinterpreted as a signed integer of the same size. /// /// This produces the same result as an `as` cast, but ensures that the bit-width remains @@ -2115,8 +2163,7 @@ macro_rules! uint_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `u8` is used here. + /// Please note that this example is shared among integer types, which is why `u8` is used. /// /// ``` /// assert_eq!(10u8.wrapping_mul(12), 120); @@ -2606,8 +2653,8 @@ macro_rules! uint_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. + /// Please note that this example is shared among integer types, which is why `u32` + /// is used. /// /// ``` /// assert_eq!(5u32.overflowing_mul(2), (10, false)); @@ -2633,8 +2680,7 @@ macro_rules! uint_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. + /// Please note that this example is shared among integer types, which is why `u32` is used. /// /// ``` /// #![feature(bigint_helper_methods)] @@ -2664,8 +2710,7 @@ macro_rules! uint_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. + /// Please note that this example is shared among integer types, which is why `u32` is used. /// /// ``` /// #![feature(bigint_helper_methods)] diff --git a/library/core/src/num/wrapping.rs b/library/core/src/num/wrapping.rs index 9ccad4b6459..881fe615f80 100644 --- a/library/core/src/num/wrapping.rs +++ b/library/core/src/num/wrapping.rs @@ -765,8 +765,8 @@ macro_rules! wrapping_int_impl { /// /// # Examples /// - /// Please note that this example is shared between integer types. - /// Which explains why `i16` is used here.
+ /// Please note that this example is shared among integer types, which is why `i16` + /// is used. /// /// Basic usage: /// diff --git a/library/core/src/panic/location.rs b/library/core/src/panic/location.rs index 6ef7d5a22a3..cafdcfa2c2e 100644 --- a/library/core/src/panic/location.rs +++ b/library/core/src/panic/location.rs @@ -183,7 +183,7 @@ impl<'a> Location<'a> { #[must_use] #[stable(feature = "panic_hooks", since = "1.10.0")] #[rustc_const_stable(feature = "const_location_fields", since = "1.79.0")] - pub const fn file(&self) -> &str { + pub const fn file(&self) -> &'a str { // SAFETY: The filename is valid. unsafe { self.filename.as_ref() } } @@ -195,7 +195,7 @@ impl<'a> Location<'a> { #[must_use] #[unstable(feature = "file_with_nul", issue = "141727")] #[inline] - pub const fn file_with_nul(&self) -> &CStr { + pub const fn file_with_nul(&self) -> &'a CStr { let filename = self.filename.as_ptr(); // SAFETY: The filename is valid for `filename_len+1` bytes, so this addition can't diff --git a/library/core/src/pin/unsafe_pinned.rs b/library/core/src/pin/unsafe_pinned.rs index b18b5d7c9ec..ede6e0d6106 100644 --- a/library/core/src/pin/unsafe_pinned.rs +++ b/library/core/src/pin/unsafe_pinned.rs @@ -120,8 +120,8 @@ impl<T: ?Sized> UnsafePinned<T> { #[inline(always)] #[must_use] #[unstable(feature = "unsafe_pinned", issue = "125735")] - pub const fn raw_get(this: *const Self) -> *const T { - this as *const T + pub const fn raw_get(this: *const Self) -> *mut T { + this as *const T as *mut T } /// Gets a mutable pointer to the wrapped value. diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index f5c490ca7ce..6fc85a83e17 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -914,6 +914,7 @@ pub const fn dangling<T>() -> *const T { #[must_use] #[stable(feature = "strict_provenance", since = "1.84.0")] #[rustc_const_stable(feature = "strict_provenance", since = "1.84.0")] +#[allow(integer_to_ptr_transmutes)] // Expected semantics here. pub const fn without_provenance_mut<T>(addr: usize) -> *mut T { // An int-to-pointer transmute currently has exactly the intended semantics: it creates a // pointer without provenance. Note that this is *not* a stable guarantee about transmute diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs index ae360df80f6..98091e9fe83 100644 --- a/library/core/src/slice/index.rs +++ b/library/core/src/slice/index.rs @@ -34,53 +34,44 @@ where #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] #[track_caller] -const fn slice_start_index_len_fail(index: usize, len: usize) -> ! { - const_panic!( - "slice start index is out of range for slice", - "range start index {index} out of range for slice of length {len}", - index: usize, - len: usize, - ) -} +const fn slice_index_fail(start: usize, end: usize, len: usize) -> ! { + if start > len { + const_panic!( + "slice start index is out of range for slice", + "range start index {start} out of range for slice of length {len}", + start: usize, + len: usize, + ) + } -#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] -#[cfg_attr(feature = "panic_immediate_abort", inline)] -#[track_caller] -const fn slice_end_index_len_fail(index: usize, len: usize) -> ! 
{ - const_panic!( - "slice end index is out of range for slice", - "range end index {index} out of range for slice of length {len}", - index: usize, - len: usize, - ) -} + if end > len { + const_panic!( + "slice end index is out of range for slice", + "range end index {end} out of range for slice of length {len}", + end: usize, + len: usize, + ) + } -#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] -#[cfg_attr(feature = "panic_immediate_abort", inline)] -#[track_caller] -const fn slice_index_order_fail(index: usize, end: usize) -> ! { + if start > end { + const_panic!( + "slice index start is larger than end", + "slice index starts at {start} but ends at {end}", + start: usize, + end: usize, + ) + } + + // Only reachable if the range was a `RangeInclusive` or a + // `RangeToInclusive`, with `end == len`. const_panic!( - "slice index start is larger than end", - "slice index starts at {index} but ends at {end}", - index: usize, + "slice end index is out of range for slice", + "range end index {end} out of range for slice of length {len}", end: usize, + len: usize, ) } -#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] -#[cfg_attr(feature = "panic_immediate_abort", inline)] -#[track_caller] -const fn slice_start_index_overflow_fail() -> ! { - panic!("attempted to index slice from after maximum usize"); -} - -#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] -#[cfg_attr(feature = "panic_immediate_abort", inline)] -#[track_caller] -const fn slice_end_index_overflow_fail() -> ! { - panic!("attempted to index slice up to maximum usize"); -} - // The UbChecks are great for catching bugs in the unsafe methods, but including // them in safe indexing is unnecessary and hurts inlining and debug runtime perf. // Both the safe and unsafe public methods share these helpers, @@ -341,7 +332,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::IndexRange { // SAFETY: `self` is checked to be valid and in bounds above. unsafe { &*get_offset_len_noubcheck(slice, self.start(), self.len()) } } else { - slice_end_index_len_fail(self.end(), slice.len()) + slice_index_fail(self.start(), self.end(), slice.len()) } } @@ -351,7 +342,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::IndexRange { // SAFETY: `self` is checked to be valid and in bounds above. unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len()) } } else { - slice_end_index_len_fail(self.end(), slice.len()) + slice_index_fail(self.start(), self.end(), slice.len()) } } } @@ -436,26 +427,27 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> { #[inline(always)] fn index(self, slice: &[T]) -> &[T] { // Using checked_sub is a safe way to get `SubUnchecked` in MIR - let Some(new_len) = usize::checked_sub(self.end, self.start) else { - slice_index_order_fail(self.start, self.end) - }; - if self.end > slice.len() { - slice_end_index_len_fail(self.end, slice.len()); + if let Some(new_len) = usize::checked_sub(self.end, self.start) + && self.end <= slice.len() + { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) } + } else { + slice_index_fail(self.start, self.end, slice.len()) } - // SAFETY: `self` is checked to be valid and in bounds above. 
- unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) } } #[inline] fn index_mut(self, slice: &mut [T]) -> &mut [T] { - let Some(new_len) = usize::checked_sub(self.end, self.start) else { - slice_index_order_fail(self.start, self.end) - }; - if self.end > slice.len() { - slice_end_index_len_fail(self.end, slice.len()); + // Using checked_sub is a safe way to get `SubUnchecked` in MIR + if let Some(new_len) = usize::checked_sub(self.end, self.start) + && self.end <= slice.len() + { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) } + } else { + slice_index_fail(self.start, self.end, slice.len()) } - // SAFETY: `self` is checked to be valid and in bounds above. - unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) } } } @@ -567,7 +559,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::RangeFrom<usize> { #[inline] fn index(self, slice: &[T]) -> &[T] { if self.start > slice.len() { - slice_start_index_len_fail(self.start, slice.len()); + slice_index_fail(self.start, slice.len(), slice.len()) } // SAFETY: `self` is checked to be valid and in bounds above. unsafe { &*self.get_unchecked(slice) } @@ -576,7 +568,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::RangeFrom<usize> { #[inline] fn index_mut(self, slice: &mut [T]) -> &mut [T] { if self.start > slice.len() { - slice_start_index_len_fail(self.start, slice.len()); + slice_index_fail(self.start, slice.len(), slice.len()) } // SAFETY: `self` is checked to be valid and in bounds above. unsafe { &mut *self.get_unchecked_mut(slice) } @@ -690,18 +682,32 @@ unsafe impl<T> const SliceIndex<[T]> for ops::RangeInclusive<usize> { #[inline] fn index(self, slice: &[T]) -> &[T] { - if *self.end() == usize::MAX { - slice_end_index_overflow_fail(); + let Self { mut start, mut end, exhausted } = self; + let len = slice.len(); + if end < len { + end = end + 1; + start = if exhausted { end } else { start }; + if let Some(new_len) = usize::checked_sub(end, start) { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { return &*get_offset_len_noubcheck(slice, start, new_len) } + } } - self.into_slice_range().index(slice) + slice_index_fail(start, end, slice.len()) } #[inline] fn index_mut(self, slice: &mut [T]) -> &mut [T] { - if *self.end() == usize::MAX { - slice_end_index_overflow_fail(); + let Self { mut start, mut end, exhausted } = self; + let len = slice.len(); + if end < len { + end = end + 1; + start = if exhausted { end } else { start }; + if let Some(new_len) = usize::checked_sub(end, start) { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { return &mut *get_offset_len_mut_noubcheck(slice, start, new_len) } + } } - self.into_slice_range().index_mut(slice) + slice_index_fail(start, end, slice.len()) } } @@ -852,28 +858,26 @@ where { let len = bounds.end; - let start = match range.start_bound() { - ops::Bound::Included(&start) => start, - ops::Bound::Excluded(start) => { - start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail()) - } - ops::Bound::Unbounded => 0, - }; - let end = match range.end_bound() { - ops::Bound::Included(end) => { - end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail()) - } + ops::Bound::Included(&end) if end >= len => slice_index_fail(0, end, len), + // Cannot overflow because `end < len` implies `end < usize::MAX`. 
+ ops::Bound::Included(&end) => end + 1, + + ops::Bound::Excluded(&end) if end > len => slice_index_fail(0, end, len), ops::Bound::Excluded(&end) => end, ops::Bound::Unbounded => len, }; - if start > end { - slice_index_order_fail(start, end); - } - if end > len { - slice_end_index_len_fail(end, len); - } + let start = match range.start_bound() { + ops::Bound::Excluded(&start) if start >= end => slice_index_fail(start, end, len), + // Cannot overflow because `start < end` implies `start < usize::MAX`. + ops::Bound::Excluded(&start) => start + 1, + + ops::Bound::Included(&start) if start > end => slice_index_fail(start, end, len), + ops::Bound::Included(&start) => start, + + ops::Bound::Unbounded => 0, + }; ops::Range { start, end } } @@ -982,25 +986,27 @@ pub(crate) fn into_slice_range( len: usize, (start, end): (ops::Bound<usize>, ops::Bound<usize>), ) -> ops::Range<usize> { - use ops::Bound; - let start = match start { - Bound::Included(start) => start, - Bound::Excluded(start) => { - start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail()) - } - Bound::Unbounded => 0, - }; - let end = match end { - Bound::Included(end) => { - end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail()) - } - Bound::Excluded(end) => end, - Bound::Unbounded => len, + ops::Bound::Included(end) if end >= len => slice_index_fail(0, end, len), + // Cannot overflow because `end < len` implies `end < usize::MAX`. + ops::Bound::Included(end) => end + 1, + + ops::Bound::Excluded(end) if end > len => slice_index_fail(0, end, len), + ops::Bound::Excluded(end) => end, + + ops::Bound::Unbounded => len, }; - // Don't bother with checking `start < end` and `end <= len` - // since these checks are handled by `Range` impls + let start = match start { + ops::Bound::Excluded(start) if start >= end => slice_index_fail(start, end, len), + // Cannot overflow because `start < end` implies `start < usize::MAX`. + ops::Bound::Excluded(start) => start + 1, + + ops::Bound::Included(start) if start > end => slice_index_fail(start, end, len), + ops::Bound::Included(start) => start, + + ops::Bound::Unbounded => 0, + }; start..end } diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs index ca668361ef6..59ffe7ad49c 100644 --- a/library/core/src/task/poll.rs +++ b/library/core/src/task/poll.rs @@ -125,7 +125,7 @@ impl<T, E> Poll<Result<T, E>> { } } - /// Maps a `Poll::Ready<Result<T, E>>` to `Poll::Ready<Result<T, F>>` by + /// Maps a `Poll::Ready<Result<T, E>>` to `Poll::Ready<Result<T, U>>` by /// applying a function to a contained `Poll::Ready(Err)` value, leaving all other /// variants untouched. /// diff --git a/library/core/src/unicode/mod.rs b/library/core/src/unicode/mod.rs index 49dbdeb1a6d..191fe7711f9 100644 --- a/library/core/src/unicode/mod.rs +++ b/library/core/src/unicode/mod.rs @@ -1,5 +1,6 @@ +//! Unicode internals used in liballoc and libstd. Not public API. #![unstable(feature = "unicode_internals", issue = "none")] -#![allow(missing_docs)] +#![doc(hidden)] // for use in alloc, not re-exported in std. #[rustfmt::skip] @@ -31,5 +32,4 @@ mod unicode_data; /// /// The version numbering scheme is explained in /// [Unicode 11.0 or later, Section 3.1 Versions of the Unicode Standard](https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf#page=4). 
-#[stable(feature = "unicode_version", since = "1.45.0")] pub const UNICODE_VERSION: (u8, u8, u8) = unicode_data::UNICODE_VERSION; diff --git a/library/coretests/benches/fmt.rs b/library/coretests/benches/fmt.rs index ee8e981b46b..f45b921b939 100644 --- a/library/coretests/benches/fmt.rs +++ b/library/coretests/benches/fmt.rs @@ -162,3 +162,183 @@ fn write_u8_min(bh: &mut Bencher) { black_box(format!("{}", black_box(u8::MIN))); }); } + +#[bench] +fn write_i8_bin(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:b}", black_box(0_i8)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(100_i8)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(-100_i8)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(1_i8 << 4)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i16_bin(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:b}", black_box(0_i16)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(100_i16)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(-100_i16)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(1_i16 << 8)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i32_bin(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:b}", black_box(0_i32)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(100_i32)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(-100_i32)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(1_i32 << 16)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i64_bin(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:b}", black_box(0_i64)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(100_i64)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(-100_i64)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(1_i64 << 32)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i128_bin(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:b}", black_box(0_i128)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(100_i128)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(-100_i128)).unwrap(); + write!(black_box(&mut buf), "{:b}", black_box(1_i128 << 64)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i8_oct(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:o}", black_box(0_i8)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(100_i8)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(-100_i8)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(1_i8 << 4)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i16_oct(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:o}", black_box(0_i16)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(100_i16)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(-100_i16)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(1_i16 << 8)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i32_oct(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:o}", 
black_box(0_i32)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(100_i32)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(-100_i32)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(1_i32 << 16)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i64_oct(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:o}", black_box(0_i64)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(100_i64)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(-100_i64)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(1_i64 << 32)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i128_oct(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:o}", black_box(0_i128)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(100_i128)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(-100_i128)).unwrap(); + write!(black_box(&mut buf), "{:o}", black_box(1_i128 << 64)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i8_hex(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:x}", black_box(0_i8)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(100_i8)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(-100_i8)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(1_i8 << 4)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i16_hex(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:x}", black_box(0_i16)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(100_i16)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(-100_i16)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(1_i16 << 8)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i32_hex(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:x}", black_box(0_i32)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(100_i32)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(-100_i32)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(1_i32 << 16)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i64_hex(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:x}", black_box(0_i64)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(100_i64)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(-100_i64)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(1_i64 << 32)).unwrap(); + black_box(&mut buf).clear(); + }); +} + +#[bench] +fn write_i128_hex(bh: &mut Bencher) { + let mut buf = String::with_capacity(256); + bh.iter(|| { + write!(black_box(&mut buf), "{:x}", black_box(0_i128)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(100_i128)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(-100_i128)).unwrap(); + write!(black_box(&mut buf), "{:x}", black_box(1_i128 << 64)).unwrap(); + black_box(&mut buf).clear(); + }); +} diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs index b128acfc000..d2281b1df2f 100644 --- a/library/coretests/tests/lib.rs +++ b/library/coretests/tests/lib.rs @@ -54,6 +54,7 @@ #![feature(generic_assert_internals)] #![feature(hasher_prefixfree_extras)] 
 #![feature(hashmap_internals)]
+#![feature(int_lowest_highest_one)]
 #![feature(int_roundings)]
 #![feature(ip)]
 #![feature(is_ascii_octdigit)]
diff --git a/library/coretests/tests/nonzero.rs b/library/coretests/tests/nonzero.rs
index eb06c34fd02..69e4ed9c36b 100644
--- a/library/coretests/tests/nonzero.rs
+++ b/library/coretests/tests/nonzero.rs
@@ -462,3 +462,111 @@ fn test_nonzero_fmt() {
     assert_eq!(i, nz);
 }
+
+#[test]
+fn test_nonzero_highest_one() {
+    macro_rules! nonzero_int_impl {
+        ($($T:ty),+) => {
+            $(
+                {
+                    for i in 0..<$T>::BITS {
+                        // Set single bit.
+                        assert_eq!(NonZero::<$T>::new(1 << i).unwrap().highest_one(), i);
+                        if i != <$T>::BITS - 1 {
+                            // Set lowest bits.
+                            assert_eq!(
+                                NonZero::<$T>::new(<$T>::MAX >> i).unwrap().highest_one(),
+                                <$T>::BITS - i - 2,
+                            );
+                        }
+                        // Set highest bits.
+                        assert_eq!(
+                            NonZero::<$T>::new(-1 << i).unwrap().highest_one(),
+                            <$T>::BITS - 1,
+                        );
+                    }
+                }
+            )+
+        };
+    }
+
+    macro_rules! nonzero_uint_impl {
+        ($($T:ty),+) => {
+            $(
+                {
+                    for i in 0..<$T>::BITS {
+                        // Set single bit.
+                        assert_eq!(NonZero::<$T>::new(1 << i).unwrap().highest_one(), i);
+                        // Set lowest bits.
+                        assert_eq!(
+                            NonZero::<$T>::new(<$T>::MAX >> i).unwrap().highest_one(),
+                            <$T>::BITS - i - 1,
+                        );
+                        // Set highest bits.
+                        assert_eq!(
+                            NonZero::<$T>::new(<$T>::MAX << i).unwrap().highest_one(),
+                            <$T>::BITS - 1,
+                        );
+                    }
+                }
+            )+
+        };
+    }
+
+    nonzero_int_impl!(i8, i16, i32, i64, i128, isize);
+    nonzero_uint_impl!(u8, u16, u32, u64, u128, usize);
+}
+
+#[test]
+fn test_nonzero_lowest_one() {
+    macro_rules! nonzero_int_impl {
+        ($($T:ty),+) => {
+            $(
+                {
+                    for i in 0..<$T>::BITS {
+                        // Set single bit.
+                        assert_eq!(NonZero::<$T>::new(1 << i).unwrap().lowest_one(), i);
+                        if i != <$T>::BITS - 1 {
+                            // Set lowest bits.
+                            assert_eq!(
+                                NonZero::<$T>::new(<$T>::MAX >> i).unwrap().lowest_one(),
+                                0,
+                            );
+                        }
+                        // Set highest bits.
+                        assert_eq!(
+                            NonZero::<$T>::new(-1 << i).unwrap().lowest_one(),
+                            i,
+                        );
+                    }
+                }
+            )+
+        };
+    }
+
+    macro_rules! nonzero_uint_impl {
+        ($($T:ty),+) => {
+            $(
+                {
+                    for i in 0..<$T>::BITS {
+                        // Set single bit.
+                        assert_eq!(NonZero::<$T>::new(1 << i).unwrap().lowest_one(), i);
+                        // Set lowest bits.
+                        assert_eq!(
+                            NonZero::<$T>::new(<$T>::MAX >> i).unwrap().lowest_one(),
+                            0,
+                        );
+                        // Set highest bits.
+                        assert_eq!(
+                            NonZero::<$T>::new(<$T>::MAX << i).unwrap().lowest_one(),
+                            i,
+                        );
+                    }
+                }
+            )+
+        };
+    }
+
+    nonzero_int_impl!(i8, i16, i32, i64, i128, isize);
+    nonzero_uint_impl!(u8, u16, u32, u64, u128, usize);
+}
diff --git a/library/coretests/tests/num/int_macros.rs b/library/coretests/tests/num/int_macros.rs
index ca32fce861f..1611a6466f5 100644
--- a/library/coretests/tests/num/int_macros.rs
+++ b/library/coretests/tests/num/int_macros.rs
@@ -228,6 +228,46 @@ macro_rules! int_module {
         }
 
         #[test]
+        fn test_highest_one() {
+            const ZERO: $T = 0;
+            const ONE: $T = 1;
+            const MINUS_ONE: $T = -1;
+
+            assert_eq!(ZERO.highest_one(), None);
+
+            for i in 0..<$T>::BITS {
+                // Set single bit.
+                assert_eq!((ONE << i).highest_one(), Some(i));
+                if i != <$T>::BITS - 1 {
+                    // Set lowest bits.
+                    assert_eq!((<$T>::MAX >> i).highest_one(), Some(<$T>::BITS - i - 2));
+                }
+                // Set highest bits.
+                assert_eq!((MINUS_ONE << i).highest_one(), Some(<$T>::BITS - 1));
+            }
+        }
+
+        #[test]
+        fn test_lowest_one() {
+            const ZERO: $T = 0;
+            const ONE: $T = 1;
+            const MINUS_ONE: $T = -1;
+
+            assert_eq!(ZERO.lowest_one(), None);
+
+            for i in 0..<$T>::BITS {
+                // Set single bit.
+                assert_eq!((ONE << i).lowest_one(), Some(i));
+                if i != <$T>::BITS - 1 {
+                    // Set lowest bits.
+ assert_eq!((<$T>::MAX >> i).lowest_one(), Some(0)); + } + // Set highest bits. + assert_eq!((MINUS_ONE << i).lowest_one(), Some(i)); + } + } + + #[test] fn test_from_str() { fn from_str<T: std::str::FromStr>(t: &str) -> Option<T> { std::str::FromStr::from_str(t).ok() diff --git a/library/coretests/tests/num/uint_macros.rs b/library/coretests/tests/num/uint_macros.rs index 8f389de70aa..c7d10ea4d88 100644 --- a/library/coretests/tests/num/uint_macros.rs +++ b/library/coretests/tests/num/uint_macros.rs @@ -184,6 +184,40 @@ macro_rules! uint_module { } } + #[test] + fn test_highest_one() { + const ZERO: $T = 0; + const ONE: $T = 1; + + assert_eq!(ZERO.highest_one(), None); + + for i in 0..<$T>::BITS { + // Set single bit. + assert_eq!((ONE << i).highest_one(), Some(i)); + // Set lowest bits. + assert_eq!((<$T>::MAX >> i).highest_one(), Some(<$T>::BITS - i - 1)); + // Set highest bits. + assert_eq!((<$T>::MAX << i).highest_one(), Some(<$T>::BITS - 1)); + } + } + + #[test] + fn test_lowest_one() { + const ZERO: $T = 0; + const ONE: $T = 1; + + assert_eq!(ZERO.lowest_one(), None); + + for i in 0..<$T>::BITS { + // Set single bit. + assert_eq!((ONE << i).lowest_one(), Some(i)); + // Set lowest bits. + assert_eq!((<$T>::MAX >> i).lowest_one(), Some(0)); + // Set highest bits. + assert_eq!((<$T>::MAX << i).lowest_one(), Some(i)); + } + } + fn from_str<T: core::str::FromStr>(t: &str) -> Option<T> { core::str::FromStr::from_str(t).ok() } diff --git a/library/coretests/tests/panic/location.rs b/library/coretests/tests/panic/location.rs index 910001bcc1c..a7db05a15c6 100644 --- a/library/coretests/tests/panic/location.rs +++ b/library/coretests/tests/panic/location.rs @@ -48,10 +48,18 @@ fn location_const_column() { } #[test] +fn location_file_lifetime<'x>() { + // Verify that the returned `&str`s lifetime is derived from the generic + // lifetime 'a, not the lifetime of `&self`, when calling `Location::file`. + // Test failure is indicated by a compile failure, not a runtime panic. + let _: for<'a> fn(&'a Location<'x>) -> &'x str = Location::file; +} + +#[test] fn location_debug() { let f = format!("{:?}", Location::caller()); assert!(f.contains(&format!("{:?}", file!()))); - assert!(f.contains("52")); + assert!(f.contains("60")); assert!(f.contains("29")); } diff --git a/library/coretests/tests/slice.rs b/library/coretests/tests/slice.rs index 992f24cb18f..110c4e5f3b4 100644 --- a/library/coretests/tests/slice.rs +++ b/library/coretests/tests/slice.rs @@ -1492,28 +1492,28 @@ mod slice_index { // note: using 0 specifically ensures that the result of overflowing is 0..0, // so that `get` doesn't simply return None for the wrong reason. bad: data[0 ..= usize::MAX]; - message: "maximum usize"; + message: "out of range"; } in mod rangetoinclusive_overflow { data: [0, 1]; bad: data[..= usize::MAX]; - message: "maximum usize"; + message: "out of range"; } in mod boundpair_overflow_end { data: [0; 1]; bad: data[(Bound::Unbounded, Bound::Included(usize::MAX))]; - message: "maximum usize"; + message: "out of range"; } in mod boundpair_overflow_start { data: [0; 1]; bad: data[(Bound::Excluded(usize::MAX), Bound::Unbounded)]; - message: "maximum usize"; + message: "out of range"; } } // panic_cases! 
} @@ -2008,7 +2008,7 @@ fn test_copy_within_panics_src_inverted() { bytes.copy_within(2..1, 0); } #[test] -#[should_panic(expected = "attempted to index slice up to maximum usize")] +#[should_panic(expected = "out of range")] fn test_copy_within_panics_src_out_of_bounds() { let mut bytes = *b"Hello, World!"; // an inclusive range ending at usize::MAX would make src_end overflow diff --git a/library/panic_unwind/Cargo.toml b/library/panic_unwind/Cargo.toml index 13d1a7160da..67fc919c42c 100644 --- a/library/panic_unwind/Cargo.toml +++ b/library/panic_unwind/Cargo.toml @@ -13,7 +13,6 @@ doc = false [dependencies] alloc = { path = "../alloc" } -cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } core = { path = "../rustc-std-workspace-core", package = "rustc-std-workspace-core" } unwind = { path = "../unwind" } diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs index 50bd933aca2..83311f32380 100644 --- a/library/panic_unwind/src/lib.rs +++ b/library/panic_unwind/src/lib.rs @@ -15,6 +15,7 @@ #![unstable(feature = "panic_unwind", issue = "32837")] #![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] #![feature(cfg_emscripten_wasm_eh)] +#![feature(cfg_select)] #![feature(core_intrinsics)] #![feature(lang_items)] #![feature(panic_unwind)] @@ -33,18 +34,21 @@ use alloc::boxed::Box; use core::any::Any; use core::panic::PanicPayload; -cfg_if::cfg_if! { - if #[cfg(all(target_os = "emscripten", not(emscripten_wasm_eh)))] { +cfg_select! { + all(target_os = "emscripten", not(emscripten_wasm_eh)) => { #[path = "emcc.rs"] mod imp; - } else if #[cfg(target_os = "hermit")] { + } + target_os = "hermit" => { #[path = "hermit.rs"] mod imp; - } else if #[cfg(target_os = "l4re")] { + } + target_os = "l4re" => { // L4Re is unix family but does not yet support unwinding. #[path = "dummy.rs"] mod imp; - } else if #[cfg(any( + } + any( all(target_family = "windows", target_env = "gnu"), target_os = "psp", target_os = "xous", @@ -52,19 +56,22 @@ cfg_if::cfg_if! { all(target_family = "unix", not(any(target_os = "espidf", target_os = "nuttx"))), all(target_vendor = "fortanix", target_env = "sgx"), target_family = "wasm", - ))] { + ) => { #[path = "gcc.rs"] mod imp; - } else if #[cfg(miri)] { + } + miri => { // Use the Miri runtime on Windows as miri doesn't support funclet based unwinding, // only landingpad based unwinding. Also use the Miri runtime on unsupported platforms. #[path = "miri.rs"] mod imp; - } else if #[cfg(all(target_env = "msvc", not(target_arch = "arm")))] { + } + all(target_env = "msvc", not(target_arch = "arm")) => { // LLVM does not support unwinding on 32 bit ARM msvc (thumbv7a-pc-windows-msvc) #[path = "seh.rs"] mod imp; - } else { + } + _ => { // Targets that don't support unwinding. // - os=none ("bare metal" targets) // - os=uefi diff --git a/library/panic_unwind/src/seh.rs b/library/panic_unwind/src/seh.rs index 668e988abff..a5d67dbb6a9 100644 --- a/library/panic_unwind/src/seh.rs +++ b/library/panic_unwind/src/seh.rs @@ -289,10 +289,11 @@ macro_rules! define_cleanup { } } } -cfg_if::cfg_if! { - if #[cfg(target_arch = "x86")] { +cfg_select! { + target_arch = "x86" => { define_cleanup!("thiscall" "thiscall-unwind"); - } else { + } + _ => { define_cleanup!("C" "C-unwind"); } } diff --git a/library/std/src/collections/mod.rs b/library/std/src/collections/mod.rs index 889ed3c5380..6104a02c739 100644 --- a/library/std/src/collections/mod.rs +++ b/library/std/src/collections/mod.rs @@ -26,7 +26,7 @@ //! should be considered. 
Detailed discussions of strengths and weaknesses of //! individual collections can be found on their own documentation pages. //! -//! ### Use a `Vec` when: +//! ### Use a [`Vec`] when: //! * You want to collect items up to be processed or sent elsewhere later, and //! don't care about any properties of the actual values being stored. //! * You want a sequence of elements in a particular order, and will only be @@ -35,25 +35,25 @@ //! * You want a resizable array. //! * You want a heap-allocated array. //! -//! ### Use a `VecDeque` when: +//! ### Use a [`VecDeque`] when: //! * You want a [`Vec`] that supports efficient insertion at both ends of the //! sequence. //! * You want a queue. //! * You want a double-ended queue (deque). //! -//! ### Use a `LinkedList` when: +//! ### Use a [`LinkedList`] when: //! * You want a [`Vec`] or [`VecDeque`] of unknown size, and can't tolerate //! amortization. //! * You want to efficiently split and append lists. //! * You are *absolutely* certain you *really*, *truly*, want a doubly linked //! list. //! -//! ### Use a `HashMap` when: +//! ### Use a [`HashMap`] when: //! * You want to associate arbitrary keys with an arbitrary value. //! * You want a cache. //! * You want a map, with no extra functionality. //! -//! ### Use a `BTreeMap` when: +//! ### Use a [`BTreeMap`] when: //! * You want a map sorted by its keys. //! * You want to be able to get a range of entries on-demand. //! * You're interested in what the smallest or largest key-value pair is. @@ -65,7 +65,7 @@ //! * There is no meaningful value to associate with your keys. //! * You just want a set. //! -//! ### Use a `BinaryHeap` when: +//! ### Use a [`BinaryHeap`] when: //! //! * You want to store a bunch of elements, but only ever want to process the //! "biggest" or "most important" one at any given time. diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs index 8d7edc732af..1214490caad 100644 --- a/library/std/src/ffi/os_str.rs +++ b/library/std/src/ffi/os_str.rs @@ -137,7 +137,7 @@ impl OsString { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] #[inline] - #[rustc_const_unstable(feature = "const_pathbuf_osstring_new", issue = "141520")] + #[rustc_const_stable(feature = "const_pathbuf_osstring_new", since = "CURRENT_RUSTC_VERSION")] pub const fn new() -> OsString { OsString { inner: Buf::from_string(String::new()) } } diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index d4a584f4d14..1ed4f2f9f0c 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -3156,6 +3156,25 @@ pub fn set_permissions<P: AsRef<Path>>(path: P, perm: Permissions) -> io::Result fs_imp::set_permissions(path.as_ref(), perm.0) } +/// Set the permissions of a file, unless it is a symlink. +/// +/// Note that the non-final path elements are allowed to be symlinks. +/// +/// # Platform-specific behavior +/// +/// Currently unimplemented on Windows. +/// +/// On Unix platforms, this results in a [`FilesystemLoop`] error if the last element is a symlink. +/// +/// This behavior may change in the future. 
+/// +/// [`FilesystemLoop`]: crate::io::ErrorKind::FilesystemLoop +#[doc(alias = "chmod", alias = "SetFileAttributes")] +#[unstable(feature = "set_permissions_nofollow", issue = "141607")] +pub fn set_permissions_nofollow<P: AsRef<Path>>(path: P, perm: Permissions) -> io::Result<()> { + fs_imp::set_permissions_nofollow(path.as_ref(), perm) +} + impl DirBuilder { /// Creates a new set of options with default mode/security settings for all /// platforms and also non-recursive. diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs index 574288e579e..9b600cd5575 100644 --- a/library/std/src/io/buffered/bufreader/buffer.rs +++ b/library/std/src/io/buffered/bufreader/buffer.rs @@ -122,7 +122,7 @@ impl Buffer { /// Remove bytes that have already been read from the buffer. pub fn backshift(&mut self) { - self.buf.copy_within(self.pos.., 0); + self.buf.copy_within(self.pos..self.filled, 0); self.filled -= self.pos; self.pos = 0; } diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs index 562fdbf4ff7..dcfa189823f 100644 --- a/library/std/src/io/error.rs +++ b/library/std/src/io/error.rs @@ -18,7 +18,7 @@ use crate::{error, fmt, result, sys}; /// This type is broadly used across [`std::io`] for any operation which may /// produce an error. /// -/// This typedef is generally used to avoid writing out [`io::Error`] directly and +/// This type alias is generally used to avoid writing out [`io::Error`] directly and /// is otherwise a direct mapping to [`Result`]. /// /// While usual Rust style is to import types directly, aliases of [`Result`] diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index d351ee5e739..ff0e29e04c2 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -2461,7 +2461,7 @@ pub trait BufRead: Read { /// delimiter or EOF is found. /// /// If successful, this function will return the total number of bytes read, - /// including the delimiter byte. + /// including the delimiter byte if found. /// /// This is useful for efficiently skipping data such as NUL-terminated strings /// in binary file formats without buffering. @@ -2489,7 +2489,7 @@ pub trait BufRead: Read { /// ``` /// use std::io::{self, BufRead}; /// - /// let mut cursor = io::Cursor::new(b"Ferris\0Likes long walks on the beach\0Crustacean\0"); + /// let mut cursor = io::Cursor::new(b"Ferris\0Likes long walks on the beach\0Crustacean\0!"); /// /// // read name /// let mut name = Vec::new(); @@ -2509,6 +2509,11 @@ pub trait BufRead: Read { /// .expect("reading from cursor won't fail"); /// assert_eq!(num_bytes, 11); /// assert_eq!(animal, b"Crustacean\0"); + /// + /// // reach EOF + /// let num_bytes = cursor.skip_until(b'\0') + /// .expect("reading from cursor won't fail"); + /// assert_eq!(num_bytes, 1); /// ``` #[stable(feature = "bufread_skip_until", since = "1.83.0")] fn skip_until(&mut self, byte: u8) -> Result<usize> { diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index ab417b6c72f..f0de03c9a28 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -15,7 +15,7 @@ //! //! If you already know the name of what you are looking for, the fastest way to //! find it is to use the <a href="#" onclick="window.searchState.focus();">search -//! bar</a> at the top of the page. +//! button</a> at the top of the page. //! //! Otherwise, you may want to jump to one of these useful sections: //! 
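The `set_permissions_nofollow` function added above is easiest to read from the caller's side. A minimal, Unix-oriented sketch of its intended use (nightly-only; the `config.toml` path is invented for illustration):

```
#![feature(set_permissions_nofollow)]

use std::fs;
use std::io;

fn main() -> io::Result<()> {
    // Make a file read-only.
    let mut perms = fs::metadata("config.toml")?.permissions();
    perms.set_readonly(true);

    // Unlike `fs::set_permissions`, this does not follow a final symlink:
    // per the docs above, on Unix it fails with `ErrorKind::FilesystemLoop`
    // if "config.toml" is itself a symlink, instead of changing the
    // permissions of the link's target.
    fs::set_permissions_nofollow("config.toml", perms)?;
    Ok(())
}
```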
diff --git a/library/std/src/path.rs b/library/std/src/path.rs index 23e957484a5..3899fbf86db 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -1191,7 +1191,7 @@ impl PathBuf { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] #[inline] - #[rustc_const_unstable(feature = "const_pathbuf_osstring_new", issue = "141520")] + #[rustc_const_stable(feature = "const_pathbuf_osstring_new", since = "CURRENT_RUSTC_VERSION")] pub const fn new() -> PathBuf { PathBuf { inner: OsString::new() } } diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs index a40e29a772a..3231125f7a1 100644 --- a/library/std/src/sync/lazy_lock.rs +++ b/library/std/src/sync/lazy_lock.rs @@ -244,7 +244,11 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> { #[inline] #[stable(feature = "lazy_cell", since = "1.80.0")] pub fn force(this: &LazyLock<T, F>) -> &T { - this.once.call_once(|| { + this.once.call_once_force(|state| { + if state.is_poisoned() { + panic_poisoned(); + } + // SAFETY: `call_once` only runs this closure once, ever. let data = unsafe { &mut *this.data.get() }; let f = unsafe { ManuallyDrop::take(&mut data.f) }; @@ -257,8 +261,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> { // * the closure was called and initialized `value`. // * the closure was called and panicked, so this point is never reached. // * the closure was not called, but a previous call initialized `value`. - // * the closure was not called because the Once is poisoned, so this point - // is never reached. + // * the closure was not called because the Once is poisoned, which we handled above. // So `value` has definitely been initialized and will not be modified again. unsafe { &*(*this.data.get()).value } } diff --git a/library/std/src/sync/nonpoison.rs b/library/std/src/sync/nonpoison.rs index 2bbf226dc2c..b3ae376e70d 100644 --- a/library/std/src/sync/nonpoison.rs +++ b/library/std/src/sync/nonpoison.rs @@ -33,5 +33,10 @@ impl fmt::Display for WouldBlock { pub use self::mutex::MappedMutexGuard; #[unstable(feature = "nonpoison_mutex", issue = "134645")] pub use self::mutex::{Mutex, MutexGuard}; +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard}; +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; mod mutex; +mod rwlock; diff --git a/library/std/src/sync/nonpoison/mutex.rs b/library/std/src/sync/nonpoison/mutex.rs index b6861c78f00..fd1e671d7a3 100644 --- a/library/std/src/sync/nonpoison/mutex.rs +++ b/library/std/src/sync/nonpoison/mutex.rs @@ -100,7 +100,7 @@ pub struct MutexGuard<'a, T: ?Sized + 'a> { lock: &'a Mutex<T>, } -/// A [`MutexGuard`] is not `Send` to maximize platform portablity. +/// A [`MutexGuard`] is not `Send` to maximize platform portability. /// /// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to /// release mutex locks on the same thread they were acquired. 
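The `LazyLock::force` change above replaces `call_once` with `call_once_force` so that the poisoned case is handled inside `force` itself rather than left to `call_once`'s own poison panic. A minimal sketch of the observable behavior (both accesses panic; the second trips the explicit `state.is_poisoned()` check instead of re-running the already-consumed initializer):

```
use std::panic::{AssertUnwindSafe, catch_unwind};
use std::sync::LazyLock;

fn main() {
    static VALUE: LazyLock<u32> = LazyLock::new(|| panic!("init failed"));

    // First access runs the initializer, which panics and poisons the
    // underlying `Once`.
    assert!(catch_unwind(AssertUnwindSafe(|| *VALUE)).is_err());

    // Later accesses hit the poison branch added above and panic right away
    // instead of attempting to run the initializer again.
    assert!(catch_unwind(AssertUnwindSafe(|| *VALUE)).is_err());
}
```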
diff --git a/library/std/src/sync/nonpoison/rwlock.rs b/library/std/src/sync/nonpoison/rwlock.rs new file mode 100644 index 00000000000..eb0aef99cc1 --- /dev/null +++ b/library/std/src/sync/nonpoison/rwlock.rs @@ -0,0 +1,1081 @@ +use crate::cell::UnsafeCell; +use crate::fmt; +use crate::marker::PhantomData; +use crate::mem::{self, ManuallyDrop, forget}; +use crate::ops::{Deref, DerefMut}; +use crate::ptr::NonNull; +use crate::sync::nonpoison::{TryLockResult, WouldBlock}; +use crate::sys::sync as sys; + +/// A reader-writer lock that does not keep track of lock poisoning. +/// +/// For more information about reader-writer locks, check out the documentation for the poisoning +/// variant of this lock (which can be found at [`poison::RwLock`]). +/// +/// [`poison::RwLock`]: crate::sync::poison::RwLock +/// +/// # Examples +/// +/// ``` +/// #![feature(nonpoison_rwlock)] +/// +/// use std::sync::nonpoison::RwLock; +/// +/// let lock = RwLock::new(5); +/// +/// // many reader locks can be held at once +/// { +/// let r1 = lock.read(); +/// let r2 = lock.read(); +/// assert_eq!(*r1, 5); +/// assert_eq!(*r2, 5); +/// } // read locks are dropped at this point +/// +/// // only one write lock may be held, however +/// { +/// let mut w = lock.write(); +/// *w += 1; +/// assert_eq!(*w, 6); +/// } // write lock is dropped here +/// ``` +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLock")] +pub struct RwLock<T: ?Sized> { + /// The inner [`sys::RwLock`] that synchronizes thread access to the protected data. + inner: sys::RwLock, + /// The lock-protected data. + data: UnsafeCell<T>, +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +unsafe impl<T: ?Sized + Send> Send for RwLock<T> {} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Guards +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// RAII structure used to release the shared read access of a lock when +/// dropped. +/// +/// This structure is created by the [`read`] and [`try_read`] methods on +/// [`RwLock`]. +/// +/// [`read`]: RwLock::read +/// [`try_read`]: RwLock::try_read +#[must_use = "if unused the RwLock will immediately unlock"] +#[must_not_suspend = "holding a RwLockReadGuard across suspend \ + points can cause deadlocks, delays, \ + and cause Futures to not implement `Send`"] +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +#[clippy::has_significant_drop] +#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLockReadGuard")] +pub struct RwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> { + /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of + /// `&'rwlock T` to avoid `noalias` violations, because a `RwLockReadGuard` instance only holds + /// immutability until it drops, not for its whole scope. + /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also + /// covariant over `T`, just like we would have with `&T`. + data: NonNull<T>, + /// A reference to the internal [`sys::RwLock`] that we have read-locked. 
+    inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] and [`try_write`] methods
+/// on [`RwLock`].
+///
+/// [`write`]: RwLock::write
+/// [`try_write`]: RwLock::try_write
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockWriteGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLockWriteGuard")]
+pub struct RwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A reference to the [`RwLock`] that we have write-locked.
+    lock: &'rwlock RwLock<T>,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped, which can point to a subfield of the protected data.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods
+/// on [`RwLockReadGuard`].
+///
+/// [`map`]: RwLockReadGuard::map
+/// [`filter_map`]: RwLockReadGuard::filter_map
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a MappedRwLockReadGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedRwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockReadGuard` instance only
+    /// holds immutability until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+    /// covariant over `T`, just like we would have with `&T`.
+    data: NonNull<T>,
+    /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+    inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for MappedRwLockReadGuard<'_, T> {}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped, which can point to a subfield of the protected data.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods
+/// on [`RwLockWriteGuard`].
+///
+/// [`map`]: RwLockWriteGuard::map
+/// [`filter_map`]: RwLockWriteGuard::filter_map
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a MappedRwLockWriteGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedRwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+    /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockWriteGuard` instance only
+    /// holds uniqueness until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations.
+    data: NonNull<T>,
+    /// `NonNull` is covariant over `T`, so we add a `PhantomData<&'rwlock mut T>` field here to
+    /// enforce the correct invariance over `T`.
+    _variance: PhantomData<&'rwlock mut T>,
+    /// A reference to the internal [`sys::RwLock`] that we have write-locked.
+    inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Implementations
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+impl<T> RwLock<T> {
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let lock = RwLock::new(5);
+    /// ```
+    #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    #[inline]
+    pub const fn new(t: T) -> RwLock<T> {
+        RwLock { inner: sys::RwLock::new(), data: UnsafeCell::new(t) }
+    }
+
+    /// Returns the contained value by cloning it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let mut lock = RwLock::new(7);
+    ///
+    /// assert_eq!(lock.get_cloned(), 7);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn get_cloned(&self) -> T
+    where
+        T: Clone,
+    {
+        self.read().clone()
+    }
+
+    /// Sets the contained value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_rwlock)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::RwLock;
+    ///
+    /// let mut lock = RwLock::new(7);
+    ///
+    /// assert_eq!(lock.get_cloned(), 7);
+    /// lock.set(11);
+    /// assert_eq!(lock.get_cloned(), 11);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+    pub fn set(&self, value: T) {
+        if mem::needs_drop::<T>() {
+            // If the contained value has a non-trivial destructor, we
+            // call that destructor after the lock has been released.
+ drop(self.replace(value)) + } else { + *self.write() = value; + } + } + + /// Replaces the contained value with `value`, and returns the old contained value. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// #![feature(lock_value_accessors)] + /// + /// use std::sync::nonpoison::RwLock; + /// + /// let mut lock = RwLock::new(7); + /// + /// assert_eq!(lock.replace(11), 7); + /// assert_eq!(lock.get_cloned(), 11); + /// ``` + #[unstable(feature = "lock_value_accessors", issue = "133407")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn replace(&self, value: T) -> T { + let mut guard = self.write(); + mem::replace(&mut *guard, value) + } +} + +impl<T: ?Sized> RwLock<T> { + /// Locks this `RwLock` with shared read access, blocking the current thread + /// until it can be acquired. + /// + /// The calling thread will be blocked until there are no more writers which + /// hold the lock. There may be other readers currently inside the lock when + /// this method returns. This method does not provide any guarantees with + /// respect to the ordering of whether contentious readers or writers will + /// acquire the lock first. + /// + /// Returns an RAII guard which will release this thread's shared access + /// once it is dropped. + /// + /// # Panics + /// + /// This function might panic when called if the lock is already held by the current thread. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// + /// use std::sync::Arc; + /// use std::sync::nonpoison::RwLock; + /// use std::thread; + /// + /// let lock = Arc::new(RwLock::new(1)); + /// let c_lock = Arc::clone(&lock); + /// + /// let n = lock.read(); + /// assert_eq!(*n, 1); + /// + /// thread::spawn(move || { + /// let r = c_lock.read(); + /// }).join().unwrap(); + /// ``` + #[inline] + #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn read(&self) -> RwLockReadGuard<'_, T> { + unsafe { + self.inner.read(); + RwLockReadGuard::new(self) + } + } + + /// Attempts to acquire this `RwLock` with shared read access. + /// + /// If the access could not be granted at this time, then `Err` is returned. + /// Otherwise, an RAII guard is returned which will release the shared access + /// when it is dropped. + /// + /// This function does not block. + /// + /// This function does not provide any guarantees with respect to the ordering + /// of whether contentious readers or writers will acquire the lock first. + /// + /// # Errors + /// + /// This function will return the [`WouldBlock`] error if the `RwLock` could + /// not be acquired because it was already locked exclusively. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// + /// use std::sync::nonpoison::RwLock; + /// + /// let lock = RwLock::new(1); + /// + /// match lock.try_read() { + /// Ok(n) => assert_eq!(*n, 1), + /// Err(_) => unreachable!(), + /// }; + /// ``` + #[inline] + #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> { + unsafe { + if self.inner.try_read() { Ok(RwLockReadGuard::new(self)) } else { Err(WouldBlock) } + } + } + + /// Locks this `RwLock` with exclusive write access, blocking the current + /// thread until it can be acquired. + /// + /// This function will not return while other writers or other readers + /// currently have access to the lock. + /// + /// Returns an RAII guard which will drop the write access of this `RwLock` + /// when dropped. 
+ /// + /// # Panics + /// + /// This function might panic when called if the lock is already held by the current thread. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// + /// use std::sync::nonpoison::RwLock; + /// + /// let lock = RwLock::new(1); + /// + /// let mut n = lock.write(); + /// *n = 2; + /// + /// assert!(lock.try_read().is_err()); + /// ``` + #[inline] + #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn write(&self) -> RwLockWriteGuard<'_, T> { + unsafe { + self.inner.write(); + RwLockWriteGuard::new(self) + } + } + + /// Attempts to lock this `RwLock` with exclusive write access. + /// + /// If the lock could not be acquired at this time, then `Err` is returned. + /// Otherwise, an RAII guard is returned which will release the lock when + /// it is dropped. + /// + /// This function does not block. + /// + /// This function does not provide any guarantees with respect to the ordering + /// of whether contentious readers or writers will acquire the lock first. + /// + /// # Errors + /// + /// This function will return the [`WouldBlock`] error if the `RwLock` could + /// not be acquired because it was already locked. + /// + /// [`WouldBlock`]: WouldBlock + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// + /// use std::sync::nonpoison::RwLock; + /// + /// let lock = RwLock::new(1); + /// + /// let n = lock.read(); + /// assert_eq!(*n, 1); + /// + /// assert!(lock.try_write().is_err()); + /// ``` + #[inline] + #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> { + unsafe { + if self.inner.try_write() { Ok(RwLockWriteGuard::new(self)) } else { Err(WouldBlock) } + } + } + + /// Consumes this `RwLock`, returning the underlying data. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// + /// use std::sync::nonpoison::RwLock; + /// + /// let lock = RwLock::new(String::new()); + /// { + /// let mut s = lock.write(); + /// *s = "modified".to_owned(); + /// } + /// assert_eq!(lock.into_inner(), "modified"); + /// ``` + #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn into_inner(self) -> T + where + T: Sized, + { + self.data.into_inner() + } + + /// Returns a mutable reference to the underlying data. + /// + /// Since this call borrows the `RwLock` mutably, no actual locking needs to + /// take place -- the mutable borrow statically guarantees no new locks can be acquired + /// while this reference exists. Note that this method does not clear any previously abandoned + /// locks (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]). + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// + /// use std::sync::nonpoison::RwLock; + /// + /// let mut lock = RwLock::new(0); + /// *lock.get_mut() = 10; + /// assert_eq!(*lock.read(), 10); + /// ``` + #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn get_mut(&mut self) -> &mut T { + self.data.get_mut() + } + + /// Returns a raw pointer to the underlying data. + /// + /// The returned pointer is always non-null and properly aligned, but it is + /// the user's responsibility to ensure that any reads and writes through it + /// are properly synchronized to avoid data races, and that it is not read + /// or written through after the lock is dropped. 
+ #[unstable(feature = "rwlock_data_ptr", issue = "140368")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn data_ptr(&self) -> *mut T { + self.data.get() + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut d = f.debug_struct("RwLock"); + match self.try_read() { + Ok(guard) => { + d.field("data", &&*guard); + } + Err(WouldBlock) => { + d.field("data", &format_args!("<locked>")); + } + } + d.finish_non_exhaustive() + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: Default> Default for RwLock<T> { + /// Creates a new `RwLock<T>`, with the `Default` value for T. + fn default() -> RwLock<T> { + RwLock::new(Default::default()) + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T> From<T> for RwLock<T> { + /// Creates a new instance of an `RwLock<T>` which is unlocked. + /// This is equivalent to [`RwLock::new`]. + fn from(t: T) -> Self { + RwLock::new(t) + } +} + +impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> { + /// Creates a new instance of `RwLockReadGuard<T>` from a `RwLock<T>`. + /// + /// # Safety + /// + /// This function is safe if and only if the same thread has successfully and safely called + /// `lock.inner.read()`, `lock.inner.try_read()`, or `lock.inner.downgrade()` before + /// instantiating this object. + unsafe fn new(lock: &'rwlock RwLock<T>) -> RwLockReadGuard<'rwlock, T> { + RwLockReadGuard { + data: unsafe { NonNull::new_unchecked(lock.data.get()) }, + inner_lock: &lock.inner, + } + } + + /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g. + /// an enum variant. + /// + /// The `RwLock` is already locked for reading, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `RwLockReadGuard::map(...)`. A method would interfere with methods of + /// the same name on the contents of the `RwLockReadGuard` used through + /// `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U> + where + F: FnOnce(&T) -> &U, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { orig.data.as_ref() })); + let orig = ManuallyDrop::new(orig); + MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock } + } + + /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. The + /// original guard is returned as an `Err(...)` if the closure returns + /// `None`. + /// + /// The `RwLock` is already locked for reading, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `RwLockReadGuard::filter_map(...)`. A method would interfere with methods + /// of the same name on the contents of the `RwLockReadGuard` used through + /// `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). 
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self> + where + F: FnOnce(&T) -> Option<&U>, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + match f(unsafe { orig.data.as_ref() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }) + } + None => Err(orig), + } + } +} + +impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> { + /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`. + /// + /// # Safety + /// + /// This function is safe if and only if the same thread has successfully and safely called + /// `lock.inner.write()`, `lock.inner.try_write()`, or `lock.inner.try_upgrade` before + /// instantiating this object. + unsafe fn new(lock: &'rwlock RwLock<T>) -> RwLockWriteGuard<'rwlock, T> { + RwLockWriteGuard { lock } + } + + /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`]. + /// + /// Since we have the `RwLockWriteGuard`, the [`RwLock`] must already be locked for writing, so + /// this method cannot fail. + /// + /// After downgrading, other readers will be allowed to read the protected data. + /// + /// # Examples + /// + /// `downgrade` takes ownership of the `RwLockWriteGuard` and returns a [`RwLockReadGuard`]. + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// #![feature(rwlock_downgrade)] + /// + /// use std::sync::nonpoison::{RwLock, RwLockWriteGuard}; + /// + /// let rw = RwLock::new(0); + /// + /// let mut write_guard = rw.write(); + /// *write_guard = 42; + /// + /// let read_guard = RwLockWriteGuard::downgrade(write_guard); + /// assert_eq!(42, *read_guard); + /// ``` + /// + /// `downgrade` will _atomically_ change the state of the [`RwLock`] from exclusive mode into + /// shared mode. This means that it is impossible for another writing thread to get in between a + /// thread calling `downgrade` and any reads it performs after downgrading. + /// + /// ``` + /// #![feature(nonpoison_rwlock)] + /// #![feature(rwlock_downgrade)] + /// + /// use std::sync::Arc; + /// use std::sync::nonpoison::{RwLock, RwLockWriteGuard}; + /// + /// let rw = Arc::new(RwLock::new(1)); + /// + /// // Put the lock in write mode. + /// let mut main_write_guard = rw.write(); + /// + /// let rw_clone = rw.clone(); + /// let evil_handle = std::thread::spawn(move || { + /// // This will not return until the main thread drops the `main_read_guard`. + /// let mut evil_guard = rw_clone.write(); + /// + /// assert_eq!(*evil_guard, 2); + /// *evil_guard = 3; + /// }); + /// + /// *main_write_guard = 2; + /// + /// // Atomically downgrade the write guard into a read guard. + /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard); + /// + /// // Since `downgrade` is atomic, the writer thread cannot have changed the protected data. 
+ /// assert_eq!(*main_read_guard, 2, "`downgrade` was not atomic"); + /// # + /// # drop(main_read_guard); + /// # evil_handle.join().unwrap(); + /// # + /// # let final_check = rw.read(); + /// # assert_eq!(*final_check, 3); + /// ``` + #[unstable(feature = "rwlock_downgrade", issue = "128203")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn downgrade(s: Self) -> RwLockReadGuard<'rwlock, T> { + let lock = s.lock; + + // We don't want to call the destructor since that calls `write_unlock`. + forget(s); + + // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write + // mode, satisfying the `downgrade` contract. + unsafe { lock.inner.downgrade() }; + + // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract. + unsafe { RwLockReadGuard::new(lock) } + } + + /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g. + /// an enum variant. + /// + /// The `RwLock` is already locked for writing, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `RwLockWriteGuard::map(...)`. A method would interfere with methods of + /// the same name on the contents of the `RwLockWriteGuard` used through + /// `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U> + where + F: FnOnce(&mut T) -> &mut U, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() })); + let orig = ManuallyDrop::new(orig); + MappedRwLockWriteGuard { data, inner_lock: &orig.lock.inner, _variance: PhantomData } + } + + /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data. The + /// original guard is returned as an `Err(...)` if the closure returns + /// `None`. + /// + /// The `RwLock` is already locked for writing, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `RwLockWriteGuard::filter_map(...)`. A method would interfere with methods + /// of the same name on the contents of the `RwLockWriteGuard` used through + /// `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self> + where + F: FnOnce(&mut T) -> Option<&mut U>, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. 
+ match f(unsafe { &mut *orig.lock.data.get() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedRwLockWriteGuard { + data, + inner_lock: &orig.lock.inner, + _variance: PhantomData, + }) + } + None => Err(orig), + } + } +} + +impl<'rwlock, T: ?Sized> MappedRwLockReadGuard<'rwlock, T> { + /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, + /// e.g. an enum variant. + /// + /// The `RwLock` is already locked for reading, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MappedRwLockReadGuard::map(...)`. A method would interfere with + /// methods of the same name on the contents of the `MappedRwLockReadGuard` + /// used through `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U> + where + F: FnOnce(&T) -> &U, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { orig.data.as_ref() })); + let orig = ManuallyDrop::new(orig); + MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock } + } + + /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. + /// The original guard is returned as an `Err(...)` if the closure returns + /// `None`. + /// + /// The `RwLock` is already locked for reading, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with + /// methods of the same name on the contents of the `MappedRwLockReadGuard` + /// used through `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self> + where + F: FnOnce(&T) -> Option<&U>, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + match f(unsafe { orig.data.as_ref() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }) + } + None => Err(orig), + } + } +} + +impl<'rwlock, T: ?Sized> MappedRwLockWriteGuard<'rwlock, T> { + /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, + /// e.g. an enum variant. + /// + /// The `RwLock` is already locked for writing, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MappedRwLockWriteGuard::map(...)`. 
A method would interfere with + /// methods of the same name on the contents of the `MappedRwLockWriteGuard` + /// used through `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U> + where + F: FnOnce(&mut T) -> &mut U, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { orig.data.as_mut() })); + let orig = ManuallyDrop::new(orig); + MappedRwLockWriteGuard { data, inner_lock: orig.inner_lock, _variance: PhantomData } + } + + /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data. + /// The original guard is returned as an `Err(...)` if the closure returns + /// `None`. + /// + /// The `RwLock` is already locked for writing, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MappedRwLockWriteGuard::filter_map(...)`. A method would interfere with + /// methods of the same name on the contents of the `MappedRwLockWriteGuard` + /// used through `Deref`. + /// + /// # Panics + /// + /// If the closure panics, the guard will be dropped (unlocked). + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_rwlock", issue = "134645")] + pub fn filter_map<U, F>( + mut orig: Self, + f: F, + ) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self> + where + F: FnOnce(&mut T) -> Option<&mut U>, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + match f(unsafe { orig.data.as_mut() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedRwLockWriteGuard { + data, + inner_lock: orig.inner_lock, + _variance: PhantomData, + }) + } + None => Err(orig), + } + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created. + unsafe { + self.inner_lock.read_unlock(); + } + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. + unsafe { + self.lock.inner.write_unlock(); + } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. 
+ unsafe { + self.inner_lock.read_unlock(); + } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { + self.inner_lock.write_unlock(); + } + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created. + unsafe { self.data.as_ref() } + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. + unsafe { &*self.lock.data.get() } + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. + unsafe { &mut *self.lock.data.get() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { self.data.as_ref() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { self.data.as_ref() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. 
+ unsafe { self.data.as_mut() } + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_rwlock", issue = "134645")] +impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} diff --git a/library/std/src/sync/poison/mutex.rs b/library/std/src/sync/poison/mutex.rs index 6205c4fa4ca..720c212c65c 100644 --- a/library/std/src/sync/poison/mutex.rs +++ b/library/std/src/sync/poison/mutex.rs @@ -279,7 +279,7 @@ pub struct MutexGuard<'a, T: ?Sized + 'a> { poison: poison::Guard, } -/// A [`MutexGuard`] is not `Send` to maximize platform portablity. +/// A [`MutexGuard`] is not `Send` to maximize platform portability. /// /// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to /// release mutex locks on the same thread they were acquired. diff --git a/library/std/src/sync/poison/rwlock.rs b/library/std/src/sync/poison/rwlock.rs index 2c92602bc87..0a463f3f9c7 100644 --- a/library/std/src/sync/poison/rwlock.rs +++ b/library/std/src/sync/poison/rwlock.rs @@ -80,16 +80,24 @@ use crate::sys::sync as sys; #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")] pub struct RwLock<T: ?Sized> { + /// The inner [`sys::RwLock`] that synchronizes thread access to the protected data. inner: sys::RwLock, + /// A flag denoting if this `RwLock` has been poisoned. poison: poison::Flag, + /// The lock-protected data. 
data: UnsafeCell<T>, } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: ?Sized + Send> Send for RwLock<T> {} + #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {} +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Guards +//////////////////////////////////////////////////////////////////////////////////////////////////// + /// RAII structure used to release the shared read access of a lock when /// dropped. /// @@ -105,13 +113,15 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {} #[stable(feature = "rust1", since = "1.0.0")] #[clippy::has_significant_drop] #[cfg_attr(not(test), rustc_diagnostic_item = "RwLockReadGuard")] -pub struct RwLockReadGuard<'a, T: ?Sized + 'a> { - // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a - // `RwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops. - // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull` - // is preferable over `const* T` to allow for niche optimization. +pub struct RwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> { + /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of + /// `&'rwlock T` to avoid `noalias` violations, because a `RwLockReadGuard` instance only holds + /// immutability until it drops, not for its whole scope. + /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also + /// covariant over `T`, just like we would have with `&T`. data: NonNull<T>, - inner_lock: &'a sys::RwLock, + /// A reference to the internal [`sys::RwLock`] that we have read-locked. + inner_lock: &'rwlock sys::RwLock, } #[stable(feature = "rust1", since = "1.0.0")] @@ -135,8 +145,10 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] #[clippy::has_significant_drop] #[cfg_attr(not(test), rustc_diagnostic_item = "RwLockWriteGuard")] -pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> { - lock: &'a RwLock<T>, +pub struct RwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> { + /// A reference to the [`RwLock`] that we have write-locked. + lock: &'rwlock RwLock<T>, + /// The poison guard. See the [`poison`] module for more information. poison: poison::Guard, } @@ -160,13 +172,15 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {} and cause Futures to not implement `Send`"] #[unstable(feature = "mapped_lock_guards", issue = "117108")] #[clippy::has_significant_drop] -pub struct MappedRwLockReadGuard<'a, T: ?Sized + 'a> { - // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a - // `MappedRwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops. - // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull` - // is preferable over `const* T` to allow for niche optimization. +pub struct MappedRwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> { + /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of + /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockReadGuard` instance only + /// holds immutability until it drops, not for its whole scope. + /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also + /// covariant over `T`, just like we would have with `&T`. 
     data: NonNull<T>,
-    inner_lock: &'a sys::RwLock,
+    /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+    inner_lock: &'rwlock sys::RwLock,
 }
 
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
@@ -189,16 +203,21 @@ unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}
               and cause Future's to not implement `Send`"]
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
 #[clippy::has_significant_drop]
-pub struct MappedRwLockWriteGuard<'a, T: ?Sized + 'a> {
-    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
-    // `MappedRwLockWriteGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
-    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
-    // below for the correct variance over `T` (invariance).
+pub struct MappedRwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+    /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead
+    /// of `&'rwlock mut T` to avoid `noalias` violations, because a `MappedRwLockWriteGuard`
+    /// instance only holds uniqueness until it drops, not for its whole scope.
+    /// `NonNull` is preferable over `*const T` to allow for niche optimizations.
     data: NonNull<T>,
-    inner_lock: &'a sys::RwLock,
-    poison_flag: &'a poison::Flag,
-    poison: poison::Guard,
-    _variance: PhantomData<&'a mut T>,
+    /// `NonNull` is covariant over `T`, so we add a `PhantomData<&'rwlock mut T>` field here to
+    /// enforce the correct invariance over `T`.
+    _variance: PhantomData<&'rwlock mut T>,
+    /// A reference to the internal [`sys::RwLock`] that we have write-locked.
+    inner_lock: &'rwlock sys::RwLock,
+    /// A reference to the original `RwLock`'s poison state.
+    poison_flag: &'rwlock poison::Flag,
+    /// The poison guard. See the [`poison`] module for more information.
+    poison_guard: poison::Guard,
 }
 
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
@@ -207,6 +226,10 @@ impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
 unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}
 
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Implementations
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 impl<T> RwLock<T> {
     /// Creates a new instance of an `RwLock<T>` which is unlocked.
     ///
@@ -611,8 +634,8 @@ impl<T: ?Sized> RwLock<T> {
     ///
     /// Since this call borrows the `RwLock` mutably, no actual locking needs to
     /// take place -- the mutable borrow statically guarantees no new locks can be acquired
-    /// while this reference exists. Note that this method does not clear any previously abandoned locks
-    /// (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
+    /// while this reference exists. Note that this method does not clear any previously abandoned
+    /// locks (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
     ///
     /// # Errors
     ///
@@ -700,177 +723,7 @@ impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
             inner_lock: &lock.inner,
         })
     }
-}
-
-impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
-    /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
-    // SAFETY: if and only if `lock.inner.write()` (or `lock.inner.try_write()`) has been
-    // successfully called from the same thread before instantiating this object.
- unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> { - poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard }) - } -} - -#[stable(feature = "std_debug", since = "1.16.0")] -impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[stable(feature = "std_guard_impls", since = "1.20.0")] -impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[stable(feature = "std_debug", since = "1.16.0")] -impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[stable(feature = "std_guard_impls", since = "1.20.0")] -impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created. - unsafe { self.data.as_ref() } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. - unsafe { &*self.lock.data.get() } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. - unsafe { &mut *self.lock.data.get() } - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. - unsafe { self.data.as_ref() } - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. 
- unsafe { self.data.as_ref() } - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. - unsafe { self.data.as_mut() } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> { - fn drop(&mut self) { - // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created. - unsafe { - self.inner_lock.read_unlock(); - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> { - fn drop(&mut self) { - self.lock.poison.done(&self.poison); - // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. - unsafe { - self.lock.inner.write_unlock(); - } - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> { - fn drop(&mut self) { - // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. - unsafe { - self.inner_lock.read_unlock(); - } - } -} - -#[unstable(feature = "mapped_lock_guards", issue = "117108")] -impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> { - fn drop(&mut self) { - self.poison_flag.done(&self.poison); - // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. - unsafe { - self.inner_lock.write_unlock(); - } - } -} -impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g. /// an enum variant. /// @@ -883,17 +736,18 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { /// /// # Panics /// - /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned. + /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be + /// poisoned. #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U> + pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U> where F: FnOnce(&T) -> &U, U: ?Sized, { // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. let data = NonNull::from(f(unsafe { orig.data.as_ref() })); let orig = ManuallyDrop::new(orig); MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock } @@ -912,17 +766,18 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { /// /// # Panics /// - /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned. + /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be + /// poisoned. 
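+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the `Vec` contents are purely illustrative; the example assumes the
+    /// unstable `mapped_lock_guards` feature):
+    ///
+    /// ```
+    /// #![feature(mapped_lock_guards)]
+    ///
+    /// use std::sync::{RwLock, RwLockReadGuard};
+    ///
+    /// let lock = RwLock::new(vec![1, 2, 3]);
+    ///
+    /// // Narrow the guard to the first element; an empty `Vec` would hand the
+    /// // original guard back as the `Err` variant instead.
+    /// let first = RwLockReadGuard::filter_map(lock.read().unwrap(), |v| v.first());
+    /// assert_eq!(*first.unwrap(), 1);
+    /// ```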
#[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self> + pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self> where F: FnOnce(&T) -> Option<&U>, U: ?Sized, { // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. match f(unsafe { orig.data.as_ref() }) { Some(data) => { let data = NonNull::from(data); @@ -934,71 +789,95 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { } } -impl<'a, T: ?Sized> MappedRwLockReadGuard<'a, T> { - /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, - /// e.g. an enum variant. - /// - /// The `RwLock` is already locked for reading, so this cannot fail. - /// - /// This is an associated function that needs to be used as - /// `MappedRwLockReadGuard::map(...)`. A method would interfere with - /// methods of the same name on the contents of the `MappedRwLockReadGuard` - /// used through `Deref`. +impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> { + /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`. /// - /// # Panics + /// # Safety /// - /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned. - #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U> - where - F: FnOnce(&T) -> &U, - U: ?Sized, - { - // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. - let data = NonNull::from(f(unsafe { orig.data.as_ref() })); - let orig = ManuallyDrop::new(orig); - MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock } + /// This function is safe if and only if the same thread has successfully and safely called + /// `lock.inner.write()`, `lock.inner.try_write()`, or `lock.inner.try_upgrade` before + /// instantiating this object. + unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> { + poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard }) } - /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. - /// The original guard is returned as an `Err(...)` if the closure returns - /// `None`. + /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`]. /// - /// The `RwLock` is already locked for reading, so this cannot fail. + /// Since we have the `RwLockWriteGuard`, the [`RwLock`] must already be locked for writing, so + /// this method cannot fail. /// - /// This is an associated function that needs to be used as - /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with - /// methods of the same name on the contents of the `MappedRwLockReadGuard` - /// used through `Deref`. 
+ /// After downgrading, other readers will be allowed to read the protected data. /// - /// # Panics + /// # Examples /// - /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned. - #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self> - where - F: FnOnce(&T) -> Option<&U>, - U: ?Sized, - { - // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard - // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. - match f(unsafe { orig.data.as_ref() }) { - Some(data) => { - let data = NonNull::from(data); - let orig = ManuallyDrop::new(orig); - Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }) - } - None => Err(orig), - } + /// `downgrade` takes ownership of the `RwLockWriteGuard` and returns a [`RwLockReadGuard`]. + /// + /// ``` + /// #![feature(rwlock_downgrade)] + /// + /// use std::sync::{RwLock, RwLockWriteGuard}; + /// + /// let rw = RwLock::new(0); + /// + /// let mut write_guard = rw.write().unwrap(); + /// *write_guard = 42; + /// + /// let read_guard = RwLockWriteGuard::downgrade(write_guard); + /// assert_eq!(42, *read_guard); + /// ``` + /// + /// `downgrade` will _atomically_ change the state of the [`RwLock`] from exclusive mode into + /// shared mode. This means that it is impossible for another writing thread to get in between a + /// thread calling `downgrade` and any reads it performs after downgrading. + /// + /// ``` + /// #![feature(rwlock_downgrade)] + /// + /// use std::sync::{Arc, RwLock, RwLockWriteGuard}; + /// + /// let rw = Arc::new(RwLock::new(1)); + /// + /// // Put the lock in write mode. + /// let mut main_write_guard = rw.write().unwrap(); + /// + /// let rw_clone = rw.clone(); + /// let evil_handle = std::thread::spawn(move || { + /// // This will not return until the main thread drops the `main_read_guard`. + /// let mut evil_guard = rw_clone.write().unwrap(); + /// + /// assert_eq!(*evil_guard, 2); + /// *evil_guard = 3; + /// }); + /// + /// *main_write_guard = 2; + /// + /// // Atomically downgrade the write guard into a read guard. + /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard); + /// + /// // Since `downgrade` is atomic, the writer thread cannot have changed the protected data. + /// assert_eq!(*main_read_guard, 2, "`downgrade` was not atomic"); + /// # + /// # drop(main_read_guard); + /// # evil_handle.join().unwrap(); + /// # + /// # let final_check = rw.read().unwrap(); + /// # assert_eq!(*final_check, 3); + /// ``` + #[unstable(feature = "rwlock_downgrade", issue = "128203")] + pub fn downgrade(s: Self) -> RwLockReadGuard<'rwlock, T> { + let lock = s.lock; + + // We don't want to call the destructor since that calls `write_unlock`. + forget(s); + + // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write + // mode, satisfying the `downgrade` contract. + unsafe { lock.inner.downgrade() }; + + // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract. + unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) } } -} -impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g. /// an enum variant. 
/// @@ -1013,22 +892,22 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned. #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U> + pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U> where F: FnOnce(&mut T) -> &mut U, U: ?Sized, { // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() })); let orig = ManuallyDrop::new(orig); MappedRwLockWriteGuard { data, inner_lock: &orig.lock.inner, poison_flag: &orig.lock.poison, - poison: orig.poison.clone(), + poison_guard: orig.poison.clone(), _variance: PhantomData, } } @@ -1048,15 +927,15 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned. #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self> + pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>, U: ?Sized, { // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. match f(unsafe { &mut *orig.lock.data.get() }) { Some(data) => { let data = NonNull::from(data); @@ -1065,78 +944,82 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { data, inner_lock: &orig.lock.inner, poison_flag: &orig.lock.poison, - poison: orig.poison.clone(), + poison_guard: orig.poison.clone(), _variance: PhantomData, }) } None => Err(orig), } } +} - /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`]. - /// - /// This method will atomically change the state of the [`RwLock`] from exclusive mode into - /// shared mode. This means that it is impossible for a writing thread to get in between a - /// thread calling `downgrade` and the same thread reading whatever it wrote while it had the - /// [`RwLock`] in write mode. - /// - /// Note that since we have the `RwLockWriteGuard`, we know that the [`RwLock`] is already - /// locked for writing, so this method cannot fail. - /// - /// # Example - /// - /// ``` - /// #![feature(rwlock_downgrade)] - /// use std::sync::{Arc, RwLock, RwLockWriteGuard}; - /// - /// // The inner value starts as 0. - /// let rw = Arc::new(RwLock::new(0)); +impl<'rwlock, T: ?Sized> MappedRwLockReadGuard<'rwlock, T> { + /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, + /// e.g. an enum variant. /// - /// // Put the lock in write mode. 
- /// let mut main_write_guard = rw.write().unwrap(); + /// The `RwLock` is already locked for reading, so this cannot fail. /// - /// let evil = rw.clone(); - /// let handle = std::thread::spawn(move || { - /// // This will not return until the main thread drops the `main_read_guard`. - /// let mut evil_guard = evil.write().unwrap(); + /// This is an associated function that needs to be used as + /// `MappedRwLockReadGuard::map(...)`. A method would interfere with + /// methods of the same name on the contents of the `MappedRwLockReadGuard` + /// used through `Deref`. /// - /// assert_eq!(*evil_guard, 1); - /// *evil_guard = 2; - /// }); + /// # Panics /// - /// // After spawning the writer thread, set the inner value to 1. - /// *main_write_guard = 1; + /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be + /// poisoned. + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U> + where + F: FnOnce(&T) -> &U, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { orig.data.as_ref() })); + let orig = ManuallyDrop::new(orig); + MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock } + } + + /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. + /// The original guard is returned as an `Err(...)` if the closure returns + /// `None`. /// - /// // Atomically downgrade the write guard into a read guard. - /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard); + /// The `RwLock` is already locked for reading, so this cannot fail. /// - /// // Since `downgrade` is atomic, the writer thread cannot have set the inner value to 2. - /// assert_eq!(*main_read_guard, 1, "`downgrade` was not atomic"); + /// This is an associated function that needs to be used as + /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with + /// methods of the same name on the contents of the `MappedRwLockReadGuard` + /// used through `Deref`. /// - /// // Clean up everything now - /// drop(main_read_guard); - /// handle.join().unwrap(); + /// # Panics /// - /// let final_check = rw.read().unwrap(); - /// assert_eq!(*final_check, 2); - /// ``` - #[unstable(feature = "rwlock_downgrade", issue = "128203")] - pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> { - let lock = s.lock; - - // We don't want to call the destructor since that calls `write_unlock`. - forget(s); - - // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write - // mode, satisfying the `downgrade` contract. - unsafe { lock.inner.downgrade() }; - - // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract. - unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) } + /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be + /// poisoned. 
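+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of narrowing a guard in two steps (the tuple data is purely
+    /// illustrative; the example assumes the unstable `mapped_lock_guards` feature):
+    ///
+    /// ```
+    /// #![feature(mapped_lock_guards)]
+    ///
+    /// use std::sync::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
+    ///
+    /// let lock = RwLock::new((1u8, Some(2u8)));
+    ///
+    /// // First map to the second field, then filter down to its payload, if any.
+    /// let field = RwLockReadGuard::map(lock.read().unwrap(), |t| &t.1);
+    /// if let Ok(payload) = MappedRwLockReadGuard::filter_map(field, |o| o.as_ref()) {
+    ///     assert_eq!(*payload, 2);
+    /// }
+    /// ```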
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")] + pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self> + where + F: FnOnce(&T) -> Option<&U>, + U: ?Sized, + { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. + match f(unsafe { orig.data.as_ref() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }) + } + None => Err(orig), + } } } -impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> { +impl<'rwlock, T: ?Sized> MappedRwLockWriteGuard<'rwlock, T> { /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, /// e.g. an enum variant. /// @@ -1151,22 +1034,22 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> { /// /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned. #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U> + pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U> where F: FnOnce(&mut T) -> &mut U, U: ?Sized, { // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. let data = NonNull::from(f(unsafe { orig.data.as_mut() })); let orig = ManuallyDrop::new(orig); MappedRwLockWriteGuard { data, inner_lock: orig.inner_lock, poison_flag: orig.poison_flag, - poison: orig.poison.clone(), + poison_guard: orig.poison_guard.clone(), _variance: PhantomData, } } @@ -1186,15 +1069,18 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> { /// /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned. #[unstable(feature = "mapped_lock_guards", issue = "117108")] - pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self> + pub fn filter_map<U, F>( + mut orig: Self, + f: F, + ) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>, U: ?Sized, { // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard // was created, and have been upheld throughout `map` and/or `filter_map`. - // The signature of the closure guarantees that it will not "leak" the lifetime of the reference - // passed to it. If the closure panics, the guard will be dropped. + // The signature of the closure guarantees that it will not "leak" the lifetime of the + // reference passed to it. If the closure panics, the guard will be dropped. 
match f(unsafe { orig.data.as_mut() }) { Some(data) => { let data = NonNull::from(data); @@ -1203,7 +1089,7 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> { data, inner_lock: orig.inner_lock, poison_flag: orig.poison_flag, - poison: orig.poison.clone(), + poison_guard: orig.poison_guard.clone(), _variance: PhantomData, }) } @@ -1211,3 +1097,162 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> { } } } + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created. + unsafe { + self.inner_lock.read_unlock(); + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> { + fn drop(&mut self) { + self.lock.poison.done(&self.poison); + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. + unsafe { + self.lock.inner.write_unlock(); + } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { + self.inner_lock.read_unlock(); + } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> { + fn drop(&mut self) { + self.poison_flag.done(&self.poison_guard); + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { + self.inner_lock.write_unlock(); + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created. + unsafe { self.data.as_ref() } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. + unsafe { &*self.lock.data.get() } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created. + unsafe { &mut *self.lock.data.get() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { self.data.as_ref() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. 
+ unsafe { self.data.as_ref() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + unsafe { self.data.as_mut() } + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[stable(feature = "std_guard_impls", since = "1.20.0")] +impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[stable(feature = "std_guard_impls", since = "1.20.0")] +impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} diff --git a/library/std/src/sys/fd/unix.rs b/library/std/src/sys/fd/unix.rs index cdca73cdca1..a12f692e754 100644 --- a/library/std/src/sys/fd/unix.rs +++ b/library/std/src/sys/fd/unix.rs @@ -37,10 +37,10 @@ pub struct FileDesc(OwnedFd); // // On Apple targets however, apparently the 64-bit libc is either buggy or // intentionally showing odd behavior by rejecting any read with a size -// larger than or equal to INT_MAX. To handle both of these the read -// size is capped on both platforms. +// larger than INT_MAX. To handle both of these the read size is capped on +// both platforms. 
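+//
+// In practice the cap means a single `read` or `write` syscall transfers at most
+// `READ_LIMIT` bytes; callers with larger buffers just observe an ordinary short
+// read or short write and issue another call.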
const READ_LIMIT: usize = if cfg!(target_vendor = "apple") { - libc::c_int::MAX as usize - 1 + libc::c_int::MAX as usize } else { libc::ssize_t::MAX as usize }; diff --git a/library/std/src/sys/fs/mod.rs b/library/std/src/sys/fs/mod.rs index d9740e15789..dbd782f5018 100644 --- a/library/std/src/sys/fs/mod.rs +++ b/library/std/src/sys/fs/mod.rs @@ -114,6 +114,21 @@ pub fn set_permissions(path: &Path, perm: FilePermissions) -> io::Result<()> { with_native_path(path, &|path| imp::set_perm(path, perm.clone())) } +#[cfg(unix)] +pub fn set_permissions_nofollow(path: &Path, perm: crate::fs::Permissions) -> io::Result<()> { + use crate::fs::OpenOptions; + use crate::os::unix::fs::OpenOptionsExt; + + OpenOptions::new().custom_flags(libc::O_NOFOLLOW).open(path)?.set_permissions(perm) +} + +#[cfg(not(unix))] +pub fn set_permissions_nofollow(_path: &Path, _perm: crate::fs::Permissions) -> io::Result<()> { + crate::unimplemented!( + "`set_permissions_nofollow` is currently only implemented on Unix platforms" + ) +} + pub fn canonicalize(path: &Path) -> io::Result<PathBuf> { with_native_path(path, &imp::canonicalize) } diff --git a/library/std/src/sys/fs/unix.rs b/library/std/src/sys/fs/unix.rs index 1e2fe6b01f7..0d710a4b2a6 100644 --- a/library/std/src/sys/fs/unix.rs +++ b/library/std/src/sys/fs/unix.rs @@ -1264,6 +1264,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", ))] pub fn lock(&self) -> io::Result<()> { @@ -1276,6 +1278,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", )))] pub fn lock(&self) -> io::Result<()> { @@ -1287,6 +1291,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", ))] pub fn lock_shared(&self) -> io::Result<()> { @@ -1299,6 +1305,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", )))] pub fn lock_shared(&self) -> io::Result<()> { @@ -1310,6 +1318,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", ))] pub fn try_lock(&self) -> Result<(), TryLockError> { @@ -1330,6 +1340,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", )))] pub fn try_lock(&self) -> Result<(), TryLockError> { @@ -1344,6 +1356,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", ))] pub fn try_lock_shared(&self) -> Result<(), TryLockError> { @@ -1364,6 +1378,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", )))] pub fn try_lock_shared(&self) -> Result<(), TryLockError> { @@ -1378,6 +1394,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", ))] pub fn unlock(&self) -> io::Result<()> { @@ -1390,6 +1408,8 @@ impl File { target_os = "fuchsia", target_os = "linux", target_os = "netbsd", + target_os = "openbsd", + target_os = "cygwin", target_vendor = "apple", )))] pub fn unlock(&self) 
-> io::Result<()> { diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs index 8ec0a0e3302..6324c1a232a 100644 --- a/library/std/src/sys/mod.rs +++ b/library/std/src/sys/mod.rs @@ -1,7 +1,7 @@ #![allow(unsafe_op_in_unsafe_fn)] /// The configure_builtins module provides runtime support for compiler-builtin features -/// which require dynamic intialization to work as expected, e.g. aarch64 +/// which require dynamic initialization to work as expected, e.g. aarch64 /// outline-atomics. mod configure_builtins; diff --git a/library/std/src/sys/pal/uefi/time.rs b/library/std/src/sys/pal/uefi/time.rs index b7c71b76ee8..36ce3f7ef96 100644 --- a/library/std/src/sys/pal/uefi/time.rs +++ b/library/std/src/sys/pal/uefi/time.rs @@ -188,7 +188,7 @@ pub(crate) mod system_time_internal { Duration::new(epoch, t.nanosecond) } - /// This algorithm is a modifed version of the one described in the post: + /// This algorithm is a modified version of the one described in the post: /// https://howardhinnant.github.io/date_algorithms.html#clive_from_days /// /// The changes are to use 1900-01-01-00:00:00 with timezone -1440 as anchor instead of UNIX @@ -197,7 +197,7 @@ pub(crate) mod system_time_internal { // Check timezone validity assert!(timezone <= 1440 && timezone >= -1440); - // FIXME(#126043): use checked_sub_signed once stablized + // FIXME(#126043): use checked_sub_signed once stabilized let secs = dur.as_secs().checked_add_signed((-timezone as i64) * SECS_IN_MINUTE as i64).unwrap(); diff --git a/library/std/src/sys/pal/unix/mod.rs b/library/std/src/sys/pal/unix/mod.rs index fede3673eb6..aef7ab55088 100644 --- a/library/std/src/sys/pal/unix/mod.rs +++ b/library/std/src/sys/pal/unix/mod.rs @@ -59,6 +59,30 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { } unsafe fn sanitize_standard_fds() { + #[allow(dead_code, unused_variables, unused_mut)] + let mut opened_devnull = -1; + #[allow(dead_code, unused_variables, unused_mut)] + let mut open_devnull = || { + #[cfg(not(all(target_os = "linux", target_env = "gnu")))] + use libc::open; + #[cfg(all(target_os = "linux", target_env = "gnu"))] + use libc::open64 as open; + + if opened_devnull != -1 { + if libc::dup(opened_devnull) != -1 { + return; + } + } + opened_devnull = open(c"/dev/null".as_ptr(), libc::O_RDWR, 0); + if opened_devnull == -1 { + // If the stream is closed but we failed to reopen it, abort the + // process. Otherwise we wouldn't preserve the safety of + // operations on the corresponding Rust object Stdin, Stdout, or + // Stderr. + libc::abort(); + } + }; + // fast path with a single syscall for systems with poll() #[cfg(not(any( miri, @@ -74,11 +98,6 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { target_vendor = "apple", )))] 'poll: { - #[cfg(not(all(target_os = "linux", target_env = "gnu")))] - use libc::open as open64; - #[cfg(all(target_os = "linux", target_env = "gnu"))] - use libc::open64; - use crate::sys::os::errno; let pfds: &mut [_] = &mut [ libc::pollfd { fd: 0, events: 0, revents: 0 }, @@ -106,13 +125,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { if pfd.revents & libc::POLLNVAL == 0 { continue; } - if open64(c"/dev/null".as_ptr(), libc::O_RDWR, 0) == -1 { - // If the stream is closed but we failed to reopen it, abort the - // process. Otherwise we wouldn't preserve the safety of - // operations on the corresponding Rust object Stdin, Stdout, or - // Stderr. 
- libc::abort(); - } + open_devnull(); } return; } @@ -129,21 +142,10 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { target_os = "vita", )))] { - #[cfg(not(all(target_os = "linux", target_env = "gnu")))] - use libc::open as open64; - #[cfg(all(target_os = "linux", target_env = "gnu"))] - use libc::open64; - use crate::sys::os::errno; for fd in 0..3 { if libc::fcntl(fd, libc::F_GETFD) == -1 && errno() == libc::EBADF { - if open64(c"/dev/null".as_ptr(), libc::O_RDWR, 0) == -1 { - // If the stream is closed but we failed to reopen it, abort the - // process. Otherwise we wouldn't preserve the safety of - // operations on the corresponding Rust object Stdin, Stdout, or - // Stderr. - libc::abort(); - } + open_devnull(); } } } diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs index 0ad014ccd3e..797feeb2bbb 100644 --- a/library/std/src/thread/local.rs +++ b/library/std/src/thread/local.rs @@ -8,8 +8,8 @@ use crate::fmt; /// A thread local storage (TLS) key which owns its contents. /// -/// This key uses the fastest possible implementation available to it for the -/// target platform. It is instantiated with the [`thread_local!`] macro and the +/// This key uses the fastest implementation available on the target platform. +/// It is instantiated with the [`thread_local!`] macro and the /// primary method is the [`with`] method, though there are helpers to make /// working with [`Cell`] types easier. /// @@ -24,10 +24,10 @@ use crate::fmt; /// [`with`]) within a thread, and values that implement [`Drop`] get /// destructed when a thread exits. Some platform-specific caveats apply, which /// are explained below. -/// Note that, should the destructor panics, the whole process will be [aborted]. +/// Note that if the destructor panics, the whole process will be [aborted]. /// /// A `LocalKey`'s initializer cannot recursively depend on itself. Using a -/// `LocalKey` in this way may cause panics, aborts or infinite recursion on +/// `LocalKey` in this way may cause panics, aborts, or infinite recursion on /// the first call to `with`. /// /// [aborted]: crate::process::abort diff --git a/library/std/tests/sync/lazy_lock.rs b/library/std/tests/sync/lazy_lock.rs index 6c14b79f2ce..68aeea834b4 100644 --- a/library/std/tests/sync/lazy_lock.rs +++ b/library/std/tests/sync/lazy_lock.rs @@ -34,16 +34,6 @@ fn lazy_default() { } #[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn lazy_poisoning() { - let x: LazyCell<String> = LazyCell::new(|| panic!("kaboom")); - for _ in 0..2 { - let res = panic::catch_unwind(panic::AssertUnwindSafe(|| x.len())); - assert!(res.is_err()); - } -} - -#[test] #[cfg_attr(any(target_os = "emscripten", target_os = "wasi"), ignore)] // no threads fn sync_lazy_new() { static CALLED: AtomicUsize = AtomicUsize::new(0); @@ -123,16 +113,6 @@ fn static_sync_lazy_via_fn() { assert_eq!(xs(), &vec![1, 2, 3]); } -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn sync_lazy_poisoning() { - let x: LazyLock<String> = LazyLock::new(|| panic!("kaboom")); - for _ in 0..2 { - let res = panic::catch_unwind(|| x.len()); - assert!(res.is_err()); - } -} - // Check that we can infer `T` from closure's type. 
#[test] fn lazy_type_inference() { @@ -146,17 +126,6 @@ fn is_sync_send() { } #[test] -#[should_panic = "has previously been poisoned"] -fn lazy_force_mut_panic() { - let mut lazy = LazyLock::<String>::new(|| panic!()); - panic::catch_unwind(panic::AssertUnwindSafe(|| { - let _ = LazyLock::force_mut(&mut lazy); - })) - .unwrap_err(); - let _ = &*lazy; -} - -#[test] fn lazy_force_mut() { let s = "abc".to_owned(); let mut lazy = LazyLock::new(move || s); @@ -165,3 +134,56 @@ fn lazy_force_mut() { p.clear(); LazyLock::force_mut(&mut lazy); } + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn lazy_poisoning() { + let x: LazyCell<String> = LazyCell::new(|| panic!("kaboom")); + for _ in 0..2 { + let res = panic::catch_unwind(panic::AssertUnwindSafe(|| x.len())); + assert!(res.is_err()); + } +} + +/// Verifies that when a `LazyLock` is poisoned, it panics with the correct error message ("LazyLock +/// instance has previously been poisoned") instead of the underlying `Once` error message. +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +#[should_panic(expected = "LazyLock instance has previously been poisoned")] +fn lazy_lock_deref_panic() { + let lazy: LazyLock<String> = LazyLock::new(|| panic!("initialization failed")); + + // First access will panic during initialization. + let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { + let _ = &*lazy; + })); + + // Second access should panic with the poisoned message. + let _ = &*lazy; +} + +#[test] +#[should_panic(expected = "LazyLock instance has previously been poisoned")] +fn lazy_lock_deref_mut_panic() { + let mut lazy: LazyLock<String> = LazyLock::new(|| panic!("initialization failed")); + + // First access will panic during initialization. + let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { + let _ = LazyLock::force_mut(&mut lazy); + })); + + // Second access should panic with the poisoned message. + let _ = &*lazy; +} + +/// Verifies that when the initialization closure panics with a custom message, that message is +/// preserved and not overridden by `LazyLock`. +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +#[should_panic(expected = "custom panic message from closure")] +fn lazy_lock_preserves_closure_panic_message() { + let lazy: LazyLock<String> = LazyLock::new(|| panic!("custom panic message from closure")); + + // This should panic with the original message from the closure. + let _ = &*lazy; +} diff --git a/library/std/tests/sync/lib.rs b/library/std/tests/sync/lib.rs index 94f1fe96b6a..f874c2ba389 100644 --- a/library/std/tests/sync/lib.rs +++ b/library/std/tests/sync/lib.rs @@ -8,6 +8,7 @@ #![feature(std_internals)] #![feature(sync_nonpoison)] #![feature(nonpoison_mutex)] +#![feature(nonpoison_rwlock)] #![allow(internal_features)] #![feature(macro_metavar_expr_concat)] // For concatenating identifiers in macros. 
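
The poisoning contract pinned down by the new `LazyLock` tests above can be demonstrated with a short, self-contained sketch (illustrative user code, not part of this diff; it uses only the stable `LazyLock` API and the same `catch_unwind` pattern as the tests):

use std::panic::{self, AssertUnwindSafe};
use std::sync::LazyLock;

fn main() {
    // An initializer that panics on first use poisons the LazyLock.
    let lazy: LazyLock<String> = LazyLock::new(|| panic!("init failed"));

    // The first access runs the closure, which unwinds with its own
    // message ("init failed").
    assert!(panic::catch_unwind(AssertUnwindSafe(|| lazy.len())).is_err());

    // Later accesses do not re-run the initializer; per the tests above
    // they panic with "LazyLock instance has previously been poisoned".
    assert!(panic::catch_unwind(AssertUnwindSafe(|| lazy.len())).is_err());
}
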
diff --git a/library/std/tests/sync/rwlock.rs b/library/std/tests/sync/rwlock.rs index 1d55a176948..eca15d2a4ad 100644 --- a/library/std/tests/sync/rwlock.rs +++ b/library/std/tests/sync/rwlock.rs @@ -29,239 +29,457 @@ fn test_needs_drop() { assert!(mem::needs_drop::<NonCopyNeedsDrop>()); } -#[derive(Clone, Eq, PartialEq, Debug)] -struct Cloneable(i32); - -#[test] -fn smoke() { - let l = RwLock::new(()); - drop(l.read().unwrap()); - drop(l.write().unwrap()); - drop((l.read().unwrap(), l.read().unwrap())); - drop(l.write().unwrap()); -} +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Non-poison & Poison Tests +//////////////////////////////////////////////////////////////////////////////////////////////////// +use super::nonpoison_and_poison_unwrap_test; + +nonpoison_and_poison_unwrap_test!( + name: smoke, + test_body: { + use locks::RwLock; + + let l = RwLock::new(()); + drop(maybe_unwrap(l.read())); + drop(maybe_unwrap(l.write())); + drop((maybe_unwrap(l.read()), maybe_unwrap(l.read()))); + drop(maybe_unwrap(l.write())); + } +); -#[test] // FIXME: On macOS we use a provenance-incorrect implementation and Miri // catches that issue with a chance of around 1/1000. // See <https://github.com/rust-lang/rust/issues/121950> for details. #[cfg_attr(all(miri, target_os = "macos"), ignore)] -fn frob() { - const N: u32 = 10; - const M: usize = if cfg!(miri) { 100 } else { 1000 }; +nonpoison_and_poison_unwrap_test!( + name: frob, + test_body: { + use locks::RwLock; - let r = Arc::new(RwLock::new(())); + const N: u32 = 10; + const M: usize = if cfg!(miri) { 100 } else { 1000 }; - let (tx, rx) = channel::<()>(); - for _ in 0..N { - let tx = tx.clone(); - let r = r.clone(); - thread::spawn(move || { - let mut rng = crate::common::test_rng(); - for _ in 0..M { - if rng.random_bool(1.0 / (N as f64)) { - drop(r.write().unwrap()); - } else { - drop(r.read().unwrap()); + let r = Arc::new(RwLock::new(())); + + let (tx, rx) = channel::<()>(); + for _ in 0..N { + let tx = tx.clone(); + let r = r.clone(); + thread::spawn(move || { + let mut rng = crate::common::test_rng(); + for _ in 0..M { + if rng.random_bool(1.0 / (N as f64)) { + drop(maybe_unwrap(r.write())); + } else { + drop(maybe_unwrap(r.read())); + } } + drop(tx); + }); + } + drop(tx); + let _ = rx.recv(); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_rw_arc, + test_body: { + use locks::RwLock; + + let arc = Arc::new(RwLock::new(0)); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + + thread::spawn(move || { + let mut lock = maybe_unwrap(arc2.write()); + for _ in 0..10 { + let tmp = *lock; + *lock = -1; + thread::yield_now(); + *lock = tmp + 1; } - drop(tx); + tx.send(()).unwrap(); }); + + // Readers try to catch the writer in the act + let mut children = Vec::new(); + for _ in 0..5 { + let arc3 = arc.clone(); + children.push(thread::spawn(move || { + let lock = maybe_unwrap(arc3.read()); + assert!(*lock >= 0); + })); + } + + // Wait for children to pass their asserts + for r in children { + assert!(r.join().is_ok()); + } + + // Wait for writer to finish + rx.recv().unwrap(); + let lock = maybe_unwrap(arc.read()); + assert_eq!(*lock, 10); } - drop(tx); - let _ = rx.recv(); -} +); -#[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_poison_wr() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.write().unwrap(); - panic!(); - }) - .join(); - 
assert!(arc.read().is_err()); -} +nonpoison_and_poison_unwrap_test!( + name: test_rw_arc_access_in_unwind, + test_body: { + use locks::RwLock; + + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || -> () { + struct Unwinder { + i: Arc<RwLock<isize>>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + let mut lock = maybe_unwrap(self.i.write()); + *lock += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = maybe_unwrap(arc.read()); + assert_eq!(*lock, 2); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_rwlock_unsized, + test_body: { + use locks::RwLock; + + let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); + { + let b = &mut *maybe_unwrap(rw.write()); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*maybe_unwrap(rw.read()), comp); + } +); -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_poison_mapped_w_r() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let lock = arc2.write().unwrap(); - let _lock = RwLockWriteGuard::map(lock, |val| val); - panic!(); - }) - .join(); - assert!(arc.read().is_err()); -} +nonpoison_and_poison_unwrap_test!( + name: test_into_inner, + test_body: { + use locks::RwLock; -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_poison_ww() { - let arc = Arc::new(RwLock::new(1)); - assert!(!arc.is_poisoned()); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.write().unwrap(); - panic!(); - }) - .join(); - assert!(arc.write().is_err()); - assert!(arc.is_poisoned()); -} + let m = RwLock::new(NonCopy(10)); + assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(10)); + } +); -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_poison_mapped_w_w() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let lock = arc2.write().unwrap(); - let _lock = RwLockWriteGuard::map(lock, |val| val); - panic!(); - }) - .join(); - assert!(arc.write().is_err()); - assert!(arc.is_poisoned()); -} +nonpoison_and_poison_unwrap_test!( + name: test_into_inner_drop, + test_body: { + use locks::RwLock; -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_no_poison_rr() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.read().unwrap(); - panic!(); - }) - .join(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 1); -} + struct Foo(Arc<AtomicUsize>); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_no_poison_mapped_r_r() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let lock = arc2.read().unwrap(); - let _lock = RwLockReadGuard::map(lock, |val| val); - panic!(); - }) - .join(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 1); -} + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = RwLock::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = maybe_unwrap(m.into_inner()); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + 
assert_eq!(num_drops.load(Ordering::SeqCst), 1); + } +); -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_no_poison_rw() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.read().unwrap(); - panic!() - }) - .join(); - let lock = arc.write().unwrap(); - assert_eq!(*lock, 1); -} +nonpoison_and_poison_unwrap_test!( + name: test_get_cloned, + test_body: { + use locks::RwLock; -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_no_poison_mapped_r_w() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let lock = arc2.read().unwrap(); - let _lock = RwLockReadGuard::map(lock, |val| val); - panic!(); - }) - .join(); - let lock = arc.write().unwrap(); - assert_eq!(*lock, 1); -} + #[derive(Clone, Eq, PartialEq, Debug)] + struct Cloneable(i32); -#[test] -fn test_rw_arc() { - let arc = Arc::new(RwLock::new(0)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - thread::spawn(move || { - let mut lock = arc2.write().unwrap(); - for _ in 0..10 { - let tmp = *lock; - *lock = -1; - thread::yield_now(); - *lock = tmp + 1; + let m = RwLock::new(Cloneable(10)); + + assert_eq!(maybe_unwrap(m.get_cloned()), Cloneable(10)); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_get_mut, + test_body: { + use locks::RwLock; + + let mut m = RwLock::new(NonCopy(10)); + *maybe_unwrap(m.get_mut()) = NonCopy(20); + assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(20)); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_set, + test_body: { + use locks::RwLock; + + fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) + where + T: Debug + Eq, + { + let m = RwLock::new(init()); + + assert_eq!(*maybe_unwrap(m.read()), init()); + maybe_unwrap(m.set(value())); + assert_eq!(*maybe_unwrap(m.read()), value()); + } + + inner(|| NonCopy(10), || NonCopy(20)); + inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_replace, + test_body: { + use locks::RwLock; + + fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) + where + T: Debug + Eq, + { + let m = RwLock::new(init()); + + assert_eq!(*maybe_unwrap(m.read()), init()); + assert_eq!(maybe_unwrap(m.replace(value())), init()); + assert_eq!(*maybe_unwrap(m.read()), value()); } - tx.send(()).unwrap(); - }); - // Readers try to catch the writer in the act - let mut children = Vec::new(); - for _ in 0..5 { - let arc3 = arc.clone(); - children.push(thread::spawn(move || { - let lock = arc3.read().unwrap(); - assert!(*lock >= 0); - })); + inner(|| NonCopy(10), || NonCopy(20)); + inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_read_guard_covariance, + test_body: { + use locks::{RwLock, RwLockReadGuard}; + + fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {} + let j: i32 = 5; + let lock = RwLock::new(&j); + { + let i = 6; + do_stuff(maybe_unwrap(lock.read()), &i); + } + drop(lock); } +); + +nonpoison_and_poison_unwrap_test!( + name: test_mapped_read_guard_covariance, + test_body: { + use locks::{RwLock, RwLockReadGuard, MappedRwLockReadGuard}; + + fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {} + let j: i32 = 5; + let lock = RwLock::new((&j, &j)); + { + let i = 6; + let guard = maybe_unwrap(lock.read()); + let guard = 
RwLockReadGuard::map(guard, |(val, _val)| val); + do_stuff(guard, &i); + } + drop(lock); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_downgrade_basic, + test_body: { + use locks::{RwLock, RwLockWriteGuard}; + + let r = RwLock::new(()); - // Wait for children to pass their asserts - for r in children { - assert!(r.join().is_ok()); + let write_guard = maybe_unwrap(r.write()); + let _read_guard = RwLockWriteGuard::downgrade(write_guard); } +); - // Wait for writer to finish - rx.recv().unwrap(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 10); -} +// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue. +// See <https://github.com/rust-lang/rust/issues/121950> for details. +#[cfg_attr(all(miri, target_os = "macos"), ignore)] +nonpoison_and_poison_unwrap_test!( + name: test_downgrade_observe, + test_body: { + use locks::{RwLock, RwLockWriteGuard}; + + // Inspired by the test `test_rwlock_downgrade` from: + // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs + + const W: usize = 20; + const N: usize = if cfg!(miri) { 40 } else { 100 }; + + // This test spawns `W` writer threads, where each will increment a counter `N` times, + // ensuring that the value they wrote has not changed after downgrading. + + let rw = Arc::new(RwLock::new(0)); + + // Spawn the writers that will do `W * N` operations and checks. + let handles: Vec<_> = (0..W) + .map(|_| { + let rw = rw.clone(); + thread::spawn(move || { + for _ in 0..N { + // Increment the counter. + let mut write_guard = maybe_unwrap(rw.write()); + *write_guard += 1; + let cur_val = *write_guard; + + // Downgrade the lock to read mode, where the value protected cannot be + // modified. + let read_guard = RwLockWriteGuard::downgrade(write_guard); + assert_eq!(cur_val, *read_guard); + } + }) + }) + .collect(); -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_rw_arc_access_in_unwind() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { - struct Unwinder { - i: Arc<RwLock<isize>>, + for handle in handles { + handle.join().unwrap(); } - impl Drop for Unwinder { - fn drop(&mut self) { - let mut lock = self.i.write().unwrap(); - *lock += 1; - } + + assert_eq!(*maybe_unwrap(rw.read()), W * N); + } +); + +// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue. +// See <https://github.com/rust-lang/rust/issues/121950> for details. +#[cfg_attr(all(miri, target_os = "macos"), ignore)] +nonpoison_and_poison_unwrap_test!( + name: test_downgrade_atomic, + test_body: { + use locks::{RwLock, RwLockWriteGuard}; + + const NEW_VALUE: i32 = -1; + + // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been + // downgraded, the lock must be in read mode and no other threads can take the write lock to + // modify the protected value. + + // `W` is the number of evil writer threads. + const W: usize = 20; + let rwlock = Arc::new(RwLock::new(0)); + + // Spawns many evil writer threads that will try and write to the locked value before the + // initial writer (who has the exclusive lock) can read after it downgrades. + // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote + // itself as no other thread should be able to mutate the protected value. + + // Put the lock in write mode, causing all future threads trying to access this go to sleep. 
+ let mut main_write_guard = maybe_unwrap(rwlock.write()); + + // Spawn all of the evil writer threads. They will each increment the protected value by 1. + let handles: Vec<_> = (0..W) + .map(|_| { + let rwlock = rwlock.clone(); + thread::spawn(move || { + // Will go to sleep since the main thread initially has the write lock. + let mut evil_guard = maybe_unwrap(rwlock.write()); + *evil_guard += 1; + }) + }) + .collect(); + + // Wait for a good amount of time so that evil threads go to sleep. + // Note: this is not strictly necessary... + let eternity = std::time::Duration::from_millis(42); + thread::sleep(eternity); + + // Once everyone is asleep, set the value to `NEW_VALUE`. + *main_write_guard = NEW_VALUE; + + // Atomically downgrade the write guard into a read guard. + let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard); + + // If the above is not atomic, then it would be possible for an evil thread to get in front + // of this read and change the value to be non-negative. + assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic"); + + // Drop the main read guard and allow the evil writer threads to start incrementing. + drop(main_read_guard); + + for handle in handles { + handle.join().unwrap(); } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 2); -} + + let final_check = maybe_unwrap(rwlock.read()); + assert_eq!(*final_check, W as i32 + NEW_VALUE); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_mapping_mapped_guard, + test_body: { + use locks::{ + RwLock, RwLockReadGuard, RwLockWriteGuard, MappedRwLockReadGuard, MappedRwLockWriteGuard + }; + + let arr = [0; 4]; + let mut lock = RwLock::new(arr); + let guard = maybe_unwrap(lock.write()); + let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]); + let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]); + assert_eq!(guard.len(), 1); + guard[0] = 42; + drop(guard); + assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]); + + let guard = maybe_unwrap(lock.read()); + let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]); + let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]); + assert_eq!(*guard, [42]); + drop(guard); + assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]); + } +); #[test] -fn test_rwlock_unsized() { - let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); - { - let b = &mut *rw.write().unwrap(); - b[0] = 4; - b[2] = 5; +fn nonpoison_test_rwlock_try_write() { + use std::sync::nonpoison::{RwLock, RwLockReadGuard, WouldBlock}; + + let lock = RwLock::new(0isize); + let read_guard = lock.read(); + + let write_result = lock.try_write(); + match write_result { + Err(WouldBlock) => (), + Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"), + } + + drop(read_guard); + let mapped_read_guard = RwLockReadGuard::map(lock.read(), |_| &()); + + let write_result = lock.try_write(); + match write_result { + Err(WouldBlock) => (), + Ok(_) => assert!(false, "try_write should not succeed while mapped_read_guard is in scope"), } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*rw.read().unwrap(), comp); + + drop(mapped_read_guard); } #[test] -fn test_rwlock_try_write() { +fn poison_test_rwlock_try_write() { + use std::sync::poison::{RwLock, RwLockReadGuard, TryLockError}; + let lock = RwLock::new(0isize); let read_guard = lock.read().unwrap(); @@ -285,6 +503,11 @@ fn test_rwlock_try_write() { drop(mapped_read_guard); } 
+//////////////////////////////////////////////////////////////////////////////////////////////////// +// Poison Tests +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Creates a rwlock that is immediately poisoned. fn new_poisoned_rwlock<T>(value: T) -> RwLock<T> { let lock = RwLock::new(value); @@ -301,30 +524,6 @@ fn new_poisoned_rwlock<T>(value: T) -> RwLock<T> { } #[test] -fn test_into_inner() { - let m = RwLock::new(NonCopy(10)); - assert_eq!(m.into_inner().unwrap(), NonCopy(10)); -} - -#[test] -fn test_into_inner_drop() { - struct Foo(Arc<AtomicUsize>); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = RwLock::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner().unwrap(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); -} - -#[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_into_inner_poison() { let m = new_poisoned_rwlock(NonCopy(10)); @@ -336,15 +535,11 @@ fn test_into_inner_poison() { } #[test] -fn test_get_cloned() { - let m = RwLock::new(Cloneable(10)); - - assert_eq!(m.get_cloned().unwrap(), Cloneable(10)); -} - -#[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_get_cloned_poison() { + #[derive(Clone, Eq, PartialEq, Debug)] + struct Cloneable(i32); + let m = new_poisoned_rwlock(Cloneable(10)); match m.get_cloned() { @@ -354,13 +549,6 @@ fn test_get_cloned_poison() { } #[test] -fn test_get_mut() { - let mut m = RwLock::new(NonCopy(10)); - *m.get_mut().unwrap() = NonCopy(20); - assert_eq!(m.into_inner().unwrap(), NonCopy(20)); -} - -#[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_get_mut_poison() { let mut m = new_poisoned_rwlock(NonCopy(10)); @@ -372,23 +560,6 @@ fn test_get_mut_poison() { } #[test] -fn test_set() { - fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) - where - T: Debug + Eq, - { - let m = RwLock::new(init()); - - assert_eq!(*m.read().unwrap(), init()); - m.set(value()).unwrap(); - assert_eq!(*m.read().unwrap(), value()); - } - - inner(|| NonCopy(10), || NonCopy(20)); - inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); -} - -#[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_set_poison() { fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) @@ -411,23 +582,6 @@ fn test_set_poison() { } #[test] -fn test_replace() { - fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) - where - T: Debug + Eq, - { - let m = RwLock::new(init()); - - assert_eq!(*m.read().unwrap(), init()); - assert_eq!(m.replace(value()).unwrap(), init()); - assert_eq!(*m.read().unwrap(), value()); - } - - inner(|| NonCopy(10), || NonCopy(20)); - inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); -} - -#[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_replace_poison() { fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) @@ -450,49 +604,118 @@ fn test_replace_poison() { } #[test] -fn test_read_guard_covariance() { - fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {} - let j: i32 = 5; - let lock = RwLock::new(&j); - { - let i = 6; - do_stuff(lock.read().unwrap(), &i); - } - drop(lock); 
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_poison_wr() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }) + .join(); + assert!(arc.read().is_err()); } #[test] -fn test_mapped_read_guard_covariance() { - fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {} - let j: i32 = 5; - let lock = RwLock::new((&j, &j)); - { - let i = 6; - let guard = lock.read().unwrap(); - let guard = RwLockReadGuard::map(guard, |(val, _val)| val); - do_stuff(guard, &i); - } - drop(lock); +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_poison_mapped_w_r() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let lock = arc2.write().unwrap(); + let _lock = RwLockWriteGuard::map(lock, |val| val); + panic!(); + }) + .join(); + assert!(arc.read().is_err()); } #[test] -fn test_mapping_mapped_guard() { - let arr = [0; 4]; - let mut lock = RwLock::new(arr); - let guard = lock.write().unwrap(); - let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]); - let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]); - assert_eq!(guard.len(), 1); - guard[0] = 42; - drop(guard); - assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]); - - let guard = lock.read().unwrap(); - let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]); - let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]); - assert_eq!(*guard, [42]); - drop(guard); - assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]); +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_poison_ww() { + let arc = Arc::new(RwLock::new(1)); + assert!(!arc.is_poisoned()); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.write().unwrap(); + panic!(); + }) + .join(); + assert!(arc.write().is_err()); + assert!(arc.is_poisoned()); +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_poison_mapped_w_w() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let lock = arc2.write().unwrap(); + let _lock = RwLockWriteGuard::map(lock, |val| val); + panic!(); + }) + .join(); + assert!(arc.write().is_err()); + assert!(arc.is_poisoned()); +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_no_poison_rr() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = arc2.read().unwrap(); + panic!(); + }) + .join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 1); +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_no_poison_mapped_r_r() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let lock = arc2.read().unwrap(); + let _lock = RwLockReadGuard::map(lock, |val| val); + panic!(); + }) + .join(); + let lock = arc.read().unwrap(); + assert_eq!(*lock, 1); +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_no_poison_rw() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let _lock = 
arc2.read().unwrap(); + panic!() + }) + .join(); + let lock = arc.write().unwrap(); + assert_eq!(*lock, 1); +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn test_rw_arc_no_poison_mapped_r_w() { + let arc = Arc::new(RwLock::new(1)); + let arc2 = arc.clone(); + let _: Result<(), _> = thread::spawn(move || { + let lock = arc2.read().unwrap(); + let _lock = RwLockReadGuard::map(lock, |val| val); + panic!(); + }) + .join(); + let lock = arc.write().unwrap(); + assert_eq!(*lock, 1); } #[test] @@ -638,114 +861,3 @@ fn panic_while_mapping_write_unlocked_poison() { drop(lock); } - -#[test] -fn test_downgrade_basic() { - let r = RwLock::new(()); - - let write_guard = r.write().unwrap(); - let _read_guard = RwLockWriteGuard::downgrade(write_guard); -} - -#[test] -// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue. -// See <https://github.com/rust-lang/rust/issues/121950> for details. -#[cfg_attr(all(miri, target_os = "macos"), ignore)] -fn test_downgrade_observe() { - // Taken from the test `test_rwlock_downgrade` from: - // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs - - const W: usize = 20; - const N: usize = if cfg!(miri) { 40 } else { 100 }; - - // This test spawns `W` writer threads, where each will increment a counter `N` times, ensuring - // that the value they wrote has not changed after downgrading. - - let rw = Arc::new(RwLock::new(0)); - - // Spawn the writers that will do `W * N` operations and checks. - let handles: Vec<_> = (0..W) - .map(|_| { - let rw = rw.clone(); - thread::spawn(move || { - for _ in 0..N { - // Increment the counter. - let mut write_guard = rw.write().unwrap(); - *write_guard += 1; - let cur_val = *write_guard; - - // Downgrade the lock to read mode, where the value protected cannot be modified. - let read_guard = RwLockWriteGuard::downgrade(write_guard); - assert_eq!(cur_val, *read_guard); - } - }) - }) - .collect(); - - for handle in handles { - handle.join().unwrap(); - } - - assert_eq!(*rw.read().unwrap(), W * N); -} - -#[test] -// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue. -// See <https://github.com/rust-lang/rust/issues/121950> for details. -#[cfg_attr(all(miri, target_os = "macos"), ignore)] -fn test_downgrade_atomic() { - const NEW_VALUE: i32 = -1; - - // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been - // downgraded, the lock must be in read mode and no other threads can take the write lock to - // modify the protected value. - - // `W` is the number of evil writer threads. - const W: usize = 20; - let rwlock = Arc::new(RwLock::new(0)); - - // Spawns many evil writer threads that will try and write to the locked value before the - // initial writer (who has the exclusive lock) can read after it downgrades. - // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote - // itself as no other thread should be able to mutate the protected value. - - // Put the lock in write mode, causing all future threads trying to access this go to sleep. - let mut main_write_guard = rwlock.write().unwrap(); - - // Spawn all of the evil writer threads. They will each increment the protected value by 1. - let handles: Vec<_> = (0..W) - .map(|_| { - let rwlock = rwlock.clone(); - thread::spawn(move || { - // Will go to sleep since the main thread initially has the write lock. 
- let mut evil_guard = rwlock.write().unwrap(); - *evil_guard += 1; - }) - }) - .collect(); - - // Wait for a good amount of time so that evil threads go to sleep. - // Note: this is not strictly necessary... - let eternity = std::time::Duration::from_millis(42); - thread::sleep(eternity); - - // Once everyone is asleep, set the value to `NEW_VALUE`. - *main_write_guard = NEW_VALUE; - - // Atomically downgrade the write guard into a read guard. - let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard); - - // If the above is not atomic, then it would be possible for an evil thread to get in front of - // this read and change the value to be non-negative. - assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic"); - - // Drop the main read guard and allow the evil writer threads to start incrementing. - drop(main_read_guard); - - for handle in handles { - handle.join().unwrap(); - } - - let final_check = rwlock.read().unwrap(); - assert_eq!(*final_check, W as i32 + NEW_VALUE); -} diff --git a/library/std_detect/Cargo.toml b/library/std_detect/Cargo.toml index 33e6617c381..2739bb59230 100644 --- a/library/std_detect/Cargo.toml +++ b/library/std_detect/Cargo.toml @@ -21,8 +21,8 @@ is-it-maintained-open-issues = { repository = "rust-lang/stdarch" } maintenance = { status = "experimental" } [dependencies] -core = { path = "../core" } -alloc = { path = "../alloc" } +core = { version = "1.0.0", package = 'rustc-std-workspace-core' } +alloc = { version = "1.0.0", package = 'rustc-std-workspace-alloc' } [target.'cfg(not(windows))'.dependencies] libc = { version = "0.2.0", optional = true, default-features = false } diff --git a/library/std_detect/src/detect/arch/loongarch.rs b/library/std_detect/src/detect/arch/loongarch.rs index 68fc600fa8e..d5a442fbbb8 100644 --- a/library/std_detect/src/detect/arch/loongarch.rs +++ b/library/std_detect/src/detect/arch/loongarch.rs @@ -8,6 +8,7 @@ features! { /// Checks if `loongarch` feature is enabled. /// Supported arguments are: /// + /// * `"32s"` /// * `"f"` /// * `"d"` /// * `"frecipe"` @@ -22,6 +23,8 @@ features! { /// * `"lvz"` /// * `"ual"` #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] + @FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] _32s: "32s"; + /// 32S @FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] f: "f"; /// F @FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] d: "d"; diff --git a/library/std_detect/src/detect/arch/riscv.rs b/library/std_detect/src/detect/arch/riscv.rs index 1d21b1d4855..1e57d09edb1 100644 --- a/library/std_detect/src/detect/arch/riscv.rs +++ b/library/std_detect/src/detect/arch/riscv.rs @@ -37,90 +37,121 @@ features! 
{ /// /// # Unprivileged Specification /// - /// The supported ratified RISC-V instruction sets are as follows: + /// The supported ratified RISC-V instruction sets are as follows (OS + /// columns denote runtime feature detection support with or without the + /// minimum supported version): /// - /// * RV32E: `"rv32e"` - /// * RV32I: `"rv32i"` - /// * RV64I: `"rv64i"` - /// * A: `"a"` - /// * Zaamo: `"zaamo"` - /// * Zalrsc: `"zalrsc"` - /// * B: `"b"` - /// * Zba: `"zba"` - /// * Zbb: `"zbb"` - /// * Zbs: `"zbs"` - /// * C: `"c"` - /// * Zca: `"zca"` - /// * Zcd: `"zcd"` (if D is enabled) - /// * Zcf: `"zcf"` (if F is enabled on RV32) - /// * D: `"d"` - /// * F: `"f"` - /// * M: `"m"` - /// * Q: `"q"` - /// * V: `"v"` - /// * Zve32x: `"zve32x"` - /// * Zve32f: `"zve32f"` - /// * Zve64x: `"zve64x"` - /// * Zve64f: `"zve64f"` - /// * Zve64d: `"zve64d"` - /// * Zicbom: `"zicbom"` - /// * Zicboz: `"zicboz"` - /// * Zicntr: `"zicntr"` - /// * Zicond: `"zicond"` - /// * Zicsr: `"zicsr"` - /// * Zifencei: `"zifencei"` - /// * Zihintntl: `"zihintntl"` - /// * Zihintpause: `"zihintpause"` - /// * Zihpm: `"zihpm"` - /// * Zimop: `"zimop"` - /// * Zabha: `"zabha"` - /// * Zacas: `"zacas"` - /// * Zawrs: `"zawrs"` - /// * Zfa: `"zfa"` - /// * Zfbfmin: `"zfbfmin"` - /// * Zfh: `"zfh"` - /// * Zfhmin: `"zfhmin"` - /// * Zfinx: `"zfinx"` - /// * Zdinx: `"zdinx"` - /// * Zhinx: `"zhinx"` - /// * Zhinxmin: `"zhinxmin"` - /// * Zcb: `"zcb"` - /// * Zcmop: `"zcmop"` - /// * Zbc: `"zbc"` - /// * Zbkb: `"zbkb"` - /// * Zbkc: `"zbkc"` - /// * Zbkx: `"zbkx"` - /// * Zk: `"zk"` - /// * Zkn: `"zkn"` - /// * Zknd: `"zknd"` - /// * Zkne: `"zkne"` - /// * Zknh: `"zknh"` - /// * Zkr: `"zkr"` - /// * Zks: `"zks"` - /// * Zksed: `"zksed"` - /// * Zksh: `"zksh"` - /// * Zkt: `"zkt"` - /// * Zvbb: `"zvbb"` - /// * Zvbc: `"zvbc"` - /// * Zvfbfmin: `"zvfbfmin"` - /// * Zvfbfwma: `"zvfbfwma"` - /// * Zvfh: `"zvfh"` - /// * Zvfhmin: `"zvfhmin"` - /// * Zvkb: `"zvkb"` - /// * Zvkg: `"zvkg"` - /// * Zvkn: `"zvkn"` - /// * Zvkned: `"zvkned"` - /// * Zvknha: `"zvknha"` - /// * Zvknhb: `"zvknhb"` - /// * Zvknc: `"zvknc"` - /// * Zvkng: `"zvkng"` - /// * Zvks: `"zvks"` - /// * Zvksed: `"zvksed"` - /// * Zvksh: `"zvksh"` - /// * Zvksc: `"zvksc"` - /// * Zvksg: `"zvksg"` - /// * Zvkt: `"zvkt"` - /// * Ztso: `"ztso"` + /// | Literal | Base | Linux | + /// |:---------- |:------- |:---------- | + /// | `"rv32e"` | RV32E | No | + /// | `"rv32i"` | RV32I | Yes [^ima] | + /// | `"rv64i"` | RV64I | Yes [^ima] | + /// + /// | Literal | Extension | Linux | + /// |:--------------- |:----------- |:------------------- | + /// | `"a"` | A | Yes [^ima] | + /// | `"b"` | B | 6.5 | + /// | `"c"` | C | Yes | + /// | `"d"` | D | Yes | + /// | `"f"` | F | Yes | + /// | `"m"` | M | Yes [^ima] | + /// | `"q"` | Q | No | + /// | `"v"` | V | 6.5 | + /// | `"zaamo"` | Zaamo | 6.15 [^ima] [^dep] | + /// | `"zabha"` | Zabha | 6.16 | + /// | `"zacas"` | Zacas | 6.8 | + /// | `"zalrsc"` | Zalrsc | 6.15 [^ima] [^dep] | + /// | `"zawrs"` | Zawrs | 6.11 | + /// | `"zba"` | Zba | 6.5 | + /// | `"zbb"` | Zbb | 6.5 | + /// | `"zbc"` | Zbc | 6.8 | + /// | `"zbkb"` | Zbkb | 6.8 | + /// | `"zbkc"` | Zbkc | 6.8 | + /// | `"zbkx"` | Zbkx | 6.8 | + /// | `"zbs"` | Zbs | 6.5 | + /// | `"zca"` | Zca | 6.11 [^dep] | + /// | `"zcb"` | Zcb | 6.11 | + /// | `"zcd"` | Zcd | 6.11 [^dep] | + /// | `"zcf"` | Zcf | 6.11 [^dep] | + /// | `"zcmop"` | Zcmop | 6.11 | + /// | `"zdinx"` | Zdinx | No | + /// | `"zfa"` | Zfa | 6.8 | + /// | `"zfbfmin"` | Zfbfmin | 6.15 | + /// | `"zfh"` | 
Zfh | 6.8 | + /// | `"zfhmin"` | Zfhmin | 6.8 | + /// | `"zfinx"` | Zfinx | No | + /// | `"zhinx"` | Zhinx | No | + /// | `"zhinxmin"` | Zhinxmin | No | + /// | `"zicbom"` | Zicbom | 6.15 | + /// | `"zicboz"` | Zicboz | 6.7 | + /// | `"zicntr"` | Zicntr | 6.15 [^ima] [^cntr] | + /// | `"zicond"` | Zicond | 6.8 | + /// | `"zicsr"` | Zicsr | No [^ima] [^dep] | + /// | `"zifencei"` | Zifencei | No [^ima] | + /// | `"zihintntl"` | Zihintntl | 6.8 | + /// | `"zihintpause"` | Zihintpause | 6.10 | + /// | `"zihpm"` | Zihpm | 6.15 [^cntr] | + /// | `"zimop"` | Zimop | 6.11 | + /// | `"zk"` | Zk | No [^zkr] | + /// | `"zkn"` | Zkn | 6.8 | + /// | `"zknd"` | Zknd | 6.8 | + /// | `"zkne"` | Zkne | 6.8 | + /// | `"zknh"` | Zknh | 6.8 | + /// | `"zkr"` | Zkr | No [^zkr] | + /// | `"zks"` | Zks | 6.8 | + /// | `"zksed"` | Zksed | 6.8 | + /// | `"zksh"` | Zksh | 6.8 | + /// | `"zkt"` | Zkt | 6.8 | + /// | `"ztso"` | Ztso | 6.8 | + /// | `"zvbb"` | Zvbb | 6.8 | + /// | `"zvbc"` | Zvbc | 6.8 | + /// | `"zve32f"` | Zve32f | 6.11 [^dep] | + /// | `"zve32x"` | Zve32x | 6.11 [^dep] | + /// | `"zve64d"` | Zve64d | 6.11 [^dep] | + /// | `"zve64f"` | Zve64f | 6.11 [^dep] | + /// | `"zve64x"` | Zve64x | 6.11 [^dep] | + /// | `"zvfbfmin"` | Zvfbfmin | 6.15 | + /// | `"zvfbfwma"` | Zvfbfwma | 6.15 | + /// | `"zvfh"` | Zvfh | 6.8 | + /// | `"zvfhmin"` | Zvfhmin | 6.8 | + /// | `"zvkb"` | Zvkb | 6.8 | + /// | `"zvkg"` | Zvkg | 6.8 | + /// | `"zvkn"` | Zvkn | 6.8 | + /// | `"zvknc"` | Zvknc | 6.8 | + /// | `"zvkned"` | Zvkned | 6.8 | + /// | `"zvkng"` | Zvkng | 6.8 | + /// | `"zvknha"` | Zvknha | 6.8 | + /// | `"zvknhb"` | Zvknhb | 6.8 | + /// | `"zvks"` | Zvks | 6.8 | + /// | `"zvksc"` | Zvksc | 6.8 | + /// | `"zvksed"` | Zvksed | 6.8 | + /// | `"zvksg"` | Zvksg | 6.8 | + /// | `"zvksh"` | Zvksh | 6.8 | + /// | `"zvkt"` | Zvkt | 6.8 | + /// + /// [^ima]: Or enabled when the IMA base behavior is detected on the Linux + /// kernel version 6.4 or later (for bases, the only matching one -- either + /// `"rv32i"` or `"rv64i"` -- is enabled). + /// + /// [^cntr]: Even if this extension is available, it does not necessarily + /// mean all performance counters are accessible. + /// For example, accesses to all performance counters except `time` + /// (wall-clock) are blocked by default on the Linux kernel + /// version 6.6 or later. + /// Also beware that, even if performance counters like `cycle` and + /// `instret` are accessible, their value can be unreliable (e.g. returning + /// the constant value) under certain circumstances. + /// + /// [^dep]: Or enabled as a dependency of another extension (a superset) + /// even if runtime detection of this feature itself is not supported (as + /// long as the runtime detection of the superset is supported). + /// + /// [^zkr]: Linux does not report existence of this extension even if + /// supported by the hardware mainly because the `seed` CSR on the Zkr + /// extension (which provides hardware-based randomness) is normally + /// inaccessible from the user mode. + /// For the Zk extension features except this CSR, check existence of both + /// `"zkn"` and `"zkt"` features instead. /// /// There's also bases and extensions marked as standard instruction set, /// but they are in frozen or draft state. 
These instruction sets are also diff --git a/library/std_detect/src/detect/arch/x86.rs b/library/std_detect/src/detect/arch/x86.rs index 28b3e3cfb35..bd749b88f56 100644 --- a/library/std_detect/src/detect/arch/x86.rs +++ b/library/std_detect/src/detect/arch/x86.rs @@ -233,6 +233,12 @@ features! { /// AMX-TF32 (TensorFloat32 Operations) @FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_transpose: "amx-transpose"; /// AMX-TRANSPOSE (Matrix Transpose Operations) + @FEATURE: #[unstable(feature = "apx_target_feature", issue = "139284")] apxf: "apxf"; + /// APX-F (Advanced Performance Extensions - Foundation) + @FEATURE: #[unstable(feature = "avx10_target_feature", issue = "138843")] avx10_1: "avx10.1"; + /// AVX10.1 + @FEATURE: #[unstable(feature = "avx10_target_feature", issue = "138843")] avx10_2: "avx10.2"; + /// AVX10.2 @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] f16c: "f16c"; /// F16C (Conversions between IEEE-754 `binary16` and `binary32` formats) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] fma: "fma"; diff --git a/library/std_detect/src/detect/os/linux/loongarch.rs b/library/std_detect/src/detect/os/linux/loongarch.rs index e97fda11d08..74415266f8b 100644 --- a/library/std_detect/src/detect/os/linux/loongarch.rs +++ b/library/std_detect/src/detect/os/linux/loongarch.rs @@ -17,22 +17,21 @@ pub(crate) fn detect_features() -> cache::Initializer { // The values are part of the platform-specific [cpucfg] // // [cpucfg]: LoongArch Reference Manual Volume 1: Basic Architecture v1.1 + let cpucfg1: usize; let cpucfg2: usize; - unsafe { - asm!( - "cpucfg {}, {}", - out(reg) cpucfg2, in(reg) 2, - options(pure, nomem, preserves_flags, nostack) - ); - } let cpucfg3: usize; unsafe { asm!( "cpucfg {}, {}", + "cpucfg {}, {}", + "cpucfg {}, {}", + out(reg) cpucfg1, in(reg) 1, + out(reg) cpucfg2, in(reg) 2, out(reg) cpucfg3, in(reg) 3, options(pure, nomem, preserves_flags, nostack) ); } + enable_feature(&mut value, Feature::_32s, bit::test(cpucfg1, 0) || bit::test(cpucfg1, 1)); enable_feature(&mut value, Feature::frecipe, bit::test(cpucfg2, 25)); enable_feature(&mut value, Feature::div32, bit::test(cpucfg2, 26)); enable_feature(&mut value, Feature::lam_bh, bit::test(cpucfg2, 27)); diff --git a/library/std_detect/src/detect/os/riscv.rs b/library/std_detect/src/detect/os/riscv.rs index c6acbd3525b..9b9e0cba09d 100644 --- a/library/std_detect/src/detect/os/riscv.rs +++ b/library/std_detect/src/detect/os/riscv.rs @@ -119,11 +119,31 @@ pub(crate) fn imply_features(mut value: cache::Initializer) -> cache::Initialize imply!(d | zfhmin | zfa => f); imply!(zfbfmin => f); // and some of (not all) "Zfh" instructions. - // Relatively complex implication rules from the "C" extension. + // Relatively complex implication rules around the "C" extension. + // (from "C" and some others) imply!(c => zca); imply!(c & d => zcd); #[cfg(target_arch = "riscv32")] imply!(c & f => zcf); + // (to "C"; defined as superset) + cfg_select! 
{ + target_arch = "riscv32" => { + if value.test(Feature::d as u32) { + imply!(zcf & zcd => c); + } else if value.test(Feature::f as u32) { + imply!(zcf => c); + } else { + imply!(zca => c); + } + } + _ => { + if value.test(Feature::d as u32) { + imply!(zcd => c); + } else { + imply!(zca => c); + } + } + } imply!(zicntr | zihpm | f | zfinx | zve32x => zicsr); diff --git a/library/std_detect/src/detect/os/x86.rs b/library/std_detect/src/detect/os/x86.rs index 20f848ab05c..cf11d833312 100644 --- a/library/std_detect/src/detect/os/x86.rs +++ b/library/std_detect/src/detect/os/x86.rs @@ -137,6 +137,32 @@ pub(crate) fn detect_features() -> cache::Initializer { enable(ebx, 2, Feature::widekl); } + // This detects ABM on AMD CPUs and LZCNT on Intel CPUs. + // On intel CPUs with popcnt, lzcnt implements the + // "missing part" of ABM, so we map both to the same + // internal feature. + // + // The `is_x86_feature_detected!("lzcnt")` macro then + // internally maps to Feature::abm. + enable(extended_proc_info_ecx, 5, Feature::lzcnt); + + // As Hygon Dhyana originates from AMD technology and shares most of the architecture with + // AMD's family 17h, but with different CPU Vendor ID("HygonGenuine")/Family series + // number(Family 18h). + // + // For CPUID feature bits, Hygon Dhyana(family 18h) share the same definition with AMD + // family 17h. + // + // Related AMD CPUID specification is https://www.amd.com/system/files/TechDocs/25481.pdf. + // Related Hygon kernel patch can be found on + // http://lkml.kernel.org/r/5ce86123a7b9dad925ac583d88d2f921040e859b.1538583282.git.puwen@hygon.cn + if vendor_id == *b"AuthenticAMD" || vendor_id == *b"HygonGenuine" { + // These features are available on AMD arch CPUs: + enable(extended_proc_info_ecx, 6, Feature::sse4a); + enable(extended_proc_info_ecx, 21, Feature::tbm); + enable(extended_proc_info_ecx, 11, Feature::xop); + } + // `XSAVE` and `AVX` support: let cpu_xsave = bit::test(proc_info_ecx as usize, 26); if cpu_xsave { @@ -161,6 +187,7 @@ pub(crate) fn detect_features() -> cache::Initializer { // * AVX -> `XCR0.AVX[2]` // * AVX-512 -> `XCR0.AVX-512[7:5]`. // * AMX -> `XCR0.AMX[18:17]` + // * APX -> `XCR0.APX[19]` // // by setting the corresponding bits of `XCR0` to `1`. // @@ -173,6 +200,8 @@ pub(crate) fn detect_features() -> cache::Initializer { let os_avx512_support = xcr0 & 0xe0 == 0xe0; // Test `XCR0.AMX[18:17]` with the mask `0b110_0000_0000_0000_0000 == 0x60000` let os_amx_support = xcr0 & 0x60000 == 0x60000; + // Test `XCR0.APX[19]` with the mask `0b1000_0000_0000_0000_0000 == 0x80000` + let os_apx_support = xcr0 & 0x80000 == 0x80000; // Only if the OS and the CPU support saving/restoring the AVX // registers we enable `xsave` support: @@ -262,33 +291,20 @@ pub(crate) fn detect_features() -> cache::Initializer { enable(amx_feature_flags_eax, 8, Feature::amx_movrs); } } - } - } - // This detects ABM on AMD CPUs and LZCNT on Intel CPUs. - // On intel CPUs with popcnt, lzcnt implements the - // "missing part" of ABM, so we map both to the same - // internal feature. - // - // The `is_x86_feature_detected!("lzcnt")` macro then - // internally maps to Feature::abm. - enable(extended_proc_info_ecx, 5, Feature::lzcnt); + if os_apx_support { + enable(extended_features_edx_leaf_1, 21, Feature::apxf); + } - // As Hygon Dhyana originates from AMD technology and shares most of the architecture with - // AMD's family 17h, but with different CPU Vendor ID("HygonGenuine")/Family series - // number(Family 18h). 
- // - // For CPUID feature bits, Hygon Dhyana(family 18h) share the same definition with AMD - // family 17h. - // - // Related AMD CPUID specification is https://www.amd.com/system/files/TechDocs/25481.pdf. - // Related Hygon kernel patch can be found on - // http://lkml.kernel.org/r/5ce86123a7b9dad925ac583d88d2f921040e859b.1538583282.git.puwen@hygon.cn - if vendor_id == *b"AuthenticAMD" || vendor_id == *b"HygonGenuine" { - // These features are available on AMD arch CPUs: - enable(extended_proc_info_ecx, 6, Feature::sse4a); - enable(extended_proc_info_ecx, 21, Feature::tbm); - enable(extended_proc_info_ecx, 11, Feature::xop); + let avx10_1 = enable(extended_features_edx_leaf_1, 19, Feature::avx10_1); + if avx10_1 { + let CpuidResult { ebx, .. } = unsafe { __cpuid(0x24) }; + let avx10_version = ebx & 0xff; + if avx10_version >= 2 { + value.set(Feature::avx10_2 as u32); + } + } + } } } diff --git a/library/std_detect/tests/macro_trailing_commas.rs b/library/std_detect/tests/macro_trailing_commas.rs index 2fee0abdd57..6072ddf5ac4 100644 --- a/library/std_detect/tests/macro_trailing_commas.rs +++ b/library/std_detect/tests/macro_trailing_commas.rs @@ -69,6 +69,8 @@ fn aarch64() { #[test] #[cfg(any(target_arch = "loongarch32", target_arch = "loongarch64"))] fn loongarch() { + let _ = is_loongarch_feature_detected!("32s"); + let _ = is_loongarch_feature_detected!("32s",); let _ = is_loongarch_feature_detected!("lsx"); let _ = is_loongarch_feature_detected!("lsx",); } |
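
All of the OS-support checks added to `detect/os/x86.rs` above reduce to the same mask-and-compare pattern on the XCR0 register. A compact restatement (the bit masks are copied from the hunk; the helper names and the sample value are invented for illustration):

// XCR0.AVX-512[7:5]: all three AVX-512 state components must be enabled.
fn os_avx512_support(xcr0: u64) -> bool {
    xcr0 & 0xe0 == 0xe0
}

// XCR0.AMX[18:17]: both AMX tile state components must be enabled.
fn os_amx_support(xcr0: u64) -> bool {
    xcr0 & 0x60000 == 0x60000
}

// XCR0.APX[19]: the new check introduced in this diff.
fn os_apx_support(xcr0: u64) -> bool {
    xcr0 & 0x80000 == 0x80000
}

fn main() {
    // Hypothetical XCR0 value with x87/SSE/AVX (bits 0-2), AVX-512 (7:5),
    // AMX (18:17) and APX (19) state saving all enabled by the OS.
    let xcr0: u64 = 0xe00e7;
    assert!(os_avx512_support(xcr0) && os_amx_support(xcr0) && os_apx_support(xcr0));
}
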

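At the user level, every feature string touched by this diff is consumed through the same `is_*_feature_detected!` macros exercised by the trailing-comma tests at the end. A minimal usage sketch (it deliberately uses the long-stable `"avx2"` string; the strings added here, such as `"avx10.1"`, `"apxf"`, and `"32s"`, would additionally require their unstable feature gates on nightly):

#[cfg(target_arch = "x86_64")]
fn main() {
    // Runtime dispatch: take the SIMD path only when the CPU and OS both
    // support it, falling back to scalar code otherwise.
    if is_x86_feature_detected!("avx2") {
        println!("AVX2 available: using the vectorized path");
    } else {
        println!("AVX2 unavailable: using the scalar fallback");
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}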