| author    | bors <bors@rust-lang.org> | 2020-11-02 10:42:45 +0000 |
|-----------|---------------------------|---------------------------|
| committer | bors <bors@rust-lang.org> | 2020-11-02 10:42:45 +0000 |
| commit    | 4051473c8b5158984a5253d1b5faad6a94de7682 (patch) | |
| tree      | a3dcca4b2e5d8b9a5b0a3d058010826b5901d30f /library | |
| parent    | 234099d1d12bef9d6e81a296222fbc272dc51d89 (diff) | |
| parent    | 50d7716efb7cffb43a0ca77c723754ad2174e9cc (diff) | |
| download  | rust-4051473c8b5158984a5253d1b5faad6a94de7682.tar.gz | rust-4051473c8b5158984a5253d1b5faad6a94de7682.zip |
Auto merge of #78661 - JohnTitor:rollup-er2isja, r=JohnTitor
Rollup of 5 pull requests

Successful merges:

 - #78606 (Clarify handling of final line ending in str::lines())
 - #78610 (Do not remove tokens before AST json serialization)
 - #78620 (Trivial fixes to bitwise operator documentation)
 - #78627 (Point out that total_cmp is no strict superset of partial comparison)
 - #78637 (Add fetch_update methods to AtomicBool and AtomicPtr)

Failed merges:

r? `@ghost`
Diffstat (limited to 'library')
| -rw-r--r-- | library/core/src/num/f32.rs     |   4 |
| -rw-r--r-- | library/core/src/num/f64.rs     |   4 |
| -rw-r--r-- | library/core/src/ops/bit.rs     |  35 |
| -rw-r--r-- | library/core/src/str/mod.rs     |   4 |
| -rw-r--r-- | library/core/src/sync/atomic.rs | 125 |
5 files changed, 159 insertions, 13 deletions
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index bf7c87f685d..86e6352d132 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -876,6 +876,10 @@ impl f32 {
     /// - Positive signaling NaN
     /// - Positive quiet NaN
     ///
+    /// Note that this function does not always agree with the [`PartialOrd`]
+    /// and [`PartialEq`] implementations of `f32`. In particular, they regard
+    /// negative and positive zero as equal, while `total_cmp` doesn't.
+    ///
     /// # Example
     /// ```
     /// #![feature(total_cmp)]
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index e31e176ba1b..9b1405b479f 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -890,6 +890,10 @@ impl f64 {
     /// - Positive signaling NaN
     /// - Positive quiet NaN
     ///
+    /// Note that this function does not always agree with the [`PartialOrd`]
+    /// and [`PartialEq`] implementations of `f64`. In particular, they regard
+    /// negative and positive zero as equal, while `total_cmp` doesn't.
+    ///
     /// # Example
     /// ```
     /// #![feature(total_cmp)]
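The note added to both `total_cmp` docs pins down a concrete difference that is easy to check. A minimal sketch of the distinction, assuming a nightly toolchain of this era where the `total_cmp` feature gate is still required:

```rust
// Sketch: total_cmp vs. PartialEq/PartialOrd, per the doc note above.
#![feature(total_cmp)] // nightly-only at the time of this commit
use std::cmp::Ordering;

fn main() {
    // PartialEq regards negative and positive zero as equal...
    assert!(-0.0_f32 == 0.0_f32);
    // ...while total_cmp orders -0.0 strictly before +0.0.
    assert_eq!((-0.0_f32).total_cmp(&0.0_f32), Ordering::Less);

    // PartialOrd cannot order NaN at all...
    assert_eq!(f32::NAN.partial_cmp(&f32::NAN), None);
    // ...while total_cmp gives it a fixed place in the total order.
    assert_eq!(f32::NAN.total_cmp(&f32::NAN), Ordering::Equal);
}
```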
diff --git a/library/core/src/ops/bit.rs b/library/core/src/ops/bit.rs
index 6120da50c3c..51f80438173 100644
--- a/library/core/src/ops/bit.rs
+++ b/library/core/src/ops/bit.rs
@@ -109,10 +109,12 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 ///     fn bitand(self, Self(rhs): Self) -> Self::Output {
 ///         let Self(lhs) = self;
 ///         assert_eq!(lhs.len(), rhs.len());
-///         Self(lhs.iter()
+///         Self(
+///             lhs.iter()
 ///                 .zip(rhs.iter())
-///             .map(|(x, y)| *x && *y)
-///             .collect())
+///                 .map(|(x, y)| *x & *y)
+///                 .collect()
+///         )
 ///     }
 /// }
 ///
@@ -207,7 +209,12 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 ///     fn bitor(self, Self(rhs): Self) -> Self::Output {
 ///         let Self(lhs) = self;
 ///         assert_eq!(lhs.len(), rhs.len());
-///         Self(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x || *y).collect())
+///         Self(
+///             lhs.iter()
+///                 .zip(rhs.iter())
+///                 .map(|(x, y)| *x | *y)
+///                 .collect()
+///         )
 ///     }
 /// }
 ///
@@ -302,10 +309,12 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 ///     fn bitxor(self, Self(rhs): Self) -> Self::Output {
 ///         let Self(lhs) = self;
 ///         assert_eq!(lhs.len(), rhs.len());
-///         Self(lhs.iter()
+///         Self(
+///             lhs.iter()
 ///                 .zip(rhs.iter())
-///             .map(|(x, y)| (*x || *y) && !(*x && *y))
-///             .collect())
+///                 .map(|(x, y)| *x ^ *y)
+///                 .collect()
+///         )
 ///     }
 /// }
 ///
@@ -643,11 +652,13 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
 ///     // `rhs` is the "right-hand side" of the expression `a &= b`.
 ///     fn bitand_assign(&mut self, rhs: Self) {
 ///         assert_eq!(self.0.len(), rhs.0.len());
-///         *self = Self(self.0
-///             .iter()
-///             .zip(rhs.0.iter())
-///             .map(|(x, y)| *x && *y)
-///             .collect());
+///         *self = Self(
+///             self.0
+///                 .iter()
+///                 .zip(rhs.0.iter())
+///                 .map(|(x, y)| *x & *y)
+///                 .collect()
+///         );
 ///     }
 /// }
 ///
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index ab0c8739330..23d63a4787e 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -842,7 +842,9 @@ impl str {
     /// Lines are ended with either a newline (`\n`) or a carriage return with
     /// a line feed (`\r\n`).
     ///
-    /// The final line ending is optional.
+    /// The final line ending is optional. A string that ends with a final line
+    /// ending will return the same lines as an otherwise identical string
+    /// without a final line ending.
     ///
     /// # Examples
     ///
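The reworded `str::lines` doc describes an equivalence that a short check makes concrete. A minimal sketch of the documented behavior (variable names are illustrative):

```rust
// Sketch: a final line ending does not add an extra (empty) final line.
fn main() {
    let with_ending: Vec<&str> = "foo\r\nbar\n".lines().collect();
    let without_ending: Vec<&str> = "foo\r\nbar".lines().collect();
    assert_eq!(with_ending, ["foo", "bar"]);
    assert_eq!(with_ending, without_ending);

    // An interior line ending, by contrast, does delimit an empty line.
    let interior: Vec<&str> = "foo\n\nbar".lines().collect();
    assert_eq!(interior, ["foo", "", "bar"]);
}
```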
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index a3d93d7074b..0c53b6ed54a 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -801,6 +801,64 @@ impl AtomicBool {
     pub fn as_mut_ptr(&self) -> *mut bool {
         self.v.get() as *mut bool
     }
+
+    /// Fetches the value, and applies a function to it that returns an optional
+    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
+    /// returned `Some(_)`, else `Err(previous_value)`.
+    ///
+    /// Note: This may call the function multiple times if the value has been
+    /// changed from other threads in the meantime, as long as the function
+    /// returns `Some(_)`, but the function will have been applied only once to
+    /// the stored value.
+    ///
+    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
+    /// ordering of this operation. The first describes the required ordering for
+    /// when the operation finally succeeds while the second describes the
+    /// required ordering for loads. These correspond to the success and failure
+    /// orderings of [`AtomicBool::compare_exchange`] respectively.
+    ///
+    /// Using [`Acquire`] as success ordering makes the store part of this
+    /// operation [`Relaxed`], and using [`Release`] makes the final successful
+    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
+    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
+    /// success ordering.
+    ///
+    /// **Note:** This method is only available on platforms that support atomic
+    /// operations on `u8`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// #![feature(atomic_fetch_update)]
+    /// use std::sync::atomic::{AtomicBool, Ordering};
+    ///
+    /// let x = AtomicBool::new(false);
+    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
+    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
+    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
+    /// assert_eq!(x.load(Ordering::SeqCst), false);
+    /// ```
+    #[inline]
+    #[unstable(feature = "atomic_fetch_update", reason = "recently added", issue = "78639")]
+    #[cfg(target_has_atomic = "8")]
+    pub fn fetch_update<F>(
+        &self,
+        set_order: Ordering,
+        fetch_order: Ordering,
+        mut f: F,
+    ) -> Result<bool, bool>
+    where
+        F: FnMut(bool) -> Option<bool>,
+    {
+        let mut prev = self.load(fetch_order);
+        while let Some(next) = f(prev) {
+            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+                x @ Ok(_) => return x,
+                Err(next_prev) => prev = next_prev,
+            }
+        }
+        Err(prev)
+    }
 }
 
 #[cfg(target_has_atomic_load_store = "ptr")]
@@ -1123,6 +1181,73 @@ impl<T> AtomicPtr<T> {
             }
         }
     }
+
+    /// Fetches the value, and applies a function to it that returns an optional
+    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
+    /// returned `Some(_)`, else `Err(previous_value)`.
+    ///
+    /// Note: This may call the function multiple times if the value has been
+    /// changed from other threads in the meantime, as long as the function
+    /// returns `Some(_)`, but the function will have been applied only once to
+    /// the stored value.
+    ///
+    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
+    /// ordering of this operation. The first describes the required ordering for
+    /// when the operation finally succeeds while the second describes the
+    /// required ordering for loads. These correspond to the success and failure
+    /// orderings of [`AtomicPtr::compare_exchange`] respectively.
+    ///
+    /// Using [`Acquire`] as success ordering makes the store part of this
+    /// operation [`Relaxed`], and using [`Release`] makes the final successful
+    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
+    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
+    /// success ordering.
+    ///
+    /// **Note:** This method is only available on platforms that support atomic
+    /// operations on pointers.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// #![feature(atomic_fetch_update)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let ptr: *mut _ = &mut 5;
+    /// let some_ptr = AtomicPtr::new(ptr);
+    ///
+    /// let new: *mut _ = &mut 10;
+    /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
+    /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
+    ///     if x == ptr {
+    ///         Some(new)
+    ///     } else {
+    ///         None
+    ///     }
+    /// });
+    /// assert_eq!(result, Ok(ptr));
+    /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
+    /// ```
+    #[inline]
+    #[unstable(feature = "atomic_fetch_update", reason = "recently added", issue = "78639")]
+    #[cfg(target_has_atomic = "ptr")]
+    pub fn fetch_update<F>(
+        &self,
+        set_order: Ordering,
+        fetch_order: Ordering,
+        mut f: F,
+    ) -> Result<*mut T, *mut T>
+    where
+        F: FnMut(*mut T) -> Option<*mut T>,
+    {
+        let mut prev = self.load(fetch_order);
+        while let Some(next) = f(prev) {
+            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+                x @ Ok(_) => return x,
+                Err(next_prev) => prev = next_prev,
+            }
+        }
+        Err(prev)
+    }
 }
 
 #[cfg(target_has_atomic_load_store = "8")]
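The new methods mirror the `fetch_update` already available, and stable, on the integer atomics at the time of this commit, so the usage pattern can be sketched on stable Rust with `AtomicUsize`; the saturating-counter closure here is purely illustrative:

```rust
// Sketch: the fetch_update pattern — returning None aborts the update and
// yields Err(previous_value); Some(next) retries a weak CAS until it sticks.
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let counter = AtomicUsize::new(7);
    let cap = 8;

    // Saturating increment: refuse to pass the cap by returning None.
    let incr = |x: usize| if x < cap { Some(x + 1) } else { None };

    assert_eq!(counter.fetch_update(Ordering::SeqCst, Ordering::SeqCst, incr), Ok(7));
    assert_eq!(counter.fetch_update(Ordering::SeqCst, Ordering::SeqCst, incr), Err(8));
    assert_eq!(counter.load(Ordering::SeqCst), 8);
}
```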