Diffstat (limited to 'library')
158 files changed, 3309 insertions, 1673 deletions
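The largest piece of this changeset is the new unstable cursor API for `BTreeMap` (feature `btree_cursors`, tracking issue #107540), alongside `VecDeque` `IntoIter` specializations, a panic-safety fix for `BinaryHeap::retain`, and assorted cleanups. As orientation, here is a sketch of how the cursor API fits together, condensed from the doc-tests and unit tests added below (nightly-only, since the feature is unstable):

```
#![feature(btree_cursors)]

use std::collections::BTreeMap;
use std::ops::Bound;

fn main() {
    let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]);

    // Seek to the first entry whose key is strictly above 3.
    let mut cur = map.lower_bound_mut(Bound::Excluded(&3));
    assert_eq!(cur.key(), Some(&5));

    // Edit the tree through the cursor without re-searching from the root.
    cur.insert_before(4, 'd');
    assert_eq!(cur.peek_prev(), Some((&4, &mut 'd')));
    assert_eq!(cur.remove_current(), Some((5, 'e')));
}
```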
diff --git a/library/alloc/benches/vec_deque.rs b/library/alloc/benches/vec_deque.rs index 7c78561ebf1..313a97ed1ff 100644 --- a/library/alloc/benches/vec_deque.rs +++ b/library/alloc/benches/vec_deque.rs @@ -1,4 +1,8 @@ -use std::collections::VecDeque; +use core::iter::Iterator; +use std::{ + collections::{vec_deque, VecDeque}, + mem, +}; use test::{black_box, Bencher}; #[bench] @@ -53,6 +57,146 @@ fn bench_try_fold(b: &mut Bencher) { b.iter(|| black_box(ring.iter().try_fold(0, |a, b| Some(a + b)))) } +/// does the memory bookkeeping to reuse the buffer of the Vec between iterations. +/// `setup` must not modify its argument's length or capacity. `g` must not move out of its argument. +fn into_iter_helper< + T: Copy, + F: FnOnce(&mut VecDeque<T>), + G: FnOnce(&mut vec_deque::IntoIter<T>), +>( + v: &mut Vec<T>, + setup: F, + g: G, +) { + let ptr = v.as_mut_ptr(); + let len = v.len(); + // ensure that the vec is full, to make sure that any wrapping from the deque doesn't + // access uninitialized memory. + assert_eq!(v.len(), v.capacity()); + + let mut deque = VecDeque::from(mem::take(v)); + setup(&mut deque); + + let mut it = deque.into_iter(); + g(&mut it); + + mem::forget(it); + + // SAFETY: the provided functions are not allowed to modify the allocation, so the buffer is still alive. + // len and capacity are accurate due to the above assertion. + // All the elements in the buffer are still valid, because of `T: Copy` which implies `T: !Drop`. + mem::forget(mem::replace(v, unsafe { Vec::from_raw_parts(ptr, len, len) })); +} + +#[bench] +fn bench_into_iter(b: &mut Bencher) { + let len = 1024; + // we reuse this allocation for every run + let mut vec: Vec<usize> = (0..len).collect(); + vec.shrink_to_fit(); + + b.iter(|| { + let mut sum = 0; + into_iter_helper( + &mut vec, + |_| {}, + |it| { + for i in it { + sum += i; + } + }, + ); + black_box(sum); + + let mut sum = 0; + // rotating a full deque doesn't move any memory. + into_iter_helper( + &mut vec, + |d| d.rotate_left(len / 2), + |it| { + for i in it { + sum += i; + } + }, + ); + black_box(sum); + }); +} + +#[bench] +fn bench_into_iter_fold(b: &mut Bencher) { + let len = 1024; + + // because `fold` takes ownership of the iterator, + // we can't prevent it from dropping the memory, + // so we have to bite the bullet and reallocate + // for every iteration. + b.iter(|| { + let deque: VecDeque<usize> = (0..len).collect(); + assert_eq!(deque.len(), deque.capacity()); + let sum = deque.into_iter().fold(0, |a, b| a + b); + black_box(sum); + + // rotating a full deque doesn't move any memory. 
+ let mut deque: VecDeque<usize> = (0..len).collect(); + assert_eq!(deque.len(), deque.capacity()); + deque.rotate_left(len / 2); + let sum = deque.into_iter().fold(0, |a, b| a + b); + black_box(sum); + }); +} + +#[bench] +fn bench_into_iter_try_fold(b: &mut Bencher) { + let len = 1024; + // we reuse this allocation for every run + let mut vec: Vec<usize> = (0..len).collect(); + vec.shrink_to_fit(); + + // Iterator::any uses Iterator::try_fold under the hood + b.iter(|| { + let mut b = false; + into_iter_helper(&mut vec, |_| {}, |it| b = it.any(|i| i == len - 1)); + black_box(b); + + into_iter_helper(&mut vec, |d| d.rotate_left(len / 2), |it| b = it.any(|i| i == len - 1)); + black_box(b); + }); +} + +#[bench] +fn bench_into_iter_next_chunk(b: &mut Bencher) { + let len = 1024; + // we reuse this allocation for every run + let mut vec: Vec<usize> = (0..len).collect(); + vec.shrink_to_fit(); + + b.iter(|| { + let mut buf = [0; 64]; + into_iter_helper( + &mut vec, + |_| {}, + |it| { + while let Ok(a) = it.next_chunk() { + buf = a; + } + }, + ); + black_box(buf); + + into_iter_helper( + &mut vec, + |d| d.rotate_left(len / 2), + |it| { + while let Ok(a) = it.next_chunk() { + buf = a; + } + }, + ); + black_box(buf); + }); +} + #[bench] fn bench_from_array_1000(b: &mut Bencher) { const N: usize = 1000; diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs index 0b73b1af4eb..f1d0a305d99 100644 --- a/library/alloc/src/collections/binary_heap/mod.rs +++ b/library/alloc/src/collections/binary_heap/mod.rs @@ -851,18 +851,30 @@ impl<T: Ord> BinaryHeap<T> { where F: FnMut(&T) -> bool, { - let mut first_removed = self.len(); + struct RebuildOnDrop<'a, T: Ord> { + heap: &'a mut BinaryHeap<T>, + first_removed: usize, + } + + let mut guard = RebuildOnDrop { first_removed: self.len(), heap: self }; + let mut i = 0; - self.data.retain(|e| { + guard.heap.data.retain(|e| { let keep = f(e); - if !keep && i < first_removed { - first_removed = i; + if !keep && i < guard.first_removed { + guard.first_removed = i; } i += 1; keep }); - // data[0..first_removed] is untouched, so we only need to rebuild the tail: - self.rebuild_tail(first_removed); + + impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> { + fn drop(&mut self) { + // data[..first_removed] is untouched, so we only need to + // rebuild the tail: + self.heap.rebuild_tail(self.first_removed); + } + } } } diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs index ffbb6c80ac0..500caa35678 100644 --- a/library/alloc/src/collections/binary_heap/tests.rs +++ b/library/alloc/src/collections/binary_heap/tests.rs @@ -474,6 +474,25 @@ fn test_retain() { assert!(a.is_empty()); } +#[test] +fn test_retain_catch_unwind() { + let mut heap = BinaryHeap::from(vec![3, 1, 2]); + + // Removes the 3, then unwinds out of retain. + let _ = catch_unwind(AssertUnwindSafe(|| { + heap.retain(|e| { + if *e == 1 { + panic!(); + } + false + }); + })); + + // Naively this would be [1, 2] (an invalid heap) if BinaryHeap delegates to + // Vec's retain impl and then does not rebuild the heap after that unwinds. 
+ assert_eq!(heap.into_vec(), [2, 1]); +} + // old binaryheap failed this test // // Integrity means that all elements are present after a comparison panics, diff --git a/library/alloc/src/collections/btree/borrow.rs b/library/alloc/src/collections/btree/borrow.rs index 016f139a501..000b9bd0fab 100644 --- a/library/alloc/src/collections/btree/borrow.rs +++ b/library/alloc/src/collections/btree/borrow.rs @@ -41,6 +41,28 @@ impl<'a, T> DormantMutRef<'a, T> { // SAFETY: our own safety conditions imply this reference is again unique. unsafe { &mut *self.ptr.as_ptr() } } + + /// Borrows a new mutable reference from the unique borrow initially captured. + /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn reborrow(&mut self) -> &'a mut T { + // SAFETY: our own safety conditions imply this reference is again unique. + unsafe { &mut *self.ptr.as_ptr() } + } + + /// Borrows a new shared reference from the unique borrow initially captured. + /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn reborrow_shared(&self) -> &'a T { + // SAFETY: our own safety conditions imply this reference is again unique. + unsafe { &*self.ptr.as_ptr() } + } } #[cfg(test)] diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index 1d9c4460ec9..386cd1a1657 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -6,7 +6,7 @@ use core::hash::{Hash, Hasher}; use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem::{self, ManuallyDrop}; -use core::ops::{Index, RangeBounds}; +use core::ops::{Bound, Index, RangeBounds}; use core::ptr; use crate::alloc::{Allocator, Global}; @@ -15,7 +15,7 @@ use super::borrow::DormantMutRef; use super::dedup_sorted_iter::DedupSortedIter; use super::navigate::{LazyLeafRange, LeafRange}; use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root}; -use super::search::SearchResult::*; +use super::search::{SearchBound, SearchResult::*}; use super::set_val::SetValZST; mod entry; @@ -2422,6 +2422,732 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { pub const fn is_empty(&self) -> bool { self.len() == 0 } + + /// Returns a [`Cursor`] pointing at the first element that is above the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the first + /// element of the map. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.lower_bound(Bound::Excluded(&2)); + /// assert_eq!(cursor.key(), Some(&3)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn lower_bound<Q>(&self, bound: Bound<&Q>) -> Cursor<'_, K, V> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let root_node = match self.root.as_ref() { + None => return Cursor { current: None, root: None }, + Some(root) => root.reborrow(), + }; + let edge = root_node.lower_bound(SearchBound::from_range(bound)); + Cursor { current: edge.next_kv().ok(), root: self.root.as_ref() } + } + + /// Returns a [`CursorMut`] pointing at the first element that is above the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the first + /// element of the map. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.lower_bound_mut(Bound::Excluded(&2)); + /// assert_eq!(cursor.key(), Some(&3)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn lower_bound_mut<Q>(&mut self, bound: Bound<&Q>) -> CursorMut<'_, K, V, A> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let (root, dormant_root) = DormantMutRef::new(&mut self.root); + let root_node = match root.as_mut() { + None => { + return CursorMut { + current: None, + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + }; + } + Some(root) => root.borrow_mut(), + }; + let edge = root_node.lower_bound(SearchBound::from_range(bound)); + CursorMut { + current: edge.next_kv().ok(), + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + } + } + + /// Returns a [`Cursor`] pointing at the last element that is below the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the last + /// element of the map. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.upper_bound(Bound::Excluded(&3)); + /// assert_eq!(cursor.key(), Some(&2)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn upper_bound<Q>(&self, bound: Bound<&Q>) -> Cursor<'_, K, V> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let root_node = match self.root.as_ref() { + None => return Cursor { current: None, root: None }, + Some(root) => root.reborrow(), + }; + let edge = root_node.upper_bound(SearchBound::from_range(bound)); + Cursor { current: edge.next_back_kv().ok(), root: self.root.as_ref() } + } + + /// Returns a [`CursorMut`] pointing at the last element that is below the + /// given bound. 
+ /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the last + /// element of the map. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.upper_bound_mut(Bound::Excluded(&3)); + /// assert_eq!(cursor.key(), Some(&2)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn upper_bound_mut<Q>(&mut self, bound: Bound<&Q>) -> CursorMut<'_, K, V, A> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let (root, dormant_root) = DormantMutRef::new(&mut self.root); + let root_node = match root.as_mut() { + None => { + return CursorMut { + current: None, + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + }; + } + Some(root) => root.borrow_mut(), + }; + let edge = root_node.upper_bound(SearchBound::from_range(bound)); + CursorMut { + current: edge.next_back_kv().ok(), + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + } + } +} + +/// A cursor over a `BTreeMap`. +/// +/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth. +/// +/// Cursors always point to an element in the tree, and index in a logically circular way. +/// To accommodate this, there is a "ghost" non-element that yields `None` between the last and +/// first elements of the tree. +/// +/// A `Cursor` is created with the [`BTreeMap::lower_bound`] and [`BTreeMap::upper_bound`] methods. +#[unstable(feature = "btree_cursors", issue = "107540")] +pub struct Cursor<'a, K: 'a, V: 'a> { + current: Option<Handle<NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>, marker::KV>>, + root: Option<&'a node::Root<K, V>>, +} + +#[unstable(feature = "btree_cursors", issue = "107540")] +impl<K, V> Clone for Cursor<'_, K, V> { + fn clone(&self) -> Self { + let Cursor { current, root } = *self; + Cursor { current, root } + } +} + +#[unstable(feature = "btree_cursors", issue = "107540")] +impl<K: Debug, V: Debug> Debug for Cursor<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Cursor").field(&self.key_value()).finish() + } +} + +/// A cursor over a `BTreeMap` with editing operations. +/// +/// A `CursorMut` is like an iterator, except that it can freely seek back-and-forth, and can +/// safely mutate the tree during iteration. This is because the lifetime of its yielded +/// references is tied to its own lifetime, instead of just the underlying tree. This means +/// cursors cannot yield multiple elements at once. +/// +/// Cursors always point to an element in the tree, and index in a logically circular way. +/// To accommodate this, there is a "ghost" non-element that yields `None` between the last and +/// first elements of the tree. +/// +/// A `CursorMut` is created with the [`BTreeMap::lower_bound_mut`] and [`BTreeMap::upper_bound_mut`] +/// methods.
+#[unstable(feature = "btree_cursors", issue = "107540")] +pub struct CursorMut< + 'a, + K: 'a, + V: 'a, + #[unstable(feature = "allocator_api", issue = "32838")] A = Global, +> { + current: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>>, + root: DormantMutRef<'a, Option<node::Root<K, V>>>, + length: &'a mut usize, + alloc: &'a mut A, +} + +#[unstable(feature = "btree_cursors", issue = "107540")] +impl<K: Debug, V: Debug, A> Debug for CursorMut<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("CursorMut").field(&self.key_value()).finish() + } +} + +impl<'a, K, V> Cursor<'a, K, V> { + /// Moves the cursor to the next element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_next(&mut self) { + match self.current.take() { + None => { + self.current = self.root.and_then(|root| { + root.reborrow().first_leaf_edge().forget_node_type().right_kv().ok() + }); + } + Some(current) => { + self.current = current.next_leaf_edge().next_kv().ok(); + } + } + } + + /// Moves the cursor to the previous element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_prev(&mut self) { + match self.current.take() { + None => { + self.current = self.root.and_then(|root| { + root.reborrow().last_leaf_edge().forget_node_type().left_kv().ok() + }); + } + Some(current) => { + self.current = current.next_back_leaf_edge().next_back_kv().ok(); + } + } + } + + /// Returns a reference to the key of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key(&self) -> Option<&'a K> { + self.current.as_ref().map(|current| current.into_kv().0) + } + + /// Returns a reference to the value of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn value(&self) -> Option<&'a V> { + self.current.as_ref().map(|current| current.into_kv().1) + } + + /// Returns a reference to the key and value of the element that the cursor + /// is currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key_value(&self) -> Option<(&'a K, &'a V)> { + self.current.as_ref().map(|current| current.into_kv()) + } + + /// Returns a reference to the next element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this returns `None`. 
+ #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_next(&self) -> Option<(&'a K, &'a V)> { + let mut next = self.clone(); + next.move_next(); + next.current.as_ref().map(|current| current.into_kv()) + } + + /// Returns a reference to the previous element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this returns `None`. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_prev(&self) -> Option<(&'a K, &'a V)> { + let mut prev = self.clone(); + prev.move_prev(); + prev.current.as_ref().map(|current| current.into_kv()) + } +} + +impl<'a, K, V, A> CursorMut<'a, K, V, A> { + /// Moves the cursor to the next element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_next(&mut self) { + match self.current.take() { + None => { + // SAFETY: The previous borrow of root has ended. + self.current = unsafe { self.root.reborrow() }.as_mut().and_then(|root| { + root.borrow_mut().first_leaf_edge().forget_node_type().right_kv().ok() + }); + } + Some(current) => { + self.current = current.next_leaf_edge().next_kv().ok(); + } + } + } + + /// Moves the cursor to the previous element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_prev(&mut self) { + match self.current.take() { + None => { + // SAFETY: The previous borrow of root has ended. + self.current = unsafe { self.root.reborrow() }.as_mut().and_then(|root| { + root.borrow_mut().last_leaf_edge().forget_node_type().left_kv().ok() + }); + } + Some(current) => { + self.current = current.next_back_leaf_edge().next_back_kv().ok(); + } + } + } + + /// Returns a reference to the key of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key(&self) -> Option<&K> { + self.current.as_ref().map(|current| current.reborrow().into_kv().0) + } + + /// Returns a reference to the value of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn value(&self) -> Option<&V> { + self.current.as_ref().map(|current| current.reborrow().into_kv().1) + } + + /// Returns a reference to the key and value of the element that the cursor + /// is currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key_value(&self) -> Option<(&K, &V)> { + self.current.as_ref().map(|current| current.reborrow().into_kv()) + } + + /// Returns a mutable reference to the value of the element that the cursor + /// is currently pointing to. 
+ /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn value_mut(&mut self) -> Option<&mut V> { + self.current.as_mut().map(|current| current.kv_mut().1) + } + + /// Returns a reference to the key and mutable reference to the value of the + /// element that the cursor is currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key_value_mut(&mut self) -> Option<(&K, &mut V)> { + self.current.as_mut().map(|current| { + let (k, v) = current.kv_mut(); + (&*k, v) + }) + } + + /// Returns a mutable reference to the key of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + /// + /// # Safety + /// + /// This can be used to modify the key, but you must ensure that the + /// `BTreeMap` invariants are maintained. Specifically: + /// + /// * The key must remain unique within the tree. + /// * The key must remain in sorted order with regards to other elements in + /// the tree. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub unsafe fn key_mut_unchecked(&mut self) -> Option<&mut K> { + self.current.as_mut().map(|current| current.kv_mut().0) + } + + /// Returns a reference to the key and value of the next element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this returns `None`. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_next(&mut self) -> Option<(&K, &mut V)> { + let (k, v) = match self.current { + None => { + // SAFETY: The previous borrow of root has ended. + unsafe { self.root.reborrow() } + .as_mut()? + .borrow_mut() + .first_leaf_edge() + .next_kv() + .ok()? + .into_kv_valmut() + } + // SAFETY: We're not using this to mutate the tree. + Some(ref mut current) => { + unsafe { current.reborrow_mut() }.next_leaf_edge().next_kv().ok()?.into_kv_valmut() + } + }; + Some((k, v)) + } + + /// Returns a reference to the key and value of the previous element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this returns `None`. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_prev(&mut self) -> Option<(&K, &mut V)> { + let (k, v) = match self.current.as_mut() { + None => { + // SAFETY: The previous borrow of root has ended. + unsafe { self.root.reborrow() } + .as_mut()? + .borrow_mut() + .last_leaf_edge() + .next_back_kv() + .ok()? + .into_kv_valmut() + } + Some(current) => { + // SAFETY: We're not using this to mutate the tree. + unsafe { current.reborrow_mut() } + .next_back_leaf_edge() + .next_back_kv() + .ok()? + .into_kv_valmut() + } + }; + Some((k, v)) + } + + /// Returns a read-only cursor pointing to the current element. + /// + /// The lifetime of the returned `Cursor` is bound to that of the + /// `CursorMut`, which means it cannot outlive the `CursorMut` and that the + /// `CursorMut` is frozen for the lifetime of the `Cursor`.
+ #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn as_cursor(&self) -> Cursor<'_, K, V> { + Cursor { + // SAFETY: The tree is immutable while the cursor exists. + root: unsafe { self.root.reborrow_shared().as_ref() }, + current: self.current.as_ref().map(|current| current.reborrow()), + } + } +} + +// Now the tree editing operations +impl<'a, K: Ord, V, A: Allocator + Clone> CursorMut<'a, K, V, A> { + /// Inserts a new element into the `BTreeMap` after the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the front of the `BTreeMap`. + /// + /// # Safety + /// + /// You must ensure that the `BTreeMap` invariants are maintained. + /// Specifically: + /// + /// * The key of the newly inserted element must be unique in the tree. + /// * All keys in the tree must remain in sorted order. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub unsafe fn insert_after_unchecked(&mut self, key: K, value: V) { + let edge = match self.current.take() { + None => { + // SAFETY: We have no other reference to the tree. + match unsafe { self.root.reborrow() } { + root @ None => { + // Tree is empty, allocate a new root. + let mut node = NodeRef::new_leaf(self.alloc.clone()); + node.borrow_mut().push(key, value); + *root = Some(node.forget_type()); + *self.length += 1; + return; + } + Some(root) => root.borrow_mut().first_leaf_edge(), + } + } + Some(current) => current.next_leaf_edge(), + }; + + let handle = edge.insert_recursing(key, value, self.alloc.clone(), |ins| { + drop(ins.left); + // SAFETY: The handle to the newly inserted value is always on a + // leaf node, so adding a new root node doesn't invalidate it. + let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.push_internal_level(self.alloc.clone()).push(ins.kv.0, ins.kv.1, ins.right) + }); + self.current = handle.left_edge().next_back_kv().ok(); + *self.length += 1; + } + + /// Inserts a new element into the `BTreeMap` before the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the end of the `BTreeMap`. + /// + /// # Safety + /// + /// You must ensure that the `BTreeMap` invariants are maintained. + /// Specifically: + /// + /// * The key of the newly inserted element must be unique in the tree. + /// * All keys in the tree must remain in sorted order. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub unsafe fn insert_before_unchecked(&mut self, key: K, value: V) { + let edge = match self.current.take() { + None => { + // SAFETY: We have no other reference to the tree. + match unsafe { self.root.reborrow() } { + root @ None => { + // Tree is empty, allocate a new root. + let mut node = NodeRef::new_leaf(self.alloc.clone()); + node.borrow_mut().push(key, value); + *root = Some(node.forget_type()); + *self.length += 1; + return; + } + Some(root) => root.borrow_mut().last_leaf_edge(), + } + } + Some(current) => current.next_back_leaf_edge(), + }; + + let handle = edge.insert_recursing(key, value, self.alloc.clone(), |ins| { + drop(ins.left); + // SAFETY: The handle to the newly inserted value is always on a + // leaf node, so adding a new root node doesn't invalidate it. 
+ let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.push_internal_level(self.alloc.clone()).push(ins.kv.0, ins.kv.1, ins.right) + }); + self.current = handle.right_edge().next_kv().ok(); + *self.length += 1; + } + + /// Inserts a new element into the `BTreeMap` after the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the front of the `BTreeMap`. + /// + /// # Panics + /// + /// This function panics if: + /// - the given key compares less than or equal to the current element (if + /// any). + /// - the given key compares greater than or equal to the next element (if + /// any). + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn insert_after(&mut self, key: K, value: V) { + if let Some(current) = self.key() { + if &key <= current { + panic!("key must be ordered above the current element"); + } + } + if let Some((next, _)) = self.peek_next() { + if &key >= next { + panic!("key must be ordered below the next element"); + } + } + unsafe { + self.insert_after_unchecked(key, value); + } + } + + /// Inserts a new element into the `BTreeMap` before the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the end of the `BTreeMap`. + /// + /// # Panics + /// + /// This function panics if: + /// - the given key compares greater than or equal to the current element + /// (if any). + /// - the given key compares less than or equal to the previous element (if + /// any). + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn insert_before(&mut self, key: K, value: V) { + if let Some(current) = self.key() { + if &key >= current { + panic!("key must be ordered below the current element"); + } + } + if let Some((prev, _)) = self.peek_prev() { + if &key <= prev { + panic!("key must be ordered above the previous element"); + } + } + unsafe { + self.insert_before_unchecked(key, value); + } + } + + /// Removes the current element from the `BTreeMap`. + /// + /// The element that was removed is returned, and the cursor is + /// moved to point to the next element in the `BTreeMap`. + /// + /// If the cursor is currently pointing to the "ghost" non-element then no element + /// is removed and `None` is returned. The cursor is not moved in this case. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn remove_current(&mut self) -> Option<(K, V)> { + let current = self.current.take()?; + let mut emptied_internal_root = false; + let (kv, pos) = + current.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone()); + self.current = pos.next_kv().ok(); + *self.length -= 1; + if emptied_internal_root { + // SAFETY: This is safe since current does not point within the now + // empty root node. + let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.pop_internal_level(self.alloc.clone()); + } + Some(kv) + } + + /// Removes the current element from the `BTreeMap`. + /// + /// The element that was removed is returned, and the cursor is + /// moved to point to the previous element in the `BTreeMap`. + /// + /// If the cursor is currently pointing to the "ghost" non-element then no element + /// is removed and `None` is returned. The cursor is not moved in this case.
+ #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn remove_current_and_move_back(&mut self) -> Option<(K, V)> { + let current = self.current.take()?; + let mut emptied_internal_root = false; + let (kv, pos) = + current.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone()); + self.current = pos.next_back_kv().ok(); + *self.length -= 1; + if emptied_internal_root { + // SAFETY: This is safe since current does not point within the now + // empty root node. + let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.pop_internal_level(self.alloc.clone()); + } + Some(kv) + } } #[cfg(test)] diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs index 370b58864af..e9366eec9ce 100644 --- a/library/alloc/src/collections/btree/map/entry.rs +++ b/library/alloc/src/collections/btree/map/entry.rs @@ -347,7 +347,7 @@ impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> { /// assert_eq!(map["poneyland"], 37); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(self, value: V) -> &'a mut V { + pub fn insert(mut self, value: V) -> &'a mut V { let out_ptr = match self.handle { None => { // SAFETY: There is no tree yet so no reference to it exists. @@ -358,25 +358,27 @@ impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> { map.length = 1; val_ptr } - Some(handle) => match handle.insert_recursing(self.key, value, self.alloc.clone()) { - (None, val_ptr) => { - // SAFETY: We have consumed self.handle. - let map = unsafe { self.dormant_map.awaken() }; - map.length += 1; - val_ptr - } - (Some(ins), val_ptr) => { - drop(ins.left); - // SAFETY: We have consumed self.handle and dropped the - // remaining reference to the tree, ins.left. - let map = unsafe { self.dormant_map.awaken() }; - let root = map.root.as_mut().unwrap(); // same as ins.left - root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right); - map.length += 1; - val_ptr - } - }, + Some(handle) => { + let new_handle = + handle.insert_recursing(self.key, value, self.alloc.clone(), |ins| { + drop(ins.left); + // SAFETY: Pushing a new root node doesn't invalidate + // handles to existing nodes. + let map = unsafe { self.dormant_map.reborrow() }; + let root = map.root.as_mut().unwrap(); // same as ins.left + root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right) + }); + + // Get the pointer to the value + let val_ptr = new_handle.into_val_mut(); + + // SAFETY: We have consumed self.handle. + let map = unsafe { self.dormant_map.awaken() }; + map.length += 1; + val_ptr + } }; + // Now that we have finished growing the tree using borrowed references, // dereference the pointer to a part of it, that we picked up along the way. 
unsafe { &mut *out_ptr } diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs index 700b1463bfd..76c2f27b466 100644 --- a/library/alloc/src/collections/btree/map/tests.rs +++ b/library/alloc/src/collections/btree/map/tests.rs @@ -2336,3 +2336,52 @@ fn from_array() { let unordered_duplicates = BTreeMap::from([(3, 4), (1, 2), (1, 2)]); assert_eq!(map, unordered_duplicates); } + +#[test] +fn test_cursor() { + let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); + + let mut cur = map.lower_bound(Bound::Unbounded); + assert_eq!(cur.key(), Some(&1)); + cur.move_next(); + assert_eq!(cur.key(), Some(&2)); + assert_eq!(cur.peek_next(), Some((&3, &'c'))); + cur.move_prev(); + assert_eq!(cur.key(), Some(&1)); + assert_eq!(cur.peek_prev(), None); + + let mut cur = map.upper_bound(Bound::Excluded(&1)); + assert_eq!(cur.key(), None); + cur.move_next(); + assert_eq!(cur.key(), Some(&1)); + cur.move_prev(); + assert_eq!(cur.key(), None); + assert_eq!(cur.peek_prev(), Some((&3, &'c'))); +} + +#[test] +fn test_cursor_mut() { + let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]); + let mut cur = map.lower_bound_mut(Bound::Excluded(&3)); + assert_eq!(cur.key(), Some(&5)); + cur.insert_before(4, 'd'); + assert_eq!(cur.key(), Some(&5)); + assert_eq!(cur.peek_prev(), Some((&4, &mut 'd'))); + cur.move_next(); + assert_eq!(cur.key(), None); + cur.insert_before(6, 'f'); + assert_eq!(cur.key(), None); + assert_eq!(cur.remove_current(), None); + assert_eq!(cur.key(), None); + cur.insert_after(0, '?'); + assert_eq!(cur.key(), None); + assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd'), (5, 'e'), (6, 'f')])); + + let mut cur = map.upper_bound_mut(Bound::Included(&5)); + assert_eq!(cur.key(), Some(&5)); + assert_eq!(cur.remove_current(), Some((5, 'e'))); + assert_eq!(cur.key(), Some(&6)); + assert_eq!(cur.remove_current_and_move_back(), Some((6, 'f'))); + assert_eq!(cur.key(), Some(&4)); + assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd')])); +} diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs index 1e33c1e64d6..b890717e50b 100644 --- a/library/alloc/src/collections/btree/navigate.rs +++ b/library/alloc/src/collections/btree/navigate.rs @@ -4,6 +4,7 @@ use core::ops::RangeBounds; use core::ptr; use super::node::{marker, ForceResult::*, Handle, NodeRef}; +use super::search::SearchBound; use crate::alloc::Allocator; // `front` and `back` are always both `None` or both `Some`. @@ -386,7 +387,7 @@ impl<BorrowType: marker::BorrowType, K, V> /// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV /// on the left side, which is either in the same leaf node or in an ancestor node. /// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node. - fn next_back_kv( + pub fn next_back_kv( self, ) -> Result< Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>, @@ -707,7 +708,9 @@ impl<BorrowType: marker::BorrowType, K, V> } /// Returns the leaf edge closest to a KV for backward navigation. 
- fn next_back_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> { + pub fn next_back_leaf_edge( + self, + ) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> { match self.force() { Leaf(leaf_kv) => leaf_kv.left_edge(), Internal(internal_kv) => { @@ -717,3 +720,51 @@ impl<BorrowType: marker::BorrowType, K, V> } } } + +impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> { + /// Returns the leaf edge corresponding to the first point at which the + /// given bound is true. + pub fn lower_bound<Q: ?Sized>( + self, + mut bound: SearchBound<&Q>, + ) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> + where + Q: Ord, + K: Borrow<Q>, + { + let mut node = self; + loop { + let (edge, new_bound) = node.find_lower_bound_edge(bound); + match edge.force() { + Leaf(edge) => return edge, + Internal(edge) => { + node = edge.descend(); + bound = new_bound; + } + } + } + } + + /// Returns the leaf edge corresponding to the last point at which the + /// given bound is true. + pub fn upper_bound<Q: ?Sized>( + self, + mut bound: SearchBound<&Q>, + ) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> + where + Q: Ord, + K: Borrow<Q>, + { + let mut node = self; + loop { + let (edge, new_bound) = node.find_upper_bound_edge(bound); + match edge.force() { + Leaf(edge) => return edge, + Internal(edge) => { + node = edge.descend(); + bound = new_bound; + } + } + } + } +} diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs index 6912466448f..3233a575ecf 100644 --- a/library/alloc/src/collections/btree/node.rs +++ b/library/alloc/src/collections/btree/node.rs @@ -442,6 +442,24 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { // SAFETY: we have exclusive access to the entire node. unsafe { &mut *ptr } } + + /// Returns a dormant copy of this node with its lifetime erased which can + /// be reawakened later. + pub fn dormant(&self) -> NodeRef<marker::DormantMut, K, V, Type> { + NodeRef { height: self.height, node: self.node, _marker: PhantomData } + } +} + +impl<K, V, Type> NodeRef<marker::DormantMut, K, V, Type> { + /// Revert to the unique borrow initially captured. + /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn awaken<'a>(self) -> NodeRef<marker::Mut<'a>, K, V, Type> { + NodeRef { height: self.height, node: self.node, _marker: PhantomData } + } } impl<K, V, Type> NodeRef<marker::Dying, K, V, Type> { @@ -798,6 +816,25 @@ impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeT // We can't use Handle::new_kv or Handle::new_edge because we don't know our type Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData } } + + /// Returns a dormant copy of this handle which can be reawakened later. + /// + /// See `DormantMutRef` for more details. + pub fn dormant(&self) -> Handle<NodeRef<marker::DormantMut, K, V, NodeType>, HandleType> { + Handle { node: self.node.dormant(), idx: self.idx, _marker: PhantomData } + } +} + +impl<K, V, NodeType, HandleType> Handle<NodeRef<marker::DormantMut, K, V, NodeType>, HandleType> { + /// Revert to the unique borrow initially captured. 
+ /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn awaken<'a>(self) -> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> { + Handle { node: unsafe { self.node.awaken() }, idx: self.idx, _marker: PhantomData } + } } impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> { @@ -851,9 +888,11 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark /// Inserts a new key-value pair between the key-value pairs to the right and left of /// this edge. This method assumes that there is enough space in the node for the new /// pair to fit. - /// - /// The returned pointer points to the inserted value. - fn insert_fit(&mut self, key: K, val: V) -> *mut V { + unsafe fn insert_fit( + mut self, + key: K, + val: V, + ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> { debug_assert!(self.node.len() < CAPACITY); let new_len = self.node.len() + 1; @@ -862,7 +901,7 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark slice_insert(self.node.val_area_mut(..new_len), self.idx, val); *self.node.len_mut() = new_len as u16; - self.node.val_area_mut(self.idx).assume_init_mut() + Handle::new_kv(self.node, self.idx) } } } @@ -871,21 +910,26 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark /// Inserts a new key-value pair between the key-value pairs to the right and left of /// this edge. This method splits the node if there isn't enough room. /// - /// The returned pointer points to the inserted value. + /// Returns a dormant handle to the inserted node which can be reawakened + /// once splitting is complete. fn insert<A: Allocator + Clone>( - mut self, + self, key: K, val: V, alloc: A, - ) -> (Option<SplitResult<'a, K, V, marker::Leaf>>, *mut V) { + ) -> ( + Option<SplitResult<'a, K, V, marker::Leaf>>, + Handle<NodeRef<marker::DormantMut, K, V, marker::Leaf>, marker::KV>, + ) { if self.node.len() < CAPACITY { - let val_ptr = self.insert_fit(key, val); - (None, val_ptr) + // SAFETY: There is enough space in the node for insertion. + let handle = unsafe { self.insert_fit(key, val) }; + (None, handle.dormant()) } else { let (middle_kv_idx, insertion) = splitpoint(self.idx); let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) }; let mut result = middle.split(alloc); - let mut insertion_edge = match insertion { + let insertion_edge = match insertion { LeftOrRight::Left(insert_idx) => unsafe { Handle::new_edge(result.left.reborrow_mut(), insert_idx) }, @@ -893,8 +937,10 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark Handle::new_edge(result.right.borrow_mut(), insert_idx) }, }; - let val_ptr = insertion_edge.insert_fit(key, val); - (Some(result), val_ptr) + // SAFETY: We just split the node, so there is enough space for + // insertion. 
+ let handle = unsafe { insertion_edge.insert_fit(key, val).dormant() }; + (Some(result), handle) } } } @@ -976,21 +1022,31 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark key: K, value: V, alloc: A, - ) -> (Option<SplitResult<'a, K, V, marker::LeafOrInternal>>, *mut V) { - let (mut split, val_ptr) = match self.insert(key, value, alloc.clone()) { - (None, val_ptr) => return (None, val_ptr), - (Some(split), val_ptr) => (split.forget_node_type(), val_ptr), + split_root: impl FnOnce(SplitResult<'a, K, V, marker::LeafOrInternal>), + ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> { + let (mut split, handle) = match self.insert(key, value, alloc.clone()) { + // SAFETY: we have finished splitting and can now re-awaken the + // handle to the inserted element. + (None, handle) => return unsafe { handle.awaken() }, + (Some(split), handle) => (split.forget_node_type(), handle), }; loop { split = match split.left.ascend() { Ok(parent) => { match parent.insert(split.kv.0, split.kv.1, split.right, alloc.clone()) { - None => return (None, val_ptr), + // SAFETY: we have finished splitting and can now re-awaken the + // handle to the inserted element. + None => return unsafe { handle.awaken() }, Some(split) => split.forget_node_type(), } } - Err(root) => return (Some(SplitResult { left: root, ..split }), val_ptr), + Err(root) => { + split_root(SplitResult { left: root, ..split }); + // SAFETY: we have finished splitting and can now re-awaken the + // handle to the inserted element. + return unsafe { handle.awaken() }; + } }; } } @@ -1043,6 +1099,14 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType> let leaf = self.node.into_leaf_mut(); unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() } } + + pub fn into_kv_valmut(self) -> (&'a K, &'a mut V) { + debug_assert!(self.idx < self.node.len()); + let leaf = self.node.into_leaf_mut(); + let k = unsafe { leaf.keys.get_unchecked(self.idx).assume_init_ref() }; + let v = unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }; + (k, v) + } } impl<'a, K, V, NodeType> Handle<NodeRef<marker::ValMut<'a>, K, V, NodeType>, marker::KV> { @@ -1667,6 +1731,7 @@ pub mod marker { pub enum Owned {} pub enum Dying {} + pub enum DormantMut {} pub struct Immut<'a>(PhantomData<&'a ()>); pub struct Mut<'a>(PhantomData<&'a mut ()>); pub struct ValMut<'a>(PhantomData<&'a mut ()>); @@ -1688,6 +1753,7 @@ pub mod marker { impl<'a> BorrowType for Immut<'a> {} impl<'a> BorrowType for Mut<'a> {} impl<'a> BorrowType for ValMut<'a> {} + impl BorrowType for DormantMut {} pub enum KV {} pub enum Edge {} diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs index e54880e8652..34bc0ce9177 100644 --- a/library/alloc/src/collections/vec_deque/into_iter.rs +++ b/library/alloc/src/collections/vec_deque/into_iter.rs @@ -1,5 +1,5 @@ -use core::fmt; use core::iter::{FusedIterator, TrustedLen}; +use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr}; use crate::alloc::{Allocator, Global}; @@ -52,6 +52,126 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { let len = self.inner.len(); (len, Some(len)) } + + #[inline] + fn advance_by(&mut self, n: usize) -> Result<(), usize> { + if self.inner.len < n { + let len = self.inner.len; + self.inner.clear(); + Err(len) + } else { + self.inner.drain(..n); + Ok(()) + } + } + + #[inline] + fn count(self) -> usize { + self.inner.len + } + + fn try_fold<B, F, R>(&mut self, mut init: 
B, mut f: F) -> R + where + F: FnMut(B, Self::Item) -> R, + R: Try<Output = B>, + { + struct Guard<'a, T, A: Allocator> { + deque: &'a mut VecDeque<T, A>, + // `consumed <= deque.len` always holds. + consumed: usize, + } + + impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> { + fn drop(&mut self) { + self.deque.len -= self.consumed; + self.deque.head = self.deque.to_physical_idx(self.consumed); + } + } + + let mut guard = Guard { deque: &mut self.inner, consumed: 0 }; + + let (head, tail) = guard.deque.as_slices(); + + init = head + .iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: Because we incremented `guard.consumed`, the + // deque effectively forgot the element, so we can take + // ownership + unsafe { ptr::read(elem) } + }) + .try_fold(init, &mut f)?; + + tail.iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: Same as above. + unsafe { ptr::read(elem) } + }) + .try_fold(init, &mut f) + } + + #[inline] + fn fold<B, F>(mut self, init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + match self.try_fold(init, |b, item| Ok::<B, !>(f(b, item))) { + Ok(b) => b, + Err(e) => match e {}, + } + } + + #[inline] + fn last(mut self) -> Option<Self::Item> { + self.inner.pop_back() + } + + fn next_chunk<const N: usize>( + &mut self, + ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> { + let mut raw_arr = MaybeUninit::uninit_array(); + let raw_arr_ptr = raw_arr.as_mut_ptr().cast(); + let (head, tail) = self.inner.as_slices(); + + if head.len() >= N { + // SAFETY: By manually adjusting the head and length of the deque, we effectively + // make it forget the first `N` elements, so taking ownership of them is safe. + unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, N) }; + self.inner.head = self.inner.to_physical_idx(N); + self.inner.len -= N; + // SAFETY: We initialized the entire array with items from `head` + return Ok(unsafe { raw_arr.transpose().assume_init() }); + } + + // SAFETY: Same argument as above. + unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, head.len()) }; + let remaining = N - head.len(); + + if tail.len() >= remaining { + // SAFETY: Same argument as above. + unsafe { + ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), remaining) + }; + self.inner.head = self.inner.to_physical_idx(N); + self.inner.len -= N; + // SAFETY: We initialized the entire array with items from `head` and `tail` + Ok(unsafe { raw_arr.transpose().assume_init() }) + } else { + // SAFETY: Same argument as above. + unsafe { + ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), tail.len()) + }; + let init = head.len() + tail.len(); + // We completely drained all the deques elements. + self.inner.head = 0; + self.inner.len = 0; + // SAFETY: We copied all elements from both slices to the beginning of the array, so + // the given range is initialized. 
+ Err(unsafe { array::IntoIter::new_unchecked(raw_arr, 0..init) }) + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -60,10 +180,73 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { fn next_back(&mut self) -> Option<T> { self.inner.pop_back() } + + #[inline] + fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { + let len = self.inner.len; + if len >= n { + self.inner.truncate(len - n); + Ok(()) + } else { + self.inner.clear(); + Err(len) + } + } + + fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R + where + F: FnMut(B, Self::Item) -> R, + R: Try<Output = B>, + { + struct Guard<'a, T, A: Allocator> { + deque: &'a mut VecDeque<T, A>, + // `consumed <= deque.len` always holds. + consumed: usize, + } + + impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> { + fn drop(&mut self) { + self.deque.len -= self.consumed; + } + } + + let mut guard = Guard { deque: &mut self.inner, consumed: 0 }; + + let (head, tail) = guard.deque.as_slices(); + + init = tail + .iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: See `try_fold`'s safety comment. + unsafe { ptr::read(elem) } + }) + .try_rfold(init, &mut f)?; + + head.iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: Same as above. + unsafe { ptr::read(elem) } + }) + .try_rfold(init, &mut f) + } + + #[inline] + fn rfold<B, F>(mut self, init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + match self.try_rfold(init, |b, item| Ok::<B, !>(f(b, item))) { + Ok(b) => b, + Err(e) => match e {}, + } + } } #[stable(feature = "rust1", since = "1.0.0")] impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { + #[inline] fn is_empty(&self) -> bool { self.inner.is_empty() } diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 451e4936bc5..1573b3d77dc 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -537,7 +537,7 @@ impl<T> VecDeque<T> { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_stable(feature = "const_vec_deque_new", since = "CURRENT_RUSTC_VERSION")] + #[rustc_const_stable(feature = "const_vec_deque_new", since = "1.68.0")] #[must_use] pub const fn new() -> VecDeque<T> { // FIXME: This should just be `VecDeque::new_in(Global)` once that hits stable. 
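The `try_fold`/`try_rfold` implementations above share one trick: elements are moved out of the deque's backing buffer with `ptr::read`, while a drop guard tracks how many were consumed and, on unwind, makes the deque forget exactly those elements so nothing is dropped twice. Here is a minimal sketch of that guard pattern, using a plain `Vec` as a stand-in for the deque's buffer; the `fold_by_value` helper is illustrative and not part of this diff:

```
use std::mem::ManuallyDrop;
use std::ptr;

// Illustrative helper, not part of the diff: fold over owned elements with
// the same panic-safety scheme as `IntoIter::try_fold` above.
fn fold_by_value<T, B>(buf: Vec<T>, init: B, mut f: impl FnMut(B, T) -> B) -> B {
    struct Guard<T> {
        buf: ManuallyDrop<Vec<T>>,
        consumed: usize,
    }
    impl<T> Drop for Guard<T> {
        fn drop(&mut self) {
            unsafe {
                // Drop only the elements that were never moved out...
                let rest = self.buf.len() - self.consumed;
                let p = self.buf.as_mut_ptr().add(self.consumed);
                ptr::drop_in_place(ptr::slice_from_raw_parts_mut(p, rest));
                // ...then free the buffer without dropping anything else.
                self.buf.set_len(0);
                ManuallyDrop::drop(&mut self.buf);
            }
        }
    }

    let mut guard = Guard { buf: ManuallyDrop::new(buf), consumed: 0 };
    let mut acc = init;
    for i in 0..guard.buf.len() {
        // Bump `consumed` *before* the closure runs, so an unwinding `f`
        // never causes the moved-out element to be dropped a second time.
        guard.consumed = i + 1;
        // SAFETY: each slot is read exactly once and then treated as gone.
        let item = unsafe { ptr::read(guard.buf.as_ptr().add(i)) };
        acc = f(acc, item);
    }
    acc
}
```

For example, `fold_by_value(vec![1, 2, 3], 0, |a, b| a + b)` returns `6`, and if the closure panics partway through, the guard drops only the unconsumed suffix before freeing the buffer.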
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs index 11bd4c4dc1b..f99395c72aa 100644 --- a/library/alloc/src/ffi/c_str.rs +++ b/library/alloc/src/ffi/c_str.rs @@ -991,12 +991,6 @@ impl IntoStringError { pub fn utf8_error(&self) -> Utf8Error { self.error } - - #[doc(hidden)] - #[unstable(feature = "cstr_internals", issue = "none")] - pub fn __source(&self) -> &Utf8Error { - &self.error - } } impl IntoStringError { @@ -1141,6 +1135,6 @@ impl core::error::Error for IntoStringError { } fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - Some(self.__source()) + Some(&self.error) } } diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs index eadb35cb96d..1da86e1a46a 100644 --- a/library/alloc/src/fmt.rs +++ b/library/alloc/src/fmt.rs @@ -558,7 +558,7 @@ pub use core::fmt::Alignment; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::Error; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{write, ArgumentV1, Arguments}; +pub use core::fmt::{write, Arguments}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{Binary, Octal}; #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index ca75c3895f4..e9cc3875f68 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -87,6 +87,7 @@ #![warn(missing_debug_implementations)] #![warn(missing_docs)] #![allow(explicit_outlives_requirements)] +#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))] // // Library features: #![feature(alloc_layout_extra)] @@ -115,7 +116,6 @@ #![feature(const_eval_select)] #![feature(const_pin)] #![feature(const_waker)] -#![feature(cstr_from_bytes_until_nul)] #![feature(dispatch_from_dyn)] #![feature(error_generic_member_access)] #![feature(error_in_core)] @@ -195,6 +195,7 @@ #![feature(c_unwind)] #![feature(with_negative_coherence)] #![cfg_attr(test, feature(panic_update_hook))] +#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))] // // Rustdoc features: #![feature(doc_cfg)] diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index 5a10121bbbe..3751f2a2454 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -241,10 +241,15 @@ impl<T, A: Allocator> RawVec<T, A> { if T::IS_ZST || self.cap == 0 { None } else { - // We have an allocated chunk of memory, so we can bypass runtime - // checks to get our current layout. + // We could use Layout::array here which ensures the absence of isize and usize overflows + // and could hypothetically handle differences between stride and size, but this memory + // has already been allocated so we know it can't overflow and currently rust does not + // support such types. So we can do better by skipping some checks and avoid an unwrap. 
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) }; unsafe { - let layout = Layout::array::<T>(self.cap).unwrap_unchecked(); + let align = mem::align_of::<T>(); + let size = mem::size_of::<T>().unchecked_mul(self.cap); + let layout = Layout::from_size_align_unchecked(size, align); Some((self.ptr.cast().into(), layout)) } } @@ -426,11 +431,13 @@ impl<T, A: Allocator> RawVec<T, A> { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; - + // See current_memory() why this assert is here + let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) }; let ptr = unsafe { // `Layout::array` cannot overflow here because it would have // overflowed earlier when capacity was larger. - let new_layout = Layout::array::<T>(cap).unwrap_unchecked(); + let new_size = mem::size_of::<T>().unchecked_mul(cap); + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); self.alloc .shrink(ptr, layout, new_layout) .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index c9aa23fc4af..fd1e3e0f75b 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -1092,7 +1092,7 @@ impl<T: ?Sized> Rc<T> { /// # Safety /// /// If any other `Rc` or [`Weak`] pointers to the same allocation exist, then - /// they must be must not be dereferenced or have active borrows for the duration + /// they must not be dereferenced or have active borrows for the duration /// of the returned borrow, and their inner type must be exactly the same as the /// inner type of this Rc (including lifetimes). This is trivially the case if no /// such pointers exist, for example immediately after `Rc::new`. diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index fecacc2bb63..093dcbbe8bf 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -782,6 +782,38 @@ impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> { } } +// Specializable trait for implementing ToOwned::clone_into. This is +// public in the crate and has the Allocator parameter so that +// vec::clone_from use it too. +#[cfg(not(no_global_oom_handling))] +pub(crate) trait SpecCloneIntoVec<T, A: Allocator> { + fn clone_into(&self, target: &mut Vec<T, A>); +} + +#[cfg(not(no_global_oom_handling))] +impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] { + default fn clone_into(&self, target: &mut Vec<T, A>) { + // drop anything in target that will not be overwritten + target.truncate(self.len()); + + // target.len <= self.len due to the truncate above, so the + // slices here are always in-bounds. + let (init, tail) = self.split_at(target.len()); + + // reuse the contained values' allocations/resources. 
+ target.clone_from_slice(init); + target.extend_from_slice(tail); + } +} + +#[cfg(not(no_global_oom_handling))] +impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] { + fn clone_into(&self, target: &mut Vec<T, A>) { + target.clear(); + target.extend_from_slice(self); + } +} + #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone> ToOwned for [T] { @@ -797,16 +829,7 @@ impl<T: Clone> ToOwned for [T] { } fn clone_into(&self, target: &mut Vec<T>) { - // drop anything in target that will not be overwritten - target.truncate(self.len()); - - // target.len <= self.len due to the truncate above, so the - // slices here are always in-bounds. - let (init, tail) = self.split_at(target.len()); - - // reuse the contained values' allocations/resources. - target.clone_from_slice(init); - target.extend_from_slice(tail); + SpecCloneIntoVec::clone_into(self, target); } } diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index 3118c7189a5..2b843647dd5 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -42,8 +42,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(no_global_oom_handling))] -use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; use core::error::Error; use core::fmt; use core::hash; @@ -683,7 +681,7 @@ impl String { // This isn't done via collect::<Result<_, _>>() for performance reasons. // FIXME: the function can be simplified again when #48994 is closed. let mut ret = String::with_capacity(v.len()); - for c in decode_utf16(v.iter().cloned()) { + for c in char::decode_utf16(v.iter().cloned()) { if let Ok(c) = c { ret.push(c); } else { @@ -722,7 +720,9 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16_lossy(v: &[u16]) -> String { - decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect() + char::decode_utf16(v.iter().cloned()) + .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) + .collect() } /// Decomposes a `String` into its raw components. @@ -928,12 +928,12 @@ impl String { /// Copies elements from `src` range to the end of the string. /// - /// ## Panics + /// # Panics /// /// Panics if the starting point or end point do not lie on a [`char`] /// boundary, or if they're out of bounds. /// - /// ## Examples + /// # Examples /// /// ``` /// #![feature(string_extend_from_within)] @@ -2549,7 +2549,7 @@ impl ToString for char { } #[cfg(not(no_global_oom_handling))] -#[stable(feature = "bool_to_string_specialization", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "bool_to_string_specialization", since = "1.68.0")] impl ToString for bool { #[inline] fn to_string(&self) -> String { diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index 9bc9182f7b5..f20486ca9e4 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -1733,7 +1733,7 @@ impl<T: ?Sized> Arc<T> { /// # Safety /// /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then - /// they must be must not be dereferenced or have active borrows for the duration + /// they must not be dereferenced or have active borrows for the duration /// of the returned borrow, and their inner type must be exactly the same as the /// inner type of this Rc (including lifetimes). This is trivially the case if no /// such pointers exist, for example immediately after `Arc::new`. 
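// A small usage sketch (not part of the patch) of the observable effect of the
// `clone_into` strategy above, and of the `Vec::clone_from` delegation in the next
// hunk: cloning into an already-allocated vector reuses its buffer.
fn main() {
    let mut target: Vec<String> = vec!["aa".into(), "bb".into(), "cc".into()];
    let source: Vec<String> = vec!["x".into()];
    let buf = target.as_ptr();
    target.clone_from(&source); // truncates to 1 element, then clones in place
    assert_eq!(target, source);
    assert_eq!(target.as_ptr(), buf); // same heap buffer as before, no reallocation
}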
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 36b0b3c9e7c..a07f3da78d3 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -2647,35 +2647,6 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> { } #[cfg(not(no_global_oom_handling))] -trait SpecCloneFrom { - fn clone_from(this: &mut Self, other: &Self); -} - -#[cfg(not(no_global_oom_handling))] -impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> { - default fn clone_from(this: &mut Self, other: &Self) { - // drop anything that will not be overwritten - this.truncate(other.len()); - - // self.len <= other.len due to the truncate above, so the - // slices here are always in-bounds. - let (init, tail) = other.split_at(this.len()); - - // reuse the contained values' allocations/resources. - this.clone_from_slice(init); - this.extend_from_slice(tail); - } -} - -#[cfg(not(no_global_oom_handling))] -impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> { - fn clone_from(this: &mut Self, other: &Self) { - this.clear(); - this.extend_from_slice(other); - } -} - -#[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> { #[cfg(not(test))] @@ -2695,7 +2666,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> { } fn clone_from(&mut self, other: &Self) { - SpecCloneFrom::clone_from(self, other) + crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self); } } diff --git a/library/core/benches/array.rs b/library/core/benches/array.rs new file mode 100644 index 00000000000..d8cc44d05c4 --- /dev/null +++ b/library/core/benches/array.rs @@ -0,0 +1,19 @@ +use test::black_box; +use test::Bencher; + +macro_rules! map_array { + ($func_name:ident, $start_item: expr, $map_item: expr, $arr_size: expr) => { + #[bench] + fn $func_name(b: &mut Bencher) { + let arr = [$start_item; $arr_size]; + b.iter(|| black_box(arr).map(|_| black_box($map_item))); + } + }; +} + +map_array!(map_8byte_8byte_8, 0u64, 1u64, 80); +map_array!(map_8byte_8byte_64, 0u64, 1u64, 640); +map_array!(map_8byte_8byte_256, 0u64, 1u64, 2560); + +map_array!(map_8byte_256byte_256, 0u64, [0u64; 4], 2560); +map_array!(map_256byte_8byte_256, [0u64; 4], 0u64, 2560); diff --git a/library/core/benches/char/methods.rs b/library/core/benches/char/methods.rs index 9408f83c32f..5d4df1ac8bd 100644 --- a/library/core/benches/char/methods.rs +++ b/library/core/benches/char/methods.rs @@ -1,26 +1,26 @@ -use test::Bencher; +use test::{black_box, Bencher}; const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9']; const RADIX: [u32; 5] = [2, 8, 10, 16, 32]; #[bench] fn bench_to_digit_radix_2(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(2)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(2)).min()) } #[bench] fn bench_to_digit_radix_10(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(10)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(10)).min()) } #[bench] fn bench_to_digit_radix_16(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(16)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(16)).min()) } #[bench] fn bench_to_digit_radix_36(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(36)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(36)).min()) } #[bench] @@ 
-31,47 +31,59 @@ fn bench_to_digit_radix_var(b: &mut Bencher) { .cycle() .zip(RADIX.iter().cycle()) .take(10_000) - .map(|(c, radix)| c.to_digit(*radix)) + .map(|(c, radix)| black_box(c).to_digit(*radix)) .min() }) } #[bench] fn bench_to_ascii_uppercase(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_uppercase()).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_ascii_uppercase()).min()) } #[bench] fn bench_to_ascii_lowercase(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_lowercase()).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_ascii_lowercase()).min()) } #[bench] fn bench_ascii_mix_to_uppercase(b: &mut Bencher) { - b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (0..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_ascii_mix_to_lowercase(b: &mut Bencher) { - b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (0..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } #[bench] fn bench_ascii_char_to_uppercase(b: &mut Bencher) { - b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (0..=127).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_ascii_char_to_lowercase(b: &mut Bencher) { - b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (0..=127).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } #[bench] fn bench_non_ascii_char_to_uppercase(b: &mut Bencher) { - b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (128..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_non_ascii_char_to_lowercase(b: &mut Bencher) { - b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (128..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs index f1244d93285..e4100120d82 100644 --- a/library/core/benches/lib.rs +++ b/library/core/benches/lib.rs @@ -9,6 +9,7 @@ extern crate test; mod any; +mod array; mod ascii; mod char; mod fmt; diff --git a/library/core/benches/num/flt2dec/strategy/dragon.rs b/library/core/benches/num/flt2dec/strategy/dragon.rs index 319b9773e49..377c99effd0 100644 --- a/library/core/benches/num/flt2dec/strategy/dragon.rs +++ b/library/core/benches/num/flt2dec/strategy/dragon.rs @@ -1,14 +1,14 @@ use super::super::*; use core::num::flt2dec::strategy::dragon::*; use std::mem::MaybeUninit; -use test::Bencher; +use test::{black_box, Bencher}; #[bench] fn bench_small_shortest(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -17,7 +17,7 @@ fn bench_big_shortest(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -26,7 +26,7 @@ fn bench_small_exact_3(b: &mut Bencher) { 
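// A note on why these benches now thread `black_box` through (a sketch, not part of
// the patch): without it, the optimizer may treat inputs like `CHARS` or `decoded`
// as compile-time constants and fold the measured work away entirely. The pattern,
// in miniature:
//
//     #[bench]
//     fn pow(b: &mut Bencher) {
//         b.iter(|| black_box(2_u32).pow(black_box(10)));
//     }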
let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -35,7 +35,7 @@ fn bench_big_exact_3(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -44,7 +44,7 @@ fn bench_small_exact_12(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -53,7 +53,7 @@ fn bench_big_exact_12(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -62,7 +62,7 @@ fn bench_small_exact_inf(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -71,6 +71,6 @@ fn bench_big_exact_inf(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } diff --git a/library/core/benches/num/flt2dec/strategy/grisu.rs b/library/core/benches/num/flt2dec/strategy/grisu.rs index 8e47a046cde..6bea5e55d37 100644 --- a/library/core/benches/num/flt2dec/strategy/grisu.rs +++ b/library/core/benches/num/flt2dec/strategy/grisu.rs @@ -1,7 +1,7 @@ use super::super::*; use core::num::flt2dec::strategy::grisu::*; use std::mem::MaybeUninit; -use test::Bencher; +use test::{black_box, Bencher}; pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded { match decode(v).1 { @@ -15,7 +15,7 @@ fn bench_small_shortest(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -24,7 +24,7 @@ fn bench_big_shortest(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -33,7 +33,7 @@ fn bench_small_exact_3(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -42,7 +42,7 @@ fn bench_big_exact_3(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -51,7 +51,7 @@ fn bench_small_exact_12(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -60,7 +60,7 @@ fn bench_big_exact_12(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + 
format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -69,7 +69,7 @@ fn bench_small_exact_inf(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -78,6 +78,6 @@ fn bench_big_exact_inf(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs index 1d80b8bf9ec..18da70451f2 100644 --- a/library/core/src/alloc/global.rs +++ b/library/core/src/alloc/global.rs @@ -203,7 +203,7 @@ pub unsafe trait GlobalAlloc { ptr } - /// Shrink or grow a block of memory to the given `new_size`. + /// Shrink or grow a block of memory to the given `new_size` in bytes. /// The block is described by the given `ptr` pointer and `layout`. /// /// If this returns a non-null pointer, then ownership of the memory block @@ -211,10 +211,11 @@ pub unsafe trait GlobalAlloc { /// Any access to the old `ptr` is Undefined Behavior, even if the /// allocation remained in-place. The newly returned pointer is the only valid pointer /// for accessing this memory now. + /// /// The new memory block is allocated with `layout`, - /// but with the `size` updated to `new_size`. This new layout must be - /// used when deallocating the new memory block with `dealloc`. The range - /// `0..min(layout.size(), new_size)` of the new memory block is + /// but with the `size` updated to `new_size` in bytes. + /// This new layout must be used when deallocating the new memory block with `dealloc`. + /// The range `0..min(layout.size(), new_size)` of the new memory block is /// guaranteed to have the same values as the original block. /// /// If this method returns null, then ownership of the memory diff --git a/library/core/src/array/drain.rs b/library/core/src/array/drain.rs new file mode 100644 index 00000000000..5fadf907b62 --- /dev/null +++ b/library/core/src/array/drain.rs @@ -0,0 +1,76 @@ +use crate::iter::{TrustedLen, UncheckedIterator}; +use crate::mem::ManuallyDrop; +use crate::ptr::drop_in_place; +use crate::slice; + +/// A situationally-optimized version of `array.into_iter().for_each(func)`. +/// +/// [`crate::array::IntoIter`]s are great when you need an owned iterator, but +/// storing the entire array *inside* the iterator like that can sometimes +/// pessimize code. Notably, it can be more bytes than you really want to move +/// around, and because the array accesses index into it, SRoA has a harder time +/// optimizing away the type than it does for iterators that just hold a couple pointers. +/// +/// Thus this function exists, which gives a way to get *moved* access to the +/// elements of an array using a small iterator -- no bigger than a slice iterator. +/// +/// The function-taking-a-closure structure makes it safe, as it keeps callers +/// from looking at already-dropped elements. +pub(crate) fn drain_array_with<T, R, const N: usize>( + array: [T; N], + func: impl for<'a> FnOnce(Drain<'a, T>) -> R, +) -> R { + let mut array = ManuallyDrop::new(array); + // SAFETY: Now that the local won't drop it, it's ok to construct the `Drain` which will.
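// A hypothetical crate-internal usage sketch (not part of the patch) of
// `drain_array_with`: owned access to the elements through an iterator no bigger
// than a slice iterator.
//
//     let joined = drain_array_with([String::from("a"), String::from("b")], |iter| {
//         iter.collect::<String>()
//     });
//     assert_eq!(joined, "ab");
//
// Whatever the closure leaves unconsumed is dropped by `Drain`'s `Drop` impl below.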
+ let drain = Drain(array.iter_mut()); + func(drain) +} + +/// See [`drain_array_with`] -- this is `pub(crate)` only so it's allowed to be +/// mentioned in the signature of that method. (Otherwise it hits `E0446`.) +// INVARIANT: It's ok to drop the remainder of the inner iterator. +pub(crate) struct Drain<'a, T>(slice::IterMut<'a, T>); + +impl<T> Drop for Drain<'_, T> { + fn drop(&mut self) { + // SAFETY: By the type invariant, we're allowed to drop all these. + unsafe { drop_in_place(self.0.as_mut_slice()) } + } +} + +impl<T> Iterator for Drain<'_, T> { + type Item = T; + + #[inline] + fn next(&mut self) -> Option<T> { + let p: *const T = self.0.next()?; + // SAFETY: The iterator was already advanced, so we won't drop this later. + Some(unsafe { p.read() }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let n = self.len(); + (n, Some(n)) + } +} + +impl<T> ExactSizeIterator for Drain<'_, T> { + #[inline] + fn len(&self) -> usize { + self.0.len() + } +} + +// SAFETY: This is a 1:1 wrapper for a slice iterator, which is also `TrustedLen`. +unsafe impl<T> TrustedLen for Drain<'_, T> {} + +impl<T> UncheckedIterator for Drain<'_, T> { + unsafe fn next_unchecked(&mut self) -> T { + // SAFETY: `Drain` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. + let p: *const T = unsafe { self.0.next_unchecked() }; + // SAFETY: The iterator was already advanced, so we won't drop this later. + unsafe { p.read() } + } +} diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 2825e0bbb43..1643842d607 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -10,16 +10,19 @@ use crate::convert::{Infallible, TryFrom}; use crate::error::Error; use crate::fmt; use crate::hash::{self, Hash}; -use crate::iter::TrustedLen; +use crate::iter::UncheckedIterator; use crate::mem::{self, MaybeUninit}; use crate::ops::{ ChangeOutputType, ControlFlow, FromResidual, Index, IndexMut, NeverShortCircuit, Residual, Try, }; use crate::slice::{Iter, IterMut}; +mod drain; mod equality; mod iter; +pub(crate) use drain::drain_array_with; + #[stable(feature = "array_value_iter", since = "1.51.0")] pub use iter::IntoIter; @@ -52,16 +55,11 @@ pub use iter::IntoIter; /// ``` #[inline] #[stable(feature = "array_from_fn", since = "1.63.0")] -pub fn from_fn<T, const N: usize, F>(mut cb: F) -> [T; N] +pub fn from_fn<T, const N: usize, F>(cb: F) -> [T; N] where F: FnMut(usize) -> T, { - let mut idx = 0; - [(); N].map(|_| { - let res = cb(idx); - idx += 1; - res - }) + try_from_fn(NeverShortCircuit::wrap_mut_1(cb)).0 } /// Creates an array `[T; N]` where each fallible array element `T` is returned by the `cb` call. @@ -101,9 +99,14 @@ where R: Try, R::Residual: Residual<[R::Output; N]>, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { try_collect_into_array_unchecked(&mut (0..N).map(cb)) } + let mut array = MaybeUninit::uninit_array::<N>(); + match try_from_fn_erased(&mut array, cb) { + ControlFlow::Break(r) => FromResidual::from_residual(r), + ControlFlow::Continue(()) => { + // SAFETY: All elements of the array were populated. + try { unsafe { MaybeUninit::array_assume_init(array) } } + } + } } /// Converts a reference to `T` into a reference to an array of length 1 (without copying). 
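// The conversion described in the doc line above is the stable
// `core::array::from_ref` (a sketch, not from the patch):
//
//     let x = 5_i32;
//     let one: &[i32; 1] = core::array::from_ref(&x); // a reborrow, no copy
//     assert_eq!(one, &[5]);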
@@ -131,7 +134,8 @@ pub struct TryFromSliceError(()); impl fmt::Display for TryFromSliceError { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.__description(), f) + #[allow(deprecated)] + self.description().fmt(f) } } @@ -139,20 +143,6 @@ impl fmt::Display for TryFromSliceError { impl Error for TryFromSliceError { #[allow(deprecated)] fn description(&self) -> &str { - self.__description() - } -} - -impl TryFromSliceError { - #[unstable( - feature = "array_error_internals", - reason = "available through Error trait and this method should not \ - be exposed publicly", - issue = "none" - )] - #[inline] - #[doc(hidden)] - pub fn __description(&self) -> &str { "could not convert slice to array" } } @@ -427,9 +417,7 @@ trait SpecArrayClone: Clone { impl<T: Clone> SpecArrayClone for T { #[inline] default fn clone<const N: usize>(array: &[T; N]) -> [T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut array.iter().cloned()) } + from_trusted_iterator(array.iter().cloned()) } } @@ -513,9 +501,7 @@ impl<T, const N: usize> [T; N] { where F: FnMut(T) -> U, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) } + self.try_map(NeverShortCircuit::wrap_mut_1(f)).0 } /// A fallible function `f` applied to each element on array `self` in order to @@ -552,9 +538,7 @@ impl<T, const N: usize> [T; N] { R: Try, R::Residual: Residual<[R::Output; N]>, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { try_collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) } + drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f))) } /// 'Zips up' two arrays into a single array of pairs. @@ -575,11 +559,9 @@ impl<T, const N: usize> [T; N] { /// ``` #[unstable(feature = "array_zip", issue = "80094")] pub fn zip<U>(self, rhs: [U; N]) -> [(T, U); N] { - let mut iter = IntoIterator::into_iter(self).zip(rhs); - - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut iter) } + drain_array_with(self, |lhs| { + drain_array_with(rhs, |rhs| from_trusted_iterator(crate::iter::zip(lhs, rhs))) + }) } /// Returns a slice containing the entire array. Equivalent to `&s[..]`. @@ -626,9 +608,7 @@ impl<T, const N: usize> [T; N] { /// ``` #[unstable(feature = "array_methods", issue = "76118")] pub fn each_ref(&self) -> [&T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut self.iter()) } + from_trusted_iterator(self.iter()) } /// Borrows each element mutably and returns an array of mutable references @@ -648,9 +628,7 @@ impl<T, const N: usize> [T; N] { /// ``` #[unstable(feature = "array_methods", issue = "76118")] pub fn each_mut(&mut self) -> [&mut T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut self.iter_mut()) } + from_trusted_iterator(self.iter_mut()) } /// Divides one array reference into two at an index. @@ -810,105 +788,71 @@ impl<T, const N: usize> [T; N] { } } -/// Pulls `N` items from `iter` and returns them as an array. If the iterator -/// yields fewer than `N` items, this function exhibits undefined behavior. 
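// The public entry points rewritten above (`from_fn`, `map`, `each_ref`, ...) now
// all funnel through `try_from_fn`; their observable behavior is unchanged. A quick
// sketch (not part of the patch):
//
//     let squares: [usize; 4] = std::array::from_fn(|i| i * i);
//     assert_eq!(squares, [0, 1, 4, 9]);
//     assert_eq!(squares.map(|x| x + 1), [1, 2, 5, 10]);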
+/// Populate an array from the first `N` elements of `iter` /// -/// See [`try_collect_into_array`] for more information. +/// # Panics /// +/// If the iterator doesn't actually have enough items. /// -/// # Safety -/// -/// It is up to the caller to guarantee that `iter` yields at least `N` items. -/// Violating this condition causes undefined behavior. -unsafe fn try_collect_into_array_unchecked<I, T, R, const N: usize>(iter: &mut I) -> R::TryType -where - // Note: `TrustedLen` here is somewhat of an experiment. This is just an - // internal function, so feel free to remove if this bound turns out to be a - // bad idea. In that case, remember to also remove the lower bound - // `debug_assert!` below! - I: Iterator + TrustedLen, - I::Item: Try<Output = T, Residual = R>, - R: Residual<[T; N]>, -{ - debug_assert!(N <= iter.size_hint().1.unwrap_or(usize::MAX)); - debug_assert!(N <= iter.size_hint().0); - - // SAFETY: covered by the function contract. - unsafe { try_collect_into_array(iter).unwrap_unchecked() } +/// By depending on `TrustedLen`, however, we can do that check up-front (where +/// it easily optimizes away) so it doesn't impact the loop that fills the array. +#[inline] +fn from_trusted_iterator<T, const N: usize>(iter: impl UncheckedIterator<Item = T>) -> [T; N] { + try_from_trusted_iterator(iter.map(NeverShortCircuit)).0 } -// Infallible version of `try_collect_into_array_unchecked`. -unsafe fn collect_into_array_unchecked<I, const N: usize>(iter: &mut I) -> [I::Item; N] +#[inline] +fn try_from_trusted_iterator<T, R, const N: usize>( + iter: impl UncheckedIterator<Item = R>, +) -> ChangeOutputType<R, [T; N]> where - I: Iterator + TrustedLen, + R: Try<Output = T>, + R::Residual: Residual<[T; N]>, { - let mut map = iter.map(NeverShortCircuit); - - // SAFETY: The same safety considerations w.r.t. the iterator length - // apply for `try_collect_into_array_unchecked` as for - // `collect_into_array_unchecked` - match unsafe { try_collect_into_array_unchecked(&mut map) } { - NeverShortCircuit(array) => array, + assert!(iter.size_hint().0 >= N); + fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T { + move |_| { + // SAFETY: We know that `from_fn` will call this at most N times, + // and we checked to ensure that we have at least that many items. + unsafe { iter.next_unchecked() } + } } + + try_from_fn(next(iter)) } -/// Pulls `N` items from `iter` and returns them as an array. If the iterator -/// yields fewer than `N` items, `Err` is returned containing an iterator over -/// the already yielded items. +/// Version of [`try_from_fn`] using a passed-in slice in order to avoid +/// needing to monomorphize for every array length. /// -/// Since the iterator is passed as a mutable reference and this function calls -/// `next` at most `N` times, the iterator can still be used afterwards to -/// retrieve the remaining items. +/// This takes a generator rather than an iterator so that *at the type level* +/// it never needs to worry about running out of items. When combined with +/// an infallible `Try` type, that means the loop canonicalizes easily, allowing +/// it to optimize well. /// -/// If `iter.next()` panicks, all items already yielded by the iterator are -/// dropped. +/// It would be *possible* to unify this and [`iter_next_chunk_erased`] into one +/// function that does the union of both things, but last time it was that way +/// it resulted in poor codegen from the "are there enough source items?" checks +/// not optimizing away. 
So if you give it a shot, make sure to watch what +/// happens in the codegen tests. #[inline] -fn try_collect_into_array<I, T, R, const N: usize>( - iter: &mut I, -) -> Result<R::TryType, IntoIter<T, N>> +fn try_from_fn_erased<T, R>( + buffer: &mut [MaybeUninit<T>], + mut generator: impl FnMut(usize) -> R, +) -> ControlFlow<R::Residual> where - I: Iterator, - I::Item: Try<Output = T, Residual = R>, - R: Residual<[T; N]>, + R: Try<Output = T>, { - if N == 0 { - // SAFETY: An empty array is always inhabited and has no validity invariants. - return Ok(Try::from_output(unsafe { mem::zeroed() })); - } + let mut guard = Guard { array_mut: buffer, initialized: 0 }; - let mut array = MaybeUninit::uninit_array::<N>(); - let mut guard = Guard { array_mut: &mut array, initialized: 0 }; - - for _ in 0..N { - match iter.next() { - Some(item_rslt) => { - let item = match item_rslt.branch() { - ControlFlow::Break(r) => { - return Ok(FromResidual::from_residual(r)); - } - ControlFlow::Continue(elem) => elem, - }; - - // SAFETY: `guard.initialized` starts at 0, which means push can be called - // at most N times, which this loop does. - unsafe { - guard.push_unchecked(item); - } - } - None => { - let alive = 0..guard.initialized; - mem::forget(guard); - // SAFETY: `array` was initialized with exactly `initialized` - // number of elements. - return Err(unsafe { IntoIter::new_unchecked(array, alive) }); - } - } + while guard.initialized < guard.array_mut.len() { + let item = generator(guard.initialized).branch()?; + + // SAFETY: The loop condition ensures we have space to push the item + unsafe { guard.push_unchecked(item) }; } mem::forget(guard); - // SAFETY: All elements of the array were populated in the loop above. - let output = unsafe { array.transpose().assume_init() }; - Ok(Try::from_output(output)) + ControlFlow::Continue(()) } /// Panic guard for incremental initialization of arrays. @@ -922,14 +866,14 @@ where /// /// To minimize indirection fields are still pub but callers should at least use /// `push_unchecked` to signal that something unsafe is going on. -pub(crate) struct Guard<'a, T, const N: usize> { +struct Guard<'a, T> { /// The array to be initialized. - pub array_mut: &'a mut [MaybeUninit<T>; N], + pub array_mut: &'a mut [MaybeUninit<T>], /// The number of items that have been initialized so far. pub initialized: usize, } -impl<T, const N: usize> Guard<'_, T, N> { +impl<T> Guard<'_, T> { /// Adds an item to the array and updates the initialized item counter. /// /// # Safety @@ -947,28 +891,73 @@ impl<T, const N: usize> Guard<'_, T, N> { } } -impl<T, const N: usize> Drop for Guard<'_, T, N> { +impl<T> Drop for Guard<'_, T> { fn drop(&mut self) { - debug_assert!(self.initialized <= N); + debug_assert!(self.initialized <= self.array_mut.len()); // SAFETY: this slice will contain only initialized objects. unsafe { crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut( - &mut self.array_mut.get_unchecked_mut(..self.initialized), + self.array_mut.get_unchecked_mut(..self.initialized), )); } } } -/// Returns the next chunk of `N` items from the iterator or errors with an -/// iterator over the remainder. Used for `Iterator::next_chunk`. +/// Pulls `N` items from `iter` and returns them as an array. If the iterator +/// yields fewer than `N` items, `Err` is returned containing an iterator over +/// the already yielded items. 
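// The now slice-based `Guard` above, in miniature (a sketch, not part of the patch):
// track how many slots of a `MaybeUninit` buffer are initialized, and if a panic
// unwinds before initialization finishes, drop exactly that prefix.
//
//     struct PrefixGuard<'a, T> {
//         buf: &'a mut [MaybeUninit<T>],
//         init: usize,
//     }
//
//     impl<T> Drop for PrefixGuard<'_, T> {
//         fn drop(&mut self) {
//             for slot in &mut self.buf[..self.init] {
//                 // SAFETY: only the first `init` slots were written.
//                 unsafe { std::ptr::drop_in_place(slot.as_mut_ptr()) }
//             }
//         }
//     }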
+/// +/// Since the iterator is passed as a mutable reference and this function calls +/// `next` at most `N` times, the iterator can still be used afterwards to +/// retrieve the remaining items. +/// +/// If `iter.next()` panics, all items already yielded by the iterator are +/// dropped. +/// +/// Used for [`Iterator::next_chunk`]. +#[inline] +pub(crate) fn iter_next_chunk<T, const N: usize>( + iter: &mut impl Iterator<Item = T>, +) -> Result<[T; N], IntoIter<T, N>> { + let mut array = MaybeUninit::uninit_array::<N>(); + let r = iter_next_chunk_erased(&mut array, iter); + match r { + Ok(()) => { + // SAFETY: All elements of `array` were populated. + Ok(unsafe { MaybeUninit::array_assume_init(array) }) + } + Err(initialized) => { + // SAFETY: Only the first `initialized` elements were populated + Err(unsafe { IntoIter::new_unchecked(array, 0..initialized) }) + } + } +} + +/// Version of [`iter_next_chunk`] using a passed-in slice in order to avoid +/// needing to monomorphize for every array length. +/// +/// Unfortunately this loop has two exit conditions, the buffer filling up +/// or the iterator running out of items, making it tend to optimize poorly. #[inline] -pub(crate) fn iter_next_chunk<I, const N: usize>( - iter: &mut I, -) -> Result<[I::Item; N], IntoIter<I::Item, N>> -where - I: Iterator, -{ - let mut map = iter.map(NeverShortCircuit); - try_collect_into_array(&mut map).map(|NeverShortCircuit(arr)| arr) +fn iter_next_chunk_erased<T>( + buffer: &mut [MaybeUninit<T>], + iter: &mut impl Iterator<Item = T>, +) -> Result<(), usize> { + let mut guard = Guard { array_mut: buffer, initialized: 0 }; + while guard.initialized < guard.array_mut.len() { + let Some(item) = iter.next() else { + // Unlike `try_from_fn_erased`, we want to keep the partial results, + // so we need to defuse the guard instead of using `?`.
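// ("Defuse" here means `mem::forget(guard)`, as the next few lines do: skipping the
// guard's `Drop` hands the initialized prefix back to the caller alive instead of
// dropping it during unwinding.)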
+ let initialized = guard.initialized; + mem::forget(guard); + return Err(initialized) + }; + + // SAFETY: The loop condition ensures we have space to push the item + unsafe { guard.push_unchecked(item) }; + } + + mem::forget(guard); + Ok(()) } diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs index f1a51a550f5..136bbcb8b21 100644 --- a/library/core/src/char/convert.rs +++ b/library/core/src/char/convert.rs @@ -2,6 +2,7 @@ use crate::char::TryFromCharError; use crate::convert::TryFrom; +use crate::error::Error; use crate::fmt; use crate::mem::transmute; use crate::str::FromStr; @@ -150,14 +151,16 @@ pub struct ParseCharError { kind: CharErrorKind, } -impl ParseCharError { - #[unstable( - feature = "char_error_internals", - reason = "this method should not be available publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +enum CharErrorKind { + EmptyString, + TooManyChars, +} + +#[stable(feature = "char_from_str", since = "1.20.0")] +impl Error for ParseCharError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { CharErrorKind::EmptyString => "cannot parse char from empty string", CharErrorKind::TooManyChars => "too many characters in string", @@ -165,16 +168,11 @@ impl ParseCharError { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum CharErrorKind { - EmptyString, - TooManyChars, -} - #[stable(feature = "char_from_str", since = "1.20.0")] impl fmt::Display for ParseCharError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs index eeb08803040..dbfe251f2bb 100644 --- a/library/core/src/char/decode.rs +++ b/library/core/src/char/decode.rs @@ -3,8 +3,6 @@ use crate::error::Error; use crate::fmt; -use super::from_u32_unchecked; - /// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s. /// /// This `struct` is created by the [`decode_utf16`] method on [`char`]. See its @@ -49,7 +47,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> { if !u.is_utf16_surrogate() { // SAFETY: not a surrogate - Some(Ok(unsafe { from_u32_unchecked(u as u32) })) + Some(Ok(unsafe { char::from_u32_unchecked(u as u32) })) } else if u >= 0xDC00 { // a trailing surrogate Some(Err(DecodeUtf16Error { code: u })) @@ -69,7 +67,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> { // all ok, so lets decode it. 
let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000; // SAFETY: we checked that it's a legal unicode value - Some(Ok(unsafe { from_u32_unchecked(c) })) + Some(Ok(unsafe { char::from_u32_unchecked(c) })) } } diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs index 3e7383b4cd1..9bc97ea0bff 100644 --- a/library/core/src/char/methods.rs +++ b/library/core/src/char/methods.rs @@ -53,15 +53,13 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char::decode_utf16; - /// /// // 𝄞mus<invalid>ic<invalid> /// let v = [ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834, /// ]; /// /// assert_eq!( - /// decode_utf16(v) + /// char::decode_utf16(v) /// .map(|r| r.map_err(|e| e.unpaired_surrogate())) /// .collect::<Vec<_>>(), /// vec![ @@ -77,16 +75,14 @@ impl char { /// A lossy decoder can be obtained by replacing `Err` results with the replacement character: /// /// ``` - /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; - /// /// // 𝄞mus<invalid>ic<invalid> /// let v = [ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834, /// ]; /// /// assert_eq!( - /// decode_utf16(v) - /// .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)) + /// char::decode_utf16(v) + /// .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) /// .collect::<String>(), /// "𝄞mus�ic�" /// ); @@ -123,8 +119,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = char::from_u32(0x2764); /// /// assert_eq!(Some('❤'), c); @@ -133,8 +127,6 @@ impl char { /// Returning `None` when the input is not a valid `char`: /// /// ``` - /// use std::char; - /// /// let c = char::from_u32(0x110000); /// /// assert_eq!(None, c); @@ -176,8 +168,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = unsafe { char::from_u32_unchecked(0x2764) }; /// /// assert_eq!('❤', c); @@ -210,8 +200,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = char::from_digit(4, 10); /// /// assert_eq!(Some('4'), c); @@ -225,8 +213,6 @@ impl char { /// Returning `None` when the input is not a digit: /// /// ``` - /// use std::char; - /// /// let c = char::from_digit(20, 10); /// /// assert_eq!(None, c); @@ -235,8 +221,6 @@ impl char { /// Passing a large radix, causing a panic: /// /// ```should_panic - /// use std::char; - /// /// // this panics /// let _c = char::from_digit(1, 37); /// ``` @@ -1786,7 +1770,7 @@ pub fn encode_utf16_raw(mut code: u32, dst: &mut [u16]) -> &mut [u16] { } else { panic!( "encode_utf16: need {} units to encode U+{:X}, but the buffer has {}", - from_u32_unchecked(code).len_utf16(), + char::from_u32_unchecked(code).len_utf16(), code, dst.len(), ) diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs index af98059cf42..8ec78e88733 100644 --- a/library/core/src/char/mod.rs +++ b/library/core/src/char/mod.rs @@ -189,7 +189,7 @@ impl Iterator for EscapeUnicode { } EscapeUnicodeState::Value => { let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf; - let c = from_digit(hex_digit, 16).unwrap(); + let c = char::from_digit(hex_digit, 16).unwrap(); if self.hex_digit_idx == 0 { self.state = EscapeUnicodeState::RightBrace; } else { diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index a7d6fec7d3d..b4d58376aea 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -22,7 +22,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -use crate::const_closure::ConstFnMutClosure; use crate::marker::Destruct; use 
self::Ordering::*; @@ -798,16 +797,7 @@ pub trait Ord: Eq + PartialOrd<Self> { Self: Sized, Self: ~const Destruct, { - #[cfg(not(bootstrap))] - { - max_by(self, other, Ord::cmp) - } - - #[cfg(bootstrap)] - match self.cmp(&other) { - Ordering::Less | Ordering::Equal => other, - Ordering::Greater => self, - } + max_by(self, other, Ord::cmp) } /// Compares and returns the minimum of two values. @@ -828,16 +818,7 @@ pub trait Ord: Eq + PartialOrd<Self> { Self: Sized, Self: ~const Destruct, { - #[cfg(not(bootstrap))] - { - min_by(self, other, Ord::cmp) - } - - #[cfg(bootstrap)] - match self.cmp(&other) { - Ordering::Less | Ordering::Equal => self, - Ordering::Greater => other, - } + min_by(self, other, Ord::cmp) } /// Restrict a value to a certain interval. @@ -1234,23 +1215,7 @@ where F: ~const Destruct, K: ~const Destruct, { - cfg_if! { - if #[cfg(bootstrap)] { - const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>( - f: &mut F, - (v1, v2): (&T, &T), - ) -> Ordering - where - T: ~const Destruct, - K: ~const Destruct, - { - f(v1).cmp(&f(v2)) - } - min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) - } else { - min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2))) - } - } + min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2))) } /// Compares and returns the maximum of two values. @@ -1325,17 +1290,7 @@ where F: ~const Destruct, K: ~const Destruct, { - const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>( - f: &mut F, - (v1, v2): (&T, &T), - ) -> Ordering - where - T: ~const Destruct, - K: ~const Destruct, - { - f(v1).cmp(&f(v2)) - } - max_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) + max_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2))) } // Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types @@ -1536,9 +1491,10 @@ mod impls { } } #[stable(feature = "rust1", since = "1.0.0")] - impl<A: ?Sized, B: ?Sized> PartialOrd<&B> for &A + #[rustc_const_unstable(feature = "const_cmp", issue = "92391")] + impl<A: ?Sized, B: ?Sized> const PartialOrd<&B> for &A where - A: PartialOrd<B>, + A: ~const PartialOrd<B>, { #[inline] fn partial_cmp(&self, other: &&B) -> Option<Ordering> { diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs deleted file mode 100644 index 97900a4862f..00000000000 --- a/library/core/src/const_closure.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::marker::Destruct; -use crate::marker::Tuple; - -/// Struct representing a closure with mutably borrowed data. -/// -/// Example: -/// ```no_build -/// #![feature(const_mut_refs)] -/// use crate::const_closure::ConstFnMutClosure; -/// const fn imp(state: &mut i32, (arg,): (i32,)) -> i32 { -/// *state += arg; -/// *state -/// } -/// let mut i = 5; -/// let mut cl = ConstFnMutClosure::new(&mut i, imp); -/// -/// assert!(7 == cl(2)); -/// assert!(8 == cl(1)); -/// ``` -pub(crate) struct ConstFnMutClosure<CapturedData, Function> { - /// The Data captured by the Closure. - /// Must be either a (mutable) reference or a tuple of (mutable) references. - pub data: CapturedData, - /// The Function of the Closure, must be: Fn(CapturedData, ClosureArgs) -> ClosureReturn - pub func: Function, -} -impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData, Function> { - /// Function for creating a new closure. - /// - /// `data` is the a mutable borrow of data that is captured from the environment. - /// If you want Data to be a tuple of mutable Borrows, the struct must be constructed manually. 
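// At runtime, the simplified const bodies above behave exactly like the stable
// `cmp` helpers they implement (a sketch, not from the patch):
//
//     assert_eq!(std::cmp::min_by_key(-4_i32, 3, |x| x.abs()), 3);
//     assert_eq!(std::cmp::max_by(1_u32, 2, |a, b| a.cmp(b)), 2);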
- /// - /// `func` is the function of the closure, it gets the data and a tuple of the arguments closure - /// and return the return value of the closure. - pub(crate) const fn new<ClosureArguments, ClosureReturnValue>( - data: &'a mut CapturedData, - func: Function, - ) -> Self - where - Function: ~const Fn(&mut CapturedData, ClosureArguments) -> ClosureReturnValue, - { - Self { data, func } - } -} - -macro_rules! impl_fn_mut_tuple { - ($($var:ident)*) => { - #[allow(unused_parens)] - impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const - FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function> - where - Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct, - { - type Output = ClosureReturnValue; - - extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output { - self.call_mut(args) - } - } - #[allow(unused_parens)] - impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const - FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function> - where - Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue, - { - extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output { - #[allow(non_snake_case)] - let ($($var),*) = &mut self.data; - (self.func)(($($var),*), args) - } - } - }; -} -impl_fn_mut_tuple!(A); -impl_fn_mut_tuple!(A B); -impl_fn_mut_tuple!(A B C); -impl_fn_mut_tuple!(A B C D); -impl_fn_mut_tuple!(A B C D E); diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs index 45e2f711c6c..4da7c323492 100644 --- a/library/core/src/convert/num.rs +++ b/library/core/src/convert/num.rs @@ -169,7 +169,7 @@ impl_from! { u32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0" impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] } // bool -> Float -#[stable(feature = "float_from_bool", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "float_from_bool", since = "1.68.0")] #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")] impl const From<bool> for f32 { /// Converts `bool` to `f32` losslessly. @@ -178,7 +178,7 @@ impl const From<bool> for f32 { small as u8 as Self } } -#[stable(feature = "float_from_bool", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "float_from_bool", since = "1.68.0")] #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")] impl const From<bool> for f64 { /// Converts `bool` to `f64` losslessly. diff --git a/library/core/src/error.rs b/library/core/src/error.rs index 7152300abcb..571bc4bcfd1 100644 --- a/library/core/src/error.rs +++ b/library/core/src/error.rs @@ -28,6 +28,7 @@ use crate::fmt::{Debug, Display}; #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "Error")] #[rustc_has_incoherent_inherent_impls] +#[cfg_attr(not(bootstrap), allow(multiple_supertrait_upcastable))] pub trait Error: Debug + Display { /// The lower-level source of this error, if any. 
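// The `From<bool>` impls stamped `1.68.0` above are plain lossless conversions
// (a sketch, not from the patch):
//
//     assert_eq!(f32::from(true), 1.0);
//     assert_eq!(f64::from(false), 0.0);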
/// @@ -485,26 +486,10 @@ impl Error for crate::char::CharTryFromError { } } -#[stable(feature = "char_from_str", since = "1.20.0")] -impl Error for crate::char::ParseCharError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - #[stable(feature = "duration_checked_float", since = "1.66.0")] impl Error for crate::time::TryFromFloatSecsError {} -#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] -impl Error for crate::ffi::FromBytesWithNulError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "CURRENT_RUSTC_VERSION")] impl Error for crate::ffi::FromBytesUntilNulError {} #[unstable(feature = "get_many_mut", issue = "104642")] diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index 15dd9ea7e80..87f077325f8 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -1,4 +1,5 @@ use crate::cmp::Ordering; +use crate::error::Error; use crate::ffi::c_char; use crate::fmt; use crate::intrinsics; @@ -129,10 +130,12 @@ impl FromBytesWithNulError { const fn not_nul_terminated() -> FromBytesWithNulError { FromBytesWithNulError { kind: FromBytesWithNulErrorKind::NotNulTerminated } } +} - #[doc(hidden)] - #[unstable(feature = "cstr_internals", issue = "none")] - pub fn __description(&self) -> &str { +#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] +impl Error for FromBytesWithNulError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { FromBytesWithNulErrorKind::InteriorNul(..) => { "data provided contains an interior nul byte" @@ -150,10 +153,10 @@ impl FromBytesWithNulError { /// This error is created by the [`CStr::from_bytes_until_nul`] method. 
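// How the newly stabilized constructor differs from the long-stable
// `CStr::from_bytes_with_nul` (a sketch, not from the patch):
//
//     use std::ffi::CStr;
//     let bytes = b"hi\0trailing";
//     assert!(CStr::from_bytes_with_nul(bytes).is_err()); // interior nul + trailing data
//     assert_eq!(CStr::from_bytes_until_nul(bytes).unwrap().to_bytes(), b"hi");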
/// #[derive(Clone, PartialEq, Eq, Debug)] -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "CURRENT_RUSTC_VERSION")] pub struct FromBytesUntilNulError(()); -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "CURRENT_RUSTC_VERSION")] impl fmt::Display for FromBytesUntilNulError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "data provided does not contain a nul") @@ -180,7 +183,7 @@ impl Default for &CStr { impl fmt::Display for FromBytesWithNulError { #[allow(deprecated, deprecated_in_future)] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.__description())?; + f.write_str(self.description())?; if let FromBytesWithNulErrorKind::InteriorNul(pos) = self.kind { write!(f, " at byte pos {pos}")?; } @@ -306,8 +309,6 @@ impl CStr { /// /// # Examples /// ``` - /// #![feature(cstr_from_bytes_until_nul)] - /// /// use std::ffi::CStr; /// /// let mut buffer = [0u8; 16]; @@ -322,8 +323,9 @@ impl CStr { /// assert_eq!(c_str.to_str().unwrap(), "AAAAAAAA"); /// ``` /// - #[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] - #[rustc_const_unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] + #[rustc_allow_const_fn_unstable(const_slice_index)] + #[stable(feature = "cstr_from_bytes_until_nul", since = "CURRENT_RUSTC_VERSION")] + #[rustc_const_stable(feature = "cstr_from_bytes_until_nul", since = "CURRENT_RUSTC_VERSION")] pub const fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> { let nul_pos = memchr::memchr(0, bytes); match nul_pos { @@ -455,6 +457,10 @@ impl CStr { /// to a contiguous region of memory terminated with a 0 byte to represent /// the end of the string. /// + /// The type of the returned pointer is + /// [`*const c_char`][crate::ffi::c_char], and whether it's + /// an alias for `*const i8` or `*const u8` is platform-specific. + /// /// **WARNING** /// /// The returned pointer is read-only; writing to it (including passing it @@ -468,6 +474,7 @@ impl CStr { /// # #![allow(unused_must_use)] #![allow(temporary_cstring_as_ptr)] /// use std::ffi::CString; /// + /// // Do not do this: /// let ptr = CString::new("Hello").expect("CString::new failed").as_ptr(); /// unsafe { /// // `ptr` is dangling diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs index c4fb3620946..46cbcd43530 100644 --- a/library/core/src/future/mod.rs +++ b/library/core/src/future/mod.rs @@ -56,51 +56,6 @@ unsafe impl Send for ResumeTy {} #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Sync for ResumeTy {} -/// Wrap a generator in a future. -/// -/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give -/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`). 
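// The sound counterpart to the "Do not do this" `as_ptr` example a few hunks above
// (a sketch, not from the patch): bind the `CString` so it outlives every use of the
// raw pointer.
//
//     use std::ffi::CString;
//     let hello = CString::new("Hello").expect("CString::new failed");
//     let ptr = hello.as_ptr();
//     unsafe {
//         assert_eq!(*ptr as u8, b'H'); // fine: `hello` is still alive here
//     }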
-// This is `const` to avoid extra errors after we recover from `const async fn` -#[doc(hidden)] -#[unstable(feature = "gen_future", issue = "50547")] -#[rustc_const_unstable(feature = "gen_future", issue = "50547")] -#[inline] -pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return> -where - T: crate::ops::Generator<ResumeTy, Yield = ()>, -{ - use crate::{ - ops::{Generator, GeneratorState}, - pin::Pin, - task::Poll, - }; - - #[rustc_diagnostic_item = "gen_future"] - struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T); - - // We rely on the fact that async/await futures are immovable in order to create - // self-referential borrows in the underlying generator. - impl<T: Generator<ResumeTy, Yield = ()>> !Unpin for GenFuture<T> {} - - impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> { - type Output = T::Return; - #[track_caller] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { - // SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection. - let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) }; - - // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The - // `.await` lowering will safely cast that back to a `&mut Context`. - match gen.resume(ResumeTy(NonNull::from(cx).cast::<Context<'static>>())) { - GeneratorState::Yielded(()) => Poll::Pending, - GeneratorState::Complete(x) => Poll::Ready(x), - } - } - } - - GenFuture(gen) -} - #[lang = "get_context"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs index 5a76e866923..ee13dae60b1 100644 --- a/library/core/src/hint.rs +++ b/library/core/src/hint.rs @@ -216,7 +216,8 @@ pub fn spin_loop() { /// /// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. The /// extent to which it can block optimisations may vary depending upon the platform and code-gen -/// backend used. Programs cannot rely on `black_box` for *correctness* in any way. +/// backend used. Programs cannot rely on `black_box` for *correctness*, beyond it behaving as the +/// identity function. /// /// [`std::convert::identity`]: crate::convert::identity /// diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index a315a28fb0d..b1ed3b31e43 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -58,7 +58,6 @@ use crate::marker::DiscriminantKind; use crate::marker::Tuple; use crate::mem; -#[cfg(not(bootstrap))] pub mod mir; // These imports are used for simplifying intra-doc links @@ -963,7 +962,6 @@ extern "rust-intrinsic" { /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")] #[rustc_safe_intrinsic] - #[cfg(not(bootstrap))] pub fn assert_mem_uninitialized_valid<T>(); /// Gets a reference to a static `Location` indicating where it was called. diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs index 3d7ccffa173..995d66dca56 100644 --- a/library/core/src/intrinsics/mir.rs +++ b/library/core/src/intrinsics/mir.rs @@ -60,8 +60,7 @@ //! //! # Examples //! -#![cfg_attr(bootstrap, doc = "```rust,compile_fail")] -#![cfg_attr(not(bootstrap), doc = "```rust")] +//! ```rust //! #![feature(core_intrinsics, custom_mir)] //! //! 
extern crate core; @@ -300,8 +299,7 @@ define!( /// /// # Examples /// - #[cfg_attr(bootstrap, doc = "```rust,compile_fail")] - #[cfg_attr(not(bootstrap), doc = "```rust")] + /// ```rust /// #![feature(custom_mir, core_intrinsics)] /// /// extern crate core; diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs index 5e4211058aa..13719c727e9 100644 --- a/library/core/src/iter/adapters/array_chunks.rs +++ b/library/core/src/iter/adapters/array_chunks.rs @@ -1,7 +1,5 @@ use crate::array; -use crate::const_closure::ConstFnMutClosure; use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce}; -use crate::mem::{self, MaybeUninit}; use crate::ops::{ControlFlow, NeverShortCircuit, Try}; /// An iterator over `N` elements of the iterator at a time. @@ -189,13 +187,12 @@ where I: Iterator, { #[inline] - default fn fold<B, F>(mut self, init: B, mut f: F) -> B + default fn fold<B, F>(mut self, init: B, f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { - let fold = ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp); - self.try_fold(init, fold).0 + self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0 } } @@ -214,19 +211,14 @@ where let mut i = 0; // Use a while loop because (0..len).step_by(N) doesn't optimize well. while inner_len - i >= N { - let mut chunk = MaybeUninit::uninit_array(); - let mut guard = array::Guard { array_mut: &mut chunk, initialized: 0 }; - while guard.initialized < N { + let chunk = crate::array::from_fn(|local| { // SAFETY: The method consumes the iterator and the loop condition ensures that // all accesses are in bounds and only happen once. unsafe { - let idx = i + guard.initialized; - guard.push_unchecked(self.iter.__iterator_get_unchecked(idx)); + let idx = i + local; + self.iter.__iterator_get_unchecked(idx) } - } - mem::forget(guard); - // SAFETY: The loop above initialized all elements - let chunk = unsafe { MaybeUninit::array_assume_init(chunk) }; + }); accum = f(accum, chunk); i += N; } diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs index 1945e402ff5..477e7117c3e 100644 --- a/library/core/src/iter/adapters/by_ref_sized.rs +++ b/library/core/src/iter/adapters/by_ref_sized.rs @@ -1,7 +1,4 @@ -use crate::{ - const_closure::ConstFnMutClosure, - ops::{NeverShortCircuit, Try}, -}; +use crate::ops::{NeverShortCircuit, Try}; /// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics. /// @@ -39,13 +36,12 @@ impl<I: Iterator> Iterator for ByRefSized<'_, I> { } #[inline] - fn fold<B, F>(self, init: B, mut f: F) -> B + fn fold<B, F>(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `fold` needs ownership, so this can't forward directly. - I::try_fold(self.0, init, ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp)) - .0 + I::try_fold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 } #[inline] @@ -76,17 +72,12 @@ impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> { } #[inline] - fn rfold<B, F>(self, init: B, mut f: F) -> B + fn rfold<B, F>(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `rfold` needs ownership, so this can't forward directly. 
- I::try_rfold( - self.0, - init, - ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp), - ) - .0 + I::try_rfold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 } #[inline] diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs index aba24a79dcf..914ff86c1a9 100644 --- a/library/core/src/iter/adapters/cloned.rs +++ b/library/core/src/iter/adapters/cloned.rs @@ -1,7 +1,7 @@ use crate::iter::adapters::{ zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; -use crate::iter::{FusedIterator, TrustedLen}; +use crate::iter::{FusedIterator, TrustedLen, UncheckedIterator}; use crate::ops::Try; /// An iterator that clones the elements of an underlying iterator. @@ -140,3 +140,16 @@ where T: Clone, { } + +impl<'a, I, T: 'a> UncheckedIterator for Cloned<I> +where + I: UncheckedIterator<Item = &'a T>, + T: Clone, +{ + unsafe fn next_unchecked(&mut self) -> T { + // SAFETY: `Cloned` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. + let item = unsafe { self.it.next_unchecked() }; + item.clone() + } +} diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs index e0d665c9e12..6bdf53f7fc9 100644 --- a/library/core/src/iter/adapters/filter_map.rs +++ b/library/core/src/iter/adapters/filter_map.rs @@ -99,7 +99,7 @@ where ) -> impl FnMut((), T) -> ControlFlow<B> + '_ { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, + None => ControlFlow::Continue(()), } } diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs index 307016c2690..b040a0ea901 100644 --- a/library/core/src/iter/adapters/flatten.rs +++ b/library/core/src/iter/adapters/flatten.rs @@ -539,7 +539,7 @@ where #[rustc_inherit_overflow_checks] fn advance<U: Iterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> { match iter.advance_by(n) { - Ok(()) => ControlFlow::BREAK, + Ok(()) => ControlFlow::Break(()), Err(advanced) => ControlFlow::Continue(n - advanced), } } @@ -629,7 +629,7 @@ where #[rustc_inherit_overflow_checks] fn advance<U: DoubleEndedIterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> { match iter.advance_back_by(n) { - Ok(()) => ControlFlow::BREAK, + Ok(()) => ControlFlow::Break(()), Err(advanced) => ControlFlow::Continue(n - advanced), } } diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs index 9e25dbe462c..31d02a4da6e 100644 --- a/library/core/src/iter/adapters/map.rs +++ b/library/core/src/iter/adapters/map.rs @@ -2,7 +2,7 @@ use crate::fmt; use crate::iter::adapters::{ zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; -use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; +use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen, UncheckedIterator}; use crate::ops::Try; /// An iterator that maps the values of `iter` with `f`. @@ -187,6 +187,19 @@ where { } +impl<B, I, F> UncheckedIterator for Map<I, F> +where + I: UncheckedIterator, + F: FnMut(I::Item) -> B, +{ + unsafe fn next_unchecked(&mut self) -> B { + // SAFETY: `Map` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. 
+ let item = unsafe { self.iter.next_unchecked() }; + (self.f)(item) + } +} + #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] unsafe impl<I, F> TrustedRandomAccess for Map<I, F> where I: TrustedRandomAccess {} diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index 8153c8cfef1..b6b0c90cb7d 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -1,7 +1,7 @@ use crate::cmp; use crate::fmt::{self, Debug}; use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator}; -use crate::iter::{InPlaceIterable, SourceIter, TrustedLen}; +use crate::iter::{InPlaceIterable, SourceIter, TrustedLen, UncheckedIterator}; /// An iterator that iterates two other iterators simultaneously. /// @@ -417,6 +417,13 @@ where { } +impl<A, B> UncheckedIterator for Zip<A, B> +where + A: UncheckedIterator, + B: UncheckedIterator, +{ +} + // Arbitrarily selects the left side of the zip iteration as extractable "source" // it would require negative trait bounds to be able to try both #[unstable(issue = "none", feature = "inplace_iteration")] diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs index bb35d50b4bf..156b925de77 100644 --- a/library/core/src/iter/mod.rs +++ b/library/core/src/iter/mod.rs @@ -362,15 +362,13 @@ macro_rules! impl_fold_via_try_fold { }; (@internal $fold:ident -> $try_fold:ident) => { #[inline] - fn $fold<AAA, FFF>(mut self, init: AAA, mut fold: FFF) -> AAA + fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA where FFF: FnMut(AAA, Self::Item) -> AAA, { - use crate::const_closure::ConstFnMutClosure; use crate::ops::NeverShortCircuit; - let fold = ConstFnMutClosure::new(&mut fold, NeverShortCircuit::wrap_mut_2_imp); - self.$try_fold(init, fold).0 + self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0 } }; } @@ -452,6 +450,7 @@ pub use self::adapters::{ pub use self::adapters::{Intersperse, IntersperseWith}; pub(crate) use self::adapters::try_process; +pub(crate) use self::traits::UncheckedIterator; mod adapters; mod range; diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index b5739f2f3c0..78e27d73065 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -1,4 +1,3 @@ -use crate::char; use crate::convert::TryFrom; use crate::mem; use crate::ops::{self, Try}; diff --git a/library/core/src/iter/sources/once_with.rs b/library/core/src/iter/sources/once_with.rs index 080ae27a30f..9309a06c8cf 100644 --- a/library/core/src/iter/sources/once_with.rs +++ b/library/core/src/iter/sources/once_with.rs @@ -73,7 +73,7 @@ pub struct OnceWith<F> { gen: Option<F>, } -#[stable(feature = "iter_once_with_debug", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "iter_once_with_debug", since = "1.68.0")] impl<F> fmt::Debug for OnceWith<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.gen.is_some() { diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs index 20420a3ad8e..3f34105a3e0 100644 --- a/library/core/src/iter/sources/repeat_with.rs +++ b/library/core/src/iter/sources/repeat_with.rs @@ -78,7 +78,7 @@ pub struct RepeatWith<F> { repeater: F, } -#[stable(feature = "iterator_repeat_with_debug", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "iterator_repeat_with_debug", since = "1.68.0")] impl<F> fmt::Debug for RepeatWith<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
f.debug_struct("RepeatWith").finish_non_exhaustive() diff --git a/library/core/src/iter/traits/double_ended.rs b/library/core/src/iter/traits/double_ended.rs index bdf94c792c2..ed23873cdde 100644 --- a/library/core/src/iter/traits/double_ended.rs +++ b/library/core/src/iter/traits/double_ended.rs @@ -352,7 +352,7 @@ pub trait DoubleEndedIterator: Iterator { #[inline] fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> { move |(), x| { - if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } + if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) } } } diff --git a/library/core/src/iter/traits/exact_size.rs b/library/core/src/iter/traits/exact_size.rs index 1757e37ec0e..908830d8a95 100644 --- a/library/core/src/iter/traits/exact_size.rs +++ b/library/core/src/iter/traits/exact_size.rs @@ -21,6 +21,16 @@ /// /// [`len`]: ExactSizeIterator::len /// +/// # When *shouldn't* an adapter be `ExactSizeIterator`? +/// +/// If an adapter makes an iterator *longer*, then it's usually incorrect for +/// that adapter to implement `ExactSizeIterator`. The inner exact-sized +/// iterator might already be `usize::MAX`-long, and thus the length of the +/// longer adapted iterator would no longer be exactly representable in `usize`. +/// +/// This is why [`Chain<A, B>`](crate::iter::Chain) isn't `ExactSizeIterator`, +/// even when `A` and `B` are both `ExactSizeIterator`. +/// /// # Examples /// /// Basic usage: diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index a4a665d48d5..b3a630d9559 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -69,6 +69,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {} #[doc(notable_trait)] #[rustc_diagnostic_item = "Iterator"] #[must_use = "iterators are lazy and do nothing unless consumed"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Iterator { /// The type of the elements being iterated over. 
#[rustc_diagnostic_item = "IteratorItem"] @@ -141,6 +142,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_next_chunk", reason = "recently added", issue = "98326")] + #[rustc_do_not_const_check] fn next_chunk<const N: usize>( &mut self, ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> @@ -218,6 +220,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn size_hint(&self) -> (usize, Option<usize>) { (0, None) } @@ -255,6 +258,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn count(self) -> usize where Self: Sized, @@ -285,6 +289,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn last(self) -> Option<Self::Item> where Self: Sized, @@ -331,6 +336,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")] + #[rustc_do_not_const_check] fn advance_by(&mut self, n: usize) -> Result<(), usize> { for i in 0..n { self.next().ok_or(i)?; @@ -379,6 +385,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn nth(&mut self, n: usize) -> Option<Self::Item> { self.advance_by(n).ok()?; self.next() @@ -431,6 +438,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_step_by", since = "1.28.0")] + #[rustc_do_not_const_check] fn step_by(self, step: usize) -> StepBy<Self> where Self: Sized, @@ -502,6 +510,7 @@ pub trait Iterator { /// [`OsStr`]: ../../std/ffi/struct.OsStr.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter> where Self: Sized, @@ -620,6 +629,7 @@ pub trait Iterator { /// [`zip`]: crate::iter::zip #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter> where Self: Sized, @@ -662,6 +672,7 @@ pub trait Iterator { /// [`intersperse_with`]: Iterator::intersperse_with #[inline] #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")] + #[rustc_do_not_const_check] fn intersperse(self, separator: Self::Item) -> Intersperse<Self> where Self: Sized, @@ -720,6 +731,7 @@ pub trait Iterator { /// [`intersperse`]: Iterator::intersperse #[inline] #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")] + #[rustc_do_not_const_check] fn intersperse_with<G>(self, separator: G) -> IntersperseWith<Self, G> where Self: Sized, @@ -779,6 +791,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn map<B, F>(self, f: F) -> Map<Self, F> where Self: Sized, @@ -824,6 +837,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_for_each", since = "1.21.0")] + #[rustc_do_not_const_check] fn for_each<F>(self, f: F) where Self: Sized, @@ -899,6 +913,7 @@ pub trait Iterator { /// Note that `iter.filter(f).next()` is equivalent to `iter.find(f)`. 
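The equivalence noted just above can be checked directly; both forms short-circuit at the first match and return `Option<&i32>` here:

```rust
fn main() {
    let a = [0, 1, 2];
    assert_eq!(
        a.iter().filter(|&&x| x > 0).next(),
        a.iter().find(|&&x| x > 0)
    );
}
```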
#[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn filter<P>(self, predicate: P) -> Filter<Self, P> where Self: Sized, @@ -944,6 +959,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F> where Self: Sized, @@ -990,6 +1006,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn enumerate(self) -> Enumerate<Self> where Self: Sized, @@ -1061,6 +1078,7 @@ pub trait Iterator { /// [`next`]: Iterator::next #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn peekable(self) -> Peekable<Self> where Self: Sized, @@ -1126,6 +1144,7 @@ pub trait Iterator { #[inline] #[doc(alias = "drop_while")] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P> where Self: Sized, @@ -1207,6 +1226,7 @@ pub trait Iterator { /// the iteration should stop, but wasn't placed back into the iterator. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P> where Self: Sized, @@ -1295,6 +1315,7 @@ pub trait Iterator { /// [`fuse`]: Iterator::fuse #[inline] #[stable(feature = "iter_map_while", since = "1.57.0")] + #[rustc_do_not_const_check] fn map_while<B, P>(self, predicate: P) -> MapWhile<Self, P> where Self: Sized, @@ -1326,6 +1347,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn skip(self, n: usize) -> Skip<Self> where Self: Sized, @@ -1379,6 +1401,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn take(self, n: usize) -> Take<Self> where Self: Sized, @@ -1428,6 +1451,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F> where Self: Sized, @@ -1468,6 +1492,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F> where Self: Sized, @@ -1552,6 +1577,7 @@ pub trait Iterator { /// [`flat_map()`]: Iterator::flat_map #[inline] #[stable(feature = "iterator_flatten", since = "1.29.0")] + #[rustc_do_not_const_check] fn flatten(self) -> Flatten<Self> where Self: Sized, @@ -1620,6 +1646,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn fuse(self) -> Fuse<Self> where Self: Sized, @@ -1704,6 +1731,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn inspect<F>(self, f: F) -> Inspect<Self, F> where Self: Sized, @@ -1734,6 +1762,7 @@ pub trait Iterator { /// assert_eq!(of_rust, vec!["of", "Rust"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn by_ref(&mut self) -> &mut Self where Self: Sized, @@ -1853,6 +1882,7 @@ pub trait Iterator { #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"] #[cfg_attr(not(test), rustc_diagnostic_item = "iterator_collect_fn")] + #[rustc_do_not_const_check] fn collect<B: FromIterator<Self::Item>>(self) -> B where Self: Sized, 
@@ -1931,6 +1961,7 @@ pub trait Iterator { /// [`collect`]: Iterator::collect #[inline] #[unstable(feature = "iterator_try_collect", issue = "94047")] + #[rustc_do_not_const_check] fn try_collect<B>(&mut self) -> ChangeOutputType<Self::Item, B> where Self: Sized, @@ -2004,6 +2035,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_collect_into", reason = "new API", issue = "94780")] + #[rustc_do_not_const_check] fn collect_into<E: Extend<Self::Item>>(self, collection: &mut E) -> &mut E where Self: Sized, @@ -2038,6 +2070,7 @@ pub trait Iterator { /// assert_eq!(odd, vec![1, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn partition<B, F>(self, f: F) -> (B, B) where Self: Sized, @@ -2100,6 +2133,7 @@ pub trait Iterator { /// assert!(a[i..].iter().all(|&n| n % 2 == 1)); // odds /// ``` #[unstable(feature = "iter_partition_in_place", reason = "new API", issue = "62543")] + #[rustc_do_not_const_check] fn partition_in_place<'a, T: 'a, P>(mut self, ref mut predicate: P) -> usize where Self: Sized + DoubleEndedIterator<Item = &'a mut T>, @@ -2157,6 +2191,7 @@ pub trait Iterator { /// assert!(!"IntoIterator".chars().is_partitioned(char::is_uppercase)); /// ``` #[unstable(feature = "iter_is_partitioned", reason = "new API", issue = "62544")] + #[rustc_do_not_const_check] fn is_partitioned<P>(mut self, mut predicate: P) -> bool where Self: Sized, @@ -2251,6 +2286,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] + #[rustc_do_not_const_check] fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where Self: Sized, @@ -2309,6 +2345,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] + #[rustc_do_not_const_check] fn try_for_each<F, R>(&mut self, f: F) -> R where Self: Sized, @@ -2428,6 +2465,7 @@ pub trait Iterator { #[doc(alias = "inject", alias = "foldl")] #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn fold<B, F>(mut self, init: B, mut f: F) -> B where Self: Sized, @@ -2465,6 +2503,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_fold_self", since = "1.51.0")] + #[rustc_do_not_const_check] fn reduce<F>(mut self, f: F) -> Option<Self::Item> where Self: Sized, @@ -2536,6 +2575,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iterator_try_reduce", reason = "new API", issue = "87053")] + #[rustc_do_not_const_check] fn try_reduce<F, R>(&mut self, f: F) -> ChangeOutputType<R, Option<R::Output>> where Self: Sized, @@ -2593,6 +2633,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn all<F>(&mut self, f: F) -> bool where Self: Sized, @@ -2601,10 +2642,10 @@ pub trait Iterator { #[inline] fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> { move |(), x| { - if f(x) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } + if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) } } } - self.try_fold((), check(f)) == ControlFlow::CONTINUE + self.try_fold((), check(f)) == ControlFlow::Continue(()) } /// Tests if any element of the iterator matches a predicate. 
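With the `CONTINUE`/`BREAK` associated constants gone, the `check` adapters spell out `ControlFlow::Continue(())` and `ControlFlow::Break(())` as above. This is the shape of `all` on top of `try_fold`, sketched as a hypothetical free function mirroring the method:

```rust
use std::ops::ControlFlow;

fn all<I: Iterator>(iter: &mut I, mut f: impl FnMut(I::Item) -> bool) -> bool {
    iter.try_fold((), |(), x| {
        // Keep scanning while the predicate holds; stop at the first failure.
        if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
    }) == ControlFlow::Continue(())
}

fn main() {
    assert!(all(&mut (1..4), |x| x > 0));
    assert!(!all(&mut vec![1, -2, 3].into_iter(), |x| x > 0));
}
```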
@@ -2646,6 +2687,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn any<F>(&mut self, f: F) -> bool where Self: Sized, @@ -2654,11 +2696,11 @@ pub trait Iterator { #[inline] fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> { move |(), x| { - if f(x) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } + if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) } } } - self.try_fold((), check(f)) == ControlFlow::BREAK + self.try_fold((), check(f)) == ControlFlow::Break(()) } /// Searches for an element of an iterator that satisfies a predicate. @@ -2709,6 +2751,7 @@ pub trait Iterator { /// Note that `iter.find(f)` is equivalent to `iter.filter(f).next()`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn find<P>(&mut self, predicate: P) -> Option<Self::Item> where Self: Sized, @@ -2717,7 +2760,7 @@ pub trait Iterator { #[inline] fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> { move |(), x| { - if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } + if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) } } } @@ -2740,6 +2783,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_find_map", since = "1.30.0")] + #[rustc_do_not_const_check] fn find_map<B, F>(&mut self, f: F) -> Option<B> where Self: Sized, @@ -2749,7 +2793,7 @@ pub trait Iterator { fn check<T, B>(mut f: impl FnMut(T) -> Option<B>) -> impl FnMut((), T) -> ControlFlow<B> { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, + None => ControlFlow::Continue(()), } } @@ -2796,6 +2840,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "try_find", reason = "new API", issue = "63178")] + #[rustc_do_not_const_check] fn try_find<F, R>(&mut self, f: F) -> ChangeOutputType<R, Option<Self::Item>> where Self: Sized, @@ -2812,7 +2857,7 @@ pub trait Iterator { R: Residual<Option<I>>, { move |(), x| match f(&x).branch() { - ControlFlow::Continue(false) => ControlFlow::CONTINUE, + ControlFlow::Continue(false) => ControlFlow::Continue(()), ControlFlow::Continue(true) => ControlFlow::Break(Try::from_output(Some(x))), ControlFlow::Break(r) => ControlFlow::Break(FromResidual::from_residual(r)), } @@ -2878,6 +2923,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn position<P>(&mut self, predicate: P) -> Option<usize> where Self: Sized, @@ -2935,6 +2981,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn rposition<P>(&mut self, predicate: P) -> Option<usize> where P: FnMut(Self::Item) -> bool, @@ -2986,6 +3033,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn max(self) -> Option<Self::Item> where Self: Sized, @@ -3024,6 +3072,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn min(self) -> Option<Self::Item> where Self: Sized, @@ -3046,6 +3095,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + #[rustc_do_not_const_check] fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, @@ -3079,6 +3129,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_max_by", since = "1.15.0")] + 
#[rustc_do_not_const_check] fn max_by<F>(self, compare: F) -> Option<Self::Item> where Self: Sized, @@ -3106,6 +3157,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + #[rustc_do_not_const_check] fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, @@ -3139,6 +3191,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_min_by", since = "1.15.0")] + #[rustc_do_not_const_check] fn min_by<F>(self, compare: F) -> Option<Self::Item> where Self: Sized, @@ -3176,6 +3229,7 @@ pub trait Iterator { #[inline] #[doc(alias = "reverse")] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn rev(self) -> Rev<Self> where Self: Sized + DoubleEndedIterator, @@ -3214,6 +3268,7 @@ pub trait Iterator { /// assert_eq!(z, [3, 6]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where FromA: Default + Extend<A>, @@ -3246,6 +3301,7 @@ pub trait Iterator { /// assert_eq!(v_map, vec![1, 2, 3]); /// ``` #[stable(feature = "iter_copied", since = "1.36.0")] + #[rustc_do_not_const_check] fn copied<'a, T: 'a>(self) -> Copied<Self> where Self: Sized + Iterator<Item = &'a T>, @@ -3293,6 +3349,7 @@ pub trait Iterator { /// assert_eq!(&[vec![23]], &faster[..]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn cloned<'a, T: 'a>(self) -> Cloned<Self> where Self: Sized + Iterator<Item = &'a T>, @@ -3327,6 +3384,7 @@ pub trait Iterator { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[rustc_do_not_const_check] fn cycle(self) -> Cycle<Self> where Self: Sized + Clone, @@ -3370,6 +3428,7 @@ pub trait Iterator { /// ``` #[track_caller] #[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")] + #[rustc_do_not_const_check] fn array_chunks<const N: usize>(self) -> ArrayChunks<Self, N> where Self: Sized, @@ -3400,6 +3459,7 @@ pub trait Iterator { /// assert_eq!(sum, 6); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] + #[rustc_do_not_const_check] fn sum<S>(self) -> S where Self: Sized, @@ -3429,6 +3489,7 @@ pub trait Iterator { /// assert_eq!(factorial(5), 120); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] + #[rustc_do_not_const_check] fn product<P>(self) -> P where Self: Sized, @@ -3450,6 +3511,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().cmp([1].iter()), Ordering::Greater); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn cmp<I>(self, other: I) -> Ordering where I: IntoIterator<Item = Self::Item>, @@ -3479,6 +3541,7 @@ pub trait Iterator { /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn cmp_by<I, F>(self, other: I, cmp: F) -> Ordering where Self: Sized, @@ -3491,7 +3554,7 @@ pub trait Iterator { F: FnMut(X, Y) -> Ordering, { move |x, y| match cmp(x, y) { - Ordering::Equal => ControlFlow::CONTINUE, + Ordering::Equal => ControlFlow::Continue(()), non_eq => ControlFlow::Break(non_eq), } } @@ -3502,8 +3565,10 @@ pub trait Iterator { } } - /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those - /// of another. + /// [Lexicographically](Ord#lexicographical-comparison) compares the [`PartialOrd`] elements of + /// this [`Iterator`] with those of another. 
The comparison works like short-circuit + /// evaluation, returning a result without comparing the remaining elements. + /// As soon as an order can be determined, the evaluation stops and a result is returned. /// /// # Examples /// @@ -3513,10 +3578,27 @@ pub trait Iterator { /// assert_eq!([1.].iter().partial_cmp([1.].iter()), Some(Ordering::Equal)); /// assert_eq!([1.].iter().partial_cmp([1., 2.].iter()), Some(Ordering::Less)); /// assert_eq!([1., 2.].iter().partial_cmp([1.].iter()), Some(Ordering::Greater)); + /// ``` /// + /// For floating-point numbers, NaN does not have a total order and will result + /// in `None` when compared: + /// + /// ``` /// assert_eq!([f64::NAN].iter().partial_cmp([1.].iter()), None); /// ``` + /// + /// The results are determined by the order of evaluation. + /// + /// ``` + /// use std::cmp::Ordering; + /// + /// assert_eq!([1.0, f64::NAN].iter().partial_cmp([2.0, f64::NAN].iter()), Some(Ordering::Less)); + /// assert_eq!([2.0, f64::NAN].iter().partial_cmp([1.0, f64::NAN].iter()), Some(Ordering::Greater)); + /// assert_eq!([f64::NAN, 1.0].iter().partial_cmp([f64::NAN, 2.0].iter()), None); + /// ``` + /// #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoIterator, @@ -3555,6 +3637,7 @@ pub trait Iterator { /// ); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn partial_cmp_by<I, F>(self, other: I, partial_cmp: F) -> Option<Ordering> where Self: Sized, @@ -3567,7 +3650,7 @@ pub trait Iterator { F: FnMut(X, Y) -> Option<Ordering>, { move |x, y| match partial_cmp(x, y) { - Some(Ordering::Equal) => ControlFlow::CONTINUE, + Some(Ordering::Equal) => ControlFlow::Continue(()), non_eq => ControlFlow::Break(non_eq), } } @@ -3588,6 +3671,7 @@ pub trait Iterator { /// assert_eq!([1].iter().eq([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn eq<I>(self, other: I) -> bool where I: IntoIterator, @@ -3613,6 +3697,7 @@ pub trait Iterator { /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y)); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn eq_by<I, F>(self, other: I, eq: F) -> bool where Self: Sized, @@ -3625,7 +3710,7 @@ pub trait Iterator { F: FnMut(X, Y) -> bool, { move |x, y| { - if eq(x, y) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } + if eq(x, y) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) } } } @@ -3645,6 +3730,7 @@ pub trait Iterator { /// assert_eq!([1].iter().ne([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn ne<I>(self, other: I) -> bool where I: IntoIterator, @@ -3666,6 +3752,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().lt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn lt<I>(self, other: I) -> bool where I: IntoIterator, @@ -3687,6 +3774,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().le([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn le<I>(self, other: I) -> bool where I: IntoIterator, @@ -3708,6 +3796,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().gt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn gt<I>(self, other: I) -> bool where I: IntoIterator, @@ -3729,6 +3818,7 @@ pub trait 
Iterator { /// assert_eq!([1, 2].iter().ge([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn ge<I>(self, other: I) -> bool where I: IntoIterator, @@ -3760,6 +3850,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted(self) -> bool where Self: Sized, @@ -3788,6 +3879,7 @@ pub trait Iterator { /// /// [`is_sorted`]: Iterator::is_sorted #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted_by<F>(mut self, compare: F) -> bool where Self: Sized, @@ -3834,6 +3926,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted_by_key<F, K>(self, f: F) -> bool where Self: Sized, @@ -3849,6 +3942,7 @@ pub trait Iterator { #[inline] #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] + #[rustc_do_not_const_check] unsafe fn __iterator_get_unchecked(&mut self, _idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, @@ -3859,7 +3953,7 @@ pub trait Iterator { /// Compares two iterators element-wise using the given function. /// -/// If `ControlFlow::CONTINUE` is returned from the function, the comparison moves on to the next +/// If `ControlFlow::Continue(())` is returned from the function, the comparison moves on to the next /// elements of both iterators. Returning `ControlFlow::Break(x)` short-circuits the iteration and /// returns `ControlFlow::Break(x)`. If one of the iterators runs out of elements, /// `ControlFlow::Continue(ord)` is returned where `ord` is the result of comparing the lengths of diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs index da753745740..af02848233d 100644 --- a/library/core/src/iter/traits/marker.rs +++ b/library/core/src/iter/traits/marker.rs @@ -31,6 +31,17 @@ impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {} /// The iterator must produce exactly the number of elements it reported /// or diverge before reaching the end. /// +/// # When *shouldn't* an adapter be `TrustedLen`? +/// +/// If an adapter makes an iterator *shorter* by a given amount, then it's +/// usually incorrect for that adapter to implement `TrustedLen`. The inner +/// iterator might return more than `usize::MAX` items, but there's no way to +/// know what `k` elements less than that will be, since the `size_hint` from +/// the inner iterator has already saturated and lost that information. +/// +/// This is why [`Skip<I>`](crate::iter::Skip) isn't `TrustedLen`, even when +/// `I` implements `TrustedLen`. +/// /// # Safety /// /// This trait must only be implemented when the contract is upheld. 
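The saturation described in the new `TrustedLen` section above is visible in the hints themselves. This snippet leans on the current standard-library `size_hint` behavior of `repeat` and `skip`, so treat the exact values as illustrative:

```rust
fn main() {
    // `repeat` yields more than usize::MAX items; TrustedLen encodes that
    // as the saturated hint (usize::MAX, None).
    assert_eq!(std::iter::repeat(0u8).size_hint(), (usize::MAX, None));
    // More than usize::MAX items still remain after skipping 10, but the
    // already-saturated bound can only be decremented blindly, so the
    // exact remaining length is unrecoverable.
    assert_eq!(
        std::iter::repeat(0u8).skip(10).size_hint(),
        (usize::MAX - 10, None)
    );
}
```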
Consumers diff --git a/library/core/src/iter/traits/mod.rs b/library/core/src/iter/traits/mod.rs index ed0fb634dbf..41ea29e6a84 100644 --- a/library/core/src/iter/traits/mod.rs +++ b/library/core/src/iter/traits/mod.rs @@ -4,6 +4,7 @@ mod double_ended; mod exact_size; mod iterator; mod marker; +mod unchecked_iterator; #[stable(feature = "rust1", since = "1.0.0")] pub use self::{ @@ -19,3 +20,5 @@ pub use self::{ pub use self::marker::InPlaceIterable; #[unstable(feature = "trusted_step", issue = "85731")] pub use self::marker::TrustedStep; + +pub(crate) use self::unchecked_iterator::UncheckedIterator; diff --git a/library/core/src/iter/traits/unchecked_iterator.rs b/library/core/src/iter/traits/unchecked_iterator.rs new file mode 100644 index 00000000000..ae4bfcad4e6 --- /dev/null +++ b/library/core/src/iter/traits/unchecked_iterator.rs @@ -0,0 +1,36 @@ +use crate::iter::TrustedLen; + +/// [`TrustedLen`] cannot have methods, so this allows augmenting it. +/// +/// It currently requires `TrustedLen` because it's unclear whether it's +/// reasonably possible to depend on the `size_hint` of anything else. +pub(crate) trait UncheckedIterator: TrustedLen { + /// Gets the next item from a non-empty iterator. + /// + /// Because there's always a value to return, that means it can return + /// the `Item` type directly, without wrapping it in an `Option`. + /// + /// # Safety + /// + /// This can only be called if `size_hint().0 != 0`, guaranteeing that + /// there's at least one item available. + /// + /// Otherwise (aka when `size_hint().1 == Some(0)`), this is UB. + /// + /// # Note to Implementers + /// + /// This has a default implementation using [`Option::unwrap_unchecked`]. + /// That's probably sufficient if your `next` *always* returns `Some`, + /// such as for infinite iterators. In more complicated situations, however, + /// sometimes there can still be `insertvalue`/`assume`/`extractvalue` + /// instructions remaining in the IR from the `Option` handling, at which + /// point you might want to implement this manually instead. + #[unstable(feature = "trusted_len_next_unchecked", issue = "37572")] + #[inline] + unsafe fn next_unchecked(&mut self) -> Self::Item { + let opt = self.next(); + // SAFETY: The caller promised that we're not empty, and + // `Self: TrustedLen` so we can actually trust the `size_hint`. 
+ unsafe { opt.unwrap_unchecked() } + } +} diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 8790649abe6..d3727a824b5 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -95,6 +95,7 @@ #![warn(missing_docs)] #![allow(explicit_outlives_requirements)] #![allow(incomplete_features)] +#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))] // // Library features: #![feature(const_align_offset)] @@ -191,8 +192,9 @@ #![feature(cfg_sanitize)] #![feature(cfg_target_has_atomic)] #![feature(cfg_target_has_atomic_equal_alignment)] -#![cfg_attr(not(bootstrap), feature(const_closures))] +#![feature(const_closures)] #![feature(const_fn_floating_point_arithmetic)] +#![feature(const_for)] #![feature(const_mut_refs)] #![feature(const_precise_live_drops)] #![feature(const_refs_to_cell)] @@ -235,6 +237,7 @@ #![feature(unsized_fn_params)] #![feature(asm_const)] #![feature(const_transmute_copy)] +#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))] // // Target features: #![feature(arm_target_feature)] @@ -248,7 +251,6 @@ #![feature(sse4a_target_feature)] #![feature(tbm_target_feature)] #![feature(wasm_target_feature)] -#![cfg_attr(bootstrap, feature(f16c_target_feature))] // allow using `core::` in intra-doc links #[allow(unused_extern_crates)] @@ -375,8 +377,6 @@ mod bool; mod tuple; mod unit; -mod const_closure; - #[stable(feature = "core_primitive", since = "1.43.0")] pub mod primitive; diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index 74055602ec2..520ae0edb09 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -97,6 +97,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {} #[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable #[rustc_specialization_trait] #[rustc_deny_explicit_impl] +#[cfg_attr(not(bootstrap), rustc_coinductive)] pub trait Sized { // Empty. } @@ -871,14 +872,18 @@ pub trait Destruct {} #[rustc_deny_explicit_impl] pub trait Tuple {} -/// A marker for things -#[unstable(feature = "pointer_sized_trait", issue = "none")] -#[lang = "pointer_sized"] +/// A marker for pointer-like types. +/// +/// All types that have the same size and alignment as a `usize` or +/// `*const ()` automatically implement this trait. +#[unstable(feature = "pointer_like_trait", issue = "none")] +#[cfg_attr(bootstrap, lang = "pointer_sized")] +#[cfg_attr(not(bootstrap), lang = "pointer_like")] #[rustc_on_unimplemented( - message = "`{Self}` needs to be a pointer-sized type", - label = "`{Self}` needs to be a pointer-sized type" + message = "`{Self}` needs to have the same alignment and size as a pointer", + label = "`{Self}` needs to be a pointer-like type" )] -pub trait PointerSized {} +pub trait PointerLike {} /// Implementations of `Copy` for primitive types. /// diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index 5e01ccc07d8..a67df7ed557 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -682,7 +682,6 @@ pub unsafe fn zeroed<T>() -> T { pub unsafe fn uninitialized<T>() -> T { // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`. unsafe { - #[cfg(not(bootstrap))] // If the compiler hits this itself then it deserves the UB. 
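The default body of `next_unchecked` in the new `UncheckedIterator` trait is `Option::unwrap_unchecked` on the result of `next`. The same pattern on an ordinary iterator, once the caller has established the non-emptiness precondition:

```rust
fn main() {
    let mut it = vec![1, 2, 3].into_iter();
    assert_ne!(it.size_hint().0, 0);
    // SAFETY: `Vec`'s iterator reports an exact lower bound, which was just
    // checked to be nonzero, so `next()` must return `Some`.
    let first = unsafe { it.next().unwrap_unchecked() };
    assert_eq!(first, 1);
}
```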
intrinsics::assert_mem_uninitialized_valid::<T>(); let mut val = MaybeUninit::<T>::uninit(); diff --git a/library/core/src/num/dec2flt/mod.rs b/library/core/src/num/dec2flt/mod.rs index a888ced49b3..f8d493e8b62 100644 --- a/library/core/src/num/dec2flt/mod.rs +++ b/library/core/src/num/dec2flt/mod.rs @@ -75,6 +75,7 @@ issue = "none" )] +use crate::error::Error; use crate::fmt; use crate::str::FromStr; @@ -182,15 +183,10 @@ enum FloatErrorKind { Invalid, } -impl ParseFloatError { - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { +#[stable(feature = "rust1", since = "1.0.0")] +impl Error for ParseFloatError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { FloatErrorKind::Empty => "cannot parse float from empty string", FloatErrorKind::Invalid => "invalid float literal", @@ -201,7 +197,8 @@ impl ParseFloatError { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for ParseFloatError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs index 768dd87816d..1bae4efe7d9 100644 --- a/library/core/src/num/error.rs +++ b/library/core/src/num/error.rs @@ -9,23 +9,19 @@ use crate::fmt; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TryFromIntError(pub(crate) ()); -impl TryFromIntError { - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { - "out of range integral type conversion attempted" +#[stable(feature = "try_from", since = "1.34.0")] +impl fmt::Display for TryFromIntError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + #[allow(deprecated)] + self.description().fmt(fmt) } } #[stable(feature = "try_from", since = "1.34.0")] -impl fmt::Display for TryFromIntError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(fmt) +impl Error for TryFromIntError { + #[allow(deprecated)] + fn description(&self) -> &str { + "out of range integral type conversion attempted" } } @@ -121,28 +117,13 @@ impl ParseIntError { pub fn kind(&self) -> &IntErrorKind { &self.kind } - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { - match self.kind { - IntErrorKind::Empty => "cannot parse integer from empty string", - IntErrorKind::InvalidDigit => "invalid digit found in string", - IntErrorKind::PosOverflow => "number too large to fit in target type", - IntErrorKind::NegOverflow => "number too small to fit in target type", - IntErrorKind::Zero => "number would be zero for non-zero type", - } - } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for ParseIntError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } @@ -150,14 +131,12 @@ impl fmt::Display for ParseIntError { impl Error for ParseIntError { #[allow(deprecated)] fn description(&self) -> &str { - self.__description() - } -} - -#[stable(feature = "try_from", since = "1.34.0")] 
-impl Error for TryFromIntError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() + match self.kind { + IntErrorKind::Empty => "cannot parse integer from empty string", + IntErrorKind::InvalidDigit => "invalid digit found in string", + IntErrorKind::PosOverflow => "number too large to fit in target type", + IntErrorKind::NegOverflow => "number too small to fit in target type", + IntErrorKind::Zero => "number would be zero for non-zero type", + } } } diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs index 80472528f6c..0ce31b40a38 100644 --- a/library/core/src/num/int_log10.rs +++ b/library/core/src/num/int_log10.rs @@ -138,3 +138,11 @@ pub const fn i64(val: i64) -> u32 { pub const fn i128(val: i128) -> u32 { u128(val as u128) } + +/// Instantiate this panic logic once, rather than for all the ilog methods +/// on every single primitive type. +#[cold] +#[track_caller] +pub const fn panic_for_nonpositive_argument() -> ! { + panic!("argument of integer logarithm must be positive") +} diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index 2cae98b8e49..572191d0f9b 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -1,11 +1,26 @@ macro_rules! int_impl { - ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $BITS_MINUS_ONE:expr, $Min:expr, $Max:expr, - $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, - $reversed:expr, $le_bytes:expr, $be_bytes:expr, - $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr, - $bound_condition:expr) => { + ( + Self = $SelfT:ty, + ActualT = $ActualT:ident, + UnsignedT = $UnsignedT:ty, + BITS = $BITS:expr, + BITS_MINUS_ONE = $BITS_MINUS_ONE:expr, + Min = $Min:expr, + Max = $Max:expr, + rot = $rot:expr, + rot_op = $rot_op:expr, + rot_result = $rot_result:expr, + swap_op = $swap_op:expr, + swapped = $swapped:expr, + reversed = $reversed:expr, + le_bytes = $le_bytes:expr, + be_bytes = $be_bytes:expr, + to_xe_bytes_doc = $to_xe_bytes_doc:expr, + from_xe_bytes_doc = $from_xe_bytes_doc:expr, + bound_condition = $bound_condition:expr, + ) => { /// The smallest value that can be represented by this integer type - #[doc = concat!("(−2<sup>", $BITS_MINUS_ONE, "</sup>", $bound_condition, ")")] + #[doc = concat!("(−2<sup>", $BITS_MINUS_ONE, "</sup>", $bound_condition, ").")] /// /// # Examples /// @@ -18,7 +33,7 @@ macro_rules! int_impl { pub const MIN: Self = !0 ^ ((!0 as $UnsignedT) >> 1) as Self; /// The largest value that can be represented by this integer type - #[doc = concat!("(2<sup>", $BITS_MINUS_ONE, "</sup> − 1", $bound_condition, ")")] + #[doc = concat!("(2<sup>", $BITS_MINUS_ONE, "</sup> − 1", $bound_condition, ").")] /// /// # Examples /// @@ -2331,14 +2346,17 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog(self, base: Self) -> u32 { assert!(base >= 2, "base of integer logarithm must be at least 2"); - self.checked_ilog(base).expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog(base) { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 2 logarithm of the number, rounded down. @@ -2354,13 +2372,16 @@ macro_rules! 
int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog2(self) -> u32 { - self.checked_ilog2().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog2() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 10 logarithm of the number, rounded down. @@ -2376,13 +2397,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog10(self) -> u32 { - self.checked_ilog10().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog10() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the logarithm of the number with respect to an arbitrary base, @@ -2574,12 +2598,13 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(const_cmp)] pub const fn signum(self) -> Self { - match self { - n if n > 0 => 1, - 0 => 0, - _ => -1, - } + // Picking the right way to phrase this is complicated + // (<https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>) + // so delegate it to `Ord` which is already producing -1/0/+1 + // exactly like we need and can be the place to deal with the complexity. + self.cmp(&0) as _ } /// Returns `true` if `self` is positive and `false` if the number is zero or diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index ac7f579ebb5..a50c91579fa 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -9,9 +9,6 @@ use crate::mem; use crate::ops::{Add, Mul, Sub}; use crate::str::FromStr; -#[cfg(not(no_fp_fmt_parse))] -use crate::error::Error; - // Used because the `?` operator is not allowed in a const context. macro_rules! try_opt { ($e:expr) => { @@ -61,15 +58,6 @@ pub use wrapping::Wrapping; #[cfg(not(no_fp_fmt_parse))] pub use dec2flt::ParseFloatError; -#[cfg(not(no_fp_fmt_parse))] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for ParseFloatError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - #[stable(feature = "rust1", since = "1.0.0")] pub use error::ParseIntError; @@ -238,72 +226,217 @@ macro_rules! widening_impl { } impl i8 { - int_impl! { i8, i8, u8, 8, 7, -128, 127, 2, "-0x7e", "0xa", "0x12", "0x12", "0x48", - "[0x12]", "[0x12]", "", "", "" } + int_impl! { + Self = i8, + ActualT = i8, + UnsignedT = u8, + BITS = 8, + BITS_MINUS_ONE = 7, + Min = -128, + Max = 127, + rot = 2, + rot_op = "-0x7e", + rot_result = "0xa", + swap_op = "0x12", + swapped = "0x12", + reversed = "0x48", + le_bytes = "[0x12]", + be_bytes = "[0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i16 { - int_impl! { i16, i16, u16, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", "0x3412", - "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" } + int_impl! 
{ + Self = i16, + ActualT = i16, + UnsignedT = u16, + BITS = 16, + BITS_MINUS_ONE = 15, + Min = -32768, + Max = 32767, + rot = 4, + rot_op = "-0x5ffd", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i32 { - int_impl! { i32, i32, u32, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301", - "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78]", "", "", "" } + int_impl! { + Self = i32, + ActualT = i32, + UnsignedT = u32, + BITS = 32, + BITS_MINUS_ONE = 31, + Min = -2147483648, + Max = 2147483647, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i64 { - int_impl! { i64, i64, u64, 64, 63, -9223372036854775808, 9223372036854775807, 12, - "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", - "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", "", "", "" } + int_impl! { + Self = i64, + ActualT = i64, + UnsignedT = u64, + BITS = 64, + BITS_MINUS_ONE = 63, + Min = -9223372036854775808, + Max = 9223372036854775807, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i128 { - int_impl! { i128, i128, u128, 128, 127, -170141183460469231731687303715884105728, - 170141183460469231731687303715884105727, 16, - "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", - "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48", - "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ - 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ - 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", "", "", "" } + int_impl! { + Self = i128, + ActualT = i128, + UnsignedT = u128, + BITS = 128, + BITS_MINUS_ONE = 127, + Min = -170141183460469231731687303715884105728, + Max = 170141183460469231731687303715884105727, + rot = 16, + rot_op = "0x13f40000000000000000000000004f76", + rot_result = "0x4f7613f4", + swap_op = "0x12345678901234567890123456789012", + swapped = "0x12907856341290785634129078563412", + reversed = "0x48091e6a2c48091e6a2c48091e6a2c48", + le_bytes = "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ + 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ + 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } #[cfg(target_pointer_width = "16")] impl isize { - int_impl! { isize, i16, usize, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", - "0x3412", "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 16-bit targets" } + int_impl! 
{ + Self = isize, + ActualT = i16, + UnsignedT = usize, + BITS = 16, + BITS_MINUS_ONE = 15, + Min = -32768, + Max = 32767, + rot = 4, + rot_op = "-0x5ffd", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 16-bit targets", + } } #[cfg(target_pointer_width = "32")] impl isize { - int_impl! { isize, i32, usize, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301", - "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 32-bit targets" } + int_impl! { + Self = isize, + ActualT = i32, + UnsignedT = usize, + BITS = 32, + BITS_MINUS_ONE = 31, + Min = -2147483648, + Max = 2147483647, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 32-bit targets", + } } #[cfg(target_pointer_width = "64")] impl isize { - int_impl! { isize, i64, usize, 64, 63, -9223372036854775808, 9223372036854775807, - 12, "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", - "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 64-bit targets" } + int_impl! { + Self = isize, + ActualT = i64, + UnsignedT = usize, + BITS = 64, + BITS_MINUS_ONE = 63, + Min = -9223372036854775808, + Max = 9223372036854775807, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 64-bit targets", + } } /// If 6th bit set ascii is upper case. const ASCII_CASE_MASK: u8 = 0b0010_0000; impl u8 { - uint_impl! { u8, u8, i8, NonZeroU8, 8, 255, 2, "0x82", "0xa", "0x12", "0x12", "0x48", "[0x12]", - "[0x12]", "", "", "" } + uint_impl! { + Self = u8, + ActualT = u8, + SignedT = i8, + NonZeroT = NonZeroU8, + BITS = 8, + MAX = 255, + rot = 2, + rot_op = "0x82", + rot_result = "0xa", + swap_op = "0x12", + swapped = "0x12", + reversed = "0x48", + le_bytes = "[0x12]", + be_bytes = "[0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u8, u16, 8, unsigned } /// Checks if the value is within the ASCII range. @@ -887,8 +1020,25 @@ impl u8 { } impl u16 { - uint_impl! { u16, u16, i16, NonZeroU16, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48", - "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" } + uint_impl! 
{ + Self = u16, + ActualT = u16, + SignedT = i16, + NonZeroT = NonZeroU16, + BITS = 16, + MAX = 65535, + rot = 4, + rot_op = "0xa003", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u16, u32, 16, unsigned } /// Checks if the value is a Unicode surrogate code point, which are disallowed values for [`char`]. @@ -918,56 +1068,144 @@ impl u16 { } impl u32 { - uint_impl! { u32, u32, i32, NonZeroU32, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678", - "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", "", "", "" } + uint_impl! { + Self = u32, + ActualT = u32, + SignedT = i32, + NonZeroT = NonZeroU32, + BITS = 32, + MAX = 4294967295, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u32, u64, 32, unsigned } } impl u64 { - uint_impl! { u64, u64, i64, NonZeroU64, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa", - "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48", - "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - "", "", ""} + uint_impl! { + Self = u64, + ActualT = u64, + SignedT = i64, + NonZeroT = NonZeroU64, + BITS = 64, + MAX = 18446744073709551615, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u64, u128, 64, unsigned } } impl u128 { - uint_impl! { u128, u128, i128, NonZeroU128, 128, 340282366920938463463374607431768211455, 16, - "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", - "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48", - "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ - 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ - 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", - "", "", ""} + uint_impl! { + Self = u128, + ActualT = u128, + SignedT = i128, + NonZeroT = NonZeroU128, + BITS = 128, + MAX = 340282366920938463463374607431768211455, + rot = 16, + rot_op = "0x13f40000000000000000000000004f76", + rot_result = "0x4f7613f4", + swap_op = "0x12345678901234567890123456789012", + swapped = "0x12907856341290785634129078563412", + reversed = "0x48091e6a2c48091e6a2c48091e6a2c48", + le_bytes = "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ + 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ + 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } #[cfg(target_pointer_width = "16")] impl usize { - uint_impl! 
- "[0x34, 0x12]", "[0x12, 0x34]",
- usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
- " on 16-bit targets" }
+ uint_impl! {
+ Self = usize,
+ ActualT = u16,
+ SignedT = isize,
+ NonZeroT = NonZeroUsize,
+ BITS = 16,
+ MAX = 65535,
+ rot = 4,
+ rot_op = "0xa003",
+ rot_result = "0x3a",
+ swap_op = "0x1234",
+ swapped = "0x3412",
+ reversed = "0x2c48",
+ le_bytes = "[0x34, 0x12]",
+ be_bytes = "[0x12, 0x34]",
+ to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(),
+ from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(),
+ bound_condition = " on 16-bit targets",
+ }
widening_impl! { usize, u32, 16, unsigned }
}
+
#[cfg(target_pointer_width = "32")]
impl usize {
- uint_impl! { usize, u32, isize, NonZeroUsize, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678",
- "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]",
- usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
- " on 32-bit targets" }
+ uint_impl! {
+ Self = usize,
+ ActualT = u32,
+ SignedT = isize,
+ NonZeroT = NonZeroUsize,
+ BITS = 32,
+ MAX = 4294967295,
+ rot = 8,
+ rot_op = "0x10000b3",
+ rot_result = "0xb301",
+ swap_op = "0x12345678",
+ swapped = "0x78563412",
+ reversed = "0x1e6a2c48",
+ le_bytes = "[0x78, 0x56, 0x34, 0x12]",
+ be_bytes = "[0x12, 0x34, 0x56, 0x78]",
+ to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(),
+ from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(),
+ bound_condition = " on 32-bit targets",
+ }
widening_impl! { usize, u64, 32, unsigned }
}

#[cfg(target_pointer_width = "64")]
impl usize {
- uint_impl! { usize, u64, isize, NonZeroUsize, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa",
- "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48",
- "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
- "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
- usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(),
- " on 64-bit targets" }
+ uint_impl! {
+ Self = usize,
+ ActualT = u64,
+ SignedT = isize,
+ NonZeroT = NonZeroUsize,
+ BITS = 64,
+ MAX = 18446744073709551615,
+ rot = 12,
+ rot_op = "0xaa00000000006e1",
+ rot_result = "0x6e10aa",
+ swap_op = "0x1234567890123456",
+ swapped = "0x5634129078563412",
+ reversed = "0x6a2c48091e6a2c48",
+ le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+ be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+ to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(),
+ from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(),
+ bound_condition = " on 64-bit targets",
+ }
widening_impl! { usize, u128, 64, unsigned }
}

diff --git a/library/core/src/num/shells/i128.rs b/library/core/src/num/shells/i128.rs
index 7b048dc5206..b3b3d3b4875 100644
--- a/library/core/src/num/shells/i128.rs
+++ b/library/core/src/num/shells/i128.rs
@@ -1,6 +1,4 @@
-//! Constants for the 128-bit signed integer type.
-//!
-//! *[See also the `i128` primitive type][i128].*
+//! Redundant constants module for the [`i128` primitive type][i128].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/i16.rs b/library/core/src/num/shells/i16.rs
index 5c5812d5c5e..70a452e1939 100644
--- a/library/core/src/num/shells/i16.rs
+++ b/library/core/src/num/shells/i16.rs
@@ -1,6 +1,4 @@
-//! Constants for the 16-bit signed integer type.
-//!
-//! *[See also the `i16` primitive type][i16].*
+//! Redundant constants module for the [`i16` primitive type][i16].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/i32.rs b/library/core/src/num/shells/i32.rs
index b283ac64415..c30849e2591 100644
--- a/library/core/src/num/shells/i32.rs
+++ b/library/core/src/num/shells/i32.rs
@@ -1,6 +1,4 @@
-//! Constants for the 32-bit signed integer type.
-//!
-//! *[See also the `i32` primitive type][i32].*
+//! Redundant constants module for the [`i32` primitive type][i32].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/i64.rs b/library/core/src/num/shells/i64.rs
index a416fa7e936..77d95d71250 100644
--- a/library/core/src/num/shells/i64.rs
+++ b/library/core/src/num/shells/i64.rs
@@ -1,6 +1,4 @@
-//! Constants for the 64-bit signed integer type.
-//!
-//! *[See also the `i64` primitive type][i64].*
+//! Redundant constants module for the [`i64` primitive type][i64].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/i8.rs b/library/core/src/num/shells/i8.rs
index 02465013a4a..516ba8cdef3 100644
--- a/library/core/src/num/shells/i8.rs
+++ b/library/core/src/num/shells/i8.rs
@@ -1,6 +1,4 @@
-//! Constants for the 8-bit signed integer type.
-//!
-//! *[See also the `i8` primitive type][i8].*
+//! Redundant constants module for the [`i8` primitive type][i8].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/isize.rs b/library/core/src/num/shells/isize.rs
index 1579fbab6d4..828f7345baf 100644
--- a/library/core/src/num/shells/isize.rs
+++ b/library/core/src/num/shells/isize.rs
@@ -1,6 +1,4 @@
-//! Constants for the pointer-sized signed integer type.
-//!
-//! *[See also the `isize` primitive type][isize].*
+//! Redundant constants module for the [`isize` primitive type][isize].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/u128.rs b/library/core/src/num/shells/u128.rs
index fe08cee586c..b1e30e38435 100644
--- a/library/core/src/num/shells/u128.rs
+++ b/library/core/src/num/shells/u128.rs
@@ -1,6 +1,4 @@
-//! Constants for the 128-bit unsigned integer type.
-//!
-//! *[See also the `u128` primitive type][u128].*
+//! Redundant constants module for the [`u128` primitive type][u128].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/u16.rs b/library/core/src/num/shells/u16.rs
index 36f8c697878..b203806f460 100644
--- a/library/core/src/num/shells/u16.rs
+++ b/library/core/src/num/shells/u16.rs
@@ -1,6 +1,4 @@
-//! Constants for the 16-bit unsigned integer type.
-//!
-//! *[See also the `u16` primitive type][u16].*
+//! Redundant constants module for the [`u16` primitive type][u16].
//!
//! New code should use the associated constants directly on the primitive type.

diff --git a/library/core/src/num/shells/u32.rs b/library/core/src/num/shells/u32.rs
index 1c369097dcd..4c84274e752 100644
--- a/library/core/src/num/shells/u32.rs
+++ b/library/core/src/num/shells/u32.rs
@@ -1,6 +1,4 @@
-//! Constants for the 32-bit unsigned integer type.
-//!
-//! *[See also the `u32` primitive type][u32].*
+//! Redundant constants module for the [`u32` primitive type][u32].
//!
//! New code should use the associated constants directly on the primitive type.
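A minimal sketch of what the renamed macro arguments and reworded shell docs above amount to in practice, assuming only stable integer methods (the helper function names are illustrative, not part of this diff). It checks the `u16` doc-example values wired into `uint_impl!` against the corresponding methods, and shows the associated constant that new code should prefer over these shell modules:

fn doc_values_for_u16() {
    assert_eq!(0xa003u16.rotate_left(4), 0x3a); // rot, rot_op, rot_result
    assert_eq!(0x1234u16.swap_bytes(), 0x3412); // swap_op, swapped
    assert_eq!(0x1234u16.reverse_bits(), 0x2c48); // reversed
    assert_eq!(0x1234u16.to_le_bytes(), [0x34, 0x12]); // le_bytes
    assert_eq!(0x1234u16.to_be_bytes(), [0x12, 0x34]); // be_bytes
}

fn prefer_associated_constants() {
    // The shell module constant and the associated constant agree;
    // new code should use the latter.
    assert_eq!(core::u16::MAX, u16::MAX);
}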
diff --git a/library/core/src/num/shells/u64.rs b/library/core/src/num/shells/u64.rs index e8b691d1555..47a95c6820f 100644 --- a/library/core/src/num/shells/u64.rs +++ b/library/core/src/num/shells/u64.rs @@ -1,6 +1,4 @@ -//! Constants for the 64-bit unsigned integer type. -//! -//! *[See also the `u64` primitive type][u64].* +//! Redundant constants module for the [`u64` primitive type][u64]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u8.rs b/library/core/src/num/shells/u8.rs index 817c6a18aaa..360baef7228 100644 --- a/library/core/src/num/shells/u8.rs +++ b/library/core/src/num/shells/u8.rs @@ -1,6 +1,4 @@ -//! Constants for the 8-bit unsigned integer type. -//! -//! *[See also the `u8` primitive type][u8].* +//! Redundant constants module for the [`u8` primitive type][u8]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/usize.rs b/library/core/src/num/shells/usize.rs index 3e1bec5ec48..44c24dfc2cf 100644 --- a/library/core/src/num/shells/usize.rs +++ b/library/core/src/num/shells/usize.rs @@ -1,6 +1,4 @@ -//! Constants for the pointer-sized unsigned integer type. -//! -//! *[See also the `usize` primitive type][usize].* +//! Redundant constants module for the [`usize` primitive type][usize]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 1c97c468628..c4fe8e966fd 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -1,10 +1,23 @@ macro_rules! uint_impl { - ($SelfT:ty, $ActualT:ident, $SignedT:ident, $NonZeroT:ident, - $BITS:expr, $MaxV:expr, - $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, - $reversed:expr, $le_bytes:expr, $be_bytes:expr, - $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr, - $bound_condition:expr) => { + ( + Self = $SelfT:ty, + ActualT = $ActualT:ident, + SignedT = $SignedT:ident, + NonZeroT = $NonZeroT:ident, + BITS = $BITS:expr, + MAX = $MaxV:expr, + rot = $rot:expr, + rot_op = $rot_op:expr, + rot_result = $rot_result:expr, + swap_op = $swap_op:expr, + swapped = $swapped:expr, + reversed = $reversed:expr, + le_bytes = $le_bytes:expr, + be_bytes = $be_bytes:expr, + to_xe_bytes_doc = $to_xe_bytes_doc:expr, + from_xe_bytes_doc = $from_xe_bytes_doc:expr, + bound_condition = $bound_condition:expr, + ) => { /// The smallest value that can be represented by this integer type. /// /// # Examples @@ -18,7 +31,7 @@ macro_rules! uint_impl { pub const MIN: Self = 0; /// The largest value that can be represented by this integer type - #[doc = concat!("(2<sup>", $BITS, "</sup> − 1", $bound_condition, ")")] + #[doc = concat!("(2<sup>", $BITS, "</sup> − 1", $bound_condition, ").")] /// /// # Examples /// @@ -705,14 +718,17 @@ macro_rules! 
uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog(self, base: Self) -> u32 { assert!(base >= 2, "base of integer logarithm must be at least 2"); - self.checked_ilog(base).expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog(base) { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 2 logarithm of the number, rounded down. @@ -728,13 +744,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog2(self) -> u32 { - self.checked_ilog2().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog2() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 10 logarithm of the number, rounded down. @@ -750,13 +769,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog10(self) -> u32 { - self.checked_ilog10().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog10() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the logarithm of the number with respect to an arbitrary base, diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs index 75c52d3ecfc..0c7ee9630c6 100644 --- a/library/core/src/ops/arith.rs +++ b/library/core/src/ops/arith.rs @@ -86,7 +86,8 @@ pub trait Add<Rhs = Self> { /// ``` /// assert_eq!(12 + 1, 13); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "add"] #[stable(feature = "rust1", since = "1.0.0")] fn add(self, rhs: Rhs) -> Self::Output; } @@ -195,7 +196,8 @@ pub trait Sub<Rhs = Self> { /// ``` /// assert_eq!(12 - 1, 11); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "sub"] #[stable(feature = "rust1", since = "1.0.0")] fn sub(self, rhs: Rhs) -> Self::Output; } @@ -325,7 +327,8 @@ pub trait Mul<Rhs = Self> { /// ``` /// assert_eq!(12 * 2, 24); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "mul"] #[stable(feature = "rust1", since = "1.0.0")] fn mul(self, rhs: Rhs) -> Self::Output; } @@ -459,7 +462,8 @@ pub trait Div<Rhs = Self> { /// ``` /// assert_eq!(12 / 2, 6); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "div"] #[stable(feature = "rust1", since = "1.0.0")] fn div(self, rhs: Rhs) -> Self::Output; } @@ -545,7 +549,7 @@ div_impl_float! 
{ f32 f64 } #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( - message = "cannot mod `{Self}` by `{Rhs}`", + message = "cannot calculate the remainder of `{Self}` divided by `{Rhs}`", label = "no implementation for `{Self} % {Rhs}`" )] #[doc(alias = "%")] @@ -562,7 +566,8 @@ pub trait Rem<Rhs = Self> { /// ``` /// assert_eq!(12 % 10, 2); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "rem"] #[stable(feature = "rust1", since = "1.0.0")] fn rem(self, rhs: Rhs) -> Self::Output; } @@ -678,7 +683,8 @@ pub trait Neg { /// let x: i32 = 12; /// assert_eq!(-x, -12); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "neg"] #[stable(feature = "rust1", since = "1.0.0")] fn neg(self) -> Self::Output; } @@ -981,7 +987,7 @@ div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } #[lang = "rem_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] #[rustc_on_unimplemented( - message = "cannot mod-assign `{Self}` by `{Rhs}``", + message = "cannot calculate and assign the remainder of `{Self}` divided by `{Rhs}`", label = "no implementation for `{Self} %= {Rhs}`" )] #[doc(alias = "%")] diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs index cd183540cd5..117706fb4b2 100644 --- a/library/core/src/ops/control_flow.rs +++ b/library/core/src/ops/control_flow.rs @@ -259,46 +259,3 @@ impl<R: ops::Try> ControlFlow<R, R::Output> { } } } - -impl<B> ControlFlow<B, ()> { - /// It's frequently the case that there's no value needed with `Continue`, - /// so this provides a way to avoid typing `(())`, if you prefer it. - /// - /// # Examples - /// - /// ``` - /// #![feature(control_flow_enum)] - /// use std::ops::ControlFlow; - /// - /// let mut partial_sum = 0; - /// let last_used = (1..10).chain(20..25).try_for_each(|x| { - /// partial_sum += x; - /// if partial_sum > 100 { ControlFlow::Break(x) } - /// else { ControlFlow::CONTINUE } - /// }); - /// assert_eq!(last_used.break_value(), Some(22)); - /// ``` - #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")] - pub const CONTINUE: Self = ControlFlow::Continue(()); -} - -impl<C> ControlFlow<(), C> { - /// APIs like `try_for_each` don't need values with `Break`, - /// so this provides a way to avoid typing `(())`, if you prefer it. - /// - /// # Examples - /// - /// ``` - /// #![feature(control_flow_enum)] - /// use std::ops::ControlFlow; - /// - /// let mut partial_sum = 0; - /// (1..10).chain(20..25).try_for_each(|x| { - /// if partial_sum > 100 { ControlFlow::BREAK } - /// else { partial_sum += x; ControlFlow::CONTINUE } - /// }); - /// assert_eq!(partial_sum, 108); - /// ``` - #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")] - pub const BREAK: Self = ControlFlow::Break(()); -} diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs index d29ae35614c..b8ab2656473 100644 --- a/library/core/src/ops/range.rs +++ b/library/core/src/ops/range.rs @@ -96,7 +96,7 @@ impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> { } } -impl<Idx: PartialOrd<Idx>> Range<Idx> { +impl<Idx: ~const PartialOrd<Idx>> Range<Idx> { /// Returns `true` if `item` is contained in the range. 
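/// A hedged sketch of what the `~const PartialOrd` bound added here enables:
/// calling `contains` in const context on nightly (the feature name is taken
/// from the attributes in this change; the constant name is illustrative):
///
/// ```ignore
/// #![feature(const_range_bounds)]
/// const FIVE_IN_RANGE: bool = (0u32..10).contains(&5);
/// ```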
/// /// # Examples @@ -116,10 +116,11 @@ impl<Idx: PartialOrd<Idx>> Range<Idx> { /// assert!(!(f32::NAN..1.0).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -142,7 +143,8 @@ impl<Idx: PartialOrd<Idx>> Range<Idx> { /// assert!( (f32::NAN..5.0).is_empty()); /// ``` #[stable(feature = "range_is_empty", since = "1.47.0")] - pub fn is_empty(&self) -> bool { + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn is_empty(&self) -> bool { !(self.start < self.end) } } @@ -199,7 +201,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeFrom<Idx> { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -214,10 +216,11 @@ impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> { /// assert!(!(f32::NAN..).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -280,7 +283,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeTo<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeTo<Idx> { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -295,10 +298,11 @@ impl<Idx: PartialOrd<Idx>> RangeTo<Idx> { /// assert!(!(..f32::NAN).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -437,7 +441,8 @@ impl<Idx> RangeInclusive<Idx> { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn into_inner(self) -> (Idx, Idx) { + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn into_inner(self) -> (Idx, Idx) { (self.start, self.end) } } @@ -469,7 +474,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeInclusive<Idx> { /// Returns `true` if `item` is contained in the range. 
/// /// # Examples @@ -500,10 +505,11 @@ impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> { /// assert!(!r.contains(&3) && !r.contains(&5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -535,8 +541,9 @@ impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> { /// assert!(r.is_empty()); /// ``` #[stable(feature = "range_is_empty", since = "1.47.0")] + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] #[inline] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.exhausted || !(self.start <= self.end) } } @@ -598,7 +605,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeToInclusive<Idx> { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -613,10 +620,11 @@ impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> { /// assert!(!(..=f32::NAN).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -757,6 +765,7 @@ impl<T: Clone> Bound<&T> { /// `RangeBounds` is implemented by Rust's built-in range types, produced /// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`. #[stable(feature = "collections_range", since = "1.28.0")] +#[const_trait] pub trait RangeBounds<T: ?Sized> { /// Start index bound. 
/// @@ -809,8 +818,8 @@ pub trait RangeBounds<T: ?Sized> { #[stable(feature = "range_contains", since = "1.35.0")] fn contains<U>(&self, item: &U) -> bool where - T: PartialOrd<U>, - U: ?Sized + PartialOrd<T>, + T: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<T>, { (match self.start_bound() { Included(start) => start <= item, @@ -827,7 +836,8 @@ pub trait RangeBounds<T: ?Sized> { use self::Bound::{Excluded, Included, Unbounded}; #[stable(feature = "collections_range", since = "1.28.0")] -impl<T: ?Sized> RangeBounds<T> for RangeFull { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T: ?Sized> const RangeBounds<T> for RangeFull { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -837,7 +847,8 @@ impl<T: ?Sized> RangeBounds<T> for RangeFull { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeFrom<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeFrom<T> { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -847,7 +858,8 @@ impl<T> RangeBounds<T> for RangeFrom<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeTo<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeTo<T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -857,7 +869,8 @@ impl<T> RangeBounds<T> for RangeTo<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for Range<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for Range<T> { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -867,7 +880,8 @@ impl<T> RangeBounds<T> for Range<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeInclusive<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeInclusive<T> { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -883,7 +897,8 @@ impl<T> RangeBounds<T> for RangeInclusive<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeToInclusive<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeToInclusive<T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -893,7 +908,8 @@ impl<T> RangeBounds<T> for RangeToInclusive<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for (Bound<T>, Bound<T>) { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for (Bound<T>, Bound<T>) { fn start_bound(&self) -> Bound<&T> { match *self { (Included(ref start), _) => Included(start), @@ -912,7 +928,8 @@ impl<T> RangeBounds<T> for (Bound<T>, Bound<T>) { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<'a, T: ?Sized + 'a> const RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) { fn start_bound(&self) -> Bound<&T> { self.0 } @@ -923,7 +940,8 @@ impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeFrom<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue 
= "108082")] +impl<T> const RangeBounds<T> for RangeFrom<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -933,7 +951,8 @@ impl<T> RangeBounds<T> for RangeFrom<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeTo<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeTo<&T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -943,7 +962,8 @@ impl<T> RangeBounds<T> for RangeTo<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for Range<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for Range<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -953,7 +973,8 @@ impl<T> RangeBounds<T> for Range<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeInclusive<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeInclusive<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -963,7 +984,8 @@ impl<T> RangeBounds<T> for RangeInclusive<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeToInclusive<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeToInclusive<&T> { fn start_bound(&self) -> Bound<&T> { Unbounded } diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs index 84a69046807..86aa1e4fd20 100644 --- a/library/core/src/ops/try_trait.rs +++ b/library/core/src/ops/try_trait.rs @@ -379,13 +379,27 @@ pub(crate) type ChangeOutputType<T, V> = <<T as Try>::Residual as Residual<V>>:: pub(crate) struct NeverShortCircuit<T>(pub T); impl<T> NeverShortCircuit<T> { - /// Implementation for building `ConstFnMutClosure` for wrapping the output of a ~const FnMut in a `NeverShortCircuit`. + /// Wraps a unary function to produce one that wraps the output into a `NeverShortCircuit`. + /// + /// This is useful for implementing infallible functions in terms of the `try_` ones, + /// without accidentally capturing extra generic parameters in a closure. + #[inline] + pub fn wrap_mut_1<A>(mut f: impl FnMut(A) -> T) -> impl FnMut(A) -> NeverShortCircuit<T> { + move |a| NeverShortCircuit(f(a)) + } + #[inline] - pub const fn wrap_mut_2_imp<A, B, F: ~const FnMut(A, B) -> T>( - f: &mut F, - (a, b): (A, B), - ) -> NeverShortCircuit<T> { - NeverShortCircuit(f(a, b)) + pub fn wrap_mut_2<A, B>( + mut f: impl ~const FnMut(A, B) -> T, + ) -> impl ~const FnMut(A, B) -> Self { + cfg_if! { + if #[cfg(bootstrap)] { + #[allow(unused_parens)] + (const move |a, b| NeverShortCircuit(f(a, b))) + } else { + const move |a, b| NeverShortCircuit(f(a, b)) + } + } } } diff --git a/library/core/src/option.rs b/library/core/src/option.rs index 7cc00e3f8d1..5d5e9559034 100644 --- a/library/core/src/option.rs +++ b/library/core/src/option.rs @@ -551,7 +551,7 @@ use crate::marker::Destruct; use crate::panicking::{panic, panic_str}; use crate::pin::Pin; use crate::{ - convert, hint, mem, + cmp, convert, hint, mem, ops::{self, ControlFlow, Deref, DerefMut}, }; @@ -943,7 +943,7 @@ impl<T> Option<T> { // Transforming contained values ///////////////////////////////////////////////////////////////////////// - /// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value. 
+ /// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value (if `Some`) or returns `None` (if `None`). /// /// # Examples /// @@ -955,8 +955,10 @@ impl<T> Option<T> { /// let maybe_some_string = Some(String::from("Hello, World!")); /// // `Option::map` takes self *by value*, consuming `maybe_some_string` /// let maybe_some_len = maybe_some_string.map(|s| s.len()); - /// /// assert_eq!(maybe_some_len, Some(13)); + /// + /// let x: Option<&str> = None; + /// assert_eq!(x.map(|s| s.len()), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -2090,6 +2092,12 @@ impl<T: PartialEq> PartialEq for Option<T> { } } +/// This specialization trait is a workaround for LLVM not currently (2023-01) +/// being able to optimize this itself, even though Alive confirms that it would +/// be legal to do so: <https://github.com/llvm/llvm-project/issues/52622> +/// +/// Once that's fixed, `Option` should go back to deriving `PartialEq`, as +/// it used to do before <https://github.com/rust-lang/rust/pull/103556>. #[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")] #[doc(hidden)] pub trait SpecOptionPartialEq: Sized { @@ -2146,6 +2154,14 @@ impl<T> SpecOptionPartialEq for crate::ptr::NonNull<T> { } } +#[stable(feature = "rust1", since = "1.0.0")] +impl SpecOptionPartialEq for cmp::Ordering { + #[inline] + fn eq(l: &Option<Self>, r: &Option<Self>) -> bool { + l.map_or(2, |x| x as i8) == r.map_or(2, |x| x as i8) + } +} + ///////////////////////////////////////////////////////////////////////////// // The Option Iterators ///////////////////////////////////////////////////////////////////////////// diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs index 48e90e6d794..805a1e51ae9 100644 --- a/library/core/src/panicking.rs +++ b/library/core/src/panicking.rs @@ -117,7 +117,7 @@ pub const fn panic(expr: &'static str) -> ! { /// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize. #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] -#[cfg_attr(not(bootstrap), lang = "panic_nounwind")] // needed by codegen for non-unwinding panics +#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics #[rustc_nounwind] pub fn panic_nounwind(expr: &'static str) -> ! { panic_nounwind_fmt(fmt::Arguments::new_v1(&[expr], &[])); @@ -165,8 +165,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { /// any extra arguments (including those synthesized by track_caller). #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] -#[cfg_attr(bootstrap, lang = "panic_no_unwind")] // needed by codegen for panic in nounwind function -#[cfg_attr(not(bootstrap), lang = "panic_cannot_unwind")] // needed by codegen for panic in nounwind function +#[lang = "panic_cannot_unwind"] // needed by codegen for panic in nounwind function #[rustc_nounwind] fn panic_cannot_unwind() -> ! { panic_nounwind("panic in a function that cannot unwind") diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs index ec0c9984841..febe57dc90b 100644 --- a/library/core/src/pin.rs +++ b/library/core/src/pin.rs @@ -1164,7 +1164,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {} /// constructor. 
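/// A minimal usage sketch of this macro (illustrative): pinning a value to the
/// local stack frame and reborrowing it as `Pin<&mut T>`.
///
/// ```
/// use std::pin::{pin, Pin};
///
/// let mut value = pin!(42);
/// let pinned: Pin<&mut i32> = value.as_mut();
/// assert_eq!(*pinned, 42);
/// ```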
/// /// [`Box::pin`]: ../../std/boxed/struct.Box.html#method.pin -#[stable(feature = "pin_macro", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "pin_macro", since = "1.68.0")] #[rustc_macro_transparency = "semitransparent"] #[allow_internal_unstable(unsafe_pin_internals)] pub macro pin($value:expr $(,)?) { diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 16eb726f6f6..57e2ffe5d20 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -23,8 +23,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "Follow the rabbit"; /// let ptr: *const u8 = s.as_ptr(); @@ -323,8 +321,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let ptr: *const u8 = &10u8 as *const u8; /// @@ -384,8 +380,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(ptr_as_uninit)] /// @@ -449,8 +443,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -526,8 +518,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -908,8 +898,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -993,8 +981,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// @@ -1072,8 +1058,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -1152,8 +1136,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; @@ -1359,7 +1341,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] @@ -1482,7 +1463,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index 0a2f63e3ec6..422d0f2b8f0 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -22,8 +22,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -332,8 +330,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// @@ -396,8 +392,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(ptr_as_uninit)] /// @@ -461,8 +455,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -539,8 +531,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 4, 5]; @@ -660,8 +650,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 
3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -1010,8 +998,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -1095,8 +1081,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// @@ -1174,8 +1158,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -1254,8 +1236,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; @@ -1627,7 +1607,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] @@ -1752,7 +1731,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] diff --git a/library/core/src/result.rs b/library/core/src/result.rs index f00c40f35d5..208b220c24a 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -458,7 +458,7 @@ //! [`Result`] of a collection of each contained value of the original //! [`Result`] values, or [`Err`] if any of the elements was [`Err`]. //! -//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA%2C%20E%3E%3E-for-Result%3CV%2C%20E%3E +//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA,+E%3E%3E-for-Result%3CV,+E%3E //! //! ``` //! let v = [Ok(2), Ok(4), Err("err!"), Ok(8)]; @@ -474,8 +474,8 @@ //! to provide the [`product`][Iterator::product] and //! [`sum`][Iterator::sum] methods. //! -//! [impl-Product]: Result#impl-Product%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E -//! [impl-Sum]: Result#impl-Sum%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E +//! [impl-Product]: Result#impl-Product%3CResult%3CU,+E%3E%3E-for-Result%3CT,+E%3E +//! [impl-Sum]: Result#impl-Sum%3CResult%3CU,+E%3E%3E-for-Result%3CT,+E%3E //! //! ``` //! 
let v = [Err("error!"), Ok(1), Ok(2), Ok(3), Err("foo")]; @@ -525,8 +525,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<i32, &str> = Ok(-3); /// assert_eq!(x.is_ok(), true); @@ -572,8 +570,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<i32, &str> = Ok(-3); /// assert_eq!(x.is_err(), false); @@ -627,8 +623,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// assert_eq!(x.ok(), Some(2)); @@ -658,8 +652,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// assert_eq!(x.err(), None); @@ -693,8 +685,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// assert_eq!(x.as_ref(), Ok(&2)); @@ -716,8 +706,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn mutate(r: &mut Result<i32, i32>) { /// match r.as_mut() { @@ -812,8 +800,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let k = 21; /// @@ -841,8 +827,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn stringify(x: u32) -> String { format!("error code: {x}") } /// @@ -968,8 +952,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(7); /// assert_eq!(x.iter().next(), Some(&7)); @@ -989,8 +971,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut x: Result<u32, &str> = Ok(7); /// match x.iter_mut().next() { @@ -1031,8 +1011,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ```should_panic /// let x: Result<u32, &str> = Err("emergency failure"); /// x.expect("Testing expect"); // panics with `Testing expect: emergency failure` @@ -1160,8 +1138,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ```should_panic /// let x: Result<u32, &str> = Ok(10); /// x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10` @@ -1222,8 +1198,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// # #![feature(never_type)] /// # #![feature(unwrap_infallible)] @@ -1259,8 +1233,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// # #![feature(never_type)] /// # #![feature(unwrap_infallible)] @@ -1298,8 +1270,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// let y: Result<&str, &str> = Err("late error"); @@ -1383,8 +1353,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// let y: Result<u32, &str> = Err("late error"); @@ -1426,8 +1394,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn sq(x: u32) -> Result<u32, u32> { Ok(x * x) } /// fn err(x: u32) -> Result<u32, u32> { Err(x) } @@ -1456,8 +1422,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let default = 2; /// let x: Result<u32, &str> = Ok(9); @@ -1487,8 +1451,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn count(x: &str) -> usize { x.len() } /// @@ -1752,8 +1714,6 @@ impl<T, E> Result<Result<T, E>, E> { /// /// # 
Examples /// - /// Basic usage: - /// /// ``` /// #![feature(result_flattening)] /// let x: Result<Result<&'static str, u32>, u32> = Ok(Ok("hello")); @@ -1842,8 +1802,6 @@ impl<T, E> IntoIterator for Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(5); /// let v: Vec<u32> = x.into_iter().collect(); diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index 90ab43d1289..c4317799bcc 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -7,7 +7,9 @@ use crate::cmp; use crate::cmp::Ordering; use crate::fmt; use crate::intrinsics::assume; -use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; +use crate::iter::{ + FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, UncheckedIterator, +}; use crate::marker::{PhantomData, Send, Sized, Sync}; use crate::mem::{self, SizedTypeProperties}; use crate::num::NonZeroUsize; diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs index 0fd57b197aa..89b92a7d597 100644 --- a/library/core/src/slice/iter/macros.rs +++ b/library/core/src/slice/iter/macros.rs @@ -384,6 +384,15 @@ macro_rules! iterator { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<T> TrustedLen for $name<'_, T> {} + + impl<'a, T> UncheckedIterator for $name<'a, T> { + unsafe fn next_unchecked(&mut self) -> $elem { + // SAFETY: The caller promised there's at least one more item. + unsafe { + next_unchecked!(self) + } + } + } } } diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index c848c2e18e9..98c8349eb60 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -16,25 +16,29 @@ const USIZE_BYTES: usize = mem::size_of::<usize>(); /// bytes where the borrow propagated all the way to the most significant /// bit." #[inline] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0 } -#[cfg(target_pointer_width = "16")] #[inline] +#[cfg(target_pointer_width = "16")] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } -#[cfg(not(target_pointer_width = "16"))] #[inline] +#[cfg(not(target_pointer_width = "16"))] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. -#[must_use] #[inline] +#[must_use] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> { // Fast path for small slices. if text.len() < 2 * USIZE_BYTES { @@ -45,6 +49,7 @@ pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> { } #[inline] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> { let mut i = 0; @@ -60,6 +65,10 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> { None } +#[rustc_allow_const_fn_unstable(const_cmp)] +#[rustc_allow_const_fn_unstable(const_slice_index)] +#[rustc_allow_const_fn_unstable(const_align_offset)] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. 
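A compact sketch of the word-at-a-time trick that these newly const-qualified helpers rely on; `has_zero_byte` is an illustrative stand-in for `contains_zero_byte`, and `memchr_aligned` applies it after XOR-ing each word with `repeat_byte(x)` so that matching bytes become zero:

const LO: usize = usize::MAX / 255; // 0x01 repeated in every byte
const HI: usize = LO << 7; // 0x80 repeated in every byte

const fn has_zero_byte(x: usize) -> bool {
    // The subtraction borrows all the way into a byte's most significant bit
    // only when that byte is zero; `!x & HI` masks out bytes whose high bit
    // was already set.
    x.wrapping_sub(LO) & !x & HI != 0
}

// Spot checks, usable in const context because the helper is a `const fn`:
const _: () = assert!(!has_zero_byte(LO));
const _: () = assert!(has_zero_byte(LO & !0xff));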
// diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index d93a3a57ecd..e8374784501 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -805,8 +805,9 @@ impl<T> [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn windows(&self, size: usize) -> Windows<'_, T> { - let size = NonZeroUsize::new(size).expect("size is zero"); + let size = NonZeroUsize::new(size).expect("window size must be non-zero"); Windows::new(self, size) } @@ -839,8 +840,9 @@ impl<T> [T] { /// [`rchunks`]: slice::rchunks #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); Chunks::new(self, chunk_size) } @@ -877,8 +879,9 @@ impl<T> [T] { /// [`rchunks_mut`]: slice::rchunks_mut #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksMut::new(self, chunk_size) } @@ -914,8 +917,9 @@ impl<T> [T] { /// [`rchunks_exact`]: slice::rchunks_exact #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] + #[track_caller] pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksExact::new(self, chunk_size) } @@ -956,8 +960,9 @@ impl<T> [T] { /// [`rchunks_exact_mut`]: slice::rchunks_exact_mut #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] + #[track_caller] pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksExactMut::new(self, chunk_size) } @@ -1037,9 +1042,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (multiple_of_n, remainder) = self.split_at(len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1068,9 +1074,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (remainder, multiple_of_n) = self.split_at(self.len() - len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1108,8 +1115,9 @@ impl<T> [T] { /// [`chunks_exact`]: slice::chunks_exact #[unstable(feature = "array_chunks", issue = "74985")] #[inline] + #[track_caller] pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); ArrayChunks::new(self) } @@ -1186,9 +1194,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_chunks_mut<const N: 
usize>(&mut self) -> (&mut [[T; N]], &mut [T]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (multiple_of_n, remainder) = self.split_at_mut(len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1223,9 +1232,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1265,8 +1275,9 @@ impl<T> [T] { /// [`chunks_exact_mut`]: slice::chunks_exact_mut #[unstable(feature = "array_chunks", issue = "74985")] #[inline] + #[track_caller] pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); ArrayChunksMut::new(self) } @@ -1297,8 +1308,9 @@ impl<T> [T] { /// [`windows`]: slice::windows #[unstable(feature = "array_windows", issue = "75027")] #[inline] + #[track_caller] pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> { - assert_ne!(N, 0, "windows cannot have a size of zero"); + assert!(N != 0, "window size must be non-zero"); ArrayWindows::new(self) } @@ -1331,8 +1343,9 @@ impl<T> [T] { /// [`chunks`]: slice::chunks #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunks::new(self, chunk_size) } @@ -1369,8 +1382,9 @@ impl<T> [T] { /// [`chunks_mut`]: slice::chunks_mut #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksMut::new(self, chunk_size) } @@ -1408,8 +1422,9 @@ impl<T> [T] { /// [`chunks_exact`]: slice::chunks_exact #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksExact::new(self, chunk_size) } @@ -1451,8 +1466,9 @@ impl<T> [T] { /// [`chunks_exact_mut`]: slice::chunks_exact_mut #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksExactMut::new(self, chunk_size) } @@ -2714,8 +2730,10 @@ impl<T> [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index`. Additionally, this reordering is /// unstable (i.e. any number of equal elements may end up at position `index`), in-place - /// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also/ known as "kth - /// element" in other libraries. It returns a triplet of the following from the reordered slice: + /// (i.e. does not allocate), and *O*(*n*) on average. 
The worst-case performance is *O*(*n* log *n*). + /// This function is also known as "kth element" in other libraries. + /// + /// It returns a triplet of the following from the reordered slice: /// the subslice prior to `index`, the element at `index`, and the subslice after `index`; /// accordingly, the values in those two subslices will respectively all be less-than-or-equal-to /// and greater-than-or-equal-to the value of the element at `index`. @@ -2761,8 +2779,11 @@ impl<T> [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index` using the comparator function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at - /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average. + /// The worst-case performance is *O*(*n* log *n*). This function is also known as + /// "kth element" in other libraries. + /// + /// It returns a triplet of the following from /// the slice reordered according to the provided comparator function: the subslice prior to /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to @@ -2813,8 +2834,11 @@ impl<T> [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index` using the key extraction function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at - /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average. + /// The worst-case performance is *O*(*n* log *n*). + /// This function is also known as "kth element" in other libraries. + /// + /// It returns a triplet of the following from /// the slice reordered according to the provided key extraction function: the subslice prior to /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs index 2181f9a8118..4ca4eb86bde 100644 --- a/library/core/src/slice/sort.rs +++ b/library/core/src/slice/sort.rs @@ -13,115 +13,184 @@ use crate::cmp; use crate::mem::{self, MaybeUninit, SizedTypeProperties}; use crate::ptr; -/// When dropped, copies from `src` into `dest`. -struct CopyOnDrop<T> { +// When dropped, copies from `src` into `dest`. +struct InsertionHole<T> { src: *const T, dest: *mut T, } -impl<T> Drop for CopyOnDrop<T> { +impl<T> Drop for InsertionHole<T> { fn drop(&mut self) { - // SAFETY: This is a helper class. - // Please refer to its usage for correctness. - // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`. + // SAFETY: This is a helper class. Please refer to its usage for correctness. 
Namely, one
+ // must be sure that `src` and `dest` do not overlap as required by
+ // `ptr::copy_nonoverlapping` and that both are valid for writes.
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}

-/// Shifts the first element to the right until it encounters a greater or equal element.
-fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
+/// Inserts `v[v.len() - 1]` into the pre-sorted sequence `v[..v.len() - 1]` so that the whole of
+/// `v[..]` becomes sorted.
+unsafe fn insert_tail<T, F>(v: &mut [T], is_less: &mut F)
where
F: FnMut(&T, &T) -> bool,
{
- let len = v.len();
- // SAFETY: The unsafe operations below involves indexing without a bounds check (by offsetting a
- // pointer) and copying memory (`ptr::copy_nonoverlapping`).
- //
- // a. Indexing:
- // 1. We checked the size of the array to >=2.
- // 2. All the indexing that we will do is always between {0 <= index < len} at most.
- //
- // b. Memory copying
- // 1. We are obtaining pointers to references which are guaranteed to be valid.
- // 2. They cannot overlap because we obtain pointers to difference indices of the slice.
- // Namely, `i` and `i-1`.
- // 3. If the slice is properly aligned, the elements are properly aligned.
- // It is the caller's responsibility to make sure the slice is properly aligned.
- //
- // See comments below for further detail.
+ debug_assert!(v.len() >= 2);
+
+ let arr_ptr = v.as_mut_ptr();
+ let i = v.len() - 1;
+
+ // SAFETY: caller must ensure v is at least len 2.
unsafe {
- // If the first two elements are out-of-order...
- if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
- // Read the first element into a stack-allocated variable. If a following comparison
- // operation panics, `hole` will get dropped and automatically write the element back
- // into the slice.
- let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
- let v = v.as_mut_ptr();
- let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1) };
- ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
-
- for i in 2..len {
- if !is_less(&*v.add(i), &*tmp) {
+ // See insert_head, which talks about why this approach is beneficial.
+ let i_ptr = arr_ptr.add(i);
+
+ // It's important that we use i_ptr here. If this check is positive and we continue,
+ // we want to make sure that no other copy of the value was seen by is_less.
+ // Otherwise we would have to copy it back.
+ if is_less(&*i_ptr, &*i_ptr.sub(1)) {
+ // It's important that we use tmp for comparison from now on, as it is the value
+ // that will be copied back. Otherwise we could create a divergence by copying
+ // back the wrong value.
+ let tmp = mem::ManuallyDrop::new(ptr::read(i_ptr));
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &*tmp, dest: i_ptr.sub(1) };
+ ptr::copy_nonoverlapping(hole.dest, i_ptr, 1);
+
+ // SAFETY: We know `i` is at least 1, so `i - 1` cannot underflow.
+ for j in (0..(i - 1)).rev() {
+ let j_ptr = arr_ptr.add(j);
+ if !is_less(&*tmp, &*j_ptr) {
break;
}
- // Move `i`-th element one place to the left, thus shifting the hole to the right.
- ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1); - hole.dest = v.add(i); + ptr::copy_nonoverlapping(j_ptr, hole.dest, 1); + hole.dest = j_ptr; } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } -/// Shifts the last element to the left until it encounters a smaller or equal element. -fn shift_tail<T, F>(v: &mut [T], is_less: &mut F) +/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. +/// +/// This is the integral subroutine of insertion sort. +unsafe fn insert_head<T, F>(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool, { - let len = v.len(); - // SAFETY: The unsafe operations below involves indexing without a bound check (by offsetting a - // pointer) and copying memory (`ptr::copy_nonoverlapping`). - // - // a. Indexing: - // 1. We checked the size of the array to >= 2. - // 2. All the indexing that we will do is always between `0 <= index < len-1` at most. - // - // b. Memory copying - // 1. We are obtaining pointers to references which are guaranteed to be valid. - // 2. They cannot overlap because we obtain pointers to difference indices of the slice. - // Namely, `i` and `i+1`. - // 3. If the slice is properly aligned, the elements are properly aligned. - // It is the caller's responsibility to make sure the slice is properly aligned. - // - // See comments below for further detail. + debug_assert!(v.len() >= 2); + + // SAFETY: caller must ensure v is at least len 2. unsafe { - // If the last two elements are out-of-order... - if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) { - // Read the last element into a stack-allocated variable. If a following comparison - // operation panics, `hole` will get dropped and automatically write the element back - // into the slice. - let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1))); - let v = v.as_mut_ptr(); - let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2) }; - ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1); - - for i in (0..len - 2).rev() { - if !is_less(&*tmp, &*v.add(i)) { + if is_less(v.get_unchecked(1), v.get_unchecked(0)) { + let arr_ptr = v.as_mut_ptr(); + + // There are three ways to implement insertion here: + // + // 1. Swap adjacent elements until the first one gets to its final destination. + // However, this way we copy data around more than is necessary. If elements are big + // structures (costly to copy), this method will be slow. + // + // 2. Iterate until the right place for the first element is found. Then shift the + // elements succeeding it to make room for it and finally place it into the + // remaining hole. This is a good method. + // + // 3. Copy the first element into a temporary variable. Iterate until the right place + // for it is found. As we go along, copy every traversed element into the slot + // preceding it. Finally, copy data from the temporary variable into the remaining + // hole. This method is very good. Benchmarks demonstrated slightly better + // performance than with the 2nd method. + // + // All methods were benchmarked, and the 3rd showed best results. So we chose that one. + let tmp = mem::ManuallyDrop::new(ptr::read(arr_ptr)); + + // Intermediate state of the insertion process is always tracked by `hole`, which + // serves two purposes: + // 1. Protects integrity of `v` from panics in `is_less`. + // 2. Fills the remaining hole in `v` in the end. 
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &*tmp, dest: arr_ptr.add(1) };
+ ptr::copy_nonoverlapping(arr_ptr.add(1), arr_ptr.add(0), 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v.get_unchecked(i), &*tmp) {
 break;
 }
-
- // Move `i`-th element one place to the right, thus shifting the hole to the left.
- ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1);
- hole.dest = v.add(i);
+ ptr::copy_nonoverlapping(arr_ptr.add(i), arr_ptr.add(i - 1), 1);
+ hole.dest = arr_ptr.add(i);
 }
 // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
 }
 }
 }

+/// Sort `v` assuming `v[..offset]` is already sorted.
+///
+/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically no
+/// performance impact; it can even improve performance in some cases.
+#[inline(never)]
+fn insertion_sort_shift_left<T, F>(v: &mut [T], offset: usize, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+
+ // Using assert here improves performance.
+ assert!(offset != 0 && offset <= len);
+
+ // Shift each element of the unsorted region v[offset..] as far left as is needed to make v
+ // sorted.
+ for i in offset..len {
+ // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if
+ // len >= 2. We also know `i` must be at least 1, so the slice `v[..=i]` has length at
+ // least 2.
+ unsafe {
+ insert_tail(&mut v[..=i], is_less);
+ }
+ }
+}
+
+/// Sort `v` assuming `v[offset..]` is already sorted.
+///
+/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically no
+/// performance impact; it can even improve performance in some cases.
+#[inline(never)]
+fn insertion_sort_shift_right<T, F>(v: &mut [T], offset: usize, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+
+ // Using assert here improves performance.
+ assert!(offset != 0 && offset <= len && len >= 2);
+
+ // Shift each element of the unsorted region v[..offset] as far right as is needed to make v
+ // sorted.
+ for i in (0..offset).rev() {
+ // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if
+ // len >= 2. `i` is at most `offset - 1`, so at the existing call sites the slice
+ // `v[i..len]` passed to `insert_head` always has length at least 2.
+ unsafe {
+ insert_head(&mut v[i..len], is_less);
+ }
+ }
+}
+
 /// Partially sorts a slice by shifting several out-of-order elements around.
 ///
 /// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
@@ -161,26 +230,19 @@ where
 // Swap the found pair of elements. This puts them in correct order.
 v.swap(i - 1, i);

- // Shift the smaller element to the left.
- shift_tail(&mut v[..i], is_less);
- // Shift the greater element to the right.
- shift_head(&mut v[i..], is_less);
+ if i >= 2 {
+ // Shift the smaller element to the left.
+ insertion_sort_shift_left(&mut v[..i], i - 1, is_less);
+
+ // Shift the greater element to the right.
+ insertion_sort_shift_right(&mut v[..i], 1, is_less);
+ }
 }

 // Didn't manage to sort the slice in the limited number of steps.
 false
 }

-/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
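As a quick reference for what the two shift helpers above compute, here is a minimal safe-Rust sketch (our own illustrative names and bounds-checked indexing, not the library's hole-based unsafe implementation):

```rust
// Safe sketch of the "shift left" scheme: assuming v[..offset] is sorted,
// swap each element of the unsorted suffix leftwards until it reaches its
// position in the sorted prefix. The real insert_tail moves the element
// through a temporary hole instead of doing pairwise swaps, but the
// resulting order is identical.
fn insertion_sort_shift_left_sketch<T: Ord>(v: &mut [T], offset: usize) {
    assert!(offset != 0 && offset <= v.len());
    for i in offset..v.len() {
        let mut j = i;
        while j > 0 && v[j] < v[j - 1] {
            v.swap(j, j - 1);
            j -= 1;
        }
    }
}

fn main() {
    let mut v = [1, 3, 5, 4, 2];
    insertion_sort_shift_left_sketch(&mut v, 3); // v[..3] is already sorted
    assert_eq!(v, [1, 2, 3, 4, 5]);
}
```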
-fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - for i in 1..v.len() { - shift_tail(&mut v[..i + 1], is_less); - } -} - /// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case. #[cold] #[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")] @@ -198,8 +260,11 @@ where } // Choose the greater child. - if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) { - child += 1; + if child + 1 < v.len() { + // We need a branch to be sure not to out-of-bounds index, + // but it's highly predictable. The comparison, however, + // is better done branchless, especially for primitives. + child += is_less(&v[child], &v[child + 1]) as usize; } // Stop if the invariant holds at `node`. @@ -507,7 +572,7 @@ where // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); - let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot }; + let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; let pivot = &*tmp; // Find the first pair of out-of-order elements. @@ -560,7 +625,7 @@ where // operation panics, the pivot will be automatically written back into the slice. // SAFETY: The pointer here is valid because it is obtained from a reference to a slice. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); - let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot }; + let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; let pivot = &*tmp; // Now partition the slice. @@ -742,7 +807,9 @@ where // Very short slices get sorted using insertion sort. if len <= MAX_INSERTION { - insertion_sort(v, is_less); + if len >= 2 { + insertion_sort_shift_left(v, 1, is_less); + } return; } @@ -844,10 +911,14 @@ fn partition_at_index_loop<'a, T, F>( let mut was_balanced = true; loop { + let len = v.len(); + // For slices of up to this length it's probably faster to simply sort them. const MAX_INSERTION: usize = 10; - if v.len() <= MAX_INSERTION { - insertion_sort(v, is_less); + if len <= MAX_INSERTION { + if len >= 2 { + insertion_sort_shift_left(v, 1, is_less); + } return; } @@ -887,7 +958,7 @@ fn partition_at_index_loop<'a, T, F>( } let (mid, _) = partition(v, pivot, is_less); - was_balanced = cmp::min(mid, v.len() - mid) >= v.len() / 8; + was_balanced = cmp::min(mid, len - mid) >= len / 8; // Split the slice into `left`, `pivot`, and `right`. let (left, right) = v.split_at_mut(mid); @@ -954,75 +1025,6 @@ where (left, pivot, right) } -/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. -/// -/// This is the integral subroutine of insertion sort. -fn insert_head<T, F>(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - if v.len() >= 2 && is_less(&v[1], &v[0]) { - // SAFETY: Copy tmp back even if panic, and ensure unique observation. - unsafe { - // There are three ways to implement insertion here: - // - // 1. Swap adjacent elements until the first one gets to its final destination. - // However, this way we copy data around more than is necessary. If elements are big - // structures (costly to copy), this method will be slow. - // - // 2. Iterate until the right place for the first element is found. Then shift the - // elements succeeding it to make room for it and finally place it into the - // remaining hole. This is a good method. - // - // 3. Copy the first element into a temporary variable. Iterate until the right place - // for it is found. 
As we go along, copy every traversed element into the slot - // preceding it. Finally, copy data from the temporary variable into the remaining - // hole. This method is very good. Benchmarks demonstrated slightly better - // performance than with the 2nd method. - // - // All methods were benchmarked, and the 3rd showed best results. So we chose that one. - let tmp = mem::ManuallyDrop::new(ptr::read(&v[0])); - - // Intermediate state of the insertion process is always tracked by `hole`, which - // serves two purposes: - // 1. Protects integrity of `v` from panics in `is_less`. - // 2. Fills the remaining hole in `v` in the end. - // - // Panic safety: - // - // If `is_less` panics at any point during the process, `hole` will get dropped and - // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it - // initially held exactly once. - let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] }; - ptr::copy_nonoverlapping(&v[1], &mut v[0], 1); - - for i in 2..v.len() { - if !is_less(&v[i], &*tmp) { - break; - } - ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1); - hole.dest = &mut v[i]; - } - // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. - } - } - - // When dropped, copies from `src` into `dest`. - struct InsertionHole<T> { - src: *const T, - dest: *mut T, - } - - impl<T> Drop for InsertionHole<T> { - fn drop(&mut self) { - // SAFETY: The caller must ensure that src and dest are correctly set. - unsafe { - ptr::copy_nonoverlapping(self.src, self.dest, 1); - } - } - } -} - /// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and /// stores the result into `v[..]`. /// @@ -1180,8 +1182,6 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( { // Slices of up to this length get sorted using insertion sort. const MAX_INSERTION: usize = 20; - // Very short runs are extended using insertion sort to span at least this many elements. - const MIN_RUN: usize = 10; // The caller should have already checked that. debug_assert!(!T::IS_ZST); @@ -1191,9 +1191,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // Short arrays get sorted in-place via insertion sort to avoid allocations. if len <= MAX_INSERTION { if len >= 2 { - for i in (0..len - 1).rev() { - insert_head(&mut v[i..], is_less); - } + insertion_sort_shift_left(v, 1, is_less); } return; } @@ -1203,59 +1201,43 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run, // which will always have length at most `len / 2`. let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn); - let buf_ptr = buf.buf_ptr; + let buf_ptr = buf.buf_ptr.as_ptr(); let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn); - // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a - // strange decision, but consider the fact that merges more often go in the opposite direction - // (forwards). According to benchmarks, merging forwards is slightly faster than merging - // backwards. To conclude, identifying runs by traversing backwards improves performance. - let mut end = len; - while end > 0 { - // Find the next natural run, and reverse it if it's strictly descending. - let mut start = end - 1; - if start > 0 { - start -= 1; - - // SAFETY: The v.get_unchecked must be fed with correct inbound indicies. 
- unsafe { - if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) { - while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) { - start -= 1; - } - v[start..end].reverse(); - } else { - while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) - { - start -= 1; - } - } - } + let mut end = 0; + let mut start = 0; + + // Scan forward. Memory pre-fetching prefers forward scanning vs backwards scanning, and the + // code-gen is usually better. For the most sensitive types such as integers, these are merged + // bidirectionally at once. So there is no benefit in scanning backwards. + while end < len { + let (streak_end, was_reversed) = find_streak(&v[start..], is_less); + end += streak_end; + if was_reversed { + v[start..end].reverse(); } // Insert some more elements into the run if it's too short. Insertion sort is faster than // merge sort on short sequences, so this significantly improves performance. - while start > 0 && end - start < MIN_RUN { - start -= 1; - insert_head(&mut v[start..end], is_less); - } + end = provide_sorted_batch(v, start, end, is_less); // Push this run onto the stack. runs.push(TimSortRun { start, len: end - start }); - end = start; + start = end; // Merge some pairs of adjacent runs to satisfy the invariants. - while let Some(r) = collapse(runs.as_slice()) { - let left = runs[r + 1]; - let right = runs[r]; + while let Some(r) = collapse(runs.as_slice(), len) { + let left = runs[r]; + let right = runs[r + 1]; + let merge_slice = &mut v[left.start..right.start + right.len]; // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and // neither side may be on length 0. unsafe { - merge(&mut v[left.start..right.start + right.len], left.len, buf_ptr, is_less); + merge(merge_slice, left.len, buf_ptr, is_less); } - runs[r] = TimSortRun { start: left.start, len: left.len + right.len }; - runs.remove(r + 1); + runs[r + 1] = TimSortRun { start: left.start, len: left.len + right.len }; + runs.remove(r); } } @@ -1277,10 +1259,10 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // run starts at index 0, it will always demand a merge operation until the stack is fully // collapsed, in order to complete the sort. 
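The `find_streak` used by the forward scan above is defined near the end of this file; as a hedged safe-Rust restatement of what it computes (our own names, checked indexing):

```rust
// Safe sketch of streak detection: count how many leading elements are
// already non-decreasing (or, alternatively, strictly decreasing), and
// report whether the streak must be reversed before it can serve as a run.
fn find_streak_sketch<T: Ord>(v: &[T]) -> (usize, bool) {
    if v.len() < 2 {
        return (v.len(), false);
    }
    let reversed = v[1] < v[0];
    let mut end = 2;
    while end < v.len() && ((v[end] < v[end - 1]) == reversed) {
        end += 1;
    }
    (end, reversed)
}

fn main() {
    assert_eq!(find_streak_sketch(&[1, 2, 2, 1, 0]), (3, false));
    assert_eq!(find_streak_sketch(&[3, 2, 1, 9]), (3, true));
}
```

Detecting strictly decreasing streaks and reversing them is what lets the sort consume descending inputs in linear time.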
#[inline] - fn collapse(runs: &[TimSortRun]) -> Option<usize> { + fn collapse(runs: &[TimSortRun], stop: usize) -> Option<usize> { let n = runs.len(); if n >= 2 - && (runs[n - 1].start == 0 + && (runs[n - 1].start + runs[n - 1].len == stop || runs[n - 2].len <= runs[n - 1].len || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) @@ -1298,7 +1280,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( where ElemDeallocF: Fn(*mut T, usize), { - buf_ptr: *mut T, + buf_ptr: ptr::NonNull<T>, capacity: usize, elem_dealloc_fn: ElemDeallocF, } @@ -1315,7 +1297,11 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( where ElemAllocF: Fn(usize) -> *mut T, { - Self { buf_ptr: elem_alloc_fn(len), capacity: len, elem_dealloc_fn } + Self { + buf_ptr: ptr::NonNull::new(elem_alloc_fn(len)).unwrap(), + capacity: len, + elem_dealloc_fn, + } } } @@ -1324,7 +1310,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( ElemDeallocF: Fn(*mut T, usize), { fn drop(&mut self) { - (self.elem_dealloc_fn)(self.buf_ptr, self.capacity); + (self.elem_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); } } @@ -1333,7 +1319,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( RunAllocF: Fn(usize) -> *mut TimSortRun, RunDeallocF: Fn(*mut TimSortRun, usize), { - buf_ptr: *mut TimSortRun, + buf_ptr: ptr::NonNull<TimSortRun>, capacity: usize, len: usize, run_alloc_fn: RunAllocF, @@ -1350,7 +1336,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( const START_RUN_CAPACITY: usize = 16; Self { - buf_ptr: run_alloc_fn(START_RUN_CAPACITY), + buf_ptr: ptr::NonNull::new(run_alloc_fn(START_RUN_CAPACITY)).unwrap(), capacity: START_RUN_CAPACITY, len: 0, run_alloc_fn, @@ -1361,15 +1347,15 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( fn push(&mut self, val: TimSortRun) { if self.len == self.capacity { let old_capacity = self.capacity; - let old_buf_ptr = self.buf_ptr; + let old_buf_ptr = self.buf_ptr.as_ptr(); self.capacity = self.capacity * 2; - self.buf_ptr = (self.run_alloc_fn)(self.capacity); + self.buf_ptr = ptr::NonNull::new((self.run_alloc_fn)(self.capacity)).unwrap(); // SAFETY: buf_ptr new and old were correctly allocated and old_buf_ptr has // old_capacity valid elements. unsafe { - ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr, old_capacity); + ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr.as_ptr(), old_capacity); } (self.run_dealloc_fn)(old_buf_ptr, old_capacity); @@ -1377,7 +1363,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // SAFETY: The invariant was just checked. unsafe { - self.buf_ptr.add(self.len).write(val); + self.buf_ptr.as_ptr().add(self.len).write(val); } self.len += 1; } @@ -1390,7 +1376,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // SAFETY: buf_ptr needs to be valid and len invariant upheld. unsafe { // the place we are taking from. - let ptr = self.buf_ptr.add(index); + let ptr = self.buf_ptr.as_ptr().add(index); // Shift everything down to fill in that spot. ptr::copy(ptr.add(1), ptr, self.len - index - 1); @@ -1400,7 +1386,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( fn as_slice(&self) -> &[TimSortRun] { // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld. 
- unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr, self.len) }
+ unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr.as_ptr(), self.len) }
 }

 fn len(&self) -> usize {
@@ -1419,7 +1405,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>(
 if index < self.len {
 // SAFETY: buf_ptr and len invariant must be upheld.
 unsafe {
- return &*(self.buf_ptr.add(index));
+ return &*(self.buf_ptr.as_ptr().add(index));
 }
 }

@@ -1436,7 +1422,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>(
 if index < self.len {
 // SAFETY: buf_ptr and len invariant must be upheld.
 unsafe {
- return &mut *(self.buf_ptr.add(index));
+ return &mut *(self.buf_ptr.as_ptr().add(index));
 }
 }

@@ -1452,7 +1438,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>(
 fn drop(&mut self) {
 // As long as TimSortRun is Copy we don't need to drop them individually but just the
 // whole allocation.
- (self.run_dealloc_fn)(self.buf_ptr, self.capacity);
+ (self.run_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity);
 }
 }
 }
@@ -1463,3 +1449,71 @@ pub struct TimSortRun {
 len: usize,
 start: usize,
 }
+
+/// Takes a range denoted by `start` and `end` that is already sorted, and extends it to the right
+/// if necessary, using sorts optimized for smaller ranges such as insertion sort.
+#[cfg(not(no_global_oom_handling))]
+fn provide_sorted_batch<T, F>(v: &mut [T], start: usize, mut end: usize, is_less: &mut F) -> usize
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+ assert!(end >= start && end <= len);
+
+ // This value is a balance between least comparisons and best performance, as
+ // influenced by for example cache locality.
+ const MIN_INSERTION_RUN: usize = 10;
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ let start_end_diff = end - start;
+
+ if start_end_diff < MIN_INSERTION_RUN && end < len {
+ // v[start..end] are elements that are already sorted in the input. We want to extend
+ // the sorted region to the right, so we sort up to MIN_INSERTION_RUN - 1 additional
+ // elements on the right, which is more efficient than trying to push those already
+ // sorted elements to the left.
+ end = cmp::min(start + MIN_INSERTION_RUN, len);
+ let presorted_start = cmp::max(start_end_diff, 1);
+
+ insertion_sort_shift_left(&mut v[start..end], presorted_start, is_less);
+ }
+
+ end
+}
+
+/// Finds a streak of presorted elements starting at the beginning of the slice. Returns the index
+/// of the first element that is not part of said streak, and a bool denoting whether the streak
+/// was reversed. Streaks can be increasing or decreasing.
+fn find_streak<T, F>(v: &[T], is_less: &mut F) -> (usize, bool)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+
+ if len < 2 {
+ return (len, false);
+ }
+
+ let mut end = 2;
+
+ // SAFETY: See the specific comments below.
+ unsafe {
+ // SAFETY: We checked that len >= 2, so 0 and 1 are valid indices.
+ let assume_reverse = is_less(v.get_unchecked(1), v.get_unchecked(0));

+ // SAFETY: We know end >= 2 and check end < len.
+ // From that it follows that accessing v at end and end - 1 is safe.
+ if assume_reverse { + while end < len && is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { + end += 1; + } + + (end, true) + } else { + while end < len && !is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { + end += 1; + } + (end, false) + } + } +} diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index d969475aa48..95c682f42d0 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -1,6 +1,6 @@ //! Iterators for `str` methods. -use crate::char; +use crate::char as char_mod; use crate::fmt::{self, Write}; use crate::iter::{Chain, FlatMap, Flatten}; use crate::iter::{Copied, Filter, FusedIterator, Map, TrustedLen}; @@ -1455,8 +1455,8 @@ impl FusedIterator for EncodeUtf16<'_> {} #[derive(Clone, Debug)] pub struct EscapeDebug<'a> { pub(super) inner: Chain< - Flatten<option::IntoIter<char::EscapeDebug>>, - FlatMap<Chars<'a>, char::EscapeDebug, CharEscapeDebugContinue>, + Flatten<option::IntoIter<char_mod::EscapeDebug>>, + FlatMap<Chars<'a>, char_mod::EscapeDebug, CharEscapeDebugContinue>, >, } @@ -1464,14 +1464,14 @@ pub struct EscapeDebug<'a> { #[stable(feature = "str_escape", since = "1.34.0")] #[derive(Clone, Debug)] pub struct EscapeDefault<'a> { - pub(super) inner: FlatMap<Chars<'a>, char::EscapeDefault, CharEscapeDefault>, + pub(super) inner: FlatMap<Chars<'a>, char_mod::EscapeDefault, CharEscapeDefault>, } /// The return type of [`str::escape_unicode`]. #[stable(feature = "str_escape", since = "1.34.0")] #[derive(Clone, Debug)] pub struct EscapeUnicode<'a> { - pub(super) inner: FlatMap<Chars<'a>, char::EscapeUnicode, CharEscapeUnicode>, + pub(super) inner: FlatMap<Chars<'a>, char_mod::EscapeUnicode, CharEscapeUnicode>, } macro_rules! escape_types_impls { diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 818721062d7..00bcaf3e18c 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -922,14 +922,14 @@ impl AtomicBool { /// /// let mut atomic = AtomicBool::new(true); /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// # } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut bool { - self.v.get() as *mut bool + pub const fn as_ptr(&self) -> *mut bool { + self.v.get().cast() } /// Fetches the value, and applies a function to it that returns an optional @@ -1803,7 +1803,7 @@ impl<T> AtomicPtr<T> { /// /// ```ignore (extern-declaration) /// #![feature(atomic_mut_ptr)] - //// use std::sync::atomic::AtomicPtr; + /// use std::sync::atomic::AtomicPtr; /// /// extern "C" { /// fn my_atomic_op(arg: *mut *mut u32); @@ -1814,12 +1814,12 @@ impl<T> AtomicPtr<T> { /// /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut *mut T { + pub const fn as_ptr(&self) -> *mut *mut T { self.p.get() } } @@ -2719,7 +2719,7 @@ macro_rules! atomic_int { /// /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// # } /// ``` @@ -2727,7 +2727,7 @@ macro_rules! 
atomic_int { #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut $int_type { + pub const fn as_ptr(&self) -> *mut $int_type { self.v.get() } } diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs index 89adfccd901..808825326ae 100644 --- a/library/core/src/task/wake.rs +++ b/library/core/src/task/wake.rs @@ -174,7 +174,7 @@ impl RawWakerVTable { /// Currently, `Context` only serves to provide access to a [`&Waker`](Waker) /// which can be used to wake the current task. #[stable(feature = "futures_api", since = "1.36.0")] -#[cfg_attr(not(bootstrap), lang = "Context")] +#[lang = "Context"] pub struct Context<'a> { waker: &'a Waker, // Ensure we future-proof against variance changes by forcing diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs index f268fe3ae7b..5327e4f8139 100644 --- a/library/core/tests/array.rs +++ b/library/core/tests/array.rs @@ -700,3 +700,28 @@ fn array_into_iter_rfold() { let s = it.rfold(10, |a, b| 10 * a + b); assert_eq!(s, 10432); } + +#[cfg(not(panic = "abort"))] +#[test] +fn array_map_drops_unmapped_elements_on_panic() { + struct DropCounter<'a>(usize, &'a AtomicUsize); + impl Drop for DropCounter<'_> { + fn drop(&mut self) { + self.1.fetch_add(1, Ordering::SeqCst); + } + } + + const MAX: usize = 11; + for panic_after in 0..MAX { + let counter = AtomicUsize::new(0); + let a = array::from_fn::<_, 11, _>(|i| DropCounter(i, &counter)); + let success = std::panic::catch_unwind(|| { + let _ = a.map(|x| { + assert!(x.0 < panic_after); + assert_eq!(counter.load(Ordering::SeqCst), x.0); + }); + }); + assert!(success.is_err()); + assert_eq!(counter.load(Ordering::SeqCst), MAX); + } +} diff --git a/library/core/tests/iter/range.rs b/library/core/tests/iter/range.rs index 84498a8eae5..0f91ffe2dfc 100644 --- a/library/core/tests/iter/range.rs +++ b/library/core/tests/iter/range.rs @@ -26,7 +26,6 @@ fn test_range() { #[test] fn test_char_range() { - use std::char; // Miri is too slow let from = if cfg!(miri) { char::from_u32(0xD800 - 10).unwrap() } else { '\0' }; let to = if cfg!(miri) { char::from_u32(0xDFFF + 10).unwrap() } else { char::MAX }; diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs index 37345c1d381..62566a9502d 100644 --- a/library/core/tests/iter/traits/iterator.rs +++ b/library/core/tests/iter/traits/iterator.rs @@ -582,6 +582,9 @@ fn test_next_chunk() { assert_eq!(it.next_chunk().unwrap(), []); assert_eq!(it.next_chunk().unwrap(), [4, 5, 6, 7, 8, 9]); assert_eq!(it.next_chunk::<4>().unwrap_err().as_slice(), &[10, 11]); + + let mut it = std::iter::repeat_with(|| panic!()); + assert_eq!(it.next_chunk::<0>().unwrap(), []); } // just tests by whether or not this compiles diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs index 80d30f14c66..c02cd99cc44 100644 --- a/library/core/tests/ptr.rs +++ b/library/core/tests/ptr.rs @@ -25,7 +25,7 @@ fn test() { snd: isize, } let mut p = Pair { fst: 10, snd: 20 }; - let pptr: *mut Pair = &mut p; + let pptr: *mut Pair = addr_of_mut!(p); let iptr: *mut isize = pptr as *mut isize; assert_eq!(*iptr, 10); *iptr = 30; @@ -1070,8 +1070,8 @@ fn swap_copy_untyped() { let mut x = 5u8; let mut y = 6u8; - let ptr1 = &mut x as *mut u8 as *mut bool; - let ptr2 = &mut y as *mut u8 as *mut bool; + let ptr1 = addr_of_mut!(x).cast::<bool>(); + let ptr2 = addr_of_mut!(y).cast::<bool>(); unsafe { ptr::swap(ptr1, ptr2); diff --git a/library/proc_macro/src/bridge/rpc.rs 
b/library/proc_macro/src/bridge/rpc.rs index e9d7a46c06f..5b1bfb30983 100644 --- a/library/proc_macro/src/bridge/rpc.rs +++ b/library/proc_macro/src/bridge/rpc.rs @@ -1,7 +1,6 @@ //! Serialization for client-server communication. use std::any::Any; -use std::char; use std::io::Write; use std::num::NonZeroU32; use std::str; diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index adf521d9b94..349cd91c89e 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -16,7 +16,7 @@ panic_unwind = { path = "../panic_unwind", optional = true } panic_abort = { path = "../panic_abort" } core = { path = "../core" } libc = { version = "0.2.138", default-features = false, features = ['rustc-dep-of-std'] } -compiler_builtins = { version = "0.1.85" } +compiler_builtins = { version = "0.1.87" } profiler_builtins = { path = "../profiler_builtins", optional = true } unwind = { path = "../unwind" } hashbrown = { version = "0.12", default-features = false, features = ['rustc-dep-of-std'] } diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs index df490358827..742c4cc7c55 100644 --- a/library/std/src/collections/hash/map.rs +++ b/library/std/src/collections/hash/map.rs @@ -238,7 +238,7 @@ impl<K, V> HashMap<K, V, RandomState> { /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating. This method is allowed to allocate for more elements than - /// `capacity`. If `capacity` is 0, the hash set will not allocate. + /// `capacity`. If `capacity` is 0, the hash map will not allocate. /// /// # Examples /// diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs index 4e300762463..6b1f0cba82d 100644 --- a/library/std/src/f32.rs +++ b/library/std/src/f32.rs @@ -69,8 +69,8 @@ impl f32 { unsafe { intrinsics::ceilf32(self) } } - /// Returns the nearest integer to `self`. Round half-way cases away from - /// `0.0`. + /// Returns the nearest integer to `self`. If a value is half-way between two + /// integers, round away from `0.0`. /// /// # Examples /// diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs index ec67fdad4f7..16359766b51 100644 --- a/library/std/src/f64.rs +++ b/library/std/src/f64.rs @@ -69,8 +69,8 @@ impl f64 { unsafe { intrinsics::ceilf64(self) } } - /// Returns the nearest integer to `self`. Round half-way cases away from - /// `0.0`. + /// Returns the nearest integer to `self`. If a value is half-way between two + /// integers, round away from `0.0`. /// /// # Examples /// diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index 286ad68fd13..c550378e7d6 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -334,6 +334,10 @@ impl File { /// /// See the [`OpenOptions::open`] method for more details. /// + /// If you only need to read the entire file contents, + /// consider [`std::fs::read()`][self::read] or + /// [`std::fs::read_to_string()`][self::read_to_string] instead. + /// /// # Errors /// /// This function will return an error if `path` does not already exist. @@ -343,9 +347,12 @@ impl File { /// /// ```no_run /// use std::fs::File; + /// use std::io::Read; /// /// fn main() -> std::io::Result<()> { /// let mut f = File::open("foo.txt")?; + /// let mut data = vec![]; + /// f.read_to_end(&mut data)?; /// Ok(()) /// } /// ``` @@ -361,16 +368,20 @@ impl File { /// /// Depending on the platform, this function may fail if the /// full directory path does not exist. - /// /// See the [`OpenOptions::open`] function for more details. 
 ///
+ /// See also [`std::fs::write()`][self::write] for a simple function to
+ /// create a file with the given data.
+ ///
 /// # Examples
 ///
 /// ```no_run
 /// use std::fs::File;
+ /// use std::io::Write;
 ///
 /// fn main() -> std::io::Result<()> {
 /// let mut f = File::create("foo.txt")?;
+ /// f.write_all(&1234_u32.to_be_bytes())?;
 /// Ok(())
 /// }
 /// ```
@@ -397,9 +408,11 @@ impl File {
 /// #![feature(file_create_new)]
 ///
 /// use std::fs::File;
+ /// use std::io::Write;
 ///
 /// fn main() -> std::io::Result<()> {
 /// let mut f = File::create_new("foo.txt")?;
+ /// f.write_all("Hello, world!".as_bytes())?;
 /// Ok(())
 /// }
 /// ```
@@ -426,9 +439,11 @@ impl File {
 ///
 /// ```no_run
 /// use std::fs::File;
+ /// use std::io::Write;
 ///
 /// fn main() -> std::io::Result<()> {
 /// let mut f = File::options().append(true).open("example.log")?;
+ /// writeln!(&mut f, "new line")?;
 /// Ok(())
 /// }
 /// ```
@@ -966,6 +981,9 @@ impl OpenOptions {
 /// In order for the file to be created, [`OpenOptions::write`] or
 /// [`OpenOptions::append`] access must be used.
 ///
+ /// See also [`std::fs::write()`][self::write] for a simple function to
+ /// create a file with the given data.
+ ///
 /// # Examples
 ///
 /// ```no_run
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index eb582be012b..839fdc96632 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -12,6 +12,8 @@ use crate::time::{Duration, Instant};

 use rand::RngCore;

+#[cfg(target_os = "macos")]
+use crate::ffi::{c_char, c_int};
 #[cfg(unix)]
 use crate::os::unix::fs::symlink as symlink_dir;
 #[cfg(unix)]
@@ -24,8 +26,6 @@ use crate::os::windows::fs::{symlink_dir, symlink_file};
 use crate::sys::fs::symlink_junction;
 #[cfg(target_os = "macos")]
 use crate::sys::weak::weak;
-#[cfg(target_os = "macos")]
-use libc::{c_char, c_int};

 macro_rules! check {
 ($e:expr) => {
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index 3cabf24492e..7f07e4fddef 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -88,12 +88,23 @@ impl From<alloc::ffi::NulError> for Error {
 // doesn't accidentally get printed.
 #[cfg_attr(test, derive(Debug))]
 enum ErrorData<C> {
- Os(i32),
+ Os(RawOsError),
 Simple(ErrorKind),
 SimpleMessage(&'static SimpleMessage),
 Custom(C),
 }

+/// The type of raw OS error codes returned by [`Error::raw_os_error`].
+///
+/// This is an [`i32`] on all currently supported platforms, but platforms
+/// added in the future (such as UEFI) may use a different primitive type like
+/// [`usize`]. Use `as` or [`into`] conversions where applicable to ensure maximum
+/// portability.
+///
+/// [`into`]: Into::into
+#[unstable(feature = "raw_os_error_ty", issue = "107792")]
+pub type RawOsError = i32;
+
 // `#[repr(align(4))]` is probably redundant, it should have that value or
 // higher already. We include it just because repr_bitpacked.rs's encoding
 // requires an alignment >= 4 (note that `#[repr(align)]` will not reduce the
@@ -579,7 +590,7 @@ impl Error {
 #[must_use]
 #[inline]
 pub fn last_os_error() -> Error {
- Error::from_raw_os_error(sys::os::errno() as i32)
+ Error::from_raw_os_error(sys::os::errno())
 }

 /// Creates a new instance of an [`Error`] from a particular OS error code.
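The new `RawOsError` alias only names the integer type these APIs already used, so stable code keeps compiling unchanged; a small round-trip sketch (error code 2 is assumed to map to `NotFound`, which holds on the major platforms):

```rust
// Round-trip an OS error code through io::Error. On all platforms supported
// at this point, RawOsError is just i32, so the stable i32-based API is
// unaffected by the new alias.
use std::io::{Error, ErrorKind};

fn main() {
    let err = Error::from_raw_os_error(2); // ENOENT on Unix
    assert_eq!(err.raw_os_error(), Some(2));
    assert_eq!(err.kind(), ErrorKind::NotFound);
}
```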
@@ -610,7 +621,7 @@ impl Error { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] #[inline] - pub fn from_raw_os_error(code: i32) -> Error { + pub fn from_raw_os_error(code: RawOsError) -> Error { Error { repr: Repr::new_os(code) } } @@ -646,7 +657,7 @@ impl Error { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] #[inline] - pub fn raw_os_error(&self) -> Option<i32> { + pub fn raw_os_error(&self) -> Option<RawOsError> { match self.repr.data() { ErrorData::Os(i) => Some(i), ErrorData::Custom(..) => None, diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs index 3581484050d..f94f88bac41 100644 --- a/library/std/src/io/error/repr_bitpacked.rs +++ b/library/std/src/io/error/repr_bitpacked.rs @@ -102,7 +102,7 @@ //! to use a pointer type to store something that may hold an integer, some of //! the time. -use super::{Custom, ErrorData, ErrorKind, SimpleMessage}; +use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage}; use alloc::boxed::Box; use core::marker::PhantomData; use core::mem::{align_of, size_of}; @@ -172,7 +172,7 @@ impl Repr { } #[inline] - pub(super) fn new_os(code: i32) -> Self { + pub(super) fn new_os(code: RawOsError) -> Self { let utagged = ((code as usize) << 32) | TAG_OS; // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0. let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData); @@ -250,7 +250,7 @@ where let bits = ptr.as_ptr().addr(); match bits & TAG_MASK { TAG_OS => { - let code = ((bits as i64) >> 32) as i32; + let code = ((bits as i64) >> 32) as RawOsError; ErrorData::Os(code) } TAG_SIMPLE => { @@ -374,6 +374,9 @@ static_assert!((TAG_MASK + 1).is_power_of_two()); static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1); static_assert!(align_of::<Custom>() >= TAG_MASK + 1); +// `RawOsError` must be an alias for `i32`. +const _: fn(RawOsError) -> i32 = |os| os; + static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE); static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM); static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS); diff --git a/library/std/src/io/error/repr_unpacked.rs b/library/std/src/io/error/repr_unpacked.rs index d6ad55b99f5..093fde33757 100644 --- a/library/std/src/io/error/repr_unpacked.rs +++ b/library/std/src/io/error/repr_unpacked.rs @@ -2,7 +2,7 @@ //! non-64bit targets, where the packed 64 bit representation wouldn't work, and //! would have no benefit. 
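The `const _: fn(RawOsError) -> i32 = |os| os;` line added above is a compile-time type-equality assertion: an identity closure coerces to the function-pointer type `fn(A) -> B` only when `A` and `B` are the same type. The trick generalizes; a minimal sketch:

```rust
// Compile-time check that two aliases name the same type: if they ever
// diverge, the coercion of |x| x to fn(A) -> B fails to compile, turning a
// silent representation change into a build error.
type A = u32;
type B = u32;
const _: fn(A) -> B = |x| x;

fn main() {} // nothing happens at runtime; the check is purely compile-time
```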
-use super::{Custom, ErrorData, ErrorKind, SimpleMessage}; +use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage}; use alloc::boxed::Box; type Inner = ErrorData<Box<Custom>>; @@ -18,7 +18,7 @@ impl Repr { Self(Inner::Custom(b)) } #[inline] - pub(super) fn new_os(code: i32) -> Self { + pub(super) fn new_os(code: RawOsError) -> Self { Self(Inner::Os(code)) } #[inline] diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs index 9aea62a5b94..36d52aef03c 100644 --- a/library/std/src/io/error/tests.rs +++ b/library/std/src/io/error/tests.rs @@ -71,7 +71,7 @@ fn test_const() { #[test] fn test_os_packing() { - for code in -20i32..20i32 { + for code in -20..20 { let e = Error::from_raw_os_error(code); assert_eq!(e.raw_os_error(), Some(code)); assert_matches!( diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index de528e85368..b2b6d86134b 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -262,6 +262,8 @@ use crate::sys_common::memchr; #[stable(feature = "bufwriter_into_parts", since = "1.56.0")] pub use self::buffered::WriterPanicked; +#[unstable(feature = "raw_os_error_ty", issue = "107792")] +pub use self::error::RawOsError; pub(crate) use self::stdio::attempt_print_to_stderr; #[unstable(feature = "internal_output_capture", issue = "none")] #[doc(no_inline, hidden)] diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs index e35145c4ade..203c490fa29 100644 --- a/library/std/src/keyword_docs.rs +++ b/library/std/src/keyword_docs.rs @@ -1568,7 +1568,7 @@ mod static_keyword {} /// /// # Style conventions /// -/// Structs are always written in CamelCase, with few exceptions. While the trailing comma on a +/// Structs are always written in UpperCamelCase, with few exceptions. While the trailing comma on a /// struct's list of fields can be omitted, it's usually kept for convenience in adding and /// removing fields down the line. 
/// diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index 99cc0186310..363a2667174 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -273,13 +273,9 @@ #![feature(utf8_chunks)] // // Library features (core): -#![feature(array_error_internals)] #![feature(atomic_mut_ptr)] -#![feature(char_error_internals)] #![feature(char_internals)] #![feature(core_intrinsics)] -#![feature(cstr_from_bytes_until_nul)] -#![feature(cstr_internals)] #![feature(duration_constants)] #![feature(error_generic_member_access)] #![feature(error_in_core)] @@ -291,7 +287,6 @@ #![feature(float_next_up_down)] #![feature(hasher_prefixfree_extras)] #![feature(hashmap_internals)] -#![feature(int_error_internals)] #![feature(is_some_and)] #![feature(maybe_uninit_slice)] #![feature(maybe_uninit_write_slice)] @@ -358,7 +353,6 @@ #![feature(const_ip)] #![feature(const_ipv4)] #![feature(const_ipv6)] -#![feature(const_socketaddr)] #![feature(thread_local_internals)] // #![default_lib_allocator] diff --git a/library/std/src/net/socket_addr.rs b/library/std/src/net/socket_addr.rs index 33b0dfa03e0..1264bae809b 100644 --- a/library/std/src/net/socket_addr.rs +++ b/library/std/src/net/socket_addr.rs @@ -133,7 +133,7 @@ impl SocketAddr { /// ``` #[stable(feature = "ip_addr", since = "1.7.0")] #[must_use] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn new(ip: IpAddr, port: u16) -> SocketAddr { match ip { IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)), @@ -153,7 +153,7 @@ impl SocketAddr { /// ``` #[must_use] #[stable(feature = "ip_addr", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn ip(&self) -> IpAddr { match *self { SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()), @@ -194,7 +194,7 @@ impl SocketAddr { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn port(&self) -> u16 { match *self { SocketAddr::V4(ref a) => a.port(), @@ -238,7 +238,7 @@ impl SocketAddr { /// ``` #[must_use] #[stable(feature = "sockaddr_checker", since = "1.16.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn is_ipv4(&self) -> bool { matches!(*self, SocketAddr::V4(_)) } @@ -260,7 +260,7 @@ impl SocketAddr { /// ``` #[must_use] #[stable(feature = "sockaddr_checker", since = "1.16.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn is_ipv6(&self) -> bool { matches!(*self, SocketAddr::V6(_)) } @@ -280,7 +280,7 @@ impl SocketAddrV4 { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[must_use] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 { SocketAddrV4 { ip, port } } @@ -297,7 +297,7 @@ impl SocketAddrV4 { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + 
#[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn ip(&self) -> &Ipv4Addr { &self.ip } @@ -330,7 +330,7 @@ impl SocketAddrV4 { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn port(&self) -> u16 { self.port } @@ -371,7 +371,7 @@ impl SocketAddrV6 { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[must_use] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 { SocketAddrV6 { ip, port, flowinfo, scope_id } } @@ -388,7 +388,7 @@ impl SocketAddrV6 { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn ip(&self) -> &Ipv6Addr { &self.ip } @@ -421,7 +421,7 @@ impl SocketAddrV6 { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn port(&self) -> u16 { self.port } @@ -464,7 +464,7 @@ impl SocketAddrV6 { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn flowinfo(&self) -> u32 { self.flowinfo } @@ -504,7 +504,7 @@ impl SocketAddrV6 { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] + #[rustc_const_stable(feature = "const_socketaddr", since = "CURRENT_RUSTC_VERSION")] pub const fn scope_id(&self) -> u32 { self.scope_id } diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs index 69b72a81c5b..ac09a805975 100644 --- a/library/std/src/net/tcp.rs +++ b/library/std/src/net/tcp.rs @@ -829,7 +829,7 @@ impl TcpListener { /// } /// /// fn main() -> std::io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:80").unwrap(); + /// let listener = TcpListener::bind("127.0.0.1:80")?; /// /// for stream in listener.incoming() { /// match stream { diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs index c41e093a7e5..439b8d52a2d 100644 --- a/library/std/src/os/fd/owned.rs +++ b/library/std/src/os/fd/owned.rs @@ -396,6 +396,14 @@ impl<T: AsFd> AsFd for crate::sync::Arc<T> { } } +#[stable(feature = "asfd_rc", since = "CURRENT_RUSTC_VERSION")] +impl<T: AsFd> AsFd for crate::rc::Rc<T> { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + (**self).as_fd() + } +} + #[stable(feature = "asfd_ptrs", since = "1.64.0")] impl<T: AsFd> AsFd for Box<T> { #[inline] diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs index f92a0506670..c138162f1ab 100644 --- a/library/std/src/os/fd/raw.rs +++ b/library/std/src/os/fd/raw.rs @@ -244,6 +244,14 @@ impl<T: AsRawFd> AsRawFd for crate::sync::Arc<T> { } } +#[stable(feature = "asfd_rc", since = "CURRENT_RUSTC_VERSION")] +impl<T: AsRawFd> AsRawFd for crate::rc::Rc<T> { + #[inline] + fn as_raw_fd(&self) -> RawFd { + (**self).as_raw_fd() + } +} + 
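With the two `Rc` impls added above, a reference-counted file descriptor owner can be passed straight to fd-generic APIs. A small Unix-only usage sketch:

```rust
// Rc<File> now satisfies AsFd (and AsRawFd), so it can be handed to helpers
// that are generic over AsFd without first borrowing the inner File.
use std::fs::File;
use std::os::fd::AsFd;
use std::rc::Rc;

fn describe(f: impl AsFd) {
    println!("borrowed fd: {:?}", f.as_fd());
}

fn main() -> std::io::Result<()> {
    let file = Rc::new(File::open("/dev/null")?);
    describe(&file); // &Rc<File> works through the blanket &T impl
    describe(Rc::clone(&file)); // so does an owned Rc<File>
    Ok(())
}
```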
#[stable(feature = "asrawfd_ptrs", since = "1.63.0")] impl<T: AsRawFd> AsRawFd for Box<T> { #[inline] diff --git a/library/std/src/os/fuchsia/raw.rs b/library/std/src/os/fuchsia/raw.rs index 060d6e86b6c..ea6b94f2f13 100644 --- a/library/std/src/os/fuchsia/raw.rs +++ b/library/std/src/os/fuchsia/raw.rs @@ -24,12 +24,7 @@ pub type pthread_t = c_ulong; #[stable(feature = "raw_ext", since = "1.1.0")] pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t}; -#[cfg(any( - target_arch = "x86", - target_arch = "le32", - target_arch = "powerpc", - target_arch = "arm" -))] +#[cfg(any(target_arch = "x86", target_arch = "powerpc", target_arch = "arm"))] mod arch { use crate::os::raw::{c_long, c_short, c_uint}; diff --git a/library/std/src/os/l4re/raw.rs b/library/std/src/os/l4re/raw.rs index 699e8be33c8..b3f7439f8cd 100644 --- a/library/std/src/os/l4re/raw.rs +++ b/library/std/src/os/l4re/raw.rs @@ -26,7 +26,6 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t}; #[cfg(any( target_arch = "x86", - target_arch = "le32", target_arch = "m68k", target_arch = "powerpc", target_arch = "sparc", diff --git a/library/std/src/os/linux/raw.rs b/library/std/src/os/linux/raw.rs index c73791d1452..f46028c3a96 100644 --- a/library/std/src/os/linux/raw.rs +++ b/library/std/src/os/linux/raw.rs @@ -26,7 +26,6 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t}; #[cfg(any( target_arch = "x86", - target_arch = "le32", target_arch = "m68k", target_arch = "powerpc", target_arch = "sparc", diff --git a/library/std/src/os/net/mod.rs b/library/std/src/os/net/mod.rs index 5ec267c41e9..b7046dd7c59 100644 --- a/library/std/src/os/net/mod.rs +++ b/library/std/src/os/net/mod.rs @@ -1,4 +1,13 @@ //! OS-specific networking functionality. +// See cfg macros in `library/std/src/os/mod.rs` for why these platforms must +// be special-cased during rustdoc generation. +#[cfg(not(all( + doc, + any( + all(target_arch = "wasm32", not(target_os = "wasi")), + all(target_vendor = "fortanix", target_env = "sgx") + ) +)))] #[cfg(any(target_os = "linux", target_os = "android", doc))] pub(super) mod linux_ext; diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index b0db3112e22..e59f32af77d 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -95,13 +95,16 @@ impl Default for Hook { static HOOK: RwLock<Hook> = RwLock::new(Hook::Default); -/// Registers a custom panic hook, replacing any that was previously registered. +/// Registers a custom panic hook, replacing the previously registered hook. /// /// The panic hook is invoked when a thread panics, but before the panic runtime /// is invoked. As such, the hook will run with both the aborting and unwinding -/// runtimes. The default hook prints a message to standard error and generates -/// a backtrace if requested, but this behavior can be customized with the -/// `set_hook` and [`take_hook`] functions. +/// runtimes. +/// +/// The default hook, which is registered at startup, prints a message to standard error and +/// generates a backtrace if requested. This behavior can be customized using the `set_hook` function. +/// The current hook can be retrieved while reinstating the default hook with the [`take_hook`] +/// function. /// /// [`take_hook`]: ./fn.take_hook.html /// @@ -143,13 +146,14 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) { drop(old); } -/// Unregisters the current panic hook, returning it. 
+/// Unregisters the current panic hook and returns it, registering the default hook +/// in its place. /// /// *See also the function [`set_hook`].* /// /// [`set_hook`]: ./fn.set_hook.html /// -/// If no custom hook is registered, the default hook will be returned. +/// If the default hook is registered it will be returned, but remain registered. /// /// # Panics /// diff --git a/library/std/src/path.rs b/library/std/src/path.rs index 2f53cf83936..cd6b393a2ea 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -271,7 +271,7 @@ pub const MAIN_SEPARATOR: char = crate::sys::path::MAIN_SEP; /// The primary separator of path components for the current platform. /// /// For example, `/` on Unix and `\` on Windows. -#[stable(feature = "main_separator_str", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "main_separator_str", since = "1.68.0")] pub const MAIN_SEPARATOR_STR: &str = crate::sys::path::MAIN_SEP_STR; //////////////////////////////////////////////////////////////////////////////// @@ -1778,7 +1778,7 @@ impl ops::Deref for PathBuf { } } -#[stable(feature = "path_buf_deref_mut", since = "CURRENT_RUSTC_VERSION")] +#[stable(feature = "path_buf_deref_mut", since = "1.68.0")] impl ops::DerefMut for PathBuf { #[inline] fn deref_mut(&mut self) -> &mut Path { @@ -2688,6 +2688,7 @@ impl Path { /// escapes the path please use [`Debug`] instead. /// /// [`Display`]: fmt::Display + /// [`Debug`]: fmt::Debug /// /// # Examples /// diff --git a/library/std/src/personality/emcc.rs b/library/std/src/personality/emcc.rs index f942bdf18c1..cb52ae89b19 100644 --- a/library/std/src/personality/emcc.rs +++ b/library/std/src/personality/emcc.rs @@ -1,7 +1,7 @@ //! On Emscripten Rust panics are wrapped in C++ exceptions, so we just forward //! to `__gxx_personality_v0` which is provided by Emscripten. -use libc::c_int; +use crate::ffi::c_int; use unwind as uw; // This is required by the compiler to exist (e.g., it's a lang item), but it's diff --git a/library/std/src/personality/gcc.rs b/library/std/src/personality/gcc.rs index 5fc1b91a1c3..41c0fe725a5 100644 --- a/library/std/src/personality/gcc.rs +++ b/library/std/src/personality/gcc.rs @@ -37,7 +37,8 @@ //! and the last personality routine transfers control to the catch block. use super::dwarf::eh::{self, EHAction, EHContext}; -use libc::{c_int, uintptr_t}; +use crate::ffi::c_int; +use libc::uintptr_t; use unwind as uw; // Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister() diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs index 4a15305301d..7e85d6a063a 100644 --- a/library/std/src/sync/lazy_lock.rs +++ b/library/std/src/sync/lazy_lock.rs @@ -1,8 +1,21 @@ -use crate::cell::Cell; +use crate::cell::UnsafeCell; use crate::fmt; +use crate::mem::ManuallyDrop; use crate::ops::Deref; use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::sync::OnceLock; +use crate::sync::Once; + +use super::once::ExclusiveState; + +// We use the state of a Once as discriminant value. Upon creation, the state is +// "incomplete" and `f` contains the initialization closure. In the first call to +// `call_once`, `f` is taken and run. If it succeeds, `value` is set and the state +// is changed to "complete". If it panics, the Once is poisoned, so none of the +// two fields is initialized. +union Data<T, F> { + value: ManuallyDrop<T>, + f: ManuallyDrop<F>, +} /// A value which is initialized on the first access. 
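The clarified `set_hook`/`take_hook` contract documented in the panicking changes above can be seen directly in a short sketch:

```rust
// Hook lifecycle as documented: set_hook replaces the current hook,
// take_hook returns it while reinstating the default, and taking the
// default hook returns it but leaves it registered.
use std::panic;

fn main() {
    panic::set_hook(Box::new(|info| {
        eprintln!("custom panic hook: {info}");
    }));

    // Returns the custom hook; the default hook is registered again.
    let _custom = panic::take_hook();

    // With only the default hook registered, take_hook returns it
    // while it also remains registered.
    let _default = panic::take_hook();
}
```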
/// @@ -43,16 +56,17 @@ use crate::sync::OnceLock; /// ``` #[unstable(feature = "once_cell", issue = "74465")] pub struct LazyLock<T, F = fn() -> T> { - cell: OnceLock<T>, - init: Cell<Option<F>>, + once: Once, + data: UnsafeCell<Data<T, F>>, } + impl<T, F: FnOnce() -> T> LazyLock<T, F> { /// Creates a new lazy value with the given initializing /// function. #[inline] #[unstable(feature = "once_cell", issue = "74465")] pub const fn new(f: F) -> LazyLock<T, F> { - LazyLock { cell: OnceLock::new(), init: Cell::new(Some(f)) } + LazyLock { once: Once::new(), data: UnsafeCell::new(Data { f: ManuallyDrop::new(f) }) } } /// Forces the evaluation of this lazy value and @@ -74,10 +88,50 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> { #[inline] #[unstable(feature = "once_cell", issue = "74465")] pub fn force(this: &LazyLock<T, F>) -> &T { - this.cell.get_or_init(|| match this.init.take() { - Some(f) => f(), - None => panic!("Lazy instance has previously been poisoned"), - }) + this.once.call_once(|| { + // SAFETY: `call_once` only runs this closure once, ever. + let data = unsafe { &mut *this.data.get() }; + let f = unsafe { ManuallyDrop::take(&mut data.f) }; + let value = f(); + data.value = ManuallyDrop::new(value); + }); + + // SAFETY: + // There are four possible scenarios: + // * the closure was called and initialized `value`. + // * the closure was called and panicked, so this point is never reached. + // * the closure was not called, but a previous call initialized `value`. + // * the closure was not called because the Once is poisoned, so this point + // is never reached. + // So `value` has definitely been initialized and will not be modified again. + unsafe { &*(*this.data.get()).value } + } +} + +impl<T, F> LazyLock<T, F> { + /// Get the inner value if it has already been initialized. + fn get(&self) -> Option<&T> { + if self.once.is_completed() { + // SAFETY: + // The closure has been run successfully, so `value` has been initialized + // and will not be modified again. + Some(unsafe { &*(*self.data.get()).value }) + } else { + None + } + } +} + +#[unstable(feature = "once_cell", issue = "74465")] +impl<T, F> Drop for LazyLock<T, F> { + fn drop(&mut self) { + match self.once.state() { + ExclusiveState::Incomplete => unsafe { ManuallyDrop::drop(&mut self.data.get_mut().f) }, + ExclusiveState::Complete => unsafe { + ManuallyDrop::drop(&mut self.data.get_mut().value) + }, + ExclusiveState::Poisoned => {} + } } } @@ -103,23 +157,23 @@ impl<T: Default> Default for LazyLock<T> { #[unstable(feature = "once_cell", issue = "74465")] impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Lazy").field("cell", &self.cell).finish_non_exhaustive() + match self.get() { + Some(v) => f.debug_tuple("LazyLock").field(v).finish(), + None => f.write_str("LazyLock(Uninit)"), + } } } // We never create a `&F` from a `&LazyLock<T, F>` so it is fine // to not impl `Sync` for `F` -// we do create a `&mut Option<F>` in `force`, but this is -// properly synchronized, so it only happens once -// so it also does not contribute to this impl. #[unstable(feature = "once_cell", issue = "74465")] -unsafe impl<T, F: Send> Sync for LazyLock<T, F> where OnceLock<T>: Sync {} +unsafe impl<T: Sync + Send, F: Send> Sync for LazyLock<T, F> {} // auto-derived `Send` impl is OK. 
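A brief usage sketch of the semantics this rewrite preserves (nightly-only at this point, under the `once_cell` feature): initialization runs exactly once, and with the new union layout the initialization closure is dropped as soon as it has produced the value.

```rust
#![feature(once_cell)] // LazyLock was still unstable at this point

use std::sync::LazyLock;

// The closure runs on first access, from whichever thread gets there first.
static GREETING: LazyLock<String> = LazyLock::new(|| {
    println!("initializing");
    String::from("hello")
});

fn main() {
    assert_eq!(GREETING.len(), 5); // prints "initializing", then caches
    assert_eq!(&*GREETING, "hello"); // already initialized; no second run
}
```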
#[unstable(feature = "once_cell", issue = "74465")] -impl<T, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> where OnceLock<T>: RefUnwindSafe {} +impl<T: RefUnwindSafe + UnwindSafe, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> {} #[unstable(feature = "once_cell", issue = "74465")] -impl<T, F: UnwindSafe> UnwindSafe for LazyLock<T, F> where OnceLock<T>: UnwindSafe {} +impl<T: UnwindSafe, F: UnwindSafe> UnwindSafe for LazyLock<T, F> {} #[cfg(test)] mod tests; diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs index ba20bab87a4..4edc956173b 100644 --- a/library/std/src/sync/mod.rs +++ b/library/std/src/sync/mod.rs @@ -186,7 +186,7 @@ mod condvar; mod lazy_lock; mod mpmc; mod mutex; -mod once; +pub(crate) mod once; mod once_lock; mod poison; mod remutex; diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs index 0f25417d6b5..1b17c31089f 100644 --- a/library/std/src/sync/once.rs +++ b/library/std/src/sync/once.rs @@ -43,6 +43,12 @@ pub struct OnceState { pub(crate) inner: sys::OnceState, } +pub(crate) enum ExclusiveState { + Incomplete, + Poisoned, + Complete, +} + /// Initialization value for static [`Once`] values. /// /// # Examples @@ -248,6 +254,16 @@ impl Once { pub fn is_completed(&self) -> bool { self.inner.is_completed() } + + /// Returns the current state of the `Once` instance. + /// + /// Since this takes a mutable reference, no initialization can currently + /// be running, so the state must be either "incomplete", "poisoned" or + /// "complete". + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + self.inner.state() + } } #[stable(feature = "std_debug", since = "1.16.0")] diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs index 3edbe728077..403a5e627f1 100644 --- a/library/std/src/sys/common/alloc.rs +++ b/library/std/src/sys/common/alloc.rs @@ -7,6 +7,7 @@ use crate::ptr; #[cfg(any( target_arch = "x86", target_arch = "arm", + target_arch = "m68k", target_arch = "mips", target_arch = "powerpc", target_arch = "powerpc64", diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs index 6811fadb018..20fd3dd8f09 100644 --- a/library/std/src/sys/hermit/mod.rs +++ b/library/std/src/sys/hermit/mod.rs @@ -72,11 +72,6 @@ pub fn unsupported_err() -> crate::io::Error { ) } -#[no_mangle] -pub extern "C" fn floor(x: f64) -> f64 { - unsafe { intrinsics::floorf64(x) } -} - pub fn abort_internal() -> ! { unsafe { abi::abort(); diff --git a/library/std/src/sys/itron/thread_parking.rs b/library/std/src/sys/itron/thread_parking.rs new file mode 100644 index 00000000000..fe9934439d1 --- /dev/null +++ b/library/std/src/sys/itron/thread_parking.rs @@ -0,0 +1,37 @@ +use super::abi; +use super::error::expect_success_aborting; +use super::time::with_tmos; +use crate::time::Duration; + +pub type ThreadId = abi::ID; + +pub use super::task::current_task_id_aborting as current; + +pub fn park(_hint: usize) { + match unsafe { abi::slp_tsk() } { + abi::E_OK | abi::E_RLWAI => {} + err => { + expect_success_aborting(err, &"slp_tsk"); + } + } +} + +pub fn park_timeout(dur: Duration, _hint: usize) { + match with_tmos(dur, |tmo| unsafe { abi::tslp_tsk(tmo) }) { + abi::E_OK | abi::E_RLWAI | abi::E_TMOUT => {} + err => { + expect_success_aborting(err, &"tslp_tsk"); + } + } +} + +pub fn unpark(id: ThreadId, _hint: usize) { + match unsafe { abi::wup_tsk(id) } { + // It is allowed to try to wake up a destroyed or unrelated task, so we ignore all + // errors that could result from that situation. 
+ abi::E_OK | abi::E_NOEXS | abi::E_OBJ | abi::E_QOVR => {} + err => { + expect_success_aborting(err, &"wup_tsk"); + } + } +} diff --git a/library/std/src/sys/itron/wait_flag.rs b/library/std/src/sys/itron/wait_flag.rs deleted file mode 100644 index e432edd2077..00000000000 --- a/library/std/src/sys/itron/wait_flag.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::mem::MaybeUninit; -use crate::time::Duration; - -use super::{ - abi, - error::{expect_success, fail}, - time::with_tmos, -}; - -const CLEAR: abi::FLGPTN = 0; -const RAISED: abi::FLGPTN = 1; - -/// A thread parking primitive that is not susceptible to race conditions, -/// but provides no atomic ordering guarantees and allows only one `raise` per wait. -pub struct WaitFlag { - flag: abi::ID, -} - -impl WaitFlag { - /// Creates a new wait flag. - pub fn new() -> WaitFlag { - let flag = expect_success( - unsafe { - abi::acre_flg(&abi::T_CFLG { - flgatr: abi::TA_FIFO | abi::TA_WSGL | abi::TA_CLR, - iflgptn: CLEAR, - }) - }, - &"acre_flg", - ); - - WaitFlag { flag } - } - - /// Wait for the wait flag to be raised. - pub fn wait(&self) { - let mut token = MaybeUninit::uninit(); - expect_success( - unsafe { abi::wai_flg(self.flag, RAISED, abi::TWF_ORW, token.as_mut_ptr()) }, - &"wai_flg", - ); - } - - /// Wait for the wait flag to be raised or the timeout to occur. - /// - /// Returns whether the flag was raised (`true`) or the operation timed out (`false`). - pub fn wait_timeout(&self, dur: Duration) -> bool { - let mut token = MaybeUninit::uninit(); - let res = with_tmos(dur, |tmout| unsafe { - abi::twai_flg(self.flag, RAISED, abi::TWF_ORW, token.as_mut_ptr(), tmout) - }); - - match res { - abi::E_OK => true, - abi::E_TMOUT => false, - error => fail(error, &"twai_flg"), - } - } - - /// Raise the wait flag. - /// - /// Calls to this function should be balanced with the number of successful waits. - pub fn raise(&self) { - expect_success(unsafe { abi::set_flg(self.flag, RAISED) }, &"set_flg"); - } -} - -impl Drop for WaitFlag { - fn drop(&mut self) { - expect_success(unsafe { abi::del_flg(self.flag) }, &"del_flg"); - } -} diff --git a/library/std/src/sys/solid/mod.rs b/library/std/src/sys/solid/mod.rs index 5867979a2a7..923d27fd936 100644 --- a/library/std/src/sys/solid/mod.rs +++ b/library/std/src/sys/solid/mod.rs @@ -13,9 +13,9 @@ mod itron { pub(super) mod spin; pub(super) mod task; pub mod thread; + pub mod thread_parking; pub(super) mod time; use super::unsupported; - pub mod wait_flag; } pub mod alloc; @@ -43,8 +43,8 @@ pub use self::itron::thread; pub mod memchr; pub mod thread_local_dtor; pub mod thread_local_key; +pub use self::itron::thread_parking; pub mod time; -pub use self::itron::wait_flag; mod rwlock; diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs index a342f0f5e85..a5ce6d5120d 100644 --- a/library/std/src/sys/unix/args.rs +++ b/library/std/src/sys/unix/args.rs @@ -141,12 +141,28 @@ mod imp { // list. let argv = ARGV.load(Ordering::Relaxed); let argc = if argv.is_null() { 0 } else { ARGC.load(Ordering::Relaxed) }; - (0..argc) - .map(|i| { - let cstr = CStr::from_ptr(*argv.offset(i) as *const libc::c_char); - OsStringExt::from_vec(cstr.to_bytes().to_vec()) - }) - .collect() + let mut args = Vec::with_capacity(argc as usize); + for i in 0..argc { + let ptr = *argv.offset(i) as *const libc::c_char; + + // Some C commandline parsers (e.g. GLib and Qt) are replacing already + // handled arguments in `argv` with `NULL` and move them to the end. 
That + // means that `argc` might be bigger than the actual number of non-`NULL` + // pointers in `argv` at this point. + // + // To handle this we simply stop iterating at the first `NULL` argument. + // + // `argv` is also guaranteed to be `NULL`-terminated so any non-`NULL` arguments + // after the first `NULL` can safely be ignored. + if ptr.is_null() { + break; + } + + let cstr = CStr::from_ptr(ptr); + args.push(OsStringExt::from_vec(cstr.to_bytes().to_vec())); + } + + args } } } diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs index dbaa3c33e2e..66c33d58d6c 100644 --- a/library/std/src/sys/unix/fd.rs +++ b/library/std/src/sys/unix/fd.rs @@ -197,11 +197,6 @@ impl FileDesc { } } - #[cfg(target_os = "linux")] - pub fn get_cloexec(&self) -> io::Result<bool> { - unsafe { Ok((cvt(libc::fcntl(self.as_raw_fd(), libc::F_GETFD))? & libc::FD_CLOEXEC) != 0) } - } - #[cfg(not(any( target_env = "newlib", target_os = "solaris", @@ -284,6 +279,10 @@ impl<'a> Read for &'a FileDesc { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { (**self).read(buf) } + + fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> { + (**self).read_buf(cursor) + } } impl AsInner<OwnedFd> for FileDesc { diff --git a/library/std/src/sys/unsupported/once.rs b/library/std/src/sys/unsupported/once.rs index b4bb4975f41..11fde1888ba 100644 --- a/library/std/src/sys/unsupported/once.rs +++ b/library/std/src/sys/unsupported/once.rs @@ -1,5 +1,6 @@ use crate::cell::Cell; use crate::sync as public; +use crate::sync::once::ExclusiveState; pub struct Once { state: Cell<State>, @@ -44,6 +45,16 @@ impl Once { self.state.get() == State::Complete } + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match self.state.get() { + State::Incomplete => ExclusiveState::Incomplete, + State::Poisoned => ExclusiveState::Poisoned, + State::Complete => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + #[cold] #[track_caller] pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs index f5513e9996d..9919dc7087e 100644 --- a/library/std/src/sys/wasi/os.rs +++ b/library/std/src/sys/wasi/os.rs @@ -21,6 +21,7 @@ mod libc { extern "C" { pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; pub fn chdir(dir: *const c_char) -> c_int; + pub fn __wasilibc_get_environ() -> *mut *mut c_char; } } @@ -161,7 +162,12 @@ impl Iterator for Env { pub fn env() -> Env { unsafe { let _guard = env_read_lock(); - let mut environ = libc::environ; + + // Use `__wasilibc_get_environ` instead of `environ` here so that we + // don't require wasi-libc to eagerly initialize the environment + // variables. + let mut environ = libc::__wasilibc_get_environ(); + let mut result = Vec::new(); if !environ.is_null() { while !(*environ).is_null() { diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs index 6741ae46d32..30356fa8519 100644 --- a/library/std/src/sys/windows/args.rs +++ b/library/std/src/sys/windows/args.rs @@ -270,7 +270,7 @@ pub(crate) fn make_bat_command_line( // It is necessary to surround the command in an extra pair of quotes, // hence the trailing quote here. It will be closed after all arguments // have been added. - let mut cmd: Vec<u16> = "cmd.exe /c \"".encode_utf16().collect(); + let mut cmd: Vec<u16> = "cmd.exe /d /c \"".encode_utf16().collect(); // Push the script name surrounded by its quote pair. 
cmd.push(b'"' as u16); @@ -290,6 +290,15 @@ pub(crate) fn make_bat_command_line( // reconstructed by the batch script by default. for arg in args { cmd.push(' ' as u16); + // Make sure to always quote special command prompt characters, including: + // * Characters that `cmd /?` says require quotes. + // * `%` for environment variables, as in `%TMP%`. + // * `|<>` pipe/redirect characters. + const SPECIAL: &[u8] = b"\t &()[]{}^=;!'+,`~%|<>"; + let force_quotes = match arg { + Arg::Regular(arg) if !force_quotes => arg.bytes().iter().any(|c| SPECIAL.contains(c)), + _ => force_quotes, + }; append_arg(&mut cmd, arg, force_quotes)?; } diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs index 81461de4f72..1d0ab772739 100644 --- a/library/std/src/sys/windows/c.rs +++ b/library/std/src/sys/windows/c.rs @@ -295,8 +295,6 @@ pub fn nt_success(status: NTSTATUS) -> bool { status >= 0 } -// "RNG\0" -pub const BCRYPT_RNG_ALGORITHM: &[u16] = &[b'R' as u16, b'N' as u16, b'G' as u16, 0]; pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002; #[repr(C)] @@ -541,14 +539,6 @@ pub struct SYMBOLIC_LINK_REPARSE_BUFFER { pub PathBuffer: WCHAR, } -/// NB: Use carefully! In general using this as a reference is likely to get the -/// provenance wrong for the `PathBuffer` field! -#[repr(C)] -pub struct FILE_NAME_INFO { - pub FileNameLength: DWORD, - pub FileName: [WCHAR; 1], -} - #[repr(C)] pub struct MOUNT_POINT_REPARSE_BUFFER { pub SubstituteNameOffset: c_ushort, @@ -834,6 +824,10 @@ if #[cfg(not(target_vendor = "uwp"))] { #[link(name = "advapi32")] extern "system" { + // Forbidden when targeting UWP + #[link_name = "SystemFunction036"] + pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN; + // Allowed but unused by UWP pub fn OpenProcessToken( ProcessHandle: HANDLE, @@ -1258,13 +1252,6 @@ extern "system" { cbBuffer: ULONG, dwFlags: ULONG, ) -> NTSTATUS; - pub fn BCryptOpenAlgorithmProvider( - phalgorithm: *mut BCRYPT_ALG_HANDLE, - pszAlgId: LPCWSTR, - pszimplementation: LPCWSTR, - dwflags: ULONG, - ) -> NTSTATUS; - pub fn BCryptCloseAlgorithmProvider(hAlgorithm: BCRYPT_ALG_HANDLE, dwFlags: ULONG) -> NTSTATUS; } // Functions that aren't available on every version of Windows that we support, diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs index 37809803828..d2c597664fa 100644 --- a/library/std/src/sys/windows/fs.rs +++ b/library/std/src/sys/windows/fs.rs @@ -1266,7 +1266,12 @@ fn metadata(path: &Path, reparse: ReparsePoint) -> io::Result<FileAttr> { // If the fallback fails for any reason we return the original error. match File::open(path, &opts) { Ok(file) => file.file_attr(), - Err(e) if e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as _) => { + Err(e) + if [Some(c::ERROR_SHARING_VIOLATION as _), Some(c::ERROR_ACCESS_DENIED as _)]
 .contains(&e.raw_os_error()) => + { + // `ERROR_ACCESS_DENIED` is returned when the user doesn't have permission for the resource. + // One such example is `System Volume Information`, which exists by default but can also be created by users. // `ERROR_SHARING_VIOLATION` will almost never be returned. // Usually if a file is locked you can still read some metadata.
// However, there are special system files, such as @@ -1393,6 +1398,8 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> { let mut data = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]); let data_ptr = data.0.as_mut_ptr(); let db = data_ptr.cast::<c::REPARSE_MOUNTPOINT_DATA_BUFFER>(); + // Zero the header to ensure it's fully initialized, including reserved parameters. + *db = mem::zeroed(); let buf = ptr::addr_of_mut!((*db).ReparseTarget).cast::<c::WCHAR>(); let mut i = 0; // FIXME: this conversion is very hacky diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs index 2cc34c986b9..7fdd1f702e2 100644 --- a/library/std/src/sys/windows/io.rs +++ b/library/std/src/sys/windows/io.rs @@ -2,8 +2,7 @@ use crate::marker::PhantomData; use crate::mem::size_of; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle}; use crate::slice; -use crate::sys::{c, Align8}; -use core; +use crate::sys::c; use libc; #[derive(Copy, Clone)] @@ -125,22 +124,33 @@ unsafe fn msys_tty_on(handle: c::HANDLE) -> bool { return false; } - const SIZE: usize = size_of::<c::FILE_NAME_INFO>() + c::MAX_PATH * size_of::<c::WCHAR>(); - let mut name_info_bytes = Align8([0u8; SIZE]); + /// Mirrors [`FILE_NAME_INFO`], giving it a fixed length that we can stack + /// allocate + /// + /// [`FILE_NAME_INFO`]: https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_name_info + #[repr(C)] + #[allow(non_snake_case)] + struct FILE_NAME_INFO { + FileNameLength: u32, + FileName: [u16; c::MAX_PATH as usize], + } + let mut name_info = FILE_NAME_INFO { FileNameLength: 0, FileName: [0; c::MAX_PATH as usize] }; + // Safety: buffer length is fixed. let res = c::GetFileInformationByHandleEx( handle, c::FileNameInfo, - name_info_bytes.0.as_mut_ptr() as *mut libc::c_void, - SIZE as u32, + &mut name_info as *mut _ as *mut libc::c_void, + size_of::<FILE_NAME_INFO>() as u32, ); if res == 0 { return false; } - let name_info: &c::FILE_NAME_INFO = &*(name_info_bytes.0.as_ptr() as *const c::FILE_NAME_INFO); - let name_len = name_info.FileNameLength as usize / 2; - // Offset to get the `FileName` field. - let name_ptr = name_info_bytes.0.as_ptr().offset(size_of::<c::DWORD>() as isize).cast::<u16>(); - let s = core::slice::from_raw_parts(name_ptr, name_len); + + // Use `get` because `FileNameLength` can be out of range. + let s = match name_info.FileName.get(..name_info.FileNameLength as usize / 2) { + None => return false, + Some(s) => s, + }; let name = String::from_utf16_lossy(s); // Get the file name only. let name = name.rsplit('\\').next().unwrap_or(&name); diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs index b5a49489d3f..cdf37cfe911 100644 --- a/library/std/src/sys/windows/rand.rs +++ b/library/std/src/sys/windows/rand.rs @@ -1,106 +1,39 @@ -//! # Random key generation -//! -//! This module wraps the RNG provided by the OS. There are a few different -//! ways to interface with the OS RNG so it's worth exploring each of the options. -//! Note that at the time of writing these all go through the (undocumented) -//! `bcryptPrimitives.dll` but they use different route to get there. -//! -//! Originally we were using [`RtlGenRandom`], however that function is -//! deprecated and warns it "may be altered or unavailable in subsequent versions". -//! -//! So we switched to [`BCryptGenRandom`] with the `BCRYPT_USE_SYSTEM_PREFERRED_RNG` -//! flag to query and find the system configured RNG. 
However, this change caused a small -//! but significant number of users to experience panics caused by a failure of -//! this function. See [#94098]. -//! -//! The current version falls back to using `BCryptOpenAlgorithmProvider` if -//! `BCRYPT_USE_SYSTEM_PREFERRED_RNG` fails for any reason. -//! -//! [#94098]: https://github.com/rust-lang/rust/issues/94098 -//! [`RtlGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom -//! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom +use crate::io; use crate::mem; use crate::ptr; use crate::sys::c; -/// Generates high quality secure random keys for use by [`HashMap`]. -/// -/// This is used to seed the default [`RandomState`]. -/// -/// [`HashMap`]: crate::collections::HashMap -/// [`RandomState`]: crate::collections::hash_map::RandomState pub fn hashmap_random_keys() -> (u64, u64) { - Rng::SYSTEM.gen_random_keys().unwrap_or_else(fallback_rng) + let mut v = (0, 0); + let ret = unsafe { + c::BCryptGenRandom( + ptr::null_mut(), + &mut v as *mut _ as *mut u8, + mem::size_of_val(&v) as c::ULONG, + c::BCRYPT_USE_SYSTEM_PREFERRED_RNG, + ) + }; + if c::nt_success(ret) { v } else { fallback_rng() } } -struct Rng { - algorithm: c::BCRYPT_ALG_HANDLE, - flags: u32, -} -impl Rng { - const SYSTEM: Self = unsafe { Self::new(ptr::null_mut(), c::BCRYPT_USE_SYSTEM_PREFERRED_RNG) }; - - /// Create the RNG from an existing algorithm handle. - /// - /// # Safety - /// - /// The handle must either be null or a valid algorithm handle. - const unsafe fn new(algorithm: c::BCRYPT_ALG_HANDLE, flags: u32) -> Self { - Self { algorithm, flags } - } - - /// Open a handle to the RNG algorithm. - fn open() -> Result<Self, c::NTSTATUS> { - use crate::sync::atomic::AtomicPtr; - use crate::sync::atomic::Ordering::{Acquire, Release}; - - // An atomic is used so we don't need to reopen the handle every time. - static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(ptr::null_mut()); - - let mut handle = HANDLE.load(Acquire); - if handle.is_null() { - let status = unsafe { - c::BCryptOpenAlgorithmProvider( - &mut handle, - c::BCRYPT_RNG_ALGORITHM.as_ptr(), - ptr::null(), - 0, - ) - }; - if c::nt_success(status) { - // If another thread opens a handle first then use that handle instead. - let result = HANDLE.compare_exchange(ptr::null_mut(), handle, Release, Acquire); - if let Err(previous_handle) = result { - // Close our handle and return the previous one. 
- unsafe { c::BCryptCloseAlgorithmProvider(handle, 0) }; - handle = previous_handle; - } - Ok(unsafe { Self::new(handle, 0) }) - } else { - Err(status) - } - } else { - Ok(unsafe { Self::new(handle, 0) }) - } - } +/// Generate random numbers using the fallback RNG function (RtlGenRandom) +/// +/// This is necessary because of a failure to load the SysWOW64 variant of the +/// bcryptprimitives.dll library from code that lives in bcrypt.dll +/// See <https://bugzilla.mozilla.org/show_bug.cgi?id=1788004#c9> +#[cfg(not(target_vendor = "uwp"))] +#[inline(never)] +fn fallback_rng() -> (u64, u64) { + let mut v = (0, 0); + let ret = + unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) }; - fn gen_random_keys(self) -> Result<(u64, u64), c::NTSTATUS> { - let mut v = (0, 0); - let status = unsafe { - let size = mem::size_of_val(&v).try_into().unwrap(); - c::BCryptGenRandom(self.algorithm, ptr::addr_of_mut!(v).cast(), size, self.flags) - }; - if c::nt_success(status) { Ok(v) } else { Err(status) } - } + if ret != 0 { v } else { panic!("fallback RNG broken: {}", io::Error::last_os_error()) } } -/// Generate random numbers using the fallback RNG function +/// We can't use RtlGenRandom with UWP, so there is no fallback +#[cfg(target_vendor = "uwp")] #[inline(never)] -fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) { - match Rng::open().and_then(|rng| rng.gen_random_keys()) { - Ok(keys) => keys, - Err(status) => { - panic!("RNG broken: {rng_status:#x}, fallback RNG broken: {status:#x}") - } - } +fn fallback_rng() -> (u64, u64) { + panic!("fallback RNG broken: RtlGenRandom() not supported on UWP"); } diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs index 70c9b14a08f..c2cd48470bd 100644 --- a/library/std/src/sys/windows/stdio.rs +++ b/library/std/src/sys/windows/stdio.rs @@ -1,6 +1,5 @@ #![unstable(issue = "none", feature = "windows_stdio")] -use crate::char::decode_utf16; use crate::cmp; use crate::io; use crate::mem::MaybeUninit; @@ -369,7 +368,7 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit<u16>]) -> io::Result<usiz #[allow(unused)] fn utf16_to_utf8(utf16: &[u16], utf8: &mut [u8]) -> io::Result<usize> { let mut written = 0; - for chr in decode_utf16(utf16.iter().cloned()) { + for chr in char::decode_utf16(utf16.iter().cloned()) { match chr { Ok(chr) => { chr.encode_utf8(&mut utf8[written..]); diff --git a/library/std/src/sys/windows/thread_parking.rs b/library/std/src/sys/windows/thread_parking.rs index 5d43676adbb..eb9167cd855 100644 --- a/library/std/src/sys/windows/thread_parking.rs +++ b/library/std/src/sys/windows/thread_parking.rs @@ -221,7 +221,7 @@ impl Parker { fn keyed_event_handle() -> c::HANDLE { const INVALID: c::HANDLE = ptr::invalid_mut(!0); - static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID); + static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(INVALID); match HANDLE.load(Relaxed) { INVALID => { let mut handle = c::INVALID_HANDLE_VALUE; diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs index fad4a63331b..2c38dfecf97 100644 --- a/library/std/src/sys_common/net.rs +++ b/library/std/src/sys_common/net.rs @@ -14,7 +14,7 @@ use crate::sys::net::{cvt, cvt_gai, cvt_r, init, wrlen_t, Socket}; use crate::sys_common::{AsInner, FromInner, IntoInner}; use crate::time::Duration; -use libc::{c_int, c_void}; +use crate::ffi::{c_int, c_void}; cfg_if::cfg_if! { if #[cfg(any( @@ -47,7 +47,7 @@ cfg_if::cfg_if! 
{ target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd", target_os = "netbsd", target_os = "solaris", target_os = "illumos"))] { - use libc::c_uchar; + use crate::ffi::c_uchar; type IpV4MultiCastType = c_uchar; } else { type IpV4MultiCastType = c_int; @@ -127,8 +127,8 @@ fn to_ipv6mr_interface(value: u32) -> c_int { } #[cfg(not(target_os = "android"))] -fn to_ipv6mr_interface(value: u32) -> libc::c_uint { - value as libc::c_uint +fn to_ipv6mr_interface(value: u32) -> crate::ffi::c_uint { + value as crate::ffi::c_uint } //////////////////////////////////////////////////////////////////////////////// diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs index 5c7e6c01371..42db5fad4b4 100644 --- a/library/std/src/sys_common/once/futex.rs +++ b/library/std/src/sys_common/once/futex.rs @@ -4,6 +4,7 @@ use crate::sync::atomic::{ AtomicU32, Ordering::{Acquire, Relaxed, Release}, }; +use crate::sync::once::ExclusiveState; use crate::sys::futex::{futex_wait, futex_wake_all}; // On some platforms, the OS is very nice and handles the waiter queue for us. @@ -78,6 +79,16 @@ impl Once { self.state.load(Acquire) == COMPLETE } + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match *self.state.get_mut() { + INCOMPLETE => ExclusiveState::Incomplete, + POISONED => ExclusiveState::Poisoned, + COMPLETE => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + // This uses FnMut to match the API of the generic implementation. As this // implementation is quite light-weight, it is generic over the closure and // so avoids the cost of dynamic dispatch. diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs index d953a674592..def0bcd6fac 100644 --- a/library/std/src/sys_common/once/queue.rs +++ b/library/std/src/sys_common/once/queue.rs @@ -60,6 +60,7 @@ use crate::fmt; use crate::ptr; use crate::sync as public; use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use crate::sync::once::ExclusiveState; use crate::thread::{self, Thread}; type Masked = (); @@ -121,6 +122,16 @@ impl Once { self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE } + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match self.state_and_queue.get_mut().addr() { + INCOMPLETE => ExclusiveState::Incomplete, + POISONED => ExclusiveState::Poisoned, + COMPLETE => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + // This is a non-generic function to reduce the monomorphization cost of // using `call_once` (this isn't exactly a trivial or small implementation). // diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs index e98169597c3..575988ec760 100644 --- a/library/std/src/sys_common/thread_parking/id.rs +++ b/library/std/src/sys_common/thread_parking/id.rs @@ -60,7 +60,7 @@ impl Parker { if state == PARKED { // Loop to guard against spurious wakeups. while state == PARKED { - park(self.state.as_mut_ptr().addr()); + park(self.state.as_ptr().addr()); state = self.state.load(Acquire); } @@ -76,7 +76,7 @@ impl Parker { let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1); if state == PARKED { - park_timeout(dur, self.state.as_mut_ptr().addr()); + park_timeout(dur, self.state.as_ptr().addr()); // Swap to ensure that we observe all state changes with acquire // ordering, even if the state has been changed after the timeout // occured. 
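All three `Once` back-ends (the futex one above, the queue one, and the unsupported fallback) implement the new `state(&mut self)` the same way: exclusive access guarantees that no initialization is in flight, so the state word can be read without any atomic ordering. A rough standalone sketch of the futex-style variant follows; the constants and field layout are illustrative, not the exact internals:

use std::sync::atomic::{AtomicU32, Ordering};

const INCOMPLETE: u32 = 0;
const POISONED: u32 = 1;
const COMPLETE: u32 = 2;

enum ExclusiveState {
    Incomplete,
    Poisoned,
    Complete,
}

struct Once {
    state: AtomicU32,
}

impl Once {
    // Shared access: other threads may be initializing concurrently,
    // so an acquire load is required to observe the initialized data.
    fn is_completed(&self) -> bool {
        self.state.load(Ordering::Acquire) == COMPLETE
    }

    // Exclusive access: `&mut self` proves no initialization is running,
    // so a plain read through `get_mut` needs no atomic ordering at all.
    fn state(&mut self) -> ExclusiveState {
        match *self.state.get_mut() {
            INCOMPLETE => ExclusiveState::Incomplete,
            POISONED => ExclusiveState::Poisoned,
            COMPLETE => ExclusiveState::Complete,
            _ => unreachable!("invalid Once state"),
        }
    }
}

This is the property that lets `LazyLock`'s new `Drop` impl dispatch on the state without any synchronization.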
@@ -99,7 +99,7 @@ impl Parker { // and terminated before this call is made. This call then returns an // error or wakes up an unrelated thread. The platform API and // environment does allow this, however. - unpark(tid, self.state.as_mut_ptr().addr()); + unpark(tid, self.state.as_ptr().addr()); } } } diff --git a/library/std/src/sys_common/thread_parking/mod.rs b/library/std/src/sys_common/thread_parking/mod.rs index 0ead6633c35..e8e028bb330 100644 --- a/library/std/src/sys_common/thread_parking/mod.rs +++ b/library/std/src/sys_common/thread_parking/mod.rs @@ -14,12 +14,10 @@ cfg_if::cfg_if! { } else if #[cfg(any( target_os = "netbsd", all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "solid_asp3", ))] { mod id; pub use id::Parker; - } else if #[cfg(target_os = "solid_asp3")] { - mod wait_flag; - pub use wait_flag::Parker; } else if #[cfg(any(windows, target_family = "unix"))] { pub use crate::sys::thread_parking::Parker; } else { diff --git a/library/std/src/sys_common/thread_parking/wait_flag.rs b/library/std/src/sys_common/thread_parking/wait_flag.rs deleted file mode 100644 index d0f8899a94e..00000000000 --- a/library/std/src/sys_common/thread_parking/wait_flag.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! A wait-flag-based thread parker. -//! -//! Some operating systems provide low-level parking primitives like wait counts, -//! event flags or semaphores which are not susceptible to race conditions (meaning -//! the wakeup can occur before the wait operation). To implement the `std` thread -//! parker on top of these primitives, we only have to ensure that parking is fast -//! when the thread token is available, the atomic ordering guarantees are maintained -//! and spurious wakeups are minimized. -//! -//! To achieve this, this parker uses an atomic variable with three states: `EMPTY`, -//! `PARKED` and `NOTIFIED`: -//! * `EMPTY` means the token has not been made available, but the thread is not -//! currently waiting on it. -//! * `PARKED` means the token is not available and the thread is parked. -//! * `NOTIFIED` means the token is available. -//! -//! `park` and `park_timeout` change the state from `EMPTY` to `PARKED` and from -//! `NOTIFIED` to `EMPTY`. If the state was `NOTIFIED`, the thread was unparked and -//! execution can continue without calling into the OS. If the state was `EMPTY`, -//! the token is not available and the thread waits on the primitive (here called -//! "wait flag"). -//! -//! `unpark` changes the state to `NOTIFIED`. If the state was `PARKED`, the thread -//! is or will be sleeping on the wait flag, so we raise it. - -use crate::pin::Pin; -use crate::sync::atomic::AtomicI8; -use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release}; -use crate::sys::wait_flag::WaitFlag; -use crate::time::Duration; - -const EMPTY: i8 = 0; -const PARKED: i8 = -1; -const NOTIFIED: i8 = 1; - -pub struct Parker { - state: AtomicI8, - wait_flag: WaitFlag, -} - -impl Parker { - /// Construct a parker for the current thread. The UNIX parker - /// implementation requires this to happen in-place. - pub unsafe fn new_in_place(parker: *mut Parker) { - parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() }) - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. 
- pub unsafe fn park(self: Pin<&Self>) { - match self.state.fetch_sub(1, Acquire) { - // NOTIFIED => EMPTY - NOTIFIED => return, - // EMPTY => PARKED - EMPTY => (), - _ => panic!("inconsistent park state"), - } - - // Avoid waking up from spurious wakeups (these are quite likely, see below). - loop { - self.wait_flag.wait(); - - match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) { - Ok(_) => return, - Err(PARKED) => (), - Err(_) => panic!("inconsistent park state"), - } - } - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. - pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { - match self.state.fetch_sub(1, Acquire) { - NOTIFIED => return, - EMPTY => (), - _ => panic!("inconsistent park state"), - } - - self.wait_flag.wait_timeout(dur); - - // Either a wakeup or a timeout occurred. Wakeups may be spurious, as there can be - // a race condition when `unpark` is performed between receiving the timeout and - // resetting the state, resulting in the eventflag being set unnecessarily. `park` - // is protected against this by looping until the token is actually given, but - // here we cannot easily tell. - - // Use `swap` to provide acquire ordering. - match self.state.swap(EMPTY, Acquire) { - NOTIFIED => (), - PARKED => (), - _ => panic!("inconsistent park state"), - } - } - - // This implementation doesn't require `Pin`, but other implementations do. - pub fn unpark(self: Pin<&Self>) { - let state = self.state.swap(NOTIFIED, Release); - - if state == PARKED { - self.wait_flag.raise(); - } - } -} diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs index dd53767d452..e202d17e1c2 100644 --- a/library/std/src/sys_common/wtf8.rs +++ b/library/std/src/sys_common/wtf8.rs @@ -18,10 +18,10 @@ #[cfg(test)] mod tests; +use core::char::{encode_utf16_raw, encode_utf8_raw}; use core::str::next_code_point; use crate::borrow::Cow; -use crate::char; use crate::collections::TryReserveError; use crate::fmt; use crate::hash::{Hash, Hasher}; @@ -235,7 +235,7 @@ impl Wtf8Buf { /// This does **not** include the WTF-8 concatenation check or `is_known_utf8` check. fn push_code_point_unchecked(&mut self, code_point: CodePoint) { let mut bytes = [0; 4]; - let bytes = char::encode_utf8_raw(code_point.value, &mut bytes); + let bytes = encode_utf8_raw(code_point.value, &mut bytes); self.bytes.extend_from_slice(bytes) } @@ -939,7 +939,7 @@ impl<'a> Iterator for EncodeWide<'a> { let mut buf = [0; 2]; self.code_points.next().map(|code_point| { - let n = char::encode_utf16_raw(code_point.value, &mut buf).len(); + let n = encode_utf16_raw(code_point.value, &mut buf).len(); if n == 2 { self.extra = buf[1]; } diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 692ff0cbca6..489af776798 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -124,8 +124,10 @@ //! //! ## Stack size //! -//! The default stack size is platform-dependent and subject to change. Currently it is 2MB on all -//! Tier-1 platforms. There are two ways to manually specify the stack size for spawned threads: +//! The default stack size is platform-dependent and subject to change. +//! Currently, it is 2 MiB on all Tier-1 platforms. +//! +//! There are two ways to manually specify the stack size for spawned threads: //! //! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`]. //! 
* Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack diff --git a/library/std/src/time.rs b/library/std/src/time.rs index ecd06ebf743..5c2e9da70fb 100644 --- a/library/std/src/time.rs +++ b/library/std/src/time.rs @@ -1,6 +1,6 @@ //! Temporal quantification. //! -//! # Examples: +//! # Examples //! //! There are multiple ways to create a new [`Duration`]: //! @@ -352,7 +352,7 @@ impl Instant { self.checked_duration_since(earlier).unwrap_or_default() } - /// Returns the amount of time elapsed since this instant was created. + /// Returns the amount of time elapsed since this instant. /// /// # Panics /// @@ -525,8 +525,8 @@ impl SystemTime { self.0.sub_time(&earlier.0).map_err(SystemTimeError) } - /// Returns the difference between the clock time when this - /// system time was created, and the current clock time. + /// Returns the difference from this system time to the + /// current clock time. /// /// This function may fail as the underlying system clock is susceptible to /// drift and updates (e.g., the system clock could go backwards), so this diff --git a/library/test/src/console.rs b/library/test/src/console.rs index 24cbe035f2f..1ee68c8540b 100644 --- a/library/test/src/console.rs +++ b/library/test/src/console.rs @@ -53,6 +53,7 @@ pub struct ConsoleTestState { pub metrics: MetricMap, pub failures: Vec<(TestDesc, Vec<u8>)>, pub not_failures: Vec<(TestDesc, Vec<u8>)>, + pub ignores: Vec<(TestDesc, Vec<u8>)>, pub time_failures: Vec<(TestDesc, Vec<u8>)>, pub options: Options, } @@ -76,6 +77,7 @@ impl ConsoleTestState { metrics: MetricMap::new(), failures: Vec::new(), not_failures: Vec::new(), + ignores: Vec::new(), time_failures: Vec::new(), options: opts.options, }) @@ -194,7 +196,10 @@ fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) st.passed += 1; st.not_failures.push((test, stdout)); } - TestResult::TrIgnored => st.ignored += 1, + TestResult::TrIgnored => { + st.ignored += 1; + st.ignores.push((test, stdout)); + } TestResult::TrBench(bs) => { st.metrics.insert_metric( test.name.as_slice(), diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs index 0837ab16905..a431acfbc27 100644 --- a/library/test/src/formatters/terse.rs +++ b/library/test/src/formatters/terse.rs @@ -254,6 +254,15 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> { self.write_plain("\n\n")?; + // Custom handling for the case where there is only one test to execute and that test was ignored. + // We want to show more detailed information (why the test was ignored) for investigation purposes. + if self.total_test_count == 1 && state.ignores.len() == 1 { let test_desc = &state.ignores[0].0; if let Some(im) = test_desc.ignore_message { self.write_plain(format!("test: {}, ignore_message: {}\n\n", test_desc.name, im))?; } } + Ok(success) } } diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs index 3a0260f86cf..44776fb0a31 100644 --- a/library/test/src/tests.rs +++ b/library/test/src/tests.rs @@ -790,6 +790,7 @@ fn should_sort_failures_before_printing_them() { failures: vec![(test_b, Vec::new()), (test_a, Vec::new())], options: Options::new(), not_failures: Vec::new(), + ignores: Vec::new(), time_failures: Vec::new(), };
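The `ignore_message` surfaced by the terse formatter comes from the reason string on the `#[ignore]` attribute. A minimal example of a test that would trigger the new output when it is the only test selected; the test name and reason here are invented for illustration:

// Running only this test (e.g. `cargo test -q needs_network`) would print
// something like: test: needs_network, ignore_message: requires network access
#[test]
#[ignore = "requires network access"]
fn needs_network() {}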
