Diffstat (limited to 'library')
-rw-r--r--  library/alloc/src/collections/binary_heap.rs | 51
-rw-r--r--  library/alloc/src/collections/btree/map.rs | 5
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs | 33
-rw-r--r--  library/alloc/src/collections/btree/mod.rs | 1
-rw-r--r--  library/alloc/src/collections/btree/node/tests.rs | 12
-rw-r--r--  library/alloc/src/collections/btree/search.rs | 15
-rw-r--r--  library/alloc/src/collections/btree/set.rs | 24
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs | 23
-rw-r--r--  library/alloc/src/collections/btree/set_val.rs | 29
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs | 16
-rw-r--r--  library/alloc/src/string.rs | 70
-rw-r--r--  library/alloc/src/sync.rs | 17
-rw-r--r--  library/alloc/src/vec/mod.rs | 85
-rw-r--r--  library/alloc/tests/vec.rs | 9
-rw-r--r--  library/core/src/array/mod.rs | 79
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 42
-rw-r--r--  library/core/src/sync/atomic.rs | 7
-rw-r--r--  library/core/tests/iter/traits/iterator.rs | 9
-rw-r--r--  library/core/tests/lib.rs | 1
-rw-r--r--  library/std/src/collections/hash/map.rs | 35
-rw-r--r--  library/std/src/collections/hash/set.rs | 25
-rw-r--r--  library/std/src/ffi/os_str.rs | 22
-rw-r--r--  library/std/src/io/buffered/bufwriter.rs | 4
-rw-r--r--  library/std/src/io/buffered/linewriter.rs | 4
-rw-r--r--  library/std/src/sync/rwlock.rs | 28
-rw-r--r--  library/std/src/sync/rwlock/tests.rs | 14
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/alloc.rs | 120
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/mod.rs | 2
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/tests.rs | 30
-rw-r--r--  library/std/src/sys/unix/futex.rs | 50
-rw-r--r--  library/std/src/sys/unix/locks/pthread_mutex.rs | 20
-rw-r--r--  library/std/src/sys/unix/locks/pthread_rwlock.rs | 16
-rw-r--r--  library/std/src/sys/unix/thread_parker.rs | 1
-rw-r--r--  library/std/src/sys/windows/fs.rs | 156
-rw-r--r--  library/std/src/sys_common/lazy_box.rs | 19
-rw-r--r--  library/std/src/sys_common/thread_parker/mod.rs | 1
36 files changed, 793 insertions, 282 deletions
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
index 3706300dcfe..197e7aaaccf 100644
--- a/library/alloc/src/collections/binary_heap.rs
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -374,10 +374,11 @@ impl<T: Ord> BinaryHeap<T> {
         BinaryHeap { data: vec![] }
     }
 
-    /// Creates an empty `BinaryHeap` with a specific capacity.
-    /// This preallocates enough memory for `capacity` elements,
-    /// so that the `BinaryHeap` does not have to be reallocated
-    /// until it contains at least that many values.
+    /// Creates an empty `BinaryHeap` with at least the specified capacity.
+    ///
+    /// The binary heap will be able to hold at least `capacity` elements without
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
     ///
     /// # Examples
     ///
@@ -906,16 +907,18 @@ impl<T> BinaryHeap<T> {
         self.data.capacity()
     }
 
-    /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
-    /// given `BinaryHeap`. Does nothing if the capacity is already sufficient.
+    /// Reserves the minimum capacity for at least `additional` elements more than
+    /// the current length. Unlike [`reserve`], this will not
+    /// deliberately over-allocate to speculatively avoid frequent allocations.
+    /// After calling `reserve_exact`, capacity will be greater than or equal to
+    /// `self.len() + additional`. Does nothing if the capacity is already
+    /// sufficient.
     ///
-    /// Note that the allocator may give the collection more space than it requests. Therefore
-    /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
-    /// insertions are expected.
+    /// [`reserve`]: BinaryHeap::reserve
     ///
     /// # Panics
     ///
-    /// Panics if the new capacity overflows `usize`.
+    /// Panics if the new capacity overflows [`usize`].
     ///
     /// # Examples
     ///
@@ -935,12 +938,15 @@ impl<T> BinaryHeap<T> {
         self.data.reserve_exact(additional);
     }
 
-    /// Reserves capacity for at least `additional` more elements to be inserted in the
-    /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations.
+    /// Reserves capacity for at least `additional` elements more than the
+    /// current length. The allocator may reserve more space to speculatively
+    /// avoid frequent allocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
-    /// Panics if the new capacity overflows `usize`.
+    /// Panics if the new capacity overflows [`usize`].
     ///
     /// # Examples
     ///
@@ -958,10 +964,11 @@ impl<T> BinaryHeap<T> {
         self.data.reserve(additional);
     }
 
-    /// Tries to reserve the minimum capacity for exactly `additional`
-    /// elements to be inserted in the given `BinaryHeap<T>`. After calling
-    /// `try_reserve_exact`, capacity will be greater than or equal to
-    /// `self.len() + additional` if it returns `Ok(())`.
+    /// Tries to reserve the minimum capacity for at least `additional` elements
+    /// more than the current length. Unlike [`try_reserve`], this will not
+    /// deliberately over-allocate to speculatively avoid frequent allocations.
+    /// After calling `try_reserve_exact`, capacity will be greater than or
+    /// equal to `self.len() + additional` if it returns `Ok(())`.
     /// Does nothing if the capacity is already sufficient.
     ///
     /// Note that the allocator may give the collection more space than it
@@ -999,11 +1006,11 @@ impl<T> BinaryHeap<T> {
         self.data.try_reserve_exact(additional)
     }
 
-    /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `BinaryHeap<T>`. The collection may reserve more space to avoid
-    /// frequent reallocations. After calling `try_reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// Tries to reserve capacity for at least `additional` elements more than the
+    /// current length. The allocator may reserve more space to speculatively
+    /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+    /// greater than or equal to `self.len() + additional` if it returns
+    /// `Ok(())`. Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
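
The reworded `reserve`/`reserve_exact` docs guarantee only a lower bound on capacity. A minimal standalone sketch of that contract (not part of this patch; the literals are illustrative):

use std::collections::BinaryHeap;

fn main() {
    let mut heap: BinaryHeap<i32> = BinaryHeap::new();
    heap.push(1);

    // After either call, the only guarantee is capacity >= len + additional.
    heap.reserve(10);
    assert!(heap.capacity() >= 11);

    // `reserve_exact` requests the minimum, but the allocator may still
    // return more space, so the same lower bound is all that can be asserted.
    heap.reserve_exact(20);
    assert!(heap.capacity() >= 21);
}
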
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 28068a88060..0bddd7a9906 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -16,6 +16,7 @@ use super::dedup_sorted_iter::DedupSortedIter;
 use super::navigate::{LazyLeafRange, LeafRange};
 use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root};
 use super::search::SearchResult::*;
+use super::set_val::SetValZST;
 
 mod entry;
 
@@ -271,7 +272,7 @@ impl<K: Clone, V: Clone, A: Allocator + Clone> Clone for BTreeMap<K, V, A> {
     }
 }
 
-impl<K, Q: ?Sized, A: Allocator + Clone> super::Recover<Q> for BTreeMap<K, (), A>
+impl<K, Q: ?Sized, A: Allocator + Clone> super::Recover<Q> for BTreeMap<K, SetValZST, A>
 where
     K: Borrow<Q> + Ord,
     Q: Ord,
@@ -318,7 +319,7 @@ where
                     alloc: (*map.alloc).clone(),
                     _marker: PhantomData,
                 }
-                .insert(());
+                .insert(SetValZST::default());
                 None
             }
         }
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 5504959c34d..4c372b1d60a 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -897,6 +897,39 @@ fn test_range_mut() {
     map.check();
 }
 
+#[should_panic(expected = "range start is greater than range end in BTreeMap")]
+#[test]
+fn test_range_panic_1() {
+    let mut map = BTreeMap::new();
+    map.insert(3, "a");
+    map.insert(5, "b");
+    map.insert(8, "c");
+
+    let _invalid_range = map.range((Included(&8), Included(&3)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeMap")]
+#[test]
+fn test_range_panic_2() {
+    let mut map = BTreeMap::new();
+    map.insert(3, "a");
+    map.insert(5, "b");
+    map.insert(8, "c");
+
+    let _invalid_range = map.range((Excluded(&5), Excluded(&5)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeMap")]
+#[test]
+fn test_range_panic_3() {
+    let mut map: BTreeMap<i32, ()> = BTreeMap::new();
+    map.insert(3, ());
+    map.insert(5, ());
+    map.insert(8, ());
+
+    let _invalid_range = map.range((Excluded(&5), Excluded(&5)));
+}
+
 #[test]
 fn test_retain() {
     let mut map = BTreeMap::from_iter((0..100).map(|x| (x, x * 10)));
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
index 9571b3d594d..9d43ac5c5be 100644
--- a/library/alloc/src/collections/btree/mod.rs
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -10,6 +10,7 @@ mod node;
 mod remove;
 mod search;
 pub mod set;
+mod set_val;
 mod split;
 
 #[doc(hidden)]
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index c8634e6c06f..aadb0dc9c40 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -68,10 +68,10 @@ fn test_splitpoint() {
 
 #[test]
 fn test_partial_eq() {
-    let mut root1 = NodeRef::new_leaf(&Global);
+    let mut root1 = NodeRef::new_leaf(Global);
     root1.borrow_mut().push(1, ());
-    let mut root1 = NodeRef::new_internal(root1.forget_type(), &Global).forget_type();
-    let root2 = Root::new(&Global);
+    let mut root1 = NodeRef::new_internal(root1.forget_type(), Global).forget_type();
+    let root2 = Root::new(Global);
     root1.reborrow().assert_back_pointers();
     root2.reborrow().assert_back_pointers();
 
@@ -87,9 +87,9 @@ fn test_partial_eq() {
     assert!(top_edge_1 == top_edge_1);
     assert!(top_edge_1 != top_edge_2);
 
-    root1.pop_internal_level(&Global);
-    unsafe { root1.into_dying().deallocate_and_ascend(&Global) };
-    unsafe { root2.into_dying().deallocate_and_ascend(&Global) };
+    root1.pop_internal_level(Global);
+    unsafe { root1.into_dying().deallocate_and_ascend(Global) };
+    unsafe { root2.into_dying().deallocate_and_ascend(Global) };
 }
 
 #[test]
diff --git a/library/alloc/src/collections/btree/search.rs b/library/alloc/src/collections/btree/search.rs
index 5651a03c47a..ad3522b4e04 100644
--- a/library/alloc/src/collections/btree/search.rs
+++ b/library/alloc/src/collections/btree/search.rs
@@ -97,17 +97,28 @@ impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Lea
         K: Borrow<Q>,
         R: RangeBounds<Q>,
     {
+        // Determine if map or set is being searched
+        let is_set = <V as super::set_val::IsSetVal>::is_set_val();
+
         // Inlining these variables should be avoided. We assume the bounds reported by `range`
         // remain the same, but an adversarial implementation could change between calls (#81138).
         let (start, end) = (range.start_bound(), range.end_bound());
         match (start, end) {
             (Bound::Excluded(s), Bound::Excluded(e)) if s == e => {
-                panic!("range start and end are equal and excluded in BTreeMap")
+                if is_set {
+                    panic!("range start and end are equal and excluded in BTreeSet")
+                } else {
+                    panic!("range start and end are equal and excluded in BTreeMap")
+                }
             }
             (Bound::Included(s) | Bound::Excluded(s), Bound::Included(e) | Bound::Excluded(e))
                 if s > e =>
             {
-                panic!("range start is greater than range end in BTreeMap")
+                if is_set {
+                    panic!("range start is greater than range end in BTreeSet")
+                } else {
+                    panic!("range start is greater than range end in BTreeMap")
+                }
             }
             _ => {}
         }
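
With this change the panic message names the collection actually in use. A small sketch of the observable behavior under the patched library (the `catch_unwind` harness is illustrative):

use std::collections::BTreeSet;
use std::ops::Bound::Excluded;
use std::panic::catch_unwind;

fn main() {
    let set = BTreeSet::from([3, 5, 8]);
    let err = catch_unwind(|| {
        // Equal, excluded bounds are invalid and panic inside `range`.
        let _ = set.range((Excluded(&5), Excluded(&5)));
    })
    .unwrap_err();
    // The message now says "BTreeSet" rather than "BTreeMap".
    assert!(err.downcast_ref::<&str>().unwrap().contains("BTreeSet"));
}
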
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index 0d3fdc9019e..2cfc0807409 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -13,6 +13,7 @@ use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
 
 use super::map::{BTreeMap, Keys};
 use super::merge_iter::MergeIterInner;
+use super::set_val::SetValZST;
 use super::Recover;
 
 use crate::alloc::{Allocator, Global};
@@ -81,7 +82,7 @@ pub struct BTreeSet<
     T,
     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
 > {
-    map: BTreeMap<T, (), A>,
+    map: BTreeMap<T, SetValZST, A>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -135,7 +136,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for BTreeSet<T, A> {
 #[must_use = "iterators are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Iter<'a, T: 'a> {
-    iter: Keys<'a, T, ()>,
+    iter: Keys<'a, T, SetValZST>,
 }
 
 #[stable(feature = "collection_debug", since = "1.17.0")]
@@ -158,7 +159,7 @@ pub struct IntoIter<
     T,
     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
 > {
-    iter: super::map::IntoIter<T, (), A>,
+    iter: super::map::IntoIter<T, SetValZST, A>,
 }
 
 /// An iterator over a sub-range of items in a `BTreeSet`.
@@ -171,7 +172,7 @@ pub struct IntoIter<
 #[derive(Debug)]
 #[stable(feature = "btree_range", since = "1.17.0")]
 pub struct Range<'a, T: 'a> {
-    iter: super::map::Range<'a, T, ()>,
+    iter: super::map::Range<'a, T, SetValZST>,
 }
 
 /// A lazy iterator producing elements in the difference of `BTreeSet`s.
@@ -375,6 +376,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
     /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
     /// range from 4 to 10.
     ///
+    /// # Panics
+    ///
+    /// Panics if range `start > end`.
+    /// Panics if range `start == end` and both bounds are `Excluded`.
+    ///
     /// # Examples
     ///
     /// ```
@@ -905,7 +911,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
     where
         T: Ord,
     {
-        self.map.insert(value, ()).is_none()
+        self.map.insert(value, SetValZST::default()).is_none()
     }
 
     /// Adds a value to the set, replacing the existing element, if any, that is
@@ -1210,7 +1216,7 @@ impl<T: Ord> FromIterator<T> for BTreeSet<T> {
 
 impl<T: Ord, A: Allocator + Clone> BTreeSet<T, A> {
     fn from_sorted_iter<I: Iterator<Item = T>>(iter: I, alloc: A) -> BTreeSet<T, A> {
-        let iter = iter.map(|k| (k, ()));
+        let iter = iter.map(|k| (k, SetValZST::default()));
         let map = BTreeMap::bulk_build_from_sorted_iter(iter, alloc);
         BTreeSet { map }
     }
@@ -1234,7 +1240,7 @@ impl<T: Ord, const N: usize> From<[T; N]> for BTreeSet<T> {
 
         // use stable sort to preserve the insertion order.
         arr.sort();
-        let iter = IntoIterator::into_iter(arr).map(|k| (k, ()));
+        let iter = IntoIterator::into_iter(arr).map(|k| (k, SetValZST::default()));
         let map = BTreeMap::bulk_build_from_sorted_iter(iter, Global);
         BTreeSet { map }
     }
@@ -1284,7 +1290,7 @@ pub struct DrainFilter<
     F: 'a + FnMut(&T) -> bool,
 {
     pred: F,
-    inner: super::map::DrainFilterInner<'a, T, ()>,
+    inner: super::map::DrainFilterInner<'a, T, SetValZST>,
     /// The BTreeMap will outlive this IntoIter so we don't care about drop order for `alloc`.
     alloc: A,
 }
@@ -1319,7 +1325,7 @@ where
 
     fn next(&mut self) -> Option<T> {
         let pred = &mut self.pred;
-        let mut mapped_pred = |k: &T, _v: &mut ()| pred(k);
+        let mut mapped_pred = |k: &T, _v: &mut SetValZST| pred(k);
         self.inner.next(&mut mapped_pred, self.alloc.clone()).map(|(k, _)| k)
     }
 
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index 429b1644976..502d3e1d126 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -5,6 +5,7 @@ use crate::vec::Vec;
 use std::cmp::Ordering;
 use std::hash::{Hash, Hasher};
 use std::iter::FromIterator;
+use std::ops::Bound::{Excluded, Included};
 use std::panic::{catch_unwind, AssertUnwindSafe};
 
 #[test]
@@ -831,3 +832,25 @@ fn from_array() {
     let unordered_duplicates = BTreeSet::from([4, 1, 4, 3, 2]);
     assert_eq!(set, unordered_duplicates);
 }
+
+#[should_panic(expected = "range start is greater than range end in BTreeSet")]
+#[test]
+fn test_range_panic_1() {
+    let mut set = BTreeSet::new();
+    set.insert(3);
+    set.insert(5);
+    set.insert(8);
+
+    let _invalid_range = set.range((Included(&8), Included(&3)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeSet")]
+#[test]
+fn test_range_panic_2() {
+    let mut set = BTreeSet::new();
+    set.insert(3);
+    set.insert(5);
+    set.insert(8);
+
+    let _invalid_range = set.range((Excluded(&5), Excluded(&5)));
+}
diff --git a/library/alloc/src/collections/btree/set_val.rs b/library/alloc/src/collections/btree/set_val.rs
new file mode 100644
index 00000000000..80c459bcf81
--- /dev/null
+++ b/library/alloc/src/collections/btree/set_val.rs
@@ -0,0 +1,29 @@
+/// Zero-Sized Type (ZST) for internal `BTreeSet` values.
+/// Used instead of `()` to differentiate between:
+/// * `BTreeMap<T, ()>` (possible user-defined map)
+/// * `BTreeMap<T, SetValZST>` (internal set representation)
+#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Default)]
+pub struct SetValZST;
+
+/// A trait to differentiate between `BTreeMap` and `BTreeSet` values.
+/// Returns `true` only for type `SetValZST`, `false` for all other types (blanket implementation).
+/// [`TypeId`] requires a `'static` lifetime; use of this trait avoids that restriction.
+///
+/// [`TypeId`]: std::any::TypeId
+pub trait IsSetVal {
+    fn is_set_val() -> bool;
+}
+
+// Blanket implementation
+impl<V> IsSetVal for V {
+    default fn is_set_val() -> bool {
+        false
+    }
+}
+
+// Specialization
+impl IsSetVal for SetValZST {
+    fn is_set_val() -> bool {
+        true
+    }
+}
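
Because `is_set_val` must be answered per type without `TypeId`'s `'static` bound, the blanket impl uses a `default fn` that the `SetValZST` impl specializes. A standalone sketch of the same pattern (nightly-only, since `min_specialization` is an unstable feature):

#![feature(min_specialization)]

struct SetValZST;

trait IsSetVal {
    fn is_set_val() -> bool;
}

// Blanket implementation: every type defaults to "not the set value".
impl<V> IsSetVal for V {
    default fn is_set_val() -> bool {
        false
    }
}

// Specialization: only `SetValZST` overrides the default.
impl IsSetVal for SetValZST {
    fn is_set_val() -> bool {
        true
    }
}

fn main() {
    assert!(!<() as IsSetVal>::is_set_val());
    assert!(<SetValZST as IsSetVal>::is_set_val());
}
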
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index f92e5759b6f..4d895d83745 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -688,7 +688,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
         self.cap() - 1
     }
 
-    /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
+    /// Reserves the minimum capacity for at least `additional` more elements to be inserted in the
     /// given deque. Does nothing if the capacity is already sufficient.
     ///
     /// Note that the allocator may give the collection more space than it requests. Therefore
@@ -716,7 +716,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     }
 
     /// Reserves capacity for at least `additional` more elements to be inserted in the given
-    /// deque. The collection may reserve more space to avoid frequent reallocations.
+    /// deque. The collection may reserve more space to speculatively avoid frequent reallocations.
     ///
     /// # Panics
     ///
@@ -748,10 +748,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
         }
     }
 
-    /// Tries to reserve the minimum capacity for exactly `additional` more elements to
+    /// Tries to reserve the minimum capacity for at least `additional` more elements to
     /// be inserted in the given deque. After calling `try_reserve_exact`,
-    /// capacity will be greater than or equal to `self.len() + additional`.
-    /// Does nothing if the capacity is already sufficient.
+    /// capacity will be greater than or equal to `self.len() + additional` if
+    /// it returns `Ok(())`. Does nothing if the capacity is already sufficient.
     ///
     /// Note that the allocator may give the collection more space than it
     /// requests. Therefore, capacity can not be relied upon to be precisely
@@ -791,10 +791,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given deque. The collection may reserve more space to avoid
+    /// in the given deque. The collection may reserve more space to speculatively avoid
     /// frequent reallocations. After calling `try_reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// greater than or equal to `self.len() + additional` if it returns
+    /// `Ok(())`. Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index 668af60611b..88838807265 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -455,13 +455,13 @@ impl String {
         String { vec: Vec::new() }
     }
 
-    /// Creates a new empty `String` with a particular capacity.
+    /// Creates a new empty `String` with at least the specified capacity.
     ///
     /// `String`s have an internal buffer to hold their data. The capacity is
     /// the length of that buffer, and can be queried with the [`capacity`]
     /// method. This method creates an empty `String`, but one with an initial
-    /// buffer that can hold `capacity` bytes. This is useful when you may be
-    /// appending a bunch of data to the `String`, reducing the number of
+    /// buffer that can hold at least `capacity` bytes. This is useful when you
+    /// may be appending a bunch of data to the `String`, reducing the number of
     /// reallocations it needs to do.
     ///
     /// [`capacity`]: String::capacity
@@ -979,21 +979,16 @@ impl String {
         self.vec.capacity()
     }
 
-    /// Ensures that this `String`'s capacity is at least `additional` bytes
-    /// larger than its length.
-    ///
-    /// The capacity may be increased by more than `additional` bytes if it
-    /// chooses, to prevent frequent reallocations.
-    ///
-    /// If you do not want this "at least" behavior, see the [`reserve_exact`]
-    /// method.
+    /// Reserves capacity for at least `additional` bytes more than the
+    /// current length. The allocator may reserve more space to speculatively
+    /// avoid frequent allocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
     /// Panics if the new capacity overflows [`usize`].
     ///
-    /// [`reserve_exact`]: String::reserve_exact
-    ///
     /// # Examples
     ///
     /// Basic usage:
@@ -1013,15 +1008,16 @@ impl String {
     /// s.push('a');
     /// s.push('b');
     ///
-    /// // s now has a length of 2 and a capacity of 10
+    /// // s now has a length of 2 and a capacity of at least 10
+    /// let capacity = s.capacity();
     /// assert_eq!(2, s.len());
-    /// assert_eq!(10, s.capacity());
+    /// assert!(capacity >= 10);
     ///
-    /// // Since we already have an extra 8 capacity, calling this...
+    /// // Since we already have at least an extra 8 capacity, calling this...
     /// s.reserve(8);
     ///
     /// // ... doesn't actually increase.
-    /// assert_eq!(10, s.capacity());
+    /// assert_eq!(capacity, s.capacity());
     /// ```
     #[cfg(not(no_global_oom_handling))]
     #[inline]
@@ -1030,17 +1026,18 @@ impl String {
         self.vec.reserve(additional)
     }
 
-    /// Ensures that this `String`'s capacity is `additional` bytes
-    /// larger than its length.
-    ///
-    /// Consider using the [`reserve`] method unless you absolutely know
-    /// better than the allocator.
+    /// Reserves the minimum capacity for at least `additional` bytes more than
+    /// the current length. Unlike [`reserve`], this will not
+    /// deliberately over-allocate to speculatively avoid frequent allocations.
+    /// After calling `reserve_exact`, capacity will be greater than or equal to
+    /// `self.len() + additional`. Does nothing if the capacity is already
+    /// sufficient.
     ///
     /// [`reserve`]: String::reserve
     ///
     /// # Panics
     ///
-    /// Panics if the new capacity overflows `usize`.
+    /// Panics if the new capacity overflows [`usize`].
     ///
     /// # Examples
     ///
@@ -1061,15 +1058,16 @@ impl String {
     /// s.push('a');
     /// s.push('b');
     ///
-    /// // s now has a length of 2 and a capacity of 10
+    /// // s now has a length of 2 and a capacity of at least 10
+    /// let capacity = s.capacity();
     /// assert_eq!(2, s.len());
-    /// assert_eq!(10, s.capacity());
+    /// assert!(capacity >= 10);
     ///
-    /// // Since we already have an extra 8 capacity, calling this...
+    /// // Since we already have at least an extra 8 capacity, calling this...
     /// s.reserve_exact(8);
     ///
     /// // ... doesn't actually increase.
-    /// assert_eq!(10, s.capacity());
+    /// assert_eq!(capacity, s.capacity());
     /// ```
     #[cfg(not(no_global_oom_handling))]
     #[inline]
@@ -1078,11 +1076,11 @@ impl String {
         self.vec.reserve_exact(additional)
     }
 
-    /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `String`. The collection may reserve more space to avoid
-    /// frequent reallocations. After calling `reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// Tries to reserve capacity for at least `additional` bytes more than the
+    /// current length. The allocator may reserve more space to speculatively
+    /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+    /// greater than or equal to `self.len() + additional` if it returns
+    /// `Ok(())`. Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
@@ -1112,9 +1110,11 @@ impl String {
         self.vec.try_reserve(additional)
     }
 
-    /// Tries to reserve the minimum capacity for exactly `additional` more elements to
-    /// be inserted in the given `String`. After calling `try_reserve_exact`,
-    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Tries to reserve the minimum capacity for at least `additional` bytes
+    /// more than the current length. Unlike [`try_reserve`], this will not
+    /// deliberately over-allocate to speculatively avoid frequent allocations.
+    /// After calling `try_reserve_exact`, capacity will be greater than or
+    /// equal to `self.len() + additional` if it returns `Ok(())`.
     /// Does nothing if the capacity is already sufficient.
     ///
     /// Note that the allocator may give the collection more space than it
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 2670b15982a..24e849aab4c 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -1355,15 +1355,16 @@ impl<T: ?Sized> Clone for Arc<T> {
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
         let old_size = self.inner().strong.fetch_add(1, Relaxed);
 
-        // However we need to guard against massive refcounts in case someone
-        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
-        // and users will use-after free. We racily saturate to `isize::MAX` on
-        // the assumption that there aren't ~2 billion threads incrementing
-        // the reference count at once. This branch will never be taken in
-        // any realistic program.
+        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
+        // Arcs. If we don't do this the count can overflow and users will use-after-free. This
+        // branch will never be taken in any realistic program. We abort because such a program is
+        // incredibly degenerate, and we don't care to support it.
         //
-        // We abort because such a program is incredibly degenerate, and we
-        // don't care to support it.
+        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
+        // But we do that check *after* having done the increment, so there is a chance here that
+        // the worst already happened and we actually do overflow the `usize` counter. However, that
+        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
+        // above and the `abort` below, which seems exceedingly unlikely.
         if old_size > MAX_REFCOUNT {
             abort();
         }
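
The rewritten comment describes an increment-then-check guard. A standalone sketch of that pattern (the names mirror the diff; this is an illustration, not the `Arc` internals verbatim):

use std::process::abort;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

const MAX_REFCOUNT: usize = isize::MAX as usize;

fn increment(strong: &AtomicUsize) {
    // Increment first; `fetch_add` returns the previous value.
    let old_size = strong.fetch_add(1, Relaxed);

    // Check afterwards: as the comment notes, the count would have to climb
    // from `isize::MAX` to `usize::MAX` before this abort runs for the check
    // to be defeated, which is considered unrealistic.
    if old_size > MAX_REFCOUNT {
        abort();
    }
}

fn main() {
    let strong = AtomicUsize::new(1);
    increment(&strong);
    assert_eq!(strong.load(Relaxed), 2);
}
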
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 1c0cb6636a1..e25f98d8aa6 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -425,17 +425,25 @@ impl<T> Vec<T> {
         Vec { buf: RawVec::NEW, len: 0 }
     }
 
-    /// Constructs a new, empty `Vec<T>` with the specified capacity.
+    /// Constructs a new, empty `Vec<T>` with at least the specified capacity.
     ///
-    /// The vector will be able to hold exactly `capacity` elements without
-    /// reallocating. If `capacity` is 0, the vector will not allocate.
+    /// The vector will be able to hold at least `capacity` elements without
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the vector will not allocate.
     ///
     /// It is important to note that although the returned vector has the
-    /// *capacity* specified, the vector will have a zero *length*. For an
-    /// explanation of the difference between length and capacity, see
+    /// minimum *capacity* specified, the vector will have a zero *length*. For
+    /// an explanation of the difference between length and capacity, see
     /// *[Capacity and reallocation]*.
     ///
+    /// If it is important to know the exact allocated capacity of a `Vec`,
+    /// always use the [`capacity`] method after construction.
+    ///
+    /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+    /// and the capacity will always be `usize::MAX`.
+    ///
     /// [Capacity and reallocation]: #capacity-and-reallocation
+    /// [`capacity`]: Vec::capacity
     ///
     /// # Panics
     ///
@@ -448,19 +456,24 @@ impl<T> Vec<T> {
     ///
     /// // The vector contains no items, even though it has capacity for more
     /// assert_eq!(vec.len(), 0);
-    /// assert_eq!(vec.capacity(), 10);
+    /// assert!(vec.capacity() >= 10);
     ///
     /// // These are all done without reallocating...
     /// for i in 0..10 {
     ///     vec.push(i);
     /// }
     /// assert_eq!(vec.len(), 10);
-    /// assert_eq!(vec.capacity(), 10);
+    /// assert!(vec.capacity() >= 10);
     ///
     /// // ...but this may make the vector reallocate
     /// vec.push(11);
     /// assert_eq!(vec.len(), 11);
     /// assert!(vec.capacity() >= 11);
+    ///
+    /// // A vector of a zero-sized type will always over-allocate, since no
+    /// // allocation is necessary
+    /// let vec_units = Vec::<()>::with_capacity(10);
+    /// assert_eq!(vec_units.capacity(), usize::MAX);
     /// ```
     #[cfg(not(no_global_oom_handling))]
     #[inline]
@@ -566,18 +579,26 @@ impl<T, A: Allocator> Vec<T, A> {
         Vec { buf: RawVec::new_in(alloc), len: 0 }
     }
 
-    /// Constructs a new, empty `Vec<T, A>` with the specified capacity with the provided
-    /// allocator.
+    /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
+    /// with the provided allocator.
     ///
-    /// The vector will be able to hold exactly `capacity` elements without
-    /// reallocating. If `capacity` is 0, the vector will not allocate.
+    /// The vector will be able to hold at least `capacity` elements without
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the vector will not allocate.
     ///
     /// It is important to note that although the returned vector has the
-    /// *capacity* specified, the vector will have a zero *length*. For an
-    /// explanation of the difference between length and capacity, see
+    /// minimum *capacity* specified, the vector will have a zero *length*. For
+    /// an explanation of the difference between length and capacity, see
     /// *[Capacity and reallocation]*.
     ///
+    /// If it is important to know the exact allocated capacity of a `Vec`,
+    /// always use the [`capacity`] method after construction.
+    ///
+    /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+    /// and the capacity will always be `usize::MAX`.
+    ///
     /// [Capacity and reallocation]: #capacity-and-reallocation
+    /// [`capacity`]: Vec::capacity
     ///
     /// # Panics
     ///
@@ -607,6 +628,11 @@ impl<T, A: Allocator> Vec<T, A> {
     /// vec.push(11);
     /// assert_eq!(vec.len(), 11);
     /// assert!(vec.capacity() >= 11);
+    ///
+    /// // A vector of a zero-sized type will always over-allocate, since no
+    /// // allocation is necessary
+    /// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
+    /// assert_eq!(vec_units.capacity(), usize::MAX);
     /// ```
     #[cfg(not(no_global_oom_handling))]
     #[inline]
@@ -793,10 +819,10 @@ impl<T, A: Allocator> Vec<T, A> {
     }
 
     /// Reserves capacity for at least `additional` more elements to be inserted
-    /// in the given `Vec<T>`. The collection may reserve more space to avoid
-    /// frequent reallocations. After calling `reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// in the given `Vec<T>`. The collection may reserve more space to
+    /// speculatively avoid frequent reallocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
@@ -815,10 +841,12 @@ impl<T, A: Allocator> Vec<T, A> {
         self.buf.reserve(self.len, additional);
     }
 
-    /// Reserves the minimum capacity for exactly `additional` more elements to
-    /// be inserted in the given `Vec<T>`. After calling `reserve_exact`,
-    /// capacity will be greater than or equal to `self.len() + additional`.
-    /// Does nothing if the capacity is already sufficient.
+    /// Reserves the minimum capacity for at least `additional` more elements to
+    /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
+    /// deliberately over-allocate to speculatively avoid frequent allocations.
+    /// After calling `reserve_exact`, capacity will be greater than or equal to
+    /// `self.len() + additional`. Does nothing if the capacity is already
+    /// sufficient.
     ///
     /// Note that the allocator may give the collection more space than it
     /// requests. Therefore, capacity can not be relied upon to be precisely
@@ -844,10 +872,10 @@ impl<T, A: Allocator> Vec<T, A> {
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `Vec<T>`. The collection may reserve more space to avoid
+    /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
     /// frequent reallocations. After calling `try_reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// greater than or equal to `self.len() + additional` if it returns
+    /// `Ok(())`. Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
@@ -879,10 +907,11 @@ impl<T, A: Allocator> Vec<T, A> {
         self.buf.try_reserve(self.len, additional)
     }
 
-    /// Tries to reserve the minimum capacity for exactly `additional`
-    /// elements to be inserted in the given `Vec<T>`. After calling
-    /// `try_reserve_exact`, capacity will be greater than or equal to
-    /// `self.len() + additional` if it returns `Ok(())`.
+    /// Tries to reserve the minimum capacity for at least `additional`
+    /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
+    /// this will not deliberately over-allocate to speculatively avoid frequent
+    /// allocations. After calling `try_reserve_exact`, capacity will be greater
+    /// than or equal to `self.len() + additional` if it returns `Ok(())`.
     /// Does nothing if the capacity is already sufficient.
     ///
     /// Note that the allocator may give the collection more space than it
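
The `try_reserve` family reports allocation failure instead of aborting. A hedged sketch of how the fallible API composes (the helper `push_all` is illustrative, not part of this patch):

use std::collections::TryReserveError;

fn push_all(dst: &mut Vec<u8>, src: &[u8]) -> Result<(), TryReserveError> {
    // On `Ok(())`, capacity is at least `dst.len() + src.len()`, so the
    // extend below cannot trigger a fresh allocation.
    dst.try_reserve(src.len())?;
    dst.extend_from_slice(src);
    Ok(())
}

fn main() {
    let mut v = Vec::new();
    push_all(&mut v, b"hello").unwrap();
    assert_eq!(v, b"hello");
}
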
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index cc768c73c0e..5520f6ebf19 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -2129,6 +2129,15 @@ fn test_vec_cycle_wrapped() {
 }
 
 #[test]
+fn test_zero_sized_capacity() {
+    for len in [0, 1, 2, 4, 8, 16, 32, 64, 128, 256] {
+        let v = Vec::<()>::with_capacity(len);
+        assert_eq!(v.len(), 0);
+        assert_eq!(v.capacity(), usize::MAX);
+    }
+}
+
+#[test]
 fn test_zero_sized_vec_push() {
     const N: usize = 8;
 
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index 2ea4458bf64..c9823a136bc 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -780,8 +780,8 @@ where
 }
 
 /// Pulls `N` items from `iter` and returns them as an array. If the iterator
-/// yields fewer than `N` items, `None` is returned and all already yielded
-/// items are dropped.
+/// yields fewer than `N` items, `Err` is returned containing an iterator over
+/// the already yielded items.
 ///
 /// Since the iterator is passed as a mutable reference and this function calls
 /// `next` at most `N` times, the iterator can still be used afterwards to
@@ -789,7 +789,10 @@ where
 ///
 /// If `iter.next()` panics, all items already yielded by the iterator are
 /// dropped.
-fn try_collect_into_array<I, T, R, const N: usize>(iter: &mut I) -> Option<R::TryType>
+#[inline]
+fn try_collect_into_array<I, T, R, const N: usize>(
+    iter: &mut I,
+) -> Result<R::TryType, IntoIter<T, N>>
 where
     I: Iterator,
     I::Item: Try<Output = T, Residual = R>,
@@ -797,7 +800,7 @@ where
 {
     if N == 0 {
         // SAFETY: An empty array is always inhabited and has no validity invariants.
-        return unsafe { Some(Try::from_output(mem::zeroed())) };
+        return Ok(Try::from_output(unsafe { mem::zeroed() }));
     }
 
     struct Guard<'a, T, const N: usize> {
@@ -821,35 +824,49 @@ where
     let mut array = MaybeUninit::uninit_array::<N>();
     let mut guard = Guard { array_mut: &mut array, initialized: 0 };
 
-    while let Some(item_rslt) = iter.next() {
-        let item = match item_rslt.branch() {
-            ControlFlow::Break(r) => {
-                return Some(FromResidual::from_residual(r));
+    for _ in 0..N {
+        match iter.next() {
+            Some(item_rslt) => {
+                let item = match item_rslt.branch() {
+                    ControlFlow::Break(r) => {
+                        return Ok(FromResidual::from_residual(r));
+                    }
+                    ControlFlow::Continue(elem) => elem,
+                };
+
+                // SAFETY: `guard.initialized` starts at 0, is increased by one in the
+                // loop and the loop is aborted once it reaches N (which is
+                // `array.len()`).
+                unsafe {
+                    guard.array_mut.get_unchecked_mut(guard.initialized).write(item);
+                }
+                guard.initialized += 1;
+            }
+            None => {
+                let alive = 0..guard.initialized;
+                mem::forget(guard);
+                // SAFETY: `array` was initialized with exactly `initialized`
+                // number of elements.
+                return Err(unsafe { IntoIter::new_unchecked(array, alive) });
             }
-            ControlFlow::Continue(elem) => elem,
-        };
-
-        // SAFETY: `guard.initialized` starts at 0, is increased by one in the
-        // loop and the loop is aborted once it reaches N (which is
-        // `array.len()`).
-        unsafe {
-            guard.array_mut.get_unchecked_mut(guard.initialized).write(item);
-        }
-        guard.initialized += 1;
-
-        // Check if the whole array was initialized.
-        if guard.initialized == N {
-            mem::forget(guard);
-
-            // SAFETY: the condition above asserts that all elements are
-            // initialized.
-            let out = unsafe { MaybeUninit::array_assume_init(array) };
-            return Some(Try::from_output(out));
         }
     }
 
-    // This is only reached if the iterator is exhausted before
-    // `guard.initialized` reaches `N`. Also note that `guard` is dropped here,
-    // dropping all already initialized elements.
-    None
+    mem::forget(guard);
+    // SAFETY: All elements of the array were populated in the loop above.
+    let output = unsafe { MaybeUninit::array_assume_init(array) };
+    Ok(Try::from_output(output))
+}
+
+/// Returns the next chunk of `N` items from the iterator or errors with an
+/// iterator over the remainder. Used for `Iterator::next_chunk`.
+#[inline]
+pub(crate) fn iter_next_chunk<I, const N: usize>(
+    iter: &mut I,
+) -> Result<[I::Item; N], IntoIter<I::Item, N>>
+where
+    I: Iterator,
+{
+    let mut map = iter.map(NeverShortCircuit);
+    try_collect_into_array(&mut map).map(|NeverShortCircuit(arr)| arr)
 }
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 1cc9133fc3d..326b98ec947 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -1,3 +1,4 @@
+use crate::array;
 use crate::cmp::{self, Ordering};
 use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
 
@@ -102,6 +103,47 @@ pub trait Iterator {
     #[stable(feature = "rust1", since = "1.0.0")]
     fn next(&mut self) -> Option<Self::Item>;
 
+    /// Advances the iterator and returns an array containing the next `N` values.
+    ///
+    /// If there are not enough elements to fill the array then `Err` is returned
+    /// containing an iterator over the remaining elements.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(iter_next_chunk)]
+    ///
+    /// let mut iter = "lorem".chars();
+    ///
+    /// assert_eq!(iter.next_chunk().unwrap(), ['l', 'o']);              // N is inferred as 2
+    /// assert_eq!(iter.next_chunk().unwrap(), ['r', 'e', 'm']);         // N is inferred as 3
+    /// assert_eq!(iter.next_chunk::<4>().unwrap_err().as_slice(), &[]); // N is explicitly 4
+    /// ```
+    ///
+    /// Split a string and get the first three items.
+    ///
+    /// ```
+    /// #![feature(iter_next_chunk)]
+    ///
+    /// let quote = "not all those who wander are lost";
+    /// let [first, second, third] = quote.split_whitespace().next_chunk().unwrap();
+    /// assert_eq!(first, "not");
+    /// assert_eq!(second, "all");
+    /// assert_eq!(third, "those");
+    /// ```
+    #[inline]
+    #[unstable(feature = "iter_next_chunk", reason = "recently added", issue = "98326")]
+    fn next_chunk<const N: usize>(
+        &mut self,
+    ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>>
+    where
+        Self: Sized,
+    {
+        array::iter_next_chunk(self)
+    }
+
     /// Returns the bounds on the remaining length of the iterator.
     ///
     /// Specifically, `size_hint()` returns a tuple where the first element
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index cedeb27d6d9..a66ecc35bbd 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -4,6 +4,12 @@
 //! threads, and are the building blocks of other concurrent
 //! types.
 //!
+//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`.
+//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating
+//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference
+//! ends. (A Rust atomic type that is exclusively owned or behind a mutable reference does *not*
+//! correspond to an "atomic object" in C++, since it can be accessed via non-atomic operations.)
+//!
 //! This module defines atomic versions of a select number of primitive
 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
 //! [`AtomicI8`], [`AtomicU16`], etc.
@@ -14,6 +20,7 @@
 //! the memory barrier for that operation. These orderings are the
 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
 //!
+//! [cpp]: https://en.cppreference.com/w/cpp/atomic
 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
 //! [2]: ../../../nomicon/atomics.html
 //!
diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs
index 731b1592d41..37345c1d381 100644
--- a/library/core/tests/iter/traits/iterator.rs
+++ b/library/core/tests/iter/traits/iterator.rs
@@ -575,6 +575,15 @@ fn iter_try_collect_uses_try_fold_not_next() {
     // validation is just that it didn't panic.
 }
 
+#[test]
+fn test_next_chunk() {
+    let mut it = 0..12;
+    assert_eq!(it.next_chunk().unwrap(), [0, 1, 2, 3]);
+    assert_eq!(it.next_chunk().unwrap(), []);
+    assert_eq!(it.next_chunk().unwrap(), [4, 5, 6, 7, 8, 9]);
+    assert_eq!(it.next_chunk::<4>().unwrap_err().as_slice(), &[10, 11]);
+}
+
 // just tests by whether or not this compiles
 fn _empty_impl_all_auto_traits<T>() {
     use std::panic::{RefUnwindSafe, UnwindSafe};
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 63c9602abe7..9611e197a41 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -67,6 +67,7 @@
 #![feature(iter_partition_in_place)]
 #![feature(iter_intersperse)]
 #![feature(iter_is_partitioned)]
+#![feature(iter_next_chunk)]
 #![feature(iter_order_by)]
 #![feature(iterator_try_collect)]
 #![feature(iterator_try_reduce)]
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 237c5ee7340..db811343fa3 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -233,10 +233,11 @@ impl<K, V> HashMap<K, V, RandomState> {
         Default::default()
     }
 
-    /// Creates an empty `HashMap` with the specified capacity.
+    /// Creates an empty `HashMap` with at least the specified capacity.
     ///
     /// The hash map will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash map will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash map will not allocate.
     ///
     /// # Examples
     ///
@@ -282,18 +283,19 @@ impl<K, V, S> HashMap<K, V, S> {
         HashMap { base: base::HashMap::with_hasher(hash_builder) }
     }
 
-    /// Creates an empty `HashMap` with the specified capacity, using `hash_builder`
-    /// to hash the keys.
+    /// Creates an empty `HashMap` with at least the specified capacity, using
+    /// `hasher` to hash the keys.
     ///
     /// The hash map will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash map will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash map will not allocate.
     ///
-    /// Warning: `hash_builder` is normally randomly generated, and
+    /// Warning: `hasher` is normally randomly generated, and
     /// is designed to allow HashMaps to be resistant to attacks that
     /// cause many collisions and very poor performance. Setting it
     /// manually using this function can expose a DoS attack vector.
     ///
-    /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+    /// The `hasher` passed should implement the [`BuildHasher`] trait for
     /// the HashMap to be useful, see its documentation for details.
     ///
     /// # Examples
@@ -308,8 +310,8 @@ impl<K, V, S> HashMap<K, V, S> {
     /// ```
     #[inline]
     #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
-    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap<K, V, S> {
-        HashMap { base: base::HashMap::with_capacity_and_hasher(capacity, hash_builder) }
+    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> HashMap<K, V, S> {
+        HashMap { base: base::HashMap::with_capacity_and_hasher(capacity, hasher) }
     }
 
     /// Returns the number of elements the map can hold without reallocating.
@@ -731,8 +733,10 @@ where
     S: BuildHasher,
 {
     /// Reserves capacity for at least `additional` more elements to be inserted
-    /// in the `HashMap`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashMap`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
@@ -752,8 +756,11 @@ where
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `HashMap<K, V>`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashMap`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `try_reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional` if
+    /// it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
@@ -766,7 +773,7 @@ where
     /// use std::collections::HashMap;
     ///
     /// let mut map: HashMap<&str, isize> = HashMap::new();
-    /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
+    /// map.try_reserve(10).expect("why is the test harness OOMing on a handful of bytes?");
     /// ```
     #[inline]
     #[stable(feature = "try_reserve", since = "1.57.0")]
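
The renamed `hasher` parameter takes any `BuildHasher`; supplying a fixed one is precisely the case the DoS warning covers. A hedged usage sketch (the `DefaultHasher` choice is illustrative):

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;

fn main() {
    // A deterministic (non-random) hasher: convenient, but it opts out of
    // the HashDoS resistance that the randomly seeded default provides.
    let hasher = BuildHasherDefault::<DefaultHasher>::default();
    let mut map: HashMap<&str, i32, _> = HashMap::with_capacity_and_hasher(10, hasher);
    map.insert("answer", 42);
    assert!(map.capacity() >= 10);
}
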
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index fa498a987d6..abff82788a3 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -133,10 +133,11 @@ impl<T> HashSet<T, RandomState> {
         Default::default()
     }
 
-    /// Creates an empty `HashSet` with the specified capacity.
+    /// Creates an empty `HashSet` with at least the specified capacity.
     ///
     /// The hash set will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash set will not allocate.
     ///
     /// # Examples
     ///
@@ -379,11 +380,12 @@ impl<T, S> HashSet<T, S> {
         HashSet { base: base::HashSet::with_hasher(hasher) }
     }
 
-    /// Creates an empty `HashSet` with the specified capacity, using
+    /// Creates an empty `HashSet` with at least the specified capacity, using
     /// `hasher` to hash the keys.
     ///
     /// The hash set will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash set will not allocate.
     ///
     /// Warning: `hasher` is normally randomly generated, and
     /// is designed to allow `HashSet`s to be resistant to attacks that
@@ -434,8 +436,10 @@ where
     S: BuildHasher,
 {
     /// Reserves capacity for at least `additional` more elements to be inserted
-    /// in the `HashSet`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashSet`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
@@ -456,8 +460,11 @@ where
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `HashSet<K, V>`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashSet`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `try_reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional` if
+    /// it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
@@ -469,7 +476,7 @@ where
     /// ```
     /// use std::collections::HashSet;
     /// let mut set: HashSet<i32> = HashSet::new();
-    /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
+    /// set.try_reserve(10).expect("why is the test harness OOMing on a handful of bytes?");
     /// ```
     #[inline]
     #[stable(feature = "try_reserve", since = "1.57.0")]
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index 95d274c3c91..f2bbcc85cec 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -196,10 +196,11 @@ impl OsString {
         self.inner.push_slice(&s.as_ref().inner)
     }
 
-    /// Creates a new `OsString` with the given capacity.
+    /// Creates a new `OsString` with at least the given capacity.
     ///
-    /// The string will be able to hold exactly `capacity` length units of other
-    /// OS strings without reallocating. If `capacity` is 0, the string will not
+    /// The string will be able to hold at least `capacity` length units of other
+    /// OS strings without reallocating. This method is allowed to allocate for
+    /// more units than `capacity`. If `capacity` is 0, the string will not
     /// allocate.
     ///
     /// See the main `OsString` documentation information about encoding and capacity units.
@@ -263,9 +264,10 @@ impl OsString {
     }
 
     /// Reserves capacity for at least `additional` more capacity to be inserted
-    /// in the given `OsString`.
+    /// in the given `OsString`. Does nothing if the capacity is
+    /// already sufficient.
     ///
-    /// The collection may reserve more space to avoid frequent reallocations.
+    /// The collection may reserve more space to speculatively avoid frequent reallocations.
     ///
     /// See the main `OsString` documentation information about encoding and capacity units.
     ///
@@ -285,10 +287,10 @@ impl OsString {
     }
 
     /// Tries to reserve capacity for at least `additional` more length units
-    /// in the given `OsString`. The string may reserve more space to avoid
+    /// in the given `OsString`. The string may reserve more space to speculatively avoid
     /// frequent reallocations. After calling `try_reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// greater than or equal to `self.len() + additional` if it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// See the main `OsString` documentation information about encoding and capacity units.
     ///
@@ -322,7 +324,7 @@ impl OsString {
         self.inner.try_reserve(additional)
     }
 
-    /// Reserves the minimum capacity for exactly `additional` more capacity to
+    /// Reserves the minimum capacity for at least `additional` more length units to
     /// be inserted in the given `OsString`. Does nothing if the capacity is
     /// already sufficient.
     ///
@@ -349,7 +351,7 @@ impl OsString {
         self.inner.reserve_exact(additional)
     }
 
-    /// Tries to reserve the minimum capacity for exactly `additional`
+    /// Tries to reserve the minimum capacity for at least `additional`
     /// more length units in the given `OsString`. After calling
     /// `try_reserve_exact`, capacity will be greater than or equal to
     /// `self.len() + additional` if it returns `Ok(())`.
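All of these `OsString` methods now promise only a lower bound on capacity. A small sketch of the observable behavior under that contract (capacity values are illustrative and platform-dependent):

    use std::ffi::OsString;

    fn main() {
        let s = OsString::with_capacity(10);
        // "At least": the allocation may be rounded up, so only >= holds.
        assert!(s.capacity() >= 10);

        let mut t = OsString::new();
        t.reserve_exact(10);
        // Even reserve_exact guarantees no more than a lower bound.
        assert!(t.capacity() >= 10);
    }
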
diff --git a/library/std/src/io/buffered/bufwriter.rs b/library/std/src/io/buffered/bufwriter.rs
index 2d3a0f37b4c..6acb937e784 100644
--- a/library/std/src/io/buffered/bufwriter.rs
+++ b/library/std/src/io/buffered/bufwriter.rs
@@ -97,11 +97,11 @@ impl<W: Write> BufWriter<W> {
         BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
     }
 
-    /// Creates a new `BufWriter<W>` with the specified buffer capacity.
+    /// Creates a new `BufWriter<W>` with at least the specified buffer capacity.
     ///
     /// # Examples
     ///
-    /// Creating a buffer with a buffer of a hundred bytes.
+    /// Creating a `BufWriter` with a buffer of at least a hundred bytes.
     ///
     /// ```no_run
     /// use std::io::BufWriter;
diff --git a/library/std/src/io/buffered/linewriter.rs b/library/std/src/io/buffered/linewriter.rs
index d7b620d6f91..a26a4ab330e 100644
--- a/library/std/src/io/buffered/linewriter.rs
+++ b/library/std/src/io/buffered/linewriter.rs
@@ -89,8 +89,8 @@ impl<W: Write> LineWriter<W> {
         LineWriter::with_capacity(1024, inner)
     }
 
-    /// Creates a new `LineWriter` with a specified capacity for the internal
-    /// buffer.
+    /// Creates a new `LineWriter` with at least the specified capacity for the
+    /// internal buffer.
     ///
     /// # Examples
     ///
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 1192c08cb1a..6e4a2cfc82a 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -4,6 +4,7 @@ mod tests;
 use crate::cell::UnsafeCell;
 use crate::fmt;
 use crate::ops::{Deref, DerefMut};
+use crate::ptr::NonNull;
 use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
 use crate::sys_common::rwlock as sys;
 
@@ -101,7 +102,12 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
 #[clippy::has_significant_drop]
 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-    lock: &'a RwLock<T>,
+    // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
+    // `RwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it
+    // drops. `NonNull` is also covariant over `T`, just like we would have with `&T`.
+    // `NonNull` is preferable over `*const T` to allow for niche optimization.
+    data: NonNull<T>,
+    inner_lock: &'a sys::MovableRwLock,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -511,12 +517,21 @@ impl<T> From<T> for RwLock<T> {
 }
 
 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+    /// Creates a new instance of `RwLockReadGuard<T>` from a `RwLock<T>`.
+    // SAFETY: this is sound if and only if `lock.inner.read()` (or `lock.inner.try_read()`)
+    // has been successfully called from the same thread before instantiating this object.
     unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockReadGuard<'rwlock, T>> {
-        poison::map_result(lock.poison.borrow(), |()| RwLockReadGuard { lock })
+        poison::map_result(lock.poison.borrow(), |()| RwLockReadGuard {
+            data: NonNull::new_unchecked(lock.data.get()),
+            inner_lock: &lock.inner,
+        })
     }
 }
 
 impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+    /// Creates a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
+    // SAFETY: this is sound if and only if `lock.inner.write()` (or `lock.inner.try_write()`)
+    // has been successfully called from the same thread before instantiating this object.
     unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
         poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
     }
@@ -555,7 +570,8 @@ impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
     type Target = T;
 
     fn deref(&self) -> &T {
-        unsafe { &*self.lock.data.get() }
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+        unsafe { self.data.as_ref() }
     }
 }
 
@@ -564,6 +580,7 @@ impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
     type Target = T;
 
     fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
         unsafe { &*self.lock.data.get() }
     }
 }
@@ -571,6 +588,7 @@ impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
     fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
         unsafe { &mut *self.lock.data.get() }
     }
 }
@@ -578,8 +596,9 @@ impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
     fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
         unsafe {
-            self.lock.inner.read_unlock();
+            self.inner_lock.read_unlock();
         }
     }
 }
@@ -588,6 +607,7 @@ impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
 impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
     fn drop(&mut self) {
         self.lock.poison.done(&self.poison);
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
         unsafe {
             self.lock.inner.write_unlock();
         }
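For readers unfamiliar with the `noalias` concern behind the new guard layout: a `&'a T` field asserts the pointee is immutable and unaliased for the guard's entire scope, which is too strong a claim once unlocking in `drop` is considered, while `NonNull<T>` makes no such assertion yet keeps a reference's covariance and null-pointer niche. A minimal sketch of the pattern with a hypothetical `Guard` type (not the std implementation):

    use std::marker::PhantomData;
    use std::ptr::NonNull;

    // Hypothetical guard: a raw pointer plus a lifetime marker instead of
    // `&'a T`, so dropping the guard makes no aliasing claim, while
    // `NonNull` keeps covariance and the null-pointer niche.
    struct Guard<'a, T: ?Sized> {
        data: NonNull<T>,
        _lock: PhantomData<&'a T>,
    }

    impl<'a, T: ?Sized> Guard<'a, T> {
        fn get(&self) -> &T {
            // SAFETY: in the real code the pointee stays valid for as long
            // as the lock is held, which the guard's lifetime models.
            unsafe { self.data.as_ref() }
        }
    }

    fn main() {
        let x = 5i32;
        let g = Guard { data: NonNull::from(&x), _lock: PhantomData };
        assert_eq!(*g.get(), 5);
    }
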
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index 53aa2b1e38a..08255c985f5 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -1,6 +1,6 @@
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sync::mpsc::channel;
-use crate::sync::{Arc, RwLock, TryLockError};
+use crate::sync::{Arc, RwLock, RwLockReadGuard, TryLockError};
 use crate::thread;
 use rand::{self, Rng};
 
@@ -245,3 +245,15 @@ fn test_get_mut_poison() {
         Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {x:?}"),
     }
 }
+
+#[test]
+fn test_read_guard_covariance() {
+    fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+    let j: i32 = 5;
+    let lock = RwLock::new(&j);
+    {
+        let i = 6;
+        do_stuff(lock.read().unwrap(), &i);
+    }
+    drop(lock);
+}
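This test compiles only if `RwLockReadGuard<'_, T>` is covariant in `T`: the guard ranges over `&i32` borrows of the long-lived `j`, and `do_stuff` forces it to be used at the shorter lifetime of `i`. The same kind of shortening, sketched with plain references (covariant, like `NonNull<T>`; a `*mut T` or `&mut T` field would make this fail to compile):

    // Compiles only because `&T` is covariant over `T`: a reference over a
    // longer-lived reference can be used at a shorter lifetime.
    fn shorten<'short, 'long: 'short>(r: &'short &'long i32) -> &'short &'short i32 {
        r
    }

    fn main() {
        let a = 1;
        let r = &&a;
        let _ = shorten(r);
    }
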
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index 3792a3820a5..ea24fedd0eb 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -1,13 +1,16 @@
 #![allow(unused)]
 
+use crate::arch::asm;
 use crate::cell::UnsafeCell;
+use crate::cmp;
+use crate::convert::TryInto;
 use crate::mem;
 use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut};
 use crate::ptr::{self, NonNull};
 use crate::slice;
 use crate::slice::SliceIndex;
 
-use super::super::mem::is_user_range;
+use super::super::mem::{is_enclave_range, is_user_range};
 use fortanix_sgx_abi::*;
 
 /// A type that can be safely read from or written to userspace.
@@ -210,7 +213,9 @@ where
         unsafe {
             // Mustn't call alloc with size 0.
             let ptr = if size > 0 {
-                rtunwrap!(Ok, super::alloc(size, T::align_of())) as _
+                // `copy_to_userspace` is more efficient when data is 8-byte aligned
+                let alignment = cmp::max(T::align_of(), 8);
+                rtunwrap!(Ok, super::alloc(size, alignment)) as _
             } else {
                 T::align_of() as _ // dangling pointer ok for size 0
             };
@@ -225,13 +230,9 @@ where
     /// Copies `val` into freshly allocated space in user memory.
     pub fn new_from_enclave(val: &T) -> Self {
         unsafe {
-            let ret = Self::new_uninit_bytes(mem::size_of_val(val));
-            ptr::copy(
-                val as *const T as *const u8,
-                ret.0.as_ptr() as *mut u8,
-                mem::size_of_val(val),
-            );
-            ret
+            let mut user = Self::new_uninit_bytes(mem::size_of_val(val));
+            user.copy_from_enclave(val);
+            user
         }
     }
 
@@ -304,6 +305,105 @@ where
     }
 }
 
+/// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`.
+///
+/// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
+///  - preceded by the VERW instruction and followed by the MFENCE; LFENCE instruction sequence
+///  - or are in multiples of 8 bytes, aligned to an 8-byte boundary
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in enclave memory
+/// * The `dst` memory range is not in user memory
+///
+/// # References
+///  - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00615.html
+///  - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/processor-mmio-stale-data-vulnerabilities.html#inpage-nav-3-2-2
+pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+    unsafe fn copy_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+        unsafe {
+            let mut seg_sel: u16 = 0;
+            for off in 0..len {
+                asm!("
+                    mov %ds, ({seg_sel})
+                    verw ({seg_sel})
+                    movb {val}, ({dst})
+                    mfence
+                    lfence
+                    ",
+                    val = in(reg_byte) *src.offset(off as isize),
+                    dst = in(reg) dst.offset(off as isize),
+                    seg_sel = in(reg) &mut seg_sel,
+                    options(nostack, att_syntax)
+                );
+            }
+        }
+    }
+
+    unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+        unsafe {
+            asm!(
+                "rep movsq (%rsi), (%rdi)",
+                inout("rcx") len / 8 => _,
+                inout("rdi") dst => _,
+                inout("rsi") src => _,
+                options(att_syntax, nostack, preserves_flags)
+            );
+        }
+    }
+    assert!(!src.is_null());
+    assert!(!dst.is_null());
+    assert!(is_enclave_range(src, len));
+    assert!(is_user_range(dst, len));
+    assert!(len < isize::MAX as usize);
+    assert!(!(src as usize).overflowing_add(len).1);
+    assert!(!(dst as usize).overflowing_add(len).1);
+
+    if len < 8 {
+        // Can't align on an 8-byte boundary: copy safely byte by byte
+        unsafe {
+            copy_bytewise_to_userspace(src, dst, len);
+        }
+    } else if len % 8 == 0 && dst as usize % 8 == 0 {
+        // Copying 8-byte aligned quadwords: copy quadword by quadword
+        unsafe {
+            copy_aligned_quadwords_to_userspace(src, dst, len);
+        }
+    } else {
+        // Split copies into three parts:
+        //   +--------+
+        //   | small0 | Chunk smaller than 8 bytes
+        //   +--------+
+        //   |   big  | Chunk 8-byte aligned, and size a multiple of 8 bytes
+        //   +--------+
+        //   | small1 | Chunk smaller than 8 bytes
+        //   +--------+
+
+        unsafe {
+            // Copy small0
+            let small0_size = (8 - dst as usize % 8) as u8;
+            let small0_src = src;
+            let small0_dst = dst;
+            copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+
+            // Copy big
+            let small1_size = ((len - small0_size as usize) % 8) as u8;
+            let big_size = len - small0_size as usize - small1_size as usize;
+            let big_src = src.offset(small0_size as _);
+            let big_dst = dst.offset(small0_size as _);
+            copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+
+            // Copy small1
+            let small1_src = src.offset(big_size as isize + small0_size as isize);
+            let small1_dst = dst.offset(big_size as isize + small0_size as isize);
+            copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+        }
+    }
+}
+
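A worked instance of the size arithmetic used in the general branch, as a standalone sketch (pure computation, no SGX required; the address and length are illustrative, and the branch assumes `len >= 8`):

    fn split_sizes(dst: usize, len: usize) -> (usize, usize, usize) {
        // Mirrors copy_to_userspace's three-way split.
        let small0 = 8 - dst % 8;        // bytes until dst reaches 8-byte alignment
        let small1 = (len - small0) % 8; // unaligned tail after the big chunk
        let big = len - small0 - small1; // aligned middle, a multiple of 8
        (small0, big, small1)
    }

    fn main() {
        // dst ends in ...3 and len is 21: 5 bytes bytewise, then 16 by
        // quadwords, then an empty tail.
        assert_eq!(split_sizes(0x1003, 21), (5, 16, 0));
    }
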
 #[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: ?Sized> UserRef<T>
 where
@@ -352,7 +452,7 @@ where
     pub fn copy_from_enclave(&mut self, val: &T) {
         unsafe {
             assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get()));
-            ptr::copy(
+            copy_to_userspace(
                 val as *const T as *const u8,
                 self.0.get() as *mut T as *mut u8,
                 mem::size_of_val(val),
diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs
index 2f99abba776..79d1db5e1c5 100644
--- a/library/std/src/sys/sgx/abi/usercalls/mod.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs
@@ -6,6 +6,8 @@ use crate::time::{Duration, Instant};
 pub(crate) mod alloc;
 #[macro_use]
 pub(crate) mod raw;
+#[cfg(test)]
+mod tests;
 
 use self::raw::*;
 
diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
new file mode 100644
index 00000000000..cbf7d7d54f7
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -0,0 +1,30 @@
+use super::alloc::copy_to_userspace;
+use super::alloc::User;
+
+#[test]
+fn test_copy_function() {
+    let mut src = [0u8; 100];
+    let mut dst = User::<[u8]>::uninitialized(100);
+
+    for i in 0..src.len() {
+        src[i] = i as _;
+    }
+
+    for size in 0..48 {
+        // For all possible alignments
+        for offset in 0..8 {
+            // overwrite complete dst
+            dst.copy_from_enclave(&[0u8; 100]);
+
+            // Copy src[0..size] to dst + offset
+            unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+
+            // Verify copy
+            for byte in 0..size {
+                unsafe {
+                    assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+                }
+            }
+        }
+    }
+}
diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs
index 8d05cb44b94..ab516a7f76d 100644
--- a/library/std/src/sys/unix/futex.rs
+++ b/library/std/src/sys/unix/futex.rs
@@ -5,6 +5,7 @@
     target_os = "freebsd",
     target_os = "openbsd",
     target_os = "dragonfly",
+    target_os = "fuchsia",
 ))]
 
 use crate::sync::atomic::AtomicU32;
@@ -237,3 +238,52 @@ pub fn futex_wake(futex: &AtomicU32) -> bool {
 pub fn futex_wake_all(futex: &AtomicU32) {
     unsafe { emscripten_futex_wake(futex, i32::MAX) };
 }
+
+#[cfg(target_os = "fuchsia")]
+mod zircon {
+    type zx_time_t = i64;
+    type zx_futex_t = crate::sync::atomic::AtomicU32;
+    type zx_handle_t = u32;
+    type zx_status_t = i32;
+
+    pub const ZX_HANDLE_INVALID: zx_handle_t = 0;
+    pub const ZX_ERR_TIMED_OUT: zx_status_t = -21;
+    pub const ZX_TIME_INFINITE: zx_time_t = zx_time_t::MAX;
+
+    extern "C" {
+        pub fn zx_futex_wait(
+            value_ptr: *const zx_futex_t,
+            current_value: zx_futex_t,
+            new_futex_owner: zx_handle_t,
+            deadline: zx_time_t,
+        ) -> zx_status_t;
+        pub fn zx_futex_wake(value_ptr: *const zx_futex_t, wake_count: u32) -> zx_status_t;
+        pub fn zx_clock_get_monotonic() -> zx_time_t;
+    }
+}
+
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+    use crate::convert::TryFrom;
+
+    // Sleep forever if the timeout is longer than fits in an `i64`.
+    let deadline = timeout
+        .and_then(|d| {
+            i64::try_from(d.as_nanos())
+                .ok()?
+                .checked_add(unsafe { zircon::zx_clock_get_monotonic() })
+        })
+        .unwrap_or(zircon::ZX_TIME_INFINITE);
+
+    unsafe {
+        zircon::zx_futex_wait(futex, AtomicU32::new(expected), zircon::ZX_HANDLE_INVALID, deadline)
+            != zircon::ZX_ERR_TIMED_OUT
+    }
+}
+
+// Fuchsia doesn't tell us how many threads are woken up, so this always returns false.
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+    unsafe { zircon::zx_futex_wake(futex, 1) };
+    false
+}
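The deadline computation folds both overflow cases into ZX_TIME_INFINITE: a nanosecond count that does not fit in an `i64`, and an addition that would overflow the monotonic clock. The same saturation logic as a standalone sketch (`now` stands in for `zx_clock_get_monotonic`):

    use std::convert::TryFrom;
    use std::time::Duration;

    fn deadline(timeout: Option<Duration>, now: i64) -> i64 {
        const ZX_TIME_INFINITE: i64 = i64::MAX;
        timeout
            .and_then(|d| i64::try_from(d.as_nanos()).ok()?.checked_add(now))
            .unwrap_or(ZX_TIME_INFINITE)
    }

    fn main() {
        assert_eq!(deadline(None, 0), i64::MAX); // no timeout: wait forever
        assert_eq!(deadline(Some(Duration::from_nanos(5)), 100), 105);
        assert_eq!(deadline(Some(Duration::MAX), 0), i64::MAX); // saturates
    }
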
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 916e898d890..98afee69ba6 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -1,5 +1,5 @@
 use crate::cell::UnsafeCell;
-use crate::mem::MaybeUninit;
+use crate::mem::{forget, MaybeUninit};
 use crate::sys::cvt_nz;
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
@@ -23,6 +23,24 @@ impl LazyInit for Mutex {
         unsafe { mutex.init() };
         mutex
     }
+
+    fn destroy(mutex: Box<Self>) {
+        // We're not allowed to pthread_mutex_destroy a locked mutex,
+        // so check first if it's unlocked.
+        if unsafe { mutex.try_lock() } {
+            unsafe { mutex.unlock() };
+            drop(mutex);
+        } else {
+            // The mutex is locked. This happens if a MutexGuard is leaked.
+            // In this case, we just leak the Mutex too.
+            forget(mutex);
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
+    }
 }
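The locked branch matters because POSIX leaves pthread_mutex_destroy on a locked mutex undefined, and a leaked guard is a perfectly safe way to reach that state. A user-level sketch of how it arises (std API only):

    use std::mem;
    use std::sync::Mutex;

    fn main() {
        let m = Mutex::new(0);
        // Leaking the guard means unlock() is never called...
        mem::forget(m.lock().unwrap());
        // ...so when `m` drops here, the inner mutex is still locked and
        // must be leaked rather than destroyed.
    }
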
 
 impl Mutex {
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
index 75e5759c787..adfe2a88338 100644
--- a/library/std/src/sys/unix/locks/pthread_rwlock.rs
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -1,4 +1,5 @@
 use crate::cell::UnsafeCell;
+use crate::mem::forget;
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
@@ -17,6 +18,21 @@ impl LazyInit for RwLock {
     fn init() -> Box<Self> {
         Box::new(Self::new())
     }
+
+    fn destroy(mut rwlock: Box<Self>) {
+        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
+        // so check first if it's unlocked.
+        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
+            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
+            // In this case, we just leak the RwLock too.
+            forget(rwlock);
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
+    }
 }
 
 impl RwLock {
diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs
index 76278ae30f1..9f4d4f7e736 100644
--- a/library/std/src/sys/unix/thread_parker.rs
+++ b/library/std/src/sys/unix/thread_parker.rs
@@ -7,6 +7,7 @@
     target_os = "freebsd",
     target_os = "openbsd",
     target_os = "dragonfly",
+    target_os = "fuchsia",
 )))]
 
 use crate::cell::UnsafeCell;
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index 157d8a044d3..e9b10069077 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -13,6 +13,7 @@ use crate::sys::handle::Handle;
 use crate::sys::time::SystemTime;
 use crate::sys::{c, cvt};
 use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::thread;
 
 use super::path::maybe_verbatim;
 use super::to_u16s;
@@ -679,7 +680,7 @@ impl<'a> DirBuffIter<'a> {
     }
 }
 impl<'a> Iterator for DirBuffIter<'a> {
-    type Item = &'a [u16];
+    type Item = (&'a [u16], bool);
     fn next(&mut self) -> Option<Self::Item> {
         use crate::mem::size_of;
         let buffer = &self.buffer?[self.cursor..];
@@ -688,14 +689,16 @@ impl<'a> Iterator for DirBuffIter<'a> {
         // SAFETY: The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the
         // last field (the file name) is unsized. So an offset has to be
         // used to get the file name slice.
-        let (name, next_entry) = unsafe {
+        let (name, is_directory, next_entry) = unsafe {
             let info = buffer.as_ptr().cast::<c::FILE_ID_BOTH_DIR_INFO>();
             let next_entry = (*info).NextEntryOffset as usize;
             let name = crate::slice::from_raw_parts(
                 (*info).FileName.as_ptr().cast::<u16>(),
                 (*info).FileNameLength as usize / size_of::<u16>(),
             );
-            (name, next_entry)
+            let is_directory = ((*info).FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
+
+            (name, is_directory, next_entry)
         };
 
         if next_entry == 0 {
@@ -708,7 +711,7 @@ impl<'a> Iterator for DirBuffIter<'a> {
         const DOT: u16 = b'.' as u16;
         match name {
             [DOT] | [DOT, DOT] => self.next(),
-            _ => Some(name),
+            _ => Some((name, is_directory)),
         }
     }
 }
@@ -993,89 +996,92 @@ pub fn remove_dir_all(path: &Path) -> io::Result<()> {
     if (file.basic_info()?.FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) == 0 {
         return Err(io::Error::from_raw_os_error(c::ERROR_DIRECTORY as _));
     }
-    let mut delete: fn(&File) -> io::Result<()> = File::posix_delete;
-    let result = match delete(&file) {
-        Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {
-            match remove_dir_all_recursive(&file, delete) {
-                // Return unexpected errors.
-                Err(e) if e.kind() != io::ErrorKind::DirectoryNotEmpty => return Err(e),
-                result => result,
-            }
-        }
-        // If POSIX delete is not supported for this filesystem then fallback to win32 delete.
-        Err(e)
-            if e.raw_os_error() == Some(c::ERROR_NOT_SUPPORTED as i32)
-                || e.raw_os_error() == Some(c::ERROR_INVALID_PARAMETER as i32) =>
-        {
-            delete = File::win32_delete;
-            Err(e)
-        }
-        result => result,
-    };
-    if result.is_ok() {
-        Ok(())
-    } else {
-        // This is a fallback to make sure the directory is actually deleted.
-        // Otherwise this function is prone to failing with `DirectoryNotEmpty`
-        // due to possible delays between marking a file for deletion and the
-        // file actually being deleted from the filesystem.
-        //
-        // So we retry a few times before giving up.
-        for _ in 0..5 {
-            match remove_dir_all_recursive(&file, delete) {
-                Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {}
-                result => return result,
+
+    match remove_dir_all_iterative(&file, File::posix_delete) {
+        Err(e) => {
+            if let Some(code) = e.raw_os_error() {
+                match code as u32 {
+                    // If POSIX delete is not supported for this filesystem then fall back to win32 delete.
+                    c::ERROR_NOT_SUPPORTED
+                    | c::ERROR_INVALID_FUNCTION
+                    | c::ERROR_INVALID_PARAMETER => {
+                        remove_dir_all_iterative(&file, File::win32_delete)
+                    }
+                    _ => Err(e),
+                }
+            } else {
+                Err(e)
             }
         }
-        // Try one last time.
-        delete(&file)
+        ok => ok,
     }
 }
 
-fn remove_dir_all_recursive(f: &File, delete: fn(&File) -> io::Result<()>) -> io::Result<()> {
+fn remove_dir_all_iterative(f: &File, delete: fn(&File) -> io::Result<()>) -> io::Result<()> {
+    // When deleting files we may loop this many times when certain error conditions occur.
+    // This allows remove_dir_all to succeed when the error is temporary.
+    const MAX_RETRIES: u32 = 10;
+
     let mut buffer = DirBuff::new();
-    let mut restart = true;
-    // Fill the buffer and iterate the entries.
-    while f.fill_dir_buff(&mut buffer, restart)? {
-        for name in buffer.iter() {
-            // Open the file without following symlinks and try deleting it.
-            // We try opening will all needed permissions and if that is denied
-            // fallback to opening without `FILE_LIST_DIRECTORY` permission.
-            // Note `SYNCHRONIZE` permission is needed for synchronous access.
-            let mut result =
-                open_link_no_reparse(&f, name, c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY);
-            if matches!(&result, Err(e) if e.kind() == io::ErrorKind::PermissionDenied) {
-                result = open_link_no_reparse(&f, name, c::SYNCHRONIZE | c::DELETE);
+    let mut dirlist = vec![f.duplicate()?];
+
+    // FIXME: This is a hack so we can push to the dirlist vec after borrowing from it.
+    fn copy_handle(f: &File) -> mem::ManuallyDrop<File> {
+        unsafe { mem::ManuallyDrop::new(File::from_raw_handle(f.as_raw_handle())) }
+    }
+
+    while let Some(dir) = dirlist.last() {
+        let dir = copy_handle(dir);
+
+        // Fill the buffer and iterate the entries.
+        let more_data = dir.fill_dir_buff(&mut buffer, false)?;
+        for (name, is_directory) in buffer.iter() {
+            if is_directory {
+                let child_dir = open_link_no_reparse(
+                    &dir,
+                    name,
+                    c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY,
+                )?;
+                dirlist.push(child_dir);
+            } else {
+                for i in 1..=MAX_RETRIES {
+                    let result = open_link_no_reparse(&dir, name, c::SYNCHRONIZE | c::DELETE);
+                    match result {
+                        Ok(f) => delete(&f)?,
+                        // Already deleted, so skip.
+                        Err(e) if e.kind() == io::ErrorKind::NotFound => break,
+                        // Retry a few times if the file is locked or a delete is already in progress.
+                        Err(e)
+                            if i < MAX_RETRIES
+                                && (e.raw_os_error() == Some(c::ERROR_DELETE_PENDING as _)
+                                    || e.raw_os_error()
+                                        == Some(c::ERROR_SHARING_VIOLATION as _)) => {}
+                        // Otherwise return the error.
+                        Err(e) => return Err(e),
+                    }
+                    thread::yield_now();
+                }
             }
-            match result {
-                Ok(file) => match delete(&file) {
-                    Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {
-                        // Iterate the directory's files.
-                        // Ignore `DirectoryNotEmpty` errors here. They will be
-                        // caught when `remove_dir_all` tries to delete the top
-                        // level directory. It can then decide if to retry or not.
-                        match remove_dir_all_recursive(&file, delete) {
-                            Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {}
-                            result => result?,
+        }
+        // If there were no more files then delete the directory.
+        if !more_data {
+            if let Some(dir) = dirlist.pop() {
+                // Retry deleting a few times in case we need to wait for a file to be deleted.
+                for i in 1..=MAX_RETRIES {
+                    let result = delete(&dir);
+                    if let Err(e) = result {
+                        if i == MAX_RETRIES || e.kind() != io::ErrorKind::DirectoryNotEmpty {
+                            return Err(e);
                         }
+                        thread::yield_now();
+                    } else {
+                        break;
                     }
-                    result => result?,
-                },
-                // Ignore error if a delete is already in progress or the file
-                // has already been deleted. It also ignores sharing violations
-                // (where a file is locked by another process) as these are
-                // usually temporary.
-                Err(e)
-                    if e.raw_os_error() == Some(c::ERROR_DELETE_PENDING as _)
-                        || e.kind() == io::ErrorKind::NotFound
-                        || e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as _) => {}
-                Err(e) => return Err(e),
+                }
             }
         }
-        // Continue reading directory entries without restarting from the beginning,
-        restart = false;
     }
-    delete(&f)
+    Ok(())
 }
 
 pub fn readlink(path: &Path) -> io::Result<PathBuf> {
diff --git a/library/std/src/sys_common/lazy_box.rs b/library/std/src/sys_common/lazy_box.rs
index 647c13d2437..63c3316bdeb 100644
--- a/library/std/src/sys_common/lazy_box.rs
+++ b/library/std/src/sys_common/lazy_box.rs
@@ -21,8 +21,21 @@ pub(crate) trait LazyInit {
     ///
     /// It might be called more than once per LazyBox, as multiple threads
     /// might race to initialize it concurrently, each constructing and initializing
-    /// their own box. (All but one of them will be destroyed right after.)
+    /// their own box. All but one of them will be passed to `cancel_init` right after.
     fn init() -> Box<Self>;
+
+    /// Any surplus boxes from `init()` that lost the initialization race
+    /// are passed to this function for disposal.
+    ///
+    /// The default implementation calls `destroy()`.
+    fn cancel_init(x: Box<Self>) {
+        Self::destroy(x);
+    }
+
+    /// This is called to destroy a used box.
+    ///
+    /// The default implementation just drops it.
+    fn destroy(_: Box<Self>) {}
 }
 
 impl<T: LazyInit> LazyBox<T> {
@@ -45,7 +58,7 @@ impl<T: LazyInit> LazyBox<T> {
             Err(ptr) => {
                 // Lost the race to another thread.
                 // Drop the box we created, and use the one from the other thread instead.
-                drop(unsafe { Box::from_raw(new_ptr) });
+                T::cancel_init(unsafe { Box::from_raw(new_ptr) });
                 ptr
             }
         }
@@ -71,7 +84,7 @@ impl<T: LazyInit> Drop for LazyBox<T> {
     fn drop(&mut self) {
         let ptr = *self.ptr.get_mut();
         if !ptr.is_null() {
-            drop(unsafe { Box::from_raw(ptr) });
+            T::destroy(unsafe { Box::from_raw(ptr) });
         }
     }
 }
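The division of labor between the two hooks: `cancel_init` only ever sees boxes that lost the race and were never published, so they cannot be locked; `destroy` sees the box that was actually in use and may need real teardown. A self-contained sketch with a stand-in trait and a hypothetical `Raw` resource (mirroring, not reproducing, the std code):

    // Stand-in for std's private LazyInit trait, reduced to its three hooks.
    trait LazyInit {
        fn init() -> Box<Self>;
        fn cancel_init(x: Box<Self>) {
            Self::destroy(x);
        }
        fn destroy(_: Box<Self>) {}
    }

    // Hypothetical resource with pthread-like teardown rules.
    struct Raw {
        in_use: bool,
    }

    impl LazyInit for Raw {
        fn init() -> Box<Self> {
            Box::new(Raw { in_use: false })
        }
        fn cancel_init(x: Box<Self>) {
            // Lost the race: never published, so a plain drop is always safe.
            drop(x);
        }
        fn destroy(x: Box<Self>) {
            // Published box: leak it if still in use, mirroring the
            // leak-if-locked policy of the pthread locks above.
            if x.in_use { std::mem::forget(x) } else { drop(x) }
        }
    }

    fn main() {
        let winner = Raw::init();
        let loser = Raw::init();
        Raw::cancel_init(loser); // surplus box from a lost race
        Raw::destroy(winner);    // the box that was actually used
    }
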
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
index c789a388e05..7e8bfb2565e 100644
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ b/library/std/src/sys_common/thread_parker/mod.rs
@@ -6,6 +6,7 @@ cfg_if::cfg_if! {
         target_os = "freebsd",
         target_os = "openbsd",
         target_os = "dragonfly",
+        target_os = "fuchsia",
     ))] {
         mod futex;
         pub use futex::Parker;