Diffstat (limited to 'library/std/src')
-rw-r--r--  library/std/src/collections/hash/map.rs          |  35
-rw-r--r--  library/std/src/collections/hash/set.rs          |  25
-rw-r--r--  library/std/src/ffi/os_str.rs                    |  22
-rw-r--r--  library/std/src/io/buffered/bufwriter.rs         |   4
-rw-r--r--  library/std/src/io/buffered/linewriter.rs        |   4
-rw-r--r--  library/std/src/sync/rwlock.rs                   |  28
-rw-r--r--  library/std/src/sync/rwlock/tests.rs             |  14
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/alloc.rs   | 120
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/mod.rs     |   2
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/tests.rs   |  30
-rw-r--r--  library/std/src/sys/unix/futex.rs                |  50
-rw-r--r--  library/std/src/sys/unix/locks/pthread_mutex.rs  |  20
-rw-r--r--  library/std/src/sys/unix/locks/pthread_rwlock.rs |  16
-rw-r--r--  library/std/src/sys/unix/thread_parker.rs        |   1
-rw-r--r--  library/std/src/sys/windows/fs.rs                | 156
-rw-r--r--  library/std/src/sys_common/lazy_box.rs           |  19
-rw-r--r--  library/std/src/sys_common/thread_parker/mod.rs  |   1
17 files changed, 416 insertions, 131 deletions
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 237c5ee7340..db811343fa3 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -233,10 +233,11 @@ impl<K, V> HashMap<K, V, RandomState> {
         Default::default()
     }
 
-    /// Creates an empty `HashMap` with the specified capacity.
+    /// Creates an empty `HashMap` with at least the specified capacity.
     ///
     /// The hash map will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash map will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash map will not allocate.
     ///
     /// # Examples
     ///
@@ -282,18 +283,19 @@ impl<K, V, S> HashMap<K, V, S> {
         HashMap { base: base::HashMap::with_hasher(hash_builder) }
     }
 
-    /// Creates an empty `HashMap` with the specified capacity, using `hash_builder`
-    /// to hash the keys.
+    /// Creates an empty `HashMap` with at least the specified capacity, using
+    /// `hasher` to hash the keys.
     ///
     /// The hash map will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash map will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash map will not allocate.
     ///
-    /// Warning: `hash_builder` is normally randomly generated, and
+    /// Warning: `hasher` is normally randomly generated, and
     /// is designed to allow HashMaps to be resistant to attacks that
     /// cause many collisions and very poor performance. Setting it
     /// manually using this function can expose a DoS attack vector.
     ///
-    /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+    /// The `hasher` passed should implement the [`BuildHasher`] trait for
     /// the HashMap to be useful, see its documentation for details.
     ///
     /// # Examples
@@ -308,8 +310,8 @@ impl<K, V, S> HashMap<K, V, S> {
     /// ```
     #[inline]
     #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
-    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap<K, V, S> {
-        HashMap { base: base::HashMap::with_capacity_and_hasher(capacity, hash_builder) }
+    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> HashMap<K, V, S> {
+        HashMap { base: base::HashMap::with_capacity_and_hasher(capacity, hasher) }
     }
 
     /// Returns the number of elements the map can hold without reallocating.
@@ -731,8 +733,10 @@ where
     S: BuildHasher,
 {
     /// Reserves capacity for at least `additional` more elements to be inserted
-    /// in the `HashMap`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashMap`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
@@ -752,8 +756,11 @@ where
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `HashMap<K, V>`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashMap`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `try_reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional` if
+    /// it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
@@ -766,7 +773,7 @@ where
     /// use std::collections::HashMap;
     ///
     /// let mut map: HashMap<&str, isize> = HashMap::new();
-    /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
+    /// map.try_reserve(10).expect("why is the test harness OOMing on a handful of bytes?");
     /// ```
     #[inline]
     #[stable(feature = "try_reserve", since = "1.57.0")]
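The capacity guarantees spelled out above are observable from safe code. A minimal sketch of what the new wording promises, assuming nothing beyond the documented behavior (concrete capacity values are implementation-defined):

```
use std::collections::HashMap;

fn main() {
    // "At least the specified capacity": the map may allocate room for
    // more than 16 entries, but never fewer.
    let mut map: HashMap<&str, i32> = HashMap::with_capacity(16);
    assert!(map.capacity() >= 16);

    // A `capacity` of 0 does not allocate.
    let empty: HashMap<&str, i32> = HashMap::with_capacity(0);
    assert_eq!(empty.capacity(), 0);

    // After `try_reserve` returns `Ok(())`, capacity covers
    // `self.len() + additional`.
    map.insert("a", 1);
    if map.try_reserve(10).is_ok() {
        assert!(map.capacity() >= map.len() + 10);
    }
}
```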
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index fa498a987d6..abff82788a3 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -133,10 +133,11 @@ impl<T> HashSet<T, RandomState> {
         Default::default()
     }
 
-    /// Creates an empty `HashSet` with the specified capacity.
+    /// Creates an empty `HashSet` with at least the specified capacity.
     ///
     /// The hash set will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash set will not allocate.
     ///
     /// # Examples
     ///
@@ -379,11 +380,12 @@ impl<T, S> HashSet<T, S> {
         HashSet { base: base::HashSet::with_hasher(hasher) }
     }
 
-    /// Creates an empty `HashSet` with the specified capacity, using
+    /// Creates an empty `HashSet` with at least the specified capacity, using
     /// `hasher` to hash the keys.
     ///
     /// The hash set will be able to hold at least `capacity` elements without
-    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    /// reallocating. This method is allowed to allocate for more elements than
+    /// `capacity`. If `capacity` is 0, the hash set will not allocate.
     ///
     /// Warning: `hasher` is normally randomly generated, and
     /// is designed to allow `HashSet`s to be resistant to attacks that
@@ -434,8 +436,10 @@ where
     S: BuildHasher,
 {
     /// Reserves capacity for at least `additional` more elements to be inserted
-    /// in the `HashSet`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashSet`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Panics
     ///
@@ -456,8 +460,11 @@ where
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
-    /// in the given `HashSet<K, V>`. The collection may reserve more space to avoid
-    /// frequent reallocations.
+    /// in the `HashSet`. The collection may reserve more space to speculatively
+    /// avoid frequent reallocations. After calling `try_reserve`,
+    /// capacity will be greater than or equal to `self.len() + additional` if
+    /// it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// # Errors
     ///
@@ -469,7 +476,7 @@ where
     /// ```
     /// use std::collections::HashSet;
     /// let mut set: HashSet<i32> = HashSet::new();
-    /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
+    /// set.try_reserve(10).expect("why is the test harness OOMing on a handful of bytes?");
     /// ```
     #[inline]
     #[stable(feature = "try_reserve", since = "1.57.0")]
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index 95d274c3c91..f2bbcc85cec 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -196,10 +196,11 @@ impl OsString {
         self.inner.push_slice(&s.as_ref().inner)
     }
 
-    /// Creates a new `OsString` with the given capacity.
+    /// Creates a new `OsString` with at least the given capacity.
     ///
-    /// The string will be able to hold exactly `capacity` length units of other
-    /// OS strings without reallocating. If `capacity` is 0, the string will not
+    /// The string will be able to hold at least `capacity` length units of other
+    /// OS strings without reallocating. This method is allowed to allocate for
+    /// more units than `capacity`. If `capacity` is 0, the string will not
     /// allocate.
     ///
     /// See the main `OsString` documentation for information about encoding and capacity units.
@@ -263,9 +264,10 @@ impl OsString {
     }
 
     /// Reserves capacity for at least `additional` more capacity to be inserted
-    /// in the given `OsString`.
+    /// in the given `OsString`. Does nothing if the capacity is
+    /// already sufficient.
     ///
-    /// The collection may reserve more space to avoid frequent reallocations.
+    /// The collection may reserve more space to speculatively avoid frequent reallocations.
     ///
     /// See the main `OsString` documentation for information about encoding and capacity units.
     ///
@@ -285,10 +287,10 @@ impl OsString {
     }
 
     /// Tries to reserve capacity for at least `additional` more length units
-    /// in the given `OsString`. The string may reserve more space to avoid
+    /// in the given `OsString`. The string may reserve more space to speculatively avoid
     /// frequent reallocations. After calling `try_reserve`, capacity will be
-    /// greater than or equal to `self.len() + additional`. Does nothing if
-    /// capacity is already sufficient.
+    /// greater than or equal to `self.len() + additional` if it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
     ///
     /// See the main `OsString` documentation for information about encoding and capacity units.
     ///
@@ -322,7 +324,7 @@ impl OsString {
         self.inner.try_reserve(additional)
     }
 
-    /// Reserves the minimum capacity for exactly `additional` more capacity to
+    /// Reserves the minimum capacity for at least `additional` more capacity to
     /// be inserted in the given `OsString`. Does nothing if the capacity is
     /// already sufficient.
     ///
@@ -349,7 +351,7 @@ impl OsString {
         self.inner.reserve_exact(additional)
     }
 
-    /// Tries to reserve the minimum capacity for exactly `additional`
+    /// Tries to reserve the minimum capacity for at least `additional`
     /// more length units in the given `OsString`. After calling
     /// `try_reserve_exact`, capacity will be greater than or equal to
     /// `self.len() + additional` if it returns `Ok(())`.
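The same at-least wording now governs `OsString`, where capacity is counted in OS-specific length units (see the `OsString` docs). A short sketch of the documented guarantees:

```
use std::ffi::OsString;

fn main() {
    // "At least the given capacity", in OS-string length units.
    let mut s = OsString::with_capacity(10);
    assert!(s.capacity() >= 10);

    s.push("abc");

    // Even `reserve_exact` may be rounded up by the allocator, hence the
    // switch from "exactly" to "at least" in the docs above.
    s.reserve_exact(20);
    assert!(s.capacity() >= s.len() + 20);

    // `try_reserve` reports allocation failure instead of aborting.
    s.try_reserve(5).expect("allocation failed");
    assert!(s.capacity() >= s.len() + 5);
}
```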
diff --git a/library/std/src/io/buffered/bufwriter.rs b/library/std/src/io/buffered/bufwriter.rs
index 2d3a0f37b4c..6acb937e784 100644
--- a/library/std/src/io/buffered/bufwriter.rs
+++ b/library/std/src/io/buffered/bufwriter.rs
@@ -97,11 +97,11 @@ impl<W: Write> BufWriter<W> {
         BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
     }
 
-    /// Creates a new `BufWriter<W>` with the specified buffer capacity.
+    /// Creates a new `BufWriter<W>` with at least the specified buffer capacity.
     ///
     /// # Examples
     ///
-    /// Creating a buffer with a buffer of a hundred bytes.
+    /// Creating a `BufWriter` with a buffer of at least a hundred bytes.
     ///
     /// ```no_run
     /// use std::io::BufWriter;
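What "at least the specified buffer capacity" means in practice: writes smaller than the requested capacity stay in the buffer until flushed. A small sketch using a `Vec<u8>` as the underlying writer:

```
use std::io::{BufWriter, Write};

fn main() -> std::io::Result<()> {
    // The internal buffer holds at least 100 bytes.
    let mut writer = BufWriter::with_capacity(100, Vec::new());
    for byte in 0u8..50 {
        writer.write_all(&[byte])?;
    }
    // 50 bytes fit in the buffer, so nothing has reached the Vec yet.
    assert!(writer.get_ref().is_empty());
    writer.flush()?;
    assert_eq!(writer.get_ref().len(), 50);
    Ok(())
}
```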
diff --git a/library/std/src/io/buffered/linewriter.rs b/library/std/src/io/buffered/linewriter.rs
index d7b620d6f91..a26a4ab330e 100644
--- a/library/std/src/io/buffered/linewriter.rs
+++ b/library/std/src/io/buffered/linewriter.rs
@@ -89,8 +89,8 @@ impl<W: Write> LineWriter<W> {
         LineWriter::with_capacity(1024, inner)
     }
 
-    /// Creates a new `LineWriter` with a specified capacity for the internal
-    /// buffer.
+    /// Creates a new `LineWriter` with at least the specified capacity for the
+    /// internal buffer.
     ///
     /// # Examples
     ///
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 1192c08cb1a..6e4a2cfc82a 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -4,6 +4,7 @@ mod tests;
 use crate::cell::UnsafeCell;
 use crate::fmt;
 use crate::ops::{Deref, DerefMut};
+use crate::ptr::NonNull;
 use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
 use crate::sys_common::rwlock as sys;
 
@@ -101,7 +102,12 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
 #[clippy::has_significant_drop]
 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-    lock: &'a RwLock<T>,
+    // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
+    // `RwLockReadGuard` argument doesn't hold immutability for its whole scope, only
+    // until it drops. `NonNull` is also covariant over `T`, just like we would have
+    // with `&T`. `NonNull` is preferable over `*const T` to allow for niche optimization.
+    data: NonNull<T>,
+    inner_lock: &'a sys::MovableRwLock,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -511,12 +517,21 @@ impl<T> From<T> for RwLock<T> {
 }
 
 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+    /// Create a new instance of `RwLockReadGuard<T>` from a `RwLock<T>`.
+    // SAFETY: if and only if `lock.inner.read()` (or `lock.inner.try_read()`) has been
+    // successfully called from the same thread before instantiating this object.
     unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockReadGuard<'rwlock, T>> {
-        poison::map_result(lock.poison.borrow(), |()| RwLockReadGuard { lock })
+        poison::map_result(lock.poison.borrow(), |()| RwLockReadGuard {
+            data: NonNull::new_unchecked(lock.data.get()),
+            inner_lock: &lock.inner,
+        })
     }
 }
 
 impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+    /// Create a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
+    // SAFETY: if and only if `lock.inner.write()` (or `lock.inner.try_write()`) has been
+    // successfully called from the same thread before instantiating this object.
     unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
         poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
     }
@@ -555,7 +570,8 @@ impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
     type Target = T;
 
     fn deref(&self) -> &T {
-        unsafe { &*self.lock.data.get() }
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+        unsafe { self.data.as_ref() }
     }
 }
 
@@ -564,6 +580,7 @@ impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
     type Target = T;
 
     fn deref(&self) -> &T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
         unsafe { &*self.lock.data.get() }
     }
 }
@@ -571,6 +588,7 @@ impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
     fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
         unsafe { &mut *self.lock.data.get() }
     }
 }
@@ -578,8 +596,9 @@ impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
     fn drop(&mut self) {
+        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
         unsafe {
-            self.lock.inner.read_unlock();
+            self.inner_lock.read_unlock();
         }
     }
 }
@@ -588,6 +607,7 @@ impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
 impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
     fn drop(&mut self) {
         self.lock.poison.done(&self.poison);
+        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
         unsafe {
             self.lock.inner.write_unlock();
         }
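The covariance that the new field comment relies on is what the `test_read_guard_covariance` test below exercises. A standalone sketch of the same property with a toy guard type (`Guard` and `shorten` are illustrative, not std items):

```
use std::marker::PhantomData;
use std::ptr::NonNull;

// A toy guard mirroring the new layout: `NonNull<T>` for the data plus a
// lifetime tied to the lock. `NonNull<T>` is covariant over `T`, so the
// guard is covariant, just as it would be with `&'a T`.
struct Guard<'a, T: ?Sized> {
    data: NonNull<T>,
    _lock: PhantomData<&'a T>,
}

// This compiles only because `Guard` is covariant over `T`: a guard over a
// longer-lived reference coerces to a guard over a shorter-lived one.
fn shorten<'short, 'long: 'short>(g: Guard<'short, &'long i32>) -> Guard<'short, &'short i32> {
    g
}

fn main() {
    let x = 5;
    let outer: &i32 = &x;
    let g = Guard { data: NonNull::from(&outer), _lock: PhantomData };
    let _shortened = shorten(g);
}
```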
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index 53aa2b1e38a..08255c985f5 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -1,6 +1,6 @@
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sync::mpsc::channel;
-use crate::sync::{Arc, RwLock, TryLockError};
+use crate::sync::{Arc, RwLock, RwLockReadGuard, TryLockError};
 use crate::thread;
 use rand::{self, Rng};
 
@@ -245,3 +245,15 @@ fn test_get_mut_poison() {
         Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {x:?}"),
     }
 }
+
+#[test]
+fn test_read_guard_covariance() {
+    fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+    let j: i32 = 5;
+    let lock = RwLock::new(&j);
+    {
+        let i = 6;
+        do_stuff(lock.read().unwrap(), &i);
+    }
+    drop(lock);
+}
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index 3792a3820a5..ea24fedd0eb 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -1,13 +1,16 @@
 #![allow(unused)]
 
+use crate::arch::asm;
 use crate::cell::UnsafeCell;
+use crate::cmp;
+use crate::convert::TryInto;
 use crate::mem;
 use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut};
 use crate::ptr::{self, NonNull};
 use crate::slice;
 use crate::slice::SliceIndex;
 
-use super::super::mem::is_user_range;
+use super::super::mem::{is_enclave_range, is_user_range};
 use fortanix_sgx_abi::*;
 
 /// A type that can be safely read from or written to userspace.
@@ -210,7 +213,9 @@ where
         unsafe {
             // Mustn't call alloc with size 0.
             let ptr = if size > 0 {
-                rtunwrap!(Ok, super::alloc(size, T::align_of())) as _
+                // `copy_to_userspace` is more efficient when data is 8-byte aligned
+                let alignment = cmp::max(T::align_of(), 8);
+                rtunwrap!(Ok, super::alloc(size, alignment)) as _
             } else {
                 T::align_of() as _ // dangling pointer ok for size 0
             };
@@ -225,13 +230,9 @@ where
     /// Copies `val` into freshly allocated space in user memory.
     pub fn new_from_enclave(val: &T) -> Self {
         unsafe {
-            let ret = Self::new_uninit_bytes(mem::size_of_val(val));
-            ptr::copy(
-                val as *const T as *const u8,
-                ret.0.as_ptr() as *mut u8,
-                mem::size_of_val(val),
-            );
-            ret
+            let mut user = Self::new_uninit_bytes(mem::size_of_val(val));
+            user.copy_from_enclave(val);
+            user
         }
     }
 
@@ -304,6 +305,105 @@ where
     }
 }
 
+/// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`
+///
+/// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
+///  - preceded by the VERW instruction and followed by the MFENCE; LFENCE instruction sequence
+///  - or are in multiples of 8 bytes, aligned to an 8-byte boundary
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in enclave memory
+/// * The `dst` memory range is not in user memory
+///
+/// # References
+///  - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00615.html
+///  - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/processor-mmio-stale-data-vulnerabilities.html#inpage-nav-3-2-2
+pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+    unsafe fn copy_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+        unsafe {
+            let mut seg_sel: u16 = 0;
+            for off in 0..len {
+                asm!("
+                    mov %ds, ({seg_sel})
+                    verw ({seg_sel})
+                    movb {val}, ({dst})
+                    mfence
+                    lfence
+                    ",
+                    val = in(reg_byte) *src.offset(off as isize),
+                    dst = in(reg) dst.offset(off as isize),
+                    seg_sel = in(reg) &mut seg_sel,
+                    options(nostack, att_syntax)
+                );
+            }
+        }
+    }
+
+    unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+        unsafe {
+            asm!(
+                "rep movsq (%rsi), (%rdi)",
+                inout("rcx") len / 8 => _,
+                inout("rdi") dst => _,
+                inout("rsi") src => _,
+                options(att_syntax, nostack, preserves_flags)
+            );
+        }
+    }
+    assert!(!src.is_null());
+    assert!(!dst.is_null());
+    assert!(is_enclave_range(src, len));
+    assert!(is_user_range(dst, len));
+    assert!(len < isize::MAX as usize);
+    assert!(!(src as usize).overflowing_add(len).1);
+    assert!(!(dst as usize).overflowing_add(len).1);
+
+    if len < 8 {
+        // Can't align on 8 byte boundary: copy safely byte per byte
+        unsafe {
+            copy_bytewise_to_userspace(src, dst, len);
+        }
+    } else if len % 8 == 0 && dst as usize % 8 == 0 {
+        // Copying 8-byte aligned quadwords: copy quad word per quad word
+        unsafe {
+            copy_aligned_quadwords_to_userspace(src, dst, len);
+        }
+    } else {
+        // Split copies into three parts:
+        //   +--------+
+        //   | small0 | Chunk smaller than 8 bytes
+        //   +--------+
+        //   |   big  | Chunk 8-byte aligned, and size a multiple of 8 bytes
+        //   +--------+
+        //   | small1 | Chunk smaller than 8 bytes
+        //   +--------+
+
+        unsafe {
+            // Copy small0
+            let small0_size = (8 - dst as usize % 8) as u8;
+            let small0_src = src;
+            let small0_dst = dst;
+            copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+
+            // Copy big
+            let small1_size = ((len - small0_size as usize) % 8) as u8;
+            let big_size = len - small0_size as usize - small1_size as usize;
+            let big_src = src.offset(small0_size as _);
+            let big_dst = dst.offset(small0_size as _);
+            copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+
+            // Copy small1
+            let small1_src = src.offset(big_size as isize + small0_size as isize);
+            let small1_dst = dst.offset(big_size as isize + small0_size as isize);
+            copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+        }
+    }
+}
+
 #[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: ?Sized> UserRef<T>
 where
@@ -352,7 +452,7 @@ where
     pub fn copy_from_enclave(&mut self, val: &T) {
         unsafe {
             assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get()));
-            ptr::copy(
+            copy_to_userspace(
                 val as *const T as *const u8,
                 self.0.get() as *mut T as *mut u8,
                 mem::size_of_val(val),
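The head/middle/tail split is pure arithmetic and can be sanity-checked on any host, with no SGX or inline assembly. A sketch of just the size computation, asserting that the three chunks partition the range and that the middle chunk is 8-byte aligned:

```
// Host-side sketch of the size arithmetic in `copy_to_userspace`: split a
// `(dst, len)` range into an unaligned head, an 8-byte-aligned middle that
// is a whole number of quadwords, and a small tail.
fn split(dst: usize, len: usize) -> (usize, usize, usize) {
    let small0 = 8 - dst % 8; // head (8 bytes if `dst` is already aligned)
    let small1 = (len - small0) % 8; // tail
    let big = len - small0 - small1; // aligned middle
    (small0, big, small1)
}

fn main() {
    for dst in 0..16 {
        for len in 8..64 {
            // `len < 8` and the fully aligned case take other branches above.
            if len % 8 == 0 && dst % 8 == 0 {
                continue;
            }
            let (small0, big, small1) = split(dst, len);
            assert_eq!(small0 + big + small1, len);
            assert_eq!((dst + small0) % 8, 0); // middle starts aligned
            assert_eq!(big % 8, 0); // middle is whole quadwords
        }
    }
    println!("split arithmetic verified");
}
```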
diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs
index 2f99abba776..79d1db5e1c5 100644
--- a/library/std/src/sys/sgx/abi/usercalls/mod.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs
@@ -6,6 +6,8 @@ use crate::time::{Duration, Instant};
 pub(crate) mod alloc;
 #[macro_use]
 pub(crate) mod raw;
+#[cfg(test)]
+mod tests;
 
 use self::raw::*;
 
diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
new file mode 100644
index 00000000000..cbf7d7d54f7
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -0,0 +1,30 @@
+use super::alloc::copy_to_userspace;
+use super::alloc::User;
+
+#[test]
+fn test_copy_function() {
+    let mut src = [0u8; 100];
+    let mut dst = User::<[u8]>::uninitialized(100);
+
+    for i in 0..src.len() {
+        src[i] = i as _;
+    }
+
+    for size in 0..48 {
+        // For all possible alignment
+        for offset in 0..8 {
+            // overwrite complete dst
+            dst.copy_from_enclave(&[0u8; 100]);
+
+            // Copy src[0..size] to dst + offset
+            unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+
+            // Verify copy
+            for byte in 0..size {
+                unsafe {
+                    assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+                }
+            }
+        }
+    }
+}
diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs
index 8d05cb44b94..ab516a7f76d 100644
--- a/library/std/src/sys/unix/futex.rs
+++ b/library/std/src/sys/unix/futex.rs
@@ -5,6 +5,7 @@
     target_os = "freebsd",
     target_os = "openbsd",
     target_os = "dragonfly",
+    target_os = "fuchsia",
 ))]
 
 use crate::sync::atomic::AtomicU32;
@@ -237,3 +238,52 @@ pub fn futex_wake(futex: &AtomicU32) -> bool {
 pub fn futex_wake_all(futex: &AtomicU32) {
     unsafe { emscripten_futex_wake(futex, i32::MAX) };
 }
+
+#[cfg(target_os = "fuchsia")]
+mod zircon {
+    type zx_time_t = i64;
+    type zx_futex_t = crate::sync::atomic::AtomicU32;
+    type zx_handle_t = u32;
+    type zx_status_t = i32;
+
+    pub const ZX_HANDLE_INVALID: zx_handle_t = 0;
+    pub const ZX_ERR_TIMED_OUT: zx_status_t = -21;
+    pub const ZX_TIME_INFINITE: zx_time_t = zx_time_t::MAX;
+
+    extern "C" {
+        pub fn zx_futex_wait(
+            value_ptr: *const zx_futex_t,
+            current_value: zx_futex_t,
+            new_futex_owner: zx_handle_t,
+            deadline: zx_time_t,
+        ) -> zx_status_t;
+        pub fn zx_futex_wake(value_ptr: *const zx_futex_t, wake_count: u32) -> zx_status_t;
+        pub fn zx_clock_get_monotonic() -> zx_time_t;
+    }
+}
+
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+    use crate::convert::TryFrom;
+
+    // Sleep forever if the timeout is longer than fits in an i64.
+    let deadline = timeout
+        .and_then(|d| {
+            i64::try_from(d.as_nanos())
+                .ok()?
+                .checked_add(unsafe { zircon::zx_clock_get_monotonic() })
+        })
+        .unwrap_or(zircon::ZX_TIME_INFINITE);
+
+    unsafe {
+        zircon::zx_futex_wait(futex, AtomicU32::new(expected), zircon::ZX_HANDLE_INVALID, deadline)
+            != zircon::ZX_ERR_TIMED_OUT
+    }
+}
+
+// Fuchsia doesn't tell us how many threads are woken up, so this always returns false.
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+    unsafe { zircon::zx_futex_wake(futex, 1) };
+    false
+}
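The deadline computation saturates instead of overflowing. A host-side sketch of the same logic, with a stubbed `monotonic_nanos` standing in for `zx_clock_get_monotonic` (the stub and its fixed return value are illustrative):

```
use std::convert::TryFrom;
use std::time::Duration;

const ZX_TIME_INFINITE: i64 = i64::MAX;

// Stand-in for `zx_clock_get_monotonic`: nanoseconds since boot. The fixed
// value here (1s after boot) is purely illustrative.
fn monotonic_nanos() -> i64 {
    1_000_000_000
}

// Mirrors the `futex_wait` deadline computation: a timeout that does not
// fit in an i64 as nanoseconds, or that overflows when added to "now",
// saturates to an infinite deadline.
fn deadline(timeout: Option<Duration>) -> i64 {
    timeout
        .and_then(|d| i64::try_from(d.as_nanos()).ok()?.checked_add(monotonic_nanos()))
        .unwrap_or(ZX_TIME_INFINITE)
}

fn main() {
    assert_eq!(deadline(None), ZX_TIME_INFINITE);
    assert_eq!(deadline(Some(Duration::from_nanos(500))), 1_000_000_500);
    // Hundreds of years of nanoseconds overflow an i64: sleep forever.
    assert_eq!(deadline(Some(Duration::from_secs(u64::MAX))), ZX_TIME_INFINITE);
}
```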
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 916e898d890..98afee69ba6 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -1,5 +1,5 @@
 use crate::cell::UnsafeCell;
-use crate::mem::MaybeUninit;
+use crate::mem::{forget, MaybeUninit};
 use crate::sys::cvt_nz;
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
@@ -23,6 +23,24 @@ impl LazyInit for Mutex {
         unsafe { mutex.init() };
         mutex
     }
+
+    fn destroy(mutex: Box<Self>) {
+        // We're not allowed to pthread_mutex_destroy a locked mutex,
+        // so check first if it's unlocked.
+        if unsafe { mutex.try_lock() } {
+            unsafe { mutex.unlock() };
+            drop(mutex);
+        } else {
+            // The mutex is locked. This happens if a MutexGuard is leaked.
+            // In this case, we just leak the Mutex too.
+            forget(mutex);
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
+    }
 }
 
 impl Mutex {
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
index 75e5759c787..adfe2a88338 100644
--- a/library/std/src/sys/unix/locks/pthread_rwlock.rs
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -1,4 +1,5 @@
 use crate::cell::UnsafeCell;
+use crate::mem::forget;
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
@@ -17,6 +18,21 @@ impl LazyInit for RwLock {
     fn init() -> Box<Self> {
         Box::new(Self::new())
     }
+
+    fn destroy(mut rwlock: Box<Self>) {
+        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
+        // so check first if it's unlocked.
+        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
+            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
+            // In this case, we just leak the RwLock too.
+            forget(rwlock);
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
+    }
 }
 
 impl RwLock {
diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs
index 76278ae30f1..9f4d4f7e736 100644
--- a/library/std/src/sys/unix/thread_parker.rs
+++ b/library/std/src/sys/unix/thread_parker.rs
@@ -7,6 +7,7 @@
     target_os = "freebsd",
     target_os = "openbsd",
     target_os = "dragonfly",
+    target_os = "fuchsia",
 )))]
 
 use crate::cell::UnsafeCell;
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index 157d8a044d3..e9b10069077 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -13,6 +13,7 @@ use crate::sys::handle::Handle;
 use crate::sys::time::SystemTime;
 use crate::sys::{c, cvt};
 use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::thread;
 
 use super::path::maybe_verbatim;
 use super::to_u16s;
@@ -679,7 +680,7 @@ impl<'a> DirBuffIter<'a> {
     }
 }
 impl<'a> Iterator for DirBuffIter<'a> {
-    type Item = &'a [u16];
+    type Item = (&'a [u16], bool);
     fn next(&mut self) -> Option<Self::Item> {
         use crate::mem::size_of;
         let buffer = &self.buffer?[self.cursor..];
@@ -688,14 +689,16 @@ impl<'a> Iterator for DirBuffIter<'a> {
         // SAFETY: The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the
         // last field (the file name) is unsized. So an offset has to be
         // used to get the file name slice.
-        let (name, next_entry) = unsafe {
+        let (name, is_directory, next_entry) = unsafe {
             let info = buffer.as_ptr().cast::<c::FILE_ID_BOTH_DIR_INFO>();
             let next_entry = (*info).NextEntryOffset as usize;
             let name = crate::slice::from_raw_parts(
                 (*info).FileName.as_ptr().cast::<u16>(),
                 (*info).FileNameLength as usize / size_of::<u16>(),
             );
-            (name, next_entry)
+            let is_directory = ((*info).FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
+
+            (name, is_directory, next_entry)
         };
 
         if next_entry == 0 {
@@ -708,7 +711,7 @@ impl<'a> Iterator for DirBuffIter<'a> {
         const DOT: u16 = b'.' as u16;
         match name {
             [DOT] | [DOT, DOT] => self.next(),
-            _ => Some(name),
+            _ => Some((name, is_directory)),
         }
     }
 }
@@ -993,89 +996,92 @@ pub fn remove_dir_all(path: &Path) -> io::Result<()> {
     if (file.basic_info()?.FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) == 0 {
         return Err(io::Error::from_raw_os_error(c::ERROR_DIRECTORY as _));
     }
-    let mut delete: fn(&File) -> io::Result<()> = File::posix_delete;
-    let result = match delete(&file) {
-        Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {
-            match remove_dir_all_recursive(&file, delete) {
-                // Return unexpected errors.
-                Err(e) if e.kind() != io::ErrorKind::DirectoryNotEmpty => return Err(e),
-                result => result,
-            }
-        }
-        // If POSIX delete is not supported for this filesystem then fallback to win32 delete.
-        Err(e)
-            if e.raw_os_error() == Some(c::ERROR_NOT_SUPPORTED as i32)
-                || e.raw_os_error() == Some(c::ERROR_INVALID_PARAMETER as i32) =>
-        {
-            delete = File::win32_delete;
-            Err(e)
-        }
-        result => result,
-    };
-    if result.is_ok() {
-        Ok(())
-    } else {
-        // This is a fallback to make sure the directory is actually deleted.
-        // Otherwise this function is prone to failing with `DirectoryNotEmpty`
-        // due to possible delays between marking a file for deletion and the
-        // file actually being deleted from the filesystem.
-        //
-        // So we retry a few times before giving up.
-        for _ in 0..5 {
-            match remove_dir_all_recursive(&file, delete) {
-                Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {}
-                result => return result,
+
+    match remove_dir_all_iterative(&file, File::posix_delete) {
+        Err(e) => {
+            if let Some(code) = e.raw_os_error() {
+                match code as u32 {
+                    // If POSIX delete is not supported for this filesystem then fallback to win32 delete.
+                    c::ERROR_NOT_SUPPORTED
+                    | c::ERROR_INVALID_FUNCTION
+                    | c::ERROR_INVALID_PARAMETER => {
+                        remove_dir_all_iterative(&file, File::win32_delete)
+                    }
+                    _ => Err(e),
+                }
+            } else {
+                Err(e)
             }
         }
-        // Try one last time.
-        delete(&file)
+        ok => ok,
     }
 }
 
-fn remove_dir_all_recursive(f: &File, delete: fn(&File) -> io::Result<()>) -> io::Result<()> {
+fn remove_dir_all_iterative(f: &File, delete: fn(&File) -> io::Result<()>) -> io::Result<()> {
+    // When deleting files we may loop this many times when certain error conditions occur.
+    // This allows remove_dir_all to succeed when the error is temporary.
+    const MAX_RETRIES: u32 = 10;
+
     let mut buffer = DirBuff::new();
-    let mut restart = true;
-    // Fill the buffer and iterate the entries.
-    while f.fill_dir_buff(&mut buffer, restart)? {
-        for name in buffer.iter() {
-            // Open the file without following symlinks and try deleting it.
-            // We try opening will all needed permissions and if that is denied
-            // fallback to opening without `FILE_LIST_DIRECTORY` permission.
-            // Note `SYNCHRONIZE` permission is needed for synchronous access.
-            let mut result =
-                open_link_no_reparse(&f, name, c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY);
-            if matches!(&result, Err(e) if e.kind() == io::ErrorKind::PermissionDenied) {
-                result = open_link_no_reparse(&f, name, c::SYNCHRONIZE | c::DELETE);
+    let mut dirlist = vec![f.duplicate()?];
+
+    // FIXME: This is a hack so we can push to the dirlist vec after borrowing from it.
+    fn copy_handle(f: &File) -> mem::ManuallyDrop<File> {
+        unsafe { mem::ManuallyDrop::new(File::from_raw_handle(f.as_raw_handle())) }
+    }
+
+    while let Some(dir) = dirlist.last() {
+        let dir = copy_handle(dir);
+
+        // Fill the buffer and iterate the entries.
+        let more_data = dir.fill_dir_buff(&mut buffer, false)?;
+        for (name, is_directory) in buffer.iter() {
+            if is_directory {
+                let child_dir = open_link_no_reparse(
+                    &dir,
+                    name,
+                    c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY,
+                )?;
+                dirlist.push(child_dir);
+            } else {
+                for i in 1..=MAX_RETRIES {
+                    let result = open_link_no_reparse(&dir, name, c::SYNCHRONIZE | c::DELETE);
+                    match result {
+                        Ok(f) => delete(&f)?,
+                        // Already deleted, so skip.
+                        Err(e) if e.kind() == io::ErrorKind::NotFound => break,
+                        // Retry a few times if the file is locked or a delete is already in progress.
+                        Err(e)
+                            if i < MAX_RETRIES
+                                && (e.raw_os_error() == Some(c::ERROR_DELETE_PENDING as _)
+                                    || e.raw_os_error()
+                                        == Some(c::ERROR_SHARING_VIOLATION as _)) => {}
+                        // Otherwise return the error.
+                        Err(e) => return Err(e),
+                    }
+                    thread::yield_now();
+                }
             }
-            match result {
-                Ok(file) => match delete(&file) {
-                    Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {
-                        // Iterate the directory's files.
-                        // Ignore `DirectoryNotEmpty` errors here. They will be
-                        // caught when `remove_dir_all` tries to delete the top
-                        // level directory. It can then decide if to retry or not.
-                        match remove_dir_all_recursive(&file, delete) {
-                            Err(e) if e.kind() == io::ErrorKind::DirectoryNotEmpty => {}
-                            result => result?,
+        }
+        // If there were no more files then delete the directory.
+        if !more_data {
+            if let Some(dir) = dirlist.pop() {
+                // Retry deleting a few times in case we need to wait for a file to be deleted.
+                for i in 1..=MAX_RETRIES {
+                    let result = delete(&dir);
+                    if let Err(e) = result {
+                        if i == MAX_RETRIES || e.kind() != io::ErrorKind::DirectoryNotEmpty {
+                            return Err(e);
                         }
+                        thread::yield_now();
+                    } else {
+                        break;
                     }
-                    result => result?,
-                },
-                // Ignore error if a delete is already in progress or the file
-                // has already been deleted. It also ignores sharing violations
-                // (where a file is locked by another process) as these are
-                // usually temporary.
-                Err(e)
-                    if e.raw_os_error() == Some(c::ERROR_DELETE_PENDING as _)
-                        || e.kind() == io::ErrorKind::NotFound
-                        || e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as _) => {}
-                Err(e) => return Err(e),
+                }
             }
         }
-        // Continue reading directory entries without restarting from the beginning,
-        restart = false;
     }
-    delete(&f)
+    Ok(())
 }
 
 pub fn readlink(path: &Path) -> io::Result<PathBuf> {
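Stripped of the Windows-specific handles and retry loops, the new iterative strategy looks like the following portable sketch built on `std::fs` (simplified: no retries, and symlinks are deleted as files rather than followed):

```
use std::fs;
use std::io;
use std::path::{Path, PathBuf};

// Keep a stack of directories; delete files as they are found, push
// subdirectories, and remove a directory once a full pass over it finds
// no remaining children.
fn remove_dir_all_iterative(root: &Path) -> io::Result<()> {
    let mut dirlist: Vec<PathBuf> = vec![root.to_path_buf()];
    while let Some(dir) = dirlist.last().cloned() {
        let mut found_subdir = false;
        for entry in fs::read_dir(&dir)? {
            let entry = entry?;
            if entry.file_type()?.is_dir() {
                // Revisit this directory after its children are gone.
                dirlist.push(entry.path());
                found_subdir = true;
            } else {
                fs::remove_file(entry.path())?;
            }
        }
        if !found_subdir {
            dirlist.pop();
            fs::remove_dir(&dir)?;
        }
    }
    Ok(())
}
```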
diff --git a/library/std/src/sys_common/lazy_box.rs b/library/std/src/sys_common/lazy_box.rs
index 647c13d2437..63c3316bdeb 100644
--- a/library/std/src/sys_common/lazy_box.rs
+++ b/library/std/src/sys_common/lazy_box.rs
@@ -21,8 +21,21 @@ pub(crate) trait LazyInit {
     ///
     /// It might be called more than once per LazyBox, as multiple threads
     /// might race to initialize it concurrently, each constructing and initializing
-    /// their own box. (All but one of them will be destroyed right after.)
+    /// their own box. All but one of them will be passed to `cancel_init` right after.
     fn init() -> Box<Self>;
+
+    /// Any surplus boxes from `init()` that lost the initialization race
+    /// are passed to this function for disposal.
+    ///
+    /// The default implementation calls `destroy()`.
+    fn cancel_init(x: Box<Self>) {
+        Self::destroy(x);
+    }
+
+    /// This is called to destroy a used box.
+    ///
+    /// The default implementation just drops it.
+    fn destroy(_: Box<Self>) {}
 }
 
 impl<T: LazyInit> LazyBox<T> {
@@ -45,7 +58,7 @@ impl<T: LazyInit> LazyBox<T> {
             Err(ptr) => {
                 // Lost the race to another thread.
                 // Drop the box we created, and use the one from the other thread instead.
-                drop(unsafe { Box::from_raw(new_ptr) });
+                T::cancel_init(unsafe { Box::from_raw(new_ptr) });
                 ptr
             }
         }
@@ -71,7 +84,7 @@ impl<T: LazyInit> Drop for LazyBox<T> {
     fn drop(&mut self) {
         let ptr = *self.ptr.get_mut();
         if !ptr.is_null() {
-            drop(unsafe { Box::from_raw(ptr) });
+            T::destroy(unsafe { Box::from_raw(ptr) });
         }
     }
 }
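`LazyInit` is a private std trait, so its contract can only be illustrated with a standalone copy. The `Lock` type below is a toy stand-in for the pthread types, showing why a lost-race box (`cancel_init`) is safe to drop while a used box (`destroy`) may need to be leaked:

```
// Standalone sketch of the `LazyInit` contract: `cancel_init` disposes of a
// box that lost the initialization race (never observed by other threads),
// while `destroy` tears down the box that was actually published.
trait LazyInit {
    fn init() -> Box<Self>;
    fn cancel_init(x: Box<Self>) {
        Self::destroy(x); // default: treat it like a used box
    }
    fn destroy(_: Box<Self>) {} // default: just drop it
}

struct Lock {
    locked: bool,
}

impl LazyInit for Lock {
    fn init() -> Box<Self> {
        Box::new(Lock { locked: false })
    }
    fn destroy(lock: Box<Self>) {
        // A used lock may have been leaked while held; mimic the pthread
        // code by leaking it rather than destroying a locked lock.
        if lock.locked {
            std::mem::forget(lock);
        }
    }
    // `cancel_init` keeps the default: a box that lost the race was never
    // locked, so plain dropping is fine.
}

fn main() {
    let winner = Lock::init();
    let loser = Lock::init();
    Lock::cancel_init(loser); // surplus box from a lost race
    Lock::destroy(winner); // published box, at LazyBox drop time
}
```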
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
index c789a388e05..7e8bfb2565e 100644
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ b/library/std/src/sys_common/thread_parker/mod.rs
@@ -6,6 +6,7 @@ cfg_if::cfg_if! {
         target_os = "freebsd",
         target_os = "openbsd",
         target_os = "dragonfly",
+        target_os = "fuchsia",
     ))] {
         mod futex;
         pub use futex::Parker;