author     Corey Farwell <coreyf@rwell.org>    2020-12-31 23:27:33 -0500
committer  Corey Farwell <coreyf@rwell.org>    2020-12-31 23:27:33 -0500
commit     d482de30ea70d537dced8ec04a3903e3264cf106 (patch)
tree       67d6cd380ef7a66e785a54993bb0ca93b07b43ec /library/std/src
parent     26cc060756d0456b17fdc53ac5d34e7f7bdc873d (diff)
parent     99ad5a1a2824fea1ecf60068fd3636beae7ea2da (diff)
Merge remote-tracking branch 'origin/master' into frewsxcv-san
Diffstat (limited to 'library/std/src')
-rw-r--r--  library/std/src/alloc.rs                          |   2
-rw-r--r--  library/std/src/collections/hash/map.rs           |  32
-rw-r--r--  library/std/src/collections/hash/set.rs           |  19
-rw-r--r--  library/std/src/f32.rs                            |  41
-rw-r--r--  library/std/src/f64.rs                            |  41
-rw-r--r--  library/std/src/ffi/os_str.rs                     |   5
-rw-r--r--  library/std/src/io/stdio.rs                       |  64
-rw-r--r--  library/std/src/keyword_docs.rs                   |  11
-rw-r--r--  library/std/src/lazy.rs                           |  55
-rw-r--r--  library/std/src/lib.rs                            |  13
-rw-r--r--  library/std/src/macros.rs                         |   2
-rw-r--r--  library/std/src/net/ip.rs                         |  49
-rw-r--r--  library/std/src/panicking.rs                      |  10
-rw-r--r--  library/std/src/path/tests.rs                     |   8
-rw-r--r--  library/std/src/prelude/mod.rs                    |  25
-rw-r--r--  library/std/src/primitive_docs.rs                 |   9
-rw-r--r--  library/std/src/sync/mpsc/blocking.rs             |   6
-rw-r--r--  library/std/src/sync/mpsc/oneshot.rs              |  14
-rw-r--r--  library/std/src/sync/mpsc/shared.rs               |  11
-rw-r--r--  library/std/src/sync/mpsc/stream.rs               |   9
-rw-r--r--  library/std/src/sync/once.rs                      |  18
-rw-r--r--  library/std/src/sys/sgx/abi/mod.rs                |   8
-rw-r--r--  library/std/src/sys/sgx/waitqueue/spin_mutex.rs   |   2
-rw-r--r--  library/std/src/sys/unix/ext/net/ancillary.rs     |   6
-rw-r--r--  library/std/src/sys/unix/fd.rs                    |   9
-rw-r--r--  library/std/src/sys/unix/fd/tests.rs              |   2
-rw-r--r--  library/std/src/sys/unix/kernel_copy.rs           |  86
-rw-r--r--  library/std/src/sys/unix/kernel_copy/tests.rs     |  19
-rw-r--r--  library/std/src/sys/unix/time.rs                  |   2
-rw-r--r--  library/std/src/sys/windows/c.rs                  |  47
-rw-r--r--  library/std/src/sys/windows/compat.rs             |   1
-rw-r--r--  library/std/src/sys/windows/mod.rs                |   1
-rw-r--r--  library/std/src/sys/windows/mutex.rs              |   6
-rw-r--r--  library/std/src/sys/windows/path.rs               | 174
-rw-r--r--  library/std/src/sys/windows/path/tests.rs         |  39
-rw-r--r--  library/std/src/sys/windows/thread_local_key.rs   |  11
-rw-r--r--  library/std/src/sys/windows/thread_parker.rs      | 250
-rw-r--r--  library/std/src/sys_common/condvar/check.rs       |   6
-rw-r--r--  library/std/src/sys_common/net.rs                 |   5
-rw-r--r--  library/std/src/sys_common/remutex.rs             |  55
-rw-r--r--  library/std/src/sys_common/remutex/tests.rs       |  35
-rw-r--r--  library/std/src/sys_common/thread_local_key.rs    |   8
-rw-r--r--  library/std/src/sys_common/thread_parker/futex.rs |   2
-rw-r--r--  library/std/src/sys_common/thread_parker/mod.rs   |   2
44 files changed, 834 insertions, 386 deletions
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index 819d57a934d..8491ff40033 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -1,4 +1,4 @@
-//! Memory allocation APIs
+//! Memory allocation APIs.
 //!
 //! In a given program, the standard library has one “global” memory allocator
 //! that is used for example by `Box<T>` and `Vec<T>`.
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 27d90e66137..0680b1fc329 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -195,7 +195,6 @@ use crate::sys;
 /// // use the values stored in map
 /// ```
 
-#[derive(Clone)]
 #[cfg_attr(not(test), rustc_diagnostic_item = "hashmap_type")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct HashMap<K, V, S = RandomState> {
@@ -449,6 +448,7 @@ impl<K, V, S> HashMap<K, V, S> {
     /// a.insert(1, "a");
     /// assert_eq!(a.len(), 1);
     /// ```
+    #[doc(alias = "length")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn len(&self) -> usize {
         self.base.len()
@@ -1030,6 +1030,24 @@ where
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> Clone for HashMap<K, V, S>
+where
+    K: Clone,
+    V: Clone,
+    S: Clone,
+{
+    #[inline]
+    fn clone(&self) -> Self {
+        Self { base: self.base.clone() }
+    }
+
+    #[inline]
+    fn clone_from(&mut self, other: &Self) {
+        self.base.clone_from(&other.base);
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
 impl<K, V, S> PartialEq for HashMap<K, V, S>
 where
     K: Eq + Hash,
@@ -2219,14 +2237,16 @@ impl<'a, K, V> Entry<'a, K, V> {
         }
     }
 
-    /// Ensures a value is in the entry by inserting, if empty, the result of the default function,
-    /// which takes the key as its argument, and returns a mutable reference to the value in the
-    /// entry.
+    /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+    /// This method allows for generating key-derived values for insertion by providing the default
+    /// function with a reference to the key that was moved during the `.entry(key)` method call.
+    ///
+    /// The reference to the moved key is provided so that cloning or copying the key is
+    /// unnecessary, unlike with `.or_insert_with(|| ... )`.
     ///
     /// # Examples
     ///
     /// ```
-    /// #![feature(or_insert_with_key)]
     /// use std::collections::HashMap;
     ///
     /// let mut map: HashMap<&str, usize> = HashMap::new();
@@ -2236,7 +2256,7 @@ impl<'a, K, V> Entry<'a, K, V> {
     /// assert_eq!(map["poneyland"], 9);
     /// ```
     #[inline]
-    #[unstable(feature = "or_insert_with_key", issue = "71024")]
+    #[stable(feature = "or_insert_with_key", since = "1.50.0")]
     pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V {
         match self {
             Occupied(entry) => entry.into_mut(),
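
The new manual `Clone` impl also provides `clone_from`, which can reuse the destination map's storage, and `or_insert_with_key` is stable as of 1.50. A minimal sketch of both (the values here are made up for illustration):

```
use std::collections::HashMap;

fn main() {
    let mut lengths: HashMap<&str, usize> = HashMap::new();
    // or_insert_with_key hands the closure a reference to the key that was
    // moved into entry(), so deriving the value needs no clone of the key.
    lengths.entry("poneyland").or_insert_with_key(|key| key.len());
    assert_eq!(lengths["poneyland"], 9);

    // clone_from can reuse `target`'s existing allocation rather than
    // building a fresh table the way `clone` would.
    let mut target: HashMap<&str, usize> = HashMap::new();
    target.clone_from(&lengths);
    assert_eq!(target["poneyland"], 9);
}
```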
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index 3299fd12e02..f49e5801c35 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -106,7 +106,6 @@ use super::map::{map_try_reserve_error, RandomState};
 /// [`HashMap`]: crate::collections::HashMap
 /// [`RefCell`]: crate::cell::RefCell
 /// [`Cell`]: crate::cell::Cell
-#[derive(Clone)]
 #[cfg_attr(not(test), rustc_diagnostic_item = "hashset_type")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct HashSet<T, S = RandomState> {
@@ -200,6 +199,7 @@ impl<T, S> HashSet<T, S> {
     /// v.insert(1);
     /// assert_eq!(v.len(), 1);
     /// ```
+    #[doc(alias = "length")]
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn len(&self) -> usize {
@@ -933,6 +933,23 @@ where
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Clone for HashSet<T, S>
+where
+    T: Clone,
+    S: Clone,
+{
+    #[inline]
+    fn clone(&self) -> Self {
+        Self { base: self.base.clone() }
+    }
+
+    #[inline]
+    fn clone_from(&mut self, other: &Self) {
+        self.base.clone_from(&other.base);
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
 impl<T, S> PartialEq for HashSet<T, S>
 where
     T: Eq + Hash,
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
index 09a9b184e3a..c30458c0545 100644
--- a/library/std/src/f32.rs
+++ b/library/std/src/f32.rs
@@ -206,8 +206,10 @@ impl f32 {
     /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
     /// error, yielding a more accurate result than an unfused multiply-add.
     ///
-    /// Using `mul_add` can be more performant than an unfused multiply-add if
-    /// the target architecture has a dedicated `fma` CPU instruction.
+    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
+    /// the target architecture has a dedicated `fma` CPU instruction. However,
+    /// this is not always true, and performance depends heavily on designing
+    /// algorithms with the specific target hardware in mind.
     ///
     /// # Examples
     ///
@@ -877,39 +879,4 @@ impl f32 {
     pub fn atanh(self) -> f32 {
         0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
     }
-
-    /// Restrict a value to a certain interval unless it is NaN.
-    ///
-    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
-    /// less than `min`. Otherwise this returns `self`.
-    ///
-    /// Note that this function returns NaN if the initial value was NaN as
-    /// well.
-    ///
-    /// # Panics
-    ///
-    /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// assert!((-3.0f32).clamp(-2.0, 1.0) == -2.0);
-    /// assert!((0.0f32).clamp(-2.0, 1.0) == 0.0);
-    /// assert!((2.0f32).clamp(-2.0, 1.0) == 1.0);
-    /// assert!((f32::NAN).clamp(-2.0, 1.0).is_nan());
-    /// ```
-    #[must_use = "method returns a new number and does not mutate the original value"]
-    #[stable(feature = "clamp", since = "1.50.0")]
-    #[inline]
-    pub fn clamp(self, min: f32, max: f32) -> f32 {
-        assert!(min <= max);
-        let mut x = self;
-        if x < min {
-            x = min;
-        }
-        if x > max {
-            x = max;
-        }
-        x
-    }
 }
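
A quick way to see the rounding difference this hedging is about (the same applies to the `f64` change below); whether the fused form is also *faster* depends on the target having an FMA instruction:

```
fn main() {
    let a = 1.0_f64 + f64::EPSILON;
    // Unfused: a * a is rounded once to an f64, so the subtraction cancels.
    let unfused = a * a - a * a;
    // Fused: mul_add computes a * a exactly before the single rounding, so
    // the epsilon-squared term survives the subtraction.
    let fused = a.mul_add(a, -(a * a));
    assert_eq!(unfused, 0.0);
    assert_eq!(fused, f64::EPSILON * f64::EPSILON);
}
```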
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
index 64bb7cd9fd1..f4cc53979d1 100644
--- a/library/std/src/f64.rs
+++ b/library/std/src/f64.rs
@@ -206,8 +206,10 @@ impl f64 {
     /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
     /// error, yielding a more accurate result than an unfused multiply-add.
     ///
-    /// Using `mul_add` can be more performant than an unfused multiply-add if
-    /// the target architecture has a dedicated `fma` CPU instruction.
+    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
+    /// the target architecture has a dedicated `fma` CPU instruction. However,
+    /// this is not always true, and performance depends heavily on designing
+    /// algorithms with the specific target hardware in mind.
     ///
     /// # Examples
     ///
@@ -880,41 +882,6 @@ impl f64 {
         0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
     }
 
-    /// Restrict a value to a certain interval unless it is NaN.
-    ///
-    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
-    /// less than `min`. Otherwise this returns `self`.
-    ///
-    /// Note that this function returns NaN if the initial value was NaN as
-    /// well.
-    ///
-    /// # Panics
-    ///
-    /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// assert!((-3.0f64).clamp(-2.0, 1.0) == -2.0);
-    /// assert!((0.0f64).clamp(-2.0, 1.0) == 0.0);
-    /// assert!((2.0f64).clamp(-2.0, 1.0) == 1.0);
-    /// assert!((f64::NAN).clamp(-2.0, 1.0).is_nan());
-    /// ```
-    #[must_use = "method returns a new number and does not mutate the original value"]
-    #[stable(feature = "clamp", since = "1.50.0")]
-    #[inline]
-    pub fn clamp(self, min: f64, max: f64) -> f64 {
-        assert!(min <= max);
-        let mut x = self;
-        if x < min {
-            x = min;
-        }
-        if x > max {
-            x = max;
-        }
-        x
-    }
-
     // Solaris/Illumos requires a wrapper around log, log2, and log10 functions
     // because of their non-standard behavior (e.g., log(-n) returns -Inf instead
     // of expected NaN).
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index 7e7a28be2b0..2eef4d58507 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -653,6 +653,7 @@ impl OsStr {
     /// let os_str = OsStr::new("foo");
     /// assert_eq!(os_str.len(), 3);
     /// ```
+    #[doc(alias = "length")]
     #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn len(&self) -> usize {
         self.inner.inner.len()
@@ -667,10 +668,10 @@ impl OsStr {
 
     /// Gets the underlying byte representation.
     ///
-    /// Note: it is *crucial* that this API is private, to avoid
+    /// Note: it is *crucial* that this API is not externally public, to avoid
     /// revealing the internal, platform-specific encodings.
     #[inline]
-    fn bytes(&self) -> &[u8] {
+    pub(crate) fn bytes(&self) -> &[u8] {
         unsafe { &*(&self.inner as *const _ as *const [u8]) }
     }
 
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 6ea7704d422..1160011f352 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -9,6 +9,7 @@ use crate::cell::{Cell, RefCell};
 use crate::fmt;
 use crate::io::{self, BufReader, Initializer, IoSlice, IoSliceMut, LineWriter};
 use crate::lazy::SyncOnceCell;
+use crate::pin::Pin;
 use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sync::{Arc, Mutex, MutexGuard};
 use crate::sys::stdio;
@@ -490,7 +491,7 @@ pub struct Stdout {
     // FIXME: this should be LineWriter or BufWriter depending on the state of
     //        stdout (tty or not). Note that if this is not line buffered it
     //        should also flush-on-panic or some form of flush-on-abort.
-    inner: &'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>,
+    inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
 }
 
 /// A locked reference to the `Stdout` handle.
@@ -550,25 +551,29 @@ pub struct StdoutLock<'a> {
 pub fn stdout() -> Stdout {
     static INSTANCE: SyncOnceCell<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> =
         SyncOnceCell::new();
+
+    fn cleanup() {
+        if let Some(instance) = INSTANCE.get() {
+            // Flush the data and disable buffering during shutdown
+            // by replacing the line writer by one with zero
+            // buffering capacity.
+            // We use try_lock() instead of lock(), because someone
+            // might have leaked a StdoutLock, which would
+            // otherwise cause a deadlock here.
+            if let Some(lock) = Pin::static_ref(instance).try_lock() {
+                *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
+            }
+        }
+    }
+
     Stdout {
-        inner: INSTANCE.get_or_init(|| unsafe {
-            let _ = sys_common::at_exit(|| {
-                if let Some(instance) = INSTANCE.get() {
-                    // Flush the data and disable buffering during shutdown
-                    // by replacing the line writer by one with zero
-                    // buffering capacity.
-                    // We use try_lock() instead of lock(), because someone
-                    // might have leaked a StdoutLock, which would
-                    // otherwise cause a deadlock here.
-                    if let Some(lock) = instance.try_lock() {
-                        *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
-                    }
-                }
-            });
-            let r = ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())));
-            r.init();
-            r
-        }),
+        inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
+            || unsafe {
+                let _ = sys_common::at_exit(cleanup);
+                ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())))
+            },
+            |mutex| unsafe { mutex.init() },
+        ),
     }
 }
 
@@ -700,7 +705,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// an error.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
+    inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
 }
 
 /// A locked reference to the `Stderr` handle.
@@ -756,21 +761,16 @@ pub struct StderrLock<'a> {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stderr() -> Stderr {
-    // Note that unlike `stdout()` we don't use `Lazy` here which registers a
-    // destructor. Stderr is not buffered nor does the `stderr_raw` type consume
-    // any owned resources, so there's no need to run any destructors at some
-    // point in the future.
-    //
-    // This has the added benefit of allowing `stderr` to be usable during
-    // process shutdown as well!
+    // Note that unlike `stdout()` we don't use `at_exit` here to register a
+    // destructor. Stderr is not buffered, so there's no need to run a
+    // destructor to flush the buffer.
     static INSTANCE: SyncOnceCell<ReentrantMutex<RefCell<StderrRaw>>> = SyncOnceCell::new();
 
     Stderr {
-        inner: INSTANCE.get_or_init(|| unsafe {
-            let r = ReentrantMutex::new(RefCell::new(stderr_raw()));
-            r.init();
-            r
-        }),
+        inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
+            || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
+            |mutex| unsafe { mutex.init() },
+        ),
     }
 }
 
diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs
index dad3add5c55..417a54e9dff 100644
--- a/library/std/src/keyword_docs.rs
+++ b/library/std/src/keyword_docs.rs
@@ -565,8 +565,12 @@ mod fn_keyword {}
 ///
 /// For more information on for-loops, see the [Rust book] or the [Reference].
 ///
+/// See also, [`loop`], [`while`].
+///
 /// [`in`]: keyword.in.html
 /// [`impl`]: keyword.impl.html
+/// [`loop`]: keyword.loop.html
+/// [`while`]: keyword.while.html
 /// [higher-ranked trait bounds]: ../reference/trait-bounds.html#higher-ranked-trait-bounds
 /// [Rust book]:
 /// ../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
@@ -842,6 +846,8 @@ mod let_keyword {}
 ///
 /// For more information on `while` and loops in general, see the [reference].
 ///
+/// See also, [`for`], [`loop`].
+///
 /// [`for`]: keyword.for.html
 /// [`loop`]: keyword.loop.html
 /// [reference]: ../reference/expressions/loop-expr.html#predicate-loops
@@ -890,6 +896,10 @@ mod while_keyword {}
 ///
 /// For more information on `loop` and loops in general, see the [Reference].
 ///
+/// See also, [`for`], [`while`].
+///
+/// [`for`]: keyword.for.html
+/// [`while`]: keyword.while.html
 /// [Reference]: ../reference/expressions/loop-expr.html
 mod loop_keyword {}
 
@@ -2186,6 +2196,7 @@ mod where_keyword {}
 
 // 2018 Edition keywords
 
+#[doc(alias = "promise")]
 #[doc(keyword = "async")]
 //
 /// Return a [`Future`] instead of blocking the current thread.
diff --git a/library/std/src/lazy.rs b/library/std/src/lazy.rs
index e0095e64faf..68f57958bb2 100644
--- a/library/std/src/lazy.rs
+++ b/library/std/src/lazy.rs
@@ -10,6 +10,7 @@ use crate::{
     mem::MaybeUninit,
     ops::{Deref, Drop},
     panic::{RefUnwindSafe, UnwindSafe},
+    pin::Pin,
     sync::Once,
 };
 
@@ -297,6 +298,60 @@ impl<T> SyncOnceCell<T> {
         Ok(unsafe { self.get_unchecked() })
     }
 
+    /// Internal-only API that gets the contents of the cell, initializing it
+    /// in two steps with `f` and `g` if the cell was empty.
+    ///
+    /// `f` is called to construct the value, which is then moved into the cell
+    /// and given as a (pinned) mutable reference to `g` to finish
+    /// initialization.
+    ///
+    /// This allows `g` to inspect and manipulate the value after it has been
+    /// moved into its final place in the cell, but before the cell is
+    /// considered initialized.
+    ///
+    /// # Panics
+    ///
+    /// If `f` or `g` panics, the panic is propagated to the caller, and the
+    /// cell remains uninitialized.
+    ///
+    /// With the current implementation, if `g` panics, the value from `f` will
+    /// not be dropped. This should probably be fixed if this is ever used for
+    /// a type where this matters.
+    ///
+    /// It is an error to reentrantly initialize the cell from `f`. The exact
+    /// outcome is unspecified. The current implementation deadlocks, but this
+    /// may be changed to a panic in the future.
+    pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
+    where
+        F: FnOnce() -> T,
+        G: FnOnce(Pin<&mut T>),
+    {
+        if let Some(value) = self.get_ref().get() {
+            // SAFETY: The inner value was already initialized, and will not be
+            // moved anymore.
+            return unsafe { Pin::new_unchecked(value) };
+        }
+
+        let slot = &self.value;
+
+        // Ignore poisoning from other threads
+        // If another thread panics, then we'll be able to run our closure
+        self.once.call_once_force(|_| {
+            let value = f();
+            // SAFETY: We use the Once (self.once) to guarantee unique access
+            // to the UnsafeCell (slot).
+            let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
+            // SAFETY: The value has been written to its final place in
+            // self.value. We promise not to move it anymore, and we express
+            // that promise here with a Pin<&mut T>.
+            g(unsafe { Pin::new_unchecked(value) });
+        });
+
+        // SAFETY: The inner value has been initialized, and will not be moved
+        // anymore.
+        unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
+    }
+
     /// Consumes the `SyncOnceCell`, returning the wrapped value. Returns
     /// `None` if the cell was empty.
     ///
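
`get_or_init_pin` is `pub(crate)`, so its only callers are inside std; the stdio changes above show the real usage. A sketch of the calling pattern (the `ReentrantMutex` two-step mirrors those call sites; `Thing` is a hypothetical payload type):

```
// Inside std: construct in step 1, then finish initialization in place in
// step 2, once the value can no longer move.
static CELL: SyncOnceCell<ReentrantMutex<RefCell<Thing>>> = SyncOnceCell::new();

fn instance() -> Pin<&'static ReentrantMutex<RefCell<Thing>>> {
    Pin::static_ref(&CELL).get_or_init_pin(
        // `f`: build the value; it is moved into the cell afterwards.
        || unsafe { ReentrantMutex::new(RefCell::new(Thing::new())) },
        // `g`: runs after the move; ReentrantMutex::init() must only be
        // called once the mutex has reached its final address.
        |mutex| unsafe { mutex.init() },
    )
}
```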
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 6c240cb4c3e..2c6f03fe224 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -175,7 +175,7 @@
 //! [`str`]: prim@str
 //! [`mpsc`]: sync::mpsc
 //! [`std::cmp`]: cmp
-//! [`std::slice`]: slice
+//! [`std::slice`]: mod@slice
 //! [`use std::env`]: env/index.html
 //! [`use`]: ../book/ch07-02-defining-modules-to-control-scope-and-privacy.html
 //! [crates.io]: https://crates.io
@@ -185,7 +185,8 @@
 //! [other]: #what-is-in-the-standard-library-documentation
 //! [primitive types]: ../book/ch03-02-data-types.html
 //! [rust-discord]: https://discord.gg/rust-lang
-
+#![cfg_attr(not(bootstrap), doc = "[array]: prim@array")]
+#![cfg_attr(not(bootstrap), doc = "[slice]: prim@slice")]
 #![cfg_attr(not(feature = "restricted-std"), stable(feature = "rust1", since = "1.0.0"))]
 #![cfg_attr(feature = "restricted-std", unstable(feature = "restricted_std", issue = "none"))]
 #![doc(
@@ -266,6 +267,7 @@
 #![feature(format_args_nl)]
 #![feature(gen_future)]
 #![feature(generator_trait)]
+#![feature(get_mut_unchecked)]
 #![feature(global_asm)]
 #![feature(hashmap_internals)]
 #![feature(int_error_internals)]
@@ -287,12 +289,12 @@
 #![feature(nll)]
 #![feature(nonnull_slice_from_raw_parts)]
 #![feature(once_cell)]
-#![cfg_attr(bootstrap, feature(optin_builtin_traits))]
-#![cfg_attr(not(bootstrap), feature(auto_traits))]
+#![feature(auto_traits)]
 #![feature(or_patterns)]
 #![feature(panic_info_message)]
 #![feature(panic_internals)]
 #![feature(panic_unwind)]
+#![feature(pin_static_ref)]
 #![feature(prelude_import)]
 #![feature(ptr_internals)]
 #![feature(raw)]
@@ -302,7 +304,6 @@
 #![feature(rustc_private)]
 #![feature(shrink_to)]
 #![feature(slice_concat_ext)]
-#![feature(slice_fill)]
 #![feature(slice_internals)]
 #![feature(slice_ptr_get)]
 #![feature(slice_ptr_len)]
@@ -312,6 +313,7 @@
 #![feature(stdsimd)]
 #![feature(stmt_expr_attributes)]
 #![feature(str_internals)]
+#![feature(str_split_once)]
 #![feature(test)]
 #![feature(thread_local)]
 #![feature(thread_local_internals)]
@@ -322,7 +324,6 @@
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![feature(unsafe_block_in_unsafe_fn)]
-#![feature(unsafe_cell_get_mut)]
 #![feature(unsafe_cell_raw_get)]
 #![feature(unwind_attributes)]
 #![feature(vec_into_raw_parts)]
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index de072e83dfc..5a70aa070e8 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -8,7 +8,7 @@
 #[macro_export]
 #[stable(feature = "rust1", since = "1.0.0")]
 #[allow_internal_unstable(libstd_sys_internals)]
-#[cfg_attr(not(any(bootstrap, test)), rustc_diagnostic_item = "std_panic_macro")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "std_panic_macro")]
 macro_rules! panic {
     () => ({ $crate::panic!("explicit panic") });
     ($msg:expr $(,)?) => ({ $crate::rt::begin_panic($msg) });
diff --git a/library/std/src/net/ip.rs b/library/std/src/net/ip.rs
index 87bbd33bc01..d33b772633d 100644
--- a/library/std/src/net/ip.rs
+++ b/library/std/src/net/ip.rs
@@ -148,7 +148,7 @@ impl IpAddr {
     /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true);
     /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true);
     /// ```
-    #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ip", since = "1.50.0")]
     #[stable(feature = "ip_shared", since = "1.12.0")]
     pub const fn is_unspecified(&self) -> bool {
         match self {
@@ -170,7 +170,7 @@ impl IpAddr {
     /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true);
     /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true);
     /// ```
-    #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ip", since = "1.50.0")]
     #[stable(feature = "ip_shared", since = "1.12.0")]
     pub const fn is_loopback(&self) -> bool {
         match self {
@@ -215,7 +215,7 @@ impl IpAddr {
     /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true);
     /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true);
     /// ```
-    #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ip", since = "1.50.0")]
     #[stable(feature = "ip_shared", since = "1.12.0")]
     pub const fn is_multicast(&self) -> bool {
         match self {
@@ -301,8 +301,8 @@ impl Ipv4Addr {
     ///
     /// let addr = Ipv4Addr::new(127, 0, 0, 1);
     /// ```
-    #[stable(feature = "rust1", since = "1.0.0")]
     #[rustc_const_stable(feature = "const_ipv4", since = "1.32.0")]
+    #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
        // `s_addr` is stored as BE on all machines and the array is in BE order.
         // So the native endian conversion method is used so that it's never swapped.
@@ -358,7 +358,7 @@ impl Ipv4Addr {
     /// let addr = Ipv4Addr::new(127, 0, 0, 1);
     /// assert_eq!(addr.octets(), [127, 0, 0, 1]);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn octets(&self) -> [u8; 4] {
         // This returns the order we want because s_addr is stored in big-endian.
@@ -380,8 +380,8 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true);
     /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false);
     /// ```
-    #[stable(feature = "ip_shared", since = "1.12.0")]
     #[rustc_const_stable(feature = "const_ipv4", since = "1.32.0")]
+    #[stable(feature = "ip_shared", since = "1.12.0")]
     pub const fn is_unspecified(&self) -> bool {
         self.inner.s_addr == 0
     }
@@ -400,7 +400,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true);
     /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_loopback(&self) -> bool {
         self.octets()[0] == 127
@@ -429,7 +429,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true);
     /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_private(&self) -> bool {
         match self.octets() {
@@ -455,7 +455,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true);
     /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_link_local(&self) -> bool {
         matches!(self.octets(), [169, 254, ..])
@@ -675,7 +675,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true);
     /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_multicast(&self) -> bool {
         self.octets()[0] >= 224 && self.octets()[0] <= 239
@@ -695,7 +695,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true);
     /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_broadcast(&self) -> bool {
         u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets())
@@ -721,7 +721,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true);
     /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_documentation(&self) -> bool {
         match self.octets() {
@@ -751,7 +751,7 @@ impl Ipv4Addr {
     ///     Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 767)
     /// );
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn to_ipv6_compatible(&self) -> Ipv6Addr {
         let [a, b, c, d] = self.octets();
@@ -774,7 +774,7 @@ impl Ipv4Addr {
     /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(),
     ///            Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 49152, 767));
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv4", since = "1.50.0")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn to_ipv6_mapped(&self) -> Ipv6Addr {
         let [a, b, c, d] = self.octets();
@@ -1043,9 +1043,9 @@ impl Ipv6Addr {
     ///
     /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
     /// ```
-    #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
     #[rustc_allow_const_fn_unstable(const_fn_transmute)]
+    #[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
+    #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
         let addr16 = [
             a.to_be(),
@@ -1061,6 +1061,8 @@ impl Ipv6Addr {
             inner: c::in6_addr {
                 // All elements in `addr16` are big endian.
                 // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`.
+                // rustc_allow_const_fn_unstable: the transmute could be written as stable const
+                // code, but that leads to worse code generation (#75085)
                 s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) },
             },
         }
@@ -1102,11 +1104,14 @@ impl Ipv6Addr {
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(),
     ///            [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[rustc_allow_const_fn_unstable(const_fn_transmute)]
+    #[rustc_const_stable(feature = "const_ipv6", since = "1.50.0")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn segments(&self) -> [u16; 8] {
         // All elements in `s6_addr` must be big endian.
         // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`.
+        // rustc_allow_const_fn_unstable: the transmute could be written as stable const code, but
+        // that leads to worse code generation (#75085)
         let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) };
         // We want native endian u16
         [
@@ -1135,7 +1140,7 @@ impl Ipv6Addr {
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv6", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_unspecified(&self) -> bool {
         u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets())
@@ -1155,7 +1160,7 @@ impl Ipv6Addr {
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv6", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_loopback(&self) -> bool {
         u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets())
@@ -1465,7 +1470,7 @@ impl Ipv6Addr {
     /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true);
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false);
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv6", since = "1.50.0")]
     #[stable(since = "1.7.0", feature = "ip_17")]
     pub const fn is_multicast(&self) -> bool {
         (self.segments()[0] & 0xff00) == 0xff00
@@ -1520,7 +1525,7 @@ impl Ipv6Addr {
     /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
     ///            Some(Ipv4Addr::new(0, 0, 0, 1)));
     /// ```
-    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[rustc_const_stable(feature = "const_ipv6", since = "1.50.0")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn to_ipv4(&self) -> Option<Ipv4Addr> {
         if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() {
@@ -1540,8 +1545,8 @@ impl Ipv6Addr {
     /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(),
     ///            [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
     /// ```
-    #[stable(feature = "ipv6_to_octets", since = "1.12.0")]
     #[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
+    #[stable(feature = "ipv6_to_octets", since = "1.12.0")]
     pub const fn octets(&self) -> [u8; 16] {
         self.inner.s6_addr
     }
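
With these methods const-stable in 1.50, address classification can move to compile time, for example:

```
use std::net::{Ipv4Addr, Ipv6Addr};

// Both constants are evaluated entirely at compile time.
const V4_IS_PRIVATE: bool = Ipv4Addr::new(192, 168, 0, 2).is_private();
const V6_IS_LOOPBACK: bool = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).is_loopback();

fn main() {
    assert!(V4_IS_PRIVATE);
    assert!(V6_IS_LOOPBACK);
}
```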
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 8ba3feccb6b..6cd572cbe87 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -44,11 +44,11 @@ use realstd::io::set_output_capture;
 extern "C" {
     fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any + Send + 'static);
 
-    /// `payload` is actually a `*mut &mut dyn BoxMeUp` but that would cause FFI warnings.
-    /// It cannot be `Box<dyn BoxMeUp>` because the other end of this call does not depend
-    /// on liballoc, and thus cannot use `Box`.
+    /// `payload` is passed through another layer of raw pointers as `&mut dyn Trait` is not
+    /// FFI-safe. `BoxMeUp` lazily performs allocation only when needed (this avoids allocations
+    /// when using the "abort" panic runtime).
     #[unwind(allowed)]
-    fn __rust_start_panic(payload: usize) -> u32;
+    fn __rust_start_panic(payload: *mut &mut dyn BoxMeUp) -> u32;
 }
 
 /// This function is called by the panic runtime if FFI code catches a Rust
@@ -637,7 +637,7 @@ pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
 fn rust_panic(mut msg: &mut dyn BoxMeUp) -> ! {
     let code = unsafe {
         let obj = &mut msg as *mut &mut dyn BoxMeUp;
-        __rust_start_panic(obj as usize)
+        __rust_start_panic(obj)
     };
     rtabort!("failed to initiate panic, error {}", code)
 }
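
The new signature is FFI-safe because a thin raw pointer *to* a fat reference is a single word, even though the fat reference itself is two. A size check makes the layout concrete (`BoxMeUpLike` is a stand-in trait, not the real `BoxMeUp`):

```
use std::mem::size_of;

trait BoxMeUpLike {}

fn main() {
    // &mut dyn Trait carries a data pointer plus a vtable pointer...
    assert_eq!(size_of::<&mut dyn BoxMeUpLike>(), 2 * size_of::<usize>());
    // ...but a raw pointer to that fat reference is one word, so it can
    // cross the extern "C" boundary without FFI warnings.
    assert_eq!(size_of::<*mut &mut dyn BoxMeUpLike>(), size_of::<usize>());
}
```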
diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs
index ff94fda5a22..896d6c2a64c 100644
--- a/library/std/src/path/tests.rs
+++ b/library/std/src/path/tests.rs
@@ -873,12 +873,12 @@ pub fn test_decompositions_windows() {
     );
 
     t!("\\\\.\\foo/bar",
-    iter: ["\\\\.\\foo/bar", "\\"],
+    iter: ["\\\\.\\foo", "\\", "bar"],
     has_root: true,
     is_absolute: true,
-    parent: None,
-    file_name: None,
-    file_stem: None,
+    parent: Some("\\\\.\\foo/"),
+    file_name: Some("bar"),
+    file_stem: Some("bar"),
     extension: None
     );
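
The updated expectations reflect that `/` now acts as a path separator after a `\\.\` device prefix. In user terms (a sketch; this behavior is Windows-only):

```
use std::ffi::OsStr;
use std::path::Path;

fn main() {
    let path = Path::new(r"\\.\foo/bar");
    assert_eq!(path.file_name(), Some(OsStr::new("bar")));
    assert_eq!(path.parent(), Some(Path::new(r"\\.\foo/")));
}
```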
 
diff --git a/library/std/src/prelude/mod.rs b/library/std/src/prelude/mod.rs
index c7a7c779d3c..a3776681d03 100644
--- a/library/std/src/prelude/mod.rs
+++ b/library/std/src/prelude/mod.rs
@@ -26,38 +26,37 @@
 //! # Prelude contents
 //!
 //! The current version of the prelude (version 1) lives in
-//! [`std::prelude::v1`], and re-exports the following.
+//! [`std::prelude::v1`], and re-exports the following:
 //!
-//! * [`std::marker`]::{[`Copy`], [`Send`], [`Sized`], [`Sync`], [`Unpin`]}. The
-//!   marker traits indicate fundamental properties of types.
-//! * [`std::ops`]::{[`Drop`], [`Fn`], [`FnMut`], [`FnOnce`]}. Various
+//! * [`std::marker`]::{[`Copy`], [`Send`], [`Sized`], [`Sync`], [`Unpin`]},
+//!   marker traits that indicate fundamental properties of types.
+//! * [`std::ops`]::{[`Drop`], [`Fn`], [`FnMut`], [`FnOnce`]}, various
 //!   operations for both destructors and overloading `()`.
 //! * [`std::mem`]::[`drop`][`mem::drop`], a convenience function for explicitly
 //!   dropping a value.
 //! * [`std::boxed`]::[`Box`], a way to allocate values on the heap.
-//! * [`std::borrow`]::[`ToOwned`], The conversion trait that defines
+//! * [`std::borrow`]::[`ToOwned`], the conversion trait that defines
 //!   [`to_owned`], the generic method for creating an owned type from a
 //!   borrowed type.
 //! * [`std::clone`]::[`Clone`], the ubiquitous trait that defines
 //!   [`clone`][`Clone::clone`], the method for producing a copy of a value.
-//! * [`std::cmp`]::{[`PartialEq`], [`PartialOrd`], [`Eq`], [`Ord`] }. The
+//! * [`std::cmp`]::{[`PartialEq`], [`PartialOrd`], [`Eq`], [`Ord`]}, the
 //!   comparison traits, which implement the comparison operators and are often
 //!   seen in trait bounds.
-//! * [`std::convert`]::{[`AsRef`], [`AsMut`], [`Into`], [`From`]}. Generic
+//! * [`std::convert`]::{[`AsRef`], [`AsMut`], [`Into`], [`From`]}, generic
 //!   conversions, used by savvy API authors to create overloaded methods.
 //! * [`std::default`]::[`Default`], types that have default values.
-//! * [`std::iter`]::{[`Iterator`], [`Extend`], [`IntoIterator`],
-//!   [`DoubleEndedIterator`], [`ExactSizeIterator`]}. Iterators of various
+//! * [`std::iter`]::{[`Iterator`], [`Extend`], [`IntoIterator`],
+//!   [`DoubleEndedIterator`], [`ExactSizeIterator`]}, iterators of various
 //!   kinds.
-//! * [`std::option`]::[`Option`]::{[`self`][`Option`], [`Some`], [`None`]}. A
+//! * [`std::option`]::[`Option`]::{[`self`][`Option`], [`Some`], [`None`]}, a
 //!   type which expresses the presence or absence of a value. This type is so
 //!   commonly used, its variants are also exported.
-//! * [`std::result`]::[`Result`]::{[`self`][`Result`], [`Ok`], [`Err`]}. A type
+//! * [`std::result`]::[`Result`]::{[`self`][`Result`], [`Ok`], [`Err`]}, a type
 //!   for functions that may succeed or fail. Like [`Option`], its variants are
 //!   exported as well.
 //! * [`std::string`]::{[`String`], [`ToString`]}, heap allocated strings.
-//! * [`std::vec`]::[`Vec`], a growable, heap-allocated
-//!   vector.
+//! * [`std::vec`]::[`Vec`], a growable, heap-allocated vector.
 //!
 //! [`mem::drop`]: crate::mem::drop
 //! [`std::borrow`]: crate::borrow
diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs
index 55171ef2292..ec12e9f09d8 100644
--- a/library/std/src/primitive_docs.rs
+++ b/library/std/src/primitive_docs.rs
@@ -478,8 +478,10 @@ mod prim_unit {}
 #[stable(feature = "rust1", since = "1.0.0")]
 mod prim_pointer {}
 
+#[doc(alias = "[]")]
+#[doc(alias = "[T;N]")] // unfortunately, rustdoc doesn't have fuzzy search for aliases
+#[doc(alias = "[T; N]")]
 #[doc(primitive = "array")]
-//
 /// A fixed-size array, denoted `[T; N]`, for the element type, `T`, and the
 /// non-negative compile-time constant size, `N`.
 ///
@@ -489,6 +491,10 @@ mod prim_pointer {}
 /// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`.
 ///   The type of `x` must be [`Copy`].
 ///
+/// Note that `[expr; 0]` is allowed, and produces an empty array.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
 /// Arrays of *any* size implement the following traits if the element type allows it:
 ///
 /// - [`Copy`]
@@ -920,6 +926,7 @@ mod prim_usize {}
 
 #[doc(primitive = "reference")]
 #[doc(alias = "&")]
+#[doc(alias = "&mut")]
 //
 /// References, both shared and mutable.
 ///
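
Regarding the `[expr; 0]` note added above, a short example of the expression being evaluated and its result dropped:

```
fn noisy() -> i32 {
    println!("evaluated");
    1
}

fn main() {
    // noisy() runs exactly once and its result is dropped immediately,
    // even though the array ends up empty.
    let empty: [i32; 0] = [noisy(); 0];
    assert!(empty.is_empty());
}
```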
diff --git a/library/std/src/sync/mpsc/blocking.rs b/library/std/src/sync/mpsc/blocking.rs
index d34de6a4fac..4c852b8ee81 100644
--- a/library/std/src/sync/mpsc/blocking.rs
+++ b/library/std/src/sync/mpsc/blocking.rs
@@ -36,7 +36,11 @@ pub fn tokens() -> (WaitToken, SignalToken) {
 
 impl SignalToken {
     pub fn signal(&self) -> bool {
-        let wake = !self.inner.woken.compare_and_swap(false, true, Ordering::SeqCst);
+        let wake = self
+            .inner
+            .woken
+            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+            .is_ok();
         if wake {
             self.inner.thread.unpark();
         }
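
This hunk and the mpsc/once/sgx hunks below are mechanical migrations off the deprecated `compare_and_swap`. The general mapping, sketched on a fresh atomic:

```
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
    let woken = AtomicBool::new(false);

    // Old compare_and_swap(old, new, ord) returned the previous value.
    // compare_exchange returns Ok(prev) on success and Err(actual) on
    // failure, so "did it swap?" becomes .is_ok()...
    let swapped = woken
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_ok();
    assert!(swapped);

    // ...and the old "return the previous value" shape is recovered with
    // unwrap_or_else(|x| x).
    let prev = woken
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .unwrap_or_else(|actual| actual);
    assert_eq!(prev, true); // the second attempt failed; prev was true
}
```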
diff --git a/library/std/src/sync/mpsc/oneshot.rs b/library/std/src/sync/mpsc/oneshot.rs
index 75f5621fa12..3dcf03f579a 100644
--- a/library/std/src/sync/mpsc/oneshot.rs
+++ b/library/std/src/sync/mpsc/oneshot.rs
@@ -129,7 +129,7 @@ impl<T> Packet<T> {
             let ptr = unsafe { signal_token.cast_to_usize() };
 
             // race with senders to enter the blocking state
-            if self.state.compare_and_swap(EMPTY, ptr, Ordering::SeqCst) == EMPTY {
+            if self.state.compare_exchange(EMPTY, ptr, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
                 if let Some(deadline) = deadline {
                     let timed_out = !wait_token.wait_max_until(deadline);
                     // Try to reset the state
@@ -161,7 +161,12 @@ impl<T> Packet<T> {
                 // the state changes under our feet we'd rather just see that state
                 // change.
                 DATA => {
-                    self.state.compare_and_swap(DATA, EMPTY, Ordering::SeqCst);
+                    let _ = self.state.compare_exchange(
+                        DATA,
+                        EMPTY,
+                        Ordering::SeqCst,
+                        Ordering::SeqCst,
+                    );
                     match (&mut *self.data.get()).take() {
                         Some(data) => Ok(data),
                         None => unreachable!(),
@@ -264,7 +269,10 @@ impl<T> Packet<T> {
 
             // If we've got a blocked thread, then use an atomic to gain ownership
             // of it (may fail)
-            ptr => self.state.compare_and_swap(ptr, EMPTY, Ordering::SeqCst),
+            ptr => self
+                .state
+                .compare_exchange(ptr, EMPTY, Ordering::SeqCst, Ordering::SeqCst)
+                .unwrap_or_else(|x| x),
         };
 
         // Now that we've got ownership of our state, figure out what to do
diff --git a/library/std/src/sync/mpsc/shared.rs b/library/std/src/sync/mpsc/shared.rs
index 898654f21f2..0c32e636a56 100644
--- a/library/std/src/sync/mpsc/shared.rs
+++ b/library/std/src/sync/mpsc/shared.rs
@@ -385,8 +385,15 @@ impl<T> Packet<T> {
         self.port_dropped.store(true, Ordering::SeqCst);
         let mut steals = unsafe { *self.steals.get() };
         while {
-            let cnt = self.cnt.compare_and_swap(steals, DISCONNECTED, Ordering::SeqCst);
-            cnt != DISCONNECTED && cnt != steals
+            match self.cnt.compare_exchange(
+                steals,
+                DISCONNECTED,
+                Ordering::SeqCst,
+                Ordering::SeqCst,
+            ) {
+                Ok(_) => false,
+                Err(old) => old != DISCONNECTED,
+            }
         } {
             // See the discussion in 'try_recv' for why we yield
             // control of this thread.
diff --git a/library/std/src/sync/mpsc/stream.rs b/library/std/src/sync/mpsc/stream.rs
index 9f7c1af8951..a652f24c58a 100644
--- a/library/std/src/sync/mpsc/stream.rs
+++ b/library/std/src/sync/mpsc/stream.rs
@@ -322,12 +322,15 @@ impl<T> Packet<T> {
         // (because there is a bounded number of senders).
         let mut steals = unsafe { *self.queue.consumer_addition().steals.get() };
         while {
-            let cnt = self.queue.producer_addition().cnt.compare_and_swap(
+            match self.queue.producer_addition().cnt.compare_exchange(
                 steals,
                 DISCONNECTED,
                 Ordering::SeqCst,
-            );
-            cnt != DISCONNECTED && cnt != steals
+                Ordering::SeqCst,
+            ) {
+                Ok(_) => false,
+                Err(old) => old != DISCONNECTED,
+            }
         } {
             while self.queue.pop().is_some() {
                 steals += 1;
diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs
index de5ddf1daf2..6a330834489 100644
--- a/library/std/src/sync/once.rs
+++ b/library/std/src/sync/once.rs
@@ -65,7 +65,7 @@
 //       must do so with Release ordering to make the result available.
 //     - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and
 //       needs to make the nodes available with Release ordering. The load in
-//       its `compare_and_swap` can be Relaxed because it only has to compare
+//       its `compare_exchange` can be Relaxed because it only has to compare
 //       the atomic, not to read other data.
 //     - `WaiterQueue::Drop` must see the `Waiter` nodes, so it must load
 //       `state_and_queue` with Acquire ordering.
@@ -110,7 +110,7 @@ use crate::thread::{self, Thread};
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Once {
-    // `state_and_queue` is actually an a pointer to a `Waiter` with extra state
+    // `state_and_queue` is actually a pointer to a `Waiter` with extra state
     // bits, so we add the `PhantomData` appropriately.
     state_and_queue: AtomicUsize,
     _marker: marker::PhantomData<*const Waiter>,
@@ -395,12 +395,13 @@ impl Once {
                 }
                 POISONED | INCOMPLETE => {
                     // Try to register this thread as the one RUNNING.
-                    let old = self.state_and_queue.compare_and_swap(
+                    let exchange_result = self.state_and_queue.compare_exchange(
                         state_and_queue,
                         RUNNING,
                         Ordering::Acquire,
+                        Ordering::Acquire,
                     );
-                    if old != state_and_queue {
+                    if let Err(old) = exchange_result {
                         state_and_queue = old;
                         continue;
                     }
@@ -452,8 +453,13 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
 
         // Try to slide in the node at the head of the linked list, making sure
         // that another thread didn't just replace the head of the linked list.
-        let old = state_and_queue.compare_and_swap(current_state, me | RUNNING, Ordering::Release);
-        if old != current_state {
+        let exchange_result = state_and_queue.compare_exchange(
+            current_state,
+            me | RUNNING,
+            Ordering::Release,
+            Ordering::Relaxed,
+        );
+        if let Err(old) = exchange_result {
             current_state = old;
             continue;
         }
diff --git a/library/std/src/sys/sgx/abi/mod.rs b/library/std/src/sys/sgx/abi/mod.rs
index a0eb12c3d15..a5e45303476 100644
--- a/library/std/src/sys/sgx/abi/mod.rs
+++ b/library/std/src/sys/sgx/abi/mod.rs
@@ -36,20 +36,20 @@ unsafe extern "C" fn tcs_init(secondary: bool) {
     }
 
     // Try to atomically swap UNINIT with BUSY. The returned state can be:
-    match RELOC_STATE.compare_and_swap(UNINIT, BUSY, Ordering::Acquire) {
+    match RELOC_STATE.compare_exchange(UNINIT, BUSY, Ordering::Acquire, Ordering::Acquire) {
         // This thread just obtained the lock and other threads will observe BUSY
-        UNINIT => {
+        Ok(_) => {
             reloc::relocate_elf_rela();
             RELOC_STATE.store(DONE, Ordering::Release);
         }
         // We need to wait until the initialization is done.
-        BUSY => {
+        Err(BUSY) => {
             while RELOC_STATE.load(Ordering::Acquire) == BUSY {
                 core::hint::spin_loop();
             }
         }
         // Initialization is done.
-        DONE => {}
+        Err(DONE) => {}
         _ => unreachable!(),
     }
 }
diff --git a/library/std/src/sys/sgx/waitqueue/spin_mutex.rs b/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
index d99ce895da5..9140041c584 100644
--- a/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
+++ b/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
@@ -42,7 +42,7 @@ impl<T> SpinMutex<T> {
 
     #[inline(always)]
     pub fn try_lock(&self) -> Option<SpinMutexGuard<'_, T>> {
-        if !self.lock.compare_and_swap(false, true, Ordering::Acquire) {
+        if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Acquire).is_ok() {
             Some(SpinMutexGuard { mutex: self })
         } else {
             None
diff --git a/library/std/src/sys/unix/ext/net/ancillary.rs b/library/std/src/sys/unix/ext/net/ancillary.rs
index 2c91ba70dd0..0964b6335aa 100644
--- a/library/std/src/sys/unix/ext/net/ancillary.rs
+++ b/library/std/src/sys/unix/ext/net/ancillary.rs
@@ -360,7 +360,11 @@ impl<'a> AncillaryData<'a> {
     fn try_from_cmsghdr(cmsg: &'a libc::cmsghdr) -> Result<Self, AncillaryError> {
         unsafe {
             cfg_if::cfg_if! {
-                if #[cfg(any(target_os = "android", all(target_os = "linux", target_env = "gnu")))] {
+                if #[cfg(any(
+                        target_os = "android",
+                        all(target_os = "linux", target_env = "gnu"),
+                        all(target_os = "linux", target_env = "uclibc"),
+                   ))] {
                     let cmsg_len_zero = libc::CMSG_LEN(0) as libc::size_t;
                 } else if #[cfg(any(
                               target_os = "dragonfly",
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index d3a279a2355..821851a6c65 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -12,6 +12,11 @@ use crate::sys_common::AsInner;
 use libc::{c_int, c_void};
 
 #[derive(Debug)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
+// 32-bit c_int. Below is -2, in two's complement, but that only works out
+// because c_int is 32 bits.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
 pub struct FileDesc {
     fd: c_int,
 }
@@ -63,7 +68,9 @@ const fn max_iov() -> usize {
 
 impl FileDesc {
     pub fn new(fd: c_int) -> FileDesc {
-        FileDesc { fd }
+        assert_ne!(fd, -1i32);
+        // SAFETY: we just asserted that the value is in the valid range and isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+        unsafe { FileDesc { fd } }
     }
 
     pub fn raw(&self) -> c_int {
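
The scalar-valid-range attributes are rustc-internal, but the payoff is the same niche optimization observable with the stable `NonZero` types: excluding one bit pattern lets `Option<FileDesc>` use it as the `None` tag. An analogous sketch:

```
use std::mem::size_of;
use std::num::NonZeroI32;

fn main() {
    // Because one value is excluded from the valid range, Option can use it
    // as the None representation and needs no extra discriminant byte.
    assert_eq!(size_of::<Option<NonZeroI32>>(), size_of::<i32>());
}
```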
diff --git a/library/std/src/sys/unix/fd/tests.rs b/library/std/src/sys/unix/fd/tests.rs
index a932043cbc6..c9520485c3c 100644
--- a/library/std/src/sys/unix/fd/tests.rs
+++ b/library/std/src/sys/unix/fd/tests.rs
@@ -3,7 +3,7 @@ use core::mem::ManuallyDrop;
 
 #[test]
 fn limit_vector_count() {
-    let stdout = ManuallyDrop::new(FileDesc { fd: 1 });
+    let stdout = ManuallyDrop::new(unsafe { FileDesc { fd: 1 } });
     let bufs = (0..1500).map(|_| IoSlice::new(&[])).collect::<Vec<_>>();
     assert!(stdout.write_vectored(&bufs).is_ok());
 }
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 5bfac803153..200dbf06ff8 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -59,7 +59,7 @@ use crate::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use crate::os::unix::net::UnixStream;
 use crate::process::{ChildStderr, ChildStdin, ChildStdout};
 use crate::ptr;
-use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering};
 use crate::sys::cvt;
 
 #[cfg(test)]
@@ -491,8 +491,15 @@ impl CopyResult {
     }
 }
 
-/// linux-specific implementation that will attempt to use copy_file_range for copy offloading
-/// as the name says, it only works on regular files
+/// Invalid file descriptor.
+///
+/// Valid file descriptors are guaranteed to be non-negative (see the `open()` manpage)
+/// while negative values are used to indicate errors.
+/// Thus -1 will never overlap with a valid open file.
+const INVALID_FD: RawFd = -1;
+
+/// Linux-specific implementation that will attempt to use copy_file_range for copy offloading.
+/// As the name says, it only works on regular files.
 ///
 /// Callers must handle fallback to a generic copy loop.
 /// `Fallback` may indicate non-zero number of bytes already written
@@ -500,9 +507,13 @@ impl CopyResult {
 pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
     use crate::cmp;
 
+    const NOT_PROBED: u8 = 0;
+    const UNAVAILABLE: u8 = 1;
+    const AVAILABLE: u8 = 2;
+
    // Kernels prior to 4.5 don't have copy_file_range
     // We store the availability in a global to avoid unnecessary syscalls
-    static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true);
+    static HAS_COPY_FILE_RANGE: AtomicU8 = AtomicU8::new(NOT_PROBED);
 
     syscall! {
         fn copy_file_range(
@@ -515,39 +526,39 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
         ) -> libc::ssize_t
     }
 
-    let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed);
-    let mut written = 0u64;
-    while written < max_len {
-        let copy_result = if has_copy_file_range {
-            let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
-            // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
-            // this allows us to copy large chunks without hitting EOVERFLOW,
-            // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
-            let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
-            let copy_result = unsafe {
-                // We actually don't have to adjust the offsets,
-                // because copy_file_range adjusts the file offset automatically
-                cvt(copy_file_range(
-                    reader,
-                    ptr::null_mut(),
-                    writer,
-                    ptr::null_mut(),
-                    bytes_to_copy,
-                    0,
-                ))
+    match HAS_COPY_FILE_RANGE.load(Ordering::Relaxed) {
+        NOT_PROBED => {
+            // EPERM can indicate seccomp filters or an immutable file.
+            // To distinguish these cases we probe with invalid file descriptors, which
+            // should result in EBADF if the syscall is supported, and in some other
+            // error (ENOSYS or EPERM) if it is not available.
+            let result = unsafe {
+                cvt(copy_file_range(INVALID_FD, ptr::null_mut(), INVALID_FD, ptr::null_mut(), 1, 0))
             };
-            if let Err(ref copy_err) = copy_result {
-                match copy_err.raw_os_error() {
-                    Some(libc::ENOSYS | libc::EPERM | libc::EOPNOTSUPP) => {
-                        HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed);
-                    }
-                    _ => {}
-                }
+
+            if matches!(result.map_err(|e| e.raw_os_error()), Err(Some(libc::EBADF))) {
+                HAS_COPY_FILE_RANGE.store(AVAILABLE, Ordering::Relaxed);
+            } else {
+                HAS_COPY_FILE_RANGE.store(UNAVAILABLE, Ordering::Relaxed);
+                return CopyResult::Fallback(0);
             }
-            copy_result
-        } else {
-            Err(Error::from_raw_os_error(libc::ENOSYS))
+        }
+        UNAVAILABLE => return CopyResult::Fallback(0),
+        _ => {}
+    };
+
+    let mut written = 0u64;
+    while written < max_len {
+        let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
+        // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
+        // this allows us to copy large chunks without hitting EOVERFLOW,
+        // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
+        let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
+        let copy_result = unsafe {
+            // We actually don't have to adjust the offsets,
+            // because copy_file_range adjusts the file offset automatically
+            cvt(copy_file_range(reader, ptr::null_mut(), writer, ptr::null_mut(), bytes_to_copy, 0))
         };
+
         match copy_result {
             Ok(0) if written == 0 => {
                 // fallback to work around several kernel bugs where copy_file_range will fail to
@@ -567,11 +578,14 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
                         libc::ENOSYS | libc::EXDEV | libc::EINVAL | libc::EPERM | libc::EOPNOTSUPP,
                     ) => {
                         // Try fallback io::copy if either:
-                        // - Kernel version is < 4.5 (ENOSYS)
+                        // - Kernel version is < 4.5 (ENOSYS¹)
                         // - Files are mounted on different fs (EXDEV)
                         // - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP)
-                        // - copy_file_range is disallowed, for example by seccomp (EPERM)
+                        // - the file is immutable or copy_file_range is blocked by seccomp¹ (EPERM)
                         // - copy_file_range cannot be used with pipes or device nodes (EINVAL)
+                        //
+                        // ¹ these cases should be detected by the initial probe but we handle them here
+                        //   anyway in case syscall interception changes at runtime
                         assert_eq!(written, 0);
                         CopyResult::Fallback(0)
                     }
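
The rewritten probe caches copy_file_range availability in a tri-state atomic, so the invalid-descriptor check runs at most once per process. A minimal standalone sketch of the pattern, assuming a hypothetical probe_syscall() that reports whether the kernel recognized the call (EBADF) rather than rejecting it (ENOSYS/EPERM); Relaxed ordering suffices because the cache only gates an optimization, and a racing probe merely repeats the syscall:

    use std::sync::atomic::{AtomicU8, Ordering};

    const NOT_PROBED: u8 = 0;
    const UNAVAILABLE: u8 = 1;
    const AVAILABLE: u8 = 2;

    static STATE: AtomicU8 = AtomicU8::new(NOT_PROBED);

    // Hypothetical probe: issue the syscall with deliberately invalid
    // arguments and report whether the kernel recognized it.
    fn probe_syscall() -> bool {
        true // stand-in; the real probe inspects the returned errno
    }

    fn syscall_is_available() -> bool {
        match STATE.load(Ordering::Relaxed) {
            NOT_PROBED => {
                let ok = probe_syscall();
                STATE.store(if ok { AVAILABLE } else { UNAVAILABLE }, Ordering::Relaxed);
                ok
            }
            UNAVAILABLE => false,
            _ => true,
        }
    }
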
diff --git a/library/std/src/sys/unix/kernel_copy/tests.rs b/library/std/src/sys/unix/kernel_copy/tests.rs
index 3937a1ffa38..77369cdd35f 100644
--- a/library/std/src/sys/unix/kernel_copy/tests.rs
+++ b/library/std/src/sys/unix/kernel_copy/tests.rs
@@ -1,18 +1,18 @@
-use crate::env::temp_dir;
 use crate::fs::OpenOptions;
 use crate::io;
 use crate::io::Result;
 use crate::io::SeekFrom;
 use crate::io::{BufRead, Read, Seek, Write};
 use crate::os::unix::io::AsRawFd;
+use crate::sys_common::io::test::tmpdir;
 
 #[test]
 fn copy_specialization() -> Result<()> {
     use crate::io::{BufReader, BufWriter};
 
-    let path = crate::env::temp_dir();
-    let source_path = path.join("copy-spec.source");
-    let sink_path = path.join("copy-spec.sink");
+    let tmp_path = tmpdir();
+    let source_path = tmp_path.join("copy-spec.source");
+    let sink_path = tmp_path.join("copy-spec.sink");
 
     let result: Result<()> = try {
         let mut source = crate::fs::OpenOptions::new()
@@ -68,7 +68,8 @@ fn copy_specialization() -> Result<()> {
 #[bench]
 fn bench_file_to_file_copy(b: &mut test::Bencher) {
     const BYTES: usize = 128 * 1024;
-    let src_path = temp_dir().join("file-copy-bench-src");
+    let temp_path = tmpdir();
+    let src_path = temp_path.join("file-copy-bench-src");
     let mut src = crate::fs::OpenOptions::new()
         .create(true)
         .truncate(true)
@@ -78,7 +79,7 @@ fn bench_file_to_file_copy(b: &mut test::Bencher) {
         .unwrap();
     src.write(&vec![0u8; BYTES]).unwrap();
 
-    let sink_path = temp_dir().join("file-copy-bench-sink");
+    let sink_path = temp_path.join("file-copy-bench-sink");
     let mut sink = crate::fs::OpenOptions::new()
         .create(true)
         .truncate(true)
@@ -97,7 +98,8 @@ fn bench_file_to_file_copy(b: &mut test::Bencher) {
 #[bench]
 fn bench_file_to_socket_copy(b: &mut test::Bencher) {
     const BYTES: usize = 128 * 1024;
-    let src_path = temp_dir().join("pipe-copy-bench-src");
+    let temp_path = tmpdir();
+    let src_path = temp_path.join("pipe-copy-bench-src");
     let mut src = OpenOptions::new()
         .create(true)
         .truncate(true)
@@ -128,7 +130,8 @@ fn bench_file_to_socket_copy(b: &mut test::Bencher) {
 #[bench]
 fn bench_file_to_uds_copy(b: &mut test::Bencher) {
     const BYTES: usize = 128 * 1024;
-    let src_path = temp_dir().join("uds-copy-bench-src");
+    let temp_path = tmpdir();
+    let src_path = temp_path.join("uds-copy-bench-src");
     let mut src = OpenOptions::new()
         .create(true)
         .truncate(true)
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index fac4b05ad0b..23a5c81c005 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -237,7 +237,7 @@ mod inner {
         // `denom` field.
         //
         // Encoding this as a single `AtomicU64` allows us to use `Relaxed`
-        // operations, as we are only interested in in the effects on a single
+        // operations, as we are only interested in the effects on a single
         // memory location.
         static INFO_BITS: AtomicU64 = AtomicU64::new(0);
 
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 657421e3fa4..2b1bc92dc84 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -31,6 +31,8 @@ pub type WORD = u16;
 pub type CHAR = c_char;
 pub type ULONG_PTR = usize;
 pub type ULONG = c_ulong;
+pub type NTSTATUS = LONG;
+pub type ACCESS_MASK = DWORD;
 
 pub type LPBOOL = *mut BOOL;
 pub type LPBYTE = *mut BYTE;
@@ -285,6 +287,8 @@ pub const STACK_SIZE_PARAM_IS_A_RESERVATION: DWORD = 0x00010000;
 
 pub const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
 
+pub const STATUS_SUCCESS: NTSTATUS = 0x00000000;
+
 #[repr(C)]
 #[cfg(not(target_pointer_width = "64"))]
 pub struct WSADATA {
@@ -1085,3 +1089,46 @@ compat_fn! {
         panic!("rwlocks not available")
     }
 }
+compat_fn! {
+    "api-ms-win-core-synch-l1-2-0":
+    pub fn WaitOnAddress(
+        Address: LPVOID,
+        CompareAddress: LPVOID,
+        AddressSize: SIZE_T,
+        dwMilliseconds: DWORD
+    ) -> BOOL {
+        panic!("WaitOnAddress not available")
+    }
+    pub fn WakeByAddressSingle(Address: LPVOID) -> () {
+        // If this API is unavailable, there cannot be anything waiting, because
+        // WaitOnAddress would've panicked. So it's fine to do nothing here.
+    }
+}
+
+compat_fn! {
+    "ntdll":
+    pub fn NtCreateKeyedEvent(
+        KeyedEventHandle: LPHANDLE,
+        DesiredAccess: ACCESS_MASK,
+        ObjectAttributes: LPVOID,
+        Flags: ULONG
+    ) -> NTSTATUS {
+        panic!("keyed events not available")
+    }
+    pub fn NtReleaseKeyedEvent(
+        EventHandle: HANDLE,
+        Key: LPVOID,
+        Alertable: BOOLEAN,
+        Timeout: PLARGE_INTEGER
+    ) -> NTSTATUS {
+        panic!("keyed events not available")
+    }
+    pub fn NtWaitForKeyedEvent(
+        EventHandle: HANDLE,
+        Key: LPVOID,
+        Alertable: BOOLEAN,
+        Timeout: PLARGE_INTEGER
+    ) -> NTSTATUS {
+        panic!("keyed events not available")
+    }
+}
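
These compat_fn! entries bind the named symbols lazily so the binary still loads on Windows versions that predate them, with the fallback body running when lookup fails. A rough sketch of the underlying idea, not the macro's actual expansion: GetModuleHandleA/GetProcAddress are the real Win32 calls, everything else (names, caching policy) is illustrative, and real code may need LoadLibrary for modules that are not yet loaded:

    // Windows-only sketch of lazy runtime symbol resolution.
    #[cfg(windows)]
    mod lazy_bind {
        use std::ffi::c_void;
        use std::sync::atomic::{AtomicUsize, Ordering};

        #[link(name = "kernel32")]
        extern "system" {
            fn GetModuleHandleA(name: *const u8) -> *mut c_void;
            fn GetProcAddress(module: *mut c_void, name: *const u8) -> *mut c_void;
        }

        static WAIT_ON_ADDRESS: AtomicUsize = AtomicUsize::new(0);

        pub fn wait_on_address_available() -> bool {
            let mut func = WAIT_ON_ADDRESS.load(Ordering::Relaxed);
            if func == 0 {
                func = unsafe {
                    let module = GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0\0".as_ptr());
                    if module.is_null() {
                        0
                    } else {
                        GetProcAddress(module, b"WaitOnAddress\0".as_ptr()) as usize
                    }
                };
                // A failed lookup (0) is re-probed on every call in this
                // sketch; the real macro caches the failure as well.
                WAIT_ON_ADDRESS.store(func, Ordering::Relaxed);
            }
            func != 0
        }
    }
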
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index dd1523b422c..e9588e29758 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -34,6 +34,7 @@ macro_rules! compat_fn {
     )*) => ($(
         $(#[$meta])*
         pub mod $symbol {
+            #[allow(unused_imports)]
             use super::*;
             use crate::sync::atomic::{AtomicUsize, Ordering};
             use crate::mem;
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index c36c6196d79..fcbff59dec0 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -35,6 +35,7 @@ pub mod rwlock;
 pub mod thread;
 pub mod thread_local_dtor;
 pub mod thread_local_key;
+pub mod thread_parker;
 pub mod time;
 cfg_if::cfg_if! {
     if #[cfg(not(target_vendor = "uwp"))] {
diff --git a/library/std/src/sys/windows/mutex.rs b/library/std/src/sys/windows/mutex.rs
index fa51b006c34..d4cc56d4cb3 100644
--- a/library/std/src/sys/windows/mutex.rs
+++ b/library/std/src/sys/windows/mutex.rs
@@ -123,9 +123,9 @@ impl Mutex {
         let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
         inner.remutex.init();
         let inner = Box::into_raw(inner);
-        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
-            0 => inner,
-            n => {
+        match self.lock.compare_exchange(0, inner as usize, Ordering::SeqCst, Ordering::SeqCst) {
+            Ok(_) => inner,
+            Err(n) => {
                 Box::from_raw(inner).remutex.destroy();
                 n as *const _
             }
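
This hunk is one of several in the merge that migrate from the deprecated compare_and_swap to compare_exchange, which takes separate success/failure orderings and returns a Result carrying the observed value. A minimal sketch of the mechanical translation, with illustrative names:

    use std::sync::atomic::{AtomicUsize, Ordering};

    static SLOT: AtomicUsize = AtomicUsize::new(0);

    fn claim(new: usize) -> usize {
        // Before: SLOT.compare_and_swap(0, new, Ordering::SeqCst) returned the
        // previous value, and callers compared it against the expected `0`.
        match SLOT.compare_exchange(0, new, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(_) => new,    // we stored `new`
            Err(cur) => cur, // someone else won the race; use their value
        }
    }
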
diff --git a/library/std/src/sys/windows/path.rs b/library/std/src/sys/windows/path.rs
index dda3ed68cfc..c10c0df4a3a 100644
--- a/library/std/src/sys/windows/path.rs
+++ b/library/std/src/sys/windows/path.rs
@@ -8,15 +8,12 @@ mod tests;
 pub const MAIN_SEP_STR: &str = "\\";
 pub const MAIN_SEP: char = '\\';
 
-// The unsafety here stems from converting between `&OsStr` and `&[u8]`
-// and back. This is safe to do because (1) we only look at ASCII
-// contents of the encoding and (2) new &OsStr values are produced
-// only from ASCII-bounded slices of existing &OsStr values.
-fn os_str_as_u8_slice(s: &OsStr) -> &[u8] {
-    unsafe { mem::transmute(s) }
-}
-unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
-    mem::transmute(s)
+// Safety: `bytes` must be a valid wtf8 encoded slice
+#[inline]
+unsafe fn bytes_as_os_str(bytes: &[u8]) -> &OsStr {
+    // &OsStr is layout compatible with &Slice, which is compatible with &Wtf8,
+    // which is compatible with &[u8].
+    mem::transmute(bytes)
 }
 
 #[inline]
@@ -29,79 +26,116 @@ pub fn is_verbatim_sep(b: u8) -> bool {
     b == b'\\'
 }
 
-// In most DOS systems, it is not possible to have more than 26 drive letters.
-// See <https://en.wikipedia.org/wiki/Drive_letter_assignment#Common_assignments>.
-pub fn is_valid_drive_letter(disk: u8) -> bool {
-    disk.is_ascii_alphabetic()
-}
-
 pub fn parse_prefix(path: &OsStr) -> Option<Prefix<'_>> {
     use Prefix::{DeviceNS, Disk, Verbatim, VerbatimDisk, VerbatimUNC, UNC};
 
-    let path = os_str_as_u8_slice(path);
-
-    // \\
-    if let Some(path) = path.strip_prefix(br"\\") {
-        // \\?\
-        if let Some(path) = path.strip_prefix(br"?\") {
-            // \\?\UNC\server\share
-            if let Some(path) = path.strip_prefix(br"UNC\") {
-                let (server, share) = match get_first_two_components(path, is_verbatim_sep) {
-                    Some((server, share)) => unsafe {
-                        (u8_slice_as_os_str(server), u8_slice_as_os_str(share))
-                    },
-                    None => (unsafe { u8_slice_as_os_str(path) }, OsStr::new("")),
-                };
-                return Some(VerbatimUNC(server, share));
+    if let Some(path) = strip_prefix(path, r"\\") {
+        // \\
+        if let Some(path) = strip_prefix(path, r"?\") {
+            // \\?\
+            if let Some(path) = strip_prefix(path, r"UNC\") {
+                // \\?\UNC\server\share
+
+                let (server, path) = parse_next_component(path, true);
+                let (share, _) = parse_next_component(path, true);
+
+                Some(VerbatimUNC(server, share))
             } else {
-                // \\?\path
-                match path {
-                    // \\?\C:\path
-                    [c, b':', b'\\', ..] if is_valid_drive_letter(*c) => {
-                        return Some(VerbatimDisk(c.to_ascii_uppercase()));
-                    }
-                    // \\?\cat_pics
-                    _ => {
-                        let idx = path.iter().position(|&b| b == b'\\').unwrap_or(path.len());
-                        let slice = &path[..idx];
-                        return Some(Verbatim(unsafe { u8_slice_as_os_str(slice) }));
-                    }
+                let (prefix, _) = parse_next_component(path, true);
+
+                // in verbatim paths, only recognize an exact drive prefix
+                if let Some(drive) = parse_drive_exact(prefix) {
+                    // \\?\C:
+                    Some(VerbatimDisk(drive))
+                } else {
+                    // \\?\prefix
+                    Some(Verbatim(prefix))
                 }
             }
-        } else if let Some(path) = path.strip_prefix(b".\\") {
+        } else if let Some(path) = strip_prefix(path, r".\") {
             // \\.\COM42
-            let idx = path.iter().position(|&b| b == b'\\').unwrap_or(path.len());
-            let slice = &path[..idx];
-            return Some(DeviceNS(unsafe { u8_slice_as_os_str(slice) }));
-        }
-        match get_first_two_components(path, is_sep_byte) {
-            Some((server, share)) if !server.is_empty() && !share.is_empty() => {
+            let (prefix, _) = parse_next_component(path, false);
+            Some(DeviceNS(prefix))
+        } else {
+            let (server, path) = parse_next_component(path, false);
+            let (share, _) = parse_next_component(path, false);
+
+            if !server.is_empty() && !share.is_empty() {
                 // \\server\share
-                return Some(unsafe { UNC(u8_slice_as_os_str(server), u8_slice_as_os_str(share)) });
+                Some(UNC(server, share))
+            } else {
+                // no valid prefix beginning with "\\" recognized
+                None
             }
-            _ => {}
         }
-    } else if let [c, b':', ..] = path {
+    } else if let Some(drive) = parse_drive(path) {
         // C:
-        if is_valid_drive_letter(*c) {
-            return Some(Disk(c.to_ascii_uppercase()));
-        }
+        Some(Disk(drive))
+    } else {
+        // no prefix
+        None
     }
-    None
 }
 
-/// Returns the first two path components with predicate `f`.
-///
-/// The two components returned will be use by caller
-/// to construct `VerbatimUNC` or `UNC` Windows path prefix.
-///
-/// Returns [`None`] if there are no separators in path.
-fn get_first_two_components(path: &[u8], f: fn(u8) -> bool) -> Option<(&[u8], &[u8])> {
-    let idx = path.iter().position(|&x| f(x))?;
-    // Panic safe
-    // The max `idx+1` is `path.len()` and `path[path.len()..]` is a valid index.
-    let (first, path) = (&path[..idx], &path[idx + 1..]);
-    let idx = path.iter().position(|&x| f(x)).unwrap_or(path.len());
-    let second = &path[..idx];
-    Some((first, second))
+// Parses a drive prefix, e.g. "C:" and "C:\whatever"
+fn parse_drive(prefix: &OsStr) -> Option<u8> {
+    // In most DOS systems, it is not possible to have more than 26 drive letters.
+    // See <https://en.wikipedia.org/wiki/Drive_letter_assignment#Common_assignments>.
+    fn is_valid_drive_letter(drive: &u8) -> bool {
+        drive.is_ascii_alphabetic()
+    }
+
+    match prefix.bytes() {
+        [drive, b':', ..] if is_valid_drive_letter(drive) => Some(drive.to_ascii_uppercase()),
+        _ => None,
+    }
+}
+
+// Parses a drive prefix exactly, e.g. "C:"
+fn parse_drive_exact(prefix: &OsStr) -> Option<u8> {
+    // only parse two bytes: the drive letter and the drive separator
+    if prefix.len() == 2 { parse_drive(prefix) } else { None }
+}
+
+fn strip_prefix<'a>(path: &'a OsStr, prefix: &str) -> Option<&'a OsStr> {
+    // `path` and `prefix` are valid wtf8 and utf8 encoded slices respectively, `path[prefix.len()]`
+    // is thus a code point boundary and `path[prefix.len()..]` is a valid wtf8 encoded slice.
+    match path.bytes().strip_prefix(prefix.as_bytes()) {
+        Some(path) => unsafe { Some(bytes_as_os_str(path)) },
+        None => None,
+    }
+}
+
+// Parse the next path component.
+//
+// Returns the next component and the rest of the path excluding the component and separator.
+// Does not recognize `/` as a separator character if `verbatim` is true.
+fn parse_next_component(path: &OsStr, verbatim: bool) -> (&OsStr, &OsStr) {
+    let separator = if verbatim { is_verbatim_sep } else { is_sep_byte };
+
+    match path.bytes().iter().position(|&x| separator(x)) {
+        Some(separator_start) => {
+            let mut separator_end = separator_start + 1;
+
+            // a series of multiple separator characters is treated as a single separator,
+            // except in verbatim paths
+            while !verbatim && separator_end < path.len() && separator(path.bytes()[separator_end])
+            {
+                separator_end += 1;
+            }
+
+            let component = &path.bytes()[..separator_start];
+
+            // Panic safe: the max `separator_end` is `bytes.len()`,
+            // and `bytes[bytes.len()..]` is a valid index.
+            let path = &path.bytes()[separator_end..];
+
+            // Safety: `path` is a valid wtf8 encoded slice and each of the separators ('/', '\')
+            // is encoded in a single byte, therefore `bytes[separator_start]` and
+            // `bytes[separator_end]` must be code point boundaries and thus
+            // `bytes[..separator_start]` and `bytes[separator_end..]` are valid wtf8 slices.
+            unsafe { (bytes_as_os_str(component), bytes_as_os_str(path)) }
+        }
+        None => (path, OsStr::new("")),
+    }
 }
diff --git a/library/std/src/sys/windows/path/tests.rs b/library/std/src/sys/windows/path/tests.rs
index fbac1dc1ca1..9675da6ff88 100644
--- a/library/std/src/sys/windows/path/tests.rs
+++ b/library/std/src/sys/windows/path/tests.rs
@@ -1,21 +1,44 @@
 use super::*;
 
 #[test]
-fn test_get_first_two_components() {
+fn test_parse_next_component() {
     assert_eq!(
-        get_first_two_components(br"server\share", is_verbatim_sep),
-        Some((&b"server"[..], &b"share"[..])),
+        parse_next_component(OsStr::new(r"server\share"), true),
+        (OsStr::new(r"server"), OsStr::new(r"share"))
     );
 
     assert_eq!(
-        get_first_two_components(br"server\", is_verbatim_sep),
-        Some((&b"server"[..], &b""[..]))
+        parse_next_component(OsStr::new(r"server/share"), true),
+        (OsStr::new(r"server/share"), OsStr::new(r""))
     );
 
     assert_eq!(
-        get_first_two_components(br"\server\", is_verbatim_sep),
-        Some((&b""[..], &b"server"[..]))
+        parse_next_component(OsStr::new(r"server/share"), false),
+        (OsStr::new(r"server"), OsStr::new(r"share"))
     );
 
-    assert_eq!(get_first_two_components(br"there are no separators here", is_verbatim_sep), None,);
+    assert_eq!(
+        parse_next_component(OsStr::new(r"server\"), false),
+        (OsStr::new(r"server"), OsStr::new(r""))
+    );
+
+    assert_eq!(
+        parse_next_component(OsStr::new(r"\server\"), false),
+        (OsStr::new(r""), OsStr::new(r"server\"))
+    );
+
+    assert_eq!(
+        parse_next_component(OsStr::new(r"servershare"), false),
+        (OsStr::new(r"servershare"), OsStr::new(""))
+    );
+
+    assert_eq!(
+        parse_next_component(OsStr::new(r"server/\//\/\\\\/////\/share"), false),
+        (OsStr::new(r"server"), OsStr::new(r"share"))
+    );
+
+    assert_eq!(
+        parse_next_component(OsStr::new(r"server\\\\\\\\\\\\\\share"), true),
+        (OsStr::new(r"server"), OsStr::new(r"\\\\\\\\\\\\\share"))
+    );
 }
diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs
index fb2bb0613ee..065365e5572 100644
--- a/library/std/src/sys/windows/thread_local_key.rs
+++ b/library/std/src/sys/windows/thread_local_key.rs
@@ -1,4 +1,4 @@
-use crate::mem;
+use crate::mem::ManuallyDrop;
 use crate::ptr;
 use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::Ordering::SeqCst;
@@ -111,16 +111,13 @@ struct Node {
 }
 
 unsafe fn register_dtor(key: Key, dtor: Dtor) {
-    let mut node = Box::new(Node { key, dtor, next: ptr::null_mut() });
+    let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
 
     let mut head = DTORS.load(SeqCst);
     loop {
         node.next = head;
-        match DTORS.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
-            Ok(_) => {
-                mem::forget(node);
-                return;
-            }
+        match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
+            Ok(_) => return, // nothing to drop, we successfully added the node to the list
             Err(cur) => head = cur,
         }
     }
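
The ManuallyDrop rewrite makes the ownership transfer explicit: once the CAS publishes the node, the global list owns the allocation and the local destructor must never run, so the old mem::forget on the success path disappears. A self-contained sketch of the same lock-free push, with hypothetical Node/HEAD names:

    use std::mem::ManuallyDrop;
    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

    struct Node {
        value: u32,
        next: *mut Node,
    }

    static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

    fn push(value: u32) {
        // Once the CAS succeeds the list owns the allocation, so the local
        // binding must never run its destructor; ManuallyDrop makes the
        // deliberate leak explicit (no mem::forget needed on success).
        let mut node = ManuallyDrop::new(Box::new(Node { value, next: ptr::null_mut() }));
        let mut head = HEAD.load(SeqCst);
        loop {
            node.next = head;
            match HEAD.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
                Ok(_) => return, // published; the node now lives in the list
                Err(cur) => head = cur,
            }
        }
    }
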
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
new file mode 100644
index 00000000000..9e4c9aa0a51
--- /dev/null
+++ b/library/std/src/sys/windows/thread_parker.rs
@@ -0,0 +1,250 @@
+// Thread parker implementation for Windows.
+//
+// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+).
+// This modern API is exactly the same as the futex syscalls the Linux thread
+// parker uses. When these APIs are available, the implementation of this
+// thread parker matches the Linux thread parker exactly.
+//
+// However, when the modern API is not available, this implementation falls
+// back to NT Keyed Events, which are similar, but have some important
+// differences. These are available since Windows XP.
+//
+// WaitOnAddress first checks the state of the thread parker to make sure no
+// WakeByAddressSingle calls can be missed between updating the parker state
+// and calling the function.
+//
+// NtWaitForKeyedEvent does not have this option, and unconditionally blocks
+// without checking the parker state first. Instead, NtReleaseKeyedEvent
+// (unlike WakeByAddressSingle) *blocks* until it has woken up a thread waiting
+// for it in NtWaitForKeyedEvent. This way, we can be sure no events are missed,
+// but we need to be careful not to block unpark() if park_timeout() was woken
+// up by a timeout instead of unpark().
+//
+// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a
+// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure
+// a succesfully awoken park() was awoken by unpark() and not a
+// NtReleaseKeyedEvent call from some other code, as these events are not only
+// matched by the key (address of the parker (state)), but also by this HANDLE.
+// We lazily allocate this handle the first time it is needed.
+//
+// The fast path (calling park() after unpark() was already called) and the
+// possible states are the same for both implementations. This is used here to
+// make sure the fast path does not even check which API to use, but can return
+// right away, independent of the API in use. Only the slow paths (which will
+// actually block/wake a thread) check which API is available and have
+// different implementations.
+//
+// Unfortunately, NT Keyed Events are an undocumented Windows API. However:
+// - This API is relatively simple with obvious behaviour, and there are
+//   several (unofficial) articles documenting the details. [1]
+// - `parking_lot` has been using this API for years (on Windows versions
+//   before Windows 8). [2] Many big projects extensively use parking_lot,
+//   such as servo and the Rust compiler itself.
+// - It is the underlying API used by Windows SRW locks and Windows critical
+//   sections. [3] [4]
+// - The source code of the Wine, ReactOS, and Windows XP implementations
+//   is available and matches the expected behaviour.
+// - The main risk with an undocumented API is that it might change in the
+//   future. But since we only use it for older versions of Windows, that's not
+//   a problem.
+// - Even if these functions do not block or wake as we expect (which is
+//   unlikely, see all previous points), this implementation would still be
+//   memory safe. The NT Keyed Events API is only used to sleep/block in the
+//   right place.
+//
+// [1]: http://www.locklessinc.com/articles/keyed_events/
+// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e
+// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c
+// [4]: Windows Internals, Part 1, ISBN 9780735671300
+
+use crate::convert::TryFrom;
+use crate::ptr;
+use crate::sync::atomic::{
+    AtomicI8, AtomicUsize,
+    Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::{c, dur2timeout};
+use crate::time::Duration;
+
+pub struct Parker {
+    state: AtomicI8,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+
+// Notes about memory ordering:
+//
+// Memory ordering is only relevant for the relative ordering of operations
+// between different variables. Even Ordering::Relaxed guarantees a
+// monotonic/consistent order when looking at just a single atomic variable.
+//
+// So, since this parker is just a single atomic variable, we only need to look
+// at the ordering guarantees we need to provide to the 'outside world'.
+//
+// The only memory ordering guarantee that parking and unparking provide is
+// that things which happened before unpark() are visible on the thread
+// returning from park() afterwards. Otherwise, the thread was effectively
+// unparked before unpark() was called, while still consuming the 'token'.
+//
+// In other words, unpark() needs to synchronize with the part of park() that
+// consumes the token and returns.
+//
+// This is done with a release-acquire synchronization, by using
+// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
+// Ordering::Acquire when reading this state in park() after waking up.
+impl Parker {
+    pub fn new() -> Self {
+        Self { state: AtomicI8::new(EMPTY) }
+    }
+
+    // Assumes this is only called by the thread that owns the Parker,
+    // which means that `self.state != PARKED`.
+    pub unsafe fn park(&self) {
+        // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+        // first case.
+        if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+            return;
+        }
+
+        if c::WaitOnAddress::is_available() {
+            loop {
+                // Wait for something to happen, assuming it's still set to PARKED.
+                c::WaitOnAddress(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE);
+                // Change NOTIFIED=>EMPTY but leave PARKED alone.
+                if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
+                    // Actually woken up by unpark().
+                    return;
+                } else {
+                    // Spurious wake up. We loop to try again.
+                }
+            }
+        } else {
+            // Wait for unpark() to produce this event.
+            c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+            // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+            // Note that we don't just write EMPTY, but use swap() to also
+            // include an acquire-ordered read to synchronize with unpark()'s
+            // release-ordered write.
+            self.state.swap(EMPTY, Acquire);
+        }
+    }
+
+    // Assumes this is only called by the thread that owns the Parker,
+    // which means that `self.state != PARKED`.
+    pub unsafe fn park_timeout(&self, timeout: Duration) {
+        // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+        // first case.
+        if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+            return;
+        }
+
+        if c::WaitOnAddress::is_available() {
+            // Wait for something to happen, assuming it's still set to PARKED.
+            c::WaitOnAddress(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout));
+            // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+            // Note that we don't just write EMPTY, but use swap() to also
+            // include an acquire-ordered read to synchronize with unpark()'s
+            // release-ordered write.
+            if self.state.swap(EMPTY, Acquire) == NOTIFIED {
+                // Actually woken up by unpark().
+            } else {
+                // Timeout or spurious wake up.
+                // We return either way, because we can't easily tell if it was the
+                // timeout or not.
+            }
+        } else {
+            // Need to wait for unpark() using NtWaitForKeyedEvent.
+            let handle = keyed_event_handle();
+
+            // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative
+            // values to indicate a relative time on the monotonic clock.
+            // This is documented here for the underlying KeWaitForSingleObject function:
+            // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject
+            let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) {
+                Ok(t) => -t,
+                Err(_) => i64::MIN,
+            };
+
+            // Wait for unpark() to produce this event.
+            let unparked =
+                c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS;
+
+            // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+            let prev_state = self.state.swap(EMPTY, Acquire);
+
+            if !unparked && prev_state == NOTIFIED {
+                // We were awoken by a timeout, not by unpark(), but the state
+                // was set to NOTIFIED, which means we *just* missed an
+                // unpark(), and that unpark() is now blocked waiting on us.
+                // Consume its event to unblock that thread.
+                c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut());
+            }
+        }
+    }
+
+    pub fn unpark(&self) {
+        // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
+        // wake the thread in the first case.
+        //
+        // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
+        // purpose, to make sure every unpark() has a release-acquire ordering
+        // with park().
+        if self.state.swap(NOTIFIED, Release) == PARKED {
+            if c::WakeByAddressSingle::is_available() {
+                unsafe {
+                    c::WakeByAddressSingle(self.ptr());
+                }
+            } else {
+                // If we run NtReleaseKeyedEvent before the waiting thread runs
+                // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+                // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+                // (e.g. due to a timeout), this blocks until we do wake up a thread.
+                // To prevent this thread from blocking indefinitely in that case,
+                // park_timeout() will, after seeing the state set to NOTIFIED after
+                // waking up, call NtWaitForKeyedEvent again to unblock us.
+                unsafe {
+                    c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+                }
+            }
+        }
+    }
+
+    fn ptr(&self) -> c::LPVOID {
+        &self.state as *const _ as c::LPVOID
+    }
+}
+
+fn keyed_event_handle() -> c::HANDLE {
+    const INVALID: usize = !0;
+    static HANDLE: AtomicUsize = AtomicUsize::new(INVALID);
+    match HANDLE.load(Relaxed) {
+        INVALID => {
+            let mut handle = c::INVALID_HANDLE_VALUE;
+            unsafe {
+                match c::NtCreateKeyedEvent(
+                    &mut handle,
+                    c::GENERIC_READ | c::GENERIC_WRITE,
+                    ptr::null_mut(),
+                    0,
+                ) {
+                    c::STATUS_SUCCESS => {}
+                    r => panic!("Unable to create keyed event handle: error {}", r),
+                }
+            }
+            match HANDLE.compare_exchange(INVALID, handle as usize, Relaxed, Relaxed) {
+                Ok(_) => handle,
+                Err(h) => {
+                    // Lost the race to another thread initializing HANDLE before we did.
+                    // Close our handle and use theirs instead.
+                    unsafe {
+                        c::CloseHandle(handle);
+                    }
+                    h as c::HANDLE
+                }
+            }
+        }
+        handle => handle as c::HANDLE,
+    }
+}
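
This parker is the machinery behind the public std::thread::park/Thread::unpark pair, so its token semantics can be exercised without touching the internal type:

    use std::thread;
    use std::time::Duration;

    fn main() {
        let handle = thread::spawn(|| {
            // Blocks until a token is available, then consumes it
            // (park may also return spuriously).
            thread::park();
            println!("worker unparked");
        });

        thread::sleep(Duration::from_millis(10));
        // Stores a token (NOTIFIED); repeated unpark() calls before the
        // next park() coalesce into a single token.
        handle.thread().unpark();
        handle.join().unwrap();
    }
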
diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs
index fecb732b910..1578a2de60c 100644
--- a/library/std/src/sys_common/condvar/check.rs
+++ b/library/std/src/sys_common/condvar/check.rs
@@ -23,9 +23,9 @@ impl SameMutexCheck {
     }
     pub fn verify(&self, mutex: &MovableMutex) {
         let addr = mutex.raw() as *const mutex_imp::Mutex as usize;
-        match self.addr.compare_and_swap(0, addr, Ordering::SeqCst) {
-            0 => {}              // Stored the address
-            n if n == addr => {} // Lost a race to store the same address
+        match self.addr.compare_exchange(0, addr, Ordering::SeqCst, Ordering::SeqCst) {
+            Ok(_) => {}               // Stored the address
+            Err(n) if n == addr => {} // Lost a race to store the same address
             _ => panic!("attempted to use a condition variable with two mutexes"),
         }
     }
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
index 48ba4ddfc0b..38ba0d2fbdb 100644
--- a/library/std/src/sys_common/net.rs
+++ b/library/std/src/sys_common/net.rs
@@ -177,11 +177,8 @@ impl TryFrom<&str> for LookupHost {
         }
 
         // split the string by ':' and convert the second part to u16
-        let mut parts_iter = s.rsplitn(2, ':');
-        let port_str = try_opt!(parts_iter.next(), "invalid socket address");
-        let host = try_opt!(parts_iter.next(), "invalid socket address");
+        let (host, port_str) = try_opt!(s.rsplit_once(':'), "invalid socket address");
         let port: u16 = try_opt!(port_str.parse().ok(), "invalid port value");
-
         (host, port).try_into()
     }
 }
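
rsplit_once splits at the last occurrence of the delimiter and returns both halves in order, which is exactly what the removed rsplitn dance computed; splitting on the last ':' keeps IPv6-style hosts intact. A quick illustration:

    fn main() {
        assert_eq!("localhost:8080".rsplit_once(':'), Some(("localhost", "8080")));
        // The last ':' is the port separator, so bracketed IPv6 hosts survive.
        assert_eq!("[::1]:443".rsplit_once(':'), Some(("[::1]", "443")));
        // No delimiter yields None, which maps to "invalid socket address".
        assert_eq!("no-port".rsplit_once(':'), None);
    }
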
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs
index 162eab2388d..475bfca9b6d 100644
--- a/library/std/src/sys_common/remutex.rs
+++ b/library/std/src/sys_common/remutex.rs
@@ -1,10 +1,10 @@
 #[cfg(all(test, not(target_os = "emscripten")))]
 mod tests;
 
-use crate::fmt;
-use crate::marker;
+use crate::marker::PhantomPinned;
 use crate::ops::Deref;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::pin::Pin;
 use crate::sys::mutex as sys;
 
 /// A re-entrant mutual exclusion
@@ -15,6 +15,7 @@ use crate::sys::mutex as sys;
 pub struct ReentrantMutex<T> {
     inner: sys::ReentrantMutex,
     data: T,
+    _pinned: PhantomPinned,
 }
 
 unsafe impl<T: Send> Send for ReentrantMutex<T> {}
@@ -37,10 +38,10 @@ impl<T> RefUnwindSafe for ReentrantMutex<T> {}
 /// guarded data.
 #[must_use = "if unused the ReentrantMutex will immediately unlock"]
 pub struct ReentrantMutexGuard<'a, T: 'a> {
-    lock: &'a ReentrantMutex<T>,
+    lock: Pin<&'a ReentrantMutex<T>>,
 }
 
-impl<T> !marker::Send for ReentrantMutexGuard<'_, T> {}
+impl<T> !Send for ReentrantMutexGuard<'_, T> {}
 
 impl<T> ReentrantMutex<T> {
     /// Creates a new reentrant mutex in an unlocked state.
@@ -51,7 +52,11 @@ impl<T> ReentrantMutex<T> {
     /// once this mutex is in its final resting place, and only then are the
     /// lock/unlock methods safe.
     pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
-        ReentrantMutex { inner: sys::ReentrantMutex::uninitialized(), data: t }
+        ReentrantMutex {
+            inner: sys::ReentrantMutex::uninitialized(),
+            data: t,
+            _pinned: PhantomPinned,
+        }
     }
 
     /// Initializes this mutex so it's ready for use.
@@ -60,8 +65,8 @@ impl<T> ReentrantMutex<T> {
     ///
     /// Unsafe to call more than once, and must be called after this will no
     /// longer move in memory.
-    pub unsafe fn init(&self) {
-        self.inner.init();
+    pub unsafe fn init(self: Pin<&mut Self>) {
+        self.get_unchecked_mut().inner.init()
     }
 
     /// Acquires a mutex, blocking the current thread until it is able to do so.
@@ -76,9 +81,9 @@ impl<T> ReentrantMutex<T> {
     /// If another user of this mutex panicked while holding the mutex, then
     /// this call will return failure if the mutex would otherwise be
     /// acquired.
-    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
+    pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
         unsafe { self.inner.lock() }
-        ReentrantMutexGuard::new(&self)
+        ReentrantMutexGuard { lock: self }
     }
 
     /// Attempts to acquire this lock.
@@ -93,8 +98,12 @@ impl<T> ReentrantMutex<T> {
     /// If another user of this mutex panicked while holding the mutex, then
     /// this call will return failure if the mutex would otherwise be
     /// acquired.
-    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
-        if unsafe { self.inner.try_lock() } { Some(ReentrantMutexGuard::new(&self)) } else { None }
+    pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
+        if unsafe { self.inner.try_lock() } {
+            Some(ReentrantMutexGuard { lock: self })
+        } else {
+            None
+        }
     }
 }
 
@@ -107,30 +116,6 @@ impl<T> Drop for ReentrantMutex<T> {
     }
 }
 
-impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self.try_lock() {
-            Some(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(),
-            None => {
-                struct LockedPlaceholder;
-                impl fmt::Debug for LockedPlaceholder {
-                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-                        f.write_str("<locked>")
-                    }
-                }
-
-                f.debug_struct("ReentrantMutex").field("data", &LockedPlaceholder).finish()
-            }
-        }
-    }
-}
-
-impl<'mutex, T> ReentrantMutexGuard<'mutex, T> {
-    fn new(lock: &'mutex ReentrantMutex<T>) -> ReentrantMutexGuard<'mutex, T> {
-        ReentrantMutexGuard { lock }
-    }
-}
-
 impl<T> Deref for ReentrantMutexGuard<'_, T> {
     type Target = T;
 
diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs
index 9c686e579d7..88453ded2f9 100644
--- a/library/std/src/sys_common/remutex/tests.rs
+++ b/library/std/src/sys_common/remutex/tests.rs
@@ -1,4 +1,6 @@
+use crate::boxed::Box;
 use crate::cell::RefCell;
+use crate::pin::Pin;
 use crate::sync::Arc;
 use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
 use crate::thread;
@@ -6,10 +8,11 @@ use crate::thread;
 #[test]
 fn smoke() {
     let m = unsafe {
-        let m = ReentrantMutex::new(());
-        m.init();
+        let mut m = Box::pin(ReentrantMutex::new(()));
+        m.as_mut().init();
         m
     };
+    let m = m.as_ref();
     {
         let a = m.lock();
         {
@@ -27,18 +30,19 @@ fn smoke() {
 #[test]
 fn is_mutex() {
     let m = unsafe {
-        let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
-        m.init();
-        m
+        // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
+        let mut m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+        Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
+        Pin::new_unchecked(m)
     };
     let m2 = m.clone();
-    let lock = m.lock();
+    let lock = m.as_ref().lock();
     let child = thread::spawn(move || {
-        let lock = m2.lock();
+        let lock = m2.as_ref().lock();
         assert_eq!(*lock.borrow(), 4950);
     });
     for i in 0..100 {
-        let lock = m.lock();
+        let lock = m.as_ref().lock();
         *lock.borrow_mut() += i;
     }
     drop(lock);
@@ -48,20 +52,21 @@ fn is_mutex() {
 #[test]
 fn trylock_works() {
     let m = unsafe {
-        let m = Arc::new(ReentrantMutex::new(()));
-        m.init();
-        m
+        // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
+        let mut m = Arc::new(ReentrantMutex::new(()));
+        Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
+        Pin::new_unchecked(m)
     };
     let m2 = m.clone();
-    let _lock = m.try_lock();
-    let _lock2 = m.try_lock();
+    let _lock = m.as_ref().try_lock();
+    let _lock2 = m.as_ref().try_lock();
     thread::spawn(move || {
-        let lock = m2.try_lock();
+        let lock = m2.as_ref().try_lock();
         assert!(lock.is_none());
     })
     .join()
     .unwrap();
-    let _lock3 = m.try_lock();
+    let _lock3 = m.as_ref().try_lock();
 }
 
 pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index dbcb7b36265..32cd5641665 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -168,7 +168,7 @@ impl StaticKey {
             return key;
         }
 
-        // POSIX allows the key created here to be 0, but the compare_and_swap
+        // POSIX allows the key created here to be 0, but the compare_exchange
         // below relies on using 0 as a sentinel value to check who won the
         // race to set the shared TLS key. As far as I know, there is no
         // guaranteed value that cannot be returned as a posix_key_create key,
@@ -186,11 +186,11 @@ impl StaticKey {
             key2
         };
         rtassert!(key != 0);
-        match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
+        match self.key.compare_exchange(0, key as usize, Ordering::SeqCst, Ordering::SeqCst) {
             // The CAS succeeded, so we've created the actual key
-            0 => key as usize,
+            Ok(_) => key as usize,
             // If someone beat us to the punch, use their key instead
-            n => {
+            Err(n) => {
                 imp::destroy(key);
                 n
             }
diff --git a/library/std/src/sys_common/thread_parker/futex.rs b/library/std/src/sys_common/thread_parker/futex.rs
index a5d4927dcc5..0132743b244 100644
--- a/library/std/src/sys_common/thread_parker/futex.rs
+++ b/library/std/src/sys_common/thread_parker/futex.rs
@@ -49,7 +49,7 @@ impl Parker {
             // Wait for something to happen, assuming it's still set to PARKED.
             futex_wait(&self.state, PARKED, None);
             // Change NOTIFIED=>EMPTY and return in that case.
-            if self.state.compare_and_swap(NOTIFIED, EMPTY, Acquire) == NOTIFIED {
+            if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
                 return;
             } else {
                 // Spurious wake up. We loop to try again.
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
index 5e75ac65de4..ba896bd676b 100644
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ b/library/std/src/sys_common/thread_parker/mod.rs
@@ -6,6 +6,8 @@ cfg_if::cfg_if! {
     ))] {
         mod futex;
         pub use futex::Parker;
+    } else if #[cfg(windows)] {
+        pub use crate::sys::thread_parker::Parker;
     } else {
         mod generic;
         pub use generic::Parker;