Diffstat (limited to 'library/std/src')
 library/std/src/collections/hash/map/tests.rs  |  24
 library/std/src/io/tests.rs                    |   3
 library/std/src/lib.rs                         |  20
 library/std/src/net/ip.rs                      |  51
 library/std/src/net/ip/display_buffer.rs       |  40
 library/std/src/os/fortanix_sgx/mod.rs         |   1
 library/std/src/os/unix/net/listener.rs        |  12
 library/std/src/path/tests.rs                  |   5
 library/std/src/sync/mpsc/mpsc_queue/tests.rs  |   2
 library/std/src/sync/mpsc/spsc_queue/tests.rs  |   5
 library/std/src/sync/mpsc/sync_tests.rs        |  21
 library/std/src/sync/mpsc/tests.rs             |  12
 library/std/src/sync/rwlock/tests.rs           |   2
 library/std/src/sys/sgx/abi/usercalls/alloc.rs | 161
 library/std/src/sys/sgx/abi/usercalls/mod.rs   |   8
 library/std/src/sys/sgx/abi/usercalls/raw.rs   |  24
 library/std/src/sys/sgx/abi/usercalls/tests.rs |  30
 library/std/src/sys/unix/fs.rs                 |   2
 library/std/src/sys/unix/os_str.rs             |  40
 library/std/src/sys/unix/os_str/tests.rs       |   8
 library/std/src/sys/windows/c.rs               |  26
 library/std/src/sys/windows/compat.rs          | 121
 library/std/src/sys/windows/mod.rs             |  18
 library/std/src/sys/windows/thread_parker.rs   |  27
 library/std/src/thread/tests.rs                |  19
 library/std/src/time/tests.rs                  |   3
26 files changed, 490 insertions, 195 deletions
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs
index 7ebc41588b3..cb3032719fa 100644
--- a/library/std/src/collections/hash/map/tests.rs
+++ b/library/std/src/collections/hash/map/tests.rs
@@ -268,10 +268,13 @@ fn test_lots_of_insertions() {
 
     // Try this a few times to make sure we never screw up the hashmap's
     // internal state.
-    for _ in 0..10 {
+    let loops = if cfg!(miri) { 2 } else { 10 };
+    for _ in 0..loops {
         assert!(m.is_empty());
 
-        for i in 1..1001 {
+        let count = if cfg!(miri) { 101 } else { 1001 };
+
+        for i in 1..count {
             assert!(m.insert(i, i).is_none());
 
             for j in 1..=i {
@@ -279,42 +282,42 @@ fn test_lots_of_insertions() {
                 assert_eq!(r, Some(&j));
             }
 
-            for j in i + 1..1001 {
+            for j in i + 1..count {
                 let r = m.get(&j);
                 assert_eq!(r, None);
             }
         }
 
-        for i in 1001..2001 {
+        for i in count..(2 * count) {
             assert!(!m.contains_key(&i));
         }
 
         // remove forwards
-        for i in 1..1001 {
+        for i in 1..count {
             assert!(m.remove(&i).is_some());
 
             for j in 1..=i {
                 assert!(!m.contains_key(&j));
             }
 
-            for j in i + 1..1001 {
+            for j in i + 1..count {
                 assert!(m.contains_key(&j));
             }
         }
 
-        for i in 1..1001 {
+        for i in 1..count {
             assert!(!m.contains_key(&i));
         }
 
-        for i in 1..1001 {
+        for i in 1..count {
             assert!(m.insert(i, i).is_none());
         }
 
         // remove backwards
-        for i in (1..1001).rev() {
+        for i in (1..count).rev() {
             assert!(m.remove(&i).is_some());
 
-            for j in i..1001 {
+            for j in i..count {
                 assert!(!m.contains_key(&j));
             }
 
@@ -817,6 +820,7 @@ fn test_retain() {
 }
 
 #[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
 #[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve() {
     let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
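The pattern used throughout the test changes in this commit is to shrink workloads when running under Miri, which executes far more slowly than native code. A minimal, hypothetical test (not part of this diff) showing the same cfg!(miri) scaling:

    use std::collections::HashMap;

    #[test]
    fn stress_insert() {
        // Miri is orders of magnitude slower than native execution,
        // so scale the workload down when the `miri` cfg is set.
        let count: usize = if cfg!(miri) { 100 } else { 10_000 };
        let mut m = HashMap::new();
        for i in 0..count {
            assert!(m.insert(i, i * 2).is_none());
        }
        assert_eq!(m.len(), count);
    }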
diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs
index f357f33ec52..68a19eccc0e 100644
--- a/library/std/src/io/tests.rs
+++ b/library/std/src/io/tests.rs
@@ -94,7 +94,7 @@ fn read_to_end() {
     assert_eq!(c.read_to_end(&mut v).unwrap(), 1);
     assert_eq!(v, b"1");
 
-    let cap = 1024 * 1024;
+    let cap = if cfg!(miri) { 1024 } else { 1024 * 1024 };
     let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>();
     let mut v = Vec::new();
     let (a, b) = data.split_at(data.len() / 2);
@@ -309,6 +309,7 @@ fn chain_zero_length_read_is_not_eof() {
 
 #[bench]
 #[cfg_attr(target_os = "emscripten", ignore)]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
 fn bench_read_to_end(b: &mut test::Bencher) {
     b.iter(|| {
         let mut lr = repeat(1).take(10000000);
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 475a1d9fd99..5029023121f 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -187,6 +187,7 @@
 //! [rust-discord]: https://discord.gg/rust-lang
 //! [array]: prim@array
 //! [slice]: prim@slice
+
 #![cfg_attr(not(feature = "restricted-std"), stable(feature = "rust1", since = "1.0.0"))]
 #![cfg_attr(feature = "restricted-std", unstable(feature = "restricted_std", issue = "none"))]
 #![doc(
@@ -201,25 +202,35 @@
     no_global_oom_handling,
     not(no_global_oom_handling)
 ))]
+// To run libstd tests without x.py without ending up with two copies of libstd, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+// miri-test-libstd also prefers to make std use the sysroot versions of the dependencies.
+#![cfg_attr(feature = "miri-test-libstd", feature(rustc_private))]
 // Don't link to std. We are std.
 #![no_std]
+// Tell the compiler to link to either panic_abort or panic_unwind
+#![needs_panic_runtime]
+//
+// Lints:
 #![warn(deprecated_in_future)]
 #![warn(missing_docs)]
 #![warn(missing_debug_implementations)]
 #![allow(explicit_outlives_requirements)]
 #![allow(unused_lifetimes)]
-// Tell the compiler to link to either panic_abort or panic_unwind
-#![needs_panic_runtime]
+#![deny(rustc::existing_doc_keyword)]
 // Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
 #![deny(ffi_unwind_calls)]
 // std may use features in a platform-specific way
 #![allow(unused_features)]
+//
+// Features:
 #![cfg_attr(test, feature(internal_output_capture, print_internals, update_panic_count, rt))]
 #![cfg_attr(
     all(target_vendor = "fortanix", target_env = "sgx"),
     feature(slice_index_methods, coerce_unsized, sgx_platform)
 )]
-#![deny(rustc::existing_doc_keyword)]
 //
 // Language features:
 #![feature(alloc_error_handler)]
@@ -258,6 +269,7 @@
 #![feature(staged_api)]
 #![feature(thread_local)]
 #![feature(try_blocks)]
+#![feature(utf8_chunks)]
 //
 // Library features (core):
 #![feature(array_error_internals)]
@@ -294,6 +306,8 @@
 #![feature(std_internals)]
 #![feature(str_internals)]
 #![feature(strict_provenance)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(const_maybe_uninit_uninit_array)]
 //
 // Library features (alloc):
 #![feature(alloc_layout_extra)]
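The new crate-level #![cfg(...)] above compiles std down to an empty crate when the miri-test-libstd feature is enabled outside of --test/--doctest builds, so a separately built libstd test crate does not clash with the sysroot copy. A toy illustration of how an inner cfg on the crate root strips every item (hypothetical crate and cfg name, not std itself):

    // src/lib.rs of a hypothetical crate. Built with `--cfg strip_crate` and
    // not as a test, the attribute below removes the entire crate contents;
    // built normally (or with `--test`), everything stays.
    #![cfg(any(not(strip_crate), test))]

    pub fn present_unless_stripped() -> &'static str {
        "still here"
    }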
diff --git a/library/std/src/net/ip.rs b/library/std/src/net/ip.rs
index 41ca9ba8425..189754a161e 100644
--- a/library/std/src/net/ip.rs
+++ b/library/std/src/net/ip.rs
@@ -3,12 +3,14 @@
 mod tests;
 
 use crate::cmp::Ordering;
-use crate::fmt::{self, Write as FmtWrite};
-use crate::io::Write as IoWrite;
+use crate::fmt::{self, Write};
 use crate::mem::transmute;
 use crate::sys::net::netc as c;
 use crate::sys_common::{FromInner, IntoInner};
 
+mod display_buffer;
+use display_buffer::IpDisplayBuffer;
+
 /// An IP address, either IPv4 or IPv6.
 ///
 /// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their
@@ -991,21 +993,19 @@ impl From<Ipv6Addr> for IpAddr {
 impl fmt::Display for Ipv4Addr {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         let octets = self.octets();
-        // Fast Path: if there's no alignment stuff, write directly to the buffer
+
+        // If there are no alignment requirements, write the IP address directly to `f`.
+        // Otherwise, write it to a local buffer and then use `f.pad`.
         if fmt.precision().is_none() && fmt.width().is_none() {
             write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
         } else {
-            const IPV4_BUF_LEN: usize = 15; // Long enough for the longest possible IPv4 address
-            let mut buf = [0u8; IPV4_BUF_LEN];
-            let mut buf_slice = &mut buf[..];
+            const LONGEST_IPV4_ADDR: &str = "255.255.255.255";
 
-            // Note: The call to write should never fail, hence the unwrap
-            write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
-            let len = IPV4_BUF_LEN - buf_slice.len();
+            let mut buf = IpDisplayBuffer::<{ LONGEST_IPV4_ADDR.len() }>::new();
+            // Buffer is long enough for the longest possible IPv4 address, so this should never fail.
+            write!(buf, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
 
-            // This unsafe is OK because we know what is being written to the buffer
-            let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
-            fmt.pad(buf)
+            fmt.pad(buf.as_str())
         }
     }
 }
@@ -1708,8 +1708,8 @@ impl Ipv6Addr {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl fmt::Display for Ipv6Addr {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // If there are no alignment requirements, write out the IP address to
-        // f. Otherwise, write it to a local buffer, then use f.pad.
+        // If there are no alignment requirements, write the IP address directly to `f`.
+        // Otherwise, write it to a local buffer and then use `f.pad`.
         if f.precision().is_none() && f.width().is_none() {
             let segments = self.segments();
 
@@ -1780,22 +1780,13 @@ impl fmt::Display for Ipv6Addr {
                 }
             }
         } else {
-            // Slow path: write the address to a local buffer, then use f.pad.
-            // Defined recursively by using the fast path to write to the
-            // buffer.
-
-            // This is the largest possible size of an IPv6 address
-            const IPV6_BUF_LEN: usize = (4 * 8) + 7;
-            let mut buf = [0u8; IPV6_BUF_LEN];
-            let mut buf_slice = &mut buf[..];
-
-            // Note: This call to write should never fail, so unwrap is okay.
-            write!(buf_slice, "{}", self).unwrap();
-            let len = IPV6_BUF_LEN - buf_slice.len();
-
-            // This is safe because we know exactly what can be in this buffer
-            let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
-            f.pad(buf)
+            const LONGEST_IPV6_ADDR: &str = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff";
+
+            let mut buf = IpDisplayBuffer::<{ LONGEST_IPV6_ADDR.len() }>::new();
+            // Buffer is long enough for the longest possible IPv6 address, so this should never fail.
+            write!(buf, "{}", self).unwrap();
+
+            f.pad(buf.as_str())
         }
     }
 }
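With the rewritten Display impls, the buffered path only runs when a width or precision flag is present; plain formatting still writes straight to the formatter. An illustrative check of both paths (not from this commit):

    use std::net::Ipv4Addr;

    fn main() {
        let addr = Ipv4Addr::new(192, 168, 0, 1);
        // No width/precision: fast path, written directly to the formatter.
        assert_eq!(format!("{addr}"), "192.168.0.1");
        // Width flag: slow path, formatted into the stack buffer and then
        // padded via `Formatter::pad`.
        assert_eq!(format!("{addr:>16}"), "     192.168.0.1");
    }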
diff --git a/library/std/src/net/ip/display_buffer.rs b/library/std/src/net/ip/display_buffer.rs
new file mode 100644
index 00000000000..bd852d5da8e
--- /dev/null
+++ b/library/std/src/net/ip/display_buffer.rs
@@ -0,0 +1,40 @@
+use crate::fmt;
+use crate::mem::MaybeUninit;
+use crate::str;
+
+/// Used for slow path in `Display` implementations when alignment is required.
+pub struct IpDisplayBuffer<const SIZE: usize> {
+    buf: [MaybeUninit<u8>; SIZE],
+    len: usize,
+}
+
+impl<const SIZE: usize> IpDisplayBuffer<SIZE> {
+    #[inline]
+    pub const fn new() -> Self {
+        Self { buf: MaybeUninit::uninit_array(), len: 0 }
+    }
+
+    #[inline]
+    pub fn as_str(&self) -> &str {
+        // SAFETY: `buf` is only written to by the `fmt::Write::write_str` implementation
+        // which writes a valid UTF-8 string to `buf` and correctly sets `len`.
+        unsafe {
+            let s = MaybeUninit::slice_assume_init_ref(&self.buf[..self.len]);
+            str::from_utf8_unchecked(s)
+        }
+    }
+}
+
+impl<const SIZE: usize> fmt::Write for IpDisplayBuffer<SIZE> {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        let bytes = s.as_bytes();
+
+        if let Some(buf) = self.buf.get_mut(self.len..(self.len + bytes.len())) {
+            MaybeUninit::write_slice(buf, bytes);
+            self.len += bytes.len();
+            Ok(())
+        } else {
+            Err(fmt::Error)
+        }
+    }
+}
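IpDisplayBuffer is private to std and relies on the unstable MaybeUninit array APIs gated above in lib.rs. A stable-Rust sketch of the same idea, a fixed-capacity stack buffer implementing fmt::Write (zero-initialized instead of MaybeUninit; names are illustrative, not the std type):

    use std::fmt::{self, Write};

    // Illustrative fixed-capacity, stack-allocated `fmt::Write` sink. The real
    // `IpDisplayBuffer` avoids the zero-initialization by using `MaybeUninit`.
    struct StackBuffer<const SIZE: usize> {
        buf: [u8; SIZE],
        len: usize,
    }

    impl<const SIZE: usize> StackBuffer<SIZE> {
        fn new() -> Self {
            Self { buf: [0; SIZE], len: 0 }
        }

        fn as_str(&self) -> &str {
            // Only ever filled by `write_str` below, so this is valid UTF-8.
            std::str::from_utf8(&self.buf[..self.len]).unwrap()
        }
    }

    impl<const SIZE: usize> Write for StackBuffer<SIZE> {
        fn write_str(&mut self, s: &str) -> fmt::Result {
            let bytes = s.as_bytes();
            if let Some(dst) = self.buf.get_mut(self.len..self.len + bytes.len()) {
                dst.copy_from_slice(bytes);
                self.len += bytes.len();
                Ok(())
            } else {
                // Writing past the fixed capacity is a formatting error.
                Err(fmt::Error)
            }
        }
    }

    fn main() {
        let mut buf = StackBuffer::<{ "255.255.255.255".len() }>::new();
        write!(buf, "{}.{}.{}.{}", 10, 0, 0, 1).unwrap();
        assert_eq!(buf.as_str(), "10.0.0.1");
    }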
diff --git a/library/std/src/os/fortanix_sgx/mod.rs b/library/std/src/os/fortanix_sgx/mod.rs
index a40dabe190a..da100b689db 100644
--- a/library/std/src/os/fortanix_sgx/mod.rs
+++ b/library/std/src/os/fortanix_sgx/mod.rs
@@ -26,6 +26,7 @@ pub mod usercalls {
             free, insecure_time, launch_thread, read, read_alloc, send, wait, write,
         };
         pub use crate::sys::abi::usercalls::raw::{do_usercall, Usercalls as UsercallNrs};
+        pub use crate::sys::abi::usercalls::raw::{Register, RegisterArgument, ReturnValue};
 
         // fortanix-sgx-abi re-exports
         pub use crate::sys::abi::usercalls::raw::Error;
diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs
index 7c0d539504d..3b2601e755a 100644
--- a/library/std/src/os/unix/net/listener.rs
+++ b/library/std/src/os/unix/net/listener.rs
@@ -73,9 +73,13 @@ impl UnixListener {
         unsafe {
             let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
             let (addr, len) = sockaddr_un(path.as_ref())?;
+            #[cfg(target_os = "linux")]
+            const backlog: libc::c_int = -1;
+            #[cfg(not(target_os = "linux"))]
+            const backlog: libc::c_int = 128;
 
             cvt(libc::bind(inner.as_inner().as_raw_fd(), &addr as *const _ as *const _, len as _))?;
-            cvt(libc::listen(inner.as_inner().as_raw_fd(), 128))?;
+            cvt(libc::listen(inner.as_inner().as_raw_fd(), backlog))?;
 
             Ok(UnixListener(inner))
         }
@@ -109,12 +113,16 @@ impl UnixListener {
     pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> {
         unsafe {
             let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+            #[cfg(target_os = "linux")]
+            const backlog: libc::c_int = -1;
+            #[cfg(not(target_os = "linux"))]
+            const backlog: libc::c_int = 128;
             cvt(libc::bind(
                 inner.as_raw_fd(),
                 &socket_addr.addr as *const _ as *const _,
                 socket_addr.len as _,
             ))?;
-            cvt(libc::listen(inner.as_raw_fd(), 128))?;
+            cvt(libc::listen(inner.as_raw_fd(), backlog))?;
             Ok(UnixListener(inner))
         }
     }
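The backlog constants above pick -1 on Linux, where the kernel effectively treats the value (after its unsigned cast) as a request for the maximum and clamps it to net.core.somaxconn; other platforms keep the previous fixed 128. A minimal sketch of the same cfg-selected constant, using plain i32 rather than libc::c_int (hypothetical, outside std):

    #[cfg(target_os = "linux")]
    const BACKLOG: i32 = -1; // clamped by the kernel to net.core.somaxconn
    #[cfg(not(target_os = "linux"))]
    const BACKLOG: i32 = 128;

    fn main() {
        println!("would call listen(fd, {})", BACKLOG);
    }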
diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs
index 351cf698810..dd307022c6d 100644
--- a/library/std/src/path/tests.rs
+++ b/library/std/src/path/tests.rs
@@ -1768,6 +1768,7 @@ fn test_windows_absolute() {
 }
 
 #[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
 fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) {
     let prefix = "my/home";
     let mut paths: Vec<_> =
@@ -1781,6 +1782,7 @@ fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) {
 }
 
 #[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
 fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) {
     let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
     let paths: Vec<_> =
@@ -1799,6 +1801,7 @@ fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) {
 }
 
 #[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
 fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) {
     let prefix = "my/home";
     let paths: Vec<_> =
@@ -1817,6 +1820,7 @@ fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) {
 }
 
 #[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
 fn bench_path_hashset(b: &mut test::Bencher) {
     let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
     let paths: Vec<_> =
@@ -1835,6 +1839,7 @@ fn bench_path_hashset(b: &mut test::Bencher) {
 }
 
 #[bench]
+#[cfg_attr(miri, ignore)] // Miri isn't fast...
 fn bench_path_hashset_miss(b: &mut test::Bencher) {
     let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
     let paths: Vec<_> =
diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
index 9f4f31ed051..34b2a9a98ac 100644
--- a/library/std/src/sync/mpsc/mpsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
@@ -13,7 +13,7 @@ fn test_full() {
 #[test]
 fn test() {
     let nthreads = 8;
-    let nmsgs = 1000;
+    let nmsgs = if cfg!(miri) { 100 } else { 1000 };
     let q = Queue::new();
     match q.pop() {
         Empty => {}
diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs
index 467ef3dbdcb..eb6d5c2cf66 100644
--- a/library/std/src/sync/mpsc/spsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/spsc_queue/tests.rs
@@ -77,12 +77,13 @@ fn stress() {
     }
 
     unsafe fn stress_bound(bound: usize) {
+        let count = if cfg!(miri) { 1000 } else { 100000 };
         let q = Arc::new(Queue::with_additions(bound, (), ()));
 
         let (tx, rx) = channel();
         let q2 = q.clone();
         let _t = thread::spawn(move || {
-            for _ in 0..100000 {
+            for _ in 0..count {
                 loop {
                     match q2.pop() {
                         Some(1) => break,
@@ -93,7 +94,7 @@ fn stress() {
             }
             tx.send(()).unwrap();
         });
-        for _ in 0..100000 {
+        for _ in 0..count {
             q.push(1);
         }
         rx.recv().unwrap();
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index e58649bab6e..63c79436974 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -113,23 +113,25 @@ fn chan_gone_concurrent() {
 
 #[test]
 fn stress() {
+    let count = if cfg!(miri) { 100 } else { 10000 };
     let (tx, rx) = sync_channel::<i32>(0);
     thread::spawn(move || {
-        for _ in 0..10000 {
+        for _ in 0..count {
             tx.send(1).unwrap();
         }
     });
-    for _ in 0..10000 {
+    for _ in 0..count {
         assert_eq!(rx.recv().unwrap(), 1);
     }
 }
 
 #[test]
 fn stress_recv_timeout_two_threads() {
+    let count = if cfg!(miri) { 100 } else { 10000 };
     let (tx, rx) = sync_channel::<i32>(0);
 
     thread::spawn(move || {
-        for _ in 0..10000 {
+        for _ in 0..count {
             tx.send(1).unwrap();
         }
     });
@@ -146,12 +148,12 @@ fn stress_recv_timeout_two_threads() {
         }
     }
 
-    assert_eq!(recv_count, 10000);
+    assert_eq!(recv_count, count);
 }
 
 #[test]
 fn stress_recv_timeout_shared() {
-    const AMT: u32 = 1000;
+    const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
     const NTHREADS: u32 = 8;
     let (tx, rx) = sync_channel::<i32>(0);
     let (dtx, drx) = sync_channel::<()>(0);
@@ -191,7 +193,7 @@ fn stress_recv_timeout_shared() {
 
 #[test]
 fn stress_shared() {
-    const AMT: u32 = 1000;
+    const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
     const NTHREADS: u32 = 8;
     let (tx, rx) = sync_channel::<i32>(0);
     let (dtx, drx) = sync_channel::<()>(0);
@@ -438,12 +440,13 @@ fn stream_send_recv_stress() {
 
 #[test]
 fn recv_a_lot() {
+    let count = if cfg!(miri) { 1000 } else { 10000 };
     // Regression test that we don't run out of stack in scheduler context
-    let (tx, rx) = sync_channel(10000);
-    for _ in 0..10000 {
+    let (tx, rx) = sync_channel(count);
+    for _ in 0..count {
         tx.send(()).unwrap();
     }
-    for _ in 0..10000 {
+    for _ in 0..count {
         rx.recv().unwrap();
     }
 }
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
index 4deb3e59615..f6d0796f604 100644
--- a/library/std/src/sync/mpsc/tests.rs
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -120,13 +120,14 @@ fn chan_gone_concurrent() {
 
 #[test]
 fn stress() {
+    let count = if cfg!(miri) { 100 } else { 10000 };
     let (tx, rx) = channel::<i32>();
     let t = thread::spawn(move || {
-        for _ in 0..10000 {
+        for _ in 0..count {
             tx.send(1).unwrap();
         }
     });
-    for _ in 0..10000 {
+    for _ in 0..count {
         assert_eq!(rx.recv().unwrap(), 1);
     }
     t.join().ok().expect("thread panicked");
@@ -134,7 +135,7 @@ fn stress() {
 
 #[test]
 fn stress_shared() {
-    const AMT: u32 = 10000;
+    const AMT: u32 = if cfg!(miri) { 100 } else { 10000 };
     const NTHREADS: u32 = 8;
     let (tx, rx) = channel::<i32>();
 
@@ -504,12 +505,13 @@ fn very_long_recv_timeout_wont_panic() {
 
 #[test]
 fn recv_a_lot() {
+    let count = if cfg!(miri) { 1000 } else { 10000 };
     // Regression test that we don't run out of stack in scheduler context
     let (tx, rx) = channel();
-    for _ in 0..10000 {
+    for _ in 0..count {
         tx.send(()).unwrap();
     }
-    for _ in 0..10000 {
+    for _ in 0..count {
         rx.recv().unwrap();
     }
 }
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index 08255c985f5..b5b3ad9898e 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -19,7 +19,7 @@ fn smoke() {
 #[test]
 fn frob() {
     const N: u32 = 10;
-    const M: usize = 1000;
+    const M: usize = if cfg!(miri) { 100 } else { 1000 };
 
     let r = Arc::new(RwLock::new(()));
 
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index 66fa1efbf10..fe8392f78cd 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -56,6 +56,8 @@ unsafe impl UserSafeSized for Usercall {}
 #[unstable(feature = "sgx_platform", issue = "56975")]
 unsafe impl UserSafeSized for Return {}
 #[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for Cancel {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
 unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {}
 
 /// A type that can be represented in memory as one or more `UserSafeSized`s.
@@ -305,6 +307,34 @@ where
     }
 }
 
+// Split a memory region ptr..ptr + len into three parts:
+//   +--------+
+//   | small0 | Chunk smaller than 8 bytes
+//   +--------+
+//   |   big  | Chunk 8-byte aligned, and size a multiple of 8 bytes
+//   +--------+
+//   | small1 | Chunk smaller than 8 bytes
+//   +--------+
+fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
+    let small0_size = if ptr as usize % 8 == 0 { 0 } else { 8 - ptr as usize % 8 };
+    let small1_size = (len - small0_size as usize) % 8;
+    let big_size = len - small0_size as usize - small1_size as usize;
+
+    (small0_size, big_size, small1_size)
+}
+
+unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
+    unsafe {
+        asm!(
+            "rep movsq (%rsi), (%rdi)",
+            inout("rcx") len / 8 => _,
+            inout("rdi") dst => _,
+            inout("rsi") src => _,
+            options(att_syntax, nostack, preserves_flags)
+        );
+    }
+}
+
 /// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`
 ///
 /// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
@@ -343,17 +373,6 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
         }
     }
 
-    unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
-        unsafe {
-            asm!(
-                "rep movsq (%rsi), (%rdi)",
-                inout("rcx") len / 8 => _,
-                inout("rdi") dst => _,
-                inout("rsi") src => _,
-                options(att_syntax, nostack, preserves_flags)
-            );
-        }
-    }
     assert!(!src.is_null());
     assert!(!dst.is_null());
     assert!(is_enclave_range(src, len));
@@ -370,7 +389,7 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
     } else if len % 8 == 0 && dst as usize % 8 == 0 {
         // Copying 8-byte aligned quadwords: copy quad word per quad word
         unsafe {
-            copy_aligned_quadwords_to_userspace(src, dst, len);
+            copy_quadwords(src, dst, len);
         }
     } else {
         // Split copies into three parts:
@@ -381,20 +400,16 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
         //   +--------+
         //   | small1 | Chunk smaller than 8 bytes
         //   +--------+
+        let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
 
         unsafe {
             // Copy small0
-            let small0_size = (8 - dst as usize % 8) as u8;
-            let small0_src = src;
-            let small0_dst = dst;
-            copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+            copy_bytewise_to_userspace(src, dst, small0_size as _);
 
             // Copy big
-            let small1_size = ((len - small0_size as usize) % 8) as u8;
-            let big_size = len - small0_size as usize - small1_size as usize;
             let big_src = src.offset(small0_size as _);
             let big_dst = dst.offset(small0_size as _);
-            copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+            copy_quadwords(big_src as _, big_dst, big_size);
 
             // Copy small1
             let small1_src = src.offset(big_size as isize + small0_size as isize);
@@ -404,6 +419,106 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
     }
 }
 
+/// Copies `len` bytes of data from userspace pointer `src` to enclave pointer `dst`
+///
+/// This function mitigates AEPIC leak vulnerabilities by ensuring all reads from untrusted memory are 8-byte aligned
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in user memory
+/// * The `dst` memory range is not in enclave memory
+///
+/// # References
+///  - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
+///  - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html
+pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
+    // Copies memory region `src..src + len` to the enclave at `dst`. The source memory region
+    // is:
+    //  - strictly less than 8 bytes in size and may be
+    //  - located at a misaligned memory location
+    fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) {
+        let mut tmp_buff = [0u8; 16];
+
+        unsafe {
+            // Compute an aligned memory region to read from
+            // +--------+ <-- aligned_src + aligned_len (8B-aligned)
+            // |  pad1  |
+            // +--------+ <-- src + len (misaligned)
+            // |        |
+            // |        |
+            // |        |
+            // +--------+ <-- src (misaligned)
+            // |  pad0  |
+            // +--------+ <-- aligned_src (8B-aligned)
+            let pad0_size = src as usize % 8;
+            let aligned_src = src.sub(pad0_size);
+
+            let pad1_size = 8 - (src.add(len) as usize % 8);
+            let aligned_len = pad0_size + len + pad1_size;
+
+            debug_assert!(len < 8);
+            debug_assert_eq!(aligned_src as usize % 8, 0);
+            debug_assert_eq!(aligned_len % 8, 0);
+            debug_assert!(aligned_len <= 16);
+
+            // Copy the aligned buffer to a temporary buffer
+            // Note: copying from a slightly different memory location is a bit odd. In this case it
+            // can't lead to page faults or inadvertent copying from the enclave as we only ensured
+            // that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes
+            // aligned, `aligned_src` must be on the same page as `src`. A similar argument can be made
+            // for `src + len`
+            copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len);
+
+            // Copy the correct parts of the temporary buffer to the destination
+            ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len);
+        }
+    }
+
+    assert!(!src.is_null());
+    assert!(!dst.is_null());
+    assert!(is_user_range(src, len));
+    assert!(is_enclave_range(dst, len));
+    assert!(!(src as usize).overflowing_add(len + 8).1);
+    assert!(!(dst as usize).overflowing_add(len + 8).1);
+
+    if len < 8 {
+        copy_misaligned_chunk_to_enclave(src, dst, len);
+    } else if len % 8 == 0 && src as usize % 8 == 0 {
+        // Copying 8-byte aligned quadwords: copy quad word per quad word
+        unsafe {
+            copy_quadwords(src, dst, len);
+        }
+    } else {
+        // Split copies into three parts:
+        //   +--------+
+        //   | small0 | Chunk smaller than 8 bytes
+        //   +--------+
+        //   |   big  | Chunk 8-byte aligned, and size a multiple of 8 bytes
+        //   +--------+
+        //   | small1 | Chunk smaller than 8 bytes
+        //   +--------+
+        let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
+
+        unsafe {
+            // Copy small0
+            copy_misaligned_chunk_to_enclave(src, dst, small0_size);
+
+            // Copy big
+            let big_src = src.add(small0_size);
+            let big_dst = dst.add(small0_size);
+            copy_quadwords(big_src, big_dst, big_size);
+
+            // Copy small1
+            let small1_src = src.add(big_size + small0_size);
+            let small1_dst = dst.add(big_size + small0_size);
+            copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size);
+        }
+    }
+}
+
 #[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: ?Sized> UserRef<T>
 where
@@ -468,7 +583,7 @@ where
     pub fn copy_to_enclave(&self, dest: &mut T) {
         unsafe {
             assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
-            ptr::copy(
+            copy_from_userspace(
                 self.0.get() as *const T as *const u8,
                 dest as *mut T as *mut u8,
                 mem::size_of_val(dest),
@@ -494,7 +609,11 @@ where
 {
     /// Copies the value from user memory into enclave memory.
     pub fn to_enclave(&self) -> T {
-        unsafe { ptr::read(self.0.get()) }
+        unsafe {
+            let mut data: T = mem::MaybeUninit::uninit().assume_init();
+            copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>());
+            data
+        }
     }
 }
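The new region_as_aligned_chunks helper above splits a byte range into an unaligned head, an 8-byte-aligned body, and an unaligned tail; copy_to_userspace and copy_from_userspace then handle each part with the appropriate primitive. A standalone re-derivation of that arithmetic (illustrative, mirrors the logic in the diff):

    /// Split a region of `len` bytes starting at address `addr` into
    /// (head, body, tail) sizes, where `body` starts 8-byte aligned and is a
    /// multiple of 8 bytes. Assumes `len` covers at least the head, which
    /// holds for the callers above (they only split regions of 8+ bytes).
    fn split_aligned(addr: usize, len: usize) -> (usize, usize, usize) {
        let head = if addr % 8 == 0 { 0 } else { 8 - addr % 8 };
        let tail = (len - head) % 8;
        let body = len - head - tail;
        (head, body, tail)
    }

    fn main() {
        // 21 bytes starting 3 bytes past an 8-byte boundary:
        // 5 head bytes, 16 aligned body bytes, 0 tail bytes.
        assert_eq!(split_aligned(0x1003, 21), (5, 16, 0));
        // Aligned start, length not a multiple of 8.
        assert_eq!(split_aligned(0x2000, 13), (0, 8, 5));
    }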
 
diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs
index 79d1db5e1c5..e19e843267a 100644
--- a/library/std/src/sys/sgx/abi/usercalls/mod.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs
@@ -292,12 +292,17 @@ fn check_os_error(err: Result) -> i32 {
     }
 }
 
-trait FromSgxResult {
+/// Translate the raw result of an SGX usercall.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait FromSgxResult {
+    /// Return type
     type Return;
 
+    /// Translate the raw result of an SGX usercall.
     fn from_sgx_result(self) -> IoResult<Self::Return>;
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T> FromSgxResult for (Result, T) {
     type Return = T;
 
@@ -310,6 +315,7 @@ impl<T> FromSgxResult for (Result, T) {
     }
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl FromSgxResult for Result {
     type Return = ();
 
diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs
index 4267b96ccd5..10c1456d4fd 100644
--- a/library/std/src/sys/sgx/abi/usercalls/raw.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs
@@ -37,14 +37,23 @@ pub unsafe fn do_usercall(
     (a, b)
 }
 
-type Register = u64;
+/// A value passed or returned in a CPU register.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub type Register = u64;
 
-trait RegisterArgument {
+/// Translate a type from/to Register to be used as an argument.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait RegisterArgument {
+    /// Translate a Register to Self.
     fn from_register(_: Register) -> Self;
+    /// Translate self to a Register.
     fn into_register(self) -> Register;
 }
 
-trait ReturnValue {
+/// Translate a pair of Registers to the raw usercall return value.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait ReturnValue {
+    /// Translate a pair of Registers to the raw usercall return value.
     fn from_registers(call: &'static str, regs: (Register, Register)) -> Self;
 }
 
@@ -68,6 +77,7 @@ macro_rules! define_usercalls {
 
 macro_rules! define_ra {
     (< $i:ident > $t:ty) => {
+        #[unstable(feature = "sgx_platform", issue = "56975")]
         impl<$i> RegisterArgument for $t {
             fn from_register(a: Register) -> Self {
                 a as _
@@ -78,6 +88,7 @@ macro_rules! define_ra {
         }
     };
     ($i:ty as $t:ty) => {
+        #[unstable(feature = "sgx_platform", issue = "56975")]
         impl RegisterArgument for $t {
             fn from_register(a: Register) -> Self {
                 a as $i as _
@@ -88,6 +99,7 @@ macro_rules! define_ra {
         }
     };
     ($t:ty) => {
+        #[unstable(feature = "sgx_platform", issue = "56975")]
         impl RegisterArgument for $t {
             fn from_register(a: Register) -> Self {
                 a as _
@@ -112,6 +124,7 @@ define_ra!(usize as isize);
 define_ra!(<T> *const T);
 define_ra!(<T> *mut T);
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl RegisterArgument for bool {
     fn from_register(a: Register) -> bool {
         if a != 0 { true } else { false }
@@ -121,6 +134,7 @@ impl RegisterArgument for bool {
     }
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
     fn from_register(a: Register) -> Option<NonNull<T>> {
         NonNull::new(a as _)
@@ -130,12 +144,14 @@ impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
     }
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl ReturnValue for ! {
     fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self {
         rtabort!("Usercall {call}: did not expect to be re-entered");
     }
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl ReturnValue for () {
     fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
         rtassert!(usercall_retval.0 == 0);
@@ -144,6 +160,7 @@ impl ReturnValue for () {
     }
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: RegisterArgument> ReturnValue for T {
     fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
         rtassert!(usercall_retval.1 == 0);
@@ -151,6 +168,7 @@ impl<T: RegisterArgument> ReturnValue for T {
     }
 }
 
+#[unstable(feature = "sgx_platform", issue = "56975")]
 impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) {
     fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self {
         (T::from_register(regs.0), U::from_register(regs.1))
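Register, RegisterArgument and ReturnValue (now re-exported through os::fortanix_sgx) describe how typed usercall arguments are marshalled through u64 registers. A free-standing sketch of that idea with two example impls (not the real SGX ABI code):

    // Marshal typed values through a single register-sized integer,
    // mirroring the trait shape in the diff.
    type Register = u64;

    trait RegisterArgument {
        fn from_register(r: Register) -> Self;
        fn into_register(self) -> Register;
    }

    impl RegisterArgument for usize {
        fn from_register(r: Register) -> Self { r as usize }
        fn into_register(self) -> Register { self as Register }
    }

    impl RegisterArgument for bool {
        fn from_register(r: Register) -> Self { r != 0 }
        fn into_register(self) -> Register { self as Register }
    }

    fn main() {
        assert_eq!(usize::from_register(42usize.into_register()), 42);
        assert!(bool::from_register(true.into_register()));
    }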
diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
index cbf7d7d54f7..4320f0bccd1 100644
--- a/library/std/src/sys/sgx/abi/usercalls/tests.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -1,8 +1,8 @@
-use super::alloc::copy_to_userspace;
 use super::alloc::User;
+use super::alloc::{copy_from_userspace, copy_to_userspace};
 
 #[test]
-fn test_copy_function() {
+fn test_copy_to_userspace_function() {
     let mut src = [0u8; 100];
     let mut dst = User::<[u8]>::uninitialized(100);
 
@@ -28,3 +28,29 @@ fn test_copy_function() {
         }
     }
 }
+
+#[test]
+fn test_copy_from_userspace_function() {
+    let mut dst = [0u8; 100];
+    let mut src = User::<[u8]>::uninitialized(100);
+
+    src.copy_from_enclave(&[0u8; 100]);
+
+    for size in 0..48 {
+        // For all possible alignment
+        for offset in 0..8 {
+            // overwrite complete dst
+            dst = [0u8; 100];
+
+            // Copy src[0..size] to dst + offset
+            unsafe { copy_from_userspace(src.as_ptr().offset(offset), dst.as_mut_ptr(), size) };
+
+            // Verify copy
+            for byte in 0..size {
+                unsafe {
+                    assert_eq!(dst[byte as usize], *src.as_ptr().offset(offset + byte as isize));
+                }
+            }
+        }
+    }
+}
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index 7778033eaa9..f38d2fd3d70 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -829,6 +829,7 @@ impl DirEntry {
         target_os = "fuchsia",
         target_os = "redox"
     )))]
+    #[cfg_attr(miri, allow(unused))]
     fn name_cstr(&self) -> &CStr {
         unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) }
     }
@@ -840,6 +841,7 @@ impl DirEntry {
         target_os = "fuchsia",
         target_os = "redox"
     ))]
+    #[cfg_attr(miri, allow(unused))]
     fn name_cstr(&self) -> &CStr {
         &self.name
     }
diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs
index ccbc182240c..017e2af29d4 100644
--- a/library/std/src/sys/unix/os_str.rs
+++ b/library/std/src/sys/unix/os_str.rs
@@ -11,7 +11,7 @@ use crate::str;
 use crate::sync::Arc;
 use crate::sys_common::{AsInner, IntoInner};
 
-use core::str::lossy::{Utf8Lossy, Utf8LossyChunk};
+use core::str::Utf8Chunks;
 
 #[cfg(test)]
 #[path = "../unix/os_str/tests.rs"]
@@ -29,26 +29,32 @@ pub struct Slice {
 }
 
 impl fmt::Debug for Slice {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // Writes out a valid unicode string with the correct escape sequences
-
-        formatter.write_str("\"")?;
-        for Utf8LossyChunk { valid, broken } in Utf8Lossy::from_bytes(&self.inner).chunks() {
-            for c in valid.chars().flat_map(|c| c.escape_debug()) {
-                formatter.write_char(c)?
-            }
-
-            for b in broken {
-                write!(formatter, "\\x{:02X}", b)?;
-            }
-        }
-        formatter.write_str("\"")
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&Utf8Chunks::new(&self.inner).debug(), f)
     }
 }
 
 impl fmt::Display for Slice {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::Display::fmt(&Utf8Lossy::from_bytes(&self.inner), formatter)
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // If we're the empty string then our iterator won't actually yield
+        // anything, so perform the formatting manually
+        if self.inner.is_empty() {
+            return "".fmt(f);
+        }
+
+        for chunk in Utf8Chunks::new(&self.inner) {
+            let valid = chunk.valid();
+            // If we successfully decoded the whole chunk as a valid string then
+            // we can return a direct formatting of the string which will also
+            // respect various formatting flags if possible.
+            if chunk.invalid().is_empty() {
+                return valid.fmt(f);
+            }
+
+            f.write_str(valid)?;
+            f.write_char(char::REPLACEMENT_CHARACTER)?;
+        }
+        Ok(())
     }
 }
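Utf8Chunks is still unstable (hence the utf8_chunks feature gate added in lib.rs). A stable-Rust sketch of the same chunked lossy formatting, driven by Utf8Error::valid_up_to/error_len instead (illustrative helper, not the std implementation):

    use std::fmt::{self, Write};

    // Print each maximal valid UTF-8 run as-is and one U+FFFD per invalid
    // sequence, like the `Utf8Chunks`-based Display impl above.
    fn write_lossy(f: &mut impl Write, mut bytes: &[u8]) -> fmt::Result {
        loop {
            match std::str::from_utf8(bytes) {
                Ok(valid) => return f.write_str(valid),
                Err(err) => {
                    let (valid, rest) = bytes.split_at(err.valid_up_to());
                    // `valid` is guaranteed UTF-8 up to the error position.
                    f.write_str(std::str::from_utf8(valid).unwrap())?;
                    f.write_char(char::REPLACEMENT_CHARACTER)?;
                    // Skip the invalid sequence (or everything, if the input
                    // ends in a truncated sequence) and continue.
                    let skip = err.error_len().unwrap_or(rest.len());
                    bytes = &rest[skip..];
                }
            }
        }
    }

    fn main() {
        let mut out = String::new();
        write_lossy(&mut out, b"Hello\xC0\x80 world").unwrap();
        assert_eq!(out, "Hello\u{FFFD}\u{FFFD} world");
    }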
 
diff --git a/library/std/src/sys/unix/os_str/tests.rs b/library/std/src/sys/unix/os_str/tests.rs
index 213277f01f2..22ba0c92350 100644
--- a/library/std/src/sys/unix/os_str/tests.rs
+++ b/library/std/src/sys/unix/os_str/tests.rs
@@ -8,3 +8,11 @@ fn slice_debug_output() {
 
     assert_eq!(output, expected);
 }
+
+#[test]
+fn display() {
+    assert_eq!(
+        "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye",
+        Slice::from_u8_slice(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string(),
+    );
+}
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index c5a30f8bac8..ef3f6a9ba17 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -228,6 +228,8 @@ pub const IPV6_ADD_MEMBERSHIP: c_int = 12;
 pub const IPV6_DROP_MEMBERSHIP: c_int = 13;
 pub const MSG_PEEK: c_int = 0x2;
 
+pub const LOAD_LIBRARY_SEARCH_SYSTEM32: u32 = 0x800;
+
 #[repr(C)]
 #[derive(Copy, Clone)]
 pub struct linger {
@@ -1030,6 +1032,7 @@ extern "system" {
     pub fn GetProcAddress(handle: HMODULE, name: LPCSTR) -> *mut c_void;
     pub fn GetModuleHandleA(lpModuleName: LPCSTR) -> HMODULE;
     pub fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
+    pub fn LoadLibraryExA(lplibfilename: *const i8, hfile: HANDLE, dwflags: u32) -> HINSTANCE;
 
     pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME);
     pub fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
@@ -1250,21 +1253,16 @@ compat_fn_with_fallback! {
     }
 }
 
-compat_fn_with_fallback! {
-    pub static SYNCH_API: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
-    #[allow(unused)]
-    fn WakeByAddressSingle(Address: LPVOID) -> () {
-        // This fallback is currently tightly coupled to its use in Parker::unpark.
-        //
-        // FIXME: If `WakeByAddressSingle` needs to be used anywhere other than
-        // Parker::unpark then this fallback will be wrong and will need to be decoupled.
-        crate::sys::windows::thread_parker::unpark_keyed_event(Address)
-    }
+compat_fn_optional! {
+    crate::sys::compat::load_synch_functions();
+    pub fn WaitOnAddress(
+        Address: LPVOID,
+        CompareAddress: LPVOID,
+        AddressSize: SIZE_T,
+        dwMilliseconds: DWORD
+    );
+    pub fn WakeByAddressSingle(Address: LPVOID);
 }
-pub use crate::sys::compat::WaitOnAddress;
-// Change exported name of `WakeByAddressSingle` to make the strange fallback
-// behaviour clear.
-pub use WakeByAddressSingle::call as wake_by_address_single_or_unpark_keyed_event;
 
 compat_fn_with_fallback! {
     pub static NTDLL: &CStr = ansi_str!("ntdll");
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index 473544c4d4f..9c8ddc3aa1d 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -21,6 +21,7 @@
 
 use crate::ffi::{c_void, CStr};
 use crate::ptr::NonNull;
+use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sys::c;
 
 /// Helper macro for creating CStrs from literals and symbol names.
@@ -74,6 +75,20 @@ impl Module {
         NonNull::new(module).map(Self)
     }
 
+    /// Load the library (if not already loaded)
+    ///
+    /// # Safety
+    ///
+    /// The module must not be unloaded.
+    pub unsafe fn load_system_library(name: &CStr) -> Option<Self> {
+        let module = c::LoadLibraryExA(
+            name.as_ptr(),
+            crate::ptr::null_mut(),
+            c::LOAD_LIBRARY_SEARCH_SYSTEM32,
+        );
+        NonNull::new(module).map(Self)
+    }
+
     // Try to get the address of a function.
     pub fn proc_address(self, name: &CStr) -> Option<NonNull<c_void>> {
         // SAFETY:
@@ -144,61 +159,63 @@ macro_rules! compat_fn_with_fallback {
     )*)
 }
 
-/// Optionally load `WaitOnAddress`.
-/// Unlike the dynamic loading described above, this does not have a fallback.
+/// Optionally loaded functions.
 ///
-/// This is rexported from sys::c. You should prefer to import
-/// from there in case this changes again in the future.
-pub mod WaitOnAddress {
-    use super::*;
-    use crate::mem;
-    use crate::ptr;
-    use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
-    use crate::sys::c;
-
-    static MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
-    static SYMBOL_NAME: &CStr = ansi_str!("WaitOnAddress");
-
-    // WaitOnAddress function signature.
-    type F = unsafe extern "system" fn(
-        Address: c::LPVOID,
-        CompareAddress: c::LPVOID,
-        AddressSize: c::SIZE_T,
-        dwMilliseconds: c::DWORD,
-    );
-
-    // A place to store the loaded function atomically.
-    static WAIT_ON_ADDRESS: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
-
-    // We can skip trying to load again if we already tried.
-    static LOAD_MODULE: AtomicBool = AtomicBool::new(true);
-
-    #[inline(always)]
-    pub fn option() -> Option<F> {
-        let f = WAIT_ON_ADDRESS.load(Ordering::Acquire);
-        if !f.is_null() { Some(unsafe { mem::transmute(f) }) } else { try_load() }
-    }
+/// Actual loading of the function defers to $load_functions.
+macro_rules! compat_fn_optional {
+    ($load_functions:expr;
+    $(
+        $(#[$meta:meta])*
+        $vis:vis fn $symbol:ident($($argname:ident: $argtype:ty),*) $(-> $rettype:ty)?;
+    )+) => (
+        $(
+            pub mod $symbol {
+                use super::*;
+                use crate::ffi::c_void;
+                use crate::mem;
+                use crate::ptr::{self, NonNull};
+                use crate::sync::atomic::{AtomicPtr, Ordering};
+
+                pub(in crate::sys) static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
+
+                type F = unsafe extern "system" fn($($argtype),*) $(-> $rettype)?;
+
+                #[inline(always)]
+                pub fn option() -> Option<F> {
+                    let f = PTR.load(Ordering::Acquire);
+                    if !f.is_null() { Some(unsafe { mem::transmute(f) }) } else { try_load() }
+                }
 
-    #[cold]
-    fn try_load() -> Option<F> {
-        if LOAD_MODULE.load(Ordering::Acquire) {
-            // load the module
-            let mut wait_on_address = None;
-            if let Some(func) = try_load_inner() {
-                WAIT_ON_ADDRESS.store(func.as_ptr(), Ordering::Release);
-                wait_on_address = Some(unsafe { mem::transmute(func) });
+                #[cold]
+                fn try_load() -> Option<F> {
+                    $load_functions;
+                    NonNull::new(PTR.load(Ordering::Acquire)).map(|f| unsafe { mem::transmute(f) })
+                }
             }
-            // Don't try to load the module again even if loading failed.
-            LOAD_MODULE.store(false, Ordering::Release);
-            wait_on_address
-        } else {
-            None
-        }
-    }
+        )+
+    )
+}
 
-    // In the future this could be a `try` block but until then I think it's a
-    // little bit cleaner as a separate function.
-    fn try_load_inner() -> Option<NonNull<c_void>> {
-        unsafe { Module::new(MODULE_NAME)?.proc_address(SYMBOL_NAME) }
+/// Load all needed functions from "api-ms-win-core-synch-l1-2-0".
+pub(super) fn load_synch_functions() {
+    fn try_load() -> Option<()> {
+        const MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
+        const WAIT_ON_ADDRESS: &CStr = ansi_str!("WaitOnAddress");
+        const WAKE_BY_ADDRESS_SINGLE: &CStr = ansi_str!("WakeByAddressSingle");
+
+        // Try loading the library and all the required functions.
+        // If any step fails, then they all fail.
+        let library = unsafe { Module::load_system_library(MODULE_NAME) }?;
+        let wait_on_address = library.proc_address(WAIT_ON_ADDRESS)?;
+        let wake_by_address_single = library.proc_address(WAKE_BY_ADDRESS_SINGLE)?;
+
+        c::WaitOnAddress::PTR.store(wait_on_address.as_ptr(), Ordering::Release);
+        c::WakeByAddressSingle::PTR.store(wake_by_address_single.as_ptr(), Ordering::Release);
+        Some(())
     }
+
+    // Try to load the module but skip loading if a previous attempt failed.
+    static LOAD_MODULE: AtomicBool = AtomicBool::new(true);
+    let module_loaded = LOAD_MODULE.load(Ordering::Acquire) && try_load().is_some();
+    LOAD_MODULE.store(module_loaded, Ordering::Release)
 }
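Each function declared through the new compat_fn_optional! macro is cached in an AtomicPtr and transmuted to a typed fn pointer on use; load_synch_functions publishes both symbols only if the whole library loads. A simplified, platform-neutral sketch of that caching pattern (illustrative names; the real loader goes through LoadLibraryExA/GetProcAddress):

    use std::ffi::c_void;
    use std::sync::atomic::{AtomicPtr, Ordering};
    use std::{mem, ptr};

    // The signature we want to expose if the symbol can be resolved at runtime.
    type WakeFn = unsafe extern "system" fn(*mut c_void);

    // Cache for the resolved symbol; null means "not (yet) loaded".
    static WAKE_PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

    // Stand-in for the real loader, which would resolve the symbol from the
    // system library and publish it into `WAKE_PTR` with a Release store.
    fn try_load() -> Option<WakeFn> {
        None // pretend the API is unavailable in this sketch
    }

    fn wake_by_address_single_option() -> Option<WakeFn> {
        let f = WAKE_PTR.load(Ordering::Acquire);
        if !f.is_null() {
            // SAFETY: only ever stored from a successfully resolved symbol
            // with exactly this signature.
            Some(unsafe { mem::transmute::<*mut c_void, WakeFn>(f) })
        } else {
            try_load()
        }
    }

    fn main() {
        match wake_by_address_single_option() {
            Some(_wake) => println!("futex-style APIs available"),
            None => println!("falling back to keyed events"),
        }
    }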
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index b3f6d2d0aae..a9846a48488 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -2,6 +2,7 @@
 
 use crate::ffi::{CStr, OsStr, OsString};
 use crate::io::ErrorKind;
+use crate::mem::MaybeUninit;
 use crate::os::windows::ffi::{OsStrExt, OsStringExt};
 use crate::path::PathBuf;
 use crate::time::Duration;
@@ -204,8 +205,8 @@ where
     // This initial size also works around `GetFullPathNameW` returning
     // incorrect size hints for some short paths:
     // https://github.com/dylni/normpath/issues/5
-    let mut stack_buf = [0u16; 512];
-    let mut heap_buf = Vec::new();
+    let mut stack_buf: [MaybeUninit<u16>; 512] = MaybeUninit::uninit_array();
+    let mut heap_buf: Vec<MaybeUninit<u16>> = Vec::new();
     unsafe {
         let mut n = stack_buf.len();
         loop {
@@ -214,6 +215,11 @@ where
             } else {
                 let extra = n - heap_buf.len();
                 heap_buf.reserve(extra);
+                // We used `reserve` and not `reserve_exact`, so in theory we
+                // may have gotten more than requested. If so, we'd like to use
+                // it... so long as we won't cause overflow.
+                n = heap_buf.capacity().min(c::DWORD::MAX as usize);
+                // Safety: MaybeUninit<u16> does not need initialization
                 heap_buf.set_len(n);
                 &mut heap_buf[..]
             };
@@ -228,13 +234,13 @@ where
             // error" is still 0 then we interpret it as a 0 length buffer and
             // not an actual error.
             c::SetLastError(0);
-            let k = match f1(buf.as_mut_ptr(), n as c::DWORD) {
+            let k = match f1(buf.as_mut_ptr().cast::<u16>(), n as c::DWORD) {
                 0 if c::GetLastError() == 0 => 0,
                 0 => return Err(crate::io::Error::last_os_error()),
                 n => n,
             } as usize;
             if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER {
-                n *= 2;
+                n = n.saturating_mul(2).min(c::DWORD::MAX as usize);
             } else if k > n {
                 n = k;
             } else if k == n {
@@ -244,7 +250,9 @@ where
                 // Therefore k never equals n.
                 unreachable!();
             } else {
-                return Ok(f2(&buf[..k]));
+                // Safety: First `k` values are initialized.
+                let slice: &[u16] = MaybeUninit::slice_assume_init_ref(&buf[..k]);
+                return Ok(f2(slice));
             }
         }
     }
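The changes to this buffer-retry helper keep the same overall shape: call the API with a stack buffer first, then grow a heap buffer until the reported length fits. A stable sketch of that retry loop with plain zeroed u16s, omitting the MaybeUninit optimization and Windows error handling (hypothetical signature, not the std function):

    // `f1` either fills the buffer and returns the number of units written,
    // or returns the size it needs; `f2` consumes the filled prefix.
    fn fill_utf16_buf<T>(
        mut f1: impl FnMut(&mut [u16]) -> usize,
        f2: impl FnOnce(&[u16]) -> T,
    ) -> T {
        let mut buf = vec![0u16; 512];
        loop {
            let k = f1(&mut buf);
            if k > buf.len() {
                // Too small: grow to the reported size and retry.
                buf.resize(k, 0);
            } else {
                return f2(&buf[..k]);
            }
        }
    }

    fn main() {
        // Toy callee that needs 1000 units and fills them once the buffer fits.
        let needed = 1000;
        let len = fill_utf16_buf(
            |buf| {
                if buf.len() < needed {
                    needed
                } else {
                    for (i, v) in buf[..needed].iter_mut().enumerate() {
                        *v = i as u16;
                    }
                    needed
                }
            },
            |filled| filled.len(),
        );
        assert_eq!(len, 1000);
    }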
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
index 16863c9903a..2f7ae863b6a 100644
--- a/library/std/src/sys/windows/thread_parker.rs
+++ b/library/std/src/sys/windows/thread_parker.rs
@@ -198,8 +198,18 @@ impl Parker {
         // with park().
         if self.state.swap(NOTIFIED, Release) == PARKED {
             unsafe {
-                // This calls either WakeByAddressSingle or unpark_keyed_event (see below).
-                c::wake_by_address_single_or_unpark_keyed_event(self.ptr());
+                if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
+                    wake_by_address_single(self.ptr());
+                } else {
+                    // If we run NtReleaseKeyedEvent before the waiting thread runs
+                    // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+                    // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+                    // (e.g. due to a timeout), this blocks until we do wake up a thread.
+                    // To prevent this thread from blocking indefinitely in that case,
+                    // park_impl() will, after seeing the state set to NOTIFIED after
+                    // waking up, call NtWaitForKeyedEvent again to unblock us.
+                    c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+                }
             }
         }
     }
@@ -209,19 +219,6 @@ impl Parker {
     }
 }
 
-// This function signature makes it compatible with c::WakeByAddressSingle
-// so that it can be used as a fallback for that function.
-pub unsafe extern "C" fn unpark_keyed_event(address: c::LPVOID) {
-    // If we run NtReleaseKeyedEvent before the waiting thread runs
-    // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
-    // If the waiting thread wakes up before we run NtReleaseKeyedEvent
-    // (e.g. due to a timeout), this blocks until we do wake up a thread.
-    // To prevent this thread from blocking indefinitely in that case,
-    // park_impl() will, after seeing the state set to NOTIFIED after
-    // waking up, call NtWaitForKeyedEvent again to unblock us.
-    c::NtReleaseKeyedEvent(keyed_event_handle(), address, 0, ptr::null_mut());
-}
-
 fn keyed_event_handle() -> c::HANDLE {
     const INVALID: c::HANDLE = ptr::invalid_mut(!0);
     static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID);
diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs
index ec68b529188..130e47c8d44 100644
--- a/library/std/src/thread/tests.rs
+++ b/library/std/src/thread/tests.rs
@@ -329,3 +329,22 @@ fn test_scoped_threads_nll() {
     let x = 42_u8;
     foo(&x);
 }
+
+// Regression test for https://github.com/rust-lang/rust/issues/98498.
+#[test]
+#[cfg(miri)] // relies on Miri's data race detector
+fn scope_join_race() {
+    for _ in 0..100 {
+        let a_bool = AtomicBool::new(false);
+
+        thread::scope(|s| {
+            for _ in 0..5 {
+                s.spawn(|| a_bool.load(Ordering::Relaxed));
+            }
+
+            for _ in 0..5 {
+                s.spawn(|| a_bool.load(Ordering::Relaxed));
+            }
+        });
+    }
+}
diff --git a/library/std/src/time/tests.rs b/library/std/src/time/tests.rs
index d710a574465..6229556c85f 100644
--- a/library/std/src/time/tests.rs
+++ b/library/std/src/time/tests.rs
@@ -31,7 +31,8 @@ fn instant_monotonic_concurrent() -> crate::thread::Result<()> {
         .map(|_| {
             crate::thread::spawn(|| {
                 let mut old = Instant::now();
-                for _ in 0..5_000_000 {
+                let count = if cfg!(miri) { 1_000 } else { 5_000_000 };
+                for _ in 0..count {
                     let new = Instant::now();
                     assert!(new >= old);
                     old = new;