Diffstat (limited to 'library/std/src')
 library/std/src/alloc.rs                                 |  6
 library/std/src/collections/hash/map.rs                  |  2
 library/std/src/collections/hash/set.rs                  |  2
 library/std/src/io/buffered/bufreader.rs                 |  7
 library/std/src/io/error.rs                              |  2
 library/std/src/io/stdio.rs                              |  2
 library/std/src/lib.rs                                   |  4
 library/std/src/net/test.rs                              |  4
 library/std/src/os/unix/net/stream.rs                    |  6
 library/std/src/os/unix/net/ucred.rs                     |  3
 library/std/src/panicking.rs                             |  2
 library/std/src/sync/barrier.rs                          |  2
 library/std/src/sync/condvar/tests.rs                    |  4
 library/std/src/sys/pal/unix/thread_parking/pthread.rs   | 22
 library/std/src/sys/pal/wasm/alloc.rs                    |  9
 library/std/src/sys/pal/windows/pipe.rs                  |  8
 library/std/src/sys/pal/xous/alloc.rs                    |  9
 library/std/src/sys/pal/xous/net/tcpstream.rs            |  2
 library/std/src/sys/pal/xous/thread_local_key.rs         | 10
 library/std/src/sys/sync/mutex/xous.rs                   | 11
 library/std/src/sys/thread_local/static_local.rs         |  3
 library/std/src/sys_common/thread_local_key.rs           |  6
 library/std/src/thread/local/tests.rs                    | 19
 23 files changed, 79 insertions(+), 66 deletions(-)
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index a834b36697c..dc0e302a810 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -329,7 +329,7 @@ static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
 /// ```
 #[unstable(feature = "alloc_error_hook", issue = "51245")]
 pub fn set_alloc_error_hook(hook: fn(Layout)) {
-    HOOK.store(hook as *mut (), Ordering::SeqCst);
+    HOOK.store(hook as *mut (), Ordering::Release);
 }
 
 /// Unregisters the current allocation error hook, returning it.
@@ -339,7 +339,7 @@ pub fn set_alloc_error_hook(hook: fn(Layout)) {
 /// If no custom hook is registered, the default hook will be returned.
 #[unstable(feature = "alloc_error_hook", issue = "51245")]
 pub fn take_alloc_error_hook() -> fn(Layout) {
-    let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
+    let hook = HOOK.swap(ptr::null_mut(), Ordering::Acquire);
     if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
 }
 
@@ -362,7 +362,7 @@ fn default_alloc_error_hook(layout: Layout) {
 #[alloc_error_handler]
 #[unstable(feature = "alloc_internals", issue = "none")]
 pub fn rust_oom(layout: Layout) -> ! {
-    let hook = HOOK.load(Ordering::SeqCst);
+    let hook = HOOK.load(Ordering::Acquire);
     let hook: fn(Layout) =
         if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
     hook(layout);
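
Aside: a minimal, self-contained sketch of the release/acquire publication pattern used for the hook above. `set_hook`, `call_hook`, `default_hook` and the `usize` payload are illustrative stand-ins, not std's API; the point is only that the `Release` store pairs with the `Acquire` load, so whatever the registering thread wrote before installing the hook is visible to the thread that ends up calling it.

    use std::sync::atomic::{AtomicPtr, Ordering};
    use std::{mem, ptr};

    static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());

    fn default_hook(x: usize) {
        println!("default hook: {x}");
    }

    // Release: writes made before registering the hook happen-before any
    // call that observes this store with an Acquire load.
    fn set_hook(hook: fn(usize)) {
        HOOK.store(hook as *mut (), Ordering::Release);
    }

    // Acquire: pairs with the Release store in `set_hook`.
    fn call_hook(x: usize) {
        let hook = HOOK.load(Ordering::Acquire);
        let hook: fn(usize) =
            if hook.is_null() { default_hook } else { unsafe { mem::transmute(hook) } };
        hook(x);
    }

    fn main() {
        call_hook(1); // default hook
        set_hook(|x| println!("custom hook: {x}"));
        call_hook(2); // custom hook
    }
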
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 627befb63a1..2cc9afe9249 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -1101,7 +1101,7 @@ where
     /// ```
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_confusables("push", "append")]
+    #[rustc_confusables("push", "append", "put")]
     pub fn insert(&mut self, k: K, v: V) -> Option<V> {
         self.base.insert(k, v)
     }
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index 371201ff44c..3910100f212 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -885,7 +885,7 @@ where
     /// ```
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_confusables("push", "append")]
+    #[rustc_confusables("push", "append", "put")]
     pub fn insert(&mut self, value: T) -> bool {
         self.base.insert(value)
     }
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
index 83db332ee25..acaa7e9228e 100644
--- a/library/std/src/io/buffered/bufreader.rs
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -328,10 +328,9 @@ impl<R: ?Sized + Read> Read for BufReader<R> {
             self.discard_buffer();
             return self.inner.read_vectored(bufs);
         }
-        let nread = {
-            let mut rem = self.fill_buf()?;
-            rem.read_vectored(bufs)?
-        };
+        let mut rem = self.fill_buf()?;
+        let nread = rem.read_vectored(bufs)?;
+
         self.consume(nread);
         Ok(nread)
     }
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index 7ae15e0fd01..85625116d02 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -83,7 +83,7 @@ impl From<alloc::ffi::NulError> for Error {
     }
 }
 
-#[stable(feature = "io_error_from_try_reserve", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "io_error_from_try_reserve", since = "1.78.0")]
 impl From<alloc::collections::TryReserveError> for Error {
     /// Converts `TryReserveError` to an error with [`ErrorKind::OutOfMemory`].
     ///
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index ccc2ed91688..8f60b3b1535 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -453,7 +453,7 @@ impl Read for Stdin {
     }
 }
 
-#[stable(feature = "read_shared_stdin", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "read_shared_stdin", since = "1.78.0")]
 impl Read for &Stdin {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         self.lock().read(buf)
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 3db5cda83b7..c457c39e0c1 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -270,9 +270,6 @@
 //
 // Language features:
 // tidy-alphabetical-start
-#![cfg_attr(bootstrap, feature(exhaustive_patterns))]
-#![cfg_attr(bootstrap, feature(platform_intrinsics))]
-#![cfg_attr(not(bootstrap), feature(min_exhaustive_patterns))]
 #![feature(alloc_error_handler)]
 #![feature(allocator_internals)]
 #![feature(allow_internal_unsafe)]
@@ -297,6 +294,7 @@
 #![feature(let_chains)]
 #![feature(link_cfg)]
 #![feature(linkage)]
+#![feature(min_exhaustive_patterns)]
 #![feature(min_specialization)]
 #![feature(must_not_suspend)]
 #![feature(needs_panic_runtime)]
diff --git a/library/std/src/net/test.rs b/library/std/src/net/test.rs
index 37937b5ea95..d318d457f35 100644
--- a/library/std/src/net/test.rs
+++ b/library/std/src/net/test.rs
@@ -7,12 +7,12 @@ use crate::sync::atomic::{AtomicUsize, Ordering};
 static PORT: AtomicUsize = AtomicUsize::new(0);
 
 pub fn next_test_ip4() -> SocketAddr {
-    let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+    let port = PORT.fetch_add(1, Ordering::Relaxed) as u16 + base_port();
     SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port))
 }
 
 pub fn next_test_ip6() -> SocketAddr {
-    let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+    let port = PORT.fetch_add(1, Ordering::Relaxed) as u16 + base_port();
     SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), port, 0, 0))
 }
 
diff --git a/library/std/src/os/unix/net/stream.rs b/library/std/src/os/unix/net/stream.rs
index d2e23bdee6c..d67493aaf4d 100644
--- a/library/std/src/os/unix/net/stream.rs
+++ b/library/std/src/os/unix/net/stream.rs
@@ -8,7 +8,8 @@
     target_os = "macos",
     target_os = "watchos",
     target_os = "netbsd",
-    target_os = "openbsd"
+    target_os = "openbsd",
+    target_os = "nto"
 ))]
 use super::{peer_cred, UCred};
 #[cfg(any(doc, target_os = "android", target_os = "linux"))]
@@ -234,7 +235,8 @@ impl UnixStream {
         target_os = "macos",
         target_os = "watchos",
         target_os = "netbsd",
-        target_os = "openbsd"
+        target_os = "openbsd",
+        target_os = "nto"
     ))]
     pub fn peer_cred(&self) -> io::Result<UCred> {
         peer_cred(self)
diff --git a/library/std/src/os/unix/net/ucred.rs b/library/std/src/os/unix/net/ucred.rs
index de09c93840a..4c915c57906 100644
--- a/library/std/src/os/unix/net/ucred.rs
+++ b/library/std/src/os/unix/net/ucred.rs
@@ -30,7 +30,8 @@ pub(super) use self::impl_linux::peer_cred;
     target_os = "dragonfly",
     target_os = "freebsd",
     target_os = "openbsd",
-    target_os = "netbsd"
+    target_os = "netbsd",
+    target_os = "nto"
 ))]
 pub(super) use self::impl_bsd::peer_cred;
 
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 464a46264cb..b0bcab7994c 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -272,7 +272,7 @@ fn default_hook(info: &PanicInfo<'_>) {
                 drop(backtrace::print(err, crate::backtrace_rs::PrintFmt::Full))
             }
             Some(BacktraceStyle::Off) => {
-                if FIRST_PANIC.swap(false, Ordering::SeqCst) {
+                if FIRST_PANIC.swap(false, Ordering::Relaxed) {
                     let _ = writeln!(
                         err,
                         "note: run with `RUST_BACKTRACE=1` environment variable to display a \
diff --git a/library/std/src/sync/barrier.rs b/library/std/src/sync/barrier.rs
index 764fa284794..b4bac081e7a 100644
--- a/library/std/src/sync/barrier.rs
+++ b/library/std/src/sync/barrier.rs
@@ -81,7 +81,7 @@ impl Barrier {
     /// let barrier = Barrier::new(10);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_barrier", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_barrier", since = "1.78.0")]
     #[must_use]
     #[inline]
     pub const fn new(n: usize) -> Barrier {
diff --git a/library/std/src/sync/condvar/tests.rs b/library/std/src/sync/condvar/tests.rs
index 24f467f0b03..12d13a6b20b 100644
--- a/library/std/src/sync/condvar/tests.rs
+++ b/library/std/src/sync/condvar/tests.rs
@@ -170,14 +170,14 @@ fn wait_timeout_wake() {
         let t = thread::spawn(move || {
             let _g = m2.lock().unwrap();
             thread::sleep(Duration::from_millis(1));
-            notified_copy.store(true, Ordering::SeqCst);
+            notified_copy.store(true, Ordering::Relaxed);
             c2.notify_one();
         });
         let (g, timeout_res) = c.wait_timeout(g, Duration::from_millis(u64::MAX)).unwrap();
         assert!(!timeout_res.timed_out());
         // spurious wakeups mean this isn't necessarily true
         // so execute test again, if not notified
-        if !notified.load(Ordering::SeqCst) {
+        if !notified.load(Ordering::Relaxed) {
             t.join().unwrap();
             continue;
         }
diff --git a/library/std/src/sys/pal/unix/thread_parking/pthread.rs b/library/std/src/sys/pal/unix/thread_parking/pthread.rs
index ae805d84399..bb79cf9548e 100644
--- a/library/std/src/sys/pal/unix/thread_parking/pthread.rs
+++ b/library/std/src/sys/pal/unix/thread_parking/pthread.rs
@@ -5,7 +5,7 @@ use crate::marker::PhantomPinned;
 use crate::pin::Pin;
 use crate::ptr::addr_of_mut;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 #[cfg(not(target_os = "nto"))]
 use crate::sys::time::TIMESPEC_MAX;
 #[cfg(target_os = "nto")]
@@ -150,16 +150,18 @@ impl Parker {
 
     // This implementation doesn't require `unsafe`, but other implementations
     // may assume this is only called by the thread that owns the Parker.
+    //
+    // For memory ordering, see std/src/sys_common/thread_parking/futex.rs
     pub unsafe fn park(self: Pin<&Self>) {
         // If we were previously notified then we consume this notification and
         // return quickly.
-        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+        if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok() {
             return;
         }
 
         // Otherwise we need to coordinate going to sleep
         lock(self.lock.get());
-        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+        match self.state.compare_exchange(EMPTY, PARKED, Relaxed, Relaxed) {
             Ok(_) => {}
             Err(NOTIFIED) => {
                 // We must read here, even though we know it will be `NOTIFIED`.
@@ -168,7 +170,7 @@ impl Parker {
                 // acquire operation that synchronizes with that `unpark` to observe
                 // any writes it made before the call to unpark. To do that we must
                 // read from the write it made to `state`.
-                let old = self.state.swap(EMPTY, SeqCst);
+                let old = self.state.swap(EMPTY, Acquire);
 
                 unlock(self.lock.get());
 
@@ -185,7 +187,7 @@ impl Parker {
         loop {
             wait(self.cvar.get(), self.lock.get());
 
-            match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+            match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) {
                 Ok(_) => break, // got a notification
                 Err(_) => {}    // spurious wakeup, go back to sleep
             }
@@ -201,16 +203,16 @@ impl Parker {
         // Like `park` above we have a fast path for an already-notified thread, and
         // afterwards we start coordinating for a sleep.
         // return quickly.
-        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+        if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok() {
             return;
         }
 
         lock(self.lock.get());
-        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+        match self.state.compare_exchange(EMPTY, PARKED, Relaxed, Relaxed) {
             Ok(_) => {}
             Err(NOTIFIED) => {
                 // We must read again here, see `park`.
-                let old = self.state.swap(EMPTY, SeqCst);
+                let old = self.state.swap(EMPTY, Acquire);
                 unlock(self.lock.get());
 
                 assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
@@ -228,7 +230,7 @@ impl Parker {
         // parked.
         wait_timeout(self.cvar.get(), self.lock.get(), dur);
 
-        match self.state.swap(EMPTY, SeqCst) {
+        match self.state.swap(EMPTY, Acquire) {
             NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
             PARKED => unlock(self.lock.get()),   // no notification, alas
             n => {
@@ -245,7 +247,7 @@ impl Parker {
         // `state` is already `NOTIFIED`. That is why this must be a swap
         // rather than a compare-and-swap that returns if it reads `NOTIFIED`
         // on failure.
-        match self.state.swap(NOTIFIED, SeqCst) {
+        match self.state.swap(NOTIFIED, Release) {
             EMPTY => return,    // no one was waiting
             NOTIFIED => return, // already unparked
             PARKED => {}        // gotta go wake someone up
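
Aside: a sketch of the notified/empty handshake this parker relies on, reduced to two atomics (`STATE` plays the role of `state`, `DATA` stands for whatever the unparking thread wrote beforehand). It shows why `Acquire` is needed only on the successful exchange that consumes the notification, while the failure ordering and the payload accesses can stay `Relaxed`; the real parker additionally coordinates through the pthread mutex/condvar, which this sketch omits.

    use std::sync::atomic::{
        AtomicU32, AtomicUsize,
        Ordering::{Acquire, Relaxed, Release},
    };
    use std::thread;

    const EMPTY: usize = 0;
    const NOTIFIED: usize = 1;

    static STATE: AtomicUsize = AtomicUsize::new(EMPTY);
    static DATA: AtomicU32 = AtomicU32::new(0);

    fn main() {
        let unparker = thread::spawn(|| {
            DATA.store(42, Relaxed);       // write made before the notification
            STATE.swap(NOTIFIED, Release); // publish it, like `unpark`
        });

        // Like the fast path of `park`: Acquire on success synchronizes with
        // the Release swap above; a failed exchange learns nothing about the
        // other thread's writes, so its ordering can be Relaxed.
        while STATE.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_err() {
            std::hint::spin_loop();
        }
        assert_eq!(DATA.load(Relaxed), 42); // guaranteed visible after the Acquire
        unparker.join().unwrap();
    }
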
diff --git a/library/std/src/sys/pal/wasm/alloc.rs b/library/std/src/sys/pal/wasm/alloc.rs
index 6dceb1689a8..b74ce0d4742 100644
--- a/library/std/src/sys/pal/wasm/alloc.rs
+++ b/library/std/src/sys/pal/wasm/alloc.rs
@@ -57,7 +57,10 @@ unsafe impl GlobalAlloc for System {
 
 #[cfg(target_feature = "atomics")]
 mod lock {
-    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+    use crate::sync::atomic::{
+        AtomicI32,
+        Ordering::{Acquire, Release},
+    };
 
     static LOCKED: AtomicI32 = AtomicI32::new(0);
 
@@ -65,7 +68,7 @@ mod lock {
 
     pub fn lock() -> DropLock {
         loop {
-            if LOCKED.swap(1, SeqCst) == 0 {
+            if LOCKED.swap(1, Acquire) == 0 {
                 return DropLock;
             }
             // Ok so here's where things get a little depressing. At this point
@@ -143,7 +146,7 @@ mod lock {
 
     impl Drop for DropLock {
         fn drop(&mut self) {
-            let r = LOCKED.swap(0, SeqCst);
+            let r = LOCKED.swap(0, Release);
             debug_assert_eq!(r, 1);
 
             // Note that due to the above logic we don't actually need to wake
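
Aside: the lock module above is essentially the spinlock below; a self-contained sketch with `spin_loop` standing in for the wasm atomic wait/notify path. `Acquire` on the winning swap makes the previous holder's critical-section writes visible, and `Release` on unlock publishes ours to the next holder, which is all the earlier SeqCst orderings were providing here.

    use std::sync::atomic::{
        AtomicI32,
        Ordering::{Acquire, Release},
    };

    static LOCKED: AtomicI32 = AtomicI32::new(0);

    struct DropLock;

    fn lock() -> DropLock {
        // Acquire: once we take the lock, everything the previous holder did
        // before its Release unlock is visible to us.
        while LOCKED.swap(1, Acquire) != 0 {
            std::hint::spin_loop(); // stand-in for the platform wait/yield
        }
        DropLock
    }

    impl Drop for DropLock {
        fn drop(&mut self) {
            // Release: publish this critical section's writes to the next Acquire.
            let r = LOCKED.swap(0, Release);
            debug_assert_eq!(r, 1);
        }
    }

    fn main() {
        let _guard = lock();
        // ... critical section ...
    }
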
diff --git a/library/std/src/sys/pal/windows/pipe.rs b/library/std/src/sys/pal/windows/pipe.rs
index 013f588676a..dfa938d4d57 100644
--- a/library/std/src/sys/pal/windows/pipe.rs
+++ b/library/std/src/sys/pal/windows/pipe.rs
@@ -7,7 +7,7 @@ use crate::path::Path;
 use crate::ptr;
 use crate::slice;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::Relaxed;
 use crate::sys::c;
 use crate::sys::fs::{File, OpenOptions};
 use crate::sys::handle::Handle;
@@ -214,11 +214,11 @@ pub fn spawn_pipe_relay(
 fn random_number() -> usize {
     static N: AtomicUsize = AtomicUsize::new(0);
     loop {
-        if N.load(SeqCst) != 0 {
-            return N.fetch_add(1, SeqCst);
+        if N.load(Relaxed) != 0 {
+            return N.fetch_add(1, Relaxed);
         }
 
-        N.store(hashmap_random_keys().0 as usize, SeqCst);
+        N.store(hashmap_random_keys().0 as usize, Relaxed);
     }
 }
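
Aside: `Relaxed` suffices for this counter because each `fetch_add` is still a single atomic read-modify-write, so callers get distinct values; nothing else is published through `N`, so no Acquire/Release pairing is required. A hypothetical standalone version of that idea (`next_id` is not a std function):

    use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
    use std::thread;

    static NEXT_ID: AtomicUsize = AtomicUsize::new(1);

    // Relaxed: we only need atomicity of the increment, not ordering of any
    // other memory relative to it.
    fn next_id() -> usize {
        NEXT_ID.fetch_add(1, Relaxed)
    }

    fn main() {
        let handles: Vec<_> = (0..4).map(|_| thread::spawn(next_id)).collect();
        let mut ids: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect();
        ids.sort_unstable();
        ids.dedup();
        assert_eq!(ids.len(), 4); // every caller got a distinct id
    }
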
 
diff --git a/library/std/src/sys/pal/xous/alloc.rs b/library/std/src/sys/pal/xous/alloc.rs
index 0d540e95520..601411173aa 100644
--- a/library/std/src/sys/pal/xous/alloc.rs
+++ b/library/std/src/sys/pal/xous/alloc.rs
@@ -46,7 +46,10 @@ unsafe impl GlobalAlloc for System {
 }
 
 mod lock {
-    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+    use crate::sync::atomic::{
+        AtomicI32,
+        Ordering::{Acquire, Release},
+    };
 
     static LOCKED: AtomicI32 = AtomicI32::new(0);
 
@@ -54,7 +57,7 @@ mod lock {
 
     pub fn lock() -> DropLock {
         loop {
-            if LOCKED.swap(1, SeqCst) == 0 {
+            if LOCKED.swap(1, Acquire) == 0 {
                 return DropLock;
             }
             crate::os::xous::ffi::do_yield();
@@ -63,7 +66,7 @@ mod lock {
 
     impl Drop for DropLock {
         fn drop(&mut self) {
-            let r = LOCKED.swap(0, SeqCst);
+            let r = LOCKED.swap(0, Release);
             debug_assert_eq!(r, 1);
         }
     }
diff --git a/library/std/src/sys/pal/xous/net/tcpstream.rs b/library/std/src/sys/pal/xous/net/tcpstream.rs
index 7149678118a..aebef02acda 100644
--- a/library/std/src/sys/pal/xous/net/tcpstream.rs
+++ b/library/std/src/sys/pal/xous/net/tcpstream.rs
@@ -406,7 +406,7 @@ impl TcpStream {
     }
 
     pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
-        self.nonblocking.store(nonblocking, Ordering::SeqCst);
+        self.nonblocking.store(nonblocking, Ordering::Relaxed);
         Ok(())
     }
 }
diff --git a/library/std/src/sys/pal/xous/thread_local_key.rs b/library/std/src/sys/pal/xous/thread_local_key.rs
index 59a668c3df6..2aaf46d0244 100644
--- a/library/std/src/sys/pal/xous/thread_local_key.rs
+++ b/library/std/src/sys/pal/xous/thread_local_key.rs
@@ -2,7 +2,7 @@ use crate::mem::ManuallyDrop;
 use crate::ptr;
 use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 use core::arch::asm;
 
 use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags};
@@ -92,7 +92,7 @@ fn tls_table() -> &'static mut [*mut u8] {
 pub unsafe fn create(dtor: Option<Dtor>) -> Key {
     // Allocate a new TLS key. These keys are shared among all threads.
     #[allow(unused_unsafe)]
-    let key = unsafe { TLS_KEY_INDEX.fetch_add(1, SeqCst) };
+    let key = unsafe { TLS_KEY_INDEX.fetch_add(1, Relaxed) };
     if let Some(f) = dtor {
         unsafe { register_dtor(key, f) };
     }
@@ -154,11 +154,11 @@ unsafe fn register_dtor(key: Key, dtor: Dtor) {
     let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
 
     #[allow(unused_unsafe)]
-    let mut head = unsafe { DTORS.load(SeqCst) };
+    let mut head = unsafe { DTORS.load(Acquire) };
     loop {
         node.next = head;
         #[allow(unused_unsafe)]
-        match unsafe { DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) } {
+        match unsafe { DTORS.compare_exchange(head, &mut **node, Release, Acquire) } {
             Ok(_) => return, // nothing to drop, we successfully added the node to the list
             Err(cur) => head = cur,
         }
@@ -199,7 +199,7 @@ unsafe fn run_dtors() {
         }
         any_run = false;
         #[allow(unused_unsafe)]
-        let mut cur = unsafe { DTORS.load(SeqCst) };
+        let mut cur = unsafe { DTORS.load(Acquire) };
         while !cur.is_null() {
             let ptr = unsafe { get((*cur).key) };
 
diff --git a/library/std/src/sys/sync/mutex/xous.rs b/library/std/src/sys/sync/mutex/xous.rs
index a8c9518ff0b..1426e48f8b7 100644
--- a/library/std/src/sys/sync/mutex/xous.rs
+++ b/library/std/src/sys/sync/mutex/xous.rs
@@ -1,6 +1,9 @@
 use crate::os::xous::ffi::{blocking_scalar, do_yield};
 use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
-use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
+use crate::sync::atomic::{
+    AtomicBool, AtomicUsize,
+    Ordering::{Acquire, Relaxed, Release},
+};
 
 pub struct Mutex {
     /// The "locked" value indicates how many threads are waiting on this
@@ -68,7 +71,7 @@ impl Mutex {
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        let prev = self.locked.fetch_sub(1, SeqCst);
+        let prev = self.locked.fetch_sub(1, Release);
 
         // If the previous value was 1, then this was a "fast path" unlock, so no
         // need to involve the Ticktimer server
@@ -89,12 +92,12 @@ impl Mutex {
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+        self.locked.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
     }
 
     #[inline]
     pub unsafe fn try_lock_or_poison(&self) -> bool {
-        self.locked.fetch_add(1, SeqCst) == 0
+        self.locked.fetch_add(1, Acquire) == 0
     }
 }
 
diff --git a/library/std/src/sys/thread_local/static_local.rs b/library/std/src/sys/thread_local/static_local.rs
index 4f2b6868962..206e62bb5e2 100644
--- a/library/std/src/sys/thread_local/static_local.rs
+++ b/library/std/src/sys/thread_local/static_local.rs
@@ -12,8 +12,7 @@ pub macro thread_local_inner {
         #[inline] // see comments below
         #[deny(unsafe_op_in_unsafe_fn)]
         // FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_refs` lint
-        #[cfg_attr(bootstrap, allow(static_mut_ref))]
-        #[cfg_attr(not(bootstrap), allow(static_mut_refs))]
+        #[allow(static_mut_refs)]
         unsafe fn __getit(
             _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
         ) -> $crate::option::Option<&'static $t> {
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index 204834984a2..7dcc1141099 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -128,7 +128,7 @@ impl StaticKey {
 
     #[inline]
     unsafe fn key(&self) -> imp::Key {
-        match self.key.load(Ordering::Relaxed) {
+        match self.key.load(Ordering::Acquire) {
             KEY_SENTVAL => self.lazy_init() as imp::Key,
             n => n as imp::Key,
         }
@@ -156,8 +156,8 @@ impl StaticKey {
         match self.key.compare_exchange(
             KEY_SENTVAL,
             key as usize,
-            Ordering::SeqCst,
-            Ordering::SeqCst,
+            Ordering::Release,
+            Ordering::Acquire,
         ) {
             // The CAS succeeded, so we've created the actual key
             Ok(_) => key as usize,
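
Aside: a reduced sketch of the lazy-initialization pattern above, with `os_allocate_key` as a made-up stand-in for the platform call. `Release` on a successful exchange publishes the winner's setup; `Acquire` on the failure path (and on the fast-path load) lets the loser adopt the winner's key together with that setup.

    use std::sync::atomic::{AtomicUsize, Ordering};

    const UNINIT: usize = 0;

    static KEY: AtomicUsize = AtomicUsize::new(UNINIT);

    fn os_allocate_key() -> usize {
        // Hypothetical stand-in for the real key allocation; assumed never 0.
        42
    }

    fn key() -> usize {
        // Acquire: if another thread already initialized the key, we also see
        // whatever it set up before publishing it.
        match KEY.load(Ordering::Acquire) {
            UNINIT => lazy_init(),
            n => n,
        }
    }

    fn lazy_init() -> usize {
        let new = os_allocate_key();
        match KEY.compare_exchange(UNINIT, new, Ordering::Release, Ordering::Acquire) {
            // We won the race; Release publishes our setup.
            Ok(_) => new,
            // Another thread won; the Acquire failure ordering lets us use its
            // key (a real implementation would also free `new` here).
            Err(existing) => existing,
        }
    }

    fn main() {
        assert_eq!(key(), key());
    }
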
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs
index 964c7fc5b0c..25019b554bb 100644
--- a/library/std/src/thread/local/tests.rs
+++ b/library/std/src/thread/local/tests.rs
@@ -255,6 +255,9 @@ fn join_orders_after_tls_destructors() {
     // observe the channel in the `THREAD1_WAITING` state. If this does occur,
     // we switch to the “poison” state `THREAD2_JOINED` and panic all around.
     // (This is equivalent to “sending” from an alternate producer thread.)
+    //
+    // Relaxed memory ordering is fine because spawn()/join() already provide all the
+    // synchronization we need here.
     const FRESH: u8 = 0;
     const THREAD2_LAUNCHED: u8 = 1;
     const THREAD1_WAITING: u8 = 2;
@@ -263,7 +266,7 @@ fn join_orders_after_tls_destructors() {
     static SYNC_STATE: AtomicU8 = AtomicU8::new(FRESH);
 
     for _ in 0..10 {
-        SYNC_STATE.store(FRESH, Ordering::SeqCst);
+        SYNC_STATE.store(FRESH, Ordering::Relaxed);
 
         let jh = thread::Builder::new()
             .name("thread1".into())
@@ -272,7 +275,7 @@ fn join_orders_after_tls_destructors() {
 
                 impl Drop for TlDrop {
                     fn drop(&mut self) {
-                        let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::SeqCst);
+                        let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::Relaxed);
                         loop {
                             match sync_state {
                                 THREAD2_LAUNCHED | THREAD1_WAITING => thread::yield_now(),
@@ -282,7 +285,7 @@ fn join_orders_after_tls_destructors() {
                                 ),
                                 v => unreachable!("sync state: {}", v),
                             }
-                            sync_state = SYNC_STATE.load(Ordering::SeqCst);
+                            sync_state = SYNC_STATE.load(Ordering::Relaxed);
                         }
                     }
                 }
@@ -294,7 +297,7 @@ fn join_orders_after_tls_destructors() {
                 TL_DROP.with(|_| {});
 
                 loop {
-                    match SYNC_STATE.load(Ordering::SeqCst) {
+                    match SYNC_STATE.load(Ordering::Relaxed) {
                         FRESH => thread::yield_now(),
                         THREAD2_LAUNCHED => break,
                         v => unreachable!("sync state: {}", v),
@@ -306,9 +309,9 @@ fn join_orders_after_tls_destructors() {
         let jh2 = thread::Builder::new()
             .name("thread2".into())
             .spawn(move || {
-                assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::SeqCst), FRESH);
+                assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::Relaxed), FRESH);
                 jh.join().unwrap();
-                match SYNC_STATE.swap(THREAD2_JOINED, Ordering::SeqCst) {
+                match SYNC_STATE.swap(THREAD2_JOINED, Ordering::Relaxed) {
                     MAIN_THREAD_RENDEZVOUS => return,
                     THREAD2_LAUNCHED | THREAD1_WAITING => {
                         panic!("Thread 2 running after thread 1 join before main thread rendezvous")
@@ -322,8 +325,8 @@ fn join_orders_after_tls_destructors() {
             match SYNC_STATE.compare_exchange(
                 THREAD1_WAITING,
                 MAIN_THREAD_RENDEZVOUS,
-                Ordering::SeqCst,
-                Ordering::SeqCst,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
             ) {
                 Ok(_) => break,
                 Err(FRESH) => thread::yield_now(),
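
Aside: the comment added to this test leans on the happens-before edges that `spawn` and `join` already create, which is why `Relaxed` is enough for `SYNC_STATE`. A minimal illustration of that guarantee, independent of this test's state machine:

    use std::sync::atomic::{AtomicU8, Ordering::Relaxed};
    use std::thread;

    static STATE: AtomicU8 = AtomicU8::new(0);

    fn main() {
        STATE.store(1, Relaxed);
        // Everything before `spawn` happens-before the new thread's code, so
        // the child sees the store above even with Relaxed ordering.
        let t = thread::spawn(|| {
            assert_eq!(STATE.load(Relaxed), 1);
            STATE.store(2, Relaxed);
        });
        // Everything the child did happens-before `join` returns, so this read
        // sees its store without any Acquire/Release on the atomic itself.
        t.join().unwrap();
        assert_eq!(STATE.load(Relaxed), 2);
    }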