Diffstat (limited to 'library')
-rw-r--r--  library/Cargo.lock | 9
-rw-r--r--  library/alloc/Cargo.toml | 2
-rw-r--r--  library/alloc/src/lib.rs | 1
-rw-r--r--  library/alloc/src/slice.rs | 4
-rw-r--r--  library/alloc/src/string.rs | 24
-rw-r--r--  library/core/src/macros/mod.rs | 2
-rw-r--r--  library/core/src/mem/drop_guard.rs | 155
-rw-r--r--  library/core/src/mem/mod.rs | 4
-rw-r--r--  library/core/src/ops/range.rs | 30
-rw-r--r--  library/core/src/ptr/mod.rs | 6
-rw-r--r--  library/core/src/range.rs | 18
-rw-r--r--  library/core/src/slice/iter.rs | 249
-rw-r--r--  library/core/src/slice/mod.rs | 78
-rw-r--r--  library/core/src/str/iter.rs | 2
-rw-r--r--  library/core/src/str/mod.rs | 40
-rw-r--r--  library/core/src/sync/atomic.rs | 30
-rw-r--r--  library/coretests/tests/lib.rs | 3
-rw-r--r--  library/coretests/tests/macros.rs | 6
-rw-r--r--  library/coretests/tests/mem.rs | 46
-rw-r--r--  library/coretests/tests/slice.rs | 184
-rw-r--r--  library/rustc-std-workspace-alloc/Cargo.toml | 3
-rw-r--r--  library/rustc-std-workspace-core/Cargo.toml | 3
-rw-r--r--  library/rustc-std-workspace-std/Cargo.toml | 3
-rw-r--r--  library/std/Cargo.toml | 4
-rw-r--r--  library/std/src/env.rs | 2
-rw-r--r--  library/std/src/lib.rs | 2
-rw-r--r--  library/std/src/sync/mod.rs | 2
-rw-r--r--  library/std/src/sync/nonpoison.rs | 37
-rw-r--r--  library/std/src/sync/nonpoison/mutex.rs | 611
-rw-r--r--  library/std/src/sync/poison.rs | 6
-rw-r--r--  library/std/src/sync/poison/condvar.rs | 2
-rw-r--r--  library/std/src/sync/poison/mutex.rs | 2
-rw-r--r--  library/std/src/sys/pal/sgx/abi/usercalls/mod.rs | 2
-rw-r--r--  library/std/src/sys/pal/unix/os.rs | 5
-rw-r--r--  library/std/src/sys/random/sgx.rs | 14
-rw-r--r--  library/std/src/sys/random/uefi.rs | 5
-rw-r--r--  library/std/src/thread/mod.rs | 8
-rw-r--r--  library/std/tests/sync/lib.rs | 55
-rw-r--r--  library/std/tests/sync/mutex.rs | 525
-rw-r--r--  library/std_detect/src/detect/macros.rs | 5
-rw-r--r--  library/std_detect/src/detect/os/riscv.rs | 1
-rw-r--r--  library/stdarch/crates/core_arch/src/wasm32/mod.rs | 10
-rw-r--r--  library/sysroot/Cargo.toml | 4
-rw-r--r--  library/windows_targets/Cargo.toml | 5
-rw-r--r--  library/windows_targets/src/lib.rs | 16
45 files changed, 1423 insertions, 802 deletions
diff --git a/library/Cargo.lock b/library/Cargo.lock
index cb356480ead..a9a611fe1ed 100644
--- a/library/Cargo.lock
+++ b/library/Cargo.lock
@@ -90,11 +90,10 @@ dependencies = [
 
 [[package]]
 name = "fortanix-sgx-abi"
-version = "0.5.0"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57cafc2274c10fab234f176b25903ce17e690fca7597090d50880e047a0389c5"
+checksum = "5efc85edd5b83e8394f4371dd0da6859dff63dd387dab8568fece6af4cde6f84"
 dependencies = [
- "compiler_builtins",
  "rustc-std-workspace-core",
 ]
 
@@ -238,9 +237,9 @@ dependencies = [
 
 [[package]]
 name = "rand"
-version = "0.9.1"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97"
+checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
 dependencies = [
  "rand_core",
 ]
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index 017c790ecac..9ba7c5bd28a 100644
--- a/library/alloc/Cargo.toml
+++ b/library/alloc/Cargo.toml
@@ -21,9 +21,7 @@ compiler_builtins = { path = "../compiler-builtins/compiler-builtins", features
 [features]
 compiler-builtins-mem = ['compiler_builtins/mem']
 compiler-builtins-c = ["compiler_builtins/c"]
-compiler-builtins-no-asm = ["compiler_builtins/no-asm"]
 compiler-builtins-no-f16-f128 = ["compiler_builtins/no-f16-f128"]
-compiler-builtins-mangled-names = ["compiler_builtins/mangled-names"]
 # Make panics and failed asserts immediately abort without formatting any message
 panic_immediate_abort = ["core/panic_immediate_abort"]
 # Choose algorithms that are optimized for binary size instead of runtime performance
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 6b6e4df4cba..c091e496c50 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -94,7 +94,6 @@
 // tidy-alphabetical-start
 #![feature(alloc_layout_extra)]
 #![feature(allocator_api)]
-#![feature(array_chunks)]
 #![feature(array_into_iter_constructors)]
 #![feature(array_windows)]
 #![feature(ascii_char)]
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index b4da56578c8..ce9f967cc38 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -16,10 +16,6 @@ use core::cmp::Ordering::{self, Less};
 use core::mem::MaybeUninit;
 #[cfg(not(no_global_oom_handling))]
 use core::ptr;
-#[unstable(feature = "array_chunks", issue = "74985")]
-pub use core::slice::ArrayChunks;
-#[unstable(feature = "array_chunks", issue = "74985")]
-pub use core::slice::ArrayChunksMut;
 #[unstable(feature = "array_windows", issue = "75027")]
 pub use core::slice::ArrayWindows;
 #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index a189c00a6b6..d58240f3051 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -787,12 +787,12 @@ impl String {
     #[cfg(not(no_global_oom_handling))]
     #[unstable(feature = "str_from_utf16_endian", issue = "116258")]
     pub fn from_utf16le(v: &[u8]) -> Result<String, FromUtf16Error> {
-        if v.len() % 2 != 0 {
+        let (chunks, []) = v.as_chunks::<2>() else {
             return Err(FromUtf16Error(()));
-        }
+        };
         match (cfg!(target_endian = "little"), unsafe { v.align_to::<u16>() }) {
             (true, ([], v, [])) => Self::from_utf16(v),
-            _ => char::decode_utf16(v.array_chunks::<2>().copied().map(u16::from_le_bytes))
+            _ => char::decode_utf16(chunks.iter().copied().map(u16::from_le_bytes))
                 .collect::<Result<_, _>>()
                 .map_err(|_| FromUtf16Error(())),
         }
@@ -830,11 +830,11 @@ impl String {
             (true, ([], v, [])) => Self::from_utf16_lossy(v),
             (true, ([], v, [_remainder])) => Self::from_utf16_lossy(v) + "\u{FFFD}",
             _ => {
-                let mut iter = v.array_chunks::<2>();
-                let string = char::decode_utf16(iter.by_ref().copied().map(u16::from_le_bytes))
+                let (chunks, remainder) = v.as_chunks::<2>();
+                let string = char::decode_utf16(chunks.iter().copied().map(u16::from_le_bytes))
                     .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER))
                     .collect();
-                if iter.remainder().is_empty() { string } else { string + "\u{FFFD}" }
+                if remainder.is_empty() { string } else { string + "\u{FFFD}" }
             }
         }
     }
@@ -862,12 +862,12 @@ impl String {
     #[cfg(not(no_global_oom_handling))]
     #[unstable(feature = "str_from_utf16_endian", issue = "116258")]
     pub fn from_utf16be(v: &[u8]) -> Result<String, FromUtf16Error> {
-        if v.len() % 2 != 0 {
+        let (chunks, []) = v.as_chunks::<2>() else {
             return Err(FromUtf16Error(()));
-        }
+        };
         match (cfg!(target_endian = "big"), unsafe { v.align_to::<u16>() }) {
             (true, ([], v, [])) => Self::from_utf16(v),
-            _ => char::decode_utf16(v.array_chunks::<2>().copied().map(u16::from_be_bytes))
+            _ => char::decode_utf16(chunks.iter().copied().map(u16::from_be_bytes))
                 .collect::<Result<_, _>>()
                 .map_err(|_| FromUtf16Error(())),
         }
@@ -905,11 +905,11 @@ impl String {
             (true, ([], v, [])) => Self::from_utf16_lossy(v),
             (true, ([], v, [_remainder])) => Self::from_utf16_lossy(v) + "\u{FFFD}",
             _ => {
-                let mut iter = v.array_chunks::<2>();
-                let string = char::decode_utf16(iter.by_ref().copied().map(u16::from_be_bytes))
+                let (chunks, remainder) = v.as_chunks::<2>();
+                let string = char::decode_utf16(chunks.iter().copied().map(u16::from_be_bytes))
                     .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER))
                     .collect();
-                if iter.remainder().is_empty() { string } else { string + "\u{FFFD}" }
+                if remainder.is_empty() { string } else { string + "\u{FFFD}" }
             }
         }
     }
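
For context on the migration above: the stable `as_chunks::<N>()` returns a `(&[[T; N]], &[T])` pair, which is why the UTF-16 decoding can now pattern-match on an empty remainder instead of checking `len() % 2`. A minimal sketch of the pattern (byte values are illustrative):

```rust
// "AB" encoded as UTF-16LE, plus one stray trailing byte.
let v: &[u8] = &[0x41, 0x00, 0x42, 0x00, 0xFF];

// Split into complete 2-byte chunks plus the leftover remainder.
let (chunks, remainder) = v.as_chunks::<2>();
assert_eq!(chunks, [[0x41, 0x00], [0x42, 0x00]]);
assert_eq!(remainder, [0xFF]);

// Each chunk becomes one little-endian UTF-16 code unit.
let units: Vec<u16> = chunks.iter().copied().map(u16::from_le_bytes).collect();
assert_eq!(units, [0x41, 0x42]);
```
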
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index 8ac6ce2242d..3d57da63683 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -426,8 +426,10 @@ pub macro debug_assert_matches($($arg:tt)*) {
 #[macro_export]
 #[stable(feature = "matches_macro", since = "1.42.0")]
 #[rustc_diagnostic_item = "matches_macro"]
+#[allow_internal_unstable(non_exhaustive_omitted_patterns_lint, stmt_expr_attributes)]
 macro_rules! matches {
     ($expression:expr, $pattern:pat $(if $guard:expr)? $(,)?) => {
+        #[allow(non_exhaustive_omitted_patterns)]
         match $expression {
             $pattern $(if $guard)? => true,
             _ => false
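
This change lets the macro expansion carry `#[allow(non_exhaustive_omitted_patterns)]`, so the hidden `_ => false` arm no longer trips that lint when callers deny it. A sketch of the caller code this unblocks (the lint itself is nightly-only):

```rust
#![feature(non_exhaustive_omitted_patterns_lint)]
#![deny(non_exhaustive_omitted_patterns)]

use std::sync::atomic::Ordering;

// `Ordering` is `#[non_exhaustive]` when viewed from another crate; without
// the internal allow, the wildcard arm generated by `matches!` would be
// rejected under the `deny` above.
fn is_relaxed(o: Ordering) -> bool {
    matches!(o, Ordering::Relaxed)
}
```
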
diff --git a/library/core/src/mem/drop_guard.rs b/library/core/src/mem/drop_guard.rs
new file mode 100644
index 00000000000..47ccb69acc8
--- /dev/null
+++ b/library/core/src/mem/drop_guard.rs
@@ -0,0 +1,155 @@
+use crate::fmt::{self, Debug};
+use crate::mem::ManuallyDrop;
+use crate::ops::{Deref, DerefMut};
+
+/// Wrap a value and run a closure when dropped.
+///
+/// This is useful for quickly creating destructors inline.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![allow(unused)]
+/// #![feature(drop_guard)]
+///
+/// use std::mem::DropGuard;
+///
+/// {
+///     // Create a new guard around a string that will
+///     // print its value when dropped.
+///     let s = String::from("Chashu likes tuna");
+///     let mut s = DropGuard::new(s, |s| println!("{s}"));
+///
+///     // Modify the string contained in the guard.
+///     s.push_str("!!!");
+///
+///     // The guard will be dropped here, printing:
+///     // "Chashu likes tuna!!!"
+/// }
+/// ```
+#[unstable(feature = "drop_guard", issue = "144426")]
+#[doc(alias = "ScopeGuard")]
+#[doc(alias = "defer")]
+pub struct DropGuard<T, F>
+where
+    F: FnOnce(T),
+{
+    inner: ManuallyDrop<T>,
+    f: ManuallyDrop<F>,
+}
+
+impl<T, F> DropGuard<T, F>
+where
+    F: FnOnce(T),
+{
+    /// Create a new instance of `DropGuard`.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// # #![allow(unused)]
+    /// #![feature(drop_guard)]
+    ///
+    /// use std::mem::DropGuard;
+    ///
+    /// let value = String::from("Chashu likes tuna");
+    /// let guard = DropGuard::new(value, |s| println!("{s}"));
+    /// ```
+    #[unstable(feature = "drop_guard", issue = "144426")]
+    #[must_use]
+    pub const fn new(inner: T, f: F) -> Self {
+        Self { inner: ManuallyDrop::new(inner), f: ManuallyDrop::new(f) }
+    }
+
+    /// Consumes the `DropGuard`, returning the wrapped value.
+    ///
+    /// This will not execute the closure. This is implemented as an associated
+    /// function to prevent any potential conflicts with any other methods called
+    /// `into_inner` from the `Deref` and `DerefMut` impls.
+    ///
+    /// It is typically preferred to call this function instead of `mem::forget`
+    /// because it will return the stored value and drop variables captured
+    /// by the closure instead of leaking their owned resources.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// # #![allow(unused)]
+    /// #![feature(drop_guard)]
+    ///
+    /// use std::mem::DropGuard;
+    ///
+    /// let value = String::from("Nori likes chicken");
+    /// let guard = DropGuard::new(value, |s| println!("{s}"));
+    /// assert_eq!(DropGuard::into_inner(guard), "Nori likes chicken");
+    /// ```
+    #[unstable(feature = "drop_guard", issue = "144426")]
+    #[inline]
+    pub fn into_inner(guard: Self) -> T {
+        // First we ensure that dropping the guard will not trigger
+        // its destructor
+        let mut guard = ManuallyDrop::new(guard);
+
+        // Next we manually read the stored value from the guard.
+        //
+        // SAFETY: this is safe because we've taken ownership of the guard.
+        let value = unsafe { ManuallyDrop::take(&mut guard.inner) };
+
+        // Finally we drop the stored closure. We do this *after* having read
+        // the value, so that even if the closure's `drop` function panics,
+        // unwinding still tries to drop the value.
+        //
+        // SAFETY: this is safe because we've taken ownership of the guard.
+        unsafe { ManuallyDrop::drop(&mut guard.f) };
+        value
+    }
+}
+
+#[unstable(feature = "drop_guard", issue = "144426")]
+impl<T, F> Deref for DropGuard<T, F>
+where
+    F: FnOnce(T),
+{
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &*self.inner
+    }
+}
+
+#[unstable(feature = "drop_guard", issue = "144426")]
+impl<T, F> DerefMut for DropGuard<T, F>
+where
+    F: FnOnce(T),
+{
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.inner
+    }
+}
+
+#[unstable(feature = "drop_guard", issue = "144426")]
+impl<T, F> Drop for DropGuard<T, F>
+where
+    F: FnOnce(T),
+{
+    fn drop(&mut self) {
+        // SAFETY: `DropGuard` is in the process of being dropped.
+        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+
+        // SAFETY: `DropGuard` is in the process of being dropped.
+        let f = unsafe { ManuallyDrop::take(&mut self.f) };
+
+        f(inner);
+    }
+}
+
+#[unstable(feature = "drop_guard", issue = "144426")]
+impl<T, F> Debug for DropGuard<T, F>
+where
+    T: Debug,
+    F: FnOnce(T),
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
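
Taken together, this new file provides a `ScopeGuard`-style primitive: `Deref`/`DerefMut` access while armed, the closure receiving the value by move on drop, and `into_inner` to disarm. A small sketch of the disarm-on-success pattern (the transfer/commit names are hypothetical):

```rust
#![feature(drop_guard)]

use std::mem::DropGuard;

fn transfer(items: Vec<u32>, succeed: bool) {
    // Roll back unless the guard is disarmed below.
    let guard = DropGuard::new(items, |items| {
        println!("rolling back {} items", items.len());
    });

    if succeed {
        // Disarm: returns the wrapped value and drops only the closure.
        let items = DropGuard::into_inner(guard);
        println!("committing {} items", items.len());
    }
    // On the failure path, dropping `guard` runs the rollback closure.
}
```
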
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 33407637ab3..db4c8e9e551 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -21,6 +21,10 @@ mod transmutability;
 #[unstable(feature = "transmutability", issue = "99571")]
 pub use transmutability::{Assume, TransmuteFrom};
 
+mod drop_guard;
+#[unstable(feature = "drop_guard", issue = "144426")]
+pub use drop_guard::DropGuard;
+
 // This one has to be a re-export (rather than wrapping the underlying intrinsic) so that we can do
 // the special magic "types have equal size" check at the call site.
 #[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs
index ad3b6439a61..f33a33e6b75 100644
--- a/library/core/src/ops/range.rs
+++ b/library/core/src/ops/range.rs
@@ -1141,6 +1141,12 @@ impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `start..` with `(Bound::Included(start), Bound::Unbounded)`.
 #[stable(feature = "collections_range", since = "1.28.0")]
 impl<T> RangeBounds<T> for RangeFrom<&T> {
     fn start_bound(&self) -> Bound<&T> {
@@ -1151,6 +1157,12 @@ impl<T> RangeBounds<T> for RangeFrom<&T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `..end` with `(Bound::Unbounded, Bound::Excluded(end))`.
 #[stable(feature = "collections_range", since = "1.28.0")]
 impl<T> RangeBounds<T> for RangeTo<&T> {
     fn start_bound(&self) -> Bound<&T> {
@@ -1161,6 +1173,12 @@ impl<T> RangeBounds<T> for RangeTo<&T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `start..end` with `(Bound::Included(start), Bound::Excluded(end))`.
 #[stable(feature = "collections_range", since = "1.28.0")]
 impl<T> RangeBounds<T> for Range<&T> {
     fn start_bound(&self) -> Bound<&T> {
@@ -1171,6 +1189,12 @@ impl<T> RangeBounds<T> for Range<&T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `start..=end` with `(Bound::Included(start), Bound::Included(end))`.
 #[stable(feature = "collections_range", since = "1.28.0")]
 impl<T> RangeBounds<T> for RangeInclusive<&T> {
     fn start_bound(&self) -> Bound<&T> {
@@ -1181,6 +1205,12 @@ impl<T> RangeBounds<T> for RangeInclusive<&T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `..=end` with `(Bound::Unbounded, Bound::Included(end))`.
 #[stable(feature = "collections_range", since = "1.28.0")]
 impl<T> RangeBounds<T> for RangeToInclusive<&T> {
     fn start_bound(&self) -> Bound<&T> {
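
A sketch of the workaround these notes suggest, using `str` as the unsized `T`: the impls above require `T: Sized`, so `"m"..` cannot be used as a `RangeBounds<str>`, but the bound-tuple spelling can:

```rust
use std::ops::{Bound, RangeBounds};

// Spell `"m"..` as a tuple of bounds to get a `RangeBounds<str>`.
let range: (Bound<&str>, Bound<&str>) = (Bound::Included("m"), Bound::Unbounded);
assert!(range.contains("zebra"));
assert!(!range.contains("apple"));
```
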
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index dbe3999b4a4..1a2a5182567 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -974,9 +974,10 @@ pub const fn dangling_mut<T>() -> *mut T {
 #[must_use]
 #[inline(always)]
 #[stable(feature = "exposed_provenance", since = "1.84.0")]
+#[rustc_const_unstable(feature = "const_exposed_provenance", issue = "144538")]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 #[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
-pub fn with_exposed_provenance<T>(addr: usize) -> *const T {
+pub const fn with_exposed_provenance<T>(addr: usize) -> *const T {
     addr as *const T
 }
 
@@ -1014,9 +1015,10 @@ pub fn with_exposed_provenance<T>(addr: usize) -> *const T {
 #[must_use]
 #[inline(always)]
 #[stable(feature = "exposed_provenance", since = "1.84.0")]
+#[rustc_const_unstable(feature = "const_exposed_provenance", issue = "144538")]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 #[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
-pub fn with_exposed_provenance_mut<T>(addr: usize) -> *mut T {
+pub const fn with_exposed_provenance_mut<T>(addr: usize) -> *mut T {
     addr as *mut T
 }
 
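With the `rustc_const_unstable` attribute, these casts become usable in const contexts behind the `const_exposed_provenance` feature. A sketch (the device address is made up):

```rust
#![feature(const_exposed_provenance)]

use std::ptr;

// The address-to-pointer cast itself is now a const operation; exposing
// provenance and dereferencing remain runtime concerns.
const MMIO_STATUS: *const u32 = ptr::with_exposed_provenance(0x1000_0000);
```
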
diff --git a/library/core/src/range.rs b/library/core/src/range.rs
index 5cd7956291c..7158fa0fcf0 100644
--- a/library/core/src/range.rs
+++ b/library/core/src/range.rs
@@ -167,6 +167,12 @@ impl<T> RangeBounds<T> for Range<T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `start..end` with `(Bound::Included(start), Bound::Excluded(end))`.
 #[unstable(feature = "new_range_api", issue = "125687")]
 impl<T> RangeBounds<T> for Range<&T> {
     fn start_bound(&self) -> Bound<&T> {
@@ -346,6 +352,12 @@ impl<T> RangeBounds<T> for RangeInclusive<T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `start..=end` with `(Bound::Included(start), Bound::Included(end))`.
 #[unstable(feature = "new_range_api", issue = "125687")]
 impl<T> RangeBounds<T> for RangeInclusive<&T> {
     fn start_bound(&self) -> Bound<&T> {
@@ -491,6 +503,12 @@ impl<T> RangeBounds<T> for RangeFrom<T> {
     }
 }
 
+// This impl intentionally does not have `T: ?Sized`;
+// see https://github.com/rust-lang/rust/pull/61584 for discussion of why.
+//
+/// If you need to use this implementation where `T` is unsized,
+/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound],
+/// i.e. replace `start..` with `(Bound::Included(start), Bound::Unbounded)`.
 #[unstable(feature = "new_range_api", issue = "125687")]
 impl<T> RangeBounds<T> for RangeFrom<&T> {
     fn start_bound(&self) -> Bound<&T> {
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 33132dcc714..ae910e05252 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -2301,255 +2301,6 @@ impl<T, const N: usize> ExactSizeIterator for ArrayWindows<'_, T, N> {
     }
 }
 
-/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a
-/// time), starting at the beginning of the slice.
-///
-/// When the slice len is not evenly divided by the chunk size, the last
-/// up to `N-1` elements will be omitted but can be retrieved from
-/// the [`remainder`] function from the iterator.
-///
-/// This struct is created by the [`array_chunks`] method on [slices].
-///
-/// # Example
-///
-/// ```
-/// #![feature(array_chunks)]
-///
-/// let slice = ['l', 'o', 'r', 'e', 'm'];
-/// let mut iter = slice.array_chunks::<2>();
-/// assert_eq!(iter.next(), Some(&['l', 'o']));
-/// assert_eq!(iter.next(), Some(&['r', 'e']));
-/// assert_eq!(iter.next(), None);
-/// ```
-///
-/// [`array_chunks`]: slice::array_chunks
-/// [`remainder`]: ArrayChunks::remainder
-/// [slices]: slice
-#[derive(Debug)]
-#[unstable(feature = "array_chunks", issue = "74985")]
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-pub struct ArrayChunks<'a, T: 'a, const N: usize> {
-    iter: Iter<'a, [T; N]>,
-    rem: &'a [T],
-}
-
-impl<'a, T, const N: usize> ArrayChunks<'a, T, N> {
-    #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")]
-    #[inline]
-    pub(super) const fn new(slice: &'a [T]) -> Self {
-        let (array_slice, rem) = slice.as_chunks();
-        Self { iter: array_slice.iter(), rem }
-    }
-
-    /// Returns the remainder of the original slice that is not going to be
-    /// returned by the iterator. The returned slice has at most `N-1`
-    /// elements.
-    #[must_use]
-    #[unstable(feature = "array_chunks", issue = "74985")]
-    pub fn remainder(&self) -> &'a [T] {
-        self.rem
-    }
-}
-
-// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<T, const N: usize> Clone for ArrayChunks<'_, T, N> {
-    fn clone(&self) -> Self {
-        ArrayChunks { iter: self.iter.clone(), rem: self.rem }
-    }
-}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> {
-    type Item = &'a [T; N];
-
-    #[inline]
-    fn next(&mut self) -> Option<&'a [T; N]> {
-        self.iter.next()
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.iter.count()
-    }
-
-    #[inline]
-    fn nth(&mut self, n: usize) -> Option<Self::Item> {
-        self.iter.nth(n)
-    }
-
-    #[inline]
-    fn last(self) -> Option<Self::Item> {
-        self.iter.last()
-    }
-
-    unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a [T; N] {
-        // SAFETY: The safety guarantees of `__iterator_get_unchecked` are
-        // transferred to the caller.
-        unsafe { self.iter.__iterator_get_unchecked(i) }
-    }
-}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> {
-    #[inline]
-    fn next_back(&mut self) -> Option<&'a [T; N]> {
-        self.iter.next_back()
-    }
-
-    #[inline]
-    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
-        self.iter.nth_back(n)
-    }
-}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<T, const N: usize> ExactSizeIterator for ArrayChunks<'_, T, N> {
-    fn is_empty(&self) -> bool {
-        self.iter.is_empty()
-    }
-}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T, const N: usize> TrustedLen for ArrayChunks<'_, T, N> {}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<T, const N: usize> FusedIterator for ArrayChunks<'_, T, N> {}
-
-#[doc(hidden)]
-#[unstable(feature = "array_chunks", issue = "74985")]
-unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {}
-
-#[doc(hidden)]
-#[unstable(feature = "array_chunks", issue = "74985")]
-unsafe impl<'a, T, const N: usize> TrustedRandomAccessNoCoerce for ArrayChunks<'a, T, N> {
-    const MAY_HAVE_SIDE_EFFECT: bool = false;
-}
-
-/// An iterator over a slice in (non-overlapping) mutable chunks (`N` elements
-/// at a time), starting at the beginning of the slice.
-///
-/// When the slice len is not evenly divided by the chunk size, the last
-/// up to `N-1` elements will be omitted but can be retrieved from
-/// the [`into_remainder`] function from the iterator.
-///
-/// This struct is created by the [`array_chunks_mut`] method on [slices].
-///
-/// # Example
-///
-/// ```
-/// #![feature(array_chunks)]
-///
-/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
-/// let iter = slice.array_chunks_mut::<2>();
-/// ```
-///
-/// [`array_chunks_mut`]: slice::array_chunks_mut
-/// [`into_remainder`]: ../../std/slice/struct.ArrayChunksMut.html#method.into_remainder
-/// [slices]: slice
-#[derive(Debug)]
-#[unstable(feature = "array_chunks", issue = "74985")]
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-pub struct ArrayChunksMut<'a, T: 'a, const N: usize> {
-    iter: IterMut<'a, [T; N]>,
-    rem: &'a mut [T],
-}
-
-impl<'a, T, const N: usize> ArrayChunksMut<'a, T, N> {
-    #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")]
-    #[inline]
-    pub(super) const fn new(slice: &'a mut [T]) -> Self {
-        let (array_slice, rem) = slice.as_chunks_mut();
-        Self { iter: array_slice.iter_mut(), rem }
-    }
-
-    /// Returns the remainder of the original slice that is not going to be
-    /// returned by the iterator. The returned slice has at most `N-1`
-    /// elements.
-    #[must_use = "`self` will be dropped if the result is not used"]
-    #[unstable(feature = "array_chunks", issue = "74985")]
-    pub fn into_remainder(self) -> &'a mut [T] {
-        self.rem
-    }
-}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<'a, T, const N: usize> Iterator for ArrayChunksMut<'a, T, N> {
-    type Item = &'a mut [T; N];
-
-    #[inline]
-    fn next(&mut self) -> Option<&'a mut [T; N]> {
-        self.iter.next()
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.iter.count()
-    }
-
-    #[inline]
-    fn nth(&mut self, n: usize) -> Option<Self::Item> {
-        self.iter.nth(n)
-    }
-
-    #[inline]
-    fn last(self) -> Option<Self::Item> {
-        self.iter.last()
-    }
-
-    unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a mut [T; N] {
-        // SAFETY: The safety guarantees of `__iterator_get_unchecked` are transferred to
-        // the caller.
-        unsafe { self.iter.__iterator_get_unchecked(i) }
-    }
-}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunksMut<'a, T, N> {
-    #[inline]
-    fn next_back(&mut self) -> Option<&'a mut [T; N]> {
-        self.iter.next_back()
-    }
-
-    #[inline]
-    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
-        self.iter.nth_back(n)
-    }
-}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<T, const N: usize> ExactSizeIterator for ArrayChunksMut<'_, T, N> {
-    fn is_empty(&self) -> bool {
-        self.iter.is_empty()
-    }
-}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T, const N: usize> TrustedLen for ArrayChunksMut<'_, T, N> {}
-
-#[unstable(feature = "array_chunks", issue = "74985")]
-impl<T, const N: usize> FusedIterator for ArrayChunksMut<'_, T, N> {}
-
-#[doc(hidden)]
-#[unstable(feature = "array_chunks", issue = "74985")]
-unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunksMut<'a, T, N> {}
-
-#[doc(hidden)]
-#[unstable(feature = "array_chunks", issue = "74985")]
-unsafe impl<'a, T, const N: usize> TrustedRandomAccessNoCoerce for ArrayChunksMut<'a, T, N> {
-    const MAY_HAVE_SIDE_EFFECT: bool = false;
-}
-
 /// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
 /// time), starting at the end of the slice.
 ///
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 6fe5affc48b..14042997bc2 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -52,8 +52,6 @@ pub use index::SliceIndex;
 pub use index::{range, try_range};
 #[unstable(feature = "array_windows", issue = "75027")]
 pub use iter::ArrayWindows;
-#[unstable(feature = "array_chunks", issue = "74985")]
-pub use iter::{ArrayChunks, ArrayChunksMut};
 #[stable(feature = "slice_group_by", since = "1.77.0")]
 pub use iter::{ChunkBy, ChunkByMut};
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1232,7 +1230,7 @@ impl<T> [T] {
     ///
     /// [`chunks`]: slice::chunks
     /// [`rchunks_exact`]: slice::rchunks_exact
-    /// [`as_chunks`]: slice::chunks
+    /// [`as_chunks`]: slice::as_chunks
     #[stable(feature = "chunks_exact", since = "1.31.0")]
     #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")]
     #[inline]
@@ -1448,42 +1446,6 @@ impl<T> [T] {
         (remainder, array_slice)
     }
 
-    /// Returns an iterator over `N` elements of the slice at a time, starting at the
-    /// beginning of the slice.
-    ///
-    /// The chunks are array references and do not overlap. If `N` does not divide the
-    /// length of the slice, then the last up to `N-1` elements will be omitted and can be
-    /// retrieved from the `remainder` function of the iterator.
-    ///
-    /// This method is the const generic equivalent of [`chunks_exact`].
-    ///
-    /// # Panics
-    ///
-    /// Panics if `N` is zero. This check will most probably get changed to a compile time
-    /// error before this method gets stabilized.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(array_chunks)]
-    /// let slice = ['l', 'o', 'r', 'e', 'm'];
-    /// let mut iter = slice.array_chunks();
-    /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
-    /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
-    /// assert!(iter.next().is_none());
-    /// assert_eq!(iter.remainder(), &['m']);
-    /// ```
-    ///
-    /// [`chunks_exact`]: slice::chunks_exact
-    #[unstable(feature = "array_chunks", issue = "74985")]
-    #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")]
-    #[inline]
-    #[track_caller]
-    pub const fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
-        assert!(N != 0, "chunk size must be non-zero");
-        ArrayChunks::new(self)
-    }
-
     /// Splits the slice into a slice of `N`-element arrays,
     /// assuming that there's no remainder.
     ///
@@ -1646,44 +1608,6 @@ impl<T> [T] {
         (remainder, array_slice)
     }
 
-    /// Returns an iterator over `N` elements of the slice at a time, starting at the
-    /// beginning of the slice.
-    ///
-    /// The chunks are mutable array references and do not overlap. If `N` does not divide
-    /// the length of the slice, then the last up to `N-1` elements will be omitted and
-    /// can be retrieved from the `into_remainder` function of the iterator.
-    ///
-    /// This method is the const generic equivalent of [`chunks_exact_mut`].
-    ///
-    /// # Panics
-    ///
-    /// Panics if `N` is zero. This check will most probably get changed to a compile time
-    /// error before this method gets stabilized.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(array_chunks)]
-    /// let v = &mut [0, 0, 0, 0, 0];
-    /// let mut count = 1;
-    ///
-    /// for chunk in v.array_chunks_mut() {
-    ///     *chunk = [count; 2];
-    ///     count += 1;
-    /// }
-    /// assert_eq!(v, &[1, 1, 2, 2, 0]);
-    /// ```
-    ///
-    /// [`chunks_exact_mut`]: slice::chunks_exact_mut
-    #[unstable(feature = "array_chunks", issue = "74985")]
-    #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")]
-    #[inline]
-    #[track_caller]
-    pub const fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
-        assert!(N != 0, "chunk size must be non-zero");
-        ArrayChunksMut::new(self)
-    }
-
     /// Returns an iterator over overlapping windows of `N` elements of a slice,
     /// starting at the beginning of the slice.
     ///
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index bcf886484ad..d2985d8a186 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -52,7 +52,7 @@ impl<'a> Iterator for Chars<'a> {
         const CHUNK_SIZE: usize = 32;
 
         if remainder >= CHUNK_SIZE {
-            let mut chunks = self.iter.as_slice().array_chunks::<CHUNK_SIZE>();
+            let mut chunks = self.iter.as_slice().as_chunks::<CHUNK_SIZE>().0.iter();
             let mut bytes_skipped: usize = 0;
 
             while remainder > CHUNK_SIZE
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index 029abf17539..c40af4de7e0 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -407,17 +407,22 @@ impl str {
     /// ```
     #[unstable(feature = "round_char_boundary", issue = "93743")]
     #[inline]
-    pub fn floor_char_boundary(&self, index: usize) -> usize {
+    pub const fn floor_char_boundary(&self, index: usize) -> usize {
         if index >= self.len() {
             self.len()
         } else {
-            let lower_bound = index.saturating_sub(3);
-            let new_index = self.as_bytes()[lower_bound..=index]
-                .iter()
-                .rposition(|b| b.is_utf8_char_boundary());
-
-            // SAFETY: we know that the character boundary will be within four bytes
-            unsafe { lower_bound + new_index.unwrap_unchecked() }
+            let mut i = index;
+            while i > 0 {
+                if self.as_bytes()[i].is_utf8_char_boundary() {
+                    break;
+                }
+                i -= 1;
+            }
+
+            //  The character boundary will be within four bytes of the index
+            debug_assert!(i >= index.saturating_sub(3));
+
+            i
         }
     }
 
@@ -445,15 +450,22 @@ impl str {
     /// ```
     #[unstable(feature = "round_char_boundary", issue = "93743")]
     #[inline]
-    pub fn ceil_char_boundary(&self, index: usize) -> usize {
+    pub const fn ceil_char_boundary(&self, index: usize) -> usize {
         if index >= self.len() {
             self.len()
         } else {
-            let upper_bound = Ord::min(index + 4, self.len());
-            self.as_bytes()[index..upper_bound]
-                .iter()
-                .position(|b| b.is_utf8_char_boundary())
-                .map_or(upper_bound, |pos| pos + index)
+            let mut i = index;
+            while i < self.len() {
+                if self.as_bytes()[i].is_utf8_char_boundary() {
+                    break;
+                }
+                i += 1;
+            }
+
+            //  The character boundary will be within four bytes of the index
+            debug_assert!(i <= index + 3);
+
+            i
         }
     }
 
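The rewrite trades iterator combinators for plain loops so that both methods can be `const fn`; the result is unchanged. A sketch of what they compute, given that '❤' occupies three bytes in UTF-8:

```rust
#![feature(round_char_boundary)]

const S: &str = "a❤c"; // byte layout: 'a' at 0, '❤' at 1..4, 'c' at 4
const FLOOR: usize = S.floor_char_boundary(2); // callable in const now
assert_eq!(FLOOR, 1); // rounded down to the start of '❤'
assert_eq!(S.ceil_char_boundary(2), 4); // rounded up past '❤'
```
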
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 546f3d91a80..70c02ead358 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -563,8 +563,8 @@ impl AtomicBool {
     ///   `align_of::<AtomicBool>() == 1`).
     /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
     /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
-    ///   allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes,
-    ///   without synchronization.
+    ///   allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
+    ///   sizes, without synchronization.
     ///
     /// [valid]: crate::ptr#safety
     /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
@@ -1245,8 +1245,8 @@ impl AtomicBool {
     /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
     /// atomic types work with interior mutability. All modifications of an atomic change the value
     /// through a shared reference, and can do so safely as long as they use atomic operations. Any
-    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
-    /// restriction: operations on it must be atomic.
+    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
+    /// requirements of the [memory model].
     ///
     /// # Examples
     ///
@@ -1264,6 +1264,8 @@ impl AtomicBool {
     /// }
     /// # }
     /// ```
+    ///
+    /// [memory model]: self#memory-model-for-atomic-accesses
     #[inline]
     #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
     #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
@@ -1519,8 +1521,8 @@ impl<T> AtomicPtr<T> {
     ///   can be bigger than `align_of::<*mut T>()`).
     /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
     /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
-    ///   allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes,
-    ///   without synchronization.
+    ///   allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
+    ///   sizes, without synchronization.
     ///
     /// [valid]: crate::ptr#safety
     /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
@@ -2487,8 +2489,8 @@ impl<T> AtomicPtr<T> {
     /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
     /// atomic types work with interior mutability. All modifications of an atomic change the value
     /// through a shared reference, and can do so safely as long as they use atomic operations. Any
-    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
-    /// restriction: operations on it must be atomic.
+    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
+    /// requirements of the [memory model].
     ///
     /// # Examples
     ///
@@ -2507,6 +2509,8 @@ impl<T> AtomicPtr<T> {
     ///     my_atomic_op(atomic.as_ptr());
     /// }
     /// ```
+    ///
+    /// [memory model]: self#memory-model-for-atomic-accesses
     #[inline]
     #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
     #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
@@ -2698,8 +2702,8 @@ macro_rules! atomic_int {
             }]
             /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
             /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
-            ///   allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes,
-            ///   without synchronization.
+            ///   allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
+            ///   sizes, without synchronization.
             ///
             /// [valid]: crate::ptr#safety
             /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
@@ -3619,8 +3623,8 @@ macro_rules! atomic_int {
             /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
             /// atomic types work with interior mutability. All modifications of an atomic change the value
             /// through a shared reference, and can do so safely as long as they use atomic operations. Any
-            /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
-            /// restriction: operations on it must be atomic.
+            /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
+            /// requirements of the [memory model].
             ///
             /// # Examples
             ///
@@ -3640,6 +3644,8 @@ macro_rules! atomic_int {
             /// }
             /// # }
             /// ```
+            ///
+            /// [memory model]: self#memory-model-for-atomic-accesses
             #[inline]
             #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
             #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs
index 4cfac9ecc2a..029a7b00ad3 100644
--- a/library/coretests/tests/lib.rs
+++ b/library/coretests/tests/lib.rs
@@ -2,7 +2,6 @@
 #![cfg_attr(target_has_atomic = "128", feature(integer_atomics))]
 #![cfg_attr(test, feature(cfg_select))]
 #![feature(alloc_layout_extra)]
-#![feature(array_chunks)]
 #![feature(array_ptr_get)]
 #![feature(array_try_from_fn)]
 #![feature(array_windows)]
@@ -30,6 +29,7 @@
 #![feature(core_private_diy_float)]
 #![feature(cstr_display)]
 #![feature(dec2flt)]
+#![feature(drop_guard)]
 #![feature(duration_constants)]
 #![feature(duration_constructors)]
 #![feature(duration_constructors_lite)]
@@ -76,6 +76,7 @@
 #![feature(min_specialization)]
 #![feature(never_type)]
 #![feature(next_index)]
+#![feature(non_exhaustive_omitted_patterns_lint)]
 #![feature(numfmt)]
 #![feature(pattern)]
 #![feature(pointer_is_aligned_to)]
diff --git a/library/coretests/tests/macros.rs b/library/coretests/tests/macros.rs
index 1c6aa90dfbc..50b5eb63e43 100644
--- a/library/coretests/tests/macros.rs
+++ b/library/coretests/tests/macros.rs
@@ -213,3 +213,9 @@ fn _expression() {
         }
     );
 }
+
+#[deny(non_exhaustive_omitted_patterns)]
+fn _matches_does_not_trigger_non_exhaustive_omitted_patterns_lint(o: core::sync::atomic::Ordering) {
+    // Ordering is a #[non_exhaustive] enum from a separate crate
+    let _m = matches!(o, core::sync::atomic::Ordering::Relaxed);
+}
diff --git a/library/coretests/tests/mem.rs b/library/coretests/tests/mem.rs
index 9c15be4a8c4..e896c61ef48 100644
--- a/library/coretests/tests/mem.rs
+++ b/library/coretests/tests/mem.rs
@@ -1,5 +1,6 @@
 use core::mem::*;
 use core::{array, ptr};
+use std::cell::Cell;
 #[cfg(panic = "unwind")]
 use std::rc::Rc;
 
@@ -795,3 +796,48 @@ fn const_maybe_uninit_zeroed() {
 
     assert_eq!(unsafe { (*UNINIT.0.cast::<[[u8; SIZE]; 1]>())[0] }, [0u8; SIZE]);
 }
+
+#[test]
+fn drop_guards_only_dropped_by_closure_when_run() {
+    let value_drops = Cell::new(0);
+    let value = DropGuard::new((), |()| value_drops.set(1 + value_drops.get()));
+    let closure_drops = Cell::new(0);
+    let guard = DropGuard::new(value, |_| closure_drops.set(1 + closure_drops.get()));
+    assert_eq!(value_drops.get(), 0);
+    assert_eq!(closure_drops.get(), 0);
+    drop(guard);
+    assert_eq!(value_drops.get(), 1);
+    assert_eq!(closure_drops.get(), 1);
+}
+
+#[test]
+fn drop_guard_into_inner() {
+    let dropped = Cell::new(false);
+    let value = DropGuard::new(42, |_| dropped.set(true));
+    let guard = DropGuard::new(value, |_| dropped.set(true));
+    let inner = DropGuard::into_inner(guard);
+    assert_eq!(dropped.get(), false);
+    assert_eq!(*inner, 42);
+}
+
+#[test]
+#[cfg(panic = "unwind")]
+fn drop_guard_always_drops_value_if_closure_drop_unwinds() {
+    // Create a value with a destructor, which we will validate ran successfully.
+    let mut value_was_dropped = false;
+    let value_with_tracked_destruction = DropGuard::new((), |_| value_was_dropped = true);
+
+    // Create a closure that will begin unwinding when dropped.
+    let drop_bomb = DropGuard::new((), |_| panic!());
+    let closure_that_panics_on_drop = move |_| {
+        let _drop_bomb = drop_bomb;
+    };
+
+    // This will run the closure, which will panic when dropped. This should
+    // run the destructor of the value we passed, which we validate.
+    let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+        let guard = DropGuard::new(value_with_tracked_destruction, closure_that_panics_on_drop);
+        DropGuard::into_inner(guard);
+    }));
+    assert!(value_was_dropped);
+}
diff --git a/library/coretests/tests/slice.rs b/library/coretests/tests/slice.rs
index d17e681480c..992f24cb18f 100644
--- a/library/coretests/tests/slice.rs
+++ b/library/coretests/tests/slice.rs
@@ -612,190 +612,6 @@ fn test_chunks_exact_mut_zip() {
 }
 
 #[test]
-fn test_array_chunks_infer() {
-    let v: &[i32] = &[0, 1, 2, 3, 4, -4];
-    let c = v.array_chunks();
-    for &[a, b, c] in c {
-        assert_eq!(a + b + c, 3);
-    }
-
-    let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
-    let total = v2.array_chunks().map(|&[a, b]| a * b).sum::<i32>();
-    assert_eq!(total, 2 * 3 + 4 * 5);
-}
-
-#[test]
-fn test_array_chunks_count() {
-    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
-    let c = v.array_chunks::<3>();
-    assert_eq!(c.count(), 2);
-
-    let v2: &[i32] = &[0, 1, 2, 3, 4];
-    let c2 = v2.array_chunks::<2>();
-    assert_eq!(c2.count(), 2);
-
-    let v3: &[i32] = &[];
-    let c3 = v3.array_chunks::<2>();
-    assert_eq!(c3.count(), 0);
-}
-
-#[test]
-fn test_array_chunks_nth() {
-    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
-    let mut c = v.array_chunks::<2>();
-    assert_eq!(c.nth(1).unwrap(), &[2, 3]);
-    assert_eq!(c.next().unwrap(), &[4, 5]);
-
-    let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
-    let mut c2 = v2.array_chunks::<3>();
-    assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
-    assert_eq!(c2.next(), None);
-}
-
-#[test]
-fn test_array_chunks_nth_back() {
-    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
-    let mut c = v.array_chunks::<2>();
-    assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
-    assert_eq!(c.next().unwrap(), &[0, 1]);
-    assert_eq!(c.next(), None);
-
-    let v2: &[i32] = &[0, 1, 2, 3, 4];
-    let mut c2 = v2.array_chunks::<3>();
-    assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
-    assert_eq!(c2.next(), None);
-    assert_eq!(c2.next_back(), None);
-
-    let v3: &[i32] = &[0, 1, 2, 3, 4];
-    let mut c3 = v3.array_chunks::<10>();
-    assert_eq!(c3.nth_back(0), None);
-}
-
-#[test]
-fn test_array_chunks_last() {
-    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
-    let c = v.array_chunks::<2>();
-    assert_eq!(c.last().unwrap(), &[4, 5]);
-
-    let v2: &[i32] = &[0, 1, 2, 3, 4];
-    let c2 = v2.array_chunks::<2>();
-    assert_eq!(c2.last().unwrap(), &[2, 3]);
-}
-
-#[test]
-fn test_array_chunks_remainder() {
-    let v: &[i32] = &[0, 1, 2, 3, 4];
-    let c = v.array_chunks::<2>();
-    assert_eq!(c.remainder(), &[4]);
-}
-
-#[test]
-fn test_array_chunks_zip() {
-    let v1: &[i32] = &[0, 1, 2, 3, 4];
-    let v2: &[i32] = &[6, 7, 8, 9, 10];
-
-    let res = v1
-        .array_chunks::<2>()
-        .zip(v2.array_chunks::<2>())
-        .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
-        .collect::<Vec<_>>();
-    assert_eq!(res, vec![14, 22]);
-}
-
-#[test]
-fn test_array_chunks_mut_infer() {
-    let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
-    for a in v.array_chunks_mut() {
-        let sum = a.iter().sum::<i32>();
-        *a = [sum; 3];
-    }
-    assert_eq!(v, &[3, 3, 3, 12, 12, 12, 6]);
-
-    let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
-    v2.array_chunks_mut().for_each(|[a, b]| core::mem::swap(a, b));
-    assert_eq!(v2, &[1, 0, 3, 2, 5, 4, 6]);
-}
-
-#[test]
-fn test_array_chunks_mut_count() {
-    let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
-    let c = v.array_chunks_mut::<3>();
-    assert_eq!(c.count(), 2);
-
-    let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
-    let c2 = v2.array_chunks_mut::<2>();
-    assert_eq!(c2.count(), 2);
-
-    let v3: &mut [i32] = &mut [];
-    let c3 = v3.array_chunks_mut::<2>();
-    assert_eq!(c3.count(), 0);
-}
-
-#[test]
-fn test_array_chunks_mut_nth() {
-    let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
-    let mut c = v.array_chunks_mut::<2>();
-    assert_eq!(c.nth(1).unwrap(), &[2, 3]);
-    assert_eq!(c.next().unwrap(), &[4, 5]);
-
-    let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
-    let mut c2 = v2.array_chunks_mut::<3>();
-    assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
-    assert_eq!(c2.next(), None);
-}
-
-#[test]
-fn test_array_chunks_mut_nth_back() {
-    let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
-    let mut c = v.array_chunks_mut::<2>();
-    assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
-    assert_eq!(c.next().unwrap(), &[0, 1]);
-    assert_eq!(c.next(), None);
-
-    let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
-    let mut c2 = v2.array_chunks_mut::<3>();
-    assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
-    assert_eq!(c2.next(), None);
-    assert_eq!(c2.next_back(), None);
-
-    let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
-    let mut c3 = v3.array_chunks_mut::<10>();
-    assert_eq!(c3.nth_back(0), None);
-}
-
-#[test]
-fn test_array_chunks_mut_last() {
-    let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
-    let c = v.array_chunks_mut::<2>();
-    assert_eq!(c.last().unwrap(), &[4, 5]);
-
-    let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
-    let c2 = v2.array_chunks_mut::<2>();
-    assert_eq!(c2.last().unwrap(), &[2, 3]);
-}
-
-#[test]
-fn test_array_chunks_mut_remainder() {
-    let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
-    let c = v.array_chunks_mut::<2>();
-    assert_eq!(c.into_remainder(), &[4]);
-}
-
-#[test]
-fn test_array_chunks_mut_zip() {
-    let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
-    let v2: &[i32] = &[6, 7, 8, 9, 10];
-
-    for (a, b) in v1.array_chunks_mut::<2>().zip(v2.array_chunks::<2>()) {
-        let sum = b.iter().sum::<i32>();
-        for v in a {
-            *v += sum;
-        }
-    }
-    assert_eq!(v1, [13, 14, 19, 20, 4]);
-}
-
-#[test]
 fn test_array_windows_infer() {
     let v: &[i32] = &[0, 1, 0, 1];
     assert_eq!(v.array_windows::<2>().count(), 3);
diff --git a/library/rustc-std-workspace-alloc/Cargo.toml b/library/rustc-std-workspace-alloc/Cargo.toml
index 5a177808d1b..a5b51059119 100644
--- a/library/rustc-std-workspace-alloc/Cargo.toml
+++ b/library/rustc-std-workspace-alloc/Cargo.toml
@@ -9,6 +9,9 @@ edition = "2024"
 
 [lib]
 path = "lib.rs"
+test = false
+bench = false
+doc = false
 
 [dependencies]
 alloc = { path = "../alloc" }
diff --git a/library/rustc-std-workspace-core/Cargo.toml b/library/rustc-std-workspace-core/Cargo.toml
index 1ddc112380f..d68965c6345 100644
--- a/library/rustc-std-workspace-core/Cargo.toml
+++ b/library/rustc-std-workspace-core/Cargo.toml
@@ -11,6 +11,9 @@ edition = "2024"
 
 [lib]
 path = "lib.rs"
+test = false
+bench = false
+doc = false
 
 [dependencies]
 core = { path = "../core", public = true }
diff --git a/library/rustc-std-workspace-std/Cargo.toml b/library/rustc-std-workspace-std/Cargo.toml
index f70994e1f88..6079dc85d90 100644
--- a/library/rustc-std-workspace-std/Cargo.toml
+++ b/library/rustc-std-workspace-std/Cargo.toml
@@ -9,6 +9,9 @@ edition = "2024"
 
 [lib]
 path = "lib.rs"
+test = false
+bench = false
+doc = false
 
 [dependencies]
 std = { path = "../std" }
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index 57859ea9147..29ab9be0e69 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -66,7 +66,7 @@ rand_xorshift = "0.4.0"
 dlmalloc = { version = "0.2.10", features = ['rustc-dep-of-std'] }
 
 [target.x86_64-fortanix-unknown-sgx.dependencies]
-fortanix-sgx-abi = { version = "0.5.0", features = [
+fortanix-sgx-abi = { version = "0.6.1", features = [
     'rustc-dep-of-std',
 ], public = true }
 
@@ -97,9 +97,7 @@ backtrace-trace-only = []
 panic-unwind = ["dep:panic_unwind"]
 compiler-builtins-c = ["alloc/compiler-builtins-c"]
 compiler-builtins-mem = ["alloc/compiler-builtins-mem"]
-compiler-builtins-no-asm = ["alloc/compiler-builtins-no-asm"]
 compiler-builtins-no-f16-f128 = ["alloc/compiler-builtins-no-f16-f128"]
-compiler-builtins-mangled-names = ["alloc/compiler-builtins-mangled-names"]
 llvm-libunwind = ["unwind/llvm-libunwind"]
 system-llvm-libunwind = ["unwind/system-llvm-libunwind"]
 
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index 6d7d576b32a..9f17ff76445 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -617,7 +617,7 @@ impl Error for JoinPathsError {
 /// # Unix
 ///
 /// - Returns the value of the 'HOME' environment variable if it is set
-///   (including to an empty string).
+///   (and not an empty string).
 /// - Otherwise, it tries to determine the home directory by invoking the `getpwuid_r` function
 ///   using the UID of the current user. An empty home directory field returned from the
 ///   `getpwuid_r` function is considered to be a valid value.
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 323742a75b0..77301d7228e 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -324,13 +324,13 @@
 //
 // Library features (core):
 // tidy-alphabetical-start
-#![feature(array_chunks)]
 #![feature(bstr)]
 #![feature(bstr_internals)]
 #![feature(char_internals)]
 #![feature(clone_to_uninit)]
 #![feature(core_intrinsics)]
 #![feature(core_io_borrowed_buf)]
+#![feature(drop_guard)]
 #![feature(duration_constants)]
 #![feature(error_generic_member_access)]
 #![feature(error_iter)]
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
index e67b4f6f22f..6ef3bf25cf6 100644
--- a/library/std/src/sync/mod.rs
+++ b/library/std/src/sync/mod.rs
@@ -225,6 +225,8 @@ pub use self::poison::{MappedMutexGuard, MappedRwLockReadGuard, MappedRwLockWrit
 pub mod mpmc;
 pub mod mpsc;
 
+#[unstable(feature = "sync_nonpoison", issue = "134645")]
+pub mod nonpoison;
 #[unstable(feature = "sync_poison_mod", issue = "134646")]
 pub mod poison;
 
diff --git a/library/std/src/sync/nonpoison.rs b/library/std/src/sync/nonpoison.rs
new file mode 100644
index 00000000000..2bbf226dc2c
--- /dev/null
+++ b/library/std/src/sync/nonpoison.rs
@@ -0,0 +1,37 @@
+//! Non-poisoning synchronous locks.
+//!
+//! The difference from the locks in the [`poison`] module is that the locks in this module will not
+//! become poisoned when a thread panics while holding a guard.
+//!
+//! [`poison`]: super::poison
+
+use crate::fmt;
+
+/// A type alias for the result of a nonblocking locking method.
+#[unstable(feature = "sync_nonpoison", issue = "134645")]
+pub type TryLockResult<Guard> = Result<Guard, WouldBlock>;
+
+/// A lock could not be acquired at this time because the operation would otherwise block.
+#[unstable(feature = "sync_nonpoison", issue = "134645")]
+pub struct WouldBlock;
+
+#[unstable(feature = "sync_nonpoison", issue = "134645")]
+impl fmt::Debug for WouldBlock {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        "WouldBlock".fmt(f)
+    }
+}
+
+#[unstable(feature = "sync_nonpoison", issue = "134645")]
+impl fmt::Display for WouldBlock {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        "try_lock failed because the operation would block".fmt(f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+pub use self::mutex::MappedMutexGuard;
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+pub use self::mutex::{Mutex, MutexGuard};
+
+mod mutex;
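
A sketch of the surface this module adds, assuming the unstable `sync_nonpoison` and `nonpoison_mutex` features: `lock` returns the guard directly rather than a `Result`, and `try_lock` reports contention with `WouldBlock`:

```rust
#![feature(sync_nonpoison)]
#![feature(nonpoison_mutex)]

use std::sync::nonpoison::{Mutex, WouldBlock};

let m = Mutex::new(5u32);
let guard = m.lock(); // no `.unwrap()`: there is no poison state to check
match m.try_lock() {
    Ok(_) => unreachable!("the lock is already held"),
    Err(WouldBlock) => println!("contended, try again later"),
}
drop(guard);
```
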
diff --git a/library/std/src/sync/nonpoison/mutex.rs b/library/std/src/sync/nonpoison/mutex.rs
new file mode 100644
index 00000000000..b6861c78f00
--- /dev/null
+++ b/library/std/src/sync/nonpoison/mutex.rs
@@ -0,0 +1,611 @@
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::mem::{self, ManuallyDrop};
+use crate::ops::{Deref, DerefMut};
+use crate::ptr::NonNull;
+use crate::sync::nonpoison::{TryLockResult, WouldBlock};
+use crate::sys::sync as sys;
+
+/// A mutual exclusion primitive useful for protecting shared data that does not keep track of
+/// lock poisoning.
+///
+/// For more information about mutexes, check out the documentation for the poisoning variant of
+/// this lock at [`poison::Mutex`].
+///
+/// [`poison::Mutex`]: crate::sync::poison::Mutex
+///
+/// # Examples
+///
+/// Note that this `Mutex` does **not** use poisoning to flag threads that panicked while
+/// holding the lock. If you need that functionality, see [`poison::Mutex`].
+///
+/// ```
+/// #![feature(nonpoison_mutex)]
+///
+/// use std::thread;
+/// use std::sync::{Arc, nonpoison::Mutex};
+///
+/// let mutex = Arc::new(Mutex::new(0u32));
+/// let mut handles = Vec::new();
+///
+/// for n in 0..10 {
+///     let m = Arc::clone(&mutex);
+///     let handle = thread::spawn(move || {
+///         let mut guard = m.lock();
+///         *guard += 1;
+///         panic!("panic from thread {n} {guard}")
+///     });
+///     handles.push(handle);
+/// }
+///
+/// for h in handles {
+///     let _ = h.join();
+/// }
+///
+/// println!("Finished, locked {} times", mutex.lock());
+/// ```
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutex")]
+pub struct Mutex<T: ?Sized> {
+    inner: sys::Mutex,
+    data: UnsafeCell<T>,
+}
+
+/// `T` must be `Send` for a [`Mutex`] to be `Send` because it is possible to acquire
+/// the owned `T` from the `Mutex` via [`into_inner`].
+///
+/// [`into_inner`]: Mutex::into_inner
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+/// `T` must be `Send` for [`Mutex`] to be `Sync`.
+/// This ensures that the protected data can be accessed safely from multiple threads
+/// without causing data races or other unsafe behavior.
+///
+/// [`Mutex<T>`] provides mutable access to `T` to one thread at a time. However, it's essential
+/// for `T` to be `Send` because it's not safe for non-`Send` structures to be accessed in
+/// this manner. For instance, consider [`Rc`], a non-atomic reference counted smart pointer,
+/// which is not `Send`. With `Rc`, we can have multiple copies pointing to the same heap
+/// allocation with a non-atomic reference count. If we were to use `Mutex<Rc<_>>`, it would
+/// only protect one instance of `Rc` from shared access, leaving other copies vulnerable
+/// to potential data races.
+///
+/// Also note that it is not necessary for `T` to be `Sync` as `&T` is only made available
+/// to one thread at a time if `T` is not `Sync`.
+///
+/// [`Rc`]: crate::rc::Rc
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// [`Deref`] and [`DerefMut`] implementations.
+///
+/// This structure is created by the [`lock`] and [`try_lock`] methods on
+/// [`Mutex`].
+///
+/// [`lock`]: Mutex::lock
+/// [`try_lock`]: Mutex::try_lock
+#[must_use = "if unused the Mutex will immediately unlock"]
+#[must_not_suspend = "holding a MutexGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutexGuard")]
+pub struct MutexGuard<'a, T: ?Sized + 'a> {
+    lock: &'a Mutex<T>,
+}
+
+/// A [`MutexGuard`] is not `Send` to maximize platform portability.
+///
+/// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to
+/// release mutex locks on the same thread they were acquired.
+/// For this reason, [`MutexGuard`] must not implement `Send` to prevent it being dropped from
+/// another thread.
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized> !Send for MutexGuard<'_, T> {}
+
+/// `T` must be `Sync` for a [`MutexGuard<T>`] to be `Sync`
+/// because it is possible to get a `&T` from `&MutexGuard` (via `Deref`).
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
+
+// FIXME(nonpoison_condvar): Use this link instead: [`Condvar`]: crate::sync::nonpoison::Condvar
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data. When this structure is dropped (falls out
+/// of scope), the lock will be unlocked.
+///
+/// The main difference between `MappedMutexGuard` and [`MutexGuard`] is that the
+/// former cannot be used with [`Condvar`], since that could introduce soundness issues if the
+/// locked object is modified by another thread while the `Mutex` is unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// [`Deref`] and [`DerefMut`] implementations.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods on
+/// [`MutexGuard`].
+///
+/// [`map`]: MutexGuard::map
+/// [`filter_map`]: MutexGuard::filter_map
+/// [`Condvar`]: crate::sync::Condvar
+#[must_use = "if unused the Mutex will immediately unlock"]
+#[must_not_suspend = "holding a MappedMutexGuard across suspend \
+                      points can cause deadlocks, delays, \
+                      and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedMutexGuard<'a, T: ?Sized + 'a> {
+    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
+    // `MappedMutexGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
+    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
+    // below for the correct variance over `T` (invariance).
+    data: NonNull<T>,
+    inner: &'a sys::Mutex,
+    _variance: PhantomData<&'a mut T>,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized> !Send for MappedMutexGuard<'_, T> {}
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedMutexGuard<'_, T> {}
+
+impl<T> Mutex<T> {
+    /// Creates a new mutex in an unlocked state ready for use.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    ///
+    /// use std::sync::nonpoison::Mutex;
+    ///
+    /// let mutex = Mutex::new(0);
+    /// ```
+    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    #[inline]
+    pub const fn new(t: T) -> Mutex<T> {
+        Mutex { inner: sys::Mutex::new(), data: UnsafeCell::new(t) }
+    }
+
+    /// Returns the contained value by cloning it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::Mutex;
+    ///
+    /// let mutex = Mutex::new(7);
+    ///
+    /// assert_eq!(mutex.get_cloned(), 7);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn get_cloned(&self) -> T
+    where
+        T: Clone,
+    {
+        self.lock().clone()
+    }
+
+    /// Sets the contained value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::Mutex;
+    ///
+    /// let mutex = Mutex::new(7);
+    ///
+    /// assert_eq!(mutex.get_cloned(), 7);
+    /// mutex.set(11);
+    /// assert_eq!(mutex.get_cloned(), 11);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn set(&self, value: T) {
+        if mem::needs_drop::<T>() {
+            // If the contained value has a non-trivial destructor, we
+            // call that destructor after the lock has been released.
+            drop(self.replace(value))
+        } else {
+            *self.lock() = value;
+        }
+    }
+
+    /// Replaces the contained value with `value`, and returns the old contained value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    /// #![feature(lock_value_accessors)]
+    ///
+    /// use std::sync::nonpoison::Mutex;
+    ///
+    /// let mutex = Mutex::new(7);
+    ///
+    /// assert_eq!(mutex.replace(11), 7);
+    /// assert_eq!(mutex.get_cloned(), 11);
+    /// ```
+    #[unstable(feature = "lock_value_accessors", issue = "133407")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn replace(&self, value: T) -> T {
+        let mut guard = self.lock();
+        mem::replace(&mut *guard, value)
+    }
+}
+
+impl<T: ?Sized> Mutex<T> {
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the local thread until it is available to acquire
+    /// the mutex. Upon returning, the thread is the only thread with the lock
+    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+    /// the guard goes out of scope, the mutex will be unlocked.
+    ///
+    /// The exact behavior on locking a mutex in the thread which already holds
+    /// the lock is left unspecified. However, this function will not return on
+    /// the second call (it might panic or deadlock, for example).
+    ///
+    /// # Panics
+    ///
+    /// This function might panic when called if the lock is already held by
+    /// the current thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    ///
+    /// use std::sync::{Arc, nonpoison::Mutex};
+    /// use std::thread;
+    ///
+    /// let mutex = Arc::new(Mutex::new(0));
+    /// let c_mutex = Arc::clone(&mutex);
+    ///
+    /// thread::spawn(move || {
+    ///     *c_mutex.lock() = 10;
+    /// }).join().expect("thread::spawn failed");
+    /// assert_eq!(*mutex.lock(), 10);
+    /// ```
+    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn lock(&self) -> MutexGuard<'_, T> {
+        unsafe {
+            self.inner.lock();
+            MutexGuard::new(self)
+        }
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// This function does not block. If the lock could not be acquired at this time, then
+    /// [`WouldBlock`] is returned. Otherwise, an RAII guard is returned.
+    ///
+    /// The lock will be unlocked when the guard is dropped.
+    ///
+    /// # Errors
+    ///
+    /// If the mutex could not be acquired because it is already locked, then this call will return
+    /// the [`WouldBlock`] error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    ///
+    /// use std::sync::{Arc, nonpoison::Mutex};
+    /// use std::thread;
+    ///
+    /// let mutex = Arc::new(Mutex::new(0));
+    /// let c_mutex = Arc::clone(&mutex);
+    ///
+    /// thread::spawn(move || {
+    ///     let mut lock = c_mutex.try_lock();
+    ///     if let Ok(ref mut mutex) = lock {
+    ///         **mutex = 10;
+    ///     } else {
+    ///         println!("try_lock failed");
+    ///     }
+    /// }).join().expect("thread::spawn failed");
+    /// assert_eq!(*mutex.lock(), 10);
+    /// ```
+    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
+        unsafe { if self.inner.try_lock() { Ok(MutexGuard::new(self)) } else { Err(WouldBlock) } }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    ///
+    /// use std::sync::nonpoison::Mutex;
+    ///
+    /// let mutex = Mutex::new(0);
+    /// assert_eq!(mutex.into_inner(), 0);
+    /// ```
+    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn into_inner(self) -> T
+    where
+        T: Sized,
+    {
+        self.data.into_inner()
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place -- the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(nonpoison_mutex)]
+    ///
+    /// use std::sync::nonpoison::Mutex;
+    ///
+    /// let mut mutex = Mutex::new(0);
+    /// *mutex.get_mut() = 10;
+    /// assert_eq!(*mutex.lock(), 10);
+    /// ```
+    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.data.get_mut()
+    }
+
+    /// Returns a raw pointer to the underlying data.
+    ///
+    /// The returned pointer is always non-null and properly aligned, but it is
+    /// the user's responsibility to ensure that any reads and writes through it
+    /// are properly synchronized to avoid data races, and that it is not read
+    /// or written through after the mutex is dropped.
+    #[unstable(feature = "mutex_data_ptr", issue = "140368")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn data_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T> From<T> for Mutex<T> {
+    /// Creates a new mutex in an unlocked state ready for use.
+    /// This is equivalent to [`Mutex::new`].
+    fn from(t: T) -> Self {
+        Mutex::new(t)
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized + Default> Default for Mutex<T> {
+    /// Creates a `Mutex<T>` with the `Default` value for `T`.
+    fn default() -> Mutex<T> {
+        Mutex::new(Default::default())
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut d = f.debug_struct("Mutex");
+        match self.try_lock() {
+            Ok(guard) => {
+                d.field("data", &&*guard);
+            }
+            Err(WouldBlock) => {
+                d.field("data", &"<locked>");
+            }
+        }
+        d.finish_non_exhaustive()
+    }
+}
+
+impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
+    unsafe fn new(lock: &'mutex Mutex<T>) -> MutexGuard<'mutex, T> {
+        MutexGuard { lock }
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized> Deref for MutexGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized> Drop for MutexGuard<'_, T> {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            self.lock.inner.unlock();
+        }
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+#[unstable(feature = "nonpoison_mutex", issue = "134645")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+impl<'a, T: ?Sized> MutexGuard<'a, T> {
+    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
+    /// an enum variant.
+    ///
+    /// The `Mutex` is already locked, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MutexGuard::map(...)`. A method would interfere with methods of the
+    /// same name on the contents of the `MutexGuard` used through `Deref`.
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn map<U, F>(orig: Self, f: F) -> MappedMutexGuard<'a, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
+        // passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData }
+    }
+
+    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
+    /// original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
+    ///
+    /// The `Mutex` is already locked, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MutexGuard::filter_map(...)`. A method would interfere with methods of the
+    /// same name on the contents of the `MutexGuard` used through `Deref`.
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
+        // passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { &mut *orig.lock.data.get() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData })
+            }
+            None => Err(orig),
+        }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Deref for MappedMutexGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { self.data.as_ref() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> DerefMut for MappedMutexGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { self.data.as_mut() }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Drop for MappedMutexGuard<'_, T> {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            self.inner.unlock();
+        }
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+impl<'a, T: ?Sized> MappedMutexGuard<'a, T> {
+    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
+    /// an enum variant.
+    ///
+    /// The `Mutex` is already locked, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MappedMutexGuard::map(...)`. A method would interfere with methods of the
+    /// same name on the contents of the `MappedMutexGuard` used through `Deref`.
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn map<U, F>(mut orig: Self, f: F) -> MappedMutexGuard<'a, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
+        // passed to it. If the closure panics, the guard will be dropped.
+        let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
+        let orig = ManuallyDrop::new(orig);
+        MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData }
+    }
+
+    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
+    /// original guard is returned as an `Err(...)` if the closure returns
+    /// `None`.
+    ///
+    /// The `Mutex` is already locked, so this cannot fail.
+    ///
+    /// This is an associated function that needs to be used as
+    /// `MappedMutexGuard::filter_map(...)`. A method would interfere with methods of the
+    /// same name on the contents of the `MappedMutexGuard` used through `Deref`.
+    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
+    pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+        U: ?Sized,
+    {
+        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
+        // was created, and have been upheld throughout `map` and/or `filter_map`.
+        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
+        // passed to it. If the closure panics, the guard will be dropped.
+        match f(unsafe { orig.data.as_mut() }) {
+            Some(data) => {
+                let data = NonNull::from(data);
+                let orig = ManuallyDrop::new(orig);
+                Ok(MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData })
+            }
+            None => Err(orig),
+        }
+    }
+}
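
As a usage sketch for the mapped-guard API above (nightly-only; assumes `nonpoison_mutex` plus `mapped_lock_guards`): `MutexGuard::map` narrows a guard to one field, and `filter_map` returns the original guard when the closure declines:

```rust
#![feature(nonpoison_mutex, mapped_lock_guards)]

use std::sync::nonpoison::{Mutex, MutexGuard};

struct Config {
    name: String,
    retries: u32,
}

fn main() {
    let config = Mutex::new(Config { name: "default".into(), retries: 3 });

    // The mutex stays locked for as long as the mapped guard is alive.
    let mut name = MutexGuard::map(config.lock(), |c| &mut c.name);
    name.push_str("-updated");
    drop(name);

    // `filter_map` hands the whole guard back as `Err(...)` if the closure
    // returns `None`.
    let retries = MutexGuard::filter_map(config.lock(), |c| {
        (c.retries > 0).then_some(&mut c.retries)
    });
    assert!(retries.is_ok());
}
```
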
diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs
index 0c05f152ef8..b901a5701a4 100644
--- a/library/std/src/sync/poison.rs
+++ b/library/std/src/sync/poison.rs
@@ -13,7 +13,9 @@
 //! depend on the primitive. See [#Overview] below.
 //!
 //! For the alternative implementations that do not employ poisoning,
-//! see `std::sync::nonpoisoning`.
+//! see [`std::sync::nonpoison`].
+//!
+//! [`std::sync::nonpoison`]: crate::sync::nonpoison
 //!
 //! # Overview
 //!
@@ -56,8 +58,6 @@
 //!   while it is locked exclusively (write mode). If a panic occurs in any reader,
 //!   then the lock will not be poisoned.
 
-// FIXME(sync_nonpoison) add links to sync::nonpoison to the doc comment above.
-
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use self::condvar::{Condvar, WaitTimeoutResult};
 #[unstable(feature = "mapped_lock_guards", issue = "117108")]
diff --git a/library/std/src/sync/poison/condvar.rs b/library/std/src/sync/poison/condvar.rs
index 7f0f3f652bc..0e9d4233c65 100644
--- a/library/std/src/sync/poison/condvar.rs
+++ b/library/std/src/sync/poison/condvar.rs
@@ -13,7 +13,7 @@ use crate::time::{Duration, Instant};
 #[stable(feature = "wait_timeout", since = "1.5.0")]
 pub struct WaitTimeoutResult(bool);
 
-// FIXME(sync_nonpoison): `WaitTimeoutResult` is actually poisoning-agnostic, it seems.
+// FIXME(nonpoison_condvar): `WaitTimeoutResult` is actually poisoning-agnostic, it seems.
 // Should we take advantage of this fact?
 impl WaitTimeoutResult {
     /// Returns `true` if the wait was known to have timed out.
diff --git a/library/std/src/sync/poison/mutex.rs b/library/std/src/sync/poison/mutex.rs
index 30325be685c..64744f18c74 100644
--- a/library/std/src/sync/poison/mutex.rs
+++ b/library/std/src/sync/poison/mutex.rs
@@ -650,7 +650,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
                 d.field("data", &&**err.get_ref());
             }
             Err(TryLockError::WouldBlock) => {
-                d.field("data", &format_args!("<locked>"));
+                d.field("data", &"<locked>");
             }
         }
         d.field("poisoned", &self.poison.get());
diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs b/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs
index dea44124f45..5041770faf6 100644
--- a/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs
+++ b/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs
@@ -267,7 +267,7 @@ pub fn send(event_set: u64, tcs: Option<Tcs>) -> IoResult<()> {
 /// Usercall `insecure_time`. See the ABI documentation for more information.
 #[unstable(feature = "sgx_platform", issue = "56975")]
 pub fn insecure_time() -> Duration {
-    let t = unsafe { raw::insecure_time() };
+    let t = unsafe { raw::insecure_time().0 };
     Duration::new(t / 1_000_000_000, (t % 1_000_000_000) as _)
 }
 
diff --git a/library/std/src/sys/pal/unix/os.rs b/library/std/src/sys/pal/unix/os.rs
index 850bdfdf5b5..0e68313cc3e 100644
--- a/library/std/src/sys/pal/unix/os.rs
+++ b/library/std/src/sys/pal/unix/os.rs
@@ -633,7 +633,10 @@ pub fn temp_dir() -> PathBuf {
 }
 
 pub fn home_dir() -> Option<PathBuf> {
-    return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from);
+    return crate::env::var_os("HOME")
+        .filter(|s| !s.is_empty())
+        .or_else(|| unsafe { fallback() })
+        .map(PathBuf::from);
 
     #[cfg(any(
         target_os = "android",
diff --git a/library/std/src/sys/random/sgx.rs b/library/std/src/sys/random/sgx.rs
index c3647a8df22..462b19003fa 100644
--- a/library/std/src/sys/random/sgx.rs
+++ b/library/std/src/sys/random/sgx.rs
@@ -46,22 +46,22 @@ fn rdrand16() -> u16 {
 }
 
 pub fn fill_bytes(bytes: &mut [u8]) {
-    let mut chunks = bytes.array_chunks_mut();
-    for chunk in &mut chunks {
+    let (chunks, remainder) = bytes.as_chunks_mut();
+    for chunk in chunks {
         *chunk = rdrand64().to_ne_bytes();
     }
 
-    let mut chunks = chunks.into_remainder().array_chunks_mut();
-    for chunk in &mut chunks {
+    let (chunks, remainder) = remainder.as_chunks_mut();
+    for chunk in chunks {
         *chunk = rdrand32().to_ne_bytes();
     }
 
-    let mut chunks = chunks.into_remainder().array_chunks_mut();
-    for chunk in &mut chunks {
+    let (chunks, remainder) = remainder.as_chunks_mut();
+    for chunk in chunks {
         *chunk = rdrand16().to_ne_bytes();
     }
 
-    if let [byte] = chunks.into_remainder() {
+    if let [byte] = remainder {
         *byte = rdrand16() as u8;
     }
 }
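
Here and in the UEFI backend below, the unstable `array_chunks_mut` iterator is replaced by `as_chunks_mut`, which splits the buffer into complete `[u8; N]` chunks plus a remainder in one call. A standalone sketch of the shape of that API, with a hypothetical `next_u64` standing in for the platform RNG (`rdrand64` in the real code):

```rust
// Hypothetical stand-in for the platform RNG (`rdrand64` in the SGX code).
fn next_u64() -> u64 {
    0x0123_4567_89ab_cdef
}

fn fill_bytes(bytes: &mut [u8]) {
    // `as_chunks_mut::<8>()` yields (&mut [[u8; 8]], &mut [u8]): every
    // complete 8-byte chunk, plus whatever tail is left over.
    let (chunks, remainder) = bytes.as_chunks_mut::<8>();
    for chunk in chunks {
        *chunk = next_u64().to_ne_bytes();
    }

    // Top up the tail (0..=7 bytes) from one more generator call.
    let src = next_u64().to_ne_bytes();
    let n = remainder.len();
    remainder.copy_from_slice(&src[..n]);
}

fn main() {
    let mut buf = [0u8; 13];
    fill_bytes(&mut buf);
    assert!(buf.iter().any(|&b| b != 0));
}
```
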
diff --git a/library/std/src/sys/random/uefi.rs b/library/std/src/sys/random/uefi.rs
index 5f001f0f532..4a71d32fffe 100644
--- a/library/std/src/sys/random/uefi.rs
+++ b/library/std/src/sys/random/uefi.rs
@@ -138,12 +138,11 @@ mod rdrand {
     }
 
     unsafe fn rdrand_exact(dest: &mut [u8]) -> Option<()> {
-        let mut chunks = dest.array_chunks_mut();
-        for chunk in &mut chunks {
+        let (chunks, tail) = dest.as_chunks_mut();
+        for chunk in chunks {
             *chunk = unsafe { rdrand() }?.to_ne_bytes();
         }
 
-        let tail = chunks.into_remainder();
         let n = tail.len();
         if n > 0 {
             let src = unsafe { rdrand() }?.to_ne_bytes();
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 62ecacccd2e..dff981c900c 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -1399,6 +1399,11 @@ where
 }
 
 /// The internal representation of a `Thread` handle
+///
+/// We explicitly set the alignment so that `Thread::into_raw` can guarantee
+/// it. This allows applications to stash extra metadata bits in the pointer's
+/// low bits, which can be rather useful when working with atomics.
+#[repr(align(8))]
 struct Inner {
     name: Option<ThreadNameString>,
     id: ThreadId,
@@ -1582,7 +1587,8 @@ impl Thread {
     /// Consumes the `Thread`, returning a raw pointer.
     ///
     /// To avoid a memory leak the pointer must be converted
-    /// back into a `Thread` using [`Thread::from_raw`].
+    /// back into a `Thread` using [`Thread::from_raw`]. The pointer is
+    /// guaranteed to be aligned to at least 8 bytes.
     ///
     /// # Examples
     ///
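
The new alignment guarantee is what makes pointer-tagging schemes workable; a hedged sketch (nightly-only, assuming the unstable `thread_raw` feature that provides `Thread::into_raw`/`from_raw`):

```rust
#![feature(thread_raw)]

use std::thread;

fn main() {
    let ptr = thread::current().into_raw();

    // `Inner` is now #[repr(align(8))], so the low three bits are always zero
    // and can carry metadata (e.g. in a tagged atomic pointer).
    assert_eq!(ptr.addr() & 0b111, 0);
    let tagged = ptr.map_addr(|a| a | 0b1);

    // Strip the tag before converting back; `from_raw` needs the original
    // pointer value.
    let untagged = tagged.map_addr(|a| a & !0b111);
    let thread = unsafe { thread::Thread::from_raw(untagged) };
    println!("round-tripped: {:?}", thread.id());
}
```
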
diff --git a/library/std/tests/sync/lib.rs b/library/std/tests/sync/lib.rs
index 51190f0894f..94f1fe96b6a 100644
--- a/library/std/tests/sync/lib.rs
+++ b/library/std/tests/sync/lib.rs
@@ -6,7 +6,10 @@
 #![feature(reentrant_lock)]
 #![feature(rwlock_downgrade)]
 #![feature(std_internals)]
+#![feature(sync_nonpoison)]
+#![feature(nonpoison_mutex)]
 #![allow(internal_features)]
+#![feature(macro_metavar_expr_concat)] // For concatenating identifiers in macros.
 
 mod barrier;
 mod condvar;
@@ -29,3 +32,55 @@ mod rwlock;
 
 #[path = "../common/mod.rs"]
 mod common;
+
+#[track_caller]
+fn result_unwrap<T, E: std::fmt::Debug>(x: Result<T, E>) -> T {
+    x.unwrap()
+}
+
+/// A macro that generates two test cases for both the poison and nonpoison locks.
+///
+/// To write a test that tests both `poison` and `nonpoison` locks, import any of the types
+/// under both `poison` and `nonpoison` using the module name `locks` instead. For example, write
+/// `use locks::Mutex;` instead of `use std::sync::poison::Mutex`. This will import the correct type
+/// for each test variant.
+///
+/// Write a test as normal in the `test_body`, but instead of calling `unwrap` on `poison` methods
+/// that return a `LockResult` or similar, call the function `maybe_unwrap(...)` on the result.
+///
+/// For example, call `maybe_unwrap(mutex.lock())` instead of `mutex.lock().unwrap()` or
+/// `maybe_unwrap(rwlock.read())` instead of `rwlock.read().unwrap()`.
+///
+/// For the `poison` types, `maybe_unwrap` will simply unwrap the `Result` (usually this is a form
+/// of `LockResult`, but it could also be other kinds of results). For the `nonpoison` types, it is
+/// a no-op (the identity function).
+///
+/// The test names will be prefixed with `poison_` or `nonpoison_`.
+macro_rules! nonpoison_and_poison_unwrap_test {
+    (
+        name: $name:ident,
+        test_body: {$($test_body:tt)*}
+    ) => {
+        // Creates the nonpoison test.
+        #[test]
+        fn ${concat(nonpoison_, $name)}() {
+            #[allow(unused_imports)]
+            use ::std::convert::identity as maybe_unwrap;
+            use ::std::sync::nonpoison as locks;
+
+            $($test_body)*
+        }
+
+        // Creates the poison test with the prefix `poison_`.
+        #[test]
+        fn ${concat(poison_, $name)}() {
+            #[allow(unused_imports)]
+            use super::result_unwrap as maybe_unwrap;
+            use ::std::sync::poison as locks;
+
+            $($test_body)*
+        }
+    }
+}
+
+use nonpoison_and_poison_unwrap_test;
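
Roughly what one invocation of the macro expands to (a hedged sketch, with the `maybe_unwrap` aliasing written out inline; `${concat(..)}` splices the prefix and the test name into a single identifier):

```rust
#![feature(sync_nonpoison, nonpoison_mutex, sync_poison_mod)]

#[test]
fn nonpoison_smoke() {
    use std::sync::nonpoison as locks;

    let m = locks::Mutex::new(());
    // `maybe_unwrap` is the identity here: `lock` already returns the guard.
    drop(m.lock());
    drop(m.lock());
}

#[test]
fn poison_smoke() {
    use std::sync::poison as locks;

    let m = locks::Mutex::new(());
    // For the poison variant, `maybe_unwrap` unwraps the `LockResult`.
    drop(m.lock().unwrap());
    drop(m.lock().unwrap());
}
```
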
diff --git a/library/std/tests/sync/mutex.rs b/library/std/tests/sync/mutex.rs
index ac82914d6de..90cefc0d594 100644
--- a/library/std/tests/sync/mutex.rs
+++ b/library/std/tests/sync/mutex.rs
@@ -6,7 +6,71 @@ use std::sync::mpsc::channel;
 use std::sync::{Arc, Condvar, MappedMutexGuard, Mutex, MutexGuard, TryLockError};
 use std::{hint, mem, thread};
 
-struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Nonpoison & Poison Tests
+////////////////////////////////////////////////////////////////////////////////////////////////////
+use super::nonpoison_and_poison_unwrap_test;
+
+nonpoison_and_poison_unwrap_test!(
+    name: smoke,
+    test_body: {
+        use locks::Mutex;
+
+        let m = Mutex::new(());
+        drop(maybe_unwrap(m.lock()));
+        drop(maybe_unwrap(m.lock()));
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: lots_and_lots,
+    test_body: {
+        use locks::Mutex;
+
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        let m = Arc::new(Mutex::new(0));
+
+        fn inc(m: &Mutex<u32>) {
+            for _ in 0..J {
+                *maybe_unwrap(m.lock()) += 1;
+            }
+        }
+
+        let (tx, rx) = channel();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            let m2 = m.clone();
+            thread::spawn(move || {
+                inc(&m2);
+                tx2.send(()).unwrap();
+            });
+            let tx2 = tx.clone();
+            let m2 = m.clone();
+            thread::spawn(move || {
+                inc(&m2);
+                tx2.send(()).unwrap();
+            });
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(*maybe_unwrap(m.lock()), J * K * 2);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: try_lock,
+    test_body: {
+        use locks::Mutex;
+
+        let m = Mutex::new(());
+        *m.try_lock().unwrap() = ();
+    }
+);
 
 #[derive(Eq, PartialEq, Debug)]
 struct NonCopy(i32);
@@ -26,58 +90,278 @@ fn test_needs_drop() {
     assert!(mem::needs_drop::<NonCopyNeedsDrop>());
 }
 
-#[derive(Clone, Eq, PartialEq, Debug)]
-struct Cloneable(i32);
+nonpoison_and_poison_unwrap_test!(
+    name: test_into_inner,
+    test_body: {
+        use locks::Mutex;
 
-#[test]
-fn smoke() {
-    let m = Mutex::new(());
-    drop(m.lock().unwrap());
-    drop(m.lock().unwrap());
-}
+        let m = Mutex::new(NonCopy(10));
+        assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(10));
+    }
+);
 
-#[test]
-fn lots_and_lots() {
-    const J: u32 = 1000;
-    const K: u32 = 3;
+nonpoison_and_poison_unwrap_test!(
+    name: test_into_inner_drop,
+    test_body: {
+        use locks::Mutex;
 
-    let m = Arc::new(Mutex::new(0));
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
 
-    fn inc(m: &Mutex<u32>) {
-        for _ in 0..J {
-            *m.lock().unwrap() += 1;
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = Mutex::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = maybe_unwrap(m.into_inner());
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
         }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
     }
+);
 
-    let (tx, rx) = channel();
-    for _ in 0..K {
-        let tx2 = tx.clone();
-        let m2 = m.clone();
-        thread::spawn(move || {
-            inc(&m2);
-            tx2.send(()).unwrap();
-        });
-        let tx2 = tx.clone();
-        let m2 = m.clone();
-        thread::spawn(move || {
-            inc(&m2);
-            tx2.send(()).unwrap();
-        });
+nonpoison_and_poison_unwrap_test!(
+    name: test_get_mut,
+    test_body: {
+        use locks::Mutex;
+
+        let mut m = Mutex::new(NonCopy(10));
+        *maybe_unwrap(m.get_mut()) = NonCopy(20);
+        assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(20));
     }
+);
 
-    drop(tx);
-    for _ in 0..2 * K {
-        rx.recv().unwrap();
+nonpoison_and_poison_unwrap_test!(
+    name: test_get_cloned,
+    test_body: {
+        use locks::Mutex;
+
+        #[derive(Clone, Eq, PartialEq, Debug)]
+        struct Cloneable(i32);
+
+        let m = Mutex::new(Cloneable(10));
+
+        assert_eq!(maybe_unwrap(m.get_cloned()), Cloneable(10));
     }
-    assert_eq!(*m.lock().unwrap(), J * K * 2);
-}
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_set,
+    test_body: {
+        use locks::Mutex;
+
+        fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+        where
+            T: Debug + Eq,
+        {
+            let m = Mutex::new(init());
+
+            assert_eq!(*maybe_unwrap(m.lock()), init());
+            maybe_unwrap(m.set(value()));
+            assert_eq!(*maybe_unwrap(m.lock()), value());
+        }
+
+        inner(|| NonCopy(10), || NonCopy(20));
+        inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
+    }
+);
+
+// Ensure that old values that are replaced by `set` are correctly dropped.
+nonpoison_and_poison_unwrap_test!(
+    name: test_set_drop,
+    test_body: {
+        use locks::Mutex;
+
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = Mutex::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+
+        let different = Foo(Arc::new(AtomicUsize::new(42)));
+        maybe_unwrap(m.set(different));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_replace,
+    test_body: {
+        use locks::Mutex;
+
+        fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+        where
+            T: Debug + Eq,
+        {
+            let m = Mutex::new(init());
+
+            assert_eq!(*maybe_unwrap(m.lock()), init());
+            assert_eq!(maybe_unwrap(m.replace(value())), init());
+            assert_eq!(*maybe_unwrap(m.lock()), value());
+        }
 
+        inner(|| NonCopy(10), || NonCopy(20));
+        inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
+    }
+);
+
+// FIXME(nonpoison_condvar): Move this to the `condvar.rs` test file once `nonpoison::condvar` gets
+// implemented.
 #[test]
-fn try_lock() {
-    let m = Mutex::new(());
-    *m.try_lock().unwrap() = ();
+fn test_mutex_arc_condvar() {
+    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+
+    let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
+    let packet2 = Packet(packet.0.clone());
+
+    let (tx, rx) = channel();
+
+    let _t = thread::spawn(move || {
+        // Wait until our parent has taken the lock.
+        rx.recv().unwrap();
+        let &(ref lock, ref cvar) = &*packet2.0;
+
+        // Set the data to `true` and wake up our parent.
+        let mut guard = lock.lock().unwrap();
+        *guard = true;
+        cvar.notify_one();
+    });
+
+    let &(ref lock, ref cvar) = &*packet.0;
+    let mut guard = lock.lock().unwrap();
+    // Wake up our child.
+    tx.send(()).unwrap();
+
+    // Wait until our child has set the data to `true`.
+    assert!(!*guard);
+    while !*guard {
+        guard = cvar.wait(guard).unwrap();
+    }
 }
 
+nonpoison_and_poison_unwrap_test!(
+    name: test_mutex_arc_nested,
+    test_body: {
+        use locks::Mutex;
+
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = Arc::new(Mutex::new(arc));
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move || {
+            let lock = maybe_unwrap(arc2.lock());
+            let lock2 = maybe_unwrap(lock.lock());
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_mutex_unsized,
+    test_body: {
+        use locks::Mutex;
+
+        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
+        {
+            let b = &mut *maybe_unwrap(mutex.lock());
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*maybe_unwrap(mutex.lock()), comp);
+    }
+);
+
+nonpoison_and_poison_unwrap_test!(
+    name: test_mapping_mapped_guard,
+    test_body: {
+        use locks::{Mutex, MutexGuard, MappedMutexGuard};
+
+        let arr = [0; 4];
+        let lock = Mutex::new(arr);
+        let guard = maybe_unwrap(lock.lock());
+        let guard = MutexGuard::map(guard, |arr| &mut arr[..2]);
+        let mut guard = MappedMutexGuard::map(guard, |slice| &mut slice[1..]);
+        assert_eq!(guard.len(), 1);
+        guard[0] = 42;
+        drop(guard);
+        assert_eq!(*maybe_unwrap(lock.lock()), [0, 42, 0, 0]);
+    }
+);
+
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+nonpoison_and_poison_unwrap_test!(
+    name: test_panics,
+    test_body: {
+        use locks::Mutex;
+
+        let mutex = Mutex::new(42);
+
+        let catch_unwind_result1 = panic::catch_unwind(AssertUnwindSafe(|| {
+            let _guard1 = maybe_unwrap(mutex.lock());
+
+            panic!("test panic with mutex once");
+        }));
+        assert!(catch_unwind_result1.is_err());
+
+        let catch_unwind_result2 = panic::catch_unwind(AssertUnwindSafe(|| {
+            let _guard2 = maybe_unwrap(mutex.lock());
+
+            panic!("test panic with mutex twice");
+        }));
+        assert!(catch_unwind_result2.is_err());
+
+        let catch_unwind_result3 = panic::catch_unwind(AssertUnwindSafe(|| {
+            let _guard3 = maybe_unwrap(mutex.lock());
+
+            panic!("test panic with mutex thrice");
+        }));
+        assert!(catch_unwind_result3.is_err());
+    }
+);
+
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+nonpoison_and_poison_unwrap_test!(
+    name: test_mutex_arc_access_in_unwind,
+    test_body: {
+        use locks::Mutex;
+
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<Mutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *maybe_unwrap(self.i.lock()) += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = maybe_unwrap(arc.lock());
+        assert_eq!(*lock, 2);
+    }
+);
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Poison Tests
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Creates a mutex that is immediately poisoned.
 fn new_poisoned_mutex<T>(value: T) -> Mutex<T> {
     let mutex = Mutex::new(value);
 
@@ -94,30 +378,6 @@ fn new_poisoned_mutex<T>(value: T) -> Mutex<T> {
 }
 
 #[test]
-fn test_into_inner() {
-    let m = Mutex::new(NonCopy(10));
-    assert_eq!(m.into_inner().unwrap(), NonCopy(10));
-}
-
-#[test]
-fn test_into_inner_drop() {
-    struct Foo(Arc<AtomicUsize>);
-    impl Drop for Foo {
-        fn drop(&mut self) {
-            self.0.fetch_add(1, Ordering::SeqCst);
-        }
-    }
-    let num_drops = Arc::new(AtomicUsize::new(0));
-    let m = Mutex::new(Foo(num_drops.clone()));
-    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-    {
-        let _inner = m.into_inner().unwrap();
-        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
-    }
-    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_into_inner_poison() {
     let m = new_poisoned_mutex(NonCopy(10));
@@ -129,15 +389,11 @@ fn test_into_inner_poison() {
 }
 
 #[test]
-fn test_get_cloned() {
-    let m = Mutex::new(Cloneable(10));
-
-    assert_eq!(m.get_cloned().unwrap(), Cloneable(10));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_get_cloned_poison() {
+    #[derive(Clone, Eq, PartialEq, Debug)]
+    struct Cloneable(i32);
+
     let m = new_poisoned_mutex(Cloneable(10));
 
     match m.get_cloned() {
@@ -147,13 +403,6 @@ fn test_get_cloned_poison() {
 }
 
 #[test]
-fn test_get_mut() {
-    let mut m = Mutex::new(NonCopy(10));
-    *m.get_mut().unwrap() = NonCopy(20);
-    assert_eq!(m.into_inner().unwrap(), NonCopy(20));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_get_mut_poison() {
     let mut m = new_poisoned_mutex(NonCopy(10));
@@ -165,23 +414,6 @@ fn test_get_mut_poison() {
 }
 
 #[test]
-fn test_set() {
-    fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
-    where
-        T: Debug + Eq,
-    {
-        let m = Mutex::new(init());
-
-        assert_eq!(*m.lock().unwrap(), init());
-        m.set(value()).unwrap();
-        assert_eq!(*m.lock().unwrap(), value());
-    }
-
-    inner(|| NonCopy(10), || NonCopy(20));
-    inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_set_poison() {
     fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
@@ -204,23 +436,6 @@ fn test_set_poison() {
 }
 
 #[test]
-fn test_replace() {
-    fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
-    where
-        T: Debug + Eq,
-    {
-        let m = Mutex::new(init());
-
-        assert_eq!(*m.lock().unwrap(), init());
-        assert_eq!(m.replace(value()).unwrap(), init());
-        assert_eq!(*m.lock().unwrap(), value());
-    }
-
-    inner(|| NonCopy(10), || NonCopy(20));
-    inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_replace_poison() {
     fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
@@ -243,31 +458,10 @@ fn test_replace_poison() {
 }
 
 #[test]
-fn test_mutex_arc_condvar() {
-    let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
-    let packet2 = Packet(packet.0.clone());
-    let (tx, rx) = channel();
-    let _t = thread::spawn(move || {
-        // wait until parent gets in
-        rx.recv().unwrap();
-        let &(ref lock, ref cvar) = &*packet2.0;
-        let mut lock = lock.lock().unwrap();
-        *lock = true;
-        cvar.notify_one();
-    });
-
-    let &(ref lock, ref cvar) = &*packet.0;
-    let mut lock = lock.lock().unwrap();
-    tx.send(()).unwrap();
-    assert!(!*lock);
-    while !*lock {
-        lock = cvar.wait(lock).unwrap();
-    }
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn test_arc_condvar_poison() {
+    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+
     let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
     let packet2 = Packet(packet.0.clone());
     let (tx, rx) = channel();
@@ -327,69 +521,6 @@ fn test_mutex_arc_poison_mapped() {
 }
 
 #[test]
-fn test_mutex_arc_nested() {
-    // Tests nested mutexes and access
-    // to underlying data.
-    let arc = Arc::new(Mutex::new(1));
-    let arc2 = Arc::new(Mutex::new(arc));
-    let (tx, rx) = channel();
-    let _t = thread::spawn(move || {
-        let lock = arc2.lock().unwrap();
-        let lock2 = lock.lock().unwrap();
-        assert_eq!(*lock2, 1);
-        tx.send(()).unwrap();
-    });
-    rx.recv().unwrap();
-}
-
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_mutex_arc_access_in_unwind() {
-    let arc = Arc::new(Mutex::new(1));
-    let arc2 = arc.clone();
-    let _ = thread::spawn(move || -> () {
-        struct Unwinder {
-            i: Arc<Mutex<i32>>,
-        }
-        impl Drop for Unwinder {
-            fn drop(&mut self) {
-                *self.i.lock().unwrap() += 1;
-            }
-        }
-        let _u = Unwinder { i: arc2 };
-        panic!();
-    })
-    .join();
-    let lock = arc.lock().unwrap();
-    assert_eq!(*lock, 2);
-}
-
-#[test]
-fn test_mutex_unsized() {
-    let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
-    {
-        let b = &mut *mutex.lock().unwrap();
-        b[0] = 4;
-        b[2] = 5;
-    }
-    let comp: &[i32] = &[4, 2, 5];
-    assert_eq!(&*mutex.lock().unwrap(), comp);
-}
-
-#[test]
-fn test_mapping_mapped_guard() {
-    let arr = [0; 4];
-    let mut lock = Mutex::new(arr);
-    let guard = lock.lock().unwrap();
-    let guard = MutexGuard::map(guard, |arr| &mut arr[..2]);
-    let mut guard = MappedMutexGuard::map(guard, |slice| &mut slice[1..]);
-    assert_eq!(guard.len(), 1);
-    guard[0] = 42;
-    drop(guard);
-    assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
-}
-
-#[test]
 #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
 fn panic_while_mapping_unlocked_poison() {
     let lock = Mutex::new(());
diff --git a/library/std_detect/src/detect/macros.rs b/library/std_detect/src/detect/macros.rs
index c2a006d3753..17140e15653 100644
--- a/library/std_detect/src/detect/macros.rs
+++ b/library/std_detect/src/detect/macros.rs
@@ -131,14 +131,13 @@ macro_rules! features {
             };
         }
 
-        #[test] //tidy:skip
         #[deny(unexpected_cfgs)]
         #[deny(unfulfilled_lint_expectations)]
-        fn unexpected_cfgs() {
+        const _: () = {
             $(
                 check_cfg_feature!($feature, $feature_lit $(, without cfg check: $feature_cfg_check)? $(: $($target_feature_lit),*)?);
             )*
-        }
+        };
 
         /// Each variant denotes a position in a bitset for a particular feature.
         ///
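
Moving the checks from a `#[test]` into an anonymous constant means they now run on every compilation of the crate, not only under the test harness. A minimal sketch of the pattern:

```rust
// The body of an anonymous constant is type-checked, and lints fire, whenever
// the crate is compiled -- no test harness required.
#[deny(unexpected_cfgs)]
const _: () = {
    // Each `cfg!` is validated against `--check-cfg` at build time; an
    // unknown name would fail the build instead of a test run.
    let _ = cfg!(target_feature = "avx2");
    let _ = cfg!(target_feature = "neon");
};

fn main() {}
```
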
diff --git a/library/std_detect/src/detect/os/riscv.rs b/library/std_detect/src/detect/os/riscv.rs
index 46b7dd71eb3..dc9a4036d86 100644
--- a/library/std_detect/src/detect/os/riscv.rs
+++ b/library/std_detect/src/detect/os/riscv.rs
@@ -135,4 +135,5 @@ pub(crate) fn imply_features(mut value: cache::Initializer) -> cache::Initialize
 }
 
 #[cfg(test)]
+#[path = "riscv/tests.rs"]
 mod tests;
diff --git a/library/stdarch/crates/core_arch/src/wasm32/mod.rs b/library/stdarch/crates/core_arch/src/wasm32/mod.rs
index 2c4361f1639..60049c73295 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/mod.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/mod.rs
@@ -191,6 +191,16 @@ unsafe extern "C-unwind" {
 // #[cfg_attr(test, assert_instr(throw, TAG = 0, ptr = core::ptr::null_mut()))]
 #[inline]
 #[unstable(feature = "wasm_exception_handling_intrinsics", issue = "122465")]
+// FIXME: Since this instruction unwinds, `core` built with `-C panic=unwind`
+//        cannot be linked with `-C panic=abort` programs. But that's not
+//        entirely supported anyway, because runtimes without EH support won't
+//        be able to handle `try` blocks in `-C panic=unwind` crates either.
+//        We ship `-C panic=abort` `core`, so this doesn't affect users
+//        directly. Resolving this will likely require patching out both `try`
+//        and `throw` instructions, at which point we can look into whitelisting
+//        this function in the compiler to allow linking.
+//        See https://github.com/rust-lang/rust/issues/118168.
+#[allow(ffi_unwind_calls)]
 pub unsafe fn throw<const TAG: i32>(ptr: *mut u8) -> ! {
     static_assert!(TAG == 0); // LLVM only supports tag 0 == C++ right now.
     wasm_throw(TAG, ptr)
diff --git a/library/sysroot/Cargo.toml b/library/sysroot/Cargo.toml
index 032f5272a9c..7b4aeed94e9 100644
--- a/library/sysroot/Cargo.toml
+++ b/library/sysroot/Cargo.toml
@@ -6,6 +6,8 @@ version = "0.0.0"
 edition = "2024"
 
 [lib]
+test = false
+bench = false
 # make sure this crate isn't included in public standard library docs
 doc = false
 
@@ -23,9 +25,7 @@ backtrace = ["std/backtrace"]
 backtrace-trace-only = ["std/backtrace-trace-only"]
 compiler-builtins-c = ["std/compiler-builtins-c"]
 compiler-builtins-mem = ["std/compiler-builtins-mem"]
-compiler-builtins-no-asm = ["std/compiler-builtins-no-asm"]
 compiler-builtins-no-f16-f128 = ["std/compiler-builtins-no-f16-f128"]
-compiler-builtins-mangled-names = ["std/compiler-builtins-mangled-names"]
 debug_refcell = ["std/debug_refcell"]
 llvm-libunwind = ["std/llvm-libunwind"]
 system-llvm-libunwind = ["std/system-llvm-libunwind"]
diff --git a/library/windows_targets/Cargo.toml b/library/windows_targets/Cargo.toml
index 705c9e04381..1c804a0ab39 100644
--- a/library/windows_targets/Cargo.toml
+++ b/library/windows_targets/Cargo.toml
@@ -4,6 +4,11 @@ description = "A drop-in replacement for the real windows-targets crate for use
 version = "0.0.0"
 edition = "2024"
 
+[lib]
+test = false
+bench = false
+doc = false
+
 [features]
 # Enable using raw-dylib for Windows imports.
 # This will eventually be the default.
diff --git a/library/windows_targets/src/lib.rs b/library/windows_targets/src/lib.rs
index 9e82e6a7200..3446e2113dd 100644
--- a/library/windows_targets/src/lib.rs
+++ b/library/windows_targets/src/lib.rs
@@ -34,22 +34,12 @@ pub macro link_dylib {
 
 #[cfg(feature = "windows_raw_dylib")]
 pub macro link($($tt:tt)*) {
-    $crate::link_raw_dylib!($($tt)*)
+    $crate::link_raw_dylib!($($tt)*);
 }
 
 #[cfg(not(feature = "windows_raw_dylib"))]
-pub macro link {
-    ($library:literal $abi:literal $($link_name:literal)? $(#[$doc:meta])? fn $($function:tt)*) => (
-        // Note: the windows-targets crate uses a pre-built Windows.lib import library which we don't
-        // have in this repo. So instead we always link kernel32.lib and add the rest of the import
-        // libraries below by using an empty extern block. This works because extern blocks are not
-        // connected to the library given in the #[link] attribute.
-        #[link(name = "kernel32")]
-        unsafe extern $abi {
-            $(#[link_name=$link_name])?
-            pub fn $($function)*;
-        }
-    )
+pub macro link($($tt:tt)*) {
+    $crate::link_dylib!($($tt)*);
 }
 
 #[cfg(not(feature = "windows_raw_dylib"))]