Diffstat (limited to 'library/alloc/src')
 library/alloc/src/alloc.rs                          |  26
 library/alloc/src/boxed.rs                          |  34
 library/alloc/src/boxed/thin.rs                     |  47
 library/alloc/src/collections/binary_heap/tests.rs  | 102
 library/alloc/src/collections/btree/map/tests.rs    |   6
 library/alloc/src/collections/btree/mod.rs          |   3
 library/alloc/src/collections/btree/node.rs         |  28
 library/alloc/src/collections/btree/set/tests.rs    |   4
 library/alloc/src/collections/linked_list/tests.rs  |  69
 library/alloc/src/collections/vec_deque/mod.rs      |  17
 library/alloc/src/lib.rs                            |  28
 library/alloc/src/rc.rs                             |   6
 library/alloc/src/slice.rs                          |   5
 library/alloc/src/slice/tests.rs                    | 359
 library/alloc/src/string.rs                         |  13
 library/alloc/src/sync.rs                           |   6
 library/alloc/src/testing/crash_test.rs (renamed from library/alloc/src/collections/btree/testing/crash_test.rs) | 0
 library/alloc/src/testing/mod.rs (renamed from library/alloc/src/collections/btree/testing/mod.rs) | 0
 library/alloc/src/testing/ord_chaos.rs (renamed from library/alloc/src/collections/btree/testing/ord_chaos.rs) | 0
 library/alloc/src/testing/rng.rs (renamed from library/alloc/src/collections/btree/testing/rng.rs) | 0
 library/alloc/src/vec/into_iter.rs                  |  23
 library/alloc/src/vec/mod.rs                        |  11
 22 files changed, 617 insertions(+), 170 deletions(-)
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index e5fbfc55761..fe6de1cf879 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -23,7 +23,7 @@ extern "Rust" {
     // These are the magic symbols to call the global allocator.  rustc generates
     // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
     // (the code expanding that attribute macro generates those functions), or to call
-    // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+    // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
     // otherwise.
     // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
     // like `malloc`, `realloc`, and `free`, respectively.
@@ -402,20 +402,20 @@ pub mod __alloc_error_handler {
     // `#[alloc_error_handler]`.
     #[rustc_std_internal_symbol]
     pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
-        panic!("memory allocation of {size} bytes failed")
-    }
-
-    #[cfg(bootstrap)]
-    #[rustc_std_internal_symbol]
-    pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
-        use crate::alloc::Layout;
-
-        let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
         extern "Rust" {
-            #[lang = "oom"]
-            fn oom_impl(layout: Layout) -> !;
+            // This symbol is emitted by rustc next to __rust_alloc_error_handler.
+            // Its value depends on the -Zoom={panic,abort} compiler option.
+            static __rust_alloc_error_handler_should_panic: u8;
+        }
+
+        #[allow(unused_unsafe)]
+        if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+            panic!("memory allocation of {size} bytes failed")
+        } else {
+            core::panicking::panic_nounwind_fmt(format_args!(
+                "memory allocation of {size} bytes failed"
+            ))
         }
-        unsafe { oom_impl(layout) }
     }
 }
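The rewritten default handler above dispatches on `__rust_alloc_error_handler_should_panic`: with the (default) `-Zoom=abort` it raises a non-unwinding panic, while `-Zoom=panic` lets the allocation-failure panic unwind. A minimal sketch of the observable difference, assuming a nightly toolchain compiled with the unstable `-Zoom=panic` flag:

    use std::alloc::{handle_alloc_error, Layout};
    use std::panic;

    fn main() {
        // `handle_alloc_error` routes into the default handler shown above.
        // Under `-Zoom=panic` the resulting panic unwinds and can be caught;
        // under the default `-Zoom=abort` this process would abort instead.
        let layout = Layout::new::<[u8; 1024]>();
        let caught = panic::catch_unwind(|| handle_alloc_error(layout));
        assert!(caught.is_err());
    }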
 
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index e5f6b0c0c65..a563b258723 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -158,7 +158,6 @@ use core::hash::{Hash, Hasher};
 #[cfg(not(no_global_oom_handling))]
 use core::iter::FromIterator;
 use core::iter::{FusedIterator, Iterator};
-#[cfg(not(bootstrap))]
 use core::marker::Tuple;
 use core::marker::{Destruct, Unpin, Unsize};
 use core::mem;
@@ -954,7 +953,7 @@ impl<T: ?Sized> Box<T> {
     /// [`Layout`]: crate::Layout
     #[stable(feature = "box_raw", since = "1.4.0")]
     #[inline]
-    #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"]
+    #[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"]
     pub unsafe fn from_raw(raw: *mut T) -> Self {
         unsafe { Self::from_raw_in(raw, Global) }
     }
@@ -1981,17 +1980,6 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
 #[stable(feature = "fused", since = "1.26.0")]
 impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
 
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
-    type Output = <F as FnOnce<Args>>::Output;
-
-    extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
-        <F as FnOnce<Args>>::call_once(*self, args)
-    }
-}
-
-#[cfg(not(bootstrap))]
 #[stable(feature = "boxed_closure_impls", since = "1.35.0")]
 impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
     type Output = <F as FnOnce<Args>>::Output;
@@ -2001,15 +1989,6 @@ impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F
     }
 }
 
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
-    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
-        <F as FnMut<Args>>::call_mut(self, args)
-    }
-}
-
-#[cfg(not(bootstrap))]
 #[stable(feature = "boxed_closure_impls", since = "1.35.0")]
 impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
     extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
@@ -2017,15 +1996,6 @@ impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F,
     }
 }
 
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
-    extern "rust-call" fn call(&self, args: Args) -> Self::Output {
-        <F as Fn<Args>>::call(self, args)
-    }
-}
-
-#[cfg(not(bootstrap))]
 #[stable(feature = "boxed_closure_impls", since = "1.35.0")]
 impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
     extern "rust-call" fn call(&self, args: Args) -> Self::Output {
@@ -2033,7 +2003,7 @@ impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
     }
 }
 
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
 
 #[unstable(feature = "dispatch_from_dyn", issue = "none")]
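The surviving impls keep boxed closures directly callable; the deleted `#[cfg(bootstrap)]` twins differed only in lacking the new `Args: Tuple` bound. A small usage sketch of what these impls provide on any recent stable toolchain:

    fn main() {
        let add_one: Box<dyn Fn(i32) -> i32> = Box::new(|x| x + 1);
        // `Fn` is implemented for `Box<F>` itself, so the box is called directly.
        assert_eq!(add_one(41), 42);

        let once: Box<dyn FnOnce() -> String> = Box::new(|| String::from("boxed"));
        // `FnOnce` consumes the box and forwards the call to the inner closure.
        assert_eq!(once(), "boxed");
    }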
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index c477c44906c..c1a82e452f6 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -226,24 +226,45 @@ impl<H> WithHeader<H> {
     // - Assumes that either `value` can be dereferenced, or is the
     //   `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
     unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
+        struct DropGuard<H> {
+            ptr: NonNull<u8>,
+            value_layout: Layout,
+            _marker: PhantomData<H>,
+        }
+
+        impl<H> Drop for DropGuard<H> {
+            fn drop(&mut self) {
+                unsafe {
+                    // SAFETY: Layout must have been computable if we're in drop
+                    let (layout, value_offset) =
+                        WithHeader::<H>::alloc_layout(self.value_layout).unwrap_unchecked();
+
+                    // Note: Don't deallocate if the layout size is zero, because the pointer
+                    // didn't come from the allocator.
+                    if layout.size() != 0 {
+                        alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
+                    } else {
+                        debug_assert!(
+                            value_offset == 0
+                                && mem::size_of::<H>() == 0
+                                && self.value_layout.size() == 0
+                        );
+                    }
+                }
+            }
+        }
+
         unsafe {
-            let value_layout = Layout::for_value_raw(value);
-            // SAFETY: Layout must have been computable if we're in drop
-            let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked();
+            // `_guard` will deallocate the memory when dropped, even if `drop_in_place` unwinds.
+            let _guard = DropGuard {
+                ptr: self.0,
+                value_layout: Layout::for_value_raw(value),
+                _marker: PhantomData::<H>,
+            };
 
             // We only drop the value because the Pointee trait requires that the metadata is copy
             // aka trivially droppable.
             ptr::drop_in_place::<T>(value);
-
-            // Note: Don't deallocate if the layout size is zero, because the pointer
-            // didn't come from the allocator.
-            if layout.size() != 0 {
-                alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
-            } else {
-                debug_assert!(
-                    value_offset == 0 && mem::size_of::<H>() == 0 && value_layout.size() == 0
-                );
-            }
         }
     }
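The `DropGuard` above guarantees the allocation is freed even if `ptr::drop_in_place` unwinds. A stand-alone sketch of the same guard pattern, using hypothetical names rather than the actual `ThinBox` internals:

    /// Drops `*resource` and always runs `cleanup`, even if the destructor panics.
    unsafe fn drop_then_cleanup<T>(resource: *mut T, cleanup: impl FnOnce()) {
        struct Guard<F: FnOnce()>(Option<F>);
        impl<F: FnOnce()> Drop for Guard<F> {
            fn drop(&mut self) {
                // Runs on both the normal return path and the unwinding path.
                if let Some(f) = self.0.take() {
                    f();
                }
            }
        }
        let _guard = Guard(Some(cleanup));
        // If this panics, `_guard` is dropped during unwinding and the cleanup
        // still runs -- the same reason `WithHeader::drop` installs its
        // `DropGuard` before calling `ptr::drop_in_place`.
        unsafe { std::ptr::drop_in_place(resource) };
    }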
 
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
index 5a05215aeed..59c516374c0 100644
--- a/library/alloc/src/collections/binary_heap/tests.rs
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -1,8 +1,8 @@
 use super::*;
 use crate::boxed::Box;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
 use std::iter::TrustedLen;
 use std::panic::{catch_unwind, AssertUnwindSafe};
-use std::sync::atomic::{AtomicU32, Ordering};
 
 #[test]
 fn test_iterator() {
@@ -291,33 +291,83 @@ fn test_drain_sorted() {
 
 #[test]
 fn test_drain_sorted_leak() {
-    static DROPS: AtomicU32 = AtomicU32::new(0);
-
-    #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
-    struct D(u32, bool);
-
-    impl Drop for D {
-        fn drop(&mut self) {
-            DROPS.fetch_add(1, Ordering::SeqCst);
-
-            if self.1 {
-                panic!("panic in `drop`");
-            }
-        }
-    }
-
+    let d0 = CrashTestDummy::new(0);
+    let d1 = CrashTestDummy::new(1);
+    let d2 = CrashTestDummy::new(2);
+    let d3 = CrashTestDummy::new(3);
+    let d4 = CrashTestDummy::new(4);
+    let d5 = CrashTestDummy::new(5);
     let mut q = BinaryHeap::from(vec![
-        D(0, false),
-        D(1, false),
-        D(2, false),
-        D(3, true),
-        D(4, false),
-        D(5, false),
+        d0.spawn(Panic::Never),
+        d1.spawn(Panic::Never),
+        d2.spawn(Panic::Never),
+        d3.spawn(Panic::InDrop),
+        d4.spawn(Panic::Never),
+        d5.spawn(Panic::Never),
     ]);
 
-    catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).ok();
+    catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).unwrap_err();
+
+    assert_eq!(d0.dropped(), 1);
+    assert_eq!(d1.dropped(), 1);
+    assert_eq!(d2.dropped(), 1);
+    assert_eq!(d3.dropped(), 1);
+    assert_eq!(d4.dropped(), 1);
+    assert_eq!(d5.dropped(), 1);
+    assert!(q.is_empty());
+}
 
-    assert_eq!(DROPS.load(Ordering::SeqCst), 6);
+#[test]
+fn test_drain_forget() {
+    let a = CrashTestDummy::new(0);
+    let b = CrashTestDummy::new(1);
+    let c = CrashTestDummy::new(2);
+    let mut q =
+        BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
+
+    catch_unwind(AssertUnwindSafe(|| {
+        let mut it = q.drain();
+        it.next();
+        mem::forget(it);
+    }))
+    .unwrap();
+    // Behaviour after leaking is explicitly unspecified and order is arbitrary,
+    // so it's fine if these start failing, but probably worth knowing.
+    assert!(q.is_empty());
+    assert_eq!(a.dropped() + b.dropped() + c.dropped(), 1);
+    assert_eq!(a.dropped(), 0);
+    assert_eq!(b.dropped(), 0);
+    assert_eq!(c.dropped(), 1);
+    drop(q);
+    assert_eq!(a.dropped(), 0);
+    assert_eq!(b.dropped(), 0);
+    assert_eq!(c.dropped(), 1);
+}
+
+#[test]
+fn test_drain_sorted_forget() {
+    let a = CrashTestDummy::new(0);
+    let b = CrashTestDummy::new(1);
+    let c = CrashTestDummy::new(2);
+    let mut q =
+        BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
+
+    catch_unwind(AssertUnwindSafe(|| {
+        let mut it = q.drain_sorted();
+        it.next();
+        mem::forget(it);
+    }))
+    .unwrap();
+    // Behaviour after leaking is explicitly unspecified,
+    // so it's fine if these start failing, but probably worth knowing.
+    assert_eq!(q.len(), 2);
+    assert_eq!(a.dropped(), 0);
+    assert_eq!(b.dropped(), 0);
+    assert_eq!(c.dropped(), 1);
+    drop(q);
+    assert_eq!(a.dropped(), 1);
+    assert_eq!(b.dropped(), 1);
+    assert_eq!(c.dropped(), 1);
 }
 
 #[test]
@@ -415,7 +465,7 @@ fn test_retain() {
 #[test]
 #[cfg(not(target_os = "emscripten"))]
 fn panic_safe() {
-    use rand::{seq::SliceRandom, thread_rng};
+    use rand::seq::SliceRandom;
     use std::cmp;
     use std::panic::{self, AssertUnwindSafe};
     use std::sync::atomic::{AtomicUsize, Ordering};
@@ -440,7 +490,7 @@ fn panic_safe() {
             self.0.partial_cmp(&other.0)
         }
     }
-    let mut rng = thread_rng();
+    let mut rng = crate::test_helpers::test_rng();
     const DATASZ: usize = 32;
     // Miri is too slow
     let ntest = if cfg!(miri) { 1 } else { 10 };
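These tests now share `CrashTestDummy` from the relocated `testing` module instead of defining ad-hoc `D` structs with static counters. Roughly, each dummy hands out `Instance` values whose drops are counted and can be made to panic; a simplified sketch of that shape (the real helper in `testing/crash_test.rs` also tracks clones and comparisons):

    use std::sync::atomic::{AtomicUsize, Ordering};

    enum Panic { Never, InDrop }

    struct CrashTestDummy { id: usize, dropped: AtomicUsize }

    impl CrashTestDummy {
        fn new(id: usize) -> Self {
            CrashTestDummy { id, dropped: AtomicUsize::new(0) }
        }
        // Creates a droppable instance tied to this dummy's counter.
        fn spawn(&self, panic: Panic) -> Instance<'_> {
            Instance { origin: self, panic }
        }
        fn dropped(&self) -> usize {
            self.dropped.load(Ordering::SeqCst)
        }
    }

    struct Instance<'a> { origin: &'a CrashTestDummy, panic: Panic }

    impl Drop for Instance<'_> {
        fn drop(&mut self) {
            self.origin.dropped.fetch_add(1, Ordering::SeqCst);
            if matches!(self.panic, Panic::InDrop) {
                panic!("panic in `drop` of dummy {}", self.origin.id);
            }
        }
    }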
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 4c372b1d60a..700b1463bfd 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -1,12 +1,12 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::ord_chaos::{Cyclic3, Governed, Governor};
-use super::super::testing::rng::DeterministicRng;
 use super::Entry::{Occupied, Vacant};
 use super::*;
 use crate::boxed::Box;
 use crate::fmt::Debug;
 use crate::rc::Rc;
 use crate::string::{String, ToString};
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
+use crate::testing::rng::DeterministicRng;
 use crate::vec::Vec;
 use std::cmp::Ordering;
 use std::convert::TryFrom;
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
index 9d43ac5c5be..7552f2fc04c 100644
--- a/library/alloc/src/collections/btree/mod.rs
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -21,6 +21,3 @@ trait Recover<Q: ?Sized> {
     fn take(&mut self, key: &Q) -> Option<Self::Key>;
     fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
 }
-
-#[cfg(test)]
-mod testing;
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index da766b67a32..6912466448f 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -318,7 +318,10 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
     pub fn ascend(
         self,
     ) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
-        let _ = BorrowType::TRAVERSAL_PERMIT;
+        const {
+            assert!(BorrowType::TRAVERSAL_PERMIT);
+        }
+
         // We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
         // there might be outstanding mutable references to values that we must not invalidate.
         let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1006,10 @@ impl<BorrowType: marker::BorrowType, K, V>
     /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
     /// both, upon success, do nothing.
     pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
-        let _ = BorrowType::TRAVERSAL_PERMIT;
+        const {
+            assert!(BorrowType::TRAVERSAL_PERMIT);
+        }
+
         // We need to use raw pointers to nodes because, if BorrowType is
         // marker::ValMut, there might be outstanding mutable references to
         // values that we must not invalidate. There's no worry accessing the
@@ -1666,17 +1672,17 @@ pub mod marker {
     pub struct ValMut<'a>(PhantomData<&'a mut ()>);
 
     pub trait BorrowType {
-        // If node references of this borrow type allow traversing to other
-        // nodes in the tree, this constant can be evaluated. Thus reading it
-        // serves as a compile-time assertion.
-        const TRAVERSAL_PERMIT: () = ();
+        /// If node references of this borrow type allow traversing to other
+        /// nodes in the tree, this constant is set to `true`. It can be used
+        /// for a compile-time assertion.
+        const TRAVERSAL_PERMIT: bool = true;
     }
     impl BorrowType for Owned {
-        // Reject evaluation, because traversal isn't needed. Instead traversal
-        // happens using the result of `borrow_mut`.
-        // By disabling traversal, and only creating new references to roots,
-        // we know that every reference of the `Owned` type is to a root node.
-        const TRAVERSAL_PERMIT: () = panic!();
+        /// Reject traversal, because it isn't needed. Instead traversal
+        /// happens using the result of `borrow_mut`.
+        /// By disabling traversal, and only creating new references to roots,
+        /// we know that every reference of the `Owned` type is to a root node.
+        const TRAVERSAL_PERMIT: bool = false;
     }
     impl BorrowType for Dying {}
     impl<'a> BorrowType for Immut<'a> {}
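Switching `TRAVERSAL_PERMIT` to a `bool` checked inside `const { ... }` turns misuse into a hard compile error, instead of relying on the side effect of evaluating a panicking constant. The same trick in isolation, assuming a nightly of this era with `feature(inline_const)` (later toolchains accept const blocks without the attribute):

    #![feature(inline_const)]

    trait Marker {
        const ALLOWED: bool;
    }

    struct Yes;
    impl Marker for Yes { const ALLOWED: bool = true; }

    struct No;
    impl Marker for No { const ALLOWED: bool = false; }

    fn requires_permission<M: Marker>() {
        // Evaluated at compile time for each monomorphization: instantiating
        // with `No` fails to compile, the same way `ascend`/`descend` now
        // reject `Owned` node references.
        const { assert!(M::ALLOWED) };
    }

    fn main() {
        requires_permission::<Yes>(); // fine
        // requires_permission::<No>(); // compile-time error
    }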
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index 502d3e1d126..7b8d41a6031 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -1,6 +1,6 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::rng::DeterministicRng;
 use super::*;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::rng::DeterministicRng;
 use crate::vec::Vec;
 use std::cmp::Ordering;
 use std::hash::{Hash, Hasher};
diff --git a/library/alloc/src/collections/linked_list/tests.rs b/library/alloc/src/collections/linked_list/tests.rs
index f8fbfa1bfbc..04594d55b6a 100644
--- a/library/alloc/src/collections/linked_list/tests.rs
+++ b/library/alloc/src/collections/linked_list/tests.rs
@@ -1,10 +1,11 @@
 use super::*;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
 use crate::vec::Vec;
 
 use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::thread;
 
-use rand::{thread_rng, RngCore};
+use rand::RngCore;
 
 #[test]
 fn test_basic() {
@@ -480,12 +481,12 @@ fn test_split_off_2() {
     }
 }
 
-fn fuzz_test(sz: i32) {
+fn fuzz_test(sz: i32, rng: &mut impl RngCore) {
     let mut m: LinkedList<_> = LinkedList::new();
     let mut v = vec![];
     for i in 0..sz {
         check_links(&m);
-        let r: u8 = thread_rng().next_u32() as u8;
+        let r: u8 = rng.next_u32() as u8;
         match r % 6 {
             0 => {
                 m.pop_back();
@@ -520,11 +521,12 @@ fn fuzz_test(sz: i32) {
 
 #[test]
 fn test_fuzz() {
+    let mut rng = crate::test_helpers::test_rng();
     for _ in 0..25 {
-        fuzz_test(3);
-        fuzz_test(16);
+        fuzz_test(3, &mut rng);
+        fuzz_test(16, &mut rng);
         #[cfg(not(miri))] // Miri is too slow
-        fuzz_test(189);
+        fuzz_test(189, &mut rng);
     }
 }
 
@@ -984,35 +986,34 @@ fn drain_filter_complex() {
 
 #[test]
 fn drain_filter_drop_panic_leak() {
-    static mut DROPS: i32 = 0;
-
-    struct D(bool);
-
-    impl Drop for D {
-        fn drop(&mut self) {
-            unsafe {
-                DROPS += 1;
-            }
-
-            if self.0 {
-                panic!("panic in `drop`");
-            }
-        }
-    }
-
+    let d0 = CrashTestDummy::new(0);
+    let d1 = CrashTestDummy::new(1);
+    let d2 = CrashTestDummy::new(2);
+    let d3 = CrashTestDummy::new(3);
+    let d4 = CrashTestDummy::new(4);
+    let d5 = CrashTestDummy::new(5);
+    let d6 = CrashTestDummy::new(6);
+    let d7 = CrashTestDummy::new(7);
     let mut q = LinkedList::new();
-    q.push_back(D(false));
-    q.push_back(D(false));
-    q.push_back(D(false));
-    q.push_back(D(false));
-    q.push_back(D(false));
-    q.push_front(D(false));
-    q.push_front(D(true));
-    q.push_front(D(false));
-
-    catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).ok();
-
-    assert_eq!(unsafe { DROPS }, 8);
+    q.push_back(d3.spawn(Panic::Never));
+    q.push_back(d4.spawn(Panic::Never));
+    q.push_back(d5.spawn(Panic::Never));
+    q.push_back(d6.spawn(Panic::Never));
+    q.push_back(d7.spawn(Panic::Never));
+    q.push_front(d2.spawn(Panic::Never));
+    q.push_front(d1.spawn(Panic::InDrop));
+    q.push_front(d0.spawn(Panic::Never));
+
+    catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).unwrap_err();
+
+    assert_eq!(d0.dropped(), 1);
+    assert_eq!(d1.dropped(), 1);
+    assert_eq!(d2.dropped(), 1);
+    assert_eq!(d3.dropped(), 1);
+    assert_eq!(d4.dropped(), 1);
+    assert_eq!(d5.dropped(), 1);
+    assert_eq!(d6.dropped(), 1);
+    assert_eq!(d7.dropped(), 1);
     assert!(q.is_empty());
 }
 
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index be615b70ced..451e4936bc5 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -535,12 +535,13 @@ impl<T> VecDeque<T> {
     ///
     /// let deque: VecDeque<u32> = VecDeque::new();
     /// ```
-    // FIXME: This should probably be const
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_stable(feature = "const_vec_deque_new", since = "CURRENT_RUSTC_VERSION")]
     #[must_use]
-    pub fn new() -> VecDeque<T> {
-        VecDeque::new_in(Global)
+    pub const fn new() -> VecDeque<T> {
+        // FIXME: This should just be `VecDeque::new_in(Global)` once that hits stable.
+        VecDeque { head: 0, len: 0, buf: RawVec::NEW }
     }
 
     /// Creates an empty deque with space for at least `capacity` elements.
@@ -635,6 +636,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     /// buf.push_back(3);
     /// buf.push_back(4);
     /// buf.push_back(5);
+    /// buf.push_back(6);
     /// assert_eq!(buf.get(1), Some(&4));
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -660,10 +662,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
     /// buf.push_back(3);
     /// buf.push_back(4);
     /// buf.push_back(5);
+    /// buf.push_back(6);
+    /// assert_eq!(buf[1], 4);
     /// if let Some(elem) = buf.get_mut(1) {
     ///     *elem = 7;
     /// }
-    ///
     /// assert_eq!(buf[1], 7);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -2820,9 +2823,9 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
     /// [`Vec<T>`]: crate::vec::Vec
     /// [`VecDeque<T>`]: crate::collections::VecDeque
     ///
-    /// In its current implementation, this is a very cheap
-    /// conversion. This isn't yet a guarantee though, and
-    /// shouldn't be relied on.
+    /// This conversion is guaranteed to run in *O*(1) time
+    /// and to not re-allocate the `Vec`'s buffer or allocate
+    /// any additional memory.
     #[inline]
     fn from(other: Vec<T, A>) -> Self {
         let (ptr, len, cap, alloc) = other.into_raw_parts_with_alloc();
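Two user-visible changes land in this file: `VecDeque::new` becomes a `const fn`, and `From<Vec<T>>` is now documented as a guaranteed *O*(1), non-reallocating conversion. A small sketch of both, assuming a toolchain where the constness has reached stable:

    use std::collections::VecDeque;

    // `new` being const allows deques in const and static items.
    static EMPTY: VecDeque<i32> = VecDeque::new();

    fn main() {
        assert!(EMPTY.is_empty());

        // The Vec -> VecDeque conversion reuses the buffer: O(1), no copy.
        let v = vec![1, 2, 3];
        let dq = VecDeque::from(v);
        assert!(dq.iter().eq([1, 2, 3].iter()));
    }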
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 96960d43f58..4e812529c2c 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -3,7 +3,7 @@
 //! This library provides smart pointers and collections for managing
 //! heap-allocated values.
 //!
-//! This library, like libcore, normally doesn’t need to be used directly
+//! This library, like core, normally doesn’t need to be used directly
 //! since its contents are re-exported in the [`std` crate](../std/index.html).
 //! Crates that use the `#![no_std]` attribute however will typically
 //! not depend on `std`, so they’d use this crate instead.
@@ -75,7 +75,7 @@
 ))]
 #![no_std]
 #![needs_allocator]
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
 // able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
 #![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -110,6 +110,7 @@
 #![feature(const_maybe_uninit_as_mut_ptr)]
 #![feature(const_refs_to_cell)]
 #![feature(core_intrinsics)]
+#![feature(core_panic)]
 #![feature(const_eval_select)]
 #![feature(const_pin)]
 #![feature(const_waker)]
@@ -122,6 +123,7 @@
 #![feature(fmt_internals)]
 #![feature(fn_traits)]
 #![feature(hasher_prefixfree_extras)]
+#![feature(inline_const)]
 #![feature(inplace_iteration)]
 #![feature(iter_advance_by)]
 #![feature(iter_next_chunk)]
@@ -152,7 +154,7 @@
 #![feature(trusted_len)]
 #![feature(trusted_random_access)]
 #![feature(try_trait_v2)]
-#![cfg_attr(not(bootstrap), feature(tuple_trait))]
+#![feature(tuple_trait)]
 #![feature(unchecked_math)]
 #![feature(unicode_internals)]
 #![feature(unsize)]
@@ -190,6 +192,7 @@
 #![feature(unsized_fn_params)]
 #![feature(c_unwind)]
 #![feature(with_negative_coherence)]
+#![cfg_attr(test, feature(panic_update_hook))]
 //
 // Rustdoc features:
 #![feature(doc_cfg)]
@@ -206,6 +209,8 @@
 extern crate std;
 #[cfg(test)]
 extern crate test;
+#[cfg(test)]
+mod testing;
 
 // Module with internal macros used by other modules (needs to be included before other modules).
 #[macro_use]
@@ -251,3 +256,20 @@ pub mod vec;
 pub mod __export {
     pub use core::format_args;
 }
+
+#[cfg(test)]
+#[allow(dead_code)] // Not used in all configurations
+pub(crate) mod test_helpers {
+    /// Copied from `std::test_helpers::test_rng`, since these tests rely on the
+    /// seed not being the same for every RNG invocation too.
+    pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+        use std::hash::{BuildHasher, Hash, Hasher};
+        let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+        std::panic::Location::caller().hash(&mut hasher);
+        let hc64 = hasher.finish();
+        let seed_vec =
+            hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
+        let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+        rand::SeedableRng::from_seed(seed)
+    }
+}
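Callers are expected to create the RNG once per test and thread it through (as the linked-list fuzz tests above now do), rather than re-seeding inside loops. A minimal hedged usage sketch:

    #[test]
    fn example_usage() {
        use rand::RngCore;
        // One RNG per test: helpers take `&mut impl RngCore`, so a run's
        // behavior stays attributable to a single seed.
        let mut rng = crate::test_helpers::test_rng();
        let byte = rng.next_u32() as u8;
        let _ = byte;
    }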
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 38e31b1802a..c1d853ed652 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -336,7 +336,7 @@ impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
 #[stable(feature = "rc_ref_unwind_safe", since = "1.58.0")]
 impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for Rc<T> {}
 
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
 
 #[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -2190,7 +2190,7 @@ impl<T: ?Sized> !marker::Send for Weak<T> {}
 #[stable(feature = "rc_weak", since = "1.4.0")]
 impl<T: ?Sized> !marker::Sync for Weak<T> {}
 
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
 
 #[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -2561,7 +2561,7 @@ impl<T: ?Sized> Clone for Weak<T> {
 }
 
 #[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized> fmt::Debug for Weak<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(Weak)")
     }
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 1b61ede3476..e9886fc5717 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -28,6 +28,9 @@ use crate::borrow::ToOwned;
 use crate::boxed::Box;
 use crate::vec::Vec;
 
+#[cfg(test)]
+mod tests;
+
 #[unstable(feature = "slice_range", issue = "76393")]
 pub use core::slice::range;
 #[unstable(feature = "array_chunks", issue = "74985")]
@@ -653,7 +656,7 @@ impl [u8] {
 ///
 /// ```error
 /// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predica
-///    --> src/liballoc/slice.rs:608:6
+///    --> library/alloc/src/slice.rs:608:6
 ///     |
 /// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
 ///     |      ^ unconstrained type parameter
diff --git a/library/alloc/src/slice/tests.rs b/library/alloc/src/slice/tests.rs
new file mode 100644
index 00000000000..f674530aaa5
--- /dev/null
+++ b/library/alloc/src/slice/tests.rs
@@ -0,0 +1,359 @@
+use crate::borrow::ToOwned;
+use crate::rc::Rc;
+use crate::string::ToString;
+use crate::test_helpers::test_rng;
+use crate::vec::Vec;
+
+use core::cell::Cell;
+use core::cmp::Ordering::{self, Equal, Greater, Less};
+use core::convert::identity;
+use core::fmt;
+use core::mem;
+use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use rand::{distributions::Standard, prelude::*, Rng, RngCore};
+use std::panic;
+
+macro_rules! do_test {
+    ($input:ident, $func:ident) => {
+        let len = $input.len();
+
+        // Work out the total number of comparisons required to sort
+        // this array...
+        let mut count = 0usize;
+        $input.to_owned().$func(|a, b| {
+            count += 1;
+            a.cmp(b)
+        });
+
+        // ... and then panic on each and every single one.
+        for panic_countdown in 0..count {
+            // Refresh the counters.
+            VERSIONS.store(0, Relaxed);
+            for i in 0..len {
+                DROP_COUNTS[i].store(0, Relaxed);
+            }
+
+            let v = $input.to_owned();
+            let _ = std::panic::catch_unwind(move || {
+                let mut v = v;
+                let mut panic_countdown = panic_countdown;
+                v.$func(|a, b| {
+                    if panic_countdown == 0 {
+                        SILENCE_PANIC.with(|s| s.set(true));
+                        panic!();
+                    }
+                    panic_countdown -= 1;
+                    a.cmp(b)
+                })
+            });
+
+            // Check that the number of things dropped is exactly
+            // what we expect (i.e., the contents of `v`).
+            for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+                let count = c.load(Relaxed);
+                assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
+            }
+
+            // Check that the most recent versions of values were dropped.
+            assert_eq!(VERSIONS.load(Relaxed), 0);
+        }
+    };
+}
+
+const MAX_LEN: usize = 80;
+
+static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
+    // FIXME(RFC 1109): AtomicUsize is not Copy.
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+    AtomicUsize::new(0),
+];
+
+static VERSIONS: AtomicUsize = AtomicUsize::new(0);
+
+#[derive(Clone, Eq)]
+struct DropCounter {
+    x: u32,
+    id: usize,
+    version: Cell<usize>,
+}
+
+impl PartialEq for DropCounter {
+    fn eq(&self, other: &Self) -> bool {
+        self.partial_cmp(other) == Some(Ordering::Equal)
+    }
+}
+
+impl PartialOrd for DropCounter {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        self.version.set(self.version.get() + 1);
+        other.version.set(other.version.get() + 1);
+        VERSIONS.fetch_add(2, Relaxed);
+        self.x.partial_cmp(&other.x)
+    }
+}
+
+impl Ord for DropCounter {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.partial_cmp(other).unwrap()
+    }
+}
+
+impl Drop for DropCounter {
+    fn drop(&mut self) {
+        DROP_COUNTS[self.id].fetch_add(1, Relaxed);
+        VERSIONS.fetch_sub(self.version.get(), Relaxed);
+    }
+}
+
+std::thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)] // no threads
+fn panic_safe() {
+    panic::update_hook(move |prev, info| {
+        if !SILENCE_PANIC.with(|s| s.get()) {
+            prev(info);
+        }
+    });
+
+    let mut rng = test_rng();
+
+    // Miri is too slow (but still need to `chain` to make the types match)
+    let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
+    let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
+
+    for len in lens {
+        for &modulus in moduli {
+            for &has_runs in &[false, true] {
+                let mut input = (0..len)
+                    .map(|id| DropCounter {
+                        x: rng.next_u32() % modulus,
+                        id: id,
+                        version: Cell::new(0),
+                    })
+                    .collect::<Vec<_>>();
+
+                if has_runs {
+                    for c in &mut input {
+                        c.x = c.id as u32;
+                    }
+
+                    for _ in 0..5 {
+                        let a = rng.gen::<usize>() % len;
+                        let b = rng.gen::<usize>() % len;
+                        if a < b {
+                            input[a..b].reverse();
+                        } else {
+                            input.swap(a, b);
+                        }
+                    }
+                }
+
+                do_test!(input, sort_by);
+                do_test!(input, sort_unstable_by);
+            }
+        }
+    }
+
+    // Set default panic hook again.
+    drop(panic::take_hook());
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_sort() {
+    let mut rng = test_rng();
+
+    for len in (2..25).chain(500..510) {
+        for &modulus in &[5, 10, 100, 1000] {
+            for _ in 0..10 {
+                let orig: Vec<_> = (&mut rng)
+                    .sample_iter::<i32, _>(&Standard)
+                    .map(|x| x % modulus)
+                    .take(len)
+                    .collect();
+
+                // Sort in default order.
+                let mut v = orig.clone();
+                v.sort();
+                assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+                // Sort in ascending order.
+                let mut v = orig.clone();
+                v.sort_by(|a, b| a.cmp(b));
+                assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+                // Sort in descending order.
+                let mut v = orig.clone();
+                v.sort_by(|a, b| b.cmp(a));
+                assert!(v.windows(2).all(|w| w[0] >= w[1]));
+
+                // Sort in lexicographic order.
+                let mut v1 = orig.clone();
+                let mut v2 = orig.clone();
+                v1.sort_by_key(|x| x.to_string());
+                v2.sort_by_cached_key(|x| x.to_string());
+                assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
+                assert!(v1 == v2);
+
+                // Sort with many pre-sorted runs.
+                let mut v = orig.clone();
+                v.sort();
+                v.reverse();
+                for _ in 0..5 {
+                    let a = rng.gen::<usize>() % len;
+                    let b = rng.gen::<usize>() % len;
+                    if a < b {
+                        v[a..b].reverse();
+                    } else {
+                        v.swap(a, b);
+                    }
+                }
+                v.sort();
+                assert!(v.windows(2).all(|w| w[0] <= w[1]));
+            }
+        }
+    }
+
+    // Sort using a completely random comparison function.
+    // This will reorder the elements *somehow*, but won't panic.
+    let mut v = [0; 500];
+    for i in 0..v.len() {
+        v[i] = i as i32;
+    }
+    v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
+    v.sort();
+    for i in 0..v.len() {
+        assert_eq!(v[i], i as i32);
+    }
+
+    // Should not panic.
+    [0i32; 0].sort();
+    [(); 10].sort();
+    [(); 100].sort();
+
+    let mut v = [0xDEADBEEFu64];
+    v.sort();
+    assert!(v == [0xDEADBEEF]);
+}
+
+#[test]
+fn test_sort_stability() {
+    // Miri is too slow
+    let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
+    let rounds = if cfg!(miri) { 1 } else { 10 };
+
+    let mut rng = test_rng();
+    for len in (2..25).chain(large_range) {
+        for _ in 0..rounds {
+            let mut counts = [0; 10];
+
+            // create a vector like [(6, 1), (5, 1), (6, 2), ...],
+            // where the first item of each tuple is random, but
+            // the second item represents which occurrence of that
+            // number this element is, i.e., the second elements
+            // will occur in sorted order.
+            let orig: Vec<_> = (0..len)
+                .map(|_| {
+                    let n = rng.gen::<usize>() % 10;
+                    counts[n] += 1;
+                    (n, counts[n])
+                })
+                .collect();
+
+            let mut v = orig.clone();
+            // Only sort on the first element, so an unstable sort
+            // may mix up the counts.
+            v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
+
+            // This comparison includes the count (the second item
+            // of the tuple), so elements with equal first items
+            // will need to be ordered with increasing
+            // counts... i.e., exactly asserting that this sort is
+            // stable.
+            assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+            let mut v = orig.clone();
+            v.sort_by_cached_key(|&(x, _)| x);
+            assert!(v.windows(2).all(|w| w[0] <= w[1]));
+        }
+    }
+}
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index 7a8e6f088f3..3118c7189a5 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -363,7 +363,7 @@ use crate::vec::Vec;
 /// [`as_str()`]: String::as_str
 #[derive(PartialOrd, Eq, Ord)]
 #[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(all(not(bootstrap), not(test)), lang = "String")]
+#[cfg_attr(not(test), lang = "String")]
 pub struct String {
     vec: Vec<u8>,
 }
@@ -2549,6 +2549,15 @@ impl ToString for char {
 }
 
 #[cfg(not(no_global_oom_handling))]
+#[stable(feature = "bool_to_string_specialization", since = "CURRENT_RUSTC_VERSION")]
+impl ToString for bool {
+    #[inline]
+    fn to_string(&self) -> String {
+        String::from(if *self { "true" } else { "false" })
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
 #[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
 impl ToString for u8 {
     #[inline]
@@ -2678,7 +2687,7 @@ impl From<&String> for String {
     }
 }
 
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
 #[cfg(not(test))]
 #[stable(feature = "string_from_box", since = "1.18.0")]
 impl From<Box<str>> for String {
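The new `bool` specialization above skips the generic `Display`-based formatting pass in favor of a direct `&'static str` copy; observable behavior is unchanged:

    fn main() {
        // Same output as before the specialization, just cheaper.
        assert_eq!(true.to_string(), "true");
        assert_eq!(false.to_string(), "false");
    }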
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index f7dc4d1094c..d833d4d1dfb 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -254,7 +254,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
 #[stable(feature = "catch_unwind", since = "1.9.0")]
 impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
 
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
 
 #[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -306,13 +306,13 @@ unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
 #[stable(feature = "arc_weak", since = "1.4.0")]
 unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
 
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
 #[unstable(feature = "dispatch_from_dyn", issue = "none")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
 
 #[stable(feature = "arc_weak", since = "1.4.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized> fmt::Debug for Weak<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(Weak)")
     }
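Dropping the `T: Debug` bound is sound because `Weak<T>`'s `Debug` output never reads `T` at all. With this change applied, weak pointers to non-`Debug` payloads can be formatted:

    use std::sync::{Arc, Weak};

    struct NotDebug;

    fn main() {
        let strong = Arc::new(NotDebug);
        let weak: Weak<NotDebug> = Arc::downgrade(&strong);
        // Compiles now that Debug for Weak<T> no longer requires T: Debug.
        assert_eq!(format!("{:?}", weak), "(Weak)");
    }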
diff --git a/library/alloc/src/collections/btree/testing/crash_test.rs b/library/alloc/src/testing/crash_test.rs
index bcf5f5f7251..bcf5f5f7251 100644
--- a/library/alloc/src/collections/btree/testing/crash_test.rs
+++ b/library/alloc/src/testing/crash_test.rs
diff --git a/library/alloc/src/collections/btree/testing/mod.rs b/library/alloc/src/testing/mod.rs
index 7a094f8a595..7a094f8a595 100644
--- a/library/alloc/src/collections/btree/testing/mod.rs
+++ b/library/alloc/src/testing/mod.rs
diff --git a/library/alloc/src/collections/btree/testing/ord_chaos.rs b/library/alloc/src/testing/ord_chaos.rs
index 96ce7c15790..96ce7c15790 100644
--- a/library/alloc/src/collections/btree/testing/ord_chaos.rs
+++ b/library/alloc/src/testing/ord_chaos.rs
diff --git a/library/alloc/src/collections/btree/testing/rng.rs b/library/alloc/src/testing/rng.rs
index ecf543bee03..ecf543bee03 100644
--- a/library/alloc/src/collections/btree/testing/rng.rs
+++ b/library/alloc/src/testing/rng.rs
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 6bcde6d899c..b207b3210f1 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -40,7 +40,9 @@ pub struct IntoIter<
     // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
     pub(super) alloc: ManuallyDrop<A>,
     pub(super) ptr: *const T,
-    pub(super) end: *const T,
+    pub(super) end: *const T, // If T is a ZST, this is actually ptr+len.  This encoding is picked so that
+                              // ptr == end is a quick test for the Iterator being empty, that works
+                              // for both ZST and non-ZST.
 }
 
 #[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
@@ -132,7 +134,9 @@ impl<T, A: Allocator> IntoIter<T, A> {
 
     /// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
     pub(crate) fn forget_remaining_elements(&mut self) {
-        self.ptr = self.end;
+        // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
+        // `ptr` must stay aligned, while `end` may be unaligned.
+        self.end = self.ptr;
     }
 
     #[cfg(not(no_global_oom_handling))]
@@ -184,10 +188,9 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
         if self.ptr == self.end {
             None
         } else if T::IS_ZST {
-            // purposefully don't use 'ptr.offset' because for
-            // vectors with 0-size elements this would return the
-            // same pointer.
-            self.ptr = self.ptr.wrapping_byte_add(1);
+            // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
+            // reducing the `end`.
+            self.end = self.end.wrapping_byte_sub(1);
 
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -214,10 +217,8 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
         let step_size = self.len().min(n);
         let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
         if T::IS_ZST {
-            // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
-            // effectively results in unsigned pointers representing positions 0..usize::MAX,
-            // which is valid for ZSTs.
-            self.ptr = self.ptr.wrapping_byte_add(step_size);
+            // See `next` for why we sub `end` here.
+            self.end = self.end.wrapping_byte_sub(step_size);
         } else {
             // SAFETY: the min() above ensures that step_size is in bounds
             self.ptr = unsafe { self.ptr.add(step_size) };
@@ -250,7 +251,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
                 return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
             }
 
-            self.ptr = self.ptr.wrapping_byte_add(N);
+            self.end = self.end.wrapping_byte_sub(N);
             // Safety: ditto
             return Ok(unsafe { raw_ary.transpose().assume_init() });
         }
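With the new encoding, `end` is `ptr + len` in raw bytes for ZSTs, so the remaining length is tracked by moving `end` down while `ptr` stays aligned; `ptr == end` still means "empty" for ZSTs and sized types alike. The observable contract this preserves:

    fn main() {
        // Iterating a Vec of ZSTs yields exactly `len` items and keeps an
        // accurate length throughout, regardless of the internal encoding.
        let mut it = vec![(), (), ()].into_iter();
        assert_eq!(it.size_hint(), (3, Some(3)));
        assert_eq!(it.next(), Some(()));
        assert_eq!(it.len(), 2);
        assert_eq!(it.by_ref().count(), 2);
        assert_eq!(it.next(), None);
    }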
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index ba34ab6800f..36cfac8ee9e 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -166,7 +166,7 @@ mod spec_extend;
 /// vec[0] = 7;
 /// assert_eq!(vec[0], 7);
 ///
-/// vec.extend([1, 2, 3].iter().copied());
+/// vec.extend([1, 2, 3]);
 ///
 /// for x in &vec {
 ///     println!("{x}");
@@ -490,6 +490,8 @@ impl<T> Vec<T> {
     /// This is highly unsafe, due to the number of invariants that aren't
     /// checked:
     ///
+    /// * `ptr` must have been allocated using the global allocator, such as via
+    ///   the [`alloc::alloc`] function.
     /// * `T` needs to have the same alignment as what `ptr` was allocated with.
     ///   (`T` having a less strict alignment is not sufficient, the alignment really
     ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -526,6 +528,7 @@ impl<T> Vec<T> {
     /// function.
     ///
     /// [`String`]: crate::string::String
+    /// [`alloc::alloc`]: crate::alloc::alloc
     /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
     ///
     /// # Examples
@@ -681,6 +684,7 @@ impl<T, A: Allocator> Vec<T, A> {
     /// This is highly unsafe, due to the number of invariants that aren't
     /// checked:
     ///
+    /// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
     /// * `T` needs to have the same alignment as what `ptr` was allocated with.
     ///   (`T` having a less strict alignment is not sufficient, the alignment really
     ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -714,6 +718,7 @@ impl<T, A: Allocator> Vec<T, A> {
     ///
     /// [`String`]: crate::string::String
     /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+    /// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
     /// [*fit*]: crate::alloc::Allocator#memory-fitting
     ///
     /// # Examples
@@ -3191,7 +3196,7 @@ where
     }
 }
 
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
 #[cfg(not(test))]
 #[stable(feature = "vec_from_box", since = "1.18.0")]
 impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
@@ -3209,7 +3214,7 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
     }
 }
 
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
 #[cfg(not(no_global_oom_handling))]
 #[cfg(not(test))]
 #[stable(feature = "box_from_vec", since = "1.20.0")]
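The invariants documented above, exercised end to end: a pointer handed to `from_raw_parts` must come from the global allocator with a matching layout. A typical correct round-trip:

    use std::mem::ManuallyDrop;

    fn main() {
        let v = ManuallyDrop::new(vec![1u32, 2, 3]);
        let (ptr, len, cap) = (v.as_ptr() as *mut u32, v.len(), v.capacity());
        // SAFETY: ptr/len/cap describe a live buffer that Vec allocated via
        // the global allocator, satisfying every invariant listed in the
        // docs above; ManuallyDrop prevents a double free.
        let rebuilt = unsafe { Vec::from_raw_parts(ptr, len, cap) };
        assert_eq!(rebuilt, [1, 2, 3]);
    }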