author    Pankaj Chaudhary <pankajchaudhary172@gmail.com>   2020-07-13 14:14:37 +0530
committer PankajChaudhary5 <pankajchaudhary172@gmail.com>  2020-07-13 14:57:22 +0530
commit    bc2b37aad7f9cbd1c97e416e6b16325b607422b8 (patch)
tree      867a6176bcf763bba6e5de12bc08f034a320ac56 /src/liballoc
parent    e3ae4c7345cfd06b06c6996536d7c158ce6970db (diff)
parent    9d09331e00b02f81c714b0c41ce3a38380dd36a2 (diff)
Merge branch 'master' into E0688
Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/Cargo.toml | 1
-rw-r--r--  src/liballoc/alloc.rs | 45
-rw-r--r--  src/liballoc/alloc/tests.rs | 2
-rw-r--r--  src/liballoc/benches/btree/map.rs | 118
-rw-r--r--  src/liballoc/benches/lib.rs | 3
-rw-r--r--  src/liballoc/boxed.rs | 119
-rw-r--r--  src/liballoc/collections/binary_heap.rs | 62
-rw-r--r--  src/liballoc/collections/btree/map.rs | 227
-rw-r--r--  src/liballoc/collections/btree/mod.rs | 4
-rw-r--r--  src/liballoc/collections/btree/navigate.rs | 100
-rw-r--r--  src/liballoc/collections/btree/node.rs | 54
-rw-r--r--  src/liballoc/collections/btree/set.rs | 48
-rw-r--r--  src/liballoc/collections/linked_list.rs | 101
-rw-r--r--  src/liballoc/collections/linked_list/tests.rs | 1
-rw-r--r--  src/liballoc/collections/vec_deque.rs | 239
-rw-r--r--  src/liballoc/collections/vec_deque/tests.rs | 75
-rw-r--r--  src/liballoc/fmt.rs | 22
-rw-r--r--  src/liballoc/lib.rs | 14
-rw-r--r--  src/liballoc/raw_vec.rs | 411
-rw-r--r--  src/liballoc/raw_vec/tests.rs | 5
-rw-r--r--  src/liballoc/rc.rs | 242
-rw-r--r--  src/liballoc/rc/tests.rs | 4
-rw-r--r--  src/liballoc/slice.rs | 55
-rw-r--r--  src/liballoc/str.rs | 2
-rw-r--r--  src/liballoc/string.rs | 78
-rw-r--r--  src/liballoc/sync.rs | 346
-rw-r--r--  src/liballoc/sync/tests.rs | 12
-rw-r--r--  src/liballoc/task.rs | 17
-rw-r--r--  src/liballoc/tests/binary_heap.rs | 16
-rw-r--r--  src/liballoc/tests/borrow.rs | 47
-rw-r--r--  src/liballoc/tests/boxed.rs | 33
-rw-r--r--  src/liballoc/tests/btree/map.rs | 84
-rw-r--r--  src/liballoc/tests/btree/set.rs | 37
-rw-r--r--  src/liballoc/tests/lib.rs | 3
-rw-r--r--  src/liballoc/tests/slice.rs | 24
-rw-r--r--  src/liballoc/tests/str.rs | 4
-rw-r--r--  src/liballoc/tests/string.rs | 7
-rw-r--r--  src/liballoc/tests/vec.rs | 204
-rw-r--r--  src/liballoc/tests/vec_deque.rs | 14
-rw-r--r--  src/liballoc/vec.rs | 219
40 files changed, 2039 insertions, 1060 deletions
diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml
index d1119f7b7c0..914195f015b 100644
--- a/src/liballoc/Cargo.toml
+++ b/src/liballoc/Cargo.toml
@@ -25,6 +25,7 @@ path = "../liballoc/tests/lib.rs"
 [[bench]]
 name = "collectionsbenches"
 path = "../liballoc/benches/lib.rs"
+test = true
 
 [[bench]]
 name = "vec_deque_append_bench"
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index d31c73cc1bd..98c7ac3f2ef 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -77,7 +77,7 @@ pub struct Global;
 #[stable(feature = "global_alloc", since = "1.28.0")]
 #[inline]
 pub unsafe fn alloc(layout: Layout) -> *mut u8 {
-    __rust_alloc(layout.size(), layout.align())
+    unsafe { __rust_alloc(layout.size(), layout.align()) }
 }
 
 /// Deallocate memory with the global allocator.
@@ -99,7 +99,7 @@ pub unsafe fn alloc(layout: Layout) -> *mut u8 {
 #[stable(feature = "global_alloc", since = "1.28.0")]
 #[inline]
 pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
-    __rust_dealloc(ptr, layout.size(), layout.align())
+    unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
 }
 
 /// Reallocate memory with the global allocator.
@@ -121,7 +121,7 @@ pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
 #[stable(feature = "global_alloc", since = "1.28.0")]
 #[inline]
 pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
-    __rust_realloc(ptr, layout.size(), layout.align(), new_size)
+    unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
 }
 
 /// Allocate zero-initialized memory with the global allocator.
@@ -158,7 +158,7 @@ pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8
 #[stable(feature = "global_alloc", since = "1.28.0")]
 #[inline]
 pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
-    __rust_alloc_zeroed(layout.size(), layout.align())
+    unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
 }
 
 #[unstable(feature = "allocator_api", issue = "32838")]
@@ -183,7 +183,7 @@ unsafe impl AllocRef for Global {
     #[inline]
     unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
         if layout.size() != 0 {
-            dealloc(ptr.as_ptr(), layout)
+            unsafe { dealloc(ptr.as_ptr(), layout) }
         }
     }
 
@@ -209,16 +209,21 @@ unsafe impl AllocRef for Global {
         match placement {
             ReallocPlacement::InPlace => Err(AllocErr),
             ReallocPlacement::MayMove if layout.size() == 0 => {
-                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                let new_layout =
+                    unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
                 self.alloc(new_layout, init)
             }
             ReallocPlacement::MayMove => {
                 // `realloc` probably checks for `new_size > size` or something similar.
-                intrinsics::assume(new_size > size);
-                let ptr = realloc(ptr.as_ptr(), layout, new_size);
+                let ptr = unsafe {
+                    intrinsics::assume(new_size > size);
+                    realloc(ptr.as_ptr(), layout, new_size)
+                };
                 let memory =
                     MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size };
-                init.init_offset(memory, size);
+                unsafe {
+                    init.init_offset(memory, size);
+                }
                 Ok(memory)
             }
         }
@@ -245,13 +250,17 @@ unsafe impl AllocRef for Global {
         match placement {
             ReallocPlacement::InPlace => Err(AllocErr),
             ReallocPlacement::MayMove if new_size == 0 => {
-                self.dealloc(ptr, layout);
+                unsafe {
+                    self.dealloc(ptr, layout);
+                }
                 Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
             }
             ReallocPlacement::MayMove => {
                 // `realloc` probably checks for `new_size < size` or something similar.
-                intrinsics::assume(new_size < size);
-                let ptr = realloc(ptr.as_ptr(), layout, new_size);
+                let ptr = unsafe {
+                    intrinsics::assume(new_size < size);
+                    realloc(ptr.as_ptr(), layout, new_size)
+                };
                 Ok(MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size })
             }
         }
@@ -264,7 +273,7 @@ unsafe impl AllocRef for Global {
 #[lang = "exchange_malloc"]
 #[inline]
 unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
-    let layout = Layout::from_size_align_unchecked(size, align);
+    let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
     match Global.alloc(layout, AllocInit::Uninitialized) {
         Ok(memory) => memory.ptr.as_ptr(),
         Err(_) => handle_alloc_error(layout),
@@ -279,10 +288,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
 // For example if `Box` is changed to  `struct Box<T: ?Sized, A: AllocRef>(Unique<T>, A)`,
 // this function has to be changed to `fn box_free<T: ?Sized, A: AllocRef>(Unique<T>, A)` as well.
 pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
-    let size = size_of_val(ptr.as_ref());
-    let align = min_align_of_val(ptr.as_ref());
-    let layout = Layout::from_size_align_unchecked(size, align);
-    Global.dealloc(ptr.cast().into(), layout)
+    unsafe {
+        let size = size_of_val(ptr.as_ref());
+        let align = min_align_of_val(ptr.as_ref());
+        let layout = Layout::from_size_align_unchecked(size, align);
+        Global.dealloc(ptr.cast().into(), layout)
+    }
 }
 
 /// Abort on memory allocation error or failure.
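
Most of the alloc.rs hunks above, and many of the hunks in the files below, apply one mechanical change: operations inside an `unsafe fn` are wrapped in explicit `unsafe { ... }` blocks. This is the `unsafe_op_in_unsafe_fn` lint of RFC 2585, under which the body of an `unsafe fn` no longer implicitly supplies an unsafe context, so every unsafe operation has to be acknowledged individually. A minimal sketch of the pattern as it would look on a nightly of that era (illustrative names, not code from this commit):

    #![feature(unsafe_block_in_unsafe_fn)] // gate at the time; since stabilized
    #![deny(unsafe_op_in_unsafe_fn)]

    /// Safety: `ptr` must be non-null, aligned, and valid for two reads.
    pub unsafe fn read_twice(ptr: *const u32) -> (u32, u32) {
        // Without the explicit blocks, the lint rejects these calls even
        // though the surrounding function is itself `unsafe`.
        let a = unsafe { ptr.read() };
        let b = unsafe { ptr.read() };
        (a, b)
    }
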
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs
index 1ad40eca93b..1c003983df9 100644
--- a/src/liballoc/alloc/tests.rs
+++ b/src/liballoc/alloc/tests.rs
@@ -23,7 +23,7 @@ fn allocate_zeroed() {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn alloc_owned_small(b: &mut Bencher) {
     b.iter(|| {
         let _: Box<_> = box 10;
diff --git a/src/liballoc/benches/btree/map.rs b/src/liballoc/benches/btree/map.rs
index 83cdebf0e3f..38d19c59ad1 100644
--- a/src/liballoc/benches/btree/map.rs
+++ b/src/liballoc/benches/btree/map.rs
@@ -1,6 +1,6 @@
 use std::collections::BTreeMap;
 use std::iter::Iterator;
-use std::ops::Bound::{Excluded, Unbounded};
+use std::ops::RangeBounds;
 use std::vec::Vec;
 
 use rand::{seq::SliceRandom, thread_rng, Rng};
@@ -117,7 +117,7 @@ map_find_rand_bench! {find_rand_10_000, 10_000, BTreeMap}
 map_find_seq_bench! {find_seq_100,    100,    BTreeMap}
 map_find_seq_bench! {find_seq_10_000, 10_000, BTreeMap}
 
-fn bench_iter(b: &mut Bencher, size: i32) {
+fn bench_iteration(b: &mut Bencher, size: i32) {
     let mut map = BTreeMap::<i32, i32>::new();
     let mut rng = thread_rng();
 
@@ -133,21 +133,21 @@ fn bench_iter(b: &mut Bencher, size: i32) {
 }
 
 #[bench]
-pub fn iter_20(b: &mut Bencher) {
-    bench_iter(b, 20);
+pub fn iteration_20(b: &mut Bencher) {
+    bench_iteration(b, 20);
 }
 
 #[bench]
-pub fn iter_1000(b: &mut Bencher) {
-    bench_iter(b, 1000);
+pub fn iteration_1000(b: &mut Bencher) {
+    bench_iteration(b, 1000);
 }
 
 #[bench]
-pub fn iter_100000(b: &mut Bencher) {
-    bench_iter(b, 100000);
+pub fn iteration_100000(b: &mut Bencher) {
+    bench_iteration(b, 100000);
 }
 
-fn bench_iter_mut(b: &mut Bencher, size: i32) {
+fn bench_iteration_mut(b: &mut Bencher, size: i32) {
     let mut map = BTreeMap::<i32, i32>::new();
     let mut rng = thread_rng();
 
@@ -163,18 +163,18 @@ fn bench_iter_mut(b: &mut Bencher, size: i32) {
 }
 
 #[bench]
-pub fn iter_mut_20(b: &mut Bencher) {
-    bench_iter_mut(b, 20);
+pub fn iteration_mut_20(b: &mut Bencher) {
+    bench_iteration_mut(b, 20);
 }
 
 #[bench]
-pub fn iter_mut_1000(b: &mut Bencher) {
-    bench_iter_mut(b, 1000);
+pub fn iteration_mut_1000(b: &mut Bencher) {
+    bench_iteration_mut(b, 1000);
 }
 
 #[bench]
-pub fn iter_mut_100000(b: &mut Bencher) {
-    bench_iter_mut(b, 100000);
+pub fn iteration_mut_100000(b: &mut Bencher) {
+    bench_iteration_mut(b, 100000);
 }
 
 fn bench_first_and_last(b: &mut Bencher, size: i32) {
@@ -202,57 +202,83 @@ pub fn first_and_last_10k(b: &mut Bencher) {
     bench_first_and_last(b, 10_000);
 }
 
-#[bench]
-pub fn range_excluded_excluded(b: &mut Bencher) {
-    let size = 144;
-    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
+const BENCH_RANGE_SIZE: i32 = 145;
+const BENCH_RANGE_COUNT: i32 = BENCH_RANGE_SIZE * (BENCH_RANGE_SIZE - 1) / 2;
+
+fn bench_range<F, R>(b: &mut Bencher, f: F)
+where
+    F: Fn(i32, i32) -> R,
+    R: RangeBounds<i32>,
+{
+    let map: BTreeMap<_, _> = (0..BENCH_RANGE_SIZE).map(|i| (i, i)).collect();
     b.iter(|| {
-        for first in 0..size {
-            for last in first + 1..size {
-                black_box(map.range((Excluded(first), Excluded(last))));
+        let mut c = 0;
+        for i in 0..BENCH_RANGE_SIZE {
+            for j in i + 1..BENCH_RANGE_SIZE {
+                black_box(map.range(f(i, j)));
+                c += 1;
             }
         }
+        debug_assert_eq!(c, BENCH_RANGE_COUNT);
     });
 }
 
 #[bench]
-pub fn range_excluded_unbounded(b: &mut Bencher) {
-    let size = 144;
-    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
-    b.iter(|| {
-        for first in 0..size {
-            black_box(map.range((Excluded(first), Unbounded)));
-        }
-    });
+pub fn range_included_excluded(b: &mut Bencher) {
+    bench_range(b, |i, j| i..j);
 }
 
 #[bench]
 pub fn range_included_included(b: &mut Bencher) {
-    let size = 144;
-    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
-    b.iter(|| {
-        for first in 0..size {
-            for last in first..size {
-                black_box(map.range(first..=last));
-            }
-        }
-    });
+    bench_range(b, |i, j| i..=j);
 }
 
 #[bench]
 pub fn range_included_unbounded(b: &mut Bencher) {
-    let size = 144;
+    bench_range(b, |i, _| i..);
+}
+
+#[bench]
+pub fn range_unbounded_unbounded(b: &mut Bencher) {
+    bench_range(b, |_, _| ..);
+}
+
+fn bench_iter(b: &mut Bencher, repeats: i32, size: i32) {
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
     b.iter(|| {
-        for first in 0..size {
-            black_box(map.range(first..));
+        for _ in 0..repeats {
+            black_box(map.iter());
         }
     });
 }
 
+/// Contrast range_unbounded_unbounded with `iter()`.
 #[bench]
-pub fn range_unbounded_unbounded(b: &mut Bencher) {
-    let size = 144;
-    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
-    b.iter(|| map.range(..));
+pub fn range_unbounded_vs_iter(b: &mut Bencher) {
+    bench_iter(b, BENCH_RANGE_COUNT, BENCH_RANGE_SIZE);
+}
+
+#[bench]
+pub fn iter_0(b: &mut Bencher) {
+    bench_iter(b, 1_000, 0);
+}
+
+#[bench]
+pub fn iter_1(b: &mut Bencher) {
+    bench_iter(b, 1_000, 1);
+}
+
+#[bench]
+pub fn iter_100(b: &mut Bencher) {
+    bench_iter(b, 1_000, 100);
+}
+
+#[bench]
+pub fn iter_10k(b: &mut Bencher) {
+    bench_iter(b, 1_000, 10_000);
+}
+
+#[bench]
+pub fn iter_1m(b: &mut Bencher) {
+    bench_iter(b, 1_000, 1_000_000);
 }
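
The benchmark refactor above folds four copy-pasted range benchmarks into one helper that is generic over `R: RangeBounds<i32>`, so each `#[bench]` only supplies a closure producing its range shape. A standalone sketch of the same pattern (hypothetical names, outside the benchmark harness):

    use std::collections::BTreeMap;
    use std::ops::RangeBounds;

    // One function serves every range shape: `i..j`, `i..=j`, `i..`, `..`.
    fn count_in_ranges<F, R>(map: &BTreeMap<i32, i32>, f: F) -> usize
    where
        F: Fn(i32, i32) -> R,
        R: RangeBounds<i32>,
    {
        let n = map.len() as i32;
        let mut total = 0;
        for i in 0..n {
            for j in i + 1..n {
                total += map.range(f(i, j)).count();
            }
        }
        total
    }

    fn main() {
        let map: BTreeMap<_, _> = (0..10).map(|i| (i, i)).collect();
        // Inclusive ranges cover one more key per pair than exclusive ones.
        assert!(count_in_ranges(&map, |i, j| i..=j) > count_in_ranges(&map, |i, j| i..j));
    }
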
diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs
index f31717d9fd5..608eafc88d2 100644
--- a/src/liballoc/benches/lib.rs
+++ b/src/liballoc/benches/lib.rs
@@ -1,3 +1,6 @@
+// Disabling on android for the time being
+// See https://github.com/rust-lang/rust/issues/73535#event-3477699747
+#![cfg(not(target_os = "android"))]
 #![feature(btree_drain_filter)]
 #![feature(map_first_last)]
 #![feature(repr_simd)]
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index db7420954a0..3320ebdf821 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -92,11 +92,13 @@
 //! pub struct Foo;
 //!
 //! #[no_mangle]
+//! #[allow(improper_ctypes_definitions)]
 //! pub extern "C" fn foo_new() -> Box<Foo> {
 //!     Box::new(Foo)
 //! }
 //!
 //! #[no_mangle]
+//! #[allow(improper_ctypes_definitions)]
 //! pub extern "C" fn foo_delete(_: Option<Box<Foo>>) {}
 //! ```
 //!
@@ -146,6 +148,7 @@ use core::ptr::{self, NonNull, Unique};
 use core::task::{Context, Poll};
 
 use crate::alloc::{self, AllocInit, AllocRef, Global};
+use crate::borrow::Cow;
 use crate::raw_vec::RawVec;
 use crate::str::from_boxed_utf8_unchecked;
 use crate::vec::Vec;
@@ -239,6 +242,16 @@ impl<T> Box<T> {
     pub fn pin(x: T) -> Pin<Box<T>> {
         (box x).into()
     }
+
+    /// Converts a `Box<T>` into a `Box<[T]>`
+    ///
+    /// This conversion does not allocate on the heap and happens in place.
+    ///
+    #[unstable(feature = "box_into_boxed_slice", issue = "71582")]
+    pub fn into_boxed_slice(boxed: Box<T>) -> Box<[T]> {
+        // *mut T and *mut [T; 1] have the same size and alignment
+        unsafe { Box::from_raw(Box::into_raw(boxed) as *mut [T; 1]) }
+    }
 }
 
 impl<T> Box<[T]> {
@@ -300,7 +313,7 @@ impl<T> Box<mem::MaybeUninit<T>> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     #[inline]
     pub unsafe fn assume_init(self) -> Box<T> {
-        Box::from_raw(Box::into_raw(self) as *mut T)
+        unsafe { Box::from_raw(Box::into_raw(self) as *mut T) }
     }
 }
 
@@ -338,7 +351,7 @@ impl<T> Box<[mem::MaybeUninit<T>]> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     #[inline]
     pub unsafe fn assume_init(self) -> Box<[T]> {
-        Box::from_raw(Box::into_raw(self) as *mut [T])
+        unsafe { Box::from_raw(Box::into_raw(self) as *mut [T]) }
     }
 }
 
@@ -371,7 +384,10 @@ impl<T: ?Sized> Box<T> {
     ///
     /// unsafe {
     ///     let ptr = alloc(Layout::new::<i32>()) as *mut i32;
-    ///     *ptr = 5;
+    ///     // In general .write is required to avoid attempting to destruct
+    ///     // the (uninitialized) previous contents of `ptr`, though for this
+    ///     // simple example `*ptr = 5` would have worked as well.
+    ///     ptr.write(5);
     ///     let x = Box::from_raw(ptr);
     /// }
     /// ```
@@ -382,7 +398,7 @@ impl<T: ?Sized> Box<T> {
     #[stable(feature = "box_raw", since = "1.4.0")]
     #[inline]
     pub unsafe fn from_raw(raw: *mut T) -> Self {
-        Box(Unique::new_unchecked(raw))
+        Box(unsafe { Unique::new_unchecked(raw) })
     }
 
     /// Consumes the `Box`, returning a wrapped raw pointer.
@@ -428,7 +444,12 @@ impl<T: ?Sized> Box<T> {
     #[stable(feature = "box_raw", since = "1.4.0")]
     #[inline]
     pub fn into_raw(b: Box<T>) -> *mut T {
-        Box::into_raw_non_null(b).as_ptr()
+        // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+        // raw pointer for the type system. Turning it directly into a raw pointer would not be
+        // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+        // so all raw pointer methods go through `leak` which creates a (unique)
+        // mutable reference. Turning *that* to a raw pointer behaves correctly.
+        Box::leak(b) as *mut T
     }
 
     /// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`.
@@ -451,6 +472,7 @@ impl<T: ?Sized> Box<T> {
     ///
     /// ```
     /// #![feature(box_into_raw_non_null)]
+    /// #![allow(deprecated)]
     ///
     /// let x = Box::new(5);
     /// let ptr = Box::into_raw_non_null(x);
@@ -460,24 +482,34 @@ impl<T: ?Sized> Box<T> {
     /// let x = unsafe { Box::from_raw(ptr.as_ptr()) };
     /// ```
     #[unstable(feature = "box_into_raw_non_null", issue = "47336")]
+    #[rustc_deprecated(
+        since = "1.44.0",
+        reason = "use `Box::leak(b).into()` or `NonNull::from(Box::leak(b))` instead"
+    )]
     #[inline]
     pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> {
-        Box::into_unique(b).into()
-    }
-
-    #[unstable(feature = "ptr_internals", issue = "none", reason = "use into_raw_non_null instead")]
+        // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+        // raw pointer for the type system. Turning it directly into a raw pointer would not be
+        // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+        // so all raw pointer methods go through `leak` which creates a (unique)
+        // mutable reference. Turning *that* to a raw pointer behaves correctly.
+        Box::leak(b).into()
+    }
+
+    #[unstable(
+        feature = "ptr_internals",
+        issue = "none",
+        reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
+    )]
     #[inline]
     #[doc(hidden)]
     pub fn into_unique(b: Box<T>) -> Unique<T> {
-        let b = mem::ManuallyDrop::new(b);
-        let mut unique = b.0;
-        // Box is kind-of a library type, but recognized as a "unique pointer" by
-        // Stacked Borrows.  This function here corresponds to "reborrowing to
-        // a raw pointer", but there is no actual reborrow here -- so
-        // without some care, the pointer we are returning here still carries
-        // the tag of `b`, with `Unique` permission.
-        // We round-trip through a mutable reference to avoid that.
-        unsafe { Unique::new_unchecked(unique.as_mut() as *mut T) }
+        // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+        // raw pointer for the type system. Turning it directly into a raw pointer would not be
+        // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+        // so all raw pointer methods go through `leak` which creates a (unique)
+        // mutable reference. Turning *that* to a raw pointer behaves correctly.
+        Box::leak(b).into()
     }
 
     /// Consumes and leaks the `Box`, returning a mutable reference,
@@ -523,7 +555,7 @@ impl<T: ?Sized> Box<T> {
     where
         T: 'a, // Technically not needed, but kept to be explicit.
     {
-        unsafe { &mut *Box::into_raw(b) }
+        unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
     }
 
     /// Converts a `Box<T>` into a `Pin<Box<T>>`
@@ -774,6 +806,17 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
     }
 }
 
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+    #[inline]
+    fn from(cow: Cow<'_, [T]>) -> Box<[T]> {
+        match cow {
+            Cow::Borrowed(slice) => Box::from(slice),
+            Cow::Owned(slice) => Box::from(slice),
+        }
+    }
+}
+
 #[stable(feature = "box_from_slice", since = "1.17.0")]
 impl From<&str> for Box<str> {
     /// Converts a `&str` into a `Box<str>`
@@ -792,6 +835,17 @@ impl From<&str> for Box<str> {
     }
 }
 
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, str>> for Box<str> {
+    #[inline]
+    fn from(cow: Cow<'_, str>) -> Box<str> {
+        match cow {
+            Cow::Borrowed(s) => Box::from(s),
+            Cow::Owned(s) => Box::from(s),
+        }
+    }
+}
+
 #[stable(feature = "boxed_str_conv", since = "1.19.0")]
 impl From<Box<str>> for Box<[u8]> {
     /// Converts a `Box<str>>` into a `Box<[u8]>`
@@ -816,6 +870,25 @@ impl From<Box<str>> for Box<[u8]> {
     }
 }
 
+#[stable(feature = "box_from_array", since = "1.45.0")]
+impl<T, const N: usize> From<[T; N]> for Box<[T]>
+where
+    [T; N]: LengthAtMost32,
+{
+    /// Converts a `[T; N]` into a `Box<[T]>`
+    ///
+    /// This conversion moves the array to newly heap-allocated memory.
+    ///
+    /// # Examples
+    /// ```rust
+    /// let boxed: Box<[u8]> = Box::from([4, 2]);
+    /// println!("{:?}", boxed);
+    /// ```
+    fn from(array: [T; N]) -> Box<[T]> {
+        box array
+    }
+}
+
 #[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
 impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]>
 where
@@ -1041,6 +1114,14 @@ impl<T: Clone> Clone for Box<[T]> {
     fn clone(&self) -> Self {
         self.to_vec().into_boxed_slice()
     }
+
+    fn clone_from(&mut self, other: &Self) {
+        if self.len() == other.len() {
+            self.clone_from_slice(&other);
+        } else {
+            *self = other.clone();
+        }
+    }
 }
 
 #[stable(feature = "box_borrow", since = "1.1.0")]
diff --git a/src/liballoc/collections/binary_heap.rs b/src/liballoc/collections/binary_heap.rs
index 03c9164fb90..15313e333ce 100644
--- a/src/liballoc/collections/binary_heap.rs
+++ b/src/liballoc/collections/binary_heap.rs
@@ -665,6 +665,34 @@ impl<T: Ord> BinaryHeap<T> {
     pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
         DrainSorted { inner: self }
     }
+
+    /// Retains only the elements specified by the predicate.
+    ///
+    /// In other words, remove all elements `e` such that `f(&e)` returns
+    /// `false`. The elements are visited in unsorted (and unspecified) order.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(binary_heap_retain)]
+    /// use std::collections::BinaryHeap;
+    ///
+    /// let mut heap = BinaryHeap::from(vec![-10, -5, 1, 2, 4, 13]);
+    ///
+    /// heap.retain(|x| x % 2 == 0); // only keep even numbers
+    ///
+    /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
+    /// ```
+    #[unstable(feature = "binary_heap_retain", issue = "71503")]
+    pub fn retain<F>(&mut self, f: F)
+    where
+        F: FnMut(&T) -> bool,
+    {
+        self.data.retain(f);
+        self.rebuild();
+    }
 }
 
 impl<T> BinaryHeap<T> {
@@ -975,7 +1003,7 @@ impl<'a, T> Hole<'a, T> {
     unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
         debug_assert!(pos < data.len());
         // SAFE: pos should be inside the slice
-        let elt = ptr::read(data.get_unchecked(pos));
+        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
         Hole { data, elt: ManuallyDrop::new(elt), pos }
     }
 
@@ -997,7 +1025,7 @@ impl<'a, T> Hole<'a, T> {
     unsafe fn get(&self, index: usize) -> &T {
         debug_assert!(index != self.pos);
         debug_assert!(index < self.data.len());
-        self.data.get_unchecked(index)
+        unsafe { self.data.get_unchecked(index) }
     }
 
     /// Move hole to new location
@@ -1007,9 +1035,11 @@ impl<'a, T> Hole<'a, T> {
     unsafe fn move_to(&mut self, index: usize) {
         debug_assert!(index != self.pos);
         debug_assert!(index < self.data.len());
-        let index_ptr: *const _ = self.data.get_unchecked(index);
-        let hole_ptr = self.data.get_unchecked_mut(self.pos);
-        ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+        unsafe {
+            let index_ptr: *const _ = self.data.get_unchecked(index);
+            let hole_ptr = self.data.get_unchecked_mut(self.pos);
+            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+        }
         self.pos = index;
     }
 }
@@ -1241,7 +1271,7 @@ impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
 
         impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
             fn drop(&mut self) {
-                while let Some(_) = self.0.inner.pop() {}
+                while self.0.inner.pop().is_some() {}
             }
         }
 
@@ -1348,6 +1378,16 @@ impl<T: Ord> Extend<T> for BinaryHeap<T> {
     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
         <Self as SpecExtend<I>>::spec_extend(self, iter);
     }
+
+    #[inline]
+    fn extend_one(&mut self, item: T) {
+        self.push(item);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
@@ -1378,4 +1418,14 @@ impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
         self.extend(iter.into_iter().cloned());
     }
+
+    #[inline]
+    fn extend_one(&mut self, &item: &'a T) {
+        self.push(item);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
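
`BinaryHeap::retain` above filters the backing `Vec` in place and then calls `rebuild()`, because removing arbitrary elements from the dense array can leave surviving parents smaller than their children; one sift-down pass over the whole vector restores the heap invariant. A stable-Rust approximation of the same effect (a round trip through `Vec`, which `retain` avoids):

    use std::collections::BinaryHeap;

    // Drain, filter, and rebuild by re-collecting; `retain` does the
    // filtering in place and runs a single rebuild pass instead.
    fn retain_by_rebuild<T: Ord>(heap: &mut BinaryHeap<T>, pred: impl Fn(&T) -> bool) {
        let kept: Vec<T> = heap.drain().filter(|x| pred(x)).collect();
        *heap = BinaryHeap::from(kept);
    }

    fn main() {
        let mut heap = BinaryHeap::from(vec![-10, -5, 1, 2, 4, 13]);
        retain_by_rebuild(&mut heap, |x| x % 2 == 0); // keep even numbers
        assert_eq!(heap.into_sorted_vec(), [-10, 2, 4]);
    }
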
diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index 91d93a1be1c..bb9091a6659 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -215,59 +215,6 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
             clone_subtree(self.root.as_ref().unwrap().as_ref())
         }
     }
-
-    fn clone_from(&mut self, other: &Self) {
-        BTreeClone::clone_from(self, other);
-    }
-}
-
-trait BTreeClone {
-    fn clone_from(&mut self, other: &Self);
-}
-
-impl<K: Clone, V: Clone> BTreeClone for BTreeMap<K, V> {
-    default fn clone_from(&mut self, other: &Self) {
-        *self = other.clone();
-    }
-}
-
-impl<K: Clone + Ord, V: Clone> BTreeClone for BTreeMap<K, V> {
-    fn clone_from(&mut self, other: &Self) {
-        // This truncates `self` to `other.len()` by calling `split_off` on
-        // the first key after `other.len()` elements if it exists.
-        let split_off_key = if self.len() > other.len() {
-            let diff = self.len() - other.len();
-            if diff <= other.len() {
-                self.iter().nth_back(diff - 1).map(|pair| (*pair.0).clone())
-            } else {
-                self.iter().nth(other.len()).map(|pair| (*pair.0).clone())
-            }
-        } else {
-            None
-        };
-        if let Some(key) = split_off_key {
-            self.split_off(&key);
-        }
-
-        let mut siter = self.range_mut(..);
-        let mut oiter = other.iter();
-        // After truncation, `self` is at most as long as `other` so this loop
-        // replaces every key-value pair in `self`. Since `oiter` is in sorted
-        // order and the structure of the `BTreeMap` stays the same,
-        // the BTree invariants are maintained at the end of the loop.
-        while !siter.is_empty() {
-            if let Some((ok, ov)) = oiter.next() {
-                // SAFETY: This is safe because `siter` is nonempty.
-                let (sk, sv) = unsafe { siter.next_unchecked() };
-                sk.clone_from(ok);
-                sv.clone_from(ov);
-            } else {
-                break;
-            }
-        }
-        // If `other` is longer than `self`, the remaining elements are inserted.
-        self.extend(oiter.map(|(k, v)| ((*k).clone(), (*v).clone())));
-    }
 }
 
 impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
@@ -541,7 +488,9 @@ struct MergeIter<K, V, I: Iterator<Item = (K, V)>> {
 }
 
 impl<K: Ord, V> BTreeMap<K, V> {
-    /// Makes a new empty BTreeMap with a reasonable choice for B.
+    /// Makes a new empty BTreeMap.
+    ///
+    /// Does not allocate anything on its own.
     ///
     /// # Examples
     ///
@@ -556,7 +505,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     /// map.insert(1, "a");
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn new() -> BTreeMap<K, V> {
+    #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+    pub const fn new() -> BTreeMap<K, V> {
         BTreeMap { root: None, length: 0 }
     }
 
@@ -923,7 +873,6 @@ impl<K: Ord, V> BTreeMap<K, V> {
     /// Basic usage:
     ///
     /// ```
-    /// #![feature(btreemap_remove_entry)]
     /// use std::collections::BTreeMap;
     ///
     /// let mut map = BTreeMap::new();
@@ -931,7 +880,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
     /// assert_eq!(map.remove_entry(&1), Some((1, "a")));
     /// assert_eq!(map.remove_entry(&1), None);
     /// ```
-    #[unstable(feature = "btreemap_remove_entry", issue = "66714")]
+    #[stable(feature = "btreemap_remove_entry", since = "1.45.0")]
     pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
     where
         K: Borrow<Q>,
@@ -1034,9 +983,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         R: RangeBounds<T>,
     {
         if let Some(root) = &self.root {
-            let root1 = root.as_ref();
-            let root2 = root.as_ref();
-            let (f, b) = range_search(root1, root2, range);
+            let (f, b) = range_search(root.as_ref(), range);
 
             Range { front: Some(f), back: Some(b) }
         } else {
@@ -1082,9 +1029,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         R: RangeBounds<T>,
     {
         if let Some(root) = &mut self.root {
-            let root1 = root.as_mut();
-            let root2 = unsafe { ptr::read(&root1) };
-            let (f, b) = range_search(root1, root2, range);
+            let (f, b) = range_search(root.as_mut(), range);
 
             RangeMut { front: Some(f), back: Some(b), _marker: PhantomData }
         } else {
@@ -1451,6 +1396,14 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> {
     fn last(mut self) -> Option<(&'a K, &'a V)> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<(&'a K, &'a V)> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<(&'a K, &'a V)> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -1513,6 +1466,14 @@ impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> {
     fn last(mut self) -> Option<(&'a K, &'a mut V)> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<(&'a K, &'a mut V)> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<(&'a K, &'a mut V)> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1545,16 +1506,10 @@ impl<K, V> IntoIterator for BTreeMap<K, V> {
 
     fn into_iter(self) -> IntoIter<K, V> {
         let mut me = ManuallyDrop::new(self);
-        if let Some(root) = me.root.as_mut() {
-            let root1 = unsafe { ptr::read(root).into_ref() };
-            let root2 = unsafe { ptr::read(root).into_ref() };
-            let len = me.length;
-
-            IntoIter {
-                front: Some(root1.first_leaf_edge()),
-                back: Some(root2.last_leaf_edge()),
-                length: len,
-            }
+        if let Some(root) = me.root.take() {
+            let (f, b) = full_range_search(root.into_ref());
+
+            IntoIter { front: Some(f), back: Some(b), length: me.length }
         } else {
             IntoIter { front: None, back: None, length: 0 }
         }
@@ -1656,6 +1611,14 @@ impl<'a, K, V> Iterator for Keys<'a, K, V> {
     fn last(mut self) -> Option<&'a K> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<&'a K> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<&'a K> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1788,7 +1751,7 @@ impl<'a, K: 'a, V: 'a> DrainFilterInner<'a, K, V> {
         &mut self,
     ) -> Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>> {
         let edge = self.cur_leaf_edge.as_ref()?;
-        ptr::read(edge).next_kv().ok()
+        unsafe { ptr::read(edge).next_kv().ok() }
     }
 
     /// Implementation of a typical `DrainFilter::next` method, given the predicate.
@@ -1829,6 +1792,14 @@ impl<'a, K, V> Iterator for Range<'a, K, V> {
     fn last(mut self) -> Option<(&'a K, &'a V)> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<(&'a K, &'a V)> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<(&'a K, &'a V)> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "map_values_mut", since = "1.10.0")]
@@ -1871,7 +1842,7 @@ impl<'a, K, V> Range<'a, K, V> {
     }
 
     unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
-        unwrap_unchecked(self.front.as_mut()).next_unchecked()
+        unsafe { unwrap_unchecked(self.front.as_mut()).next_unchecked() }
     }
 }
 
@@ -1884,7 +1855,7 @@ impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
 
 impl<'a, K, V> Range<'a, K, V> {
     unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
-        unwrap_unchecked(self.back.as_mut()).next_back_unchecked()
+        unsafe { unwrap_unchecked(self.back.as_mut()).next_back_unchecked() }
     }
 }
 
@@ -1914,6 +1885,14 @@ impl<'a, K, V> Iterator for RangeMut<'a, K, V> {
     fn last(mut self) -> Option<(&'a K, &'a mut V)> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<(&'a K, &'a mut V)> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<(&'a K, &'a mut V)> {
+        self.next_back()
+    }
 }
 
 impl<'a, K, V> RangeMut<'a, K, V> {
@@ -1922,7 +1901,7 @@ impl<'a, K, V> RangeMut<'a, K, V> {
     }
 
     unsafe fn next_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
-        unwrap_unchecked(self.front.as_mut()).next_unchecked()
+        unsafe { unwrap_unchecked(self.front.as_mut()).next_unchecked() }
     }
 }
 
@@ -1943,7 +1922,7 @@ impl<K, V> FusedIterator for RangeMut<'_, K, V> {}
 
 impl<'a, K, V> RangeMut<'a, K, V> {
     unsafe fn next_back_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
-        unwrap_unchecked(self.back.as_mut()).next_back_unchecked()
+        unsafe { unwrap_unchecked(self.back.as_mut()).next_back_unchecked() }
     }
 }
 
@@ -1964,6 +1943,11 @@ impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
             self.insert(k, v);
         });
     }
+
+    #[inline]
+    fn extend_one(&mut self, (k, v): (K, V)) {
+        self.insert(k, v);
+    }
 }
 
 #[stable(feature = "extend_ref", since = "1.2.0")]
@@ -1971,6 +1955,11 @@ impl<'a, K: Ord + Copy, V: Copy> Extend<(&'a K, &'a V)> for BTreeMap<K, V> {
     fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
         self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
     }
+
+    #[inline]
+    fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) {
+        self.insert(k, v);
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -2042,9 +2031,9 @@ where
     }
 }
 
+/// Finds the leaf edges delimiting a specified range in or underneath a node.
 fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeBounds<Q>>(
-    root1: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
-    root2: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+    root: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
     range: R,
 ) -> (
     Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
@@ -2064,8 +2053,10 @@ where
         _ => {}
     };
 
-    let mut min_node = root1;
-    let mut max_node = root2;
+    // We duplicate the root NodeRef here -- we will never access it in a way
+    // that overlaps references obtained from the root.
+    let mut min_node = unsafe { ptr::read(&root) };
+    let mut max_node = root;
     let mut min_found = false;
     let mut max_found = false;
 
@@ -2126,6 +2117,33 @@ where
     }
 }
 
+/// Equivalent to `range_search(k, v, ..)` without the `Ord` bound.
+fn full_range_search<BorrowType, K, V>(
+    root: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+) -> (
+    Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
+    Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
+) {
+    // We duplicate the root NodeRef here -- we will never access it in a way
+    // that overlaps references obtained from the root.
+    let mut min_node = unsafe { ptr::read(&root) };
+    let mut max_node = root;
+    loop {
+        let front = min_node.first_edge();
+        let back = max_node.last_edge();
+        match (front.force(), back.force()) {
+            (Leaf(f), Leaf(b)) => {
+                return (f, b);
+            }
+            (Internal(min_int), Internal(max_int)) => {
+                min_node = min_int.descend();
+                max_node = max_int.descend();
+            }
+            _ => unreachable!("BTreeMap has different depths"),
+        };
+    }
+}
+
 impl<K, V> BTreeMap<K, V> {
     /// Gets an iterator over the entries of the map, sorted by key.
     ///
@@ -2150,12 +2168,12 @@ impl<K, V> BTreeMap<K, V> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn iter(&self) -> Iter<'_, K, V> {
-        Iter {
-            range: Range {
-                front: self.root.as_ref().map(|r| r.as_ref().first_leaf_edge()),
-                back: self.root.as_ref().map(|r| r.as_ref().last_leaf_edge()),
-            },
-            length: self.length,
+        if let Some(root) = &self.root {
+            let (f, b) = full_range_search(root.as_ref());
+
+            Iter { range: Range { front: Some(f), back: Some(b) }, length: self.length }
+        } else {
+            Iter { range: Range { front: None, back: None }, length: 0 }
         }
     }
 
@@ -2182,19 +2200,15 @@ impl<K, V> BTreeMap<K, V> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
-        IterMut {
-            range: if let Some(root) = &mut self.root {
-                let root1 = root.as_mut();
-                let root2 = unsafe { ptr::read(&root1) };
-                RangeMut {
-                    front: Some(root1.first_leaf_edge()),
-                    back: Some(root2.last_leaf_edge()),
-                    _marker: PhantomData,
-                }
-            } else {
-                RangeMut { front: None, back: None, _marker: PhantomData }
-            },
-            length: self.length,
+        if let Some(root) = &mut self.root {
+            let (f, b) = full_range_search(root.as_mut());
+
+            IterMut {
+                range: RangeMut { front: Some(f), back: Some(b), _marker: PhantomData },
+                length: self.length,
+            }
+        } else {
+            IterMut { range: RangeMut { front: None, back: None, _marker: PhantomData }, length: 0 }
         }
     }
 
@@ -2503,15 +2517,14 @@ impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
     ///
     /// ```
     /// use std::collections::BTreeMap;
+    /// use std::collections::btree_map::Entry;
     ///
-    /// let mut count: BTreeMap<&str, usize> = BTreeMap::new();
+    /// let mut map: BTreeMap<&str, u32> = BTreeMap::new();
     ///
-    /// // count the number of occurrences of letters in the vec
-    /// for x in vec!["a","b","a","c","a","b"] {
-    ///     *count.entry(x).or_insert(0) += 1;
+    /// if let Entry::Vacant(o) = map.entry("poneyland") {
+    ///     o.insert(37);
     /// }
-    ///
-    /// assert_eq!(count["a"], 3);
+    /// assert_eq!(map["poneyland"], 37);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn insert(self, value: V) -> &'a mut V {
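
Making `BTreeMap::new` a `const fn` above (and `BTreeSet::new` below) is possible because the empty map is just `BTreeMap { root: None, length: 0 }` and allocates nothing; it lets empty maps appear in `const` and `static` items. A sketch assuming a toolchain where the `const_btree_new` gate from the hunk above is still unstable:

    #![feature(const_btree_new)]

    use std::collections::BTreeMap;

    // Constructed in a const context; no allocation until the first insert.
    static TABLE: BTreeMap<&'static str, u32> = BTreeMap::new();

    fn main() {
        assert!(TABLE.is_empty());
    }
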
diff --git a/src/liballoc/collections/btree/mod.rs b/src/liballoc/collections/btree/mod.rs
index fb5825ee21a..543ff41a4d4 100644
--- a/src/liballoc/collections/btree/mod.rs
+++ b/src/liballoc/collections/btree/mod.rs
@@ -19,7 +19,9 @@ pub unsafe fn unwrap_unchecked<T>(val: Option<T>) -> T {
         if cfg!(debug_assertions) {
             panic!("'unchecked' unwrap on None in BTreeMap");
         } else {
-            core::intrinsics::unreachable();
+            unsafe {
+                core::intrinsics::unreachable();
+            }
         }
     })
 }
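
`unwrap_unchecked` above panics on `None` under `cfg!(debug_assertions)` and lowers to `unreachable` otherwise, so invariant violations surface in debug and test builds while release builds pay no check. The same idiom can be written with the stable `std::hint::unreachable_unchecked` in place of the intrinsic; a sketch:

    /// Safety: the caller must guarantee that `val` is `Some`.
    unsafe fn unwrap_unchecked<T>(val: Option<T>) -> T {
        val.unwrap_or_else(|| {
            if cfg!(debug_assertions) {
                panic!("'unchecked' unwrap on None");
            } else {
                // Safety: forwarded from the caller's guarantee above.
                unsafe { std::hint::unreachable_unchecked() }
            }
        })
    }
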
diff --git a/src/liballoc/collections/btree/navigate.rs b/src/liballoc/collections/btree/navigate.rs
index 5e8dcf247ae..5478d822438 100644
--- a/src/liballoc/collections/btree/navigate.rs
+++ b/src/liballoc/collections/btree/navigate.rs
@@ -64,8 +64,10 @@ macro_rules! def_next_kv_uncheched_dealloc {
                 edge = match edge.$adjacent_kv() {
                     Ok(internal_kv) => return internal_kv,
                     Err(last_edge) => {
-                        let parent_edge = last_edge.into_node().deallocate_and_ascend();
-                        unwrap_unchecked(parent_edge).forget_node_type()
+                        unsafe {
+                            let parent_edge = last_edge.into_node().deallocate_and_ascend();
+                            unwrap_unchecked(parent_edge).forget_node_type()
+                        }
                     }
                 }
             }
@@ -82,9 +84,11 @@ def_next_kv_uncheched_dealloc! {unsafe fn next_back_kv_unchecked_dealloc: left_k
 /// Safety: The change closure must not panic.
 #[inline]
 unsafe fn replace<T, R>(v: &mut T, change: impl FnOnce(T) -> (T, R)) -> R {
-    let value = ptr::read(v);
+    let value = unsafe { ptr::read(v) };
     let (new_value, ret) = change(value);
-    ptr::write(v, new_value);
+    unsafe {
+        ptr::write(v, new_value);
+    }
     ret
 }
 
@@ -93,22 +97,26 @@ impl<'a, K, V> Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Ed
     /// key and value in between.
     /// Unsafe because the caller must ensure that the leaf edge is not the last one in the tree.
     pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
-        replace(self, |leaf_edge| {
-            let kv = leaf_edge.next_kv();
-            let kv = unwrap_unchecked(kv.ok());
-            (kv.next_leaf_edge(), kv.into_kv())
-        })
+        unsafe {
+            replace(self, |leaf_edge| {
+                let kv = leaf_edge.next_kv();
+                let kv = unwrap_unchecked(kv.ok());
+                (kv.next_leaf_edge(), kv.into_kv())
+            })
+        }
     }
 
     /// Moves the leaf edge handle to the previous leaf edge and returns references to the
     /// key and value in between.
     /// Unsafe because the caller must ensure that the leaf edge is not the first one in the tree.
     pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
-        replace(self, |leaf_edge| {
-            let kv = leaf_edge.next_back_kv();
-            let kv = unwrap_unchecked(kv.ok());
-            (kv.next_back_leaf_edge(), kv.into_kv())
-        })
+        unsafe {
+            replace(self, |leaf_edge| {
+                let kv = leaf_edge.next_back_kv();
+                let kv = unwrap_unchecked(kv.ok());
+                (kv.next_back_leaf_edge(), kv.into_kv())
+            })
+        }
     }
 }
 
@@ -119,14 +127,16 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge
     /// - The caller must ensure that the leaf edge is not the last one in the tree.
     /// - Using the updated handle may well invalidate the returned references.
     pub unsafe fn next_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
-        let kv = replace(self, |leaf_edge| {
-            let kv = leaf_edge.next_kv();
-            let kv = unwrap_unchecked(kv.ok());
-            (ptr::read(&kv).next_leaf_edge(), kv)
-        });
-        // Doing the descend (and perhaps another move) invalidates the references
-        // returned by `into_kv_mut`, so we have to do this last.
-        kv.into_kv_mut()
+        unsafe {
+            let kv = replace(self, |leaf_edge| {
+                let kv = leaf_edge.next_kv();
+                let kv = unwrap_unchecked(kv.ok());
+                (ptr::read(&kv).next_leaf_edge(), kv)
+            });
+            // Doing the descend (and perhaps another move) invalidates the references
+            // returned by `into_kv_mut`, so we have to do this last.
+            kv.into_kv_mut()
+        }
     }
 
     /// Moves the leaf edge handle to the previous leaf and returns references to the
@@ -135,14 +145,16 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge
     /// - The caller must ensure that the leaf edge is not the first one in the tree.
     /// - Using the updated handle may well invalidate the returned references.
     pub unsafe fn next_back_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
-        let kv = replace(self, |leaf_edge| {
-            let kv = leaf_edge.next_back_kv();
-            let kv = unwrap_unchecked(kv.ok());
-            (ptr::read(&kv).next_back_leaf_edge(), kv)
-        });
-        // Doing the descend (and perhaps another move) invalidates the references
-        // returned by `into_kv_mut`, so we have to do this last.
-        kv.into_kv_mut()
+        unsafe {
+            let kv = replace(self, |leaf_edge| {
+                let kv = leaf_edge.next_back_kv();
+                let kv = unwrap_unchecked(kv.ok());
+                (ptr::read(&kv).next_back_leaf_edge(), kv)
+            });
+            // Doing the descend (and perhaps another move) invalidates the references
+            // returned by `into_kv_mut`, so we have to do this last.
+            kv.into_kv_mut()
+        }
     }
 }
 
@@ -159,12 +171,14 @@ impl<K, V> Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge> {
     ///   if the two preconditions above hold.
     /// - Using the updated handle may well invalidate the returned references.
     pub unsafe fn next_unchecked(&mut self) -> (K, V) {
-        replace(self, |leaf_edge| {
-            let kv = next_kv_unchecked_dealloc(leaf_edge);
-            let k = ptr::read(kv.reborrow().into_kv().0);
-            let v = ptr::read(kv.reborrow().into_kv().1);
-            (kv.next_leaf_edge(), (k, v))
-        })
+        unsafe {
+            replace(self, |leaf_edge| {
+                let kv = next_kv_unchecked_dealloc(leaf_edge);
+                let k = ptr::read(kv.reborrow().into_kv().0);
+                let v = ptr::read(kv.reborrow().into_kv().1);
+                (kv.next_leaf_edge(), (k, v))
+            })
+        }
     }
 
     /// Moves the leaf edge handle to the previous leaf edge and returns the key
@@ -179,12 +193,14 @@ impl<K, V> Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge> {
     ///   if the two preconditions above hold.
     /// - Using the updated handle may well invalidate the returned references.
     pub unsafe fn next_back_unchecked(&mut self) -> (K, V) {
-        replace(self, |leaf_edge| {
-            let kv = next_back_kv_unchecked_dealloc(leaf_edge);
-            let k = ptr::read(kv.reborrow().into_kv().0);
-            let v = ptr::read(kv.reborrow().into_kv().1);
-            (kv.next_back_leaf_edge(), (k, v))
-        })
+        unsafe {
+            replace(self, |leaf_edge| {
+                let kv = next_back_kv_unchecked_dealloc(leaf_edge);
+                let k = ptr::read(kv.reborrow().into_kv().0);
+                let v = ptr::read(kv.reborrow().into_kv().1);
+                (kv.next_back_leaf_edge(), (k, v))
+            })
+        }
     }
 }
 
diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs
index 84d34db2d45..a4b6cf12a23 100644
--- a/src/liballoc/collections/btree/node.rs
+++ b/src/liballoc/collections/btree/node.rs
@@ -107,7 +107,7 @@ impl<K, V> InternalNode<K, V> {
     /// `len` of 0), there must be one initialized and valid edge. This function does not set up
     /// such an edge.
     unsafe fn new() -> Self {
-        InternalNode { data: LeafNode::new(), edges: [MaybeUninit::UNINIT; 2 * B] }
+        InternalNode { data: unsafe { LeafNode::new() }, edges: [MaybeUninit::UNINIT; 2 * B] }
     }
 }
 
@@ -131,7 +131,7 @@ impl<K, V> BoxedNode<K, V> {
     }
 
     unsafe fn from_ptr(ptr: NonNull<LeafNode<K, V>>) -> Self {
-        BoxedNode { ptr: Unique::from(ptr) }
+        BoxedNode { ptr: unsafe { Unique::new_unchecked(ptr.as_ptr()) } }
     }
 
     fn as_ptr(&self) -> NonNull<LeafNode<K, V>> {
@@ -392,14 +392,16 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
         let height = self.height;
         let node = self.node;
         let ret = self.ascend().ok();
-        Global.dealloc(
-            node.cast(),
-            if height > 0 {
-                Layout::new::<InternalNode<K, V>>()
-            } else {
-                Layout::new::<LeafNode<K, V>>()
-            },
-        );
+        unsafe {
+            Global.dealloc(
+                node.cast(),
+                if height > 0 {
+                    Layout::new::<InternalNode<K, V>>()
+                } else {
+                    Layout::new::<LeafNode<K, V>>()
+                },
+            );
+        }
         ret
     }
 }
@@ -565,7 +567,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
         debug_assert!(first <= self.len());
         debug_assert!(after_last <= self.len() + 1);
         for i in first..after_last {
-            Handle::new_edge(self.reborrow_mut(), i).correct_parent_link();
+            unsafe { Handle::new_edge(self.reborrow_mut(), i) }.correct_parent_link();
         }
     }
 
@@ -789,7 +791,7 @@ impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeT
         &mut self,
     ) -> Handle<NodeRef<marker::Mut<'_>, K, V, NodeType>, HandleType> {
         // We can't use Handle::new_kv or Handle::new_edge because we don't know our type
-        Handle { node: self.node.reborrow_mut(), idx: self.idx, _marker: PhantomData }
+        Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData }
     }
 }
 
@@ -885,7 +887,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
     unsafe fn cast_unchecked<NewType>(
         &mut self,
     ) -> Handle<NodeRef<marker::Mut<'_>, K, V, NewType>, marker::Edge> {
-        Handle::new_edge(self.node.cast_unchecked(), self.idx)
+        unsafe { Handle::new_edge(self.node.cast_unchecked(), self.idx) }
     }
 
     /// Inserts a new key/value pair and an edge that will go to the right of that new pair
@@ -1330,8 +1332,10 @@ unsafe fn move_kv<K, V>(
     dest_offset: usize,
     count: usize,
 ) {
-    ptr::copy_nonoverlapping(source.0.add(source_offset), dest.0.add(dest_offset), count);
-    ptr::copy_nonoverlapping(source.1.add(source_offset), dest.1.add(dest_offset), count);
+    unsafe {
+        ptr::copy_nonoverlapping(source.0.add(source_offset), dest.0.add(dest_offset), count);
+        ptr::copy_nonoverlapping(source.1.add(source_offset), dest.1.add(dest_offset), count);
+    }
 }
 
 // Source and destination must have the same height.
@@ -1344,8 +1348,10 @@ unsafe fn move_edges<K, V>(
 ) {
     let source_ptr = source.as_internal_mut().edges.as_mut_ptr();
     let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr();
-    ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr.add(dest_offset), count);
-    dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
+    unsafe {
+        ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr.add(dest_offset), count);
+        dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
+    }
 }
 
 impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
@@ -1459,12 +1465,16 @@ pub mod marker {
 }
 
 unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) {
-    ptr::copy(slice.as_ptr().add(idx), slice.as_mut_ptr().add(idx + 1), slice.len() - idx);
-    ptr::write(slice.get_unchecked_mut(idx), val);
+    unsafe {
+        ptr::copy(slice.as_ptr().add(idx), slice.as_mut_ptr().add(idx + 1), slice.len() - idx);
+        ptr::write(slice.get_unchecked_mut(idx), val);
+    }
 }
 
 unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T {
-    let ret = ptr::read(slice.get_unchecked(idx));
-    ptr::copy(slice.as_ptr().add(idx + 1), slice.as_mut_ptr().add(idx), slice.len() - idx - 1);
-    ret
+    unsafe {
+        let ret = ptr::read(slice.get_unchecked(idx));
+        ptr::copy(slice.as_ptr().add(idx + 1), slice.as_mut_ptr().add(idx), slice.len() - idx - 1);
+        ret
+    }
 }
diff --git a/src/liballoc/collections/btree/set.rs b/src/liballoc/collections/btree/set.rs
index 9bf483f269f..d8959966fe5 100644
--- a/src/liballoc/collections/btree/set.rs
+++ b/src/liballoc/collections/btree/set.rs
@@ -309,7 +309,8 @@ impl<T: Ord> BTreeSet<T> {
     /// let mut set: BTreeSet<i32> = BTreeSet::new();
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn new() -> BTreeSet<T> {
+    #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+    pub const fn new() -> BTreeSet<T> {
         BTreeSet { map: BTreeMap::new() }
     }
 
@@ -1151,6 +1152,11 @@ impl<T: Ord> Extend<T> for BTreeSet<T> {
             self.insert(elem);
         });
     }
+
+    #[inline]
+    fn extend_one(&mut self, elem: T) {
+        self.insert(elem);
+    }
 }
 
 #[stable(feature = "extend_ref", since = "1.2.0")]
@@ -1158,6 +1164,11 @@ impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BTreeSet<T> {
     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
         self.extend(iter.into_iter().cloned());
     }
+
+    #[inline]
+    fn extend_one(&mut self, &elem: &'a T) {
+        self.insert(elem);
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1280,12 +1291,22 @@ impl<'a, T> Iterator for Iter<'a, T> {
     fn next(&mut self) -> Option<&'a T> {
         self.iter.next()
     }
+
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.iter.size_hint()
     }
+
     fn last(mut self) -> Option<&'a T> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<&'a T> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
@@ -1310,6 +1331,7 @@ impl<T> Iterator for IntoIter<T> {
     fn next(&mut self) -> Option<T> {
         self.iter.next().map(|(k, _)| k)
     }
+
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.iter.size_hint()
     }
@@ -1348,6 +1370,14 @@ impl<'a, T> Iterator for Range<'a, T> {
     fn last(mut self) -> Option<&'a T> {
         self.next_back()
     }
+
+    fn min(mut self) -> Option<&'a T> {
+        self.next()
+    }
+
+    fn max(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "btree_range", since = "1.17.0")]
@@ -1418,6 +1448,10 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> {
         };
         (self_len.saturating_sub(other_len), Some(self_len))
     }
+
+    fn min(mut self) -> Option<&'a T> {
+        self.next()
+    }
 }
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -1449,6 +1483,10 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> {
         // the number of elements to less than half the range of usize.
         (0, Some(a_len + b_len))
     }
+
+    fn min(mut self) -> Option<&'a T> {
+        self.next()
+    }
 }
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -1505,6 +1543,10 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> {
             IntersectionInner::Answer(Some(_)) => (1, Some(1)),
         }
     }
+
+    fn min(mut self) -> Option<&'a T> {
+        self.next()
+    }
 }
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -1530,6 +1572,10 @@ impl<'a, T: Ord> Iterator for Union<'a, T> {
         // No checked_add - see SymmetricDifference::size_hint.
         (max(a_len, b_len), Some(a_len + b_len))
     }
+
+    fn min(mut self) -> Option<&'a T> {
+        self.next()
+    }
 }
 
 #[stable(feature = "fused", since = "1.26.0")]
diff --git a/src/liballoc/collections/linked_list.rs b/src/liballoc/collections/linked_list.rs
index af341e6c1ca..36b5785fdf6 100644
--- a/src/liballoc/collections/linked_list.rs
+++ b/src/liballoc/collections/linked_list.rs
@@ -143,7 +143,7 @@ impl<T> LinkedList<T> {
         unsafe {
             node.next = self.head;
             node.prev = None;
-            let node = Some(Box::into_raw_non_null(node));
+            let node = Some(Box::leak(node).into());
 
             match self.head {
                 None => self.tail = node,
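
The unstable `Box::into_raw_non_null` is replaced here by the stable `Box::leak` followed by the `From<&mut T> for NonNull<T>` conversion, which yields the same `NonNull` pointer. A minimal standalone sketch of that pattern:

```rust
use std::ptr::NonNull;

fn main() {
    // Leak the box to get a `&'static mut T`, then convert it into a
    // `NonNull<T>`; this is the shape of the replacement above.
    let leaked: NonNull<i32> = Box::leak(Box::new(5)).into();
    // Reclaim ownership so the allocation is freed again (the list's
    // Drop impl plays this role in liballoc).
    unsafe { drop(Box::from_raw(leaked.as_ptr())) };
}
```
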
@@ -184,7 +184,7 @@ impl<T> LinkedList<T> {
         unsafe {
             node.next = None;
             node.prev = self.tail;
-            let node = Some(Box::into_raw_non_null(node));
+            let node = Some(Box::leak(node).into());
 
             match self.tail {
                 None => self.head = node,
@@ -225,17 +225,17 @@ impl<T> LinkedList<T> {
     /// maintain validity of aliasing pointers.
     #[inline]
     unsafe fn unlink_node(&mut self, mut node: NonNull<Node<T>>) {
-        let node = node.as_mut(); // this one is ours now, we can create an &mut.
+        let node = unsafe { node.as_mut() }; // this one is ours now, we can create an &mut.
 
         // Not creating new mutable (unique!) references overlapping `element`.
         match node.prev {
-            Some(prev) => (*prev.as_ptr()).next = node.next,
+            Some(prev) => unsafe { (*prev.as_ptr()).next = node.next },
             // this node is the head node
             None => self.head = node.next,
         };
 
         match node.next {
-            Some(next) => (*next.as_ptr()).prev = node.prev,
+            Some(next) => unsafe { (*next.as_ptr()).prev = node.prev },
             // this node is the tail node
             None => self.tail = node.prev,
         };
@@ -258,17 +258,23 @@ impl<T> LinkedList<T> {
         // This method takes care not to create multiple mutable references to whole nodes at the same time,
         // to maintain validity of aliasing pointers into `element`.
         if let Some(mut existing_prev) = existing_prev {
-            existing_prev.as_mut().next = Some(splice_start);
+            unsafe {
+                existing_prev.as_mut().next = Some(splice_start);
+            }
         } else {
             self.head = Some(splice_start);
         }
         if let Some(mut existing_next) = existing_next {
-            existing_next.as_mut().prev = Some(splice_end);
+            unsafe {
+                existing_next.as_mut().prev = Some(splice_end);
+            }
         } else {
             self.tail = Some(splice_end);
         }
-        splice_start.as_mut().prev = existing_prev;
-        splice_end.as_mut().next = existing_next;
+        unsafe {
+            splice_start.as_mut().prev = existing_prev;
+            splice_end.as_mut().next = existing_next;
+        }
 
         self.len += splice_length;
     }
@@ -297,9 +303,13 @@ impl<T> LinkedList<T> {
         if let Some(mut split_node) = split_node {
             let first_part_head;
             let first_part_tail;
-            first_part_tail = split_node.as_mut().prev.take();
+            unsafe {
+                first_part_tail = split_node.as_mut().prev.take();
+            }
             if let Some(mut tail) = first_part_tail {
-                tail.as_mut().next = None;
+                unsafe {
+                    tail.as_mut().next = None;
+                }
                 first_part_head = self.head;
             } else {
                 first_part_head = None;
@@ -333,9 +343,13 @@ impl<T> LinkedList<T> {
         if let Some(mut split_node) = split_node {
             let second_part_head;
             let second_part_tail;
-            second_part_head = split_node.as_mut().next.take();
+            unsafe {
+                second_part_head = split_node.as_mut().next.take();
+            }
             if let Some(mut head) = second_part_head {
-                head.as_mut().prev = None;
+                unsafe {
+                    head.as_mut().prev = None;
+                }
                 second_part_tail = self.tail;
             } else {
                 second_part_tail = None;
@@ -972,7 +986,7 @@ unsafe impl<#[may_dangle] T> Drop for LinkedList<T> {
             fn drop(&mut self) {
                 // Continue the same loop we do below. This only runs when a destructor has
                 // panicked. If another one panics this will abort.
-                while let Some(_) = self.0.pop_front_node() {}
+                while self.0.pop_front_node().is_some() {}
             }
         }
 
@@ -1133,11 +1147,9 @@ impl<T> IterMut<'_, T> {
                     Some(prev) => prev,
                 };
 
-                let node = Some(Box::into_raw_non_null(box Node {
-                    next: Some(head),
-                    prev: Some(prev),
-                    element,
-                }));
+                let node = Some(
+                    Box::leak(box Node { next: Some(head), prev: Some(prev), element }).into(),
+                );
 
                 // Not creating references to entire nodes to not invalidate the
                 // reference to `element` we handed to the user.
@@ -1450,7 +1462,7 @@ impl<'a, T> CursorMut<'a, T> {
     #[unstable(feature = "linked_list_cursors", issue = "58533")]
     pub fn insert_after(&mut self, item: T) {
         unsafe {
-            let spliced_node = Box::into_raw_non_null(Box::new(Node::new(item)));
+            let spliced_node = Box::leak(Box::new(Node::new(item))).into();
             let node_next = match self.current {
                 None => self.list.head,
                 Some(node) => node.as_ref().next,
@@ -1470,7 +1482,7 @@ impl<'a, T> CursorMut<'a, T> {
     #[unstable(feature = "linked_list_cursors", issue = "58533")]
     pub fn insert_before(&mut self, item: T) {
         unsafe {
-            let spliced_node = Box::into_raw_non_null(Box::new(Node::new(item)));
+            let spliced_node = Box::leak(Box::new(Node::new(item))).into();
             let node_prev = match self.current {
                 None => self.list.tail,
                 Some(node) => node.as_ref().prev,
@@ -1498,6 +1510,31 @@ impl<'a, T> CursorMut<'a, T> {
         }
     }
 
+    /// Removes the current element from the `LinkedList` without deallocating the list node.
+    ///
+    /// The node that was removed is returned as a new `LinkedList` containing only this node.
+    /// The cursor is moved to point to the next element in the current `LinkedList`.
+    ///
+    /// If the cursor is currently pointing to the "ghost" non-element then no element
+    /// is removed and `None` is returned.
+    #[unstable(feature = "linked_list_cursors", issue = "58533")]
+    pub fn remove_current_as_list(&mut self) -> Option<LinkedList<T>> {
+        let mut unlinked_node = self.current?;
+        unsafe {
+            self.current = unlinked_node.as_ref().next;
+            self.list.unlink_node(unlinked_node);
+
+            unlinked_node.as_mut().prev = None;
+            unlinked_node.as_mut().next = None;
+            Some(LinkedList {
+                head: Some(unlinked_node),
+                tail: Some(unlinked_node),
+                len: 1,
+                marker: PhantomData,
+            })
+        }
+    }
+
     /// Inserts the elements from the given `LinkedList` after the current one.
     ///
     /// If the cursor is pointing at the "ghost" non-element then the new elements are
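
A hedged usage sketch of the new `remove_current_as_list`, assuming the nightly `linked_list_cursors` API of the time (`cursor_front_mut`, `move_next`):

```rust
#![feature(linked_list_cursors)]
use std::collections::LinkedList;

fn main() {
    let mut list: LinkedList<_> = vec![1, 2, 3].into_iter().collect();
    let mut cursor = list.cursor_front_mut();
    cursor.move_next(); // now pointing at 2
    // The removed node comes back as a one-element list; no deallocation.
    let removed = cursor.remove_current_as_list().unwrap();
    assert_eq!(removed.into_iter().collect::<Vec<_>>(), vec![2]);
    assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 3]);
}
```
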
@@ -1725,6 +1762,11 @@ impl<T> Extend<T> for LinkedList<T> {
     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
         <Self as SpecExtend<I>>::spec_extend(self, iter);
     }
+
+    #[inline]
+    fn extend_one(&mut self, elem: T) {
+        self.push_back(elem);
+    }
 }
 
 impl<I: IntoIterator> SpecExtend<I> for LinkedList<I::Item> {
@@ -1744,6 +1786,11 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList<T> {
     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
         self.extend(iter.into_iter().cloned());
     }
+
+    #[inline]
+    fn extend_one(&mut self, &elem: &'a T) {
+        self.push_back(elem);
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1843,3 +1890,15 @@ unsafe impl<T: Send> Send for IterMut<'_, T> {}
 
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Send for Cursor<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Sync for Cursor<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Send> Send for CursorMut<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Sync for CursorMut<'_, T> {}
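
The cursor `Send`/`Sync` bounds above mirror those of references into the list: an immutable `Cursor` behaves like a shared reference (so it needs `T: Sync`), while `CursorMut` behaves like a mutable one. A stable-Rust analogy, not the nightly `Cursor` type itself:

```rust
use std::collections::LinkedList;

fn assert_send<T: Send>(_: &T) {}
fn assert_sync<T: Sync>(_: &T) {}

fn main() {
    let list: LinkedList<i32> = [1, 2, 3].iter().copied().collect();
    // `&LinkedList<i32>` is Send/Sync because `i32: Sync`; the new
    // `Cursor<'_, T>` impls require exactly the same bound.
    assert_send(&&list);
    assert_sync(&&list);
}
```
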
diff --git a/src/liballoc/collections/linked_list/tests.rs b/src/liballoc/collections/linked_list/tests.rs
index 085f734ed91..b8c93a28bba 100644
--- a/src/liballoc/collections/linked_list/tests.rs
+++ b/src/liballoc/collections/linked_list/tests.rs
@@ -182,7 +182,6 @@ fn test_insert_prev() {
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
 fn test_send() {
     let n = list_from(&[1, 2, 3]);
     thread::spawn(move || {
diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs
index 091b068b0b2..2efb94e8afe 100644
--- a/src/liballoc/collections/vec_deque.rs
+++ b/src/liballoc/collections/vec_deque.rs
@@ -7,6 +7,8 @@
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
+// ignore-tidy-filelength
+
 use core::array::LengthAtMost32;
 use core::cmp::{self, Ordering};
 use core::fmt;
@@ -50,6 +52,7 @@ const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of
 /// [`pop_front`]: #method.pop_front
 /// [`extend`]: #method.extend
 /// [`append`]: #method.append
+#[cfg_attr(not(test), rustc_diagnostic_item = "vecdeque_type")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct VecDeque<T> {
     // tail and head are pointers into the buffer. Tail always points
@@ -72,7 +75,7 @@ pub struct VecDeque<T> {
 /// It produces the following sequence of matching slices:
 ///
 /// ([0 1], [a b])
-/// ([2], [c])
+/// (\[2\], \[c\])
 /// ([3 4], [d e])
 ///
 /// and the uneven remainder of either A or B is skipped.
@@ -200,25 +203,27 @@ impl<T> VecDeque<T> {
     /// Turn ptr into a slice
     #[inline]
     unsafe fn buffer_as_slice(&self) -> &[T] {
-        slice::from_raw_parts(self.ptr(), self.cap())
+        unsafe { slice::from_raw_parts(self.ptr(), self.cap()) }
     }
 
     /// Turn ptr into a mut slice
     #[inline]
     unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
-        slice::from_raw_parts_mut(self.ptr(), self.cap())
+        unsafe { slice::from_raw_parts_mut(self.ptr(), self.cap()) }
     }
 
     /// Moves an element out of the buffer
     #[inline]
     unsafe fn buffer_read(&mut self, off: usize) -> T {
-        ptr::read(self.ptr().add(off))
+        unsafe { ptr::read(self.ptr().add(off)) }
     }
 
     /// Writes an element into the buffer, moving it.
     #[inline]
     unsafe fn buffer_write(&mut self, off: usize, value: T) {
-        ptr::write(self.ptr().add(off), value);
+        unsafe {
+            ptr::write(self.ptr().add(off), value);
+        }
     }
 
     /// Returns `true` if the buffer is at full capacity.
@@ -267,7 +272,9 @@ impl<T> VecDeque<T> {
             len,
             self.cap()
         );
-        ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
+        unsafe {
+            ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
+        }
     }
 
     /// Copies a contiguous block of memory len long from src to dst
@@ -289,7 +296,9 @@ impl<T> VecDeque<T> {
             len,
             self.cap()
         );
-        ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
+        unsafe {
+            ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
+        }
     }
 
     /// Copies a potentially wrapping block of memory len long from src to dest.
@@ -329,7 +338,9 @@ impl<T> VecDeque<T> {
                 // 2 [_ _ A A A A B B _]
                 //            D . . .
                 //
-                self.copy(dst, src, len);
+                unsafe {
+                    self.copy(dst, src, len);
+                }
             }
             (false, false, true) => {
                 // dst before src, src doesn't wrap, dst wraps
@@ -340,8 +351,10 @@ impl<T> VecDeque<T> {
                 // 3 [B B B B _ _ _ A A]
                 //    . .           D .
                 //
-                self.copy(dst, src, dst_pre_wrap_len);
-                self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+                unsafe {
+                    self.copy(dst, src, dst_pre_wrap_len);
+                    self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+                }
             }
             (true, false, true) => {
                 // src before dst, src doesn't wrap, dst wraps
@@ -352,8 +365,10 @@ impl<T> VecDeque<T> {
                 // 3 [B B _ _ _ A A A A]
                 //    . .           D .
                 //
-                self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
-                self.copy(dst, src, dst_pre_wrap_len);
+                unsafe {
+                    self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+                    self.copy(dst, src, dst_pre_wrap_len);
+                }
             }
             (false, true, false) => {
                 // dst before src, src wraps, dst doesn't wrap
@@ -364,8 +379,10 @@ impl<T> VecDeque<T> {
                 // 3 [C C _ _ _ B B C C]
                 //              D . . .
                 //
-                self.copy(dst, src, src_pre_wrap_len);
-                self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+                unsafe {
+                    self.copy(dst, src, src_pre_wrap_len);
+                    self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+                }
             }
             (true, true, false) => {
                 // src before dst, src wraps, dst doesn't wrap
@@ -376,8 +393,10 @@ impl<T> VecDeque<T> {
                 // 3 [C C A A _ _ _ C C]
                 //    D . . .
                 //
-                self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
-                self.copy(dst, src, src_pre_wrap_len);
+                unsafe {
+                    self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+                    self.copy(dst, src, src_pre_wrap_len);
+                }
             }
             (false, true, true) => {
                 // dst before src, src wraps, dst wraps
@@ -391,9 +410,11 @@ impl<T> VecDeque<T> {
                 //
                 debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
                 let delta = dst_pre_wrap_len - src_pre_wrap_len;
-                self.copy(dst, src, src_pre_wrap_len);
-                self.copy(dst + src_pre_wrap_len, 0, delta);
-                self.copy(0, delta, len - dst_pre_wrap_len);
+                unsafe {
+                    self.copy(dst, src, src_pre_wrap_len);
+                    self.copy(dst + src_pre_wrap_len, 0, delta);
+                    self.copy(0, delta, len - dst_pre_wrap_len);
+                }
             }
             (true, true, true) => {
                 // src before dst, src wraps, dst wraps
@@ -407,9 +428,11 @@ impl<T> VecDeque<T> {
                 //
                 debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
                 let delta = src_pre_wrap_len - dst_pre_wrap_len;
-                self.copy(delta, 0, len - src_pre_wrap_len);
-                self.copy(0, self.cap() - delta, delta);
-                self.copy(dst, src, dst_pre_wrap_len);
+                unsafe {
+                    self.copy(delta, 0, len - src_pre_wrap_len);
+                    self.copy(0, self.cap() - delta, delta);
+                    self.copy(dst, src, dst_pre_wrap_len);
+                }
             }
         }
     }
@@ -439,13 +462,17 @@ impl<T> VecDeque<T> {
             // Nop
         } else if self.head < old_capacity - self.tail {
             // B
-            self.copy_nonoverlapping(old_capacity, 0, self.head);
+            unsafe {
+                self.copy_nonoverlapping(old_capacity, 0, self.head);
+            }
             self.head += old_capacity;
             debug_assert!(self.head > self.tail);
         } else {
             // C
             let new_tail = new_capacity - (old_capacity - self.tail);
-            self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+            unsafe {
+                self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+            }
             self.tail = new_tail;
             debug_assert!(self.head < self.tail);
         }
@@ -1057,6 +1084,108 @@ impl<T> VecDeque<T> {
         self.tail == self.head
     }
 
+    fn range_start_end<R>(&self, range: R) -> (usize, usize)
+    where
+        R: RangeBounds<usize>,
+    {
+        let len = self.len();
+        let start = match range.start_bound() {
+            Included(&n) => n,
+            Excluded(&n) => n + 1,
+            Unbounded => 0,
+        };
+        let end = match range.end_bound() {
+            Included(&n) => n + 1,
+            Excluded(&n) => n,
+            Unbounded => len,
+        };
+        assert!(start <= end, "lower bound was too large");
+        assert!(end <= len, "upper bound was too large");
+        (start, end)
+    }
+
+    /// Creates an iterator that covers the specified range in the `VecDeque`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the starting point is greater than the end point or if
+    /// the end point is greater than the length of the vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(deque_range)]
+    ///
+    /// use std::collections::VecDeque;
+    ///
+    /// let v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
+    /// let range = v.range(2..).copied().collect::<VecDeque<_>>();
+    /// assert_eq!(range, [3]);
+    ///
+    /// // A full range covers all contents
+    /// let all = v.range(..);
+    /// assert_eq!(all.len(), 3);
+    /// ```
+    #[inline]
+    #[unstable(feature = "deque_range", issue = "74217")]
+    pub fn range<R>(&self, range: R) -> Iter<'_, T>
+    where
+        R: RangeBounds<usize>,
+    {
+        let (start, end) = self.range_start_end(range);
+        let tail = self.wrap_add(self.tail, start);
+        let head = self.wrap_add(self.tail, end);
+        Iter {
+            tail,
+            head,
+            // The shared reference we have in &self is maintained in the '_ of Iter.
+            ring: unsafe { self.buffer_as_slice() },
+        }
+    }
+
+    /// Creates an iterator that covers the specified mutable range in the `VecDeque`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the starting point is greater than the end point or if
+    /// the end point is greater than the length of the vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(deque_range)]
+    ///
+    /// use std::collections::VecDeque;
+    ///
+    /// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
+    /// for v in v.range_mut(2..) {
+    ///   *v *= 2;
+    /// }
+    /// assert_eq!(v, vec![1, 2, 6]);
+    ///
+    /// // A full range covers all contents
+    /// for v in v.range_mut(..) {
+    ///   *v *= 2;
+    /// }
+    /// assert_eq!(v, vec![2, 4, 12]);
+    /// ```
+    #[inline]
+    #[unstable(feature = "deque_range", issue = "74217")]
+    pub fn range_mut<R>(&mut self, range: R) -> IterMut<'_, T>
+    where
+        R: RangeBounds<usize>,
+    {
+        let (start, end) = self.range_start_end(range);
+        let tail = self.wrap_add(self.tail, start);
+        let head = self.wrap_add(self.tail, end);
+        IterMut {
+            tail,
+            head,
+            // The shared reference we have in &mut self is maintained in the '_ of IterMut.
+            ring: unsafe { self.buffer_as_mut_slice() },
+        }
+    }
+
     /// Creates a draining iterator that removes the specified range in the
     /// `VecDeque` and yields the removed items.
     ///
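
For reference, a standalone sketch of the bound resolution `range_start_end` performs, with the same panic conditions; `resolve` is a hypothetical name used only for illustration:

```rust
use std::ops::{Bound::*, RangeBounds};

fn resolve<R: RangeBounds<usize>>(range: R, len: usize) -> (usize, usize) {
    let start = match range.start_bound() {
        Included(&n) => n,
        Excluded(&n) => n + 1,
        Unbounded => 0,
    };
    let end = match range.end_bound() {
        Included(&n) => n + 1,
        Excluded(&n) => n,
        Unbounded => len,
    };
    assert!(start <= end && end <= len);
    (start, end)
}

fn main() {
    assert_eq!(resolve(1..3, 5), (1, 3));
    assert_eq!(resolve(..=2, 5), (0, 3));
    assert_eq!(resolve(.., 5), (0, 5));
}
```
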
@@ -1102,19 +1231,7 @@ impl<T> VecDeque<T> {
         // When finished, the remaining data will be copied back to cover the hole,
         // and the head/tail values will be restored correctly.
         //
-        let len = self.len();
-        let start = match range.start_bound() {
-            Included(&n) => n,
-            Excluded(&n) => n + 1,
-            Unbounded => 0,
-        };
-        let end = match range.end_bound() {
-            Included(&n) => n + 1,
-            Excluded(&n) => n,
-            Unbounded => len,
-        };
-        assert!(start <= end, "drain lower bound was too large");
-        assert!(end <= len, "drain upper bound was too large");
+        let (start, end) = self.range_start_end(range);
 
         // The deque's elements are parted into three segments:
         // * self.tail  -> drain_tail
@@ -1353,7 +1470,9 @@ impl<T> VecDeque<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn push_front(&mut self, value: T) {
-        self.grow_if_necessary();
+        if self.is_full() {
+            self.grow();
+        }
 
         self.tail = self.wrap_sub(self.tail, 1);
         let tail = self.tail;
@@ -1376,7 +1495,9 @@ impl<T> VecDeque<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn push_back(&mut self, value: T) {
-        self.grow_if_necessary();
+        if self.is_full() {
+            self.grow();
+        }
 
         let head = self.head;
         self.head = self.wrap_add(self.head, 1);
@@ -1484,7 +1605,9 @@ impl<T> VecDeque<T> {
     #[stable(feature = "deque_extras_15", since = "1.5.0")]
     pub fn insert(&mut self, index: usize, value: T) {
         assert!(index <= self.len(), "index out of bounds");
-        self.grow_if_necessary();
+        if self.is_full() {
+            self.grow();
+        }
 
         // Move the least number of elements in the ring buffer and insert
         // the given object
@@ -2002,11 +2125,13 @@ impl<T> VecDeque<T> {
     }
 
     // This may panic or abort
-    #[inline]
-    fn grow_if_necessary(&mut self) {
+    #[inline(never)]
+    fn grow(&mut self) {
         if self.is_full() {
             let old_cap = self.cap();
-            self.buf.double();
+            // Double the buffer size.
+            self.buf.reserve_exact(old_cap, old_cap);
+            assert!(self.cap() == old_cap * 2);
             unsafe {
                 self.handle_capacity_increase(old_cap);
             }
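
The observable effect of `grow` from safe code: pushing into a full deque reallocates the ring buffer. Exact capacities depend on the ring-buffer implementation (this version keeps a power-of-two buffer with one unusable slot), so the assertion below is deliberately loose:

```rust
use std::collections::VecDeque;

fn main() {
    let mut deq: VecDeque<u8> = VecDeque::with_capacity(4);
    let cap0 = deq.capacity();
    for i in 0..=cap0 as u8 {
        deq.push_back(i); // the last push finds the buffer full
    }
    // `grow` doubled the backing buffer, so usable capacity increased.
    assert!(deq.capacity() > cap0);
}
```
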
@@ -2288,7 +2413,9 @@ impl<T> VecDeque<T> {
 
     unsafe fn rotate_left_inner(&mut self, mid: usize) {
         debug_assert!(mid * 2 <= self.len());
-        self.wrap_copy(self.head, self.tail, mid);
+        unsafe {
+            self.wrap_copy(self.head, self.tail, mid);
+        }
         self.head = self.wrap_add(self.head, mid);
         self.tail = self.wrap_add(self.tail, mid);
     }
@@ -2297,7 +2424,9 @@ impl<T> VecDeque<T> {
         debug_assert!(k * 2 <= self.len());
         self.head = self.wrap_sub(self.head, k);
         self.tail = self.wrap_sub(self.tail, k);
-        self.wrap_copy(self.tail, self.head, k);
+        unsafe {
+            self.wrap_copy(self.tail, self.head, k);
+        }
     }
 }
 
@@ -2872,6 +3001,16 @@ impl<A> Extend<A> for VecDeque<A> {
             }
         }
     }
+
+    #[inline]
+    fn extend_one(&mut self, elem: A) {
+        self.push_back(elem);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 #[stable(feature = "extend_ref", since = "1.2.0")]
@@ -2879,6 +3018,16 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque<T> {
     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
         self.extend(iter.into_iter().cloned());
     }
+
+    #[inline]
+    fn extend_one(&mut self, &elem: &T) {
+        self.push_back(elem);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
diff --git a/src/liballoc/collections/vec_deque/tests.rs b/src/liballoc/collections/vec_deque/tests.rs
index 8ef5ec78e05..e5edfe02a52 100644
--- a/src/liballoc/collections/vec_deque/tests.rs
+++ b/src/liballoc/collections/vec_deque/tests.rs
@@ -1,9 +1,7 @@
 use super::*;
 
-use test;
-
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_push_back_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::with_capacity(101);
     b.iter(|| {
@@ -16,7 +14,7 @@ fn bench_push_back_100(b: &mut test::Bencher) {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_push_front_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::with_capacity(101);
     b.iter(|| {
@@ -29,7 +27,7 @@ fn bench_push_front_100(b: &mut test::Bencher) {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_pop_back_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::<i32>::with_capacity(101);
 
@@ -43,7 +41,7 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_pop_front_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::<i32>::with_capacity(101);
 
@@ -249,6 +247,65 @@ fn test_remove() {
 }
 
 #[test]
+fn test_range() {
+    let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+    let cap = tester.capacity();
+    for len in 0..=cap {
+        for tail in 0..=cap {
+            for start in 0..=len {
+                for end in start..=len {
+                    tester.tail = tail;
+                    tester.head = tail;
+                    for i in 0..len {
+                        tester.push_back(i);
+                    }
+
+                    // Check that we iterate over the correct values
+                    let range: VecDeque<_> = tester.range(start..end).copied().collect();
+                    let expected: VecDeque<_> = (start..end).collect();
+                    assert_eq!(range, expected);
+                }
+            }
+        }
+    }
+}
+
+#[test]
+fn test_range_mut() {
+    let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+    let cap = tester.capacity();
+    for len in 0..=cap {
+        for tail in 0..=cap {
+            for start in 0..=len {
+                for end in start..=len {
+                    tester.tail = tail;
+                    tester.head = tail;
+                    for i in 0..len {
+                        tester.push_back(i);
+                    }
+
+                    let head_was = tester.head;
+                    let tail_was = tester.tail;
+
+                    // Check that we iterate over the correct values
+                    let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
+                    let expected: VecDeque<_> = (start..end).collect();
+                    assert_eq!(range, expected);
+
+                    // We shouldn't have changed the capacity or made the
+                    // head or tail out of bounds
+                    assert_eq!(tester.capacity(), cap);
+                    assert_eq!(tester.tail, tail_was);
+                    assert_eq!(tester.head, head_was);
+                }
+            }
+        }
+    }
+}
+
+#[test]
 fn test_drain() {
     let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
 
@@ -386,10 +443,8 @@ fn test_vec_from_vecdeque() {
         assert!(vec.into_iter().eq(vd));
     }
 
-    #[cfg(not(miri))] // Miri is too slow
-    let max_pwr = 7;
-    #[cfg(miri)]
-    let max_pwr = 5;
+    // Miri is too slow
+    let max_pwr = if cfg!(miri) { 5 } else { 7 };
 
     for cap_pwr in 0..max_pwr {
         // Make capacity as a (2^x)-1, so that the ring size is 2^x
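
Replacing the duplicated `#[cfg_attr]` bindings with `cfg!` is behavior-preserving because `cfg!` expands to a compile-time boolean literal, so the dead branch folds away. The same pattern with a cfg that is set in ordinary builds:

```rust
fn main() {
    // As cheap as the attribute form, without duplicating the binding.
    let iterations = if cfg!(debug_assertions) { 10 } else { 1000 };
    println!("running {} iterations", iterations);
}
```
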
diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs
index 13ef2f063f9..26077f3c8d1 100644
--- a/src/liballoc/fmt.rs
+++ b/src/liballoc/fmt.rs
@@ -50,8 +50,8 @@
 //! The internal iterator over the argument has not been advanced by the time
 //! the first `{}` is seen, so it prints the first argument. Then upon reaching
 //! the second `{}`, the iterator has advanced forward to the second argument.
-//! Essentially, parameters which explicitly name their argument do not affect
-//! parameters which do not name an argument in terms of positional specifiers.
+//! Essentially, parameters that explicitly name their argument do not affect
+//! parameters that do not name an argument in terms of positional specifiers.
 //!
 //! A format string is required to use all of its arguments, otherwise it is a
 //! compile-time error. You may refer to the same argument more than once in the
@@ -60,7 +60,7 @@
 //! ## Named parameters
 //!
 //! Rust itself does not have a Python-like equivalent of named parameters to a
-//! function, but the [`format!`] macro is a syntax extension which allows it to
+//! function, but the [`format!`] macro is a syntax extension that allows it to
 //! leverage named parameters. Named parameters are listed at the end of the
 //! argument list and have the syntax:
 //!
@@ -77,7 +77,7 @@
 //! ```
 //!
 //! It is not valid to put positional parameters (those without names) after
-//! arguments which have names. Like with positional parameters, it is not
+//! arguments that have names. Like with positional parameters, it is not
 //! valid to provide named parameters that are unused by the format string.
 //!
 //! # Formatting Parameters
@@ -130,7 +130,7 @@
 //!
 //! The default [fill/alignment](#fillalignment) for non-numerics is a space and
 //! left-aligned. The
-//! defaults for numeric formatters is also a space but with right-alignment. If
+//! default for numeric formatters is also a space character but with right-alignment. If
 //! the `0` flag (see below) is specified for numerics, then the implicit fill character is
 //! `0`.
 //!
@@ -161,7 +161,7 @@
 //!         `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
 //!         should always be printed.
 //! * `-` - Currently not used
-//! * `#` - This flag is indicates that the "alternate" form of printing should
+//! * `#` - This flag indicates that the "alternate" form of printing should
 //!         be used. The alternate forms are:
 //!     * `#?` - pretty-print the [`Debug`] formatting
 //!     * `#x` - precedes the argument with a `0x`
@@ -173,9 +173,9 @@
 //!         like `{:08}` would yield `00000001` for the integer `1`, while the
 //!         same format would yield `-0000001` for the integer `-1`. Notice that
 //!         the negative version has one fewer zero than the positive version.
-//!         Note that padding zeroes are always placed after the sign (if any)
+//!         Note that padding zeros are always placed after the sign (if any)
 //!         and before the digits. When used together with the `#` flag, a similar
-//!         rule applies: padding zeroes are inserted after the prefix but before
+//!         rule applies: padding zeros are inserted after the prefix but before
 //!         the digits. The prefix is included in the total width.
 //!
 //! ## Precision
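
A small demonstration of the zero-padding rules described above, with the zeros landing after the sign and after the `#` prefix:

```rust
fn main() {
    assert_eq!(format!("{:08}", 1), "00000001");
    assert_eq!(format!("{:08}", -1), "-0000001"); // one fewer zero
    assert_eq!(format!("{:#010x}", 27), "0x0000001b"); // prefix counts toward width
}
```
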
@@ -251,7 +251,7 @@
 //!
 //! In some programming languages, the behavior of string formatting functions
 //! depends on the operating system's locale setting. The format functions
-//! provided by Rust's standard library do not have any concept of locale, and
+//! provided by Rust's standard library do not have any concept of locale and
 //! will produce the same results on all systems regardless of user
 //! configuration.
 //!
@@ -470,7 +470,7 @@
 //!
 //! ### `format_args!`
 //!
-//! This is a curious macro which is used to safely pass around
+//! This is a curious macro used to safely pass around
 //! an opaque object describing the format string. This object
 //! does not require any heap allocations to create, and it only
 //! references information on the stack. Under the hood, all of
@@ -495,7 +495,7 @@
 //! This structure can then be passed to the [`write`] and [`format`] functions
 //! inside this module in order to process the format string.
 //! The goal of this macro is to even further prevent intermediate allocations
-//! when dealing formatting strings.
+//! when dealing with formatting strings.
 //!
 //! For example, a logging library could use the standard formatting syntax, but
 //! it would internally pass around this structure until it has been determined
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index a2071844d5d..2ec777ac85c 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -72,34 +72,38 @@
 #![deny(intra_doc_link_resolution_failure)] // rustdoc is run without -D warnings
 #![allow(explicit_outlives_requirements)]
 #![allow(incomplete_features)]
+#![deny(unsafe_op_in_unsafe_fn)]
 #![cfg_attr(not(test), feature(generator_trait))]
 #![cfg_attr(test, feature(test))]
 #![feature(allocator_api)]
 #![feature(allow_internal_unstable)]
 #![feature(arbitrary_self_types)]
-#![feature(box_into_raw_non_null)]
 #![feature(box_patterns)]
 #![feature(box_syntax)]
 #![feature(cfg_sanitize)]
 #![feature(cfg_target_has_atomic)]
 #![feature(coerce_unsized)]
+#![feature(const_btree_new)]
 #![feature(const_generic_impls_guard)]
 #![feature(const_generics)]
 #![feature(const_in_array_repeat_expressions)]
-#![feature(const_if_match)]
+#![cfg_attr(bootstrap, feature(const_if_match))]
 #![feature(cow_is_borrowed)]
+#![feature(deque_range)]
 #![feature(dispatch_from_dyn)]
 #![feature(core_intrinsics)]
 #![feature(container_error_extra)]
 #![feature(dropck_eyepatch)]
 #![feature(exact_size_is_empty)]
+#![feature(extend_one)]
 #![feature(fmt_internals)]
 #![feature(fn_traits)]
 #![feature(fundamental)]
 #![feature(internal_uninit_const)]
 #![feature(lang_items)]
+#![feature(layout_for_ptr)]
 #![feature(libc)]
-#![cfg_attr(not(bootstrap), feature(negative_impls))]
+#![feature(negative_impls)]
 #![feature(new_uninit)]
 #![feature(nll)]
 #![feature(optin_builtin_traits)]
@@ -107,9 +111,10 @@
 #![feature(pattern)]
 #![feature(ptr_internals)]
 #![feature(ptr_offset_from)]
+#![feature(raw_ref_op)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
-#![feature(specialization)]
+#![feature(min_specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
 #![feature(str_internals)]
@@ -117,6 +122,7 @@
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![feature(unicode_internals)]
+#![feature(unsafe_block_in_unsafe_fn)]
 #![feature(unsize)]
 #![feature(unsized_locals)]
 #![feature(allocator_internals)]
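
`#![deny(unsafe_op_in_unsafe_fn)]` together with `feature(unsafe_block_in_unsafe_fn)` (still nightly-only at the time of this diff) is what forces the explicit `unsafe { ... }` blocks added throughout this patch. A minimal sketch of the lint's effect:

```rust
// With the lint denied, the body of an `unsafe fn` no longer acts as one
// big implicit unsafe block; each unsafe operation must be wrapped.
unsafe fn first_byte(ptr: *const u8) -> u8 {
    // `*ptr` bare would now be rejected; the block makes it explicit.
    unsafe { *ptr }
}

fn main() {
    let x = 7u8;
    assert_eq!(unsafe { first_byte(&x) }, 7);
}
```
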
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 0780b33e53a..ed81ce71ddf 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -1,7 +1,7 @@
 #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
 #![doc(hidden)]
 
-use core::alloc::MemoryBlock;
+use core::alloc::{LayoutErr, MemoryBlock};
 use core::cmp;
 use core::mem::{self, ManuallyDrop, MaybeUninit};
 use core::ops::Drop;
@@ -9,7 +9,7 @@ use core::ptr::{NonNull, Unique};
 use core::slice;
 
 use crate::alloc::{
-    handle_alloc_error, AllocErr,
+    handle_alloc_error,
     AllocInit::{self, *},
     AllocRef, Global, Layout,
     ReallocPlacement::{self, *},
@@ -25,9 +25,9 @@ mod tests;
 /// involved. This type is excellent for building your own data structures like Vec and VecDeque.
 /// In particular:
 ///
-/// * Produces `Unique::empty()` on zero-sized types.
-/// * Produces `Unique::empty()` on zero-length allocations.
-/// * Avoids freeing `Unique::empty()`.
+/// * Produces `Unique::dangling()` on zero-sized types.
+/// * Produces `Unique::dangling()` on zero-length allocations.
+/// * Avoids freeing `Unique::dangling()`.
 /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
 /// * Guards against 32-bit systems allocating more than isize::MAX bytes.
 /// * Guards against overflowing your length.
@@ -60,7 +60,7 @@ impl<T> RawVec<T, Global> {
     /// `#[rustc_force_min_const_fn]` attribute which requires conformance
     /// with `min_const_fn` but does not necessarily allow calling it in
     /// `stable(...) const fn` / user code not enabling `foo` when
-    /// `#[rustc_const_unstable(feature = "foo", ..)]` is present.
+    /// `#[rustc_const_unstable(feature = "foo", issue = "01234")]` is present.
     pub const NEW: Self = Self::new();
 
     /// Creates the biggest possible `RawVec` (on the system heap)
@@ -80,9 +80,7 @@ impl<T> RawVec<T, Global> {
     ///
     /// # Panics
     ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
+    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
     ///
     /// # Aborts
     ///
@@ -108,7 +106,7 @@ impl<T> RawVec<T, Global> {
     /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
     #[inline]
     pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
-        Self::from_raw_parts_in(ptr, capacity, Global)
+        unsafe { Self::from_raw_parts_in(ptr, capacity, Global) }
     }
 
     /// Converts a `Box<[T]>` into a `RawVec<T>`.
@@ -118,6 +116,32 @@ impl<T> RawVec<T, Global> {
             RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len())
         }
     }
+
+    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
+    ///
+    /// Note that this will correctly reconstitute any `cap` changes
+    /// that may have been performed. (See description of type for details.)
+    ///
+    /// # Safety
+    ///
+    /// * `len` must be greater than or equal to the most recently requested capacity, and
+    /// * `len` must be less than or equal to `self.capacity()`.
+    ///
+    /// Note that the requested capacity and `self.capacity()` could differ, as
+    /// an allocator could overallocate and return a larger memory block than requested.
+    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
+        // Sanity-check one half of the safety requirement (we cannot check the other half).
+        debug_assert!(
+            len <= self.capacity(),
+            "`len` must be smaller than or equal to `self.capacity()`"
+        );
+
+        let me = ManuallyDrop::new(self);
+        unsafe {
+            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
+            Box::from_raw(slice)
+        }
+    }
 }
 
 impl<T, A: AllocRef> RawVec<T, A> {
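
A minimal sketch, on stable Rust, of the kind of buffer `into_box` hands back: a boxed `[MaybeUninit<T>]` whose leading elements the caller must know to be initialized before reading them:

```rust
use std::mem::MaybeUninit;

fn uninit_box(len: usize) -> Box<[MaybeUninit<u8>]> {
    // Not liballoc's code: just a stable way to get the same shape.
    vec![MaybeUninit::uninit(); len].into_boxed_slice()
}

fn main() {
    let mut buf = uninit_box(4);
    buf[0] = MaybeUninit::new(42);
    // Only element 0 has been initialized, so only it may be read back.
    assert_eq!(unsafe { buf[0].assume_init() }, 42);
}
```
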
@@ -125,7 +149,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// the returned `RawVec`.
     pub const fn new_in(alloc: A) -> Self {
         // `cap: 0` means "unallocated". zero-sized types are ignored.
-        Self { ptr: Unique::empty(), cap: 0, alloc }
+        Self { ptr: Unique::dangling(), cap: 0, alloc }
     }
 
     /// Like `with_capacity`, but parameterized over the choice of
@@ -146,12 +170,23 @@ impl<T, A: AllocRef> RawVec<T, A> {
         if mem::size_of::<T>() == 0 {
             Self::new_in(alloc)
         } else {
-            let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow());
-            alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow());
+            // We avoid `unwrap_or_else` here because it bloats the amount of
+            // LLVM IR generated.
+            let layout = match Layout::array::<T>(capacity) {
+                Ok(layout) => layout,
+                Err(_) => capacity_overflow(),
+            };
+            match alloc_guard(layout.size()) {
+                Ok(_) => {}
+                Err(_) => capacity_overflow(),
+            }
+            let memory = match alloc.alloc(layout, init) {
+                Ok(memory) => memory,
+                Err(_) => handle_alloc_error(layout),
+            };
 
-            let memory = alloc.alloc(layout, init).unwrap_or_else(|_| handle_alloc_error(layout));
             Self {
-                ptr: memory.ptr.cast().into(),
+                ptr: unsafe { Unique::new_unchecked(memory.ptr.cast().as_ptr()) },
                 cap: Self::capacity_from_bytes(memory.size),
                 alloc,
             }
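
A sketch of the code-size reasoning behind the `match` form: a `match` on the `Result` paired with a shared `#[cold]` diverging helper generates less LLVM IR per monomorphization than handing a fresh closure to `unwrap_or_else`. The `array_layout` name is hypothetical:

```rust
use std::alloc::Layout;

#[inline(never)]
#[cold]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}

fn array_layout(capacity: usize) -> Layout {
    match Layout::array::<u64>(capacity) {
        Ok(layout) => layout,
        Err(_) => capacity_overflow(), // shared cold path, no closure
    }
}

fn main() {
    assert_eq!(array_layout(4).size(), 32);
}
```
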
@@ -168,11 +203,11 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
     #[inline]
     pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
-        Self { ptr: Unique::new_unchecked(ptr), cap: capacity, alloc: a }
+        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc: a }
     }
 
     /// Gets a raw pointer to the start of the allocation. Note that this is
-    /// `Unique::empty()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
+    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
     /// be careful.
     pub fn ptr(&self) -> *mut T {
         self.ptr.as_ptr()
@@ -211,89 +246,13 @@ impl<T, A: AllocRef> RawVec<T, A> {
         }
     }
 
-    /// Doubles the size of the type's backing allocation. This is common enough
-    /// to want to do that it's easiest to just have a dedicated method. Slightly
-    /// more efficient logic can be provided for this than the general case.
-    ///
-    /// This function is ideal for when pushing elements one-at-a-time because
-    /// you don't need to incur the costs of the more general computations
-    /// reserve needs to do to guard against overflow. You do however need to
-    /// manually check if your `len == capacity`.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-    ///   all `usize::MAX` slots in your imaginary buffer.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    ///
-    /// # Aborts
-    ///
-    /// Aborts on OOM
-    ///
-    /// # Examples
+    /// Ensures that the buffer contains at least enough space to hold `len +
+    /// additional` elements. If it doesn't already have enough capacity, will
+    /// reallocate enough space plus comfortable slack space to get amortized
+    /// `O(1)` behavior. Will limit this behavior if it would needlessly cause
+    /// itself to panic.
     ///
-    /// ```
-    /// # #![feature(raw_vec_internals)]
-    /// # extern crate alloc;
-    /// # use std::ptr;
-    /// # use alloc::raw_vec::RawVec;
-    /// struct MyVec<T> {
-    ///     buf: RawVec<T>,
-    ///     len: usize,
-    /// }
-    ///
-    /// impl<T> MyVec<T> {
-    ///     pub fn push(&mut self, elem: T) {
-    ///         if self.len == self.buf.capacity() { self.buf.double(); }
-    ///         // double would have aborted or panicked if the len exceeded
-    ///         // `isize::MAX` so this is safe to do unchecked now.
-    ///         unsafe {
-    ///             ptr::write(self.buf.ptr().add(self.len), elem);
-    ///         }
-    ///         self.len += 1;
-    ///     }
-    /// }
-    /// # fn main() {
-    /// #   let mut vec = MyVec { buf: RawVec::new(), len: 0 };
-    /// #   vec.push(1);
-    /// # }
-    /// ```
-    #[inline(never)]
-    #[cold]
-    pub fn double(&mut self) {
-        match self.grow(Double, MayMove, Uninitialized) {
-            Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
-            Ok(()) => { /* yay */ }
-        }
-    }
-
-    /// Attempts to double the size of the type's backing allocation in place. This is common
-    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
-    /// more efficient logic can be provided for this than the general case.
-    ///
-    /// Returns `true` if the reallocation attempt has succeeded.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-    ///   all `usize::MAX` slots in your imaginary buffer.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    #[inline(never)]
-    #[cold]
-    pub fn double_in_place(&mut self) -> bool {
-        self.grow(Double, InPlace, Uninitialized).is_ok()
-    }
-
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
-    /// enough capacity, will reallocate enough space plus comfortable slack
-    /// space to get amortized `O(1)` behavior. Will limit this behavior
-    /// if it would needlessly cause itself to panic.
-    ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
+    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
     /// the requested space. This is not really unsafe, but the unsafe
     /// code *you* write that relies on the behavior of this function may break.
     ///
@@ -301,9 +260,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
     ///
     /// # Panics
     ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
+    /// Panics if the new capacity exceeds `isize::MAX` bytes.
     ///
     /// # Aborts
     ///
@@ -339,8 +296,8 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// #   vector.push_all(&[1, 3, 5, 7, 9]);
     /// # }
     /// ```
-    pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.try_reserve(used_capacity, needed_extra_capacity) {
+    pub fn reserve(&mut self, len: usize, additional: usize) {
+        match self.try_reserve(len, additional) {
             Err(CapacityOverflow) => capacity_overflow(),
             Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
@@ -348,68 +305,33 @@ impl<T, A: AllocRef> RawVec<T, A> {
     }
 
     /// The same as `reserve`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(len, additional) {
+            self.grow_amortized(len, additional)
         } else {
             Ok(())
         }
     }
 
-    /// Attempts to ensure that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
-    /// enough capacity, will reallocate in place enough space plus comfortable slack
-    /// space to get amortized `O(1)` behavior. Will limit this behaviour
-    /// if it would needlessly cause itself to panic.
-    ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
-    /// the requested space. This is not really unsafe, but the unsafe
-    /// code *you* write that relies on the behavior of this function may break.
-    ///
-    /// Returns `true` if the reallocation attempt has succeeded.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
-        // This is more readable than putting this in one line:
-        // `!self.needs_to_grow(...) || self.grow(...).is_ok()`
-        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized)
-                .is_ok()
-        } else {
-            true
-        }
-    }
-
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
-    /// will reallocate the minimum possible amount of memory necessary.
-    /// Generally this will be exactly the amount of memory necessary,
-    /// but in principle the allocator is free to give back more than
-    /// we asked for.
+    /// Ensures that the buffer contains at least enough space to hold `len +
+    /// additional` elements. If it doesn't already, will reallocate the
+    /// minimum possible amount of memory necessary. Generally this will be
+    /// exactly the amount of memory necessary, but in principle the allocator
+    /// is free to give back more than we asked for.
     ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
-    /// the requested space. This is not really unsafe, but the unsafe
-    /// code *you* write that relies on the behavior of this function may break.
+    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe code
+    /// *you* write that relies on the behavior of this function may break.
     ///
     /// # Panics
     ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
+    /// Panics if the new capacity exceeds `isize::MAX` bytes.
     ///
     /// # Aborts
     ///
     /// Aborts on OOM.
-    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.try_reserve_exact(used_capacity, needed_extra_capacity) {
+    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
+        match self.try_reserve_exact(len, additional) {
             Err(CapacityOverflow) => capacity_overflow(),
             Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
@@ -419,14 +341,10 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
     pub fn try_reserve_exact(
         &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
+        len: usize,
+        additional: usize,
     ) -> Result<(), TryReserveError> {
-        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
-        } else {
-            Ok(())
-        }
+        if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
     }
 
     /// Shrinks the allocation down to the specified amount. If the given amount
@@ -448,19 +366,11 @@ impl<T, A: AllocRef> RawVec<T, A> {
     }
 }
 
-#[derive(Copy, Clone)]
-enum Strategy {
-    Double,
-    Amortized { used_capacity: usize, needed_extra_capacity: usize },
-    Exact { used_capacity: usize, needed_extra_capacity: usize },
-}
-use Strategy::*;
-
 impl<T, A: AllocRef> RawVec<T, A> {
     /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
     /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
-    fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
-        needed_extra_capacity > self.capacity().wrapping_sub(used_capacity)
+    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
+        additional > self.capacity().wrapping_sub(len)
     }
 
     fn capacity_from_bytes(excess: usize) -> usize {
@@ -469,72 +379,73 @@ impl<T, A: AllocRef> RawVec<T, A> {
     }
 
     fn set_memory(&mut self, memory: MemoryBlock) {
-        self.ptr = memory.ptr.cast().into();
+        self.ptr = unsafe { Unique::new_unchecked(memory.ptr.cast().as_ptr()) };
         self.cap = Self::capacity_from_bytes(memory.size);
     }
 
-    /// Single method to handle all possibilities of growing the buffer.
-    fn grow(
-        &mut self,
-        strategy: Strategy,
-        placement: ReallocPlacement,
-        init: AllocInit,
-    ) -> Result<(), TryReserveError> {
-        let elem_size = mem::size_of::<T>();
-        if elem_size == 0 {
+    // This method is usually instantiated many times. So we want it to be as
+    // small as possible, to improve compile times. But we also want as much of
+    // its contents to be statically computable as possible, to make the
+    // generated code run faster. Therefore, this method is carefully written
+    // so that all of the code that depends on `T` is within it, while as much
+    // of the code that doesn't depend on `T` as possible is in functions that
+    // are non-generic over `T`.
+    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+        // This is ensured by the calling contexts.
+        debug_assert!(additional > 0);
+
+        if mem::size_of::<T>() == 0 {
             // Since we return a capacity of `usize::MAX` when `elem_size` is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow);
         }
-        let new_layout = match strategy {
-            Double => unsafe {
-                // Since we guarantee that we never allocate more than `isize::MAX` bytes,
-                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
-                // Additionally the alignment will never be too large as to "not be satisfiable",
-                // so `Layout::from_size_align` will always return `Some`.
-                //
-                // TL;DR, we bypass runtime checks due to dynamic assertions in this module,
-                // allowing us to use `from_size_align_unchecked`.
-                let cap = if self.cap == 0 {
-                    // Skip to 4 because tiny `Vec`'s are dumb; but not if that would cause overflow.
-                    if elem_size > usize::MAX / 8 { 1 } else { 4 }
-                } else {
-                    self.cap * 2
-                };
-                Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>())
-            },
-            Amortized { used_capacity, needed_extra_capacity } => {
-                // Nothing we can really do about these checks, sadly.
-                let required_cap =
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-                // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
-                let double_cap = self.cap * 2;
-                // `double_cap` guarantees exponential growth.
-                let cap = cmp::max(double_cap, required_cap);
-                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
-            }
-            Exact { used_capacity, needed_extra_capacity } => {
-                let cap =
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
-            }
-        };
-        alloc_guard(new_layout.size())?;
 
-        let memory = if let Some((ptr, old_layout)) = self.current_memory() {
-            debug_assert_eq!(old_layout.align(), new_layout.align());
-            unsafe {
-                self.alloc
-                    .grow(ptr, old_layout, new_layout.size(), placement, init)
-                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
-            }
+        // Nothing we can really do about these checks, sadly.
+        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+        // This guarantees exponential growth. The doubling cannot overflow
+        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+        let cap = cmp::max(self.cap * 2, required_cap);
+
+        // Tiny Vecs are dumb. Skip to:
+        // - 8 if the element size is 1, because any heap allocator is likely
+        //   to round up a request of less than 8 bytes to at least 8 bytes.
+        // - 4 if elements are moderate-sized (<= 1 KiB).
+        // - 1 otherwise, to avoid wasting too much space for very short Vecs.
+        // Note that `min_non_zero_cap` is computed statically.
+        let elem_size = mem::size_of::<T>();
+        let min_non_zero_cap = if elem_size == 1 {
+            8
+        } else if elem_size <= 1024 {
+            4
         } else {
-            match placement {
-                MayMove => self.alloc.alloc(new_layout, init),
-                InPlace => Err(AllocErr),
-            }
-            .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+            1
         };
+        let cap = cmp::max(min_non_zero_cap, cap);
+
+        let new_layout = Layout::array::<T>(cap);
+
+        // `finish_grow` is non-generic over `T`.
+        let memory = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+        self.set_memory(memory);
+        Ok(())
+    }
+
+    // The constraints on this method are much the same as those on
+    // `grow_amortized`, but this method is usually instantiated less often so
+    // it's less critical.
+    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+        if mem::size_of::<T>() == 0 {
+            // Since we return a capacity of `usize::MAX` when the type size is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow);
+        }
+
+        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+        let new_layout = Layout::array::<T>(cap);
+
+        // `finish_grow` is non-generic over `T`.
+        let memory = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
         self.set_memory(memory);
         Ok(())
     }
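
To see what the amortized policy above computes in practice, here is a minimal sketch (not part of the patch) of the first `reserve` on an empty buffer of 4-byte elements, mirroring the arithmetic in `grow_amortized`:

    let elem_size = 4usize;                  // e.g. T = u32
    let (cap, len, additional) = (0usize, 0usize, 1usize);
    let required = len.checked_add(additional).unwrap(); // 1
    let doubled = cap * 2;                               // 0
    let min_non_zero_cap = if elem_size == 1 {
        8
    } else if elem_size <= 1024 {
        4
    } else {
        1
    };
    // First push of a u32 reserves 4 slots, not 1.
    assert_eq!(required.max(doubled).max(min_non_zero_cap), 4);
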
@@ -562,30 +473,32 @@ impl<T, A: AllocRef> RawVec<T, A> {
     }
 }
 
-impl<T> RawVec<T, Global> {
-    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
-    ///
-    /// Note that this will correctly reconstitute any `cap` changes
-    /// that may have been performed. (See description of type for details.)
-    ///
-    /// # Safety
-    ///
-    /// * `len` must be greater than or equal to the most recently requested capacity, and
-    /// * `len` must be less than or equal to `self.capacity()`.
-    ///
-    /// Note, that the requested capacity and `self.capacity()` could differ, as
-    /// an allocator could overallocate and return a greater memory block than requested.
-    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
-        // Sanity-check one half of the safety requirement (we cannot check the other half).
-        debug_assert!(
-            len <= self.capacity(),
-            "`len` must be smaller than or equal to `self.capacity()`"
-        );
-
-        let me = ManuallyDrop::new(self);
-        let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
-        Box::from_raw(slice)
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+fn finish_grow<A>(
+    new_layout: Result<Layout, LayoutErr>,
+    current_memory: Option<(NonNull<u8>, Layout)>,
+    alloc: &mut A,
+) -> Result<MemoryBlock, TryReserveError>
+where
+    A: AllocRef,
+{
+    // Check for the error here to minimize the size of `RawVec::grow_*`.
+    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+
+    alloc_guard(new_layout.size())?;
+
+    let memory = if let Some((ptr, old_layout)) = current_memory {
+        debug_assert_eq!(old_layout.align(), new_layout.align());
+        unsafe { alloc.grow(ptr, old_layout, new_layout.size(), MayMove, Uninitialized) }
+    } else {
+        alloc.alloc(new_layout, Uninitialized)
     }
+    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?;
+
+    Ok(memory)
 }
 
 unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
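
The comment above `grow_amortized` describes the split this patch makes: keep the `T`-dependent code inside the generic method and hoist everything else into a non-generic function. A minimal standalone sketch of that pattern (hypothetical names, not the patch's code):

    use std::alloc::{Layout, LayoutErr};

    // Generic shim: only the `Layout` computation depends on `T`, so the
    // per-`T` instantiation stays tiny.
    fn grow_for<T>(n: usize) -> Result<Layout, ()> {
        finish(Layout::array::<T>(n))
    }

    // Non-generic body: compiled once and shared by every `T`.
    fn finish(layout: Result<Layout, LayoutErr>) -> Result<Layout, ()> {
        layout.map_err(|_| ())
    }
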
diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs
index e7ab8a305d2..5408faa079c 100644
--- a/src/liballoc/raw_vec/tests.rs
+++ b/src/liballoc/raw_vec/tests.rs
@@ -12,7 +12,6 @@ fn allocator_param() {
     //
     // Instead, this just checks that the `RawVec` methods do at
     // least go through the Allocator API when it reserves
-
     // storage.
 
     // A dumb allocator that consumes a fixed amount of fuel
@@ -35,7 +34,7 @@ fn allocator_param() {
             }
         }
         unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
-            Global.dealloc(ptr, layout)
+            unsafe { Global.dealloc(ptr, layout) }
         }
     }
 
@@ -59,7 +58,7 @@ fn reserve_does_not_overallocate() {
         let mut v: RawVec<u32> = RawVec::new();
         v.reserve(0, 7);
         assert_eq!(7, v.capacity());
-        // 97 if more than double of 7, so `reserve` should work
+        // 97 is more than double of 7, so `reserve` should work
         // like `reserve_exact`.
         v.reserve(7, 90);
         assert_eq!(97, v.capacity());
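
The comment's claim follows directly from the amortized policy: with `cap == 7`, doubling gives 14, which loses to the required 97, so `reserve` allocates exactly 97. A quick check of that arithmetic:

    let cap = 7usize;
    let required = 7 + 90;                  // 97
    let doubled = cap * 2;                  // 14
    assert_eq!(required.max(doubled), 97);  // doubling loses; exact size wins
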
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index e106b4354e4..77ff567aa7a 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -245,13 +245,14 @@ use core::hash::{Hash, Hasher};
 use core::intrinsics::abort;
 use core::iter;
 use core::marker::{self, PhantomData, Unpin, Unsize};
-use core::mem::{self, align_of, align_of_val, forget, size_of_val};
+use core::mem::{self, align_of_val_raw, forget, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 
 use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
 use crate::string::String;
 use crate::vec::Vec;
 
@@ -279,7 +280,6 @@ struct RcBox<T: ?Sized> {
 /// type `T`.
 ///
 /// [get_mut]: #method.get_mut
-#[cfg_attr(all(bootstrap, not(test)), lang = "rc")]
 #[cfg_attr(not(test), rustc_diagnostic_item = "Rc")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Rc<T: ?Sized> {
@@ -304,7 +304,7 @@ impl<T: ?Sized> Rc<T> {
     }
 
     unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
-        Self::from_inner(NonNull::new_unchecked(ptr))
+        Self::from_inner(unsafe { NonNull::new_unchecked(ptr) })
     }
 }
 
@@ -324,11 +324,9 @@ impl<T> Rc<T> {
         // pointers, which ensures that the weak destructor never frees
         // the allocation while the strong destructor is running, even
         // if the weak pointer is stored inside the strong one.
-        Self::from_inner(Box::into_raw_non_null(box RcBox {
-            strong: Cell::new(1),
-            weak: Cell::new(1),
-            value,
-        }))
+        Self::from_inner(
+            Box::leak(box RcBox { strong: Cell::new(1), weak: Cell::new(1), value }).into(),
+        )
     }
 
     /// Constructs a new `Rc` with uninitialized contents.
@@ -546,7 +544,7 @@ impl<T> Rc<[mem::MaybeUninit<T>]> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     #[inline]
     pub unsafe fn assume_init(self) -> Rc<[T]> {
-        Rc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _)
+        unsafe { Rc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
     }
 }
 
@@ -582,8 +580,6 @@ impl<T: ?Sized> Rc<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::rc::Rc;
     ///
     /// let x = Rc::new("hello".to_owned());
@@ -592,20 +588,14 @@ impl<T: ?Sized> Rc<T> {
     /// assert_eq!(x_ptr, Rc::as_ptr(&y));
     /// assert_eq!(unsafe { &*x_ptr }, "hello");
     /// ```
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
     pub fn as_ptr(this: &Self) -> *const T {
         let ptr: *mut RcBox<T> = NonNull::as_ptr(this.ptr);
-        let fake_ptr = ptr as *mut T;
 
-        // SAFETY: This cannot go through Deref::deref.
-        // Instead, we manually offset the pointer rather than manifesting a reference.
-        // This is so that the returned pointer retains the same provenance as our pointer.
-        // This is required so that e.g. `get_mut` can write through the pointer
-        // after the Rc is recovered through `from_raw`.
-        unsafe {
-            let offset = data_offset(&(*ptr).value);
-            set_data_ptr(fake_ptr, (ptr as *mut u8).offset(offset))
-        }
+        // SAFETY: This cannot go through Deref::deref or Rc::inner because
+        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
+        // write through the pointer after the Rc is recovered through `from_raw`.
+        unsafe { &raw const (*ptr).value }
     }
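
The new body uses `&raw const`, the (then-unstable) `raw_ref_op` syntax available inside liballoc, to take a field address without ever materializing a `&T`. A sketch of the idea, reusing the `RcBox` definition from this file:

    // Projecting through a raw pointer keeps the allocation's full
    // raw/mut provenance; going through `&(*ptr).value` (a reference)
    // would narrow it to a shared borrow of the `value` field alone.
    unsafe fn value_ptr<T>(ptr: *mut RcBox<T>) -> *const T {
        &raw const (*ptr).value
    }
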
 
     /// Constructs an `Rc<T>` from a raw pointer.
@@ -647,13 +637,13 @@ impl<T: ?Sized> Rc<T> {
     /// ```
     #[stable(feature = "rc_raw", since = "1.17.0")]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
-        let offset = data_offset(ptr);
+        let offset = unsafe { data_offset(ptr) };
 
         // Reverse the offset to find the original RcBox.
         let fake_ptr = ptr as *mut RcBox<T>;
-        let rc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+        let rc_ptr = unsafe { set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)) };
 
-        Self::from_ptr(rc_ptr)
+        unsafe { Self::from_ptr(rc_ptr) }
     }
 
     /// Consumes the `Rc`, returning the wrapped pointer as `NonNull<T>`.
@@ -662,6 +652,7 @@ impl<T: ?Sized> Rc<T> {
     ///
     /// ```
     /// #![feature(rc_into_raw_non_null)]
+    /// #![allow(deprecated)]
     ///
     /// use std::rc::Rc;
     ///
@@ -671,6 +662,7 @@ impl<T: ?Sized> Rc<T> {
     /// assert_eq!(deref, "hello");
     /// ```
     #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
+    #[rustc_deprecated(since = "1.44.0", reason = "use `Rc::into_raw` instead")]
     #[inline]
     pub fn into_raw_non_null(this: Self) -> NonNull<T> {
         // safe because Rc guarantees its pointer is non-null
@@ -807,7 +799,7 @@ impl<T: ?Sized> Rc<T> {
     #[inline]
     #[unstable(feature = "get_mut_unchecked", issue = "63292")]
     pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
-        &mut this.ptr.as_mut().value
+        unsafe { &mut this.ptr.as_mut().value }
     }
 
     #[inline]
@@ -966,10 +958,12 @@ impl<T: ?Sized> Rc<T> {
 
         // Initialize the RcBox
         let inner = mem_to_rcbox(mem.ptr.as_ptr());
-        debug_assert_eq!(Layout::for_value(&*inner), layout);
+        unsafe {
+            debug_assert_eq!(Layout::for_value(&*inner), layout);
 
-        ptr::write(&mut (*inner).strong, Cell::new(1));
-        ptr::write(&mut (*inner).weak, Cell::new(1));
+            ptr::write(&mut (*inner).strong, Cell::new(1));
+            ptr::write(&mut (*inner).weak, Cell::new(1));
+        }
 
         inner
     }
@@ -977,9 +971,11 @@ impl<T: ?Sized> Rc<T> {
     /// Allocates an `RcBox<T>` with sufficient space for an unsized inner value
     unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
         // Allocate for the `RcBox<T>` using the given value.
-        Self::allocate_for_layout(Layout::for_value(&*ptr), |mem| {
-            set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>
-        })
+        unsafe {
+            Self::allocate_for_layout(Layout::for_value(&*ptr), |mem| {
+                set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>
+            })
+        }
     }
 
     fn from_box(v: Box<T>) -> Rc<T> {
@@ -1008,9 +1004,11 @@ impl<T: ?Sized> Rc<T> {
 impl<T> Rc<[T]> {
     /// Allocates an `RcBox<[T]>` with the given length.
     unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
-        Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {
-            ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>
-        })
+        unsafe {
+            Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {
+                ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>
+            })
+        }
     }
 }
 
@@ -1019,20 +1017,22 @@ impl<T> Rc<[T]> {
 /// For a slice/trait object, this sets the `data` field and leaves the rest
 /// unchanged. For a sized raw pointer, this simply sets the pointer.
 unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
-    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
+    unsafe {
+        ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
+    }
     ptr
 }
 
 impl<T> Rc<[T]> {
-    /// Copy elements from slice into newly allocated Rc<[T]>
+    /// Copies elements from a slice into a newly allocated Rc<\[T\]>
     ///
     /// Unsafe because the caller must either take ownership or bind `T: Copy`
     unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
-        let ptr = Self::allocate_for_slice(v.len());
-
-        ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).value as *mut [T] as *mut T, v.len());
-
-        Self::from_ptr(ptr)
+        unsafe {
+            let ptr = Self::allocate_for_slice(v.len());
+            ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).value as *mut [T] as *mut T, v.len());
+            Self::from_ptr(ptr)
+        }
     }
 
     /// Constructs an `Rc<[T]>` from an iterator known to be of a certain size.
@@ -1060,25 +1060,27 @@ impl<T> Rc<[T]> {
             }
         }
 
-        let ptr = Self::allocate_for_slice(len);
+        unsafe {
+            let ptr = Self::allocate_for_slice(len);
 
-        let mem = ptr as *mut _ as *mut u8;
-        let layout = Layout::for_value(&*ptr);
+            let mem = ptr as *mut _ as *mut u8;
+            let layout = Layout::for_value(&*ptr);
 
-        // Pointer to first element
-        let elems = &mut (*ptr).value as *mut [T] as *mut T;
+            // Pointer to first element
+            let elems = &mut (*ptr).value as *mut [T] as *mut T;
 
-        let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
+            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
 
-        for (i, item) in iter.enumerate() {
-            ptr::write(elems.add(i), item);
-            guard.n_elems += 1;
-        }
+            for (i, item) in iter.enumerate() {
+                ptr::write(elems.add(i), item);
+                guard.n_elems += 1;
+            }
 
-        // All clear. Forget the guard so it doesn't free the new RcBox.
-        forget(guard);
+            // All clear. Forget the guard so it doesn't free the new RcBox.
+            forget(guard);
 
-        Self::from_ptr(ptr)
+            Self::from_ptr(ptr)
+        }
     }
 }
 
@@ -1222,6 +1224,12 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
     }
 }
 
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+
+impl<T: Eq> MarkerEq for T {}
+
 /// We're doing this specialization here, and not as a more general optimization on `&T`, because it
 /// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
 /// store large values, that are slow to clone, but also heavy to check for equality, causing this
@@ -1230,7 +1238,7 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
     #[inline]
     fn eq(&self, other: &Rc<T>) -> bool {
         Rc::ptr_eq(self, other) || **self == **other
@@ -1492,6 +1500,21 @@ impl<T> From<Vec<T>> for Rc<[T]> {
     }
 }
 
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Rc<B>
+where
+    B: ToOwned + ?Sized,
+    Rc<B>: From<&'a B> + From<B::Owned>,
+{
+    #[inline]
+    fn from(cow: Cow<'a, B>) -> Rc<B> {
+        match cow {
+            Cow::Borrowed(s) => Rc::from(s),
+            Cow::Owned(s) => Rc::from(s),
+        }
+    }
+}
+
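
With the blanket bounds above, a `Cow` converts through whichever existing `From` impl matches its variant; for example, via the long-stable `From<&str>` and `From<String>` impls for `Rc<str>`:

    use std::borrow::Cow;
    use std::rc::Rc;

    let borrowed: Cow<'_, str> = Cow::Borrowed("hello");
    let owned: Cow<'_, str> = Cow::Owned(String::from("world"));
    let a: Rc<str> = Rc::from(borrowed); // delegates to From<&str>
    let b: Rc<str> = Rc::from(owned);    // delegates to From<String>
    assert_eq!((&*a, &*b), ("hello", "world"));
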
 #[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
 impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]>
 where
@@ -1549,25 +1572,25 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        RcFromIter::from_iter(iter.into_iter())
+        ToRcSlice::to_rc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Rc<[T]>`.
-trait RcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_rc_slice(self) -> Rc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+    default fn to_rc_slice(self) -> Rc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+    fn to_rc_slice(self) -> Rc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -1578,29 +1601,15 @@ impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
 
             unsafe {
                 // SAFETY: We need to ensure that the iterator has an exact length, and we have done so above.
-                Rc::from_iter_exact(iter, low)
+                Rc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
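
From the caller's side the renamed specialization is invisible; any exact-size iterator simply takes the single-allocation path. For instance:

    use std::rc::Rc;

    // `Range<i32>` is `TrustedLen`, so the elements are written directly
    // into one exactly-sized RcBox<[i32]> with no intermediate Vec.
    let r: Rc<[i32]> = (0..4).collect();
    assert_eq!(&*r, &[0, 1, 2, 3]);
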
 
-impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
 /// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
@@ -1632,6 +1641,7 @@ pub struct Weak<T: ?Sized> {
     // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
     // to allocate space on the heap.  That's not a value a real pointer
     // will ever have because RcBox has alignment at least 2.
+    // This is only possible when `T: Sized`; an unsized `T` never dangles.
     ptr: NonNull<RcBox<T>>,
 }
 
@@ -1674,8 +1684,6 @@ impl<T> Weak<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::rc::Rc;
     /// use std::ptr;
     ///
@@ -1693,11 +1701,20 @@ impl<T> Weak<T> {
     /// ```
     ///
     /// [`null`]: ../../std/ptr/fn.null.html
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
     pub fn as_ptr(&self) -> *const T {
-        let offset = data_offset_sized::<T>();
-        let ptr = self.ptr.cast::<u8>().as_ptr().wrapping_offset(offset);
-        ptr as *const T
+        let ptr: *mut RcBox<T> = NonNull::as_ptr(self.ptr);
+
+        // SAFETY: we must offset the pointer manually, and said pointer may be
+        // a dangling weak (usize::MAX) if T is sized. data_offset is safe to call,
+        // because we know that a pointer to unsized T was derived from a real
+        // unsized T, as dangling weaks are only created for sized T. wrapping_offset
+        // is used so that we can use the same code path for the non-dangling
+        // unsized case and the potentially dangling sized case.
+        unsafe {
+            let offset = data_offset(ptr as *mut T);
+            set_data_ptr(ptr as *mut T, (ptr as *mut u8).wrapping_offset(offset))
+        }
     }
 
     /// Consumes the `Weak<T>` and turns it into a raw pointer.
@@ -1711,8 +1728,6 @@ impl<T> Weak<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::rc::{Rc, Weak};
     ///
     /// let strong = Rc::new("hello".to_owned());
@@ -1728,7 +1743,7 @@ impl<T> Weak<T> {
     ///
     /// [`from_raw`]: struct.Weak.html#method.from_raw
     /// [`as_ptr`]: struct.Weak.html#method.as_ptr
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
     pub fn into_raw(self) -> *const T {
         let result = self.as_ptr();
         mem::forget(self);
@@ -1755,8 +1770,6 @@ impl<T> Weak<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::rc::{Rc, Weak};
     ///
     /// let strong = Rc::new("hello".to_owned());
@@ -1781,16 +1794,18 @@ impl<T> Weak<T> {
     /// [`Weak`]: struct.Weak.html
     /// [`new`]: struct.Weak.html#method.new
     /// [`forget`]: ../../std/mem/fn.forget.html
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
         if ptr.is_null() {
             Self::new()
         } else {
             // See Rc::from_raw for details
-            let offset = data_offset(ptr);
-            let fake_ptr = ptr as *mut RcBox<T>;
-            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
-            Weak { ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw") }
+            unsafe {
+                let offset = data_offset(ptr);
+                let fake_ptr = ptr as *mut RcBox<T>;
+                let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+                Weak { ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw") }
+            }
         }
     }
 }
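
A round trip through the now-stable raw-pointer API, as a small usage sketch:

    use std::rc::{Rc, Weak};

    let strong = Rc::new(42);
    let raw = Rc::downgrade(&strong).into_raw();
    // SAFETY: `raw` came from `Weak::into_raw`, and the weak count it
    // represents has not been released yet.
    let weak = unsafe { Weak::from_raw(raw) };
    assert_eq!(*weak.upgrade().unwrap(), 42);
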
@@ -2035,10 +2050,8 @@ trait RcBoxPtr<T: ?Sized> {
         // The reference count will never be zero when this is called;
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
-        if strong == 0 || strong == usize::max_value() {
-            unsafe {
-                abort();
-            }
+        if strong == 0 || strong == usize::MAX {
+            abort();
         }
         self.inner().strong.set(strong + 1);
     }
@@ -2061,10 +2074,8 @@ trait RcBoxPtr<T: ?Sized> {
         // The reference count will never be zero when this is called;
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
-        if weak == 0 || weak == usize::max_value() {
-            unsafe {
-                abort();
-            }
+        if weak == 0 || weak == usize::MAX {
+            abort();
         }
         self.inner().weak.set(weak + 1);
     }
@@ -2106,19 +2117,22 @@ impl<T: ?Sized> AsRef<T> for Rc<T> {
 #[stable(feature = "pin", since = "1.33.0")]
 impl<T: ?Sized> Unpin for Rc<T> {}
 
+/// Gets the offset within an `RcBox` for
+/// a payload of the type described by a pointer.
+///
+/// # Safety
+///
+/// This has the same safety requirements as `align_of_val_raw`. In effect:
+///
+/// - This function is safe for any argument if `T` is sized, and
+/// - if `T` is unsized, the pointer must have appropriate pointer metadata
+///   acquired from the real instance that you are getting this offset for.
 unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
     // Align the unsized value to the end of the `RcBox`.
     // Because it is ?Sized, it will always be the last field in memory.
     // Note: This is a detail of the current implementation of the compiler,
     // and is not a guaranteed language detail. Do not rely on it outside of std.
-    data_offset_align(align_of_val(&*ptr))
-}
-
-/// Computes the offset of the data field within `RcBox`.
-///
-/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
-fn data_offset_sized<T>() -> isize {
-    data_offset_align(align_of::<T>())
+    unsafe { data_offset_align(align_of_val_raw(ptr)) }
 }
 
 #[inline]
diff --git a/src/liballoc/rc/tests.rs b/src/liballoc/rc/tests.rs
index 56788bb56d5..e88385faf4f 100644
--- a/src/liballoc/rc/tests.rs
+++ b/src/liballoc/rc/tests.rs
@@ -407,14 +407,14 @@ fn test_from_vec() {
 fn test_downcast() {
     use std::any::Any;
 
-    let r1: Rc<dyn Any> = Rc::new(i32::max_value());
+    let r1: Rc<dyn Any> = Rc::new(i32::MAX);
     let r2: Rc<dyn Any> = Rc::new("abc");
 
     assert!(r1.clone().downcast::<u32>().is_err());
 
     let r1i32 = r1.downcast::<i32>();
     assert!(r1i32.is_ok());
-    assert_eq!(r1i32.unwrap(), Rc::new(i32::max_value()));
+    assert_eq!(r1i32.unwrap(), Rc::new(i32::MAX));
 
     assert!(r2.clone().downcast::<i32>().is_err());
 
diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs
index 53477288b59..3d51115fe01 100644
--- a/src/liballoc/slice.rs
+++ b/src/liballoc/slice.rs
@@ -136,8 +136,6 @@ pub use hack::to_vec;
 // `test_permutations` test
 mod hack {
     use crate::boxed::Box;
-    #[cfg(test)]
-    use crate::string::ToString;
     use crate::vec::Vec;
 
     // We shouldn't add inline attribute to this since this is used in
@@ -156,9 +154,9 @@ mod hack {
     where
         T: Clone,
     {
-        let mut vector = Vec::with_capacity(s.len());
-        vector.extend_from_slice(s);
-        vector
+        let mut vec = Vec::with_capacity(s.len());
+        vec.extend_from_slice(s);
+        vec
     }
 }
 
@@ -831,8 +829,7 @@ where
 {
     let len = v.len();
     let v = v.as_mut_ptr();
-    let v_mid = v.add(mid);
-    let v_end = v.add(len);
+    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
 
     // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
     // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
@@ -855,8 +852,10 @@ where
 
     if mid <= len - mid {
         // The left run is shorter.
-        ptr::copy_nonoverlapping(v, buf, mid);
-        hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+        unsafe {
+            ptr::copy_nonoverlapping(v, buf, mid);
+            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+        }
 
         // Initially, these pointers point to the beginnings of their arrays.
         let left = &mut hole.start;
@@ -866,17 +865,21 @@ where
         while *left < hole.end && right < v_end {
             // Consume the lesser side.
             // If equal, prefer the left run to maintain stability.
-            let to_copy = if is_less(&*right, &**left) {
-                get_and_increment(&mut right)
-            } else {
-                get_and_increment(left)
-            };
-            ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+            unsafe {
+                let to_copy = if is_less(&*right, &**left) {
+                    get_and_increment(&mut right)
+                } else {
+                    get_and_increment(left)
+                };
+                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+            }
         }
     } else {
         // The right run is shorter.
-        ptr::copy_nonoverlapping(v_mid, buf, len - mid);
-        hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+        unsafe {
+            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+        }
 
         // Initially, these pointers point past the ends of their arrays.
         let left = &mut hole.dest;
@@ -886,12 +889,14 @@ where
         while v < *left && buf < *right {
             // Consume the greater side.
             // If equal, prefer the right run to maintain stability.
-            let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
-                decrement_and_get(left)
-            } else {
-                decrement_and_get(right)
-            };
-            ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+            unsafe {
+                let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+                    decrement_and_get(left)
+                } else {
+                    decrement_and_get(right)
+                };
+                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+            }
         }
     }
     // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
@@ -899,12 +904,12 @@ where
 
     unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
         let old = *ptr;
-        *ptr = ptr.offset(1);
+        *ptr = unsafe { ptr.offset(1) };
         old
     }
 
     unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
-        *ptr = ptr.offset(-1);
+        *ptr = unsafe { ptr.offset(-1) };
         *ptr
     }
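
The "prefer the left run" / "prefer the right run" choices in the loops above are what make this merge sort stable, which is observable from safe code:

    let mut v = [(1, 'b'), (0, 'a'), (1, 'a')];
    v.sort_by_key(|&(k, _)| k);
    // Equal keys keep their original order: (1, 'b') stays before (1, 'a').
    assert_eq!(v, [(0, 'a'), (1, 'b'), (1, 'a')]);
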
 
diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs
index 70860c09a2c..57927c68847 100644
--- a/src/liballoc/str.rs
+++ b/src/liballoc/str.rs
@@ -583,5 +583,5 @@ impl str {
 #[stable(feature = "str_box_extras", since = "1.20.0")]
 #[inline]
 pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
-    Box::from_raw(Box::into_raw(v) as *mut str)
+    unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
 }
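
Usage is unchanged by this patch; the caller still promises UTF-8 validity:

    use std::str;

    let bytes: Box<[u8]> = Box::from(&b"hi"[..]);
    // SAFETY: the bytes are valid UTF-8.
    let s: Box<str> = unsafe { str::from_boxed_utf8_unchecked(bytes) };
    assert_eq!(&*s, "hi");
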
diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs
index 80fa8139915..5b671b41b5b 100644
--- a/src/liballoc/string.rs
+++ b/src/liballoc/string.rs
@@ -278,6 +278,7 @@ use crate::vec::Vec;
 /// [`Deref`]: ../../std/ops/trait.Deref.html
 /// [`as_str()`]: struct.String.html#method.as_str
 #[derive(PartialOrd, Eq, Ord)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "string_type")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct String {
     vec: Vec<u8>,
@@ -723,7 +724,7 @@ impl String {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
-        String { vec: Vec::from_raw_parts(buf, length, capacity) }
+        unsafe { String { vec: Vec::from_raw_parts(buf, length, capacity) } }
     }
 
     /// Converts a vector of bytes to a `String` without checking that the
@@ -1328,9 +1329,11 @@ impl String {
         let amt = bytes.len();
         self.vec.reserve(amt);
 
-        ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
-        ptr::copy(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
-        self.vec.set_len(len + amt);
+        unsafe {
+            ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
+            ptr::copy(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
+            self.vec.set_len(len + amt);
+        }
     }
 
     /// Inserts a string slice into this `String` at a byte position.
@@ -1771,6 +1774,15 @@ impl FromIterator<String> for String {
     }
 }
 
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl FromIterator<Box<str>> for String {
+    fn from_iter<I: IntoIterator<Item = Box<str>>>(iter: I) -> String {
+        let mut buf = String::new();
+        buf.extend(iter);
+        buf
+    }
+}
+
 #[stable(feature = "herd_cows", since = "1.19.0")]
 impl<'a> FromIterator<Cow<'a, str>> for String {
     fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
@@ -1798,6 +1810,16 @@ impl Extend<char> for String {
         self.reserve(lower_bound);
         iterator.for_each(move |c| self.push(c));
     }
+
+    #[inline]
+    fn extend_one(&mut self, c: char) {
+        self.push(c);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 #[stable(feature = "extend_ref", since = "1.2.0")]
@@ -1805,6 +1827,16 @@ impl<'a> Extend<&'a char> for String {
     fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
         self.extend(iter.into_iter().cloned());
     }
+
+    #[inline]
+    fn extend_one(&mut self, &c: &'a char) {
+        self.push(c);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1812,6 +1844,18 @@ impl<'a> Extend<&'a str> for String {
     fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
         iter.into_iter().for_each(move |s| self.push_str(s));
     }
+
+    #[inline]
+    fn extend_one(&mut self, s: &'a str) {
+        self.push_str(s);
+    }
+}
+
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl Extend<Box<str>> for String {
+    fn extend<I: IntoIterator<Item = Box<str>>>(&mut self, iter: I) {
+        iter.into_iter().for_each(move |s| self.push_str(&s));
+    }
 }
 
 #[stable(feature = "extend_string", since = "1.4.0")]
@@ -1819,6 +1863,11 @@ impl Extend<String> for String {
     fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
         iter.into_iter().for_each(move |s| self.push_str(&s));
     }
+
+    #[inline]
+    fn extend_one(&mut self, s: String) {
+        self.push_str(&s);
+    }
 }
 
 #[stable(feature = "herd_cows", since = "1.19.0")]
@@ -1826,6 +1875,11 @@ impl<'a> Extend<Cow<'a, str>> for String {
     fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
         iter.into_iter().for_each(move |s| self.push_str(&s));
     }
+
+    #[inline]
+    fn extend_one(&mut self, s: Cow<'a, str>) {
+        self.push_str(&s);
+    }
 }
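
`extend_one`/`extend_reserve` are unstable trait methods (feature `extend_one`), so calling them directly needs a nightly toolchain; note that `extend_reserve` must be named through a specific impl, since every `Extend` impl on `String` exposes the same signature. A nightly-only sketch:

    #![feature(extend_one)]

    fn main() {
        let mut s = String::new();
        // Disambiguate: several `Extend` impls provide `extend_reserve`.
        <String as Extend<char>>::extend_reserve(&mut s, 5);
        s.extend_one('h');    // via Extend<char>
        s.extend_one("ello"); // via Extend<&str>
        assert_eq!(s, "hello");
    }
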
 
 /// A convenience impl that delegates to the impl for `&str`.
@@ -2192,6 +2246,14 @@ impl<T: fmt::Display + ?Sized> ToString for T {
     }
 }
 
+#[stable(feature = "char_to_string_specialization", since = "1.46.0")]
+impl ToString for char {
+    #[inline]
+    fn to_string(&self) -> String {
+        String::from(self.encode_utf8(&mut [0; 4]))
+    }
+}
+
 #[stable(feature = "str_to_string_specialization", since = "1.9.0")]
 impl ToString for str {
     #[inline]
@@ -2472,3 +2534,11 @@ impl DoubleEndedIterator for Drain<'_> {
 
 #[stable(feature = "fused", since = "1.26.0")]
 impl FusedIterator for Drain<'_> {}
+
+#[stable(feature = "from_char_for_string", since = "1.46.0")]
+impl From<char> for String {
+    #[inline]
+    fn from(c: char) -> Self {
+        c.to_string()
+    }
+}
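
Both new impls give a cheap path from a `char` that bypasses the `Display` formatting machinery:

    let a = String::from('★');  // From<char>, per the impl above
    let b = '★'.to_string();    // specialized ToString for char
    assert_eq!(a, b);
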
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index 54df2b60857..0053a54f203 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -16,15 +16,16 @@ use core::hash::{Hash, Hasher};
 use core::intrinsics::abort;
 use core::iter;
 use core::marker::{PhantomData, Unpin, Unsize};
-use core::mem::{self, align_of, align_of_val, size_of_val};
+use core::mem::{self, align_of_val, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 
 use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
 use crate::boxed::Box;
 use crate::rc::is_dangling;
 use crate::string::String;
@@ -207,7 +208,6 @@ macro_rules! acquire {
 /// counting in general.
 ///
 /// [rc_examples]: ../../std/rc/index.html#examples
-#[cfg_attr(all(bootstrap, not(test)), lang = "arc")]
 #[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Arc<T: ?Sized> {
@@ -232,7 +232,7 @@ impl<T: ?Sized> Arc<T> {
     }
 
     unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
-        Self::from_inner(NonNull::new_unchecked(ptr))
+        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
     }
 }
 
@@ -267,6 +267,7 @@ pub struct Weak<T: ?Sized> {
     // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
     // to allocate space on the heap.  That's not a value a real pointer
     // will ever have because ArcInner has alignment at least 2.
+    // This is only possible when `T: Sized`; an unsized `T` never dangles.
     ptr: NonNull<ArcInner<T>>,
 }
 
@@ -325,7 +326,7 @@ impl<T> Arc<T> {
             weak: atomic::AtomicUsize::new(1),
             data,
         };
-        Self::from_inner(Box::into_raw_non_null(x))
+        Self::from_inner(Box::leak(x).into())
     }
 
     /// Constructs a new `Arc` with uninitialized contents.
@@ -418,8 +419,7 @@ impl<T> Arc<T> {
     #[inline]
     #[stable(feature = "arc_unique", since = "1.4.0")]
     pub fn try_unwrap(this: Self) -> Result<T, Self> {
-        // See `drop` for why all these atomics are like this
-        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
+        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
             return Err(this);
         }
 
@@ -543,7 +543,7 @@ impl<T> Arc<[mem::MaybeUninit<T>]> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     #[inline]
     pub unsafe fn assume_init(self) -> Arc<[T]> {
-        Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _)
+        unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
     }
 }
 
@@ -579,8 +579,6 @@ impl<T: ?Sized> Arc<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::sync::Arc;
     ///
     /// let x = Arc::new("hello".to_owned());
@@ -589,20 +587,14 @@ impl<T: ?Sized> Arc<T> {
     /// assert_eq!(x_ptr, Arc::as_ptr(&y));
     /// assert_eq!(unsafe { &*x_ptr }, "hello");
     /// ```
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
     pub fn as_ptr(this: &Self) -> *const T {
         let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
-        let fake_ptr = ptr as *mut T;
 
-        // SAFETY: This cannot go through Deref::deref.
-        // Instead, we manually offset the pointer rather than manifesting a reference.
-        // This is so that the returned pointer retains the same provenance as our pointer.
-        // This is required so that e.g. `get_mut` can write through the pointer
-        // after the Arc is recovered through `from_raw`.
-        unsafe {
-            let offset = data_offset(&(*ptr).data);
-            set_data_ptr(fake_ptr, (ptr as *mut u8).offset(offset))
-        }
+        // SAFETY: This cannot go through Deref::deref or Arc::inner because
+        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
+        // write through the pointer after the Arc is recovered through `from_raw`.
+        unsafe { &raw const (*ptr).data }
     }
 
     /// Constructs an `Arc<T>` from a raw pointer.
@@ -644,13 +636,15 @@ impl<T: ?Sized> Arc<T> {
     /// ```
     #[stable(feature = "rc_raw", since = "1.17.0")]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
-        let offset = data_offset(ptr);
+        unsafe {
+            let offset = data_offset(ptr);
 
-        // Reverse the offset to find the original ArcInner.
-        let fake_ptr = ptr as *mut ArcInner<T>;
-        let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+            // Reverse the offset to find the original ArcInner.
+            let fake_ptr = ptr as *mut ArcInner<T>;
+            let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
 
-        Self::from_ptr(arc_ptr)
+            Self::from_ptr(arc_ptr)
+        }
     }
 
     /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
@@ -659,6 +653,7 @@ impl<T: ?Sized> Arc<T> {
     ///
     /// ```
     /// #![feature(rc_into_raw_non_null)]
+    /// #![allow(deprecated)]
     ///
     /// use std::sync::Arc;
     ///
@@ -668,6 +663,7 @@ impl<T: ?Sized> Arc<T> {
     /// assert_eq!(deref, "hello");
     /// ```
     #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
+    #[rustc_deprecated(since = "1.44.0", reason = "use `Arc::into_raw` instead")]
     #[inline]
     pub fn into_raw_non_null(this: Self) -> NonNull<T> {
         // safe because Arc guarantees its pointer is non-null
@@ -775,6 +771,81 @@ impl<T: ?Sized> Arc<T> {
         this.inner().strong.load(SeqCst)
     }
 
+    /// Increments the strong reference count on the `Arc<T>` associated with the
+    /// provided pointer by one.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must have been obtained through `Arc::into_raw`, and the
+    /// associated `Arc` instance must be valid (i.e. the strong count must be at
+    /// least 1) for the duration of this method.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(arc_mutate_strong_count)]
+    ///
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// unsafe {
+    ///     let ptr = Arc::into_raw(five);
+    ///     Arc::incr_strong_count(ptr);
+    ///
+    ///     // This assertion is deterministic because we haven't shared
+    ///     // the `Arc` between threads.
+    ///     let five = Arc::from_raw(ptr);
+    ///     assert_eq!(2, Arc::strong_count(&five));
+    /// }
+    /// ```
+    #[inline]
+    #[unstable(feature = "arc_mutate_strong_count", issue = "71983")]
+    pub unsafe fn incr_strong_count(ptr: *const T) {
+        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+        let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
+        // Now increase refcount, but don't drop new refcount either
+        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
+    }
+
+    /// Decrements the strong reference count on the `Arc<T>` associated with the
+    /// provided pointer by one.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must have been obtained through `Arc::into_raw`, and the
+    /// associated `Arc` instance must be valid (i.e. the strong count must be at
+    /// least 1) when invoking this method. This method can be used to release the final
+    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
+    /// released.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(arc_mutate_strong_count)]
+    ///
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// unsafe {
+    ///     let ptr = Arc::into_raw(five);
+    ///     Arc::incr_strong_count(ptr);
+    ///
+    ///     // These assertions are deterministic because we haven't shared
+    ///     // the `Arc` between threads.
+    ///     let five = Arc::from_raw(ptr);
+    ///     assert_eq!(2, Arc::strong_count(&five));
+    ///     Arc::decr_strong_count(ptr);
+    ///     assert_eq!(1, Arc::strong_count(&five));
+    /// }
+    /// ```
+    #[inline]
+    #[unstable(feature = "arc_mutate_strong_count", issue = "71983")]
+    pub unsafe fn decr_strong_count(ptr: *const T) {
+        unsafe { mem::drop(Arc::from_raw(ptr)) };
+    }
+
     #[inline]
     fn inner(&self) -> &ArcInner<T> {
         // This unsafety is ok because while this arc is alive we're guaranteed
@@ -790,12 +861,10 @@ impl<T: ?Sized> Arc<T> {
     unsafe fn drop_slow(&mut self) {
         // Destroy the data at this time, even though we may not free the box
         // allocation itself (there may still be weak pointers lying around).
-        ptr::drop_in_place(&mut self.ptr.as_mut().data);
+        unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
 
-        if self.inner().weak.fetch_sub(1, Release) == 1 {
-            acquire!(self.inner().weak);
-            Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
-        }
+        // Drop the weak ref collectively held by all strong references
+        drop(Weak { ptr: self.ptr });
     }
 
     #[inline]
@@ -844,10 +913,12 @@ impl<T: ?Sized> Arc<T> {
 
         // Initialize the ArcInner
         let inner = mem_to_arcinner(mem.ptr.as_ptr());
-        debug_assert_eq!(Layout::for_value(&*inner), layout);
+        debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
 
-        ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
-        ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
+        unsafe {
+            ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
+            ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
+        }
 
         inner
     }
@@ -855,9 +926,11 @@ impl<T: ?Sized> Arc<T> {
     /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
     unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
         // Allocate for the `ArcInner<T>` using the given value.
-        Self::allocate_for_layout(Layout::for_value(&*ptr), |mem| {
-            set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>
-        })
+        unsafe {
+            Self::allocate_for_layout(Layout::for_value(&*ptr), |mem| {
+                set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>
+            })
+        }
     }
 
     fn from_box(v: Box<T>) -> Arc<T> {
@@ -886,9 +959,11 @@ impl<T: ?Sized> Arc<T> {
 impl<T> Arc<[T]> {
     /// Allocates an `ArcInner<[T]>` with the given length.
     unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
-        Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {
-            ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>
-        })
+        unsafe {
+            Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {
+                ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>
+            })
+        }
     }
 }
 
@@ -897,20 +972,24 @@ impl<T> Arc<[T]> {
 /// For a slice/trait object, this sets the `data` field and leaves the rest
 /// unchanged. For a sized raw pointer, this simply sets the pointer.
 unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
-    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
+    unsafe {
+        ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
+    }
     ptr
 }
 
 impl<T> Arc<[T]> {
-    /// Copy elements from slice into newly allocated Arc<[T]>
+    /// Copies elements from a slice into a newly allocated Arc<\[T\]>
     ///
     /// Unsafe because the caller must either take ownership or bind `T: Copy`.
     unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
-        let ptr = Self::allocate_for_slice(v.len());
+        unsafe {
+            let ptr = Self::allocate_for_slice(v.len());
 
-        ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());
+            ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());
 
-        Self::from_ptr(ptr)
+            Self::from_ptr(ptr)
+        }
     }
 
     /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
@@ -938,25 +1017,27 @@ impl<T> Arc<[T]> {
             }
         }
 
-        let ptr = Self::allocate_for_slice(len);
+        unsafe {
+            let ptr = Self::allocate_for_slice(len);
 
-        let mem = ptr as *mut _ as *mut u8;
-        let layout = Layout::for_value(&*ptr);
+            let mem = ptr as *mut _ as *mut u8;
+            let layout = Layout::for_value(&*ptr);
 
-        // Pointer to first element
-        let elems = &mut (*ptr).data as *mut [T] as *mut T;
+            // Pointer to first element
+            let elems = &mut (*ptr).data as *mut [T] as *mut T;
 
-        let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
+            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
 
-        for (i, item) in iter.enumerate() {
-            ptr::write(elems.add(i), item);
-            guard.n_elems += 1;
-        }
+            for (i, item) in iter.enumerate() {
+                ptr::write(elems.add(i), item);
+                guard.n_elems += 1;
+            }
 
-        // All clear. Forget the guard so it doesn't free the new ArcInner.
-        mem::forget(guard);
+            // All clear. Forget the guard so it doesn't free the new ArcInner.
+            mem::forget(guard);
 
-        Self::from_ptr(ptr)
+            Self::from_ptr(ptr)
+        }
     }
 }
 
@@ -1020,9 +1101,7 @@ impl<T: ?Sized> Clone for Arc<T> {
         // We abort because such a program is incredibly degenerate, and we
         // don't care to support it.
         if old_size > MAX_REFCOUNT {
-            unsafe {
-                abort();
-            }
+            abort();
         }
 
         Self::from_inner(self.ptr)
@@ -1125,7 +1204,7 @@ impl<T: Clone> Arc<T> {
 
         // As with `get_mut()`, the unsafety is ok because our reference was
         // either unique to begin with, or became one upon cloning the contents.
-        unsafe { &mut this.ptr.as_mut().data }
+        unsafe { Self::get_mut_unchecked(this) }
     }
 }
 
@@ -1201,7 +1280,9 @@ impl<T: ?Sized> Arc<T> {
     #[inline]
     #[unstable(feature = "get_mut_unchecked", issue = "63292")]
     pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
-        &mut this.ptr.as_mut().data
+        // We are careful to *not* create a reference covering the "count" fields, as
+        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
+        unsafe { &mut (*this.ptr.as_ptr()).data }
     }
 
     /// Determine whether this is the unique reference (including weak refs) to
@@ -1370,8 +1451,6 @@ impl<T> Weak<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::sync::Arc;
     /// use std::ptr;
     ///
@@ -1389,11 +1468,20 @@ impl<T> Weak<T> {
     /// ```
     ///
     /// [`null`]: ../../std/ptr/fn.null.html
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
     pub fn as_ptr(&self) -> *const T {
-        let offset = data_offset_sized::<T>();
-        let ptr = self.ptr.cast::<u8>().as_ptr().wrapping_offset(offset);
-        ptr as *const T
+        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
+
+        // SAFETY: we must offset the pointer manually, and said pointer may be
+        // a dangling weak (usize::MAX) if T is sized. data_offset is safe to call,
+        // because we know that a pointer to unsized T was derived from a real
+        // unsized T, as dangling weaks are only created for sized T. wrapping_offset
+        // is used so that we can use the same code path for the non-dangling
+        // unsized case and the potentially dangling sized case.
+        unsafe {
+            let offset = data_offset(ptr as *mut T);
+            set_data_ptr(ptr as *mut T, (ptr as *mut u8).wrapping_offset(offset))
+        }
     }
 
     /// Consumes the `Weak<T>` and turns it into a raw pointer.
@@ -1407,8 +1495,6 @@ impl<T> Weak<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::sync::{Arc, Weak};
     ///
     /// let strong = Arc::new("hello".to_owned());
@@ -1424,7 +1510,7 @@ impl<T> Weak<T> {
     ///
     /// [`from_raw`]: struct.Weak.html#method.from_raw
     /// [`as_ptr`]: struct.Weak.html#method.as_ptr
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
     pub fn into_raw(self) -> *const T {
         let result = self.as_ptr();
         mem::forget(self);
@@ -1452,8 +1538,6 @@ impl<T> Weak<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(weak_into_raw)]
-    ///
     /// use std::sync::{Arc, Weak};
     ///
     /// let strong = Arc::new("hello".to_owned());
@@ -1478,20 +1562,29 @@ impl<T> Weak<T> {
     /// [`Weak`]: struct.Weak.html
     /// [`Arc`]: struct.Arc.html
     /// [`forget`]: ../../std/mem/fn.forget.html
-    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
         if ptr.is_null() {
             Self::new()
         } else {
             // See Arc::from_raw for details
-            let offset = data_offset(ptr);
-            let fake_ptr = ptr as *mut ArcInner<T>;
-            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
-            Weak { ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw") }
+            unsafe {
+                let offset = data_offset(ptr);
+                let fake_ptr = ptr as *mut ArcInner<T>;
+                let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+                Weak { ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw") }
+            }
         }
     }
 }
 
+/// Helper type to allow accessing the reference counts without
+/// making any assertions about the data field.
+struct WeakInner<'a> {
+    weak: &'a atomic::AtomicUsize,
+    strong: &'a atomic::AtomicUsize,
+}
+
 impl<T: ?Sized> Weak<T> {
     /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
     /// dropping of the inner value if successful.
@@ -1538,9 +1631,7 @@ impl<T: ?Sized> Weak<T> {
 
             // See comments in `Arc::clone` for why we do this (for `mem::forget`).
             if n > MAX_REFCOUNT {
-                unsafe {
-                    abort();
-                }
+                abort();
             }
 
             // Relaxed is valid for the same reason it is on Arc's Clone impl
@@ -1597,8 +1688,18 @@ impl<T: ?Sized> Weak<T> {
     /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`,
     /// (i.e., when this `Weak` was created by `Weak::new`).
     #[inline]
-    fn inner(&self) -> Option<&ArcInner<T>> {
-        if is_dangling(self.ptr) { None } else { Some(unsafe { self.ptr.as_ref() }) }
+    fn inner(&self) -> Option<WeakInner<'_>> {
+        if is_dangling(self.ptr) {
+            None
+        } else {
+            // We are careful to *not* create a reference covering the "data" field, as
+            // the field may be mutated concurrently (for example, if the last `Arc`
+            // is dropped, the data field will be dropped in-place).
+            Some(unsafe {
+                let ptr = self.ptr.as_ptr();
+                WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
+            })
+        }
     }
 
     /// Returns `true` if the two `Weak`s point to the same allocation (similar to
@@ -1677,9 +1778,7 @@ impl<T: ?Sized> Clone for Weak<T> {
 
         // See comments in Arc::clone() for why we do this (for mem::forget).
         if old_size > MAX_REFCOUNT {
-            unsafe {
-                abort();
-            }
+            abort();
         }
 
         Weak { ptr: self.ptr }
@@ -1778,7 +1877,7 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
     #[inline]
     fn eq(&self, other: &Arc<T>) -> bool {
         Arc::ptr_eq(self, other) || **self == **other
@@ -2047,6 +2146,21 @@ impl<T> From<Vec<T>> for Arc<[T]> {
     }
 }
 
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Arc<B>
+where
+    B: ToOwned + ?Sized,
+    Arc<B>: From<&'a B> + From<B::Owned>,
+{
+    #[inline]
+    fn from(cow: Cow<'a, B>) -> Arc<B> {
+        match cow {
+            Cow::Borrowed(s) => Arc::from(s),
+            Cow::Owned(s) => Arc::from(s),
+        }
+    }
+}
+
 #[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
 impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]>
 where
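In use, the new impl lets either `Cow` variant convert directly, relying on the existing `From<&str>` and `From<String>` impls for `Arc<str>`:

```
use std::borrow::Cow;
use std::sync::Arc;

fn main() {
    let borrowed: Cow<'_, str> = Cow::Borrowed("hello");
    let owned: Cow<'_, str> = Cow::Owned(String::from("world"));

    let a: Arc<str> = Arc::from(borrowed); // clones the borrowed data
    let b: Arc<str> = Arc::from(owned);    // moves the owned data
    assert_eq!(&*a, "hello");
    assert_eq!(&*b, "world");
}
```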
@@ -2104,25 +2218,25 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        ArcFromIter::from_iter(iter.into_iter())
+        ToArcSlice::to_arc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Arc<[T]>`.
-trait ArcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_arc_slice(self) -> Arc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+    default fn to_arc_slice(self) -> Arc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+    fn to_arc_slice(self) -> Arc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -2133,29 +2247,15 @@ impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
 
             unsafe {
                 // SAFETY: We need the iterator to have an exact length;
                 // `TrustedLen` guarantees that when `high` is `Some`, as checked above.
-                Arc::from_iter_exact(iter, low)
+                Arc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
     fn borrow(&self) -> &T {
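The refactor above turns the specialization inside out: instead of a trait implemented for `Arc<[T]>` and dispatched on the iterator type parameter, the trait is implemented on the iterator itself. A standalone sketch of that shape, under the nightly `specialization` feature and with illustrative names (not std's):

```
#![feature(specialization)]

trait Describe: Iterator + Sized {
    fn describe(self) -> String;
}

impl<I: Iterator> Describe for I {
    // Fallback: the length is not known up front.
    default fn describe(self) -> String {
        format!("collected {} items", self.count())
    }
}

impl<I: ExactSizeIterator> Describe for I {
    // Specialized: the exact length is available without consuming the iterator.
    fn describe(self) -> String {
        format!("exactly {} items", self.len())
    }
}

fn main() {
    println!("{}", (0..10).describe());                      // specialized impl
    println!("{}", (0..).take_while(|&x| x < 5).describe()); // fallback impl
}
```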
@@ -2173,20 +2273,22 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
 #[stable(feature = "pin", since = "1.33.0")]
 impl<T: ?Sized> Unpin for Arc<T> {}
 
-/// Computes the offset of the data field within `ArcInner`.
+/// Gets the offset within an `ArcInner` for the payload of the type
+/// described by the given pointer.
+///
+/// # Safety
+///
+/// This has the same safety requirements as `align_of_val_raw`. In effect:
+///
+/// - This function is safe for any argument if `T` is sized, and
+/// - if `T` is unsized, the pointer must have appropriate pointer metadata
+///   acquired from the real instance that you are getting this offset for.
 unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
     // Align the unsized value to the end of the `ArcInner`.
     // Because it is `?Sized`, it will always be the last field in memory.
     // Note: This is a detail of the current implementation of the compiler,
     // and is not a guaranteed language detail. Do not rely on it outside of std.
-    data_offset_align(align_of_val(&*ptr))
-}
-
-/// Computes the offset of the data field within `ArcInner`.
-///
-/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
-fn data_offset_sized<T>() -> isize {
-    data_offset_align(align_of::<T>())
+    unsafe { data_offset_align(align_of_val(&*ptr)) }
 }
 
 #[inline]
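For a sized payload, the `data_offset_align` computation reduces to rounding the counter header's size up to the payload's alignment. A hedged sketch of that arithmetic, with a simplified two-counter header rather than std's `ArcInner`:

```
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;

struct Header {
    strong: AtomicUsize,
    weak: AtomicUsize,
}

/// Offset of a payload with alignment `align`, placed directly after `Header`.
fn data_offset_align(align: usize) -> usize {
    // Round the header size up to `align` (a power of two).
    (size_of::<Header>() + align - 1) & !(align - 1)
}

fn main() {
    // A byte-aligned payload starts right after the header...
    assert_eq!(data_offset_align(1), size_of::<Header>());
    // ...while a highly aligned payload starts at the next aligned boundary.
    assert_eq!(data_offset_align(32) % 32, 0);
}
```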
diff --git a/src/liballoc/sync/tests.rs b/src/liballoc/sync/tests.rs
index edc2820ee22..6f08cd7f123 100644
--- a/src/liballoc/sync/tests.rs
+++ b/src/liballoc/sync/tests.rs
@@ -32,7 +32,6 @@ impl Drop for Canary {
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
 fn manually_share_arc() {
     let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
     let arc_v = Arc::new(v);
@@ -337,12 +336,13 @@ fn test_ptr_eq() {
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
 fn test_weak_count_locked() {
     let mut a = Arc::new(atomic::AtomicBool::new(false));
     let a2 = a.clone();
     let t = thread::spawn(move || {
-        for _i in 0..1000000 {
+        // Miri is too slow
+        let count = if cfg!(miri) { 1000 } else { 1000000 };
+        for _i in 0..count {
             Arc::get_mut(&mut a);
         }
         a.store(true, SeqCst);
@@ -351,6 +351,8 @@ fn test_weak_count_locked() {
     while !a2.load(SeqCst) {
         let n = Arc::weak_count(&a2);
         assert!(n < 2, "bad weak count: {}", n);
+        #[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
+        atomic::spin_loop_hint();
     }
     t.join().unwrap();
 }
@@ -463,14 +465,14 @@ fn test_from_vec() {
 fn test_downcast() {
     use std::any::Any;
 
-    let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::max_value());
+    let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::MAX);
     let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
 
     assert!(r1.clone().downcast::<u32>().is_err());
 
     let r1i32 = r1.downcast::<i32>();
     assert!(r1i32.is_ok());
-    assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value()));
+    assert_eq!(r1i32.unwrap(), Arc::new(i32::MAX));
 
     assert!(r2.clone().downcast::<i32>().is_err());
 
diff --git a/src/liballoc/task.rs b/src/liballoc/task.rs
index a64d5d7a63b..0d1cc99df47 100644
--- a/src/liballoc/task.rs
+++ b/src/liballoc/task.rs
@@ -1,6 +1,6 @@
 #![unstable(feature = "wake_trait", issue = "69912")]
 //! Types and Traits for working with asynchronous tasks.
-use core::mem::{self, ManuallyDrop};
+use core::mem::ManuallyDrop;
 use core::task::{RawWaker, RawWakerVTable, Waker};
 
 use crate::sync::Arc;
@@ -60,26 +60,29 @@ impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for RawWaker {
 fn raw_waker<W: Wake + Send + Sync + 'static>(waker: Arc<W>) -> RawWaker {
     // Increment the reference count of the arc to clone it.
     unsafe fn clone_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) -> RawWaker {
-        let waker: Arc<W> = Arc::from_raw(waker as *const W);
-        mem::forget(Arc::clone(&waker));
-        raw_waker(waker)
+        unsafe { Arc::incr_strong_count(waker as *const W) };
+        RawWaker::new(
+            waker as *const (),
+            &RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
+        )
     }
 
     // Wake by value, moving the Arc into the Wake::wake function
     unsafe fn wake<W: Wake + Send + Sync + 'static>(waker: *const ()) {
-        let waker: Arc<W> = Arc::from_raw(waker as *const W);
+        let waker: Arc<W> = unsafe { Arc::from_raw(waker as *const W) };
         <W as Wake>::wake(waker);
     }
 
     // Wake by reference, wrap the waker in ManuallyDrop to avoid dropping it
     unsafe fn wake_by_ref<W: Wake + Send + Sync + 'static>(waker: *const ()) {
-        let waker: ManuallyDrop<Arc<W>> = ManuallyDrop::new(Arc::from_raw(waker as *const W));
+        let waker: ManuallyDrop<Arc<W>> =
+            unsafe { ManuallyDrop::new(Arc::from_raw(waker as *const W)) };
         <W as Wake>::wake_by_ref(&waker);
     }
 
     // Decrement the reference count of the Arc on drop
     unsafe fn drop_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) {
-        mem::drop(Arc::from_raw(waker as *const W));
+        unsafe { Arc::decr_strong_count(waker as *const W) };
     }
 
     RawWaker::new(
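Observable behavior of the vtable above, sketched with the then-unstable `wake_trait` feature this module is gated on (the `Flag` type is illustrative):

```
#![feature(wake_trait)]

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Wake, Waker};

struct Flag(AtomicBool);

impl Wake for Flag {
    fn wake(self: Arc<Self>) {
        self.0.store(true, Ordering::SeqCst);
    }
}

fn main() {
    let flag = Arc::new(Flag(AtomicBool::new(false)));
    // `Waker::from(Arc<W>)` goes through `raw_waker`; after this change,
    // cloning only bumps the strong count instead of a from_raw/forget round trip.
    let waker = Waker::from(flag.clone());
    let clone = waker.clone();
    clone.wake(); // consumes one strong reference and calls `Flag::wake`
    assert!(flag.0.load(Ordering::SeqCst));
}
```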
diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs
index be5516f54f3..62084ccf53c 100644
--- a/src/liballoc/tests/binary_heap.rs
+++ b/src/liballoc/tests/binary_heap.rs
@@ -372,6 +372,14 @@ fn assert_covariance() {
     }
 }
 
+#[test]
+fn test_retain() {
+    let mut a = BinaryHeap::from(vec![-10, -5, 1, 2, 4, 13]);
+    a.retain(|x| x % 2 == 0);
+
+    assert_eq!(a.into_sorted_vec(), [-10, 2, 4])
+}
+
 // old binaryheap failed this test
 //
 // Integrity means that all elements are present after a comparison panics,
@@ -409,16 +417,14 @@ fn panic_safe() {
     }
     let mut rng = thread_rng();
     const DATASZ: usize = 32;
-    #[cfg(not(miri))] // Miri is too slow
-    const NTEST: usize = 10;
-    #[cfg(miri)]
-    const NTEST: usize = 1;
+    // Miri is too slow
+    let ntest = if cfg!(miri) { 1 } else { 10 };
 
     // don't use 0 in the data -- we want to catch the zeroed-out case.
     let data = (1..=DATASZ).collect::<Vec<_>>();
 
     // since it's a fuzzy test, run several tries.
-    for _ in 0..NTEST {
+    for _ in 0..ntest {
         for i in 1..=DATASZ {
             DROP_COUNTER.store(0, Ordering::SeqCst);
 
diff --git a/src/liballoc/tests/borrow.rs b/src/liballoc/tests/borrow.rs
new file mode 100644
index 00000000000..8bfcf323f67
--- /dev/null
+++ b/src/liballoc/tests/borrow.rs
@@ -0,0 +1,47 @@
+use std::borrow::{Cow, ToOwned};
+use std::ffi::{CStr, OsStr};
+use std::path::Path;
+use std::rc::Rc;
+use std::sync::Arc;
+
+macro_rules! test_from_cow {
+    ($value:ident => $($ty:ty),+) => {$(
+        let borrowed = <$ty>::from(Cow::Borrowed($value));
+        let owned = <$ty>::from(Cow::Owned($value.to_owned()));
+        assert_eq!($value, &*borrowed);
+        assert_eq!($value, &*owned);
+    )+};
+    ($value:ident : & $ty:ty) => {
+        test_from_cow!($value => Box<$ty>, Rc<$ty>, Arc<$ty>);
+    }
+}
+
+#[test]
+fn test_from_cow_slice() {
+    let slice: &[i32] = &[1, 2, 3];
+    test_from_cow!(slice: &[i32]);
+}
+
+#[test]
+fn test_from_cow_str() {
+    let string = "hello";
+    test_from_cow!(string: &str);
+}
+
+#[test]
+fn test_from_cow_c_str() {
+    let string = CStr::from_bytes_with_nul(b"hello\0").unwrap();
+    test_from_cow!(string: &CStr);
+}
+
+#[test]
+fn test_from_cow_os_str() {
+    let string = OsStr::new("hello");
+    test_from_cow!(string: &OsStr);
+}
+
+#[test]
+fn test_from_cow_path() {
+    let path = Path::new("hello");
+    test_from_cow!(path: &Path);
+}
diff --git a/src/liballoc/tests/boxed.rs b/src/liballoc/tests/boxed.rs
index 66782ecbeb7..5377485da8f 100644
--- a/src/liballoc/tests/boxed.rs
+++ b/src/liballoc/tests/boxed.rs
@@ -16,3 +16,36 @@ fn unitialized_zero_size_box() {
         NonNull::<MaybeUninit<String>>::dangling().as_ptr(),
     );
 }
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+struct Dummy {
+    _data: u8,
+}
+
+#[test]
+fn box_clone_and_clone_from_equivalence() {
+    for size in (0..8).map(|i| 2usize.pow(i)) {
+        let control = vec![Dummy { _data: 42 }; size].into_boxed_slice();
+        let clone = control.clone();
+        let mut copy = vec![Dummy { _data: 84 }; size].into_boxed_slice();
+        copy.clone_from(&control);
+        assert_eq!(control, clone);
+        assert_eq!(control, copy);
+    }
+}
+
+/// This test might give a false positive in case the box reallocates, but the allocator keeps the
+/// original pointer.
+///
+/// On the other hand it won't give a false negative: if it fails, then the memory was definitely
+/// not reused.
+#[test]
+fn box_clone_from_ptr_stability() {
+    for size in (0..8).map(|i| 2usize.pow(i)) {
+        let control = vec![Dummy { _data: 42 }; size].into_boxed_slice();
+        let mut copy = vec![Dummy { _data: 84 }; size].into_boxed_slice();
+        let copy_raw = copy.as_ptr() as usize;
+        copy.clone_from(&control);
+        assert_eq!(copy.as_ptr() as usize, copy_raw);
+    }
+}
diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs
index 96b6c32a1fa..f66b5814ca0 100644
--- a/src/liballoc/tests/btree/map.rs
+++ b/src/liballoc/tests/btree/map.rs
@@ -28,10 +28,8 @@ const MIN_INSERTS_HEIGHT_2: usize = NODE_CAPACITY + (NODE_CAPACITY + 1) * NODE_C
 #[test]
 fn test_basic_large() {
     let mut map = BTreeMap::new();
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = MIN_INSERTS_HEIGHT_2;
+    // Miri is too slow
+    let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 } else { 10000 };
     assert_eq!(map.len(), 0);
 
     for i in 0..size {
@@ -155,10 +153,8 @@ fn test_basic_small() {
 
 #[test]
 fn test_iter() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 200;
+    // Miri is too slow
+    let size = if cfg!(miri) { 200 } else { 10000 };
 
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
@@ -180,10 +176,8 @@ fn test_iter() {
 
 #[test]
 fn test_iter_rev() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 200;
+    // Miri is too slow
+    let size = if cfg!(miri) { 200 } else { 10000 };
 
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
@@ -289,10 +283,8 @@ fn test_values_mut() {
 
 #[test]
 fn test_iter_mixed() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 200;
+    // Miri is too slow
+    let size = if cfg!(miri) { 200 } else { 10000 };
 
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
@@ -317,6 +309,42 @@ fn test_iter_mixed() {
     test(size, map.into_iter());
 }
 
+#[test]
+#[cfg_attr(miri, ignore)] // FIXME: fails in Miri <https://github.com/rust-lang/rust/issues/73915>
+fn test_iter_min_max() {
+    let mut a = BTreeMap::new();
+    assert_eq!(a.iter().min(), None);
+    assert_eq!(a.iter().max(), None);
+    assert_eq!(a.iter_mut().min(), None);
+    assert_eq!(a.iter_mut().max(), None);
+    assert_eq!(a.range(..).min(), None);
+    assert_eq!(a.range(..).max(), None);
+    assert_eq!(a.range_mut(..).min(), None);
+    assert_eq!(a.range_mut(..).max(), None);
+    assert_eq!(a.keys().min(), None);
+    assert_eq!(a.keys().max(), None);
+    assert_eq!(a.values().min(), None);
+    assert_eq!(a.values().max(), None);
+    assert_eq!(a.values_mut().min(), None);
+    assert_eq!(a.values_mut().max(), None);
+    a.insert(1, 42);
+    a.insert(2, 24);
+    assert_eq!(a.iter().min(), Some((&1, &42)));
+    assert_eq!(a.iter().max(), Some((&2, &24)));
+    assert_eq!(a.iter_mut().min(), Some((&1, &mut 42)));
+    assert_eq!(a.iter_mut().max(), Some((&2, &mut 24)));
+    assert_eq!(a.range(..).min(), Some((&1, &42)));
+    assert_eq!(a.range(..).max(), Some((&2, &24)));
+    assert_eq!(a.range_mut(..).min(), Some((&1, &mut 42)));
+    assert_eq!(a.range_mut(..).max(), Some((&2, &mut 24)));
+    assert_eq!(a.keys().min(), Some(&1));
+    assert_eq!(a.keys().max(), Some(&2));
+    assert_eq!(a.values().min(), Some(&24));
+    assert_eq!(a.values().max(), Some(&42));
+    assert_eq!(a.values_mut().min(), Some(&mut 24));
+    assert_eq!(a.values_mut().max(), Some(&mut 42));
+}
+
 fn range_keys(map: &BTreeMap<i32, i32>, range: impl RangeBounds<i32>) -> Vec<i32> {
     map.range(range)
         .map(|(&k, &v)| {
@@ -525,10 +553,8 @@ fn test_range_backwards_4() {
 
 #[test]
 fn test_range_1000() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 1000;
-    #[cfg(miri)]
-    let size = MIN_INSERTS_HEIGHT_2 as u32;
+    // Miri is too slow
+    let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 as u32 } else { 1000 };
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     fn test(map: &BTreeMap<u32, u32>, size: u32, min: Bound<&u32>, max: Bound<&u32>) {
@@ -566,10 +592,8 @@ fn test_range_borrowed_key() {
 #[test]
 fn test_range() {
     let size = 200;
-    #[cfg(not(miri))] // Miri is too slow
-    let step = 1;
-    #[cfg(miri)]
-    let step = 66;
+    // Miri is too slow
+    let step = if cfg!(miri) { 66 } else { 1 };
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     for i in (0..size).step_by(step) {
@@ -589,10 +613,8 @@ fn test_range() {
 #[test]
 fn test_range_mut() {
     let size = 200;
-    #[cfg(not(miri))] // Miri is too slow
-    let step = 1;
-    #[cfg(miri)]
-    let step = 66;
+    // Miri is too slow
+    let step = if cfg!(miri) { 66 } else { 1 };
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     for i in (0..size).step_by(step) {
@@ -1263,10 +1285,8 @@ fn test_split_off_empty_left() {
 
 #[test]
 fn test_split_off_large_random_sorted() {
-    #[cfg(not(miri))] // Miri is too slow
-    let mut data = rand_data(1529);
-    #[cfg(miri)]
-    let mut data = rand_data(529);
+    // Miri is too slow
+    let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
     // special case with maximum height.
     data.sort();
 
diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs
index 136018b9f7d..b6c34b7c6c3 100644
--- a/src/liballoc/tests/btree/set.rs
+++ b/src/liballoc/tests/btree/set.rs
@@ -33,6 +33,37 @@ fn test_hash() {
     assert_eq!(hash(&x), hash(&y));
 }
 
+#[test]
+fn test_iter_min_max() {
+    let mut a = BTreeSet::new();
+    assert_eq!(a.iter().min(), None);
+    assert_eq!(a.iter().max(), None);
+    assert_eq!(a.range(..).min(), None);
+    assert_eq!(a.range(..).max(), None);
+    assert_eq!(a.difference(&BTreeSet::new()).min(), None);
+    assert_eq!(a.difference(&BTreeSet::new()).max(), None);
+    assert_eq!(a.intersection(&a).min(), None);
+    assert_eq!(a.intersection(&a).max(), None);
+    assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), None);
+    assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), None);
+    assert_eq!(a.union(&a).min(), None);
+    assert_eq!(a.union(&a).max(), None);
+    a.insert(1);
+    a.insert(2);
+    assert_eq!(a.iter().min(), Some(&1));
+    assert_eq!(a.iter().max(), Some(&2));
+    assert_eq!(a.range(..).min(), Some(&1));
+    assert_eq!(a.range(..).max(), Some(&2));
+    assert_eq!(a.difference(&BTreeSet::new()).min(), Some(&1));
+    assert_eq!(a.difference(&BTreeSet::new()).max(), Some(&2));
+    assert_eq!(a.intersection(&a).min(), Some(&1));
+    assert_eq!(a.intersection(&a).max(), Some(&2));
+    assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), Some(&1));
+    assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), Some(&2));
+    assert_eq!(a.union(&a).min(), Some(&1));
+    assert_eq!(a.union(&a).max(), Some(&2));
+}
+
 fn check<F>(a: &[i32], b: &[i32], expected: &[i32], f: F)
 where
     F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut dyn FnMut(&i32) -> bool) -> bool,
@@ -621,10 +652,8 @@ fn test_split_off_empty_left() {
 
 #[test]
 fn test_split_off_large_random_sorted() {
-    #[cfg(not(miri))] // Miri is too slow
-    let mut data = rand_data(1529);
-    #[cfg(miri)]
-    let mut data = rand_data(529);
+    // Miri is too slow
+    let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
     // special case with maximum height.
     data.sort();
 
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index ad6feaeebc6..e2dc816b015 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -12,14 +12,15 @@
 #![feature(associated_type_bounds)]
 #![feature(binary_heap_into_iter_sorted)]
 #![feature(binary_heap_drain_sorted)]
-#![feature(vec_remove_item)]
 #![feature(split_inclusive)]
+#![feature(binary_heap_retain)]
 
 use std::collections::hash_map::DefaultHasher;
 use std::hash::{Hash, Hasher};
 
 mod arc;
 mod binary_heap;
+mod borrow;
 mod boxed;
 mod btree;
 mod cow_str;
diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs
index 8e49e6d8eba..75b76bb73ed 100644
--- a/src/liballoc/tests/slice.rs
+++ b/src/liballoc/tests/slice.rs
@@ -463,15 +463,9 @@ fn test_sort() {
 
 #[test]
 fn test_sort_stability() {
-    #[cfg(not(miri))] // Miri is too slow
-    let large_range = 500..510;
-    #[cfg(not(miri))] // Miri is too slow
-    let rounds = 10;
-
-    #[cfg(miri)]
-    let large_range = 0..0; // empty range
-    #[cfg(miri)]
-    let rounds = 1;
+    // Miri is too slow
+    let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
+    let rounds = if cfg!(miri) { 1 } else { 10 };
 
     for len in (2..25).chain(large_range) {
         for _ in 0..rounds {
@@ -1727,15 +1721,9 @@ fn panic_safe() {
 
     let mut rng = thread_rng();
 
-    #[cfg(not(miri))] // Miri is too slow
-    let lens = (1..20).chain(70..MAX_LEN);
-    #[cfg(not(miri))] // Miri is too slow
-    let moduli = &[5, 20, 50];
-
-    #[cfg(miri)]
-    let lens = 1..10;
-    #[cfg(miri)]
-    let moduli = &[5];
+    // Miri is too slow
+    let lens = if cfg!(miri) { (1..10).chain(20..21) } else { (1..20).chain(70..MAX_LEN) };
+    let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
 
     for len in lens {
         for &modulus in moduli {
diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs
index b703df6f3cb..eee98d45340 100644
--- a/src/liballoc/tests/str.rs
+++ b/src/liballoc/tests/str.rs
@@ -566,13 +566,13 @@ mod slice_index {
                 data: "hello";
                 // note: using 0 specifically ensures that the result of overflowing is 0..0,
                 //       so that `get` doesn't simply return None for the wrong reason.
-                bad: data[0..=usize::max_value()];
+                bad: data[0..=usize::MAX];
                 message: "maximum usize";
             }
 
             in mod rangetoinclusive {
                 data: "hello";
-                bad: data[..=usize::max_value()];
+                bad: data[..=usize::MAX];
                 message: "maximum usize";
             }
         }
diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs
index 9ea020d2d19..d38655af78c 100644
--- a/src/liballoc/tests/string.rs
+++ b/src/liballoc/tests/string.rs
@@ -714,3 +714,10 @@ fn test_try_reserve_exact() {
         }
     }
 }
+
+#[test]
+fn test_from_char() {
+    assert_eq!(String::from('a'), 'a'.to_string());
+    let s: String = 'x'.into();
+    assert_eq!(s, 'x'.to_string());
+}
diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs
index 3d76324f7e8..ffff543b07f 100644
--- a/src/liballoc/tests/vec.rs
+++ b/src/liballoc/tests/vec.rs
@@ -1,5 +1,6 @@
 use std::borrow::Cow;
 use std::collections::TryReserveError::*;
+use std::fmt::Debug;
 use std::mem::size_of;
 use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::vec::{Drain, IntoIter};
@@ -16,7 +17,7 @@ impl Drop for DropCounter<'_> {
 
 #[test]
 fn test_small_vec_struct() {
-    assert!(size_of::<Vec<u8>>() == size_of::<usize>() * 3);
+    assert_eq!(size_of::<Vec<u8>>(), size_of::<usize>() * 3);
 }
 
 #[test]
@@ -68,7 +69,7 @@ fn test_reserve() {
 
 #[test]
 fn test_zst_capacity() {
-    assert_eq!(Vec::<()>::new().capacity(), usize::max_value());
+    assert_eq!(Vec::<()>::new().capacity(), usize::MAX);
 }
 
 #[test]
@@ -132,21 +133,6 @@ fn test_extend_ref() {
 }
 
 #[test]
-fn test_remove_item() {
-    let mut v = vec![1, 2, 3];
-    v.remove_item(&1);
-
-    assert_eq!(v.len(), 2);
-    assert_eq!(v, [2, 3]);
-
-    let mut w = vec![1, 2, 3];
-    w.remove_item(&4);
-
-    assert_eq!(w.len(), 3);
-    w.remove_item(&4);
-}
-
-#[test]
 fn test_slice_from_mut() {
     let mut values = vec![1, 2, 3, 4, 5];
     {
@@ -563,19 +549,19 @@ fn test_drain_inclusive_range() {
 
 #[test]
 fn test_drain_max_vec_size() {
-    let mut v = Vec::<()>::with_capacity(usize::max_value());
+    let mut v = Vec::<()>::with_capacity(usize::MAX);
     unsafe {
-        v.set_len(usize::max_value());
+        v.set_len(usize::MAX);
     }
-    for _ in v.drain(usize::max_value() - 1..) {}
-    assert_eq!(v.len(), usize::max_value() - 1);
+    for _ in v.drain(usize::MAX - 1..) {}
+    assert_eq!(v.len(), usize::MAX - 1);
 
-    let mut v = Vec::<()>::with_capacity(usize::max_value());
+    let mut v = Vec::<()>::with_capacity(usize::MAX);
     unsafe {
-        v.set_len(usize::max_value());
+        v.set_len(usize::MAX);
     }
-    for _ in v.drain(usize::max_value() - 1..=usize::max_value() - 1) {}
-    assert_eq!(v.len(), usize::max_value() - 1);
+    for _ in v.drain(usize::MAX - 1..=usize::MAX - 1) {}
+    assert_eq!(v.len(), usize::MAX - 1);
 }
 
 #[test]
@@ -1473,3 +1459,171 @@ fn vec_macro_repeating_null_raw_fat_pointer() {
         vtable: *mut (),
     }
 }
+
+// This test will likely fail if you change the capacities used in
+// `RawVec::grow_amortized`.
+#[test]
+fn test_push_growth_strategy() {
+    // If the element size is 1, we jump from 0 to 8, then double.
+    {
+        let mut v1: Vec<u8> = vec![];
+        assert_eq!(v1.capacity(), 0);
+
+        for _ in 0..8 {
+            v1.push(0);
+            assert_eq!(v1.capacity(), 8);
+        }
+
+        for _ in 8..16 {
+            v1.push(0);
+            assert_eq!(v1.capacity(), 16);
+        }
+
+        for _ in 16..32 {
+            v1.push(0);
+            assert_eq!(v1.capacity(), 32);
+        }
+
+        for _ in 32..64 {
+            v1.push(0);
+            assert_eq!(v1.capacity(), 64);
+        }
+    }
+
+    // If the element size is 2..=1024, we jump from 0 to 4, then double.
+    {
+        let mut v2: Vec<u16> = vec![];
+        let mut v1024: Vec<[u8; 1024]> = vec![];
+        assert_eq!(v2.capacity(), 0);
+        assert_eq!(v1024.capacity(), 0);
+
+        for _ in 0..4 {
+            v2.push(0);
+            v1024.push([0; 1024]);
+            assert_eq!(v2.capacity(), 4);
+            assert_eq!(v1024.capacity(), 4);
+        }
+
+        for _ in 4..8 {
+            v2.push(0);
+            v1024.push([0; 1024]);
+            assert_eq!(v2.capacity(), 8);
+            assert_eq!(v1024.capacity(), 8);
+        }
+
+        for _ in 8..16 {
+            v2.push(0);
+            v1024.push([0; 1024]);
+            assert_eq!(v2.capacity(), 16);
+            assert_eq!(v1024.capacity(), 16);
+        }
+
+        for _ in 16..32 {
+            v2.push(0);
+            v1024.push([0; 1024]);
+            assert_eq!(v2.capacity(), 32);
+            assert_eq!(v1024.capacity(), 32);
+        }
+
+        for _ in 32..64 {
+            v2.push(0);
+            v1024.push([0; 1024]);
+            assert_eq!(v2.capacity(), 64);
+            assert_eq!(v1024.capacity(), 64);
+        }
+    }
+
+    // If the element size is > 1024, we jump from 0 to 1, then double.
+    {
+        let mut v1025: Vec<[u8; 1025]> = vec![];
+        assert_eq!(v1025.capacity(), 0);
+
+        for _ in 0..1 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 1);
+        }
+
+        for _ in 1..2 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 2);
+        }
+
+        for _ in 2..4 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 4);
+        }
+
+        for _ in 4..8 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 8);
+        }
+
+        for _ in 8..16 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 16);
+        }
+
+        for _ in 16..32 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 32);
+        }
+
+        for _ in 32..64 {
+            v1025.push([0; 1025]);
+            assert_eq!(v1025.capacity(), 64);
+        }
+    }
+}
+
+macro_rules! generate_assert_eq_vec_and_prim {
+    ($name:ident<$B:ident>($type:ty)) => {
+        fn $name<A: PartialEq<$B> + Debug, $B: Debug>(a: Vec<A>, b: $type) {
+            assert!(a == b);
+            assert_eq!(a, b);
+        }
+    };
+}
+
+generate_assert_eq_vec_and_prim! { assert_eq_vec_and_slice  <B>(&[B])   }
+generate_assert_eq_vec_and_prim! { assert_eq_vec_and_array_3<B>([B; 3]) }
+
+#[test]
+fn partialeq_vec_and_prim() {
+    assert_eq_vec_and_slice(vec![1, 2, 3], &[1, 2, 3]);
+    assert_eq_vec_and_array_3(vec![1, 2, 3], [1, 2, 3]);
+}
+
+macro_rules! assert_partial_eq_valid {
+    ($a2:ident, $a3:ident; $b2:ident, $b3: ident) => {
+        assert!($a2 == $b2);
+        assert!($a2 != $b3);
+        assert!($a3 != $b2);
+        assert!($a3 == $b3);
+        assert_eq!($a2, $b2);
+        assert_ne!($a2, $b3);
+        assert_ne!($a3, $b2);
+        assert_eq!($a3, $b3);
+    };
+}
+
+#[test]
+fn partialeq_vec_full() {
+    let vec2: Vec<_> = vec![1, 2];
+    let vec3: Vec<_> = vec![1, 2, 3];
+    let slice2: &[_] = &[1, 2];
+    let slice3: &[_] = &[1, 2, 3];
+    let slicemut2: &[_] = &mut [1, 2];
+    let slicemut3: &[_] = &mut [1, 2, 3];
+    let array2: [_; 2] = [1, 2];
+    let array3: [_; 3] = [1, 2, 3];
+    let arrayref2: &[_; 2] = &[1, 2];
+    let arrayref3: &[_; 3] = &[1, 2, 3];
+
+    assert_partial_eq_valid!(vec2,vec3; vec2,vec3);
+    assert_partial_eq_valid!(vec2,vec3; slice2,slice3);
+    assert_partial_eq_valid!(vec2,vec3; slicemut2,slicemut3);
+    assert_partial_eq_valid!(slice2,slice3; vec2,vec3);
+    assert_partial_eq_valid!(slicemut2,slicemut3; vec2,vec3);
+    assert_partial_eq_valid!(vec2,vec3; array2,array3);
+    assert_partial_eq_valid!(vec2,vec3; arrayref2,arrayref3);
+}
diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs
index c4b6a36bb05..762dc4be44d 100644
--- a/src/liballoc/tests/vec_deque.rs
+++ b/src/liballoc/tests/vec_deque.rs
@@ -954,16 +954,14 @@ fn test_append_permutations() {
         out
     }
 
-    #[cfg(not(miri))] // Miri is too slow
-    const MAX: usize = 5;
-    #[cfg(miri)]
-    const MAX: usize = 3;
+    // Miri is too slow
+    let max = if cfg!(miri) { 3 } else { 5 };
 
     // Many different permutations of both the `VecDeque` getting appended to
     // and the one getting appended are generated to check `append`.
     // This ensures all 6 code paths of `append` are tested.
-    for src_push_back in 0..MAX {
-        for src_push_front in 0..MAX {
+    for src_push_back in 0..max {
+        for src_push_front in 0..max {
             // doesn't pop more values than are pushed
             for src_pop_back in 0..(src_push_back + src_push_front) {
                 for src_pop_front in 0..(src_push_back + src_push_front - src_pop_back) {
@@ -974,8 +972,8 @@ fn test_append_permutations() {
                         src_pop_front,
                     );
 
-                    for dst_push_back in 0..MAX {
-                        for dst_push_front in 0..MAX {
+                    for dst_push_back in 0..max {
+                        for dst_push_front in 0..max {
                             for dst_pop_back in 0..(dst_push_back + dst_push_front) {
                                 for dst_pop_front in
                                     0..(dst_push_back + dst_push_front - dst_pop_back)
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index b4a9da84787..5db96a504a6 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -62,7 +62,7 @@
 use core::array::LengthAtMost32;
 use core::cmp::{self, Ordering};
 use core::fmt;
-use core::hash::{self, Hash};
+use core::hash::{Hash, Hasher};
 use core::intrinsics::{arith_offset, assume};
 use core::iter::{FromIterator, FusedIterator, TrustedLen};
 use core::marker::PhantomData;
@@ -343,14 +343,19 @@ impl<T> Vec<T> {
     ///
     /// // The vector contains no items, even though it has capacity for more
     /// assert_eq!(vec.len(), 0);
+    /// assert_eq!(vec.capacity(), 10);
     ///
     /// // These are all done without reallocating...
     /// for i in 0..10 {
     ///     vec.push(i);
     /// }
+    /// assert_eq!(vec.len(), 10);
+    /// assert_eq!(vec.capacity(), 10);
     ///
     /// // ...but this may make the vector reallocate
     /// vec.push(11);
+    /// assert_eq!(vec.len(), 11);
+    /// assert!(vec.capacity() >= 11);
     /// ```
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -460,7 +465,7 @@ impl<T> Vec<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Vec<T> {
-        Vec { buf: RawVec::from_raw_parts(ptr, capacity), len: length }
+        unsafe { Vec { buf: RawVec::from_raw_parts(ptr, capacity), len: length } }
     }
 
     /// Returns the number of elements the vector can hold without
@@ -486,7 +491,7 @@ impl<T> Vec<T> {
     ///
     /// # Panics
     ///
-    /// Panics if the new capacity overflows `usize`.
+    /// Panics if the new capacity exceeds `isize::MAX` bytes.
     ///
     /// # Examples
     ///
@@ -741,7 +746,7 @@ impl<T> Vec<T> {
                 return;
             }
             let remaining_len = self.len - len;
-            let s = slice::from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+            let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
             self.len = len;
             ptr::drop_in_place(s);
         }
@@ -979,7 +984,7 @@ impl<T> Vec<T> {
             // bounds check above succeeds there must be a last element (which
             // can be self[index] itself).
             let last = ptr::read(self.as_ptr().add(len - 1));
-            let hole: *mut T = self.as_mut_ptr().add(index);
+            let hole = self.as_mut_ptr().add(index);
             self.set_len(len - 1);
             ptr::replace(hole, last)
         }
@@ -1183,7 +1188,7 @@ impl<T> Vec<T> {
     ///
     /// # Panics
     ///
-    /// Panics if the number of elements in the vector overflows a `usize`.
+    /// Panics if the new capacity exceeds `isize::MAX` bytes.
     ///
     /// # Examples
     ///
@@ -1259,10 +1264,10 @@ impl<T> Vec<T> {
     /// Appends elements to `Self` from another buffer.
     #[inline]
     unsafe fn append_elements(&mut self, other: *const [T]) {
-        let count = (*other).len();
+        let count = unsafe { (*other).len() };
         self.reserve(count);
         let len = self.len();
-        ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count);
+        unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
         self.len += count;
     }
 
@@ -1619,8 +1624,8 @@ impl<T: Default> Vec<T> {
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     #[rustc_deprecated(
         reason = "This is moving towards being removed in favor \
-        of `.resize_with(Default::default)`.  If you disagree, please comment \
-        in the tracking issue.",
+                  of `.resize_with(Default::default)`.  If you disagree, please comment \
+                  in the tracking issue.",
         since = "1.33.0"
     )]
     pub fn resize_default(&mut self, new_len: usize) {
@@ -1634,7 +1639,7 @@ impl<T: Default> Vec<T> {
     }
 }
 
-// This code generalises `extend_with_{element,default}`.
+// This code generalizes `extend_with_{element,default}`.
 trait ExtendWith<T> {
     fn next(&mut self) -> T;
     fn last(self) -> T;
@@ -1755,17 +1760,15 @@ impl<T: PartialEq> Vec<T> {
 impl<T> Vec<T> {
     /// Removes the first instance of `item` from the vector if the item exists.
     ///
-    /// # Examples
-    ///
-    /// ```
-    /// # #![feature(vec_remove_item)]
-    /// let mut vec = vec![1, 2, 3, 1];
-    ///
-    /// vec.remove_item(&1);
-    ///
-    /// assert_eq!(vec, vec![2, 3, 1]);
-    /// ```
+    /// This method will be removed soon.
     #[unstable(feature = "vec_remove_item", reason = "recently added", issue = "40062")]
+    #[rustc_deprecated(
+        reason = "Removing the first item equal to a needle is already easily possible \
+            with iterators and the current Vec methods. Furthermore, having a method for \
+            one particular case of removal (linear search, only the first item, no swap remove) \
+            but not for others is inconsistent. This method will be removed soon.",
+        since = "1.46.0"
+    )]
     pub fn remove_item<V>(&mut self, item: &V) -> Option<T>
     where
         T: PartialEq<V>,
@@ -1798,6 +1801,21 @@ impl<T: Clone> SpecFromElem for T {
     }
 }
 
+impl SpecFromElem for i8 {
+    #[inline]
+    fn from_elem(elem: i8, n: usize) -> Vec<i8> {
+        if elem == 0 {
+            return Vec { buf: RawVec::with_capacity_zeroed(n), len: n };
+        }
+        unsafe {
+            let mut v = Vec::with_capacity(n);
+            ptr::write_bytes(v.as_mut_ptr(), elem as u8, n);
+            v.set_len(n);
+            v
+        }
+    }
+}
+
 impl SpecFromElem for u8 {
     #[inline]
     fn from_elem(elem: u8, n: usize) -> Vec<u8> {
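`i8` gains its own impl (mirroring the `u8` one here) because byte-sized elements with a nonzero fill can use a single `write_bytes` (memset) instead of element-by-element cloning. The same shape, standalone:

```
use std::ptr;

/// Fill-with-byte construction, the shape shared by the `u8` and `i8` impls.
fn from_elem_byte(elem: u8, n: usize) -> Vec<u8> {
    let mut v = Vec::with_capacity(n);
    unsafe {
        // SAFETY: the allocation holds `n` bytes, and `u8` needs no
        // initialization or drop bookkeeping beyond setting the length.
        ptr::write_bytes(v.as_mut_ptr(), elem, n);
        v.set_len(n);
    }
    v
}

fn main() {
    assert_eq!(from_elem_byte(0x2a, 4), vec![0x2a; 4]);
}
```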
@@ -1825,13 +1843,14 @@ impl<T: Clone + IsZero> SpecFromElem for T {
     }
 }
 
+#[rustc_specialization_trait]
 unsafe trait IsZero {
     /// Whether this value is zero
     fn is_zero(&self) -> bool;
 }
 
 macro_rules! impl_is_zero {
-    ($t: ty, $is_zero: expr) => {
+    ($t:ty, $is_zero:expr) => {
         unsafe impl IsZero for $t {
             #[inline]
             fn is_zero(&self) -> bool {
@@ -1841,7 +1860,6 @@ macro_rules! impl_is_zero {
     };
 }
 
-impl_is_zero!(i8, |x| x == 0);
 impl_is_zero!(i16, |x| x == 0);
 impl_is_zero!(i32, |x| x == 0);
 impl_is_zero!(i64, |x| x == 0);
@@ -1874,9 +1892,12 @@ unsafe impl<T> IsZero for *mut T {
     }
 }
 
-// `Option<&T>`, `Option<&mut T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
-// For fat pointers, the bytes that would be the pointer metadata in the `Some` variant
-// are padding in the `None` variant, so ignoring them and zero-initializing instead is ok.
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
 
 unsafe impl<T: ?Sized> IsZero for Option<&T> {
     #[inline]
@@ -1885,13 +1906,6 @@ unsafe impl<T: ?Sized> IsZero for Option<&T> {
     }
 }
 
-unsafe impl<T: ?Sized> IsZero for Option<&mut T> {
-    #[inline]
-    fn is_zero(&self) -> bool {
-        self.is_none()
-    }
-}
-
 unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
     #[inline]
     fn is_zero(&self) -> bool {
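These impls lean on the guaranteed null-pointer (niche) optimization: `None` is represented as all-null for these types, so an all-zero byte pattern is a valid `None`. The layout guarantee can be checked directly:

```
use std::mem::size_of;

fn main() {
    // `None` occupies the null-pointer niche, so the `Option` adds no size.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    // Fat pointers too: the metadata bytes are padding in the `None` variant.
    assert_eq!(size_of::<Option<&[u8]>>(), size_of::<&[u8]>());
}
```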
@@ -1904,6 +1918,22 @@ unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
 ////////////////////////////////////////////////////////////////////////////////
 
 #[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ops::Deref for Vec<T> {
+    type Target = [T];
+
+    fn deref(&self) -> &[T] {
+        unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ops::DerefMut for Vec<T> {
+    fn deref_mut(&mut self) -> &mut [T] {
+        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Clone> Clone for Vec<T> {
     #[cfg(not(test))]
     fn clone(&self) -> Vec<T> {
@@ -1927,7 +1957,7 @@ impl<T: Clone> Clone for Vec<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Hash> Hash for Vec<T> {
     #[inline]
-    fn hash<H: hash::Hasher>(&self, state: &mut H) {
+    fn hash<H: Hasher>(&self, state: &mut H) {
         Hash::hash(&**self, state)
     }
 }
@@ -1959,22 +1989,6 @@ impl<T, I: SliceIndex<[T]>> IndexMut<I> for Vec<T> {
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ops::Deref for Vec<T> {
-    type Target = [T];
-
-    fn deref(&self) -> &[T] {
-        unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ops::DerefMut for Vec<T> {
-    fn deref_mut(&mut self) -> &mut [T] {
-        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
 impl<T> FromIterator<T> for Vec<T> {
     #[inline]
     fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
@@ -2048,6 +2062,16 @@ impl<T> Extend<T> for Vec<T> {
     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
         <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
     }
+
+    #[inline]
+    fn extend_one(&mut self, item: T) {
+        self.push(item);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 // Specialization trait used for Vec::from_iter and Vec::extend
@@ -2319,15 +2343,25 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for Vec<T> {
     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
         self.spec_extend(iter.into_iter())
     }
+
+    #[inline]
+    fn extend_one(&mut self, &item: &'a T) {
+        self.push(item);
+    }
+
+    #[inline]
+    fn extend_reserve(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
 }
 
 macro_rules! __impl_slice_eq1 {
-    ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
-        #[stable(feature = "rust1", since = "1.0.0")]
+    ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
+        #[$stability]
         impl<A, B, $($vars)*> PartialEq<$rhs> for $lhs
         where
             A: PartialEq<B>,
-            $($constraints)*
+            $($ty: $bound)?
         {
             #[inline]
             fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
@@ -2337,18 +2371,23 @@ macro_rules! __impl_slice_eq1 {
     }
 }
 
-__impl_slice_eq1! { [] Vec<A>, Vec<B>, }
-__impl_slice_eq1! { [] Vec<A>, &[B], }
-__impl_slice_eq1! { [] Vec<A>, &mut [B], }
-__impl_slice_eq1! { [] Cow<'_, [A]>, &[B], A: Clone }
-__impl_slice_eq1! { [] Cow<'_, [A]>, &mut [B], A: Clone }
-__impl_slice_eq1! { [] Cow<'_, [A]>, Vec<B>, A: Clone }
-__impl_slice_eq1! { [const N: usize] Vec<A>, [B; N], [B; N]: LengthAtMost32 }
-__impl_slice_eq1! { [const N: usize] Vec<A>, &[B; N], [B; N]: LengthAtMost32 }
+__impl_slice_eq1! { [] Vec<A>, Vec<B>, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] Vec<A>, &[B], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] Vec<A>, &mut [B], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] &[A], Vec<B>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [] &mut [A], Vec<B>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [] Cow<'_, [A]>, Vec<B> where A: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] Cow<'_, [A]>, &[B] where A: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] Cow<'_, [A]>, &mut [B] where A: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [const N: usize] Vec<A>, [B; N] where [B; N]: LengthAtMost32, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [const N: usize] Vec<A>, &[B; N] where [B; N]: LengthAtMost32, #[stable(feature = "rust1", since = "1.0.0")] }
 
 // NOTE: some less important impls are omitted to reduce code bloat
 // FIXME(Centril): Reconsider this?
 //__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], [B; N]: LengthAtMost32 }
+//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, [A; N]: LengthAtMost32 }
+//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, [A; N]: LengthAtMost32 }
+//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, [A; N]: LengthAtMost32 }
 //__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], [B; N]: LengthAtMost32 }
 //__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], [B; N]: LengthAtMost32 }
 //__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], [B; N]: LengthAtMost32 }
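For reference, an invocation of the reworked macro such as `__impl_slice_eq1! { [] &[A], Vec<B>, #[stable(...)] }` expands to roughly the following (sketch; the generated `ne` method is elided):

```
#[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")]
impl<A, B> PartialEq<Vec<B>> for &[A]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &Vec<B>) -> bool {
        self[..] == other[..]
    }
}
```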
@@ -2379,7 +2418,9 @@ unsafe impl<#[may_dangle] T> Drop for Vec<T> {
     fn drop(&mut self) {
         unsafe {
             // use drop for [T]
-            ptr::drop_in_place(&mut self[..]);
+            // use a raw slice to refer to the elements of the vector as the weakest
+            // necessary type; this avoids questions of reference validity in certain cases
+            ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len))
         }
         // RawVec handles deallocation
     }
@@ -2596,7 +2637,18 @@ impl<T> IntoIter<T> {
     /// ```
     #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
     pub fn as_mut_slice(&mut self) -> &mut [T] {
-        unsafe { slice::from_raw_parts_mut(self.ptr as *mut T, self.len()) }
+        unsafe { &mut *self.as_raw_mut_slice() }
+    }
+
+    fn as_raw_mut_slice(&mut self) -> *mut [T] {
+        ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
+    }
+}
+
+#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
+impl<T> AsRef<[T]> for IntoIter<T> {
+    fn as_ref(&self) -> &[T] {
+        self.as_slice()
     }
 }
 
@@ -2708,7 +2760,7 @@ unsafe impl<#[may_dangle] T> Drop for IntoIter<T> {
         let guard = DropGuard(self);
         // destroy the remaining elements
         unsafe {
-            ptr::drop_in_place(guard.0.as_mut_slice());
+            ptr::drop_in_place(guard.0.as_raw_mut_slice());
         }
         // now `guard` will be dropped and do the rest
     }
@@ -2744,19 +2796,25 @@ impl<'a, T> Drain<'a, T> {
     /// # Examples
     ///
     /// ```
-    /// # #![feature(vec_drain_as_slice)]
     /// let mut vec = vec!['a', 'b', 'c'];
     /// let mut drain = vec.drain(..);
     /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
     /// let _ = drain.next().unwrap();
     /// assert_eq!(drain.as_slice(), &['b', 'c']);
     /// ```
-    #[unstable(feature = "vec_drain_as_slice", reason = "recently added", issue = "58957")]
+    #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
     pub fn as_slice(&self) -> &[T] {
         self.iter.as_slice()
     }
 }
 
+#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+impl<'a, T> AsRef<[T]> for Drain<'a, T> {
+    fn as_ref(&self) -> &[T] {
+        self.as_slice()
+    }
+}
+
 #[stable(feature = "drain", since = "1.6.0")]
 unsafe impl<T: Sync> Sync for Drain<'_, T> {}
 #[stable(feature = "drain", since = "1.6.0")]
@@ -2924,15 +2982,16 @@ impl<T> Drain<'_, T> {
     /// Fill that range as much as possible with new elements from the `replace_with` iterator.
     /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.)
     unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
-        let vec = self.vec.as_mut();
+        let vec = unsafe { self.vec.as_mut() };
         let range_start = vec.len;
         let range_end = self.tail_start;
-        let range_slice =
-            slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start);
+        let range_slice = unsafe {
+            slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
+        };
 
         for place in range_slice {
             if let Some(new_item) = replace_with.next() {
-                ptr::write(place, new_item);
+                unsafe { ptr::write(place, new_item) };
                 vec.len += 1;
             } else {
                 return false;
@@ -2942,15 +3001,17 @@ impl<T> Drain<'_, T> {
     }
 
     /// Makes room for inserting more elements before the tail.
-    unsafe fn move_tail(&mut self, extra_capacity: usize) {
-        let vec = self.vec.as_mut();
-        let used_capacity = self.tail_start + self.tail_len;
-        vec.buf.reserve(used_capacity, extra_capacity);
-
-        let new_tail_start = self.tail_start + extra_capacity;
-        let src = vec.as_ptr().add(self.tail_start);
-        let dst = vec.as_mut_ptr().add(new_tail_start);
-        ptr::copy(src, dst, self.tail_len);
+    unsafe fn move_tail(&mut self, additional: usize) {
+        let vec = unsafe { self.vec.as_mut() };
+        let len = self.tail_start + self.tail_len;
+        vec.buf.reserve(len, additional);
+
+        let new_tail_start = self.tail_start + additional;
+        unsafe {
+            let src = vec.as_ptr().add(self.tail_start);
+            let dst = vec.as_mut_ptr().add(new_tail_start);
+            ptr::copy(src, dst, self.tail_len);
+        }
         self.tail_start = new_tail_start;
     }
 }
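`fill` and `move_tail` are the workhorses behind `Vec::splice`: `fill` writes replacement elements into the drained gap, and `move_tail` shifts the tail right when the replacement is longer than the range removed. The observable effect:

```
fn main() {
    let mut v = vec![1, 2, 3, 4, 5];
    // Replace two elements with three: the tail (4, 5) must move right,
    // which `move_tail` handles before `fill` writes in 7, 8, 9.
    let removed: Vec<_> = v.splice(1..3, [7, 8, 9].iter().copied()).collect();
    assert_eq!(removed, [2, 3]);
    assert_eq!(v, [1, 7, 8, 9, 4, 5]);
}
```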