author    Sayan Nandan <17377258+sntdevco@users.noreply.github.com>  2019-08-09 13:01:05 +0530
committer GitHub <noreply@github.com>  2019-08-09 13:01:05 +0530
commit    fb3a01354ffecc41d7a189e4dd225d706387a522 (patch)
tree      41492dfe93f1dccba847dadb56ac6aa079edaaa9 /src/liballoc
parent    33445aea509cadcd715009c79795d289268daa7c (diff)
parent    5aa3d9a7b5d3a46a7f158e8881146331a6bc9243 (diff)
download  rust-fb3a01354ffecc41d7a189e4dd225d706387a522.tar.gz
          rust-fb3a01354ffecc41d7a189e4dd225d706387a522.zip
Merge pull request #1 from rust-lang/master
Merge recent changes into master
Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/Cargo.toml | 7
-rw-r--r--  src/liballoc/alloc.rs | 55
-rw-r--r--  src/liballoc/alloc/tests.rs | 30
-rw-r--r--  src/liballoc/benches/btree/set.rs | 114
-rw-r--r--  src/liballoc/benches/slice.rs | 8
-rw-r--r--  src/liballoc/borrow.rs | 3
-rw-r--r--  src/liballoc/boxed.rs | 262
-rw-r--r--  src/liballoc/collections/binary_heap.rs | 76
-rw-r--r--  src/liballoc/collections/btree/map.rs | 58
-rw-r--r--  src/liballoc/collections/btree/node.rs | 18
-rw-r--r--  src/liballoc/collections/btree/set.rs | 309
-rw-r--r--  src/liballoc/collections/linked_list.rs | 339
-rw-r--r--  src/liballoc/collections/linked_list/tests.rs | 264
-rw-r--r--  src/liballoc/collections/vec_deque.rs | 533
-rw-r--r--  src/liballoc/collections/vec_deque/tests.rs | 379
-rw-r--r--  src/liballoc/fmt.rs | 7
-rw-r--r--  src/liballoc/lib.rs | 59
-rw-r--r--  src/liballoc/macros.rs | 2
-rw-r--r--  src/liballoc/prelude.rs | 19
-rw-r--r--  src/liballoc/prelude/mod.rs | 15
-rw-r--r--  src/liballoc/prelude/v1.rs | 10
-rw-r--r--  src/liballoc/raw_vec.rs | 205
-rw-r--r--  src/liballoc/raw_vec/tests.rs | 73
-rw-r--r--  src/liballoc/rc.rs | 958
-rw-r--r--  src/liballoc/rc/tests.rs | 439
-rw-r--r--  src/liballoc/slice.rs | 196
-rw-r--r--  src/liballoc/str.rs | 51
-rw-r--r--  src/liballoc/string.rs | 31
-rw-r--r--  src/liballoc/sync.rs | 916
-rw-r--r--  src/liballoc/sync/tests.rs | 492
-rw-r--r--  src/liballoc/tests.rs (renamed from src/liballoc/boxed_test.rs) | 13
-rw-r--r--  src/liballoc/tests/arc.rs | 121
-rw-r--r--  src/liballoc/tests/binary_heap.rs | 2
-rw-r--r--  src/liballoc/tests/btree/map.rs | 5
-rw-r--r--  src/liballoc/tests/btree/set.rs | 61
-rw-r--r--  src/liballoc/tests/cow_str.rs | 24
-rw-r--r--  src/liballoc/tests/heap.rs | 32
-rw-r--r--  src/liballoc/tests/lib.rs | 3
-rw-r--r--  src/liballoc/tests/linked_list.rs | 2
-rw-r--r--  src/liballoc/tests/rc.rs | 117
-rw-r--r--  src/liballoc/tests/slice.rs | 37
-rw-r--r--  src/liballoc/tests/str.rs | 21
-rw-r--r--  src/liballoc/tests/string.rs | 13
-rw-r--r--  src/liballoc/tests/vec.rs | 146
-rw-r--r--  src/liballoc/tests/vec_deque.rs | 2
-rw-r--r--  src/liballoc/vec.rs | 271
46 files changed, 4279 insertions, 2519 deletions
diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml
index f6d6c1de8f5..d1119f7b7c0 100644
--- a/src/liballoc/Cargo.toml
+++ b/src/liballoc/Cargo.toml
@@ -12,11 +12,11 @@ path = "lib.rs"
 
 [dependencies]
 core = { path = "../libcore" }
-compiler_builtins = { version = "0.1.0", features = ['rustc-dep-of-std'] }
+compiler_builtins = { version = "0.1.10", features = ['rustc-dep-of-std'] }
 
 [dev-dependencies]
-rand = "0.6"
-rand_xorshift = "0.1"
+rand = "0.7"
+rand_xorshift = "0.2"
 
 [[test]]
 name = "collectionstests"
@@ -33,3 +33,4 @@ harness = false
 
 [features]
 compiler-builtins-mem = ['compiler_builtins/mem']
+compiler-builtins-c = ["compiler_builtins/c"]
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index f3877e51a66..dc7fd1adc29 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -10,12 +10,15 @@ use core::usize;
 #[doc(inline)]
 pub use core::alloc::*;
 
+#[cfg(test)]
+mod tests;
+
 extern "Rust" {
     // These are the magic symbols to call the global allocator.  rustc generates
     // them from the `#[global_allocator]` attribute if there is one, or uses the
     // default implementations in libstd (`__rdl_alloc` etc in `src/libstd/alloc.rs`)
     // otherwise.
-    #[allocator]
+    #[rustc_allocator]
     #[rustc_allocator_nounwind]
     fn __rust_alloc(size: usize, align: usize) -> *mut u8;
     #[rustc_allocator_nounwind]
@@ -37,6 +40,8 @@ extern "Rust" {
 ///
 /// Note: while this type is unstable, the functionality it provides can be
 /// accessed through the [free functions in `alloc`](index.html#functions).
+///
+/// [`Alloc`]: trait.Alloc.html
 #[unstable(feature = "allocator_api", issue = "32838")]
 #[derive(Copy, Clone, Default, Debug)]
 pub struct Global;
@@ -54,6 +59,10 @@ pub struct Global;
 ///
 /// See [`GlobalAlloc::alloc`].
 ///
+/// [`Global`]: struct.Global.html
+/// [`Alloc`]: trait.Alloc.html
+/// [`GlobalAlloc::alloc`]: trait.GlobalAlloc.html#tymethod.alloc
+///
 /// # Examples
 ///
 /// ```
@@ -87,6 +96,10 @@ pub unsafe fn alloc(layout: Layout) -> *mut u8 {
 /// # Safety
 ///
 /// See [`GlobalAlloc::dealloc`].
+///
+/// [`Global`]: struct.Global.html
+/// [`Alloc`]: trait.Alloc.html
+/// [`GlobalAlloc::dealloc`]: trait.GlobalAlloc.html#tymethod.dealloc
 #[stable(feature = "global_alloc", since = "1.28.0")]
 #[inline]
 pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
@@ -105,6 +118,10 @@ pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
 /// # Safety
 ///
 /// See [`GlobalAlloc::realloc`].
+///
+/// [`Global`]: struct.Global.html
+/// [`Alloc`]: trait.Alloc.html
+/// [`GlobalAlloc::realloc`]: trait.GlobalAlloc.html#method.realloc
 #[stable(feature = "global_alloc", since = "1.28.0")]
 #[inline]
 pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
@@ -124,6 +141,10 @@ pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8
 ///
 /// See [`GlobalAlloc::alloc_zeroed`].
 ///
+/// [`Global`]: struct.Global.html
+/// [`Alloc`]: trait.Alloc.html
+/// [`GlobalAlloc::alloc_zeroed`]: trait.GlobalAlloc.html#method.alloc_zeroed
+///
 /// # Examples
 ///
 /// ```
@@ -226,35 +247,3 @@ pub fn handle_alloc_error(layout: Layout) -> ! {
     }
     unsafe { oom_impl(layout) }
 }
-
-#[cfg(test)]
-mod tests {
-    extern crate test;
-    use test::Bencher;
-    use crate::boxed::Box;
-    use crate::alloc::{Global, Alloc, Layout, handle_alloc_error};
-
-    #[test]
-    fn allocate_zeroed() {
-        unsafe {
-            let layout = Layout::from_size_align(1024, 1).unwrap();
-            let ptr = Global.alloc_zeroed(layout.clone())
-                .unwrap_or_else(|_| handle_alloc_error(layout));
-
-            let mut i = ptr.cast::<u8>().as_ptr();
-            let end = i.add(layout.size());
-            while i < end {
-                assert_eq!(*i, 0);
-                i = i.offset(1);
-            }
-            Global.dealloc(ptr, layout);
-        }
-    }
-
-    #[bench]
-    fn alloc_owned_small(b: &mut Bencher) {
-        b.iter(|| {
-            let _: Box<_> = box 10;
-        })
-    }
-}
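
As context for the `__rust_alloc` symbols above: rustc generates them from whatever static carries the `#[global_allocator]` attribute. A minimal, illustrative sketch of installing one (not part of this diff), delegating to the system allocator:

    use std::alloc::{GlobalAlloc, Layout, System};

    struct Passthrough;

    unsafe impl GlobalAlloc for Passthrough {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            // rustc routes `__rust_alloc` calls to this method.
            System.alloc(layout)
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            System.dealloc(ptr, layout)
        }
    }

    #[global_allocator]
    static GLOBAL: Passthrough = Passthrough;

    fn main() {
        let v = vec![1u8, 2, 3]; // allocated through `GLOBAL`
        assert_eq!(v.len(), 3);
    }
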
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs
new file mode 100644
index 00000000000..c69f4e49ee1
--- /dev/null
+++ b/src/liballoc/alloc/tests.rs
@@ -0,0 +1,30 @@
+use super::*;
+
+extern crate test;
+use test::Bencher;
+use crate::boxed::Box;
+
+#[test]
+fn allocate_zeroed() {
+    unsafe {
+        let layout = Layout::from_size_align(1024, 1).unwrap();
+        let ptr = Global.alloc_zeroed(layout.clone())
+            .unwrap_or_else(|_| handle_alloc_error(layout));
+
+        let mut i = ptr.cast::<u8>().as_ptr();
+        let end = i.add(layout.size());
+        while i < end {
+            assert_eq!(*i, 0);
+            i = i.offset(1);
+        }
+        Global.dealloc(ptr, layout);
+    }
+}
+
+#[bench]
+#[cfg(not(miri))] // Miri does not support benchmarks
+fn alloc_owned_small(b: &mut Bencher) {
+    b.iter(|| {
+        let _: Box<_> = box 10;
+    })
+}
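
The moved test exercises the unstable `Alloc` methods on `Global`; the same behavior is reachable through the stable free functions whose docs are touched above. A small sketch using only stable `std::alloc`:

    use std::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};

    fn main() {
        unsafe {
            let layout = Layout::from_size_align(1024, 1).unwrap();
            let ptr = alloc_zeroed(layout);
            if ptr.is_null() {
                handle_alloc_error(layout);
            }
            // `alloc_zeroed` guarantees every byte starts out as zero.
            for i in 0..layout.size() {
                assert_eq!(*ptr.add(i), 0);
            }
            dealloc(ptr, layout);
        }
    }
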
diff --git a/src/liballoc/benches/btree/set.rs b/src/liballoc/benches/btree/set.rs
index 08e1db5fbb7..6357ea3ea11 100644
--- a/src/liballoc/benches/btree/set.rs
+++ b/src/liballoc/benches/btree/set.rs
@@ -3,59 +3,49 @@ use std::collections::BTreeSet;
 use rand::{thread_rng, Rng};
 use test::{black_box, Bencher};
 
-fn random(n1: u32, n2: u32) -> [BTreeSet<usize>; 2] {
+fn random(n: usize) -> BTreeSet<usize> {
     let mut rng = thread_rng();
-    let mut set1 = BTreeSet::new();
-    let mut set2 = BTreeSet::new();
-    for _ in 0..n1 {
-        let i = rng.gen::<usize>();
-        set1.insert(i);
+    let mut set = BTreeSet::new();
+    while set.len() < n {
+        set.insert(rng.gen());
     }
-    for _ in 0..n2 {
-        let i = rng.gen::<usize>();
-        set2.insert(i);
-    }
-    [set1, set2]
+    assert_eq!(set.len(), n);
+    set
 }
 
-fn staggered(n1: u32, n2: u32) -> [BTreeSet<u32>; 2] {
-    let mut even = BTreeSet::new();
-    let mut odd = BTreeSet::new();
-    for i in 0..n1 {
-        even.insert(i * 2);
-    }
-    for i in 0..n2 {
-        odd.insert(i * 2 + 1);
+fn neg(n: usize) -> BTreeSet<i32> {
+    let mut set = BTreeSet::new();
+    for i in -(n as i32)..=-1 {
+        set.insert(i);
     }
-    [even, odd]
+    assert_eq!(set.len(), n);
+    set
 }
 
-fn neg_vs_pos(n1: u32, n2: u32) -> [BTreeSet<i32>; 2] {
-    let mut neg = BTreeSet::new();
-    let mut pos = BTreeSet::new();
-    for i in -(n1 as i32)..=-1 {
-        neg.insert(i);
-    }
-    for i in 1..=(n2 as i32) {
-        pos.insert(i);
+fn pos(n: usize) -> BTreeSet<i32> {
+    let mut set = BTreeSet::new();
+    for i in 1..=(n as i32) {
+        set.insert(i);
     }
-    [neg, pos]
+    assert_eq!(set.len(), n);
+    set
 }
 
-fn pos_vs_neg(n1: u32, n2: u32) -> [BTreeSet<i32>; 2] {
-    let mut neg = BTreeSet::new();
-    let mut pos = BTreeSet::new();
-    for i in -(n1 as i32)..=-1 {
-        neg.insert(i);
-    }
-    for i in 1..=(n2 as i32) {
-        pos.insert(i);
+
+fn stagger(n1: usize, factor: usize) -> [BTreeSet<u32>; 2] {
+    let n2 = n1 * factor;
+    let mut sets = [BTreeSet::new(), BTreeSet::new()];
+    for i in 0..(n1 + n2) {
+        let b = i % (factor + 1) != 0;
+        sets[b as usize].insert(i as u32);
     }
-    [pos, neg]
+    assert_eq!(sets[0].len(), n1);
+    assert_eq!(sets[1].len(), n2);
+    sets
 }
 
-macro_rules! set_intersection_bench {
-    ($name: ident, $sets: expr) => {
+macro_rules! set_bench {
+    ($name: ident, $set_func: ident, $result_func: ident, $sets: expr) => {
         #[bench]
         pub fn $name(b: &mut Bencher) {
             // setup
@@ -63,26 +53,36 @@ macro_rules! set_intersection_bench {
 
             // measure
             b.iter(|| {
-                let x = sets[0].intersection(&sets[1]).count();
+                let x = sets[0].$set_func(&sets[1]).$result_func();
                 black_box(x);
             })
         }
     };
 }
 
-set_intersection_bench! {intersect_random_100,          random(100, 100)}
-set_intersection_bench! {intersect_random_10k,          random(10_000, 10_000)}
-set_intersection_bench! {intersect_random_10_vs_10k,    random(10, 10_000)}
-set_intersection_bench! {intersect_random_10k_vs_10,    random(10_000, 10)}
-set_intersection_bench! {intersect_staggered_100,       staggered(100, 100)}
-set_intersection_bench! {intersect_staggered_10k,       staggered(10_000, 10_000)}
-set_intersection_bench! {intersect_staggered_10_vs_10k, staggered(10, 10_000)}
-set_intersection_bench! {intersect_staggered_10k_vs_10, staggered(10_000, 10)}
-set_intersection_bench! {intersect_neg_vs_pos_100,      neg_vs_pos(100, 100)}
-set_intersection_bench! {intersect_neg_vs_pos_10k,      neg_vs_pos(10_000, 10_000)}
-set_intersection_bench! {intersect_neg_vs_pos_10_vs_10k,neg_vs_pos(10, 10_000)}
-set_intersection_bench! {intersect_neg_vs_pos_10k_vs_10,neg_vs_pos(10_000, 10)}
-set_intersection_bench! {intersect_pos_vs_neg_100,      pos_vs_neg(100, 100)}
-set_intersection_bench! {intersect_pos_vs_neg_10k,      pos_vs_neg(10_000, 10_000)}
-set_intersection_bench! {intersect_pos_vs_neg_10_vs_10k,pos_vs_neg(10, 10_000)}
-set_intersection_bench! {intersect_pos_vs_neg_10k_vs_10,pos_vs_neg(10_000, 10)}
+set_bench! {intersection_100_neg_vs_100_pos, intersection, count, [neg(100), pos(100)]}
+set_bench! {intersection_100_neg_vs_10k_pos, intersection, count, [neg(100), pos(10_000)]}
+set_bench! {intersection_100_pos_vs_100_neg, intersection, count, [pos(100), neg(100)]}
+set_bench! {intersection_100_pos_vs_10k_neg, intersection, count, [pos(100), neg(10_000)]}
+set_bench! {intersection_10k_neg_vs_100_pos, intersection, count, [neg(10_000), pos(100)]}
+set_bench! {intersection_10k_neg_vs_10k_pos, intersection, count, [neg(10_000), pos(10_000)]}
+set_bench! {intersection_10k_pos_vs_100_neg, intersection, count, [pos(10_000), neg(100)]}
+set_bench! {intersection_10k_pos_vs_10k_neg, intersection, count, [pos(10_000), neg(10_000)]}
+set_bench! {intersection_random_100_vs_100, intersection, count, [random(100), random(100)]}
+set_bench! {intersection_random_100_vs_10k, intersection, count, [random(100), random(10_000)]}
+set_bench! {intersection_random_10k_vs_100, intersection, count, [random(10_000), random(100)]}
+set_bench! {intersection_random_10k_vs_10k, intersection, count, [random(10_000), random(10_000)]}
+set_bench! {intersection_staggered_100_vs_100, intersection, count, stagger(100, 1)}
+set_bench! {intersection_staggered_10k_vs_10k, intersection, count, stagger(10_000, 1)}
+set_bench! {intersection_staggered_100_vs_10k, intersection, count, stagger(100, 100)}
+set_bench! {difference_random_100_vs_100, difference, count, [random(100), random(100)]}
+set_bench! {difference_random_100_vs_10k, difference, count, [random(100), random(10_000)]}
+set_bench! {difference_random_10k_vs_100, difference, count, [random(10_000), random(100)]}
+set_bench! {difference_random_10k_vs_10k, difference, count, [random(10_000), random(10_000)]}
+set_bench! {difference_staggered_100_vs_100, difference, count, stagger(100, 1)}
+set_bench! {difference_staggered_10k_vs_10k, difference, count, stagger(10_000, 1)}
+set_bench! {difference_staggered_100_vs_10k, difference, count, stagger(100, 100)}
+set_bench! {is_subset_100_vs_100, is_subset, clone, [pos(100), pos(100)]}
+set_bench! {is_subset_100_vs_10k, is_subset, clone, [pos(100), pos(10_000)]}
+set_bench! {is_subset_10k_vs_100, is_subset, clone, [pos(10_000), pos(100)]}
+set_bench! {is_subset_10k_vs_10k, is_subset, clone, [pos(10_000), pos(10_000)]}
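
Each `set_bench!` invocation above expands to one benchmark function. For example, `intersection_staggered_100_vs_10k` expands to roughly this hand-written equivalent:

    #[bench]
    pub fn intersection_staggered_100_vs_10k(b: &mut Bencher) {
        // setup: two staggered sets of 100 and 10,000 elements
        let sets = stagger(100, 100);

        // measure: count the elements the two sets share
        b.iter(|| {
            let x = sets[0].intersection(&sets[1]).count();
            black_box(x);
        })
    }
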
diff --git a/src/liballoc/benches/slice.rs b/src/liballoc/benches/slice.rs
index f17fb8212ce..ef91d801dc7 100644
--- a/src/liballoc/benches/slice.rs
+++ b/src/liballoc/benches/slice.rs
@@ -186,12 +186,12 @@ const SEED: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
 
 fn gen_random(len: usize) -> Vec<u64> {
     let mut rng = XorShiftRng::from_seed(SEED);
-    rng.sample_iter(&Standard).take(len).collect()
+    (&mut rng).sample_iter(&Standard).take(len).collect()
 }
 
 fn gen_random_bytes(len: usize) -> Vec<u8> {
     let mut rng = XorShiftRng::from_seed(SEED);
-    rng.sample_iter(&Standard).take(len).collect()
+    (&mut rng).sample_iter(&Standard).take(len).collect()
 }
 
 fn gen_mostly_ascending(len: usize) -> Vec<u64> {
@@ -221,14 +221,14 @@ fn gen_strings(len: usize) -> Vec<String> {
     let mut v = vec![];
     for _ in 0..len {
         let n = rng.gen::<usize>() % 20 + 1;
-        v.push(rng.sample_iter(&Alphanumeric).take(n).collect());
+        v.push((&mut rng).sample_iter(&Alphanumeric).take(n).collect());
     }
     v
 }
 
 fn gen_big_random(len: usize) -> Vec<[u64; 16]> {
     let mut rng = XorShiftRng::from_seed(SEED);
-    rng.sample_iter(&Standard).map(|x| [x; 16]).take(len).collect()
+    (&mut rng).sample_iter(&Standard).map(|x| [x; 16]).take(len).collect()
 }
 
 macro_rules! sort {
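
The `(&mut rng)` reborrows are needed because `Rng::sample_iter` takes `self` by value in rand 0.7; since `&mut R` is itself an RNG, passing a reborrow lets `rng` be used again afterwards. A sketch of the pattern, assuming rand 0.7 and its `StepRng` mock generator:

    use rand::distributions::Standard;
    use rand::rngs::mock::StepRng;
    use rand::Rng;

    fn main() {
        let mut rng = StepRng::new(0, 1);
        // `sample_iter` consumes the reborrow, not `rng` itself.
        let a: Vec<u64> = (&mut rng).sample_iter(&Standard).take(4).collect();
        let b: Vec<u64> = (&mut rng).sample_iter(&Standard).take(4).collect();
        assert_eq!(a.len(), b.len());
    }
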
diff --git a/src/liballoc/borrow.rs b/src/liballoc/borrow.rs
index 74c80a08b12..d5e15b3719c 100644
--- a/src/liballoc/borrow.rs
+++ b/src/liballoc/borrow.rs
@@ -32,6 +32,7 @@ impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
 /// from any borrow of a given type.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub trait ToOwned {
+    /// The resulting type after obtaining ownership.
     #[stable(feature = "rust1", since = "1.0.0")]
     type Owned: Borrow<Self>;
 
@@ -135,7 +136,7 @@ impl<T> ToOwned for T
 /// Another example showing how to keep `Cow` in a struct:
 ///
 /// ```
-/// use std::borrow::{Cow, ToOwned};
+/// use std::borrow::Cow;
 ///
 /// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
 ///     values: Cow<'a, [X]>,
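
The trimmed import works because `ToOwned` is already in the prelude; only `Cow` needs importing. As a companion to the `Items` example above, a minimal sketch of `Cow`'s clone-on-write behavior:

    use std::borrow::Cow;

    fn zero_negatives(values: &mut Cow<'_, [i32]>) {
        // `to_mut` clones the borrowed slice only when a write is needed.
        for i in 0..values.len() {
            if values[i] < 0 {
                values.to_mut()[i] = 0;
            }
        }
    }

    fn main() {
        let data = [1, 2, 3];
        let mut cow: Cow<'_, [i32]> = Cow::Borrowed(&data);
        zero_negatives(&mut cow);
        // No negatives, so no clone ever happened: still borrowed.
        assert!(matches!(cow, Cow::Borrowed(_)));
    }
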
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 9bce142b483..c92db517cad 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -1,19 +1,9 @@
 //! A pointer type for heap allocation.
 //!
-//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
+//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
 //! heap allocation in Rust. Boxes provide ownership for this allocation, and
 //! drop their contents when they go out of scope.
 //!
-//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
-//! its allocation. It is valid to convert both ways between a [`Box`] and a
-//! raw pointer allocated with the [`Global`] allocator, given that the
-//! [`Layout`] used with the allocator is correct for the type. More precisely,
-//! a `value: *mut T` that has been allocated with the [`Global`] allocator
-//! with `Layout::for_value(&*value)` may be converted into a box using
-//! `Box::<T>::from_raw(value)`. Conversely, the memory backing a `value: *mut
-//! T` obtained from `Box::<T>::into_raw` may be deallocated using the
-//! [`Global`] allocator with `Layout::for_value(&*value)`.
-//!
 //! # Examples
 //!
 //! Move a value from the stack to the heap by creating a [`Box`]:
@@ -58,18 +48,38 @@
 //!
 //! It wouldn't work. This is because the size of a `List` depends on how many
 //! elements are in the list, and so we don't know how much memory to allocate
-//! for a `Cons`. By introducing a `Box`, which has a defined size, we know how
+//! for a `Cons`. By introducing a [`Box<T>`], which has a defined size, we know how
 //! big `Cons` needs to be.
 //!
+//! # Memory layout
+//!
+//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
+//! its allocation. It is valid to convert both ways between a [`Box`] and a
+//! raw pointer allocated with the [`Global`] allocator, given that the
+//! [`Layout`] used with the allocator is correct for the type. More precisely,
+//! a `value: *mut T` that has been allocated with the [`Global`] allocator
+//! with `Layout::for_value(&*value)` may be converted into a box using
+//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
+//! T` obtained from [`Box::<T>::into_raw`] may be deallocated using the
+//! [`Global`] allocator with [`Layout::for_value(&*value)`].
+//!
+//!
 //! [dereferencing]: ../../std/ops/trait.Deref.html
 //! [`Box`]: struct.Box.html
+//! [`Box<T>`]: struct.Box.html
+//! [`Box::<T>::from_raw(value)`]: struct.Box.html#method.from_raw
+//! [`Box::<T>::into_raw`]: struct.Box.html#method.into_raw
+//! [`Global`]: ../alloc/struct.Global.html
+//! [`Layout`]: ../alloc/struct.Layout.html
+//! [`Layout::for_value(&*value)`]: ../alloc/struct.Layout.html#method.for_value
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use core::any::Any;
+use core::array::LengthAtMost32;
 use core::borrow;
 use core::cmp::Ordering;
-use core::convert::From;
+use core::convert::{From, TryFrom};
 use core::fmt;
 use core::future::Future;
 use core::hash::{Hash, Hasher};
@@ -81,7 +91,7 @@ use core::ops::{
     CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Receiver, Generator, GeneratorState
 };
 use core::ptr::{self, NonNull, Unique};
-use core::task::{Waker, Poll};
+use core::task::{Context, Poll};
 
 use crate::vec::Vec;
 use crate::raw_vec::RawVec;
@@ -125,24 +135,38 @@ impl<T: ?Sized> Box<T> {
     ///
     /// After calling this function, the raw pointer is owned by the
     /// resulting `Box`. Specifically, the `Box` destructor will call
-    /// the destructor of `T` and free the allocated memory. Since the
-    /// way `Box` allocates and releases memory is unspecified, the
-    /// only valid pointer to pass to this function is the one taken
-    /// from another `Box` via the [`Box::into_raw`] function.
+    /// the destructor of `T` and free the allocated memory. For this
+    /// to be safe, the memory must have been allocated in accordance
+    /// with the [memory layout] used by `Box`.
+    ///
+    /// # Safety
     ///
     /// This function is unsafe because improper use may lead to
     /// memory problems. For example, a double-free may occur if the
     /// function is called twice on the same raw pointer.
     ///
-    /// [`Box::into_raw`]: struct.Box.html#method.into_raw
-    ///
     /// # Examples
-    ///
+    /// Recreate a `Box` which was previously converted to a raw pointer
+    /// using [`Box::into_raw`]:
     /// ```
     /// let x = Box::new(5);
     /// let ptr = Box::into_raw(x);
     /// let x = unsafe { Box::from_raw(ptr) };
     /// ```
+    /// Manually create a `Box` from scratch by using the global allocator:
+    /// ```
+    /// use std::alloc::{alloc, Layout};
+    ///
+    /// unsafe {
+    ///     let ptr = alloc(Layout::new::<i32>()) as *mut i32;
+    ///     *ptr = 5;
+    ///     let x = Box::from_raw(ptr);
+    /// }
+    /// ```
+    ///
+    /// [memory layout]: index.html#memory-layout
+    /// [`Layout`]: ../alloc/struct.Layout.html
+    /// [`Box::into_raw`]: struct.Box.html#method.into_raw
     #[stable(feature = "box_raw", since = "1.4.0")]
     #[inline]
     pub unsafe fn from_raw(raw: *mut T) -> Self {
@@ -155,22 +179,40 @@ impl<T: ?Sized> Box<T> {
     ///
     /// After calling this function, the caller is responsible for the
     /// memory previously managed by the `Box`. In particular, the
-    /// caller should properly destroy `T` and release the memory. The
-    /// proper way to do so is to convert the raw pointer back into a
-    /// `Box` with the [`Box::from_raw`] function.
+    /// caller should properly destroy `T` and release the memory, taking
+    /// into account the [memory layout] used by `Box`. The easiest way to
+    /// do this is to convert the raw pointer back into a `Box` with the
+    /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
+    /// the cleanup.
     ///
     /// Note: this is an associated function, which means that you have
     /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
     /// is so that there is no conflict with a method on the inner type.
     ///
-    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
-    ///
     /// # Examples
-    ///
+    /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
+    /// for automatic cleanup:
     /// ```
-    /// let x = Box::new(5);
+    /// let x = Box::new(String::from("Hello"));
     /// let ptr = Box::into_raw(x);
+    /// let x = unsafe { Box::from_raw(ptr) };
     /// ```
+    /// Manual cleanup by explicitly running the destructor and deallocating
+    /// the memory:
+    /// ```
+    /// use std::alloc::{dealloc, Layout};
+    /// use std::ptr;
+    ///
+    /// let x = Box::new(String::from("Hello"));
+    /// let p = Box::into_raw(x);
+    /// unsafe {
+    ///     ptr::drop_in_place(p);
+    ///     dealloc(p as *mut u8, Layout::new::<String>());
+    /// }
+    /// ```
+    ///
+    /// [memory layout]: index.html#memory-layout
+    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
     #[stable(feature = "box_raw", since = "1.4.0")]
     #[inline]
     pub fn into_raw(b: Box<T>) -> *mut T {
@@ -182,7 +224,7 @@ impl<T: ?Sized> Box<T> {
     /// After calling this function, the caller is responsible for the
     /// memory previously managed by the `Box`. In particular, the
     /// caller should properly destroy `T` and release the memory. The
-    /// proper way to do so is to convert the `NonNull<T>` pointer
+    /// easiest way to do so is to convert the `NonNull<T>` pointer
     /// into a raw pointer and back into a `Box` with the [`Box::from_raw`]
     /// function.
     ///
@@ -201,6 +243,10 @@ impl<T: ?Sized> Box<T> {
     /// fn main() {
     ///     let x = Box::new(5);
     ///     let ptr = Box::into_raw_non_null(x);
+    ///
+    ///     // Clean up the memory by converting the NonNull pointer back
+    ///     // into a Box and letting the Box be dropped.
+    ///     let x = unsafe { Box::from_raw(ptr.as_ptr()) };
     /// }
     /// ```
     #[unstable(feature = "box_into_raw_non_null", issue = "47336")]
@@ -212,15 +258,16 @@ impl<T: ?Sized> Box<T> {
     #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")]
     #[inline]
     #[doc(hidden)]
-    pub fn into_unique(mut b: Box<T>) -> Unique<T> {
+    pub fn into_unique(b: Box<T>) -> Unique<T> {
+        let mut unique = b.0;
+        mem::forget(b);
         // Box is kind-of a library type, but recognized as a "unique pointer" by
         // Stacked Borrows.  This function here corresponds to "reborrowing to
         // a raw pointer", but there is no actual reborrow here -- so
         // without some care, the pointer we are returning here still carries
-        // the `Uniq` tag.  We round-trip through a mutable reference to avoid that.
-        let unique = unsafe { b.0.as_mut() as *mut T };
-        mem::forget(b);
-        unsafe { Unique::new_unchecked(unique) }
+        // the tag of `b`, with `Unique` permission.
+        // We round-trip through a mutable reference to avoid that.
+        unsafe { Unique::new_unchecked(unique.as_mut() as *mut T) }
     }
 
     /// Consumes and leaks the `Box`, returning a mutable reference,
@@ -278,7 +325,7 @@ impl<T: ?Sized> Box<T> {
     /// This conversion does not allocate on the heap and happens in place.
     ///
     /// This is also available via [`From`].
-    #[unstable(feature = "box_into_pin", issue = "0")]
+    #[unstable(feature = "box_into_pin", issue = "62370")]
     pub fn into_pin(boxed: Box<T>) -> Pin<Box<T>> {
         // It's not possible to move or replace the insides of a `Pin<Box<T>>`
         // when `T: !Unpin`,  so it's safe to pin it directly without any
@@ -325,12 +372,19 @@ impl<T: Clone> Clone for Box<T> {
     /// ```
     /// let x = Box::new(5);
     /// let y = x.clone();
+    ///
+    /// // The value is the same
+    /// assert_eq!(x, y);
+    ///
+    /// // But they are unique objects
+    /// assert_ne!(&*x as *const i32, &*y as *const i32);
     /// ```
     #[rustfmt::skip]
     #[inline]
     fn clone(&self) -> Box<T> {
         box { (**self).clone() }
     }
+
     /// Copies `source`'s contents into `self` without creating a new allocation.
     ///
     /// # Examples
@@ -338,10 +392,15 @@ impl<T: Clone> Clone for Box<T> {
     /// ```
     /// let x = Box::new(5);
     /// let mut y = Box::new(10);
+    /// let yp: *const i32 = &*y;
     ///
     /// y.clone_from(&x);
     ///
-    /// assert_eq!(*y, 5);
+    /// // The value is the same
+    /// assert_eq!(x, y);
+    ///
+    /// // And no allocation occurred
+    /// assert_eq!(yp, &*y);
     /// ```
     #[inline]
     fn clone_from(&mut self, source: &Box<T>) {
@@ -353,11 +412,10 @@ impl<T: Clone> Clone for Box<T> {
 #[stable(feature = "box_slice_clone", since = "1.3.0")]
 impl Clone for Box<str> {
     fn clone(&self) -> Self {
-        let len = self.len();
-        let buf = RawVec::with_capacity(len);
+        // this makes a copy of the data
+        let buf: Box<[u8]> = self.as_bytes().into();
         unsafe {
-            ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
-            from_boxed_utf8_unchecked(buf.into_box())
+            from_boxed_utf8_unchecked(buf)
         }
     }
 }
@@ -504,9 +562,12 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
     /// println!("{:?}", boxed_slice);
     /// ```
     fn from(slice: &[T]) -> Box<[T]> {
-        let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() };
-        boxed.copy_from_slice(slice);
-        boxed
+        let len = slice.len();
+        let buf = RawVec::with_capacity(len);
+        unsafe {
+            ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+            buf.into_box()
+        }
     }
 }
 
@@ -552,6 +613,22 @@ impl From<Box<str>> for Box<[u8]> {
     }
 }
 
+#[unstable(feature = "boxed_slice_try_from", issue = "0")]
+impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]>
+where
+    [T; N]: LengthAtMost32,
+{
+    type Error = Box<[T]>;
+
+    fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
+        if boxed_slice.len() == N {
+            Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+        } else {
+            Err(boxed_slice)
+        }
+    }
+}
+
 impl Box<dyn Any> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -672,11 +749,22 @@ impl<I: Iterator + ?Sized> Iterator for Box<I> {
         (**self).nth(n)
     }
+    fn last(self) -> Option<I::Item> {
+        BoxIter::last(self)
+    }
 }
+
+// Helper trait so that `Box<I>` can forward `last` to `I`'s own
+// implementation when `I` is sized, via specialization.
+trait BoxIter {
+    type Item;
+    fn last(self) -> Option<Self::Item>;
+}
+
+impl<I: Iterator + ?Sized> BoxIter for Box<I> {
+    type Item = I::Item;
+    default fn last(self) -> Option<I::Item> {
+        self.fold(None, |_, x| Some(x))
+    }
+}
+
+/// Specialization for sized `I`s that uses `I`'s implementation of
+/// `last()` instead of the default.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator> BoxIter for Box<I> {
+    fn last(self) -> Option<I::Item> {
+        (*self).last()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
     fn next_back(&mut self) -> Option<I::Item> {
         (**self).next_back()
     }
+    fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+        (**self).nth_back(n)
+    }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
@@ -691,82 +779,26 @@ impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
 #[stable(feature = "fused", since = "1.26.0")]
 impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
 
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<A, F: FnOnce<A> + ?Sized> FnOnce<A> for Box<F> {
+    type Output = <F as FnOnce<A>>::Output;
 
-/// `FnBox` is a version of the `FnOnce` intended for use with boxed
-/// closure objects. The idea is that where one would normally store a
-/// `Box<dyn FnOnce()>` in a data structure, you should use
-/// `Box<dyn FnBox()>`. The two traits behave essentially the same, except
-/// that a `FnBox` closure can only be called if it is boxed. (Note
-/// that `FnBox` may be deprecated in the future if `Box<dyn FnOnce()>`
-/// closures become directly usable.)
-///
-/// # Examples
-///
-/// Here is a snippet of code which creates a hashmap full of boxed
-/// once closures and then removes them one by one, calling each
-/// closure as it is removed. Note that the type of the closures
-/// stored in the map is `Box<dyn FnBox() -> i32>` and not `Box<dyn FnOnce()
-/// -> i32>`.
-///
-/// ```
-/// #![feature(fnbox)]
-///
-/// use std::boxed::FnBox;
-/// use std::collections::HashMap;
-///
-/// fn make_map() -> HashMap<i32, Box<dyn FnBox() -> i32>> {
-///     let mut map: HashMap<i32, Box<dyn FnBox() -> i32>> = HashMap::new();
-///     map.insert(1, Box::new(|| 22));
-///     map.insert(2, Box::new(|| 44));
-///     map
-/// }
-///
-/// fn main() {
-///     let mut map = make_map();
-///     for i in &[1, 2] {
-///         let f = map.remove(&i).unwrap();
-///         assert_eq!(f(), i * 22);
-///     }
-/// }
-/// ```
-#[rustc_paren_sugar]
-#[unstable(feature = "fnbox",
-           reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
-pub trait FnBox<A> {
-    type Output;
-
-    fn call_box(self: Box<Self>, args: A) -> Self::Output;
-}
-
-#[unstable(feature = "fnbox",
-           reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
-impl<A, F> FnBox<A> for F
-    where F: FnOnce<A>
-{
-    type Output = F::Output;
-
-    fn call_box(self: Box<F>, args: A) -> F::Output {
-        self.call_once(args)
+    extern "rust-call" fn call_once(self, args: A) -> Self::Output {
+        <F as FnOnce<A>>::call_once(*self, args)
     }
 }
 
-#[unstable(feature = "fnbox",
-           reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
-impl<A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + '_> {
-    type Output = R;
-
-    extern "rust-call" fn call_once(self, args: A) -> R {
-        self.call_box(args)
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<A, F: FnMut<A> + ?Sized> FnMut<A> for Box<F> {
+    extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output {
+        <F as FnMut<A>>::call_mut(self, args)
     }
 }
 
-#[unstable(feature = "fnbox",
-           reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
-impl<A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + Send + '_> {
-    type Output = R;
-
-    extern "rust-call" fn call_once(self, args: A) -> R {
-        self.call_box(args)
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<A, F: Fn<A> + ?Sized> Fn<A> for Box<F> {
+    extern "rust-call" fn call(&self, args: A) -> Self::Output {
+        <F as Fn<A>>::call(self, args)
     }
 }
 
@@ -907,11 +939,11 @@ impl<G: ?Sized + Generator> Generator for Pin<Box<G>> {
     }
 }
 
-#[unstable(feature = "futures_api", issue = "50547")]
+#[stable(feature = "futures_api", since = "1.36.0")]
 impl<F: ?Sized + Future + Unpin> Future for Box<F> {
     type Output = F::Output;
 
-    fn poll(mut self: Pin<&mut Self>, waker: &Waker) -> Poll<Self::Output> {
-        F::poll(Pin::new(&mut *self), waker)
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        F::poll(Pin::new(&mut *self), cx)
     }
 }
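
The `FnBox` machinery could be deleted because the new `boxed_closure_impls` shown above make `Box<dyn FnOnce()>` directly callable. The removed doc example, adapted to plain `FnOnce` as a sketch:

    use std::collections::HashMap;

    fn make_map() -> HashMap<i32, Box<dyn FnOnce() -> i32>> {
        let mut map: HashMap<i32, Box<dyn FnOnce() -> i32>> = HashMap::new();
        map.insert(1, Box::new(|| 22));
        map.insert(2, Box::new(|| 44));
        map
    }

    fn main() {
        let mut map = make_map();
        for &i in &[1, 2] {
            let f = map.remove(&i).unwrap();
            // Called directly through the box -- no `FnBox` needed.
            assert_eq!(f(), i * 22);
        }
    }
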
diff --git a/src/liballoc/collections/binary_heap.rs b/src/liballoc/collections/binary_heap.rs
index a171f128c24..9f531f5b83c 100644
--- a/src/liballoc/collections/binary_heap.rs
+++ b/src/liballoc/collections/binary_heap.rs
@@ -207,6 +207,44 @@ use super::SpecExtend;
 /// // The heap should now be empty.
 /// assert!(heap.is_empty())
 /// ```
+///
+/// ## Min-heap
+///
+/// Either `std::cmp::Reverse` or a custom `Ord` implementation can be used to
+/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
+/// value instead of the greatest one.
+///
+/// ```
+/// use std::collections::BinaryHeap;
+/// use std::cmp::Reverse;
+///
+/// let mut heap = BinaryHeap::new();
+///
+/// // Wrap values in `Reverse`
+/// heap.push(Reverse(1));
+/// heap.push(Reverse(5));
+/// heap.push(Reverse(2));
+///
+/// // If we pop these scores now, they should come back in the reverse order.
+/// assert_eq!(heap.pop(), Some(Reverse(1)));
+/// assert_eq!(heap.pop(), Some(Reverse(2)));
+/// assert_eq!(heap.pop(), Some(Reverse(5)));
+/// assert_eq!(heap.pop(), None);
+/// ```
+///
+/// # Time complexity
+///
+/// | [push] | [pop]    | [peek]/[peek\_mut] |
+/// |--------|----------|--------------------|
+/// | O(1)~  | O(log n) | O(1)               |
+///
+/// The value for `push` is an expected cost; the method documentation gives a
+/// more detailed analysis.
+///
+/// [push]: #method.push
+/// [pop]: #method.pop
+/// [peek]: #method.peek
+/// [peek\_mut]: #method.peek_mut
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct BinaryHeap<T> {
     data: Vec<T>,
@@ -360,6 +398,10 @@ impl<T: Ord> BinaryHeap<T> {
     /// }
     /// assert_eq!(heap.peek(), Some(&2));
     /// ```
+    ///
+    /// # Time complexity
+    ///
+    /// Cost is O(1) in the worst case.
     #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
     pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
         if self.is_empty() {
@@ -387,6 +429,11 @@ impl<T: Ord> BinaryHeap<T> {
     /// assert_eq!(heap.pop(), Some(1));
     /// assert_eq!(heap.pop(), None);
     /// ```
+    ///
+    /// # Time complexity
+    ///
+    /// The worst case cost of `pop` on a heap containing *n* elements is O(log
+    /// n).
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn pop(&mut self) -> Option<T> {
         self.data.pop().map(|mut item| {
@@ -414,6 +461,22 @@ impl<T: Ord> BinaryHeap<T> {
     /// assert_eq!(heap.len(), 3);
     /// assert_eq!(heap.peek(), Some(&5));
     /// ```
+    ///
+    /// # Time complexity
+    ///
+    /// The expected cost of `push`, averaged over every possible ordering of
+    /// the elements being pushed, and over a sufficiently large number of
+    /// pushes, is O(1). This is the most meaningful cost metric when pushing
+    /// elements that are *not* already in any sorted pattern.
+    ///
+    /// The time complexity degrades if elements are pushed in predominantly
+    /// ascending order. In the worst case, elements are pushed in ascending
+    /// sorted order and the amortized cost per push is O(log n) against a heap
+    /// containing *n* elements.
+    ///
+    /// The worst case cost of a *single* call to `push` is O(n). The worst case
+    /// occurs when capacity is exhausted and needs a resize. The resize cost
+    /// has been amortized in the previous figures.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn push(&mut self, item: T) {
         let old_len = self.len();
@@ -626,6 +689,10 @@ impl<T> BinaryHeap<T> {
     /// assert_eq!(heap.peek(), Some(&5));
     ///
     /// ```
+    ///
+    /// # Time complexity
+    ///
+    /// Cost is O(1) in the worst case.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn peek(&self) -> Option<&T> {
         self.data.get(0)
@@ -968,6 +1035,11 @@ impl<'a, T> Iterator for Iter<'a, T> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.iter.size_hint()
     }
+
+    #[inline]
+    fn last(self) -> Option<&'a T> {
+        self.iter.last()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1177,9 +1249,7 @@ impl<T: Ord> BinaryHeap<T> {
 
         self.reserve(lower);
 
-        for elem in iterator {
-            self.push(elem);
-        }
+        iterator.for_each(move |elem| self.push(elem));
     }
 }
 
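
One nuance behind the O(1) figure for `peek_mut` above: obtaining the `PeekMut` guard is constant-time, but if the value is mutated, the heap re-sifts when the guard drops, which costs O(log n) at that point. A small sketch:

    use std::collections::BinaryHeap;

    fn main() {
        let mut heap = BinaryHeap::from(vec![1, 5, 2]);
        if let Some(mut top) = heap.peek_mut() {
            // Writing through `PeekMut` restores the heap property on drop.
            *top = 0;
        }
        // 5 was replaced by 0, so 2 is the new maximum.
        assert_eq!(heap.pop(), Some(2));
    }
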
diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index ce29978856f..1683b810556 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -75,10 +75,10 @@ use Entry::*;
 ///
 /// // look up the values associated with some keys.
 /// let to_find = ["Up!", "Office Space"];
-/// for book in &to_find {
-///     match movie_reviews.get(book) {
-///        Some(review) => println!("{}: {}", book, review),
-///        None => println!("{} is unreviewed.", book)
+/// for movie in &to_find {
+///     match movie_reviews.get(movie) {
+///        Some(review) => println!("{}: {}", movie, review),
+///        None => println!("{} is unreviewed.", movie)
 ///     }
 /// }
 ///
@@ -200,7 +200,7 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
             }
         }
 
-        if self.len() == 0 {
+        if self.is_empty() {
             // Ideally we'd call `BTreeMap::new` here, but that has the `K:
             // Ord` constraint, which this method lacks.
             BTreeMap {
@@ -759,19 +759,19 @@ impl<K: Ord, V> BTreeMap<K, V> {
     #[stable(feature = "btree_append", since = "1.11.0")]
     pub fn append(&mut self, other: &mut Self) {
         // Do we have to append anything at all?
-        if other.len() == 0 {
+        if other.is_empty() {
             return;
         }
 
         // We can just swap `self` and `other` if `self` is empty.
-        if self.len() == 0 {
+        if self.is_empty() {
             mem::swap(self, other);
             return;
         }
 
         // First, we merge `self` and `other` into a sorted sequence in linear time.
-        let self_iter = mem::replace(self, BTreeMap::new()).into_iter();
-        let other_iter = mem::replace(other, BTreeMap::new()).into_iter();
+        let self_iter = mem::take(self).into_iter();
+        let other_iter = mem::take(other).into_iter();
         let iter = MergeIter {
             left: self_iter.peekable(),
             right: other_iter.peekable(),
@@ -1193,6 +1193,10 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.length, Some(self.length))
     }
+
+    fn last(mut self) -> Option<(&'a K, &'a V)> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -1253,6 +1257,10 @@ impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.length, Some(self.length))
     }
+
+    fn last(mut self) -> Option<(&'a K, &'a mut V)> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1421,6 +1429,10 @@ impl<'a, K, V> Iterator for Keys<'a, K, V> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.inner.size_hint()
     }
+
+    fn last(mut self) -> Option<&'a K> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1458,6 +1470,10 @@ impl<'a, K, V> Iterator for Values<'a, K, V> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.inner.size_hint()
     }
+
+    fn last(mut self) -> Option<&'a V> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1495,6 +1511,10 @@ impl<'a, K, V> Iterator for Range<'a, K, V> {
             unsafe { Some(self.next_unchecked()) }
         }
     }
+
+    fn last(mut self) -> Option<(&'a K, &'a V)> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "map_values_mut", since = "1.10.0")]
@@ -1508,6 +1528,10 @@ impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.inner.size_hint()
     }
+
+    fn last(mut self) -> Option<&'a mut V> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "map_values_mut", since = "1.10.0")]
@@ -1626,6 +1650,10 @@ impl<'a, K, V> Iterator for RangeMut<'a, K, V> {
             unsafe { Some(self.next_unchecked()) }
         }
     }
+
+    fn last(mut self) -> Option<(&'a K, &'a mut V)> {
+        self.next_back()
+    }
 }
 
 impl<'a, K, V> RangeMut<'a, K, V> {
@@ -1727,9 +1755,9 @@ impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
 impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
     #[inline]
     fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
-        for (k, v) in iter {
+        iter.into_iter().for_each(move |(k, v)| {
             self.insert(k, v);
-        }
+        });
     }
 }
 
@@ -2004,7 +2032,7 @@ impl<K, V> BTreeMap<K, V> {
     /// assert_eq!(keys, [1, 2]);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
+    pub fn keys(&self) -> Keys<'_, K, V> {
         Keys { inner: self.iter() }
     }
 
@@ -2025,7 +2053,7 @@ impl<K, V> BTreeMap<K, V> {
     /// assert_eq!(values, ["hello", "goodbye"]);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn values<'a>(&'a self) -> Values<'a, K, V> {
+    pub fn values(&self) -> Values<'_, K, V> {
         Values { inner: self.iter() }
     }
 
@@ -2529,8 +2557,8 @@ enum UnderflowResult<'a, K, V> {
     Stole(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
 }
 
-fn handle_underfull_node<'a, K, V>(node: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>)
-                                   -> UnderflowResult<'a, K, V> {
+fn handle_underfull_node<K, V>(node: NodeRef<marker::Mut<'_>, K, V, marker::LeafOrInternal>)
+                               -> UnderflowResult<'_, K, V> {
     let parent = if let Ok(parent) = node.ascend() {
         parent
     } else {
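
The `last` overrides added throughout this file exploit double-endedness: instead of draining the iterator, `last` simply calls `next_back`, replacing an O(n) walk with a single step to the rightmost entry. The observable behavior, as a sketch:

    use std::collections::BTreeMap;

    fn main() {
        let map: BTreeMap<i32, &str> =
            vec![(1, "a"), (2, "b"), (3, "c")].into_iter().collect();
        // `last` on Keys/Values now delegates to `next_back`.
        assert_eq!(map.keys().last(), Some(&3));
        assert_eq!(map.values().last(), Some(&"c"));
    }
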
diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs
index 66d619b1298..e067096f0c7 100644
--- a/src/liballoc/collections/btree/node.rs
+++ b/src/liballoc/collections/btree/node.rs
@@ -106,10 +106,10 @@ impl<K, V> LeafNode<K, V> {
         LeafNode {
             // As a general policy, we leave fields uninitialized if they can be, as this should
             // be both slightly faster and easier to track in Valgrind.
-            keys: uninitialized_array![_; CAPACITY],
-            vals: uninitialized_array![_; CAPACITY],
+            keys: uninit_array![_; CAPACITY],
+            vals: uninit_array![_; CAPACITY],
             parent: ptr::null(),
-            parent_idx: MaybeUninit::uninitialized(),
+            parent_idx: MaybeUninit::uninit(),
             len: 0
         }
     }
@@ -129,7 +129,7 @@ unsafe impl Sync for NodeHeader<(), ()> {}
 // ever take a pointer past the first key.
 static EMPTY_ROOT_NODE: NodeHeader<(), ()> = NodeHeader {
     parent: ptr::null(),
-    parent_idx: MaybeUninit::uninitialized(),
+    parent_idx: MaybeUninit::uninit(),
     len: 0,
     keys_start: [],
 };
@@ -159,7 +159,7 @@ impl<K, V> InternalNode<K, V> {
     unsafe fn new() -> Self {
         InternalNode {
             data: LeafNode::new(),
-            edges: uninitialized_array![_; 2*B],
+            edges: uninit_array![_; 2*B],
         }
     }
 }
@@ -261,7 +261,7 @@ impl<K, V> Root<K, V> {
             -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> {
         debug_assert!(!self.is_shared_root());
         let mut new_node = Box::new(unsafe { InternalNode::new() });
-        new_node.edges[0].set(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) });
+        new_node.edges[0].write(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) });
 
         self.node = BoxedNode::from_internal(new_node);
         self.height += 1;
@@ -394,7 +394,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
     }
 
     /// Temporarily takes out another, immutable reference to the same node.
-    fn reborrow<'a>(&'a self) -> NodeRef<marker::Immut<'a>, K, V, Type> {
+    fn reborrow(&self) -> NodeRef<marker::Immut<'_>, K, V, Type> {
         NodeRef {
             height: self.height,
             node: self.node,
@@ -737,7 +737,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
         unsafe {
             ptr::write(self.keys_mut().get_unchecked_mut(idx), key);
             ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
-            self.as_internal_mut().edges.get_unchecked_mut(idx + 1).set(edge.node);
+            self.as_internal_mut().edges.get_unchecked_mut(idx + 1).write(edge.node);
 
             (*self.as_leaf_mut()).len += 1;
 
@@ -1080,7 +1080,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
         let mut child = self.descend();
         unsafe {
             (*child.as_leaf_mut()).parent = ptr;
-            (*child.as_leaf_mut()).parent_idx.set(idx);
+            (*child.as_leaf_mut()).parent_idx.write(idx);
         }
     }
 
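
These hunks track renames in the `MaybeUninit` API (`uninitialized` to `uninit`, `set` to `write`, and `uninitialized_array!` to `uninit_array!`). The underlying pattern the node code relies on, as a standalone sketch:

    use std::mem::MaybeUninit;

    fn main() {
        // Reserve space without initializing it; reading it now would be UB.
        let mut slot: MaybeUninit<u32> = MaybeUninit::uninit();
        // `write` initializes the slot without dropping any previous value.
        slot.write(7);
        // Only after initialization is `assume_init` sound.
        let value = unsafe { slot.assume_init() };
        assert_eq!(value, 7);
    }
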
diff --git a/src/liballoc/collections/btree/set.rs b/src/liballoc/collections/btree/set.rs
index 2be6455ad59..d3af910a82c 100644
--- a/src/liballoc/collections/btree/set.rs
+++ b/src/liballoc/collections/btree/set.rs
@@ -3,7 +3,7 @@
 
 use core::borrow::Borrow;
 use core::cmp::Ordering::{self, Less, Greater, Equal};
-use core::cmp::{min, max};
+use core::cmp::max;
 use core::fmt::{self, Debug};
 use core::iter::{Peekable, FromIterator, FusedIterator};
 use core::ops::{BitOr, BitAnd, BitXor, Sub, RangeBounds};
@@ -118,17 +118,36 @@ pub struct Range<'a, T: 'a> {
 /// [`difference`]: struct.BTreeSet.html#method.difference
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Difference<'a, T: 'a> {
-    a: Peekable<Iter<'a, T>>,
-    b: Peekable<Iter<'a, T>>,
+    inner: DifferenceInner<'a, T>,
+}
+enum DifferenceInner<'a, T: 'a> {
+    Stitch {
+        self_iter: Iter<'a, T>,
+        other_iter: Peekable<Iter<'a, T>>,
+    },
+    Search {
+        self_iter: Iter<'a, T>,
+        other_set: &'a BTreeSet<T>,
+    },
 }
 
 #[stable(feature = "collection_debug", since = "1.17.0")]
 impl<T: fmt::Debug> fmt::Debug for Difference<'_, T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_tuple("Difference")
-         .field(&self.a)
-         .field(&self.b)
-         .finish()
+        match &self.inner {
+            DifferenceInner::Stitch {
+                self_iter,
+                other_iter,
+            } => f
+                .debug_tuple("Difference")
+                .field(&self_iter)
+                .field(&other_iter)
+                .finish(),
+            DifferenceInner::Search {
+                self_iter,
+                other_set: _,
+            } => f.debug_tuple("Difference").field(&self_iter).finish(),
+        }
     }
 }
 
@@ -164,17 +183,36 @@ impl<T: fmt::Debug> fmt::Debug for SymmetricDifference<'_, T> {
 /// [`intersection`]: struct.BTreeSet.html#method.intersection
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Intersection<'a, T: 'a> {
-    a: Peekable<Iter<'a, T>>,
-    b: Peekable<Iter<'a, T>>,
+    inner: IntersectionInner<'a, T>,
+}
+enum IntersectionInner<'a, T: 'a> {
+    Stitch {
+        small_iter: Iter<'a, T>, // for size_hint, should be the smaller of the sets
+        other_iter: Iter<'a, T>,
+    },
+    Search {
+        small_iter: Iter<'a, T>,
+        large_set: &'a BTreeSet<T>,
+    },
 }
 
 #[stable(feature = "collection_debug", since = "1.17.0")]
 impl<T: fmt::Debug> fmt::Debug for Intersection<'_, T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_tuple("Intersection")
-         .field(&self.a)
-         .field(&self.b)
-         .finish()
+        match &self.inner {
+            IntersectionInner::Stitch {
+                small_iter,
+                other_iter,
+            } => f
+                .debug_tuple("Intersection")
+                .field(&small_iter)
+                .field(&other_iter)
+                .finish(),
+            IntersectionInner::Search {
+                small_iter,
+                large_set: _,
+            } => f.debug_tuple("Intersection").field(&small_iter).finish(),
+        }
     }
 }
 
@@ -201,6 +239,14 @@ impl<T: fmt::Debug> fmt::Debug for Union<'_, T> {
     }
 }
 
+// This constant is used by functions that compare two sets.
+// It estimates the relative size at which searching performs better
+// than iterating, based on the benchmarks in
+// https://github.com/ssomers/rust_bench_btreeset_intersection;
+// It's used to divide rather than multiply sizes, to rule out overflow,
+// and it's a power of two to make that division cheap.
+const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16;
+
 impl<T: Ord> BTreeSet<T> {
     /// Makes a new `BTreeSet` with a reasonable choice of B.
     ///
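
As the following hunks show, this constant drives a simple dispatch: if the smaller set's length exceeds the larger set's length divided by 16, both sets are iterated in lockstep ("stitch"); otherwise every element of the small set is looked up in the large one ("search"). A standalone sketch of the decision, with a hypothetical helper name:

    const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16;

    // Mirrors the branch in `intersection` below (helper name is ours).
    fn use_search(small_len: usize, large_len: usize) -> bool {
        // Dividing (not multiplying) rules out overflow; a power of two
        // keeps the division cheap.
        small_len <= large_len / ITER_PERFORMANCE_TIPPING_SIZE_DIFF
    }

    fn main() {
        assert!(!use_search(100, 100));   // similar sizes: stitch
        assert!(use_search(100, 10_000)); // tiny vs. huge: search
    }
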
@@ -268,9 +314,24 @@ impl<T: Ord> BTreeSet<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn difference<'a>(&'a self, other: &'a BTreeSet<T>) -> Difference<'a, T> {
-        Difference {
-            a: self.iter().peekable(),
-            b: other.iter().peekable(),
+        if self.len() > other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF {
+            // Self is bigger than or not much smaller than other set.
+            // Iterate both sets jointly, spotting matches along the way.
+            Difference {
+                inner: DifferenceInner::Stitch {
+                    self_iter: self.iter(),
+                    other_iter: other.iter().peekable(),
+                },
+            }
+        } else {
+            // Self is much smaller than other set, or both sets are empty.
+            // Iterate the small set, searching for matches in the large set.
+            Difference {
+                inner: DifferenceInner::Search {
+                    self_iter: self.iter(),
+                    other_set: other,
+                },
+            }
         }
     }
 
@@ -326,9 +387,29 @@ impl<T: Ord> BTreeSet<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn intersection<'a>(&'a self, other: &'a BTreeSet<T>) -> Intersection<'a, T> {
-        Intersection {
-            a: self.iter().peekable(),
-            b: other.iter().peekable(),
+        let (small, other) = if self.len() <= other.len() {
+            (self, other)
+        } else {
+            (other, self)
+        };
+        if small.len() > other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF {
+            // Small set is not much smaller than other set.
+            // Iterate both sets jointly, spotting matches along the way.
+            Intersection {
+                inner: IntersectionInner::Stitch {
+                    small_iter: small.iter(),
+                    other_iter: other.iter(),
+                },
+            }
+        } else {
+            // Big difference in number of elements, or both sets are empty.
+            // Iterate the small set, searching for matches in the large set.
+            Intersection {
+                inner: IntersectionInner::Search {
+                    small_iter: small.iter(),
+                    large_set: other,
+                },
+            }
         }
     }
 
@@ -462,28 +543,44 @@ impl<T: Ord> BTreeSet<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn is_subset(&self, other: &BTreeSet<T>) -> bool {
-        // Stolen from TreeMap
-        let mut x = self.iter();
-        let mut y = other.iter();
-        let mut a = x.next();
-        let mut b = y.next();
-        while a.is_some() {
-            if b.is_none() {
-                return false;
-            }
+        // Same result as self.difference(other).next().is_none()
+        // but the 3 paths below are faster (in order: hugely, 20%, 5%).
+        if self.len() > other.len() {
+            false
+        } else if self.len() > other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF {
+            // Self is not much smaller than other set.
+            // Stolen from TreeMap
+            let mut x = self.iter();
+            let mut y = other.iter();
+            let mut a = x.next();
+            let mut b = y.next();
+            while a.is_some() {
+                if b.is_none() {
+                    return false;
+                }
 
-            let a1 = a.unwrap();
-            let b1 = b.unwrap();
+                let a1 = a.unwrap();
+                let b1 = b.unwrap();
 
-            match b1.cmp(a1) {
-                Less => (),
-                Greater => return false,
-                Equal => a = x.next(),
-            }
+                match b1.cmp(a1) {
+                    Less => (),
+                    Greater => return false,
+                    Equal => a = x.next(),
+                }
 
-            b = y.next();
+                b = y.next();
+            }
+            true
+        } else {
+            // Big difference in number of elements, or both sets are empty.
+            // Iterate the small set, searching for matches in the large set.
+            for next in self {
+                if !other.contains(next) {
+                    return false;
+                }
+            }
+            true
         }
-        true
     }
 
     /// Returns `true` if the set is a superset of another,
@@ -786,9 +883,9 @@ impl<'a, T> IntoIterator for &'a BTreeSet<T> {
 impl<T: Ord> Extend<T> for BTreeSet<T> {
     #[inline]
     fn extend<Iter: IntoIterator<Item = T>>(&mut self, iter: Iter) {
-        for elem in iter {
+        iter.into_iter().for_each(move |elem| {
             self.insert(elem);
-        }
+        });
     }
 }
 
@@ -922,6 +1019,9 @@ impl<'a, T> Iterator for Iter<'a, T> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.iter.size_hint()
     }
+    fn last(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
@@ -976,6 +1076,10 @@ impl<'a, T> Iterator for Range<'a, T> {
     fn next(&mut self) -> Option<&'a T> {
         self.iter.next().map(|(k, _)| k)
     }
+
+    fn last(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "btree_range", since = "1.17.0")]
@@ -1001,8 +1105,22 @@ fn cmp_opt<T: Ord>(x: Option<&T>, y: Option<&T>, short: Ordering, long: Ordering
 impl<T> Clone for Difference<'_, T> {
     fn clone(&self) -> Self {
         Difference {
-            a: self.a.clone(),
-            b: self.b.clone(),
+            inner: match &self.inner {
+                DifferenceInner::Stitch {
+                    self_iter,
+                    other_iter,
+                } => DifferenceInner::Stitch {
+                    self_iter: self_iter.clone(),
+                    other_iter: other_iter.clone(),
+                },
+                DifferenceInner::Search {
+                    self_iter,
+                    other_set,
+                } => DifferenceInner::Search {
+                    self_iter: self_iter.clone(),
+                    other_set,
+                },
+            },
         }
     }
 }
@@ -1011,24 +1129,52 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> {
     type Item = &'a T;
 
     fn next(&mut self) -> Option<&'a T> {
-        loop {
-            match cmp_opt(self.a.peek(), self.b.peek(), Less, Less) {
-                Less => return self.a.next(),
-                Equal => {
-                    self.a.next();
-                    self.b.next();
-                }
-                Greater => {
-                    self.b.next();
+        match &mut self.inner {
+            DifferenceInner::Stitch {
+                self_iter,
+                other_iter,
+            } => {
+                let mut self_next = self_iter.next()?;
+                loop {
+                    match other_iter
+                        .peek()
+                        .map_or(Less, |other_next| Ord::cmp(self_next, other_next))
+                    {
+                        Less => return Some(self_next),
+                        Equal => {
+                            self_next = self_iter.next()?;
+                            other_iter.next();
+                        }
+                        Greater => {
+                            other_iter.next();
+                        }
+                    }
                 }
             }
+            DifferenceInner::Search {
+                self_iter,
+                other_set,
+            } => loop {
+                let self_next = self_iter.next()?;
+                if !other_set.contains(&self_next) {
+                    return Some(self_next);
+                }
+            },
         }
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let a_len = self.a.len();
-        let b_len = self.b.len();
-        (a_len.saturating_sub(b_len), Some(a_len))
+        let (self_len, other_len) = match &self.inner {
+            DifferenceInner::Stitch {
+                self_iter,
+                other_iter,
+            } => (self_iter.len(), other_iter.len()),
+            DifferenceInner::Search {
+                self_iter,
+                other_set,
+            } => (self_iter.len(), other_set.len()),
+        };
+        (self_len.saturating_sub(other_len), Some(self_len))
     }
 }
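From the caller's perspective the behavior is unchanged; a quick check of `difference`, which exercises whichever inner variant the constructor chose:

    use std::collections::BTreeSet;

    fn main() {
        let a: BTreeSet<_> = (0..5).collect();
        let b: BTreeSet<_> = (3..8).collect();
        let d: Vec<_> = a.difference(&b).copied().collect();
        assert_eq!(d, [0, 1, 2]);
        // Upper bound of size_hint is len(a); lower bound is
        // len(a) - len(b), saturating at zero.
        assert_eq!(a.difference(&b).size_hint(), (0, Some(5)));
    }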
 
@@ -1073,8 +1219,22 @@ impl<T: Ord> FusedIterator for SymmetricDifference<'_, T> {}
 impl<T> Clone for Intersection<'_, T> {
     fn clone(&self) -> Self {
         Intersection {
-            a: self.a.clone(),
-            b: self.b.clone(),
+            inner: match &self.inner {
+                IntersectionInner::Stitch {
+                    small_iter,
+                    other_iter,
+                } => IntersectionInner::Stitch {
+                    small_iter: small_iter.clone(),
+                    other_iter: other_iter.clone(),
+                },
+                IntersectionInner::Search {
+                    small_iter,
+                    large_set,
+                } => IntersectionInner::Search {
+                    small_iter: small_iter.clone(),
+                    large_set,
+                },
+            },
         }
     }
 }
@@ -1083,24 +1243,39 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> {
     type Item = &'a T;
 
     fn next(&mut self) -> Option<&'a T> {
-        loop {
-            match Ord::cmp(self.a.peek()?, self.b.peek()?) {
-                Less => {
-                    self.a.next();
-                }
-                Equal => {
-                    self.b.next();
-                    return self.a.next();
-                }
-                Greater => {
-                    self.b.next();
+        match &mut self.inner {
+            IntersectionInner::Stitch {
+                small_iter,
+                other_iter,
+            } => {
+                let mut small_next = small_iter.next()?;
+                let mut other_next = other_iter.next()?;
+                loop {
+                    match Ord::cmp(small_next, other_next) {
+                        Less => small_next = small_iter.next()?,
+                        Greater => other_next = other_iter.next()?,
+                        Equal => return Some(small_next),
+                    }
                 }
             }
+            IntersectionInner::Search {
+                small_iter,
+                large_set,
+            } => loop {
+                let small_next = small_iter.next()?;
+                if large_set.contains(&small_next) {
+                    return Some(small_next);
+                }
+            },
         }
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
-        (0, Some(min(self.a.len(), self.b.len())))
+        let min_len = match &self.inner {
+            IntersectionInner::Stitch { small_iter, .. } => small_iter.len(),
+            IntersectionInner::Search { small_iter, .. } => small_iter.len(),
+        };
+        (0, Some(min_len))
     }
 }
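As with `Difference`, the `Stitch` and `Search` variants of `Intersection` produce identical results; only the cost model differs. A quick check:

    use std::collections::BTreeSet;

    fn main() {
        let small: BTreeSet<_> = [2, 400, 999].iter().copied().collect();
        let large: BTreeSet<_> = (0..1000).collect();
        let common: Vec<_> = small.intersection(&large).copied().collect();
        assert_eq!(common, [2, 400, 999]);
        // The upper size_hint is the length of the smaller iterator.
        assert_eq!(small.intersection(&large).size_hint(), (0, Some(3)));
    }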
 
diff --git a/src/liballoc/collections/linked_list.rs b/src/liballoc/collections/linked_list.rs
index c2ee2e63156..a14a3fe9994 100644
--- a/src/liballoc/collections/linked_list.rs
+++ b/src/liballoc/collections/linked_list.rs
@@ -23,6 +23,9 @@ use core::ptr::NonNull;
 use crate::boxed::Box;
 use super::SpecExtend;
 
+#[cfg(test)]
+mod tests;
+
 /// A doubly-linked list with owned nodes.
 ///
 /// The `LinkedList` allows pushing and popping elements at either end
@@ -86,6 +89,9 @@ impl<T> Clone for Iter<'_, T> {
 /// [`LinkedList`]: struct.LinkedList.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IterMut<'a, T: 'a> {
+    // We do *not* exclusively own the entire list here; references to the nodes'
+    // `element` fields have been handed out by the iterator! So be careful when
+    // using this; the methods called must be aware that there can be aliasing
+    // pointers to `element`.
     list: &'a mut LinkedList<T>,
     head: Option<NonNull<Node<T>>>,
     tail: Option<NonNull<Node<T>>>,
@@ -143,6 +149,8 @@ impl<T> LinkedList<T> {
     /// Adds the given node to the front of the list.
     #[inline]
     fn push_front_node(&mut self, mut node: Box<Node<T>>) {
+        // This method takes care not to create mutable references to whole nodes,
+        // to maintain validity of aliasing pointers into `element`.
         unsafe {
             node.next = self.head;
             node.prev = None;
@@ -150,7 +158,8 @@ impl<T> LinkedList<T> {
 
             match self.head {
                 None => self.tail = node,
-                Some(mut head) => head.as_mut().prev = node,
+                // Not creating new mutable (unique!) references overlapping `element`.
+                Some(head) => (*head.as_ptr()).prev = node,
             }
 
             self.head = node;
@@ -161,13 +170,16 @@ impl<T> LinkedList<T> {
     /// Removes and returns the node at the front of the list.
     #[inline]
     fn pop_front_node(&mut self) -> Option<Box<Node<T>>> {
+        // This method takes care not to create mutable references to whole nodes,
+        // to maintain validity of aliasing pointers into `element`.
         self.head.map(|node| unsafe {
             let node = Box::from_raw(node.as_ptr());
             self.head = node.next;
 
             match self.head {
                 None => self.tail = None,
-                Some(mut head) => head.as_mut().prev = None,
+                // Not creating new mutable (unique!) references overlapping `element`.
+                Some(head) => (*head.as_ptr()).prev = None,
             }
 
             self.len -= 1;
@@ -178,6 +190,8 @@ impl<T> LinkedList<T> {
     /// Adds the given node to the back of the list.
     #[inline]
     fn push_back_node(&mut self, mut node: Box<Node<T>>) {
+        // This method takes care not to create mutable references to whole nodes,
+        // to maintain validity of aliasing pointers into `element`.
         unsafe {
             node.next = None;
             node.prev = self.tail;
@@ -185,7 +199,8 @@ impl<T> LinkedList<T> {
 
             match self.tail {
                 None => self.head = node,
-                Some(mut tail) => tail.as_mut().next = node,
+                // Not creating new mutable (unique!) references overlapping `element`.
+                Some(tail) => (*tail.as_ptr()).next = node,
             }
 
             self.tail = node;
@@ -196,13 +211,16 @@ impl<T> LinkedList<T> {
     /// Removes and returns the node at the back of the list.
     #[inline]
     fn pop_back_node(&mut self) -> Option<Box<Node<T>>> {
+        // This method takes care not to create mutable references to whole nodes,
+        // to maintain validity of aliasing pointers into `element`.
         self.tail.map(|node| unsafe {
             let node = Box::from_raw(node.as_ptr());
             self.tail = node.prev;
 
             match self.tail {
                 None => self.head = None,
-                Some(mut tail) => tail.as_mut().next = None,
+                // Not creating new mutable (unique!) references overlapping `element`.
+                Some(tail) => (*tail.as_ptr()).next = None,
             }
 
             self.len -= 1;
@@ -213,20 +231,24 @@ impl<T> LinkedList<T> {
     /// Unlinks the specified node from the current list.
     ///
     /// Warning: this will not check that the provided node belongs to the current list.
+    ///
+    /// This method takes care not to create mutable references to `element`, to
+    /// maintain validity of aliasing pointers.
     #[inline]
     unsafe fn unlink_node(&mut self, mut node: NonNull<Node<T>>) {
-        let node = node.as_mut();
+        let node = node.as_mut(); // this one is ours now, we can create an &mut.
 
+        // Not creating new mutable (unique!) references overlapping `element`.
         match node.prev {
-            Some(mut prev) => prev.as_mut().next = node.next.clone(),
+            Some(prev) => (*prev.as_ptr()).next = node.next,
             // this node is the head node
-            None => self.head = node.next.clone(),
+            None => self.head = node.next,
         };
 
         match node.next {
-            Some(mut next) => next.as_mut().prev = node.prev.clone(),
+            Some(next) => (*next.as_ptr()).prev = node.prev,
             // this node is the tail node
-            None => self.tail = node.prev.clone(),
+            None => self.tail = node.prev,
         };
 
         self.len -= 1;
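A standalone sketch of the aliasing rule these hunks enforce, using a hypothetical, simplified `Node` (not the one in this file): a field write through a raw pointer touches only that field, whereas `as_mut()` would assert unique access to the whole node and invalidate any `&mut` to `element` the iterator has already handed out.

    struct Node {
        next: *mut Node,
        element: i32,
    }

    fn main() {
        let mut n = Node { next: std::ptr::null_mut(), element: 7 };
        let p: *mut Node = &mut n;
        let elem: &mut i32 = unsafe { &mut (*p).element };
        // Field-only write through the raw pointer: no `&mut Node` covering
        // `element` is created, so `elem` remains valid.
        unsafe { (*p).next = std::ptr::null_mut() };
        *elem += 1;
        assert_eq!(n.element, 8);
    }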
@@ -297,6 +319,8 @@ impl<T> LinkedList<T> {
         match self.tail {
             None => mem::swap(self, other),
             Some(mut tail) => {
+                // `as_mut` is okay here because we have exclusive access to the entirety
+                // of both lists.
                 if let Some(mut other_head) = other.head.take() {
                     unsafe {
                         tail.as_mut().next = Some(other_head);
@@ -687,7 +711,7 @@ impl<T> LinkedList<T> {
         let len = self.len();
         assert!(at <= len, "Cannot split off at a nonexistent index");
         if at == 0 {
-            return mem::replace(self, Self::new());
+            return mem::take(self);
         } else if at == len {
             return Self::new();
         }
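`mem::take(self)` is equivalent to `mem::replace(self, Self::new())` whenever the `Default` value is the empty value, and it states the intent more directly:

    use std::mem;

    fn main() {
        let mut v = vec![1, 2, 3];
        // Leaves `Default::default()` (an empty Vec) behind.
        let taken = mem::take(&mut v);
        assert_eq!(taken, [1, 2, 3]);
        assert!(v.is_empty());
    }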
@@ -811,6 +835,11 @@ impl<'a, T> Iterator for Iter<'a, T> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.len, Some(self.len))
     }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -860,6 +889,11 @@ impl<'a, T> Iterator for IterMut<'a, T> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.len, Some(self.len))
     }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a mut T> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -916,9 +950,11 @@ impl<T> IterMut<'_, T> {
                issue = "27794")]
     pub fn insert_next(&mut self, element: T) {
         match self.head {
+            // `push_back` is okay with aliasing `element` references
             None => self.list.push_back(element),
-            Some(mut head) => unsafe {
-                let mut prev = match head.as_ref().prev {
+            Some(head) => unsafe {
+                let prev = match head.as_ref().prev {
+                    // `push_front` is okay with aliasing nodes
                     None => return self.list.push_front(element),
                     Some(prev) => prev,
                 };
@@ -929,8 +965,10 @@ impl<T> IterMut<'_, T> {
                     element,
                 }));
 
-                prev.as_mut().next = node;
-                head.as_mut().prev = node;
+                // Not creating references to entire nodes to not invalidate the
+                // reference to `element` we handed to the user.
+                (*prev.as_ptr()).next = node;
+                (*head.as_ptr()).prev = node;
 
                 self.list.len += 1;
             },
@@ -994,6 +1032,7 @@ impl<T, F> Iterator for DrainFilter<'_, T, F>
                 self.idx += 1;
 
                 if (self.pred)(&mut node.as_mut().element) {
+                    // `unlink_node` is okay with aliasing `element` references.
                     self.list.unlink_node(node);
                     return Some(Box::from_raw(node.as_ptr()).element);
                 }
@@ -1107,9 +1146,7 @@ impl<T> Extend<T> for LinkedList<T> {
 
 impl<I: IntoIterator> SpecExtend<I> for LinkedList<I::Item> {
     default fn spec_extend(&mut self, iter: I) {
-        for elt in iter {
-            self.push_back(elt);
-        }
+        iter.into_iter().for_each(move |elt| self.push_back(elt));
     }
 }
 
@@ -1210,271 +1247,3 @@ unsafe impl<T: Send> Send for IterMut<'_, T> {}
 
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
-
-#[cfg(test)]
-mod tests {
-    use std::thread;
-    use std::vec::Vec;
-
-    use rand::{thread_rng, RngCore};
-
-    use super::{LinkedList, Node};
-
-    #[cfg(test)]
-    fn list_from<T: Clone>(v: &[T]) -> LinkedList<T> {
-        v.iter().cloned().collect()
-    }
-
-    pub fn check_links<T>(list: &LinkedList<T>) {
-        unsafe {
-            let mut len = 0;
-            let mut last_ptr: Option<&Node<T>> = None;
-            let mut node_ptr: &Node<T>;
-            match list.head {
-                None => {
-                    // tail node should also be None.
-                    assert!(list.tail.is_none());
-                    assert_eq!(0, list.len);
-                    return;
-                }
-                Some(node) => node_ptr = &*node.as_ptr(),
-            }
-            loop {
-                match (last_ptr, node_ptr.prev) {
-                    (None, None) => {}
-                    (None, _) => panic!("prev link for head"),
-                    (Some(p), Some(pptr)) => {
-                        assert_eq!(p as *const Node<T>, pptr.as_ptr() as *const Node<T>);
-                    }
-                    _ => panic!("prev link is none, not good"),
-                }
-                match node_ptr.next {
-                    Some(next) => {
-                        last_ptr = Some(node_ptr);
-                        node_ptr = &*next.as_ptr();
-                        len += 1;
-                    }
-                    None => {
-                        len += 1;
-                        break;
-                    }
-                }
-            }
-
-            // verify that the tail node points to the last node.
-            let tail = list.tail.as_ref().expect("some tail node").as_ref();
-            assert_eq!(tail as *const Node<T>, node_ptr as *const Node<T>);
-            // check that len matches interior links.
-            assert_eq!(len, list.len);
-        }
-    }
-
-    #[test]
-    fn test_append() {
-        // Empty to empty
-        {
-            let mut m = LinkedList::<i32>::new();
-            let mut n = LinkedList::new();
-            m.append(&mut n);
-            check_links(&m);
-            assert_eq!(m.len(), 0);
-            assert_eq!(n.len(), 0);
-        }
-        // Non-empty to empty
-        {
-            let mut m = LinkedList::new();
-            let mut n = LinkedList::new();
-            n.push_back(2);
-            m.append(&mut n);
-            check_links(&m);
-            assert_eq!(m.len(), 1);
-            assert_eq!(m.pop_back(), Some(2));
-            assert_eq!(n.len(), 0);
-            check_links(&m);
-        }
-        // Empty to non-empty
-        {
-            let mut m = LinkedList::new();
-            let mut n = LinkedList::new();
-            m.push_back(2);
-            m.append(&mut n);
-            check_links(&m);
-            assert_eq!(m.len(), 1);
-            assert_eq!(m.pop_back(), Some(2));
-            check_links(&m);
-        }
-
-        // Non-empty to non-empty
-        let v = vec![1, 2, 3, 4, 5];
-        let u = vec![9, 8, 1, 2, 3, 4, 5];
-        let mut m = list_from(&v);
-        let mut n = list_from(&u);
-        m.append(&mut n);
-        check_links(&m);
-        let mut sum = v;
-        sum.extend_from_slice(&u);
-        assert_eq!(sum.len(), m.len());
-        for elt in sum {
-            assert_eq!(m.pop_front(), Some(elt))
-        }
-        assert_eq!(n.len(), 0);
-        // let's make sure it's working properly, since we
-        // did some direct changes to private members
-        n.push_back(3);
-        assert_eq!(n.len(), 1);
-        assert_eq!(n.pop_front(), Some(3));
-        check_links(&n);
-    }
-
-    #[test]
-    fn test_insert_prev() {
-        let mut m = list_from(&[0, 2, 4, 6, 8]);
-        let len = m.len();
-        {
-            let mut it = m.iter_mut();
-            it.insert_next(-2);
-            loop {
-                match it.next() {
-                    None => break,
-                    Some(elt) => {
-                        it.insert_next(*elt + 1);
-                        match it.peek_next() {
-                            Some(x) => assert_eq!(*x, *elt + 2),
-                            None => assert_eq!(8, *elt),
-                        }
-                    }
-                }
-            }
-            it.insert_next(0);
-            it.insert_next(1);
-        }
-        check_links(&m);
-        assert_eq!(m.len(), 3 + len * 2);
-        assert_eq!(m.into_iter().collect::<Vec<_>>(),
-                   [-2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]);
-    }
-
-    #[test]
-    #[cfg_attr(target_os = "emscripten", ignore)]
-    fn test_send() {
-        let n = list_from(&[1, 2, 3]);
-        thread::spawn(move || {
-                check_links(&n);
-                let a: &[_] = &[&1, &2, &3];
-                assert_eq!(a, &*n.iter().collect::<Vec<_>>());
-            })
-            .join()
-            .ok()
-            .unwrap();
-    }
-
-    #[test]
-    fn test_fuzz() {
-        for _ in 0..25 {
-            fuzz_test(3);
-            fuzz_test(16);
-            fuzz_test(189);
-        }
-    }
-
-    #[test]
-    fn test_26021() {
-        // There was a bug in split_off that failed to null out the RHS's head's prev ptr.
-        // This caused the RHS's dtor to walk up into the LHS at drop and delete all of
-        // its nodes.
-        //
-        // https://github.com/rust-lang/rust/issues/26021
-        let mut v1 = LinkedList::new();
-        v1.push_front(1);
-        v1.push_front(1);
-        v1.push_front(1);
-        v1.push_front(1);
-        let _ = v1.split_off(3); // Dropping this now should not cause laundry consumption
-        assert_eq!(v1.len(), 3);
-
-        assert_eq!(v1.iter().len(), 3);
-        assert_eq!(v1.iter().collect::<Vec<_>>().len(), 3);
-    }
-
-    #[test]
-    fn test_split_off() {
-        let mut v1 = LinkedList::new();
-        v1.push_front(1);
-        v1.push_front(1);
-        v1.push_front(1);
-        v1.push_front(1);
-
-        // test all splits
-        for ix in 0..1 + v1.len() {
-            let mut a = v1.clone();
-            let b = a.split_off(ix);
-            check_links(&a);
-            check_links(&b);
-            a.extend(b);
-            assert_eq!(v1, a);
-        }
-    }
-
-    #[cfg(test)]
-    fn fuzz_test(sz: i32) {
-        let mut m: LinkedList<_> = LinkedList::new();
-        let mut v = vec![];
-        for i in 0..sz {
-            check_links(&m);
-            let r: u8 = thread_rng().next_u32() as u8;
-            match r % 6 {
-                0 => {
-                    m.pop_back();
-                    v.pop();
-                }
-                1 => {
-                    if !v.is_empty() {
-                        m.pop_front();
-                        v.remove(0);
-                    }
-                }
-                2 | 4 => {
-                    m.push_front(-i);
-                    v.insert(0, -i);
-                }
-                3 | 5 | _ => {
-                    m.push_back(i);
-                    v.push(i);
-                }
-            }
-        }
-
-        check_links(&m);
-
-        let mut i = 0;
-        for (a, &b) in m.into_iter().zip(&v) {
-            i += 1;
-            assert_eq!(a, b);
-        }
-        assert_eq!(i, v.len());
-    }
-
-    #[test]
-    fn drain_filter_test() {
-        let mut m: LinkedList<u32> = LinkedList::new();
-        m.extend(&[1, 2, 3, 4, 5, 6]);
-        let deleted = m.drain_filter(|v| *v < 4).collect::<Vec<_>>();
-
-        check_links(&m);
-
-        assert_eq!(deleted, &[1, 2, 3]);
-        assert_eq!(m.into_iter().collect::<Vec<_>>(), &[4, 5, 6]);
-    }
-
-    #[test]
-    fn drain_to_empty_test() {
-        let mut m: LinkedList<u32> = LinkedList::new();
-        m.extend(&[1, 2, 3, 4, 5, 6]);
-        let deleted = m.drain_filter(|_| true).collect::<Vec<_>>();
-
-        check_links(&m);
-
-        assert_eq!(deleted, &[1, 2, 3, 4, 5, 6]);
-        assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
-    }
-}
diff --git a/src/liballoc/collections/linked_list/tests.rs b/src/liballoc/collections/linked_list/tests.rs
new file mode 100644
index 00000000000..9a6c57d2869
--- /dev/null
+++ b/src/liballoc/collections/linked_list/tests.rs
@@ -0,0 +1,264 @@
+use super::*;
+
+use std::thread;
+use std::vec::Vec;
+
+use rand::{thread_rng, RngCore};
+
+fn list_from<T: Clone>(v: &[T]) -> LinkedList<T> {
+    v.iter().cloned().collect()
+}
+
+pub fn check_links<T>(list: &LinkedList<T>) {
+    unsafe {
+        let mut len = 0;
+        let mut last_ptr: Option<&Node<T>> = None;
+        let mut node_ptr: &Node<T>;
+        match list.head {
+            None => {
+                // tail node should also be None.
+                assert!(list.tail.is_none());
+                assert_eq!(0, list.len);
+                return;
+            }
+            Some(node) => node_ptr = &*node.as_ptr(),
+        }
+        loop {
+            match (last_ptr, node_ptr.prev) {
+                (None, None) => {}
+                (None, _) => panic!("prev link for head"),
+                (Some(p), Some(pptr)) => {
+                    assert_eq!(p as *const Node<T>, pptr.as_ptr() as *const Node<T>);
+                }
+                _ => panic!("prev link is none, not good"),
+            }
+            match node_ptr.next {
+                Some(next) => {
+                    last_ptr = Some(node_ptr);
+                    node_ptr = &*next.as_ptr();
+                    len += 1;
+                }
+                None => {
+                    len += 1;
+                    break;
+                }
+            }
+        }
+
+        // verify that the tail node points to the last node.
+        let tail = list.tail.as_ref().expect("some tail node").as_ref();
+        assert_eq!(tail as *const Node<T>, node_ptr as *const Node<T>);
+        // check that len matches interior links.
+        assert_eq!(len, list.len);
+    }
+}
+
+#[test]
+fn test_append() {
+    // Empty to empty
+    {
+        let mut m = LinkedList::<i32>::new();
+        let mut n = LinkedList::new();
+        m.append(&mut n);
+        check_links(&m);
+        assert_eq!(m.len(), 0);
+        assert_eq!(n.len(), 0);
+    }
+    // Non-empty to empty
+    {
+        let mut m = LinkedList::new();
+        let mut n = LinkedList::new();
+        n.push_back(2);
+        m.append(&mut n);
+        check_links(&m);
+        assert_eq!(m.len(), 1);
+        assert_eq!(m.pop_back(), Some(2));
+        assert_eq!(n.len(), 0);
+        check_links(&m);
+    }
+    // Empty to non-empty
+    {
+        let mut m = LinkedList::new();
+        let mut n = LinkedList::new();
+        m.push_back(2);
+        m.append(&mut n);
+        check_links(&m);
+        assert_eq!(m.len(), 1);
+        assert_eq!(m.pop_back(), Some(2));
+        check_links(&m);
+    }
+
+    // Non-empty to non-empty
+    let v = vec![1, 2, 3, 4, 5];
+    let u = vec![9, 8, 1, 2, 3, 4, 5];
+    let mut m = list_from(&v);
+    let mut n = list_from(&u);
+    m.append(&mut n);
+    check_links(&m);
+    let mut sum = v;
+    sum.extend_from_slice(&u);
+    assert_eq!(sum.len(), m.len());
+    for elt in sum {
+        assert_eq!(m.pop_front(), Some(elt))
+    }
+    assert_eq!(n.len(), 0);
+    // let's make sure it's working properly, since we
+    // made some direct changes to private members
+    n.push_back(3);
+    assert_eq!(n.len(), 1);
+    assert_eq!(n.pop_front(), Some(3));
+    check_links(&n);
+}
+
+#[test]
+fn test_insert_prev() {
+    let mut m = list_from(&[0, 2, 4, 6, 8]);
+    let len = m.len();
+    {
+        let mut it = m.iter_mut();
+        it.insert_next(-2);
+        loop {
+            match it.next() {
+                None => break,
+                Some(elt) => {
+                    it.insert_next(*elt + 1);
+                    match it.peek_next() {
+                        Some(x) => assert_eq!(*x, *elt + 2),
+                        None => assert_eq!(8, *elt),
+                    }
+                }
+            }
+        }
+        it.insert_next(0);
+        it.insert_next(1);
+    }
+    check_links(&m);
+    assert_eq!(m.len(), 3 + len * 2);
+    assert_eq!(m.into_iter().collect::<Vec<_>>(),
+                [-2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]);
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+#[cfg(not(miri))] // Miri does not support threads
+fn test_send() {
+    let n = list_from(&[1, 2, 3]);
+    thread::spawn(move || {
+            check_links(&n);
+            let a: &[_] = &[&1, &2, &3];
+            assert_eq!(a, &*n.iter().collect::<Vec<_>>());
+        })
+        .join()
+        .ok()
+        .unwrap();
+}
+
+#[test]
+fn test_fuzz() {
+    for _ in 0..25 {
+        fuzz_test(3);
+        fuzz_test(16);
+        #[cfg(not(miri))] // Miri is too slow
+        fuzz_test(189);
+    }
+}
+
+#[test]
+fn test_26021() {
+    // There was a bug in split_off that failed to null out the RHS's head's prev ptr.
+    // This caused the RHS's dtor to walk up into the LHS at drop and delete all of
+    // its nodes.
+    //
+    // https://github.com/rust-lang/rust/issues/26021
+    let mut v1 = LinkedList::new();
+    v1.push_front(1);
+    v1.push_front(1);
+    v1.push_front(1);
+    v1.push_front(1);
+    let _ = v1.split_off(3); // Dropping this now should not free v1's remaining nodes
+    assert_eq!(v1.len(), 3);
+
+    assert_eq!(v1.iter().len(), 3);
+    assert_eq!(v1.iter().collect::<Vec<_>>().len(), 3);
+}
+
+#[test]
+fn test_split_off() {
+    let mut v1 = LinkedList::new();
+    v1.push_front(1);
+    v1.push_front(1);
+    v1.push_front(1);
+    v1.push_front(1);
+
+    // test all splits
+    for ix in 0..1 + v1.len() {
+        let mut a = v1.clone();
+        let b = a.split_off(ix);
+        check_links(&a);
+        check_links(&b);
+        a.extend(b);
+        assert_eq!(v1, a);
+    }
+}
+
+fn fuzz_test(sz: i32) {
+    let mut m: LinkedList<_> = LinkedList::new();
+    let mut v = vec![];
+    for i in 0..sz {
+        check_links(&m);
+        let r: u8 = thread_rng().next_u32() as u8;
+        match r % 6 {
+            0 => {
+                m.pop_back();
+                v.pop();
+            }
+            1 => {
+                if !v.is_empty() {
+                    m.pop_front();
+                    v.remove(0);
+                }
+            }
+            2 | 4 => {
+                m.push_front(-i);
+                v.insert(0, -i);
+            }
+            3 | 5 | _ => {
+                m.push_back(i);
+                v.push(i);
+            }
+        }
+    }
+
+    check_links(&m);
+
+    let mut i = 0;
+    for (a, &b) in m.into_iter().zip(&v) {
+        i += 1;
+        assert_eq!(a, b);
+    }
+    assert_eq!(i, v.len());
+}
+
+#[test]
+fn drain_filter_test() {
+    let mut m: LinkedList<u32> = LinkedList::new();
+    m.extend(&[1, 2, 3, 4, 5, 6]);
+    let deleted = m.drain_filter(|v| *v < 4).collect::<Vec<_>>();
+
+    check_links(&m);
+
+    assert_eq!(deleted, &[1, 2, 3]);
+    assert_eq!(m.into_iter().collect::<Vec<_>>(), &[4, 5, 6]);
+}
+
+#[test]
+fn drain_to_empty_test() {
+    let mut m: LinkedList<u32> = LinkedList::new();
+    m.extend(&[1, 2, 3, 4, 5, 6]);
+    let deleted = m.drain_filter(|_| true).collect::<Vec<_>>();
+
+    check_links(&m);
+
+    assert_eq!(deleted, &[1, 2, 3, 4, 5, 6]);
+    assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
+}
diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs
index 4e90f783ec6..9240346ace9 100644
--- a/src/liballoc/collections/vec_deque.rs
+++ b/src/liballoc/collections/vec_deque.rs
@@ -7,6 +7,7 @@
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
+use core::array::LengthAtMost32;
 use core::cmp::{self, Ordering};
 use core::fmt;
 use core::iter::{repeat_with, FromIterator, FusedIterator};
@@ -21,6 +22,9 @@ use crate::collections::CollectionAllocErr;
 use crate::raw_vec::RawVec;
 use crate::vec::Vec;
 
+#[cfg(test)]
+mod tests;
+
 const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
 const MINIMUM_CAPACITY: usize = 1; // 2 - 1
 #[cfg(target_pointer_width = "16")]
@@ -96,7 +100,7 @@ impl<T> VecDeque<T> {
             // For zero sized types, we are always at maximum capacity
             MAXIMUM_ZST_CAPACITY
         } else {
-            self.buf.cap()
+            self.buf.capacity()
         }
     }
 
@@ -312,10 +316,10 @@ impl<T> VecDeque<T> {
     }
 
     /// Frobs the head and tail sections around to handle the fact that we
-    /// just reallocated. Unsafe because it trusts old_cap.
+    /// just reallocated. Unsafe because it trusts old_capacity.
     #[inline]
-    unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
-        let new_cap = self.cap();
+    unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
+        let new_capacity = self.cap();
 
         // Move the shortest contiguous section of the ring buffer
         //    T             H
@@ -334,15 +338,15 @@ impl<T> VecDeque<T> {
         if self.tail <= self.head {
             // A
             // Nop
-        } else if self.head < old_cap - self.tail {
+        } else if self.head < old_capacity - self.tail {
             // B
-            self.copy_nonoverlapping(old_cap, 0, self.head);
-            self.head += old_cap;
+            self.copy_nonoverlapping(old_capacity, 0, self.head);
+            self.head += old_capacity;
             debug_assert!(self.head > self.tail);
         } else {
             // C
-            let new_tail = new_cap - (old_cap - self.tail);
-            self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
+            let new_tail = new_capacity - (old_capacity - self.tail);
+            self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
             self.tail = new_tail;
             debug_assert!(self.head < self.tail);
         }
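For concreteness, a worked instance of case C under the renamed variables (values chosen for illustration): with `old_capacity = 8`, `tail = 6`, and `head = 3`, the ring wraps; since `head (3)` is not less than `old_capacity - tail (2)`, the two-element tail block at indices `[6, 8)` is copied to `new_tail = 16 - 2 = 14`, leaving `tail = 14` and `head = 3`, which satisfies the `head < tail` debug assertion.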
@@ -367,7 +371,7 @@ impl<T> VecDeque<T> {
         VecDeque::with_capacity(INITIAL_CAPACITY)
     }
 
-    /// Creates an empty `VecDeque` with space for at least `n` elements.
+    /// Creates an empty `VecDeque` with space for at least `capacity` elements.
     ///
     /// # Examples
     ///
@@ -377,10 +381,10 @@ impl<T> VecDeque<T> {
     /// let vector: VecDeque<u32> = VecDeque::with_capacity(10);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn with_capacity(n: usize) -> VecDeque<T> {
+    pub fn with_capacity(capacity: usize) -> VecDeque<T> {
         // +1 since the ringbuffer always leaves one space empty
-        let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
-        assert!(cap > n, "capacity overflow");
+        let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
+        assert!(cap > capacity, "capacity overflow");
 
         VecDeque {
             tail: 0,
@@ -549,7 +553,7 @@ impl<T> VecDeque<T> {
         if new_cap > old_cap {
             self.buf.reserve_exact(used_cap, new_cap - used_cap);
             unsafe {
-                self.handle_cap_increase(old_cap);
+                self.handle_capacity_increase(old_cap);
             }
         }
     }
@@ -639,7 +643,7 @@ impl<T> VecDeque<T> {
         if new_cap > old_cap {
             self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
             unsafe {
-                self.handle_cap_increase(old_cap);
+                self.handle_capacity_increase(old_cap);
             }
         }
         Ok(())
@@ -1833,8 +1837,8 @@ impl<T> VecDeque<T> {
     /// Retains only the elements specified by the predicate.
     ///
     /// In other words, remove all elements `e` such that `f(&e)` returns false.
-    /// This method operates in place and preserves the order of the retained
-    /// elements.
+    /// This method operates in place, visiting each element exactly once in the
+    /// original order, and preserves the order of the retained elements.
     ///
     /// # Examples
     ///
@@ -1846,6 +1850,20 @@ impl<T> VecDeque<T> {
     /// buf.retain(|&x| x%2 == 0);
     /// assert_eq!(buf, [2, 4]);
     /// ```
+    ///
+    /// The exact order may be useful for tracking external state, like an index.
+    ///
+    /// ```
+    /// use std::collections::VecDeque;
+    ///
+    /// let mut buf = VecDeque::new();
+    /// buf.extend(1..6);
+    ///
+    /// let keep = [false, true, true, false, true];
+    /// let mut i = 0;
+    /// buf.retain(|_| (keep[i], i += 1).0);
+    /// assert_eq!(buf, [2, 3, 5]);
+    /// ```
     #[stable(feature = "vec_deque_retain", since = "1.4.0")]
     pub fn retain<F>(&mut self, mut f: F)
         where F: FnMut(&T) -> bool
@@ -1871,7 +1889,7 @@ impl<T> VecDeque<T> {
             let old_cap = self.cap();
             self.buf.double();
             unsafe {
-                self.handle_cap_increase(old_cap);
+                self.handle_capacity_increase(old_cap);
             }
             debug_assert!(!self.is_full());
         }
@@ -1932,8 +1950,6 @@ impl<T> VecDeque<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(vecdeque_rotate)]
-    ///
     /// use std::collections::VecDeque;
     ///
     /// let mut buf: VecDeque<_> = (0..10).collect();
@@ -1947,7 +1963,7 @@ impl<T> VecDeque<T> {
     /// }
     /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
     /// ```
-    #[unstable(feature = "vecdeque_rotate", issue = "56686")]
+    #[stable(feature = "vecdeque_rotate", since = "1.36.0")]
     pub fn rotate_left(&mut self, mid: usize) {
         assert!(mid <= self.len());
         let k = self.len() - mid;
@@ -1977,8 +1993,6 @@ impl<T> VecDeque<T> {
     /// # Examples
     ///
     /// ```
-    /// #![feature(vecdeque_rotate)]
-    ///
     /// use std::collections::VecDeque;
     ///
     /// let mut buf: VecDeque<_> = (0..10).collect();
@@ -1992,7 +2006,7 @@ impl<T> VecDeque<T> {
     /// }
     /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
     /// ```
-    #[unstable(feature = "vecdeque_rotate", issue = "56686")]
+    #[stable(feature = "vecdeque_rotate", since = "1.36.0")]
     pub fn rotate_right(&mut self, k: usize) {
         assert!(k <= self.len());
         let mid = self.len() - k;
@@ -2194,6 +2208,11 @@ impl<'a, T> Iterator for Iter<'a, T> {
         self.tail = self.head - iter.len();
         final_res
     }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -2307,6 +2326,11 @@ impl<'a, T> Iterator for IterMut<'a, T> {
         accum = front.iter_mut().fold(accum, &mut f);
         back.iter_mut().fold(accum, &mut f)
     }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a mut T> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -2549,13 +2573,14 @@ impl<A: PartialEq> PartialEq for VecDeque<A> {
 impl<A: Eq> Eq for VecDeque<A> {}
 
 macro_rules! __impl_slice_eq1 {
-    ($Lhs: ty, $Rhs: ty) => {
-        __impl_slice_eq1! { $Lhs, $Rhs, Sized }
-    };
-    ($Lhs: ty, $Rhs: ty, $Bound: ident) => {
+    ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
         #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")]
-        impl<A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> {
-            fn eq(&self, other: &$Rhs) -> bool {
+        impl<A, B, $($vars)*> PartialEq<$rhs> for $lhs
+        where
+            A: PartialEq<B>,
+            $($constraints)*
+        {
+            fn eq(&self, other: &$rhs) -> bool {
                 if self.len() != other.len() {
                     return false;
                 }
@@ -2567,26 +2592,12 @@ macro_rules! __impl_slice_eq1 {
     }
 }
 
-__impl_slice_eq1! { VecDeque<A>, Vec<B> }
-__impl_slice_eq1! { VecDeque<A>, &[B] }
-__impl_slice_eq1! { VecDeque<A>, &mut [B] }
-
-macro_rules! array_impls {
-    ($($N: expr)+) => {
-        $(
-            __impl_slice_eq1! { VecDeque<A>, [B; $N] }
-            __impl_slice_eq1! { VecDeque<A>, &[B; $N] }
-            __impl_slice_eq1! { VecDeque<A>, &mut [B; $N] }
-        )+
-    }
-}
-
-array_impls! {
-     0  1  2  3  4  5  6  7  8  9
-    10 11 12 13 14 15 16 17 18 19
-    20 21 22 23 24 25 26 27 28 29
-    30 31 32
-}
+__impl_slice_eq1! { [] VecDeque<A>, Vec<B>, }
+__impl_slice_eq1! { [] VecDeque<A>, &[B], }
+__impl_slice_eq1! { [] VecDeque<A>, &mut [B], }
+__impl_slice_eq1! { [const N: usize] VecDeque<A>, [B; N], [B; N]: LengthAtMost32 }
+__impl_slice_eq1! { [const N: usize] VecDeque<A>, &[B; N], [B; N]: LengthAtMost32 }
+__impl_slice_eq1! { [const N: usize] VecDeque<A>, &mut [B; N], [B; N]: LengthAtMost32 }
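With the const-generic arms, array comparisons come from one impl per shape instead of the old per-length macro expansion, still bounded by `LengthAtMost32`. A quick check of the generated impls:

    use std::collections::VecDeque;

    fn main() {
        let d: VecDeque<i32> = (1..=3).collect();
        assert_eq!(d, [1, 2, 3]);     // VecDeque<A> == [B; N]
        assert_eq!(d, &[1, 2, 3]);    // VecDeque<A> == &[B; N]
        assert_eq!(d, vec![1, 2, 3]); // VecDeque<A> == Vec<B>
    }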
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A: PartialOrd> PartialOrd for VecDeque<A> {
@@ -2677,9 +2688,7 @@ impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A> Extend<A> for VecDeque<A> {
     fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) {
-        for elt in iter {
-            self.push_back(elt);
-        }
+        iter.into_iter().for_each(move |elt| self.push_back(elt));
     }
 }
 
@@ -2699,6 +2708,14 @@ impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
 
 #[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
 impl<T> From<Vec<T>> for VecDeque<T> {
+    /// Turns a [`Vec<T>`] into a [`VecDeque<T>`].
+    ///
+    /// [`Vec<T>`]: crate::vec::Vec
+    /// [`VecDeque<T>`]: crate::collections::VecDeque
+    ///
+    /// This avoids reallocating where possible, but the conditions for that are
+    /// strict and subject to change, so this shouldn't be relied upon unless the
+    /// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
     fn from(mut other: Vec<T>) -> Self {
         unsafe {
             let other_buf = other.as_mut_ptr();
@@ -2708,9 +2725,9 @@ impl<T> From<Vec<T>> for VecDeque<T> {
 
             // We need to extend the buf if it's not a power of two, too small
             // or doesn't have at least one free space
-            if !buf.cap().is_power_of_two() || (buf.cap() < (MINIMUM_CAPACITY + 1)) ||
-               (buf.cap() == len) {
-                let cap = cmp::max(buf.cap() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
+            if !buf.capacity().is_power_of_two() || (buf.capacity() < (MINIMUM_CAPACITY + 1)) ||
+               (buf.capacity() == len) {
+                let cap = cmp::max(buf.capacity() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
                 buf.reserve_exact(len, cap - len);
             }
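A hedged usage example for the conversion documented above; whether the original buffer is reused depends on the power-of-two and free-slot conditions checked in the code:

    use std::collections::VecDeque;

    fn main() {
        let v = vec![1, 2, 3];
        let d = VecDeque::from(v);
        assert_eq!(d, [1, 2, 3]);
    }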
 
@@ -2725,6 +2742,35 @@ impl<T> From<Vec<T>> for VecDeque<T> {
 
 #[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
 impl<T> From<VecDeque<T>> for Vec<T> {
+    /// Turns a [`VecDeque<T>`] into a [`Vec<T>`].
+    ///
+    /// [`Vec<T>`]: crate::vec::Vec
+    /// [`VecDeque<T>`]: crate::collections::VecDeque
+    ///
+    /// This never needs to reallocate, but does need to do O(n) data movement if
+    /// the circular buffer doesn't happen to be at the beginning of the allocation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::VecDeque;
+    ///
+    /// // This one is O(1).
+    /// let deque: VecDeque<_> = (1..5).collect();
+    /// let ptr = deque.as_slices().0.as_ptr();
+    /// let vec = Vec::from(deque);
+    /// assert_eq!(vec, [1, 2, 3, 4]);
+    /// assert_eq!(vec.as_ptr(), ptr);
+    ///
+    /// // This one needs data rearranging.
+    /// let mut deque: VecDeque<_> = (1..5).collect();
+    /// deque.push_front(9);
+    /// deque.push_front(8);
+    /// let ptr = deque.as_slices().1.as_ptr();
+    /// let vec = Vec::from(deque);
+    /// assert_eq!(vec, [8, 9, 1, 2, 3, 4]);
+    /// assert_eq!(vec.as_ptr(), ptr);
+    /// ```
     fn from(other: VecDeque<T>) -> Self {
         unsafe {
             let buf = other.buf.ptr();
@@ -2793,378 +2839,3 @@ impl<T> From<VecDeque<T>> for Vec<T> {
         }
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use ::test;
-
-    use super::VecDeque;
-
-    #[bench]
-    fn bench_push_back_100(b: &mut test::Bencher) {
-        let mut deq = VecDeque::with_capacity(101);
-        b.iter(|| {
-            for i in 0..100 {
-                deq.push_back(i);
-            }
-            deq.head = 0;
-            deq.tail = 0;
-        })
-    }
-
-    #[bench]
-    fn bench_push_front_100(b: &mut test::Bencher) {
-        let mut deq = VecDeque::with_capacity(101);
-        b.iter(|| {
-            for i in 0..100 {
-                deq.push_front(i);
-            }
-            deq.head = 0;
-            deq.tail = 0;
-        })
-    }
-
-    #[bench]
-    fn bench_pop_back_100(b: &mut test::Bencher) {
-        let mut deq = VecDeque::<i32>::with_capacity(101);
-
-        b.iter(|| {
-            deq.head = 100;
-            deq.tail = 0;
-            while !deq.is_empty() {
-                test::black_box(deq.pop_back());
-            }
-        })
-    }
-
-    #[bench]
-    fn bench_pop_front_100(b: &mut test::Bencher) {
-        let mut deq = VecDeque::<i32>::with_capacity(101);
-
-        b.iter(|| {
-            deq.head = 100;
-            deq.tail = 0;
-            while !deq.is_empty() {
-                test::black_box(deq.pop_front());
-            }
-        })
-    }
-
-    #[test]
-    fn test_swap_front_back_remove() {
-        fn test(back: bool) {
-            // This test checks that every single combination of tail position and length is tested.
-            // Capacity 15 should be large enough to cover every case.
-            let mut tester = VecDeque::with_capacity(15);
-            let usable_cap = tester.capacity();
-            let final_len = usable_cap / 2;
-
-            for len in 0..final_len {
-                let expected: VecDeque<_> = if back {
-                    (0..len).collect()
-                } else {
-                    (0..len).rev().collect()
-                };
-                for tail_pos in 0..usable_cap {
-                    tester.tail = tail_pos;
-                    tester.head = tail_pos;
-                    if back {
-                        for i in 0..len * 2 {
-                            tester.push_front(i);
-                        }
-                        for i in 0..len {
-                            assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
-                        }
-                    } else {
-                        for i in 0..len * 2 {
-                            tester.push_back(i);
-                        }
-                        for i in 0..len {
-                            let idx = tester.len() - 1 - i;
-                            assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
-                        }
-                    }
-                    assert!(tester.tail < tester.cap());
-                    assert!(tester.head < tester.cap());
-                    assert_eq!(tester, expected);
-                }
-            }
-        }
-        test(true);
-        test(false);
-    }
-
-    #[test]
-    fn test_insert() {
-        // This test checks that every single combination of tail position, length, and
-        // insertion position is tested. Capacity 15 should be large enough to cover every case.
-
-        let mut tester = VecDeque::with_capacity(15);
-        // can't guarantee we got 15, so have to get what we got.
-        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
-        // this test isn't covering what it wants to
-        let cap = tester.capacity();
-
-
-        // len is the length *after* insertion
-        for len in 1..cap {
-            // 0, 1, 2, .., len - 1
-            let expected = (0..).take(len).collect::<VecDeque<_>>();
-            for tail_pos in 0..cap {
-                for to_insert in 0..len {
-                    tester.tail = tail_pos;
-                    tester.head = tail_pos;
-                    for i in 0..len {
-                        if i != to_insert {
-                            tester.push_back(i);
-                        }
-                    }
-                    tester.insert(to_insert, to_insert);
-                    assert!(tester.tail < tester.cap());
-                    assert!(tester.head < tester.cap());
-                    assert_eq!(tester, expected);
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn test_remove() {
-        // This test checks that every single combination of tail position, length, and
-        // removal position is tested. Capacity 15 should be large enough to cover every case.
-
-        let mut tester = VecDeque::with_capacity(15);
-        // can't guarantee we got 15, so have to get what we got.
-        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
-        // this test isn't covering what it wants to
-        let cap = tester.capacity();
-
-        // len is the length *after* removal
-        for len in 0..cap - 1 {
-            // 0, 1, 2, .., len - 1
-            let expected = (0..).take(len).collect::<VecDeque<_>>();
-            for tail_pos in 0..cap {
-                for to_remove in 0..=len {
-                    tester.tail = tail_pos;
-                    tester.head = tail_pos;
-                    for i in 0..len {
-                        if i == to_remove {
-                            tester.push_back(1234);
-                        }
-                        tester.push_back(i);
-                    }
-                    if to_remove == len {
-                        tester.push_back(1234);
-                    }
-                    tester.remove(to_remove);
-                    assert!(tester.tail < tester.cap());
-                    assert!(tester.head < tester.cap());
-                    assert_eq!(tester, expected);
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn test_drain() {
-        let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
-
-        let cap = tester.capacity();
-        for len in 0..=cap {
-            for tail in 0..=cap {
-                for drain_start in 0..=len {
-                    for drain_end in drain_start..=len {
-                        tester.tail = tail;
-                        tester.head = tail;
-                        for i in 0..len {
-                            tester.push_back(i);
-                        }
-
-                        // Check that we drain the correct values
-                        let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
-                        let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
-                        assert_eq!(drained, drained_expected);
-
-                        // We shouldn't have changed the capacity or made the
-                        // head or tail out of bounds
-                        assert_eq!(tester.capacity(), cap);
-                        assert!(tester.tail < tester.cap());
-                        assert!(tester.head < tester.cap());
-
-                        // We should see the correct values in the VecDeque
-                        let expected: VecDeque<_> = (0..drain_start)
-                            .chain(drain_end..len)
-                            .collect();
-                        assert_eq!(expected, tester);
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn test_shrink_to_fit() {
-        // This test checks that every single combination of head and tail position,
-        // is tested. Capacity 15 should be large enough to cover every case.
-
-        let mut tester = VecDeque::with_capacity(15);
-        // can't guarantee we got 15, so have to get what we got.
-        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
-        // this test isn't covering what it wants to
-        let cap = tester.capacity();
-        tester.reserve(63);
-        let max_cap = tester.capacity();
-
-        for len in 0..=cap {
-            // 0, 1, 2, .., len - 1
-            let expected = (0..).take(len).collect::<VecDeque<_>>();
-            for tail_pos in 0..=max_cap {
-                tester.tail = tail_pos;
-                tester.head = tail_pos;
-                tester.reserve(63);
-                for i in 0..len {
-                    tester.push_back(i);
-                }
-                tester.shrink_to_fit();
-                assert!(tester.capacity() <= cap);
-                assert!(tester.tail < tester.cap());
-                assert!(tester.head < tester.cap());
-                assert_eq!(tester, expected);
-            }
-        }
-    }
-
-    #[test]
-    fn test_split_off() {
-        // This test checks that every single combination of tail position, length, and
-        // split position is tested. Capacity 15 should be large enough to cover every case.
-
-        let mut tester = VecDeque::with_capacity(15);
-        // can't guarantee we got 15, so have to get what we got.
-        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
-        // this test isn't covering what it wants to
-        let cap = tester.capacity();
-
-        // len is the length *before* splitting
-        for len in 0..cap {
-            // index to split at
-            for at in 0..=len {
-                // 0, 1, 2, .., at - 1 (may be empty)
-                let expected_self = (0..).take(at).collect::<VecDeque<_>>();
-                // at, at + 1, .., len - 1 (may be empty)
-                let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
-
-                for tail_pos in 0..cap {
-                    tester.tail = tail_pos;
-                    tester.head = tail_pos;
-                    for i in 0..len {
-                        tester.push_back(i);
-                    }
-                    let result = tester.split_off(at);
-                    assert!(tester.tail < tester.cap());
-                    assert!(tester.head < tester.cap());
-                    assert!(result.tail < result.cap());
-                    assert!(result.head < result.cap());
-                    assert_eq!(tester, expected_self);
-                    assert_eq!(result, expected_other);
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn test_from_vec() {
-        use crate::vec::Vec;
-        for cap in 0..35 {
-            for len in 0..=cap {
-                let mut vec = Vec::with_capacity(cap);
-                vec.extend(0..len);
-
-                let vd = VecDeque::from(vec.clone());
-                assert!(vd.cap().is_power_of_two());
-                assert_eq!(vd.len(), vec.len());
-                assert!(vd.into_iter().eq(vec));
-            }
-        }
-    }
-
-    #[test]
-    fn test_vec_from_vecdeque() {
-        use crate::vec::Vec;
-
-        fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) {
-            let mut vd = VecDeque::with_capacity(cap);
-            for _ in 0..offset {
-                vd.push_back(0);
-                vd.pop_front();
-            }
-            vd.extend(0..len);
-
-            let vec: Vec<_> = Vec::from(vd.clone());
-            assert_eq!(vec.len(), vd.len());
-            assert!(vec.into_iter().eq(vd));
-        }
-
-        for cap_pwr in 0..7 {
-            // Make capacity as a (2^x)-1, so that the ring size is 2^x
-            let cap = (2i32.pow(cap_pwr) - 1) as usize;
-
-            // In these cases there is enough free space to solve it with copies
-            for len in 0..((cap + 1) / 2) {
-                // Test contiguous cases
-                for offset in 0..(cap - len) {
-                    create_vec_and_test_convert(cap, offset, len)
-                }
-
-                // Test cases where block at end of buffer is bigger than block at start
-                for offset in (cap - len)..(cap - (len / 2)) {
-                    create_vec_and_test_convert(cap, offset, len)
-                }
-
-                // Test cases where block at start of buffer is bigger than block at end
-                for offset in (cap - (len / 2))..cap {
-                    create_vec_and_test_convert(cap, offset, len)
-                }
-            }
-
-            // Now there's not (necessarily) space to straighten the ring with simple copies,
-            // the ring will use swapping when:
-            // (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len))
-            //  right block size  >   free space    &&      left block size       >    free space
-            for len in ((cap + 1) / 2)..cap {
-                // Test contiguous cases
-                for offset in 0..(cap - len) {
-                    create_vec_and_test_convert(cap, offset, len)
-                }
-
-                // Test cases where block at end of buffer is bigger than block at start
-                for offset in (cap - len)..(cap - (len / 2)) {
-                    create_vec_and_test_convert(cap, offset, len)
-                }
-
-                // Test cases where block at start of buffer is bigger than block at end
-                for offset in (cap - (len / 2))..cap {
-                    create_vec_and_test_convert(cap, offset, len)
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn issue_53529() {
-        use crate::boxed::Box;
-
-        let mut dst = VecDeque::new();
-        dst.push_front(Box::new(1));
-        dst.push_front(Box::new(2));
-        assert_eq!(*dst.pop_back().unwrap(), 1);
-
-        let mut src = VecDeque::new();
-        src.push_front(Box::new(2));
-        dst.append(&mut src);
-        for a in dst {
-            assert_eq!(*a, 2);
-        }
-    }
-
-}
diff --git a/src/liballoc/collections/vec_deque/tests.rs b/src/liballoc/collections/vec_deque/tests.rs
new file mode 100644
index 00000000000..d2535239979
--- /dev/null
+++ b/src/liballoc/collections/vec_deque/tests.rs
@@ -0,0 +1,379 @@
+use super::*;
+
+use ::test;
+
+#[bench]
+#[cfg(not(miri))] // Miri does not support benchmarks
+fn bench_push_back_100(b: &mut test::Bencher) {
+    let mut deq = VecDeque::with_capacity(101);
+    b.iter(|| {
+        for i in 0..100 {
+            deq.push_back(i);
+        }
+        deq.head = 0;
+        deq.tail = 0;
+    })
+}
+
+#[bench]
+#[cfg(not(miri))] // Miri does not support benchmarks
+fn bench_push_front_100(b: &mut test::Bencher) {
+    let mut deq = VecDeque::with_capacity(101);
+    b.iter(|| {
+        for i in 0..100 {
+            deq.push_front(i);
+        }
+        deq.head = 0;
+        deq.tail = 0;
+    })
+}
+
+#[bench]
+#[cfg(not(miri))] // Miri does not support benchmarks
+fn bench_pop_back_100(b: &mut test::Bencher) {
+    let mut deq = VecDeque::<i32>::with_capacity(101);
+
+    b.iter(|| {
+        deq.head = 100;
+        deq.tail = 0;
+        while !deq.is_empty() {
+            test::black_box(deq.pop_back());
+        }
+    })
+}
+
+#[bench]
+#[cfg(not(miri))] // Miri does not support benchmarks
+fn bench_pop_front_100(b: &mut test::Bencher) {
+    let mut deq = VecDeque::<i32>::with_capacity(101);
+
+    b.iter(|| {
+        deq.head = 100;
+        deq.tail = 0;
+        while !deq.is_empty() {
+            test::black_box(deq.pop_front());
+        }
+    })
+}
+
+#[test]
+fn test_swap_front_back_remove() {
+    fn test(back: bool) {
+        // This test exercises every combination of tail position and length.
+        // Capacity 15 should be large enough to cover every case.
+        let mut tester = VecDeque::with_capacity(15);
+        let usable_cap = tester.capacity();
+        let final_len = usable_cap / 2;
+
+        for len in 0..final_len {
+            let expected: VecDeque<_> = if back {
+                (0..len).collect()
+            } else {
+                (0..len).rev().collect()
+            };
+            for tail_pos in 0..usable_cap {
+                tester.tail = tail_pos;
+                tester.head = tail_pos;
+                if back {
+                    for i in 0..len * 2 {
+                        tester.push_front(i);
+                    }
+                    for i in 0..len {
+                        assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
+                    }
+                } else {
+                    for i in 0..len * 2 {
+                        tester.push_back(i);
+                    }
+                    for i in 0..len {
+                        let idx = tester.len() - 1 - i;
+                        assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
+                    }
+                }
+                assert!(tester.tail < tester.cap());
+                assert!(tester.head < tester.cap());
+                assert_eq!(tester, expected);
+            }
+        }
+    }
+    test(true);
+    test(false);
+}
+
+#[test]
+fn test_insert() {
+    // This test exercises every combination of tail position, length, and
+    // insertion position. Capacity 15 should be large enough to cover every case.
+
+    let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee a capacity of exactly 15, so use whatever we got.
+    // 15 would be ideal, but the capacity is always 2^k - 1 for some k >= 4,
+    // or else this test isn't covering the cases it means to.
+    let cap = tester.capacity();
+
+
+    // len is the length *after* insertion
+    for len in 1..cap {
+        // 0, 1, 2, .., len - 1
+        let expected = (0..).take(len).collect::<VecDeque<_>>();
+        for tail_pos in 0..cap {
+            for to_insert in 0..len {
+                tester.tail = tail_pos;
+                tester.head = tail_pos;
+                for i in 0..len {
+                    if i != to_insert {
+                        tester.push_back(i);
+                    }
+                }
+                tester.insert(to_insert, to_insert);
+                assert!(tester.tail < tester.cap());
+                assert!(tester.head < tester.cap());
+                assert_eq!(tester, expected);
+            }
+        }
+    }
+}
+
+#[test]
+fn test_remove() {
+    // This test exercises every combination of tail position, length, and
+    // removal position. Capacity 15 should be large enough to cover every case.
+
+    let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee a capacity of exactly 15, so use whatever we got.
+    // 15 would be ideal, but the capacity is always 2^k - 1 for some k >= 4,
+    // or else this test isn't covering the cases it means to.
+    let cap = tester.capacity();
+
+    // len is the length *after* removal
+    for len in 0..cap - 1 {
+        // 0, 1, 2, .., len - 1
+        let expected = (0..).take(len).collect::<VecDeque<_>>();
+        for tail_pos in 0..cap {
+            for to_remove in 0..=len {
+                tester.tail = tail_pos;
+                tester.head = tail_pos;
+                for i in 0..len {
+                    if i == to_remove {
+                        tester.push_back(1234);
+                    }
+                    tester.push_back(i);
+                }
+                if to_remove == len {
+                    tester.push_back(1234);
+                }
+                tester.remove(to_remove);
+                assert!(tester.tail < tester.cap());
+                assert!(tester.head < tester.cap());
+                assert_eq!(tester, expected);
+            }
+        }
+    }
+}
+
+#[test]
+fn test_drain() {
+    let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+    let cap = tester.capacity();
+    for len in 0..=cap {
+        for tail in 0..=cap {
+            for drain_start in 0..=len {
+                for drain_end in drain_start..=len {
+                    tester.tail = tail;
+                    tester.head = tail;
+                    for i in 0..len {
+                        tester.push_back(i);
+                    }
+
+                    // Check that we drain the correct values
+                    let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
+                    let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
+                    assert_eq!(drained, drained_expected);
+
+                    // We shouldn't have changed the capacity or made the
+                    // head or tail out of bounds
+                    assert_eq!(tester.capacity(), cap);
+                    assert!(tester.tail < tester.cap());
+                    assert!(tester.head < tester.cap());
+
+                    // We should see the correct values in the VecDeque
+                    let expected: VecDeque<_> = (0..drain_start)
+                        .chain(drain_end..len)
+                        .collect();
+                    assert_eq!(expected, tester);
+                }
+            }
+        }
+    }
+}
+
+#[test]
+fn test_shrink_to_fit() {
+    // This test exercises every combination of head and tail position.
+    // Capacity 15 should be large enough to cover every case.
+
+    let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee a capacity of exactly 15, so use whatever we got.
+    // 15 would be ideal, but the capacity is always 2^k - 1 for some k >= 4,
+    // or else this test isn't covering the cases it means to.
+    let cap = tester.capacity();
+    tester.reserve(63);
+    let max_cap = tester.capacity();
+
+    for len in 0..=cap {
+        // 0, 1, 2, .., len - 1
+        let expected = (0..).take(len).collect::<VecDeque<_>>();
+        for tail_pos in 0..=max_cap {
+            tester.tail = tail_pos;
+            tester.head = tail_pos;
+            tester.reserve(63);
+            for i in 0..len {
+                tester.push_back(i);
+            }
+            tester.shrink_to_fit();
+            assert!(tester.capacity() <= cap);
+            assert!(tester.tail < tester.cap());
+            assert!(tester.head < tester.cap());
+            assert_eq!(tester, expected);
+        }
+    }
+}
+
+#[test]
+fn test_split_off() {
+    // This test exercises every combination of tail position, length, and
+    // split position. Capacity 15 should be large enough to cover every case.
+
+    let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee a capacity of exactly 15, so use whatever we got.
+    // 15 would be ideal, but the capacity is always 2^k - 1 for some k >= 4,
+    // or else this test isn't covering the cases it means to.
+    let cap = tester.capacity();
+
+    // len is the length *before* splitting
+    for len in 0..cap {
+        // index to split at
+        for at in 0..=len {
+            // 0, 1, 2, .., at - 1 (may be empty)
+            let expected_self = (0..).take(at).collect::<VecDeque<_>>();
+            // at, at + 1, .., len - 1 (may be empty)
+            let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
+
+            for tail_pos in 0..cap {
+                tester.tail = tail_pos;
+                tester.head = tail_pos;
+                for i in 0..len {
+                    tester.push_back(i);
+                }
+                let result = tester.split_off(at);
+                assert!(tester.tail < tester.cap());
+                assert!(tester.head < tester.cap());
+                assert!(result.tail < result.cap());
+                assert!(result.head < result.cap());
+                assert_eq!(tester, expected_self);
+                assert_eq!(result, expected_other);
+            }
+        }
+    }
+}
+
+#[test]
+fn test_from_vec() {
+    use crate::vec::Vec;
+    for cap in 0..35 {
+        for len in 0..=cap {
+            let mut vec = Vec::with_capacity(cap);
+            vec.extend(0..len);
+
+            let vd = VecDeque::from(vec.clone());
+            assert!(vd.cap().is_power_of_two());
+            assert_eq!(vd.len(), vec.len());
+            assert!(vd.into_iter().eq(vec));
+        }
+    }
+}
+
+#[test]
+fn test_vec_from_vecdeque() {
+    use crate::vec::Vec;
+
+    fn create_vec_and_test_convert(capacity: usize, offset: usize, len: usize) {
+        let mut vd = VecDeque::with_capacity(capacity);
+        for _ in 0..offset {
+            vd.push_back(0);
+            vd.pop_front();
+        }
+        vd.extend(0..len);
+
+        let vec: Vec<_> = Vec::from(vd.clone());
+        assert_eq!(vec.len(), vd.len());
+        assert!(vec.into_iter().eq(vd));
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    let max_pwr = 7;
+    #[cfg(miri)]
+    let max_pwr = 5;
+
+    for cap_pwr in 0..max_pwr {
+        // Make the capacity 2^x - 1, so that the ring size is 2^x
+        let cap = (2i32.pow(cap_pwr) - 1) as usize;
+
+        // In these cases there is enough free space to solve it with copies
+        for len in 0..((cap + 1) / 2) {
+            // Test contiguous cases
+            for offset in 0..(cap - len) {
+                create_vec_and_test_convert(cap, offset, len)
+            }
+
+            // Test cases where block at end of buffer is bigger than block at start
+            for offset in (cap - len)..(cap - (len / 2)) {
+                create_vec_and_test_convert(cap, offset, len)
+            }
+
+            // Test cases where block at start of buffer is bigger than block at end
+            for offset in (cap - (len / 2))..cap {
+                create_vec_and_test_convert(cap, offset, len)
+            }
+        }
+
+        // Now there's not (necessarily) enough space to straighten the ring with
+        // simple copies; instead, the ring will use swapping when:
+        // (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len)
+        //  right block size  >   free space    &&      left block size       >    free space
+        for len in ((cap + 1) / 2)..cap {
+            // Test contiguous cases
+            for offset in 0..(cap - len) {
+                create_vec_and_test_convert(cap, offset, len)
+            }
+
+            // Test cases where block at end of buffer is bigger than block at start
+            for offset in (cap - len)..(cap - (len / 2)) {
+                create_vec_and_test_convert(cap, offset, len)
+            }
+
+            // Test cases where block at start of buffer is bigger than block at end
+            for offset in (cap - (len / 2))..cap {
+                create_vec_and_test_convert(cap, offset, len)
+            }
+        }
+    }
+}
+
+#[test]
+fn issue_53529() {
+    use crate::boxed::Box;
+
+    let mut dst = VecDeque::new();
+    dst.push_front(Box::new(1));
+    dst.push_front(Box::new(2));
+    assert_eq!(*dst.pop_back().unwrap(), 1);
+
+    let mut src = VecDeque::new();
+    src.push_front(Box::new(2));
+    dst.append(&mut src);
+    for a in dst {
+        assert_eq!(*a, 2);
+    }
+}
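The exhaustive cases above all reduce to one observable behavior: once the head wraps around, the deque's ring buffer is discontiguous, and Vec::from must straighten it out, by plain copies when there is enough free space and by swapping otherwise. A minimal sketch of that behavior through the public API (the capacity of 7 and the wrap distance of 5 are arbitrary choices):

    use std::collections::VecDeque;

    let mut dq: VecDeque<i32> = VecDeque::with_capacity(7);
    for _ in 0..5 {
        // Advance head and tail so that later pushes wrap around the buffer.
        dq.push_back(0);
        dq.pop_front();
    }
    dq.extend(0..4); // elements now straddle the end of the ring

    let v = Vec::from(dq);
    assert_eq!(v, [0, 1, 2, 3]); // logical order is preserved regardless of layout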
diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs
index d2ba9b00191..68cbc366d7b 100644
--- a/src/liballoc/fmt.rs
+++ b/src/liballoc/fmt.rs
@@ -343,9 +343,10 @@
 //! * `^` - the argument is center-aligned in `width` columns
 //! * `>` - the argument is right-aligned in `width` columns
 //!
-//! Note that alignment may not be implemented by some types. A good way
-//! to ensure padding is applied is to format your input, then use this
-//! resulting string to pad your output.
+//! Note that alignment may not be implemented by some types. In particular, it
+//! is not generally implemented for the `Debug` trait.  A good way to ensure
+//! padding is applied is to format your input, then use this resulting string
+//! to pad your output.
 //!
 //! ## Sign/`#`/`0`
 //!
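A short illustration of the workaround the amended paragraph describes: since Debug output for many types ignores width and alignment, format the value first and then pad the resulting string (the width of 12 is an arbitrary choice):

    let v = vec![1, 2];
    // Alignment may be ignored when formatting `v` directly with `{:?}`,
    // so pad the already-formatted string instead:
    let padded = format!("{:>12}", format!("{:?}", v));
    assert_eq!(padded, "      [1, 2]");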
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 440ce8ac5e8..deea74daa52 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -10,9 +10,9 @@
 //!
 //! ## Boxed values
 //!
-//! The [`Box`](boxed/index.html) type is a smart pointer type. There can
-//! only be one owner of a `Box`, and the owner can decide to mutate the
-//! contents, which live on the heap.
+//! The [`Box`] type is a smart pointer type. There can only be one owner of a
+//! [`Box`], and the owner can decide to mutate the contents, which live on the
+//! heap.
 //!
 //! This type can be sent among threads efficiently as the size of a `Box` value
 //! is the same as that of a pointer. Tree-like data structures are often built
@@ -20,20 +20,20 @@
 //!
 //! ## Reference counted pointers
 //!
-//! The [`Rc`](rc/index.html) type is a non-threadsafe reference-counted pointer
-//! type intended for sharing memory within a thread. An `Rc` pointer wraps a
-//! type, `T`, and only allows access to `&T`, a shared reference.
+//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
+//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
+//! only allows access to `&T`, a shared reference.
 //!
-//! This type is useful when inherited mutability (such as using `Box`) is too
-//! constraining for an application, and is often paired with the `Cell` or
-//! `RefCell` types in order to allow mutation.
+//! This type is useful when inherited mutability (such as using [`Box`]) is too
+//! constraining for an application, and is often paired with the [`Cell`] or
+//! [`RefCell`] types in order to allow mutation.
 //!
 //! ## Atomically reference counted pointers
 //!
-//! The [`Arc`](sync/index.html) type is the threadsafe equivalent of the `Rc`
-//! type. It provides all the same functionality of `Rc`, except it requires
-//! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself
-//! sendable while `Rc<T>` is not.
+//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
+//! provides all the same functionality of [`Rc`], except it requires that the
+//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
+//! sendable while [`Rc<T>`][`Rc`] is not.
 //!
 //! This type allows for shared access to the contained data, and is often
 //! paired with synchronization primitives such as mutexes to allow mutation of
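A minimal sketch of the Rc plus RefCell pairing the paragraph above mentions, shared ownership within one thread combined with interior mutability (the names here are illustrative only):

    use std::cell::RefCell;
    use std::rc::Rc;

    let shared = Rc::new(RefCell::new(vec![1, 2]));
    let alias = Rc::clone(&shared);
    alias.borrow_mut().push(3);                  // mutate through one handle...
    assert_eq!(*shared.borrow(), vec![1, 2, 3]); // ...observe through the other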
@@ -49,24 +49,27 @@
 //!
 //! The [`alloc`](alloc/index.html) module defines the low-level interface to the
 //! default global allocator. It is not compatible with the libc allocator API.
+//!
+//! [`Arc`]: sync/index.html
+//! [`Box`]: boxed/index.html
+//! [`Cell`]: ../core/cell/index.html
+//! [`Rc`]: rc/index.html
+//! [`RefCell`]: ../core/cell/index.html
 
 #![allow(unused_attributes)]
-#![unstable(feature = "alloc",
-            reason = "this library is unlikely to be stabilized in its current \
-                      form or name",
-            issue = "27783")]
+#![stable(feature = "alloc", since = "1.36.0")]
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/",
        issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
        test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))]
 #![no_std]
 #![needs_allocator]
 
-#![deny(rust_2018_idioms)]
-#![allow(explicit_outlives_requirements)]
-
 #![warn(deprecated_in_future)]
-#![warn(intra_doc_link_resolution_failure)]
+#![warn(missing_docs)]
 #![warn(missing_debug_implementations)]
+#![deny(intra_doc_link_resolution_failure)] // rustdoc is run without -D warnings
+#![allow(explicit_outlives_requirements)]
+#![cfg_attr(not(bootstrap), allow(incomplete_features))]
 
 #![cfg_attr(not(test), feature(generator_trait))]
 #![cfg_attr(test, feature(test))]
@@ -79,18 +82,19 @@
 #![feature(box_syntax)]
 #![feature(cfg_target_has_atomic)]
 #![feature(coerce_unsized)]
+#![feature(const_generic_impls_guard)]
+#![feature(const_generics)]
+#![cfg_attr(not(bootstrap), feature(const_in_array_repeat_expressions))]
 #![feature(dispatch_from_dyn)]
 #![feature(core_intrinsics)]
-#![feature(custom_attribute)]
 #![feature(dropck_eyepatch)]
 #![feature(exact_size_is_empty)]
 #![feature(fmt_internals)]
 #![feature(fn_traits)]
 #![feature(fundamental)]
-#![feature(futures_api)]
+#![feature(internal_uninit_const)]
 #![feature(lang_items)]
 #![feature(libc)]
-#![feature(needs_allocator)]
 #![feature(nll)]
 #![feature(optin_builtin_traits)]
 #![feature(pattern)]
@@ -98,6 +102,7 @@
 #![feature(ptr_offset_from)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
+#![feature(slice_from_raw_parts)]
 #![feature(specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
@@ -107,14 +112,16 @@
 #![feature(unboxed_closures)]
 #![feature(unicode_internals)]
 #![feature(unsize)]
+#![feature(unsized_locals)]
 #![feature(allocator_internals)]
 #![feature(on_unimplemented)]
 #![feature(rustc_const_unstable)]
 #![feature(const_vec_new)]
 #![feature(slice_partition_dedup)]
-#![feature(maybe_uninit, maybe_uninit_slice, maybe_uninit_array)]
+#![feature(maybe_uninit_extra, maybe_uninit_slice, maybe_uninit_array)]
 #![feature(alloc_layout_extra)]
 #![feature(try_trait)]
+#![feature(mem_take)]
 
 // Allow testing this library
 
@@ -144,7 +151,7 @@ mod boxed {
     pub use std::boxed::Box;
 }
 #[cfg(test)]
-mod boxed_test;
+mod tests;
 pub mod collections;
 #[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))]
 pub mod sync;
diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs
index dd128e096f9..250c419c531 100644
--- a/src/liballoc/macros.rs
+++ b/src/liballoc/macros.rs
@@ -42,7 +42,7 @@ macro_rules! vec {
     ($($x:expr),*) => (
         <[_]>::into_vec(box [$($x),*])
     );
-    ($($x:expr,)*) => (vec![$($x),*])
+    ($($x:expr,)*) => ($crate::vec![$($x),*])
 }
 
 // HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
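The $crate::vec! change above matters for macro hygiene: the trailing-comma arm recurses into vec!, and without the $crate:: path that recursion would resolve in the caller's namespace. A hedged sketch of the failure mode it prevents, assuming a hypothetical caller crate that rebinds the macro under another name:

    // Hypothetical caller (2018 edition); `vec!` is only in scope as `make_vec!`.
    use std::vec as make_vec;

    // The trailing-comma arm expands to `$crate::vec![1, 2, 3]`, which resolves
    // inside the defining crate, so the rebinding at the call site is harmless.
    let xs = make_vec![1, 2, 3,];
    assert_eq!(xs, [1, 2, 3]);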
diff --git a/src/liballoc/prelude.rs b/src/liballoc/prelude.rs
deleted file mode 100644
index 6767cf89f73..00000000000
--- a/src/liballoc/prelude.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-//! The alloc Prelude
-//!
-//! The purpose of this module is to alleviate imports of commonly-used
-//! items of the `alloc` crate by adding a glob import to the top of modules:
-//!
-//! ```
-//! # #![allow(unused_imports)]
-//! # #![feature(alloc)]
-//! extern crate alloc;
-//! use alloc::prelude::*;
-//! ```
-
-#![unstable(feature = "alloc", issue = "27783")]
-
-#[unstable(feature = "alloc", issue = "27783")] pub use crate::borrow::ToOwned;
-#[unstable(feature = "alloc", issue = "27783")] pub use crate::boxed::Box;
-#[unstable(feature = "alloc", issue = "27783")] pub use crate::slice::SliceConcatExt;
-#[unstable(feature = "alloc", issue = "27783")] pub use crate::string::{String, ToString};
-#[unstable(feature = "alloc", issue = "27783")] pub use crate::vec::Vec;
diff --git a/src/liballoc/prelude/mod.rs b/src/liballoc/prelude/mod.rs
new file mode 100644
index 00000000000..0534ad3edc7
--- /dev/null
+++ b/src/liballoc/prelude/mod.rs
@@ -0,0 +1,15 @@
+//! The alloc Prelude
+//!
+//! The purpose of this module is to alleviate imports of commonly-used
+//! items of the `alloc` crate by adding a glob import to the top of modules:
+//!
+//! ```
+//! # #![allow(unused_imports)]
+//! #![feature(alloc_prelude)]
+//! extern crate alloc;
+//! use alloc::prelude::v1::*;
+//! ```
+
+#![unstable(feature = "alloc_prelude", issue = "58935")]
+
+pub mod v1;
diff --git a/src/liballoc/prelude/v1.rs b/src/liballoc/prelude/v1.rs
new file mode 100644
index 00000000000..3cb285bf049
--- /dev/null
+++ b/src/liballoc/prelude/v1.rs
@@ -0,0 +1,10 @@
+//! The first version of the prelude of `alloc` crate.
+//!
+//! See the [module-level documentation](../index.html) for more.
+
+#![unstable(feature = "alloc_prelude", issue = "58935")]
+
+#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::borrow::ToOwned;
+#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::boxed::Box;
+#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::string::{String, ToString};
+#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::vec::Vec;
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index fe28fe5095c..0abab45e920 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -11,6 +11,9 @@ use crate::alloc::{Alloc, Layout, Global, handle_alloc_error};
 use crate::collections::CollectionAllocErr::{self, *};
 use crate::boxed::Box;
 
+#[cfg(test)]
+mod tests;
+
 /// A low-level utility for more ergonomically allocating, reallocating, and deallocating
 /// a buffer of memory on the heap without having to worry about all the corner cases
 /// involved. This type is excellent for building your own data structures like Vec and VecDeque.
@@ -34,7 +37,7 @@ use crate::boxed::Box;
 /// that might occur with zero-sized types.
 ///
 /// However this means that you need to be careful when round-tripping this type
-/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
+/// with a `Box<[T]>`: `capacity()` won't yield the len. However `with_capacity`,
 /// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
 /// field. This allows zero-sized types to not be special-cased by consumers of
 /// this type.
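One concrete consequence of the caveat above, sketched against the renamed accessor (nightly with feature(raw_vec_internals) and extern crate alloc assumed): for a zero-sized element type, capacity() reports usize::MAX no matter what capacity was requested, so it can never be used to recover a length.

    use alloc::raw_vec::RawVec;

    let v: RawVec<()> = RawVec::with_capacity(16);
    // ZSTs never require a real allocation, so the capacity is effectively infinite:
    assert_eq!(v.capacity(), usize::MAX);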
@@ -65,25 +68,25 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// Like `with_capacity` but parameterized over the choice of
     /// allocator for the returned RawVec.
     #[inline]
-    pub fn with_capacity_in(cap: usize, a: A) -> Self {
-        RawVec::allocate_in(cap, false, a)
+    pub fn with_capacity_in(capacity: usize, a: A) -> Self {
+        RawVec::allocate_in(capacity, false, a)
     }
 
     /// Like `with_capacity_zeroed` but parameterized over the choice
     /// of allocator for the returned RawVec.
     #[inline]
-    pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
-        RawVec::allocate_in(cap, true, a)
+    pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self {
+        RawVec::allocate_in(capacity, true, a)
     }
 
-    fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
+    fn allocate_in(capacity: usize, zeroed: bool, mut a: A) -> Self {
         unsafe {
             let elem_size = mem::size_of::<T>();
 
-            let alloc_size = cap.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
+            let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
             alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
 
-            // handles ZSTs and `cap = 0` alike
+            // handles ZSTs and `capacity = 0` alike
             let ptr = if alloc_size == 0 {
                 NonNull::<T>::dangling()
             } else {
@@ -102,7 +105,7 @@ impl<T, A: Alloc> RawVec<T, A> {
 
             RawVec {
                 ptr: ptr.into(),
-                cap,
+                cap: capacity,
                 a,
             }
         }
@@ -120,8 +123,8 @@ impl<T> RawVec<T, Global> {
     }
 
     /// Creates a RawVec (on the system heap) with exactly the
-    /// capacity and alignment requirements for a `[T; cap]`. This is
-    /// equivalent to calling RawVec::new when `cap` is 0 or T is
+    /// capacity and alignment requirements for a `[T; capacity]`. This is
+    /// equivalent to calling RawVec::new when `capacity` is 0 or T is
     /// zero-sized. Note that if `T` is zero-sized this means you will
     /// *not* get a RawVec with the requested capacity!
     ///
@@ -135,14 +138,14 @@ impl<T> RawVec<T, Global> {
     ///
     /// Aborts on OOM
     #[inline]
-    pub fn with_capacity(cap: usize) -> Self {
-        RawVec::allocate_in(cap, false, Global)
+    pub fn with_capacity(capacity: usize) -> Self {
+        RawVec::allocate_in(capacity, false, Global)
     }
 
     /// Like `with_capacity` but guarantees the buffer is zeroed.
     #[inline]
-    pub fn with_capacity_zeroed(cap: usize) -> Self {
-        RawVec::allocate_in(cap, true, Global)
+    pub fn with_capacity_zeroed(capacity: usize) -> Self {
+        RawVec::allocate_in(capacity, true, Global)
     }
 }
 
@@ -154,10 +157,10 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
     /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
     /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
-    pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
+    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
         RawVec {
             ptr: Unique::new_unchecked(ptr),
-            cap,
+            cap: capacity,
             a,
         }
     }
@@ -171,10 +174,10 @@ impl<T> RawVec<T, Global> {
     /// The ptr must be allocated (on the system heap), and with the given capacity. The
     /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
     /// If the ptr and capacity come from a RawVec, then this is guaranteed.
-    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
+    pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
         RawVec {
             ptr: Unique::new_unchecked(ptr),
-            cap,
+            cap: capacity,
             a: Global,
         }
     }
@@ -191,7 +194,7 @@ impl<T> RawVec<T, Global> {
 
 impl<T, A: Alloc> RawVec<T, A> {
     /// Gets a raw pointer to the start of the allocation. Note that this is
-    /// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must
+    /// Unique::empty() if `capacity = 0` or T is zero-sized. In the former case, you must
     /// be careful.
     pub fn ptr(&self) -> *mut T {
         self.ptr.as_ptr()
@@ -201,7 +204,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     ///
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
-    pub fn cap(&self) -> usize {
+    pub fn capacity(&self) -> usize {
         if mem::size_of::<T>() == 0 {
             !0
         } else {
@@ -240,7 +243,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// This function is ideal for when pushing elements one-at-a-time because
     /// you don't need to incur the costs of the more general computations
     /// reserve needs to do to guard against overflow. You do however need to
-    /// manually check if your `len == cap`.
+    /// manually check if your `len == capacity`.
     ///
     /// # Panics
     ///
@@ -256,7 +259,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// # Examples
     ///
     /// ```
-    /// # #![feature(alloc, raw_vec_internals)]
+    /// # #![feature(raw_vec_internals)]
     /// # extern crate alloc;
     /// # use std::ptr;
     /// # use alloc::raw_vec::RawVec;
@@ -267,7 +270,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     ///
     /// impl<T> MyVec<T> {
     ///     pub fn push(&mut self, elem: T) {
-    ///         if self.len == self.buf.cap() { self.buf.double(); }
+    ///         if self.len == self.buf.capacity() { self.buf.double(); }
     ///         // double would have aborted or panicked if the len exceeded
     ///         // `isize::MAX` so this is safe to do unchecked now.
     ///         unsafe {
@@ -381,20 +384,20 @@ impl<T, A: Alloc> RawVec<T, A> {
     }
 
     /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
+    pub fn try_reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize)
            -> Result<(), CollectionAllocErr> {
 
-        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
+        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact)
     }
 
     /// Ensures that the buffer contains at least enough space to hold
-    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
+    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
     /// will reallocate the minimum possible amount of memory necessary.
     /// Generally this will be exactly the amount of memory necessary,
     /// but in principle the allocator is free to give back more than
     /// we asked for.
     ///
-    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
     /// the requested space. This is not really unsafe, but the unsafe
     /// code *you* write that relies on the behavior of this function may break.
     ///
@@ -407,22 +410,23 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// # Aborts
     ///
     /// Aborts on OOM
-    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
-        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
+    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
+        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) {
             Err(CapacityOverflow) => capacity_overflow(),
             Err(AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
          }
      }
 
-    /// Calculates the buffer's new size given that it'll hold `used_cap +
-    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
+    /// Calculates the buffer's new size given that it'll hold `used_capacity +
+    /// needed_extra_capacity` elements. This logic is used in amortized reserve methods.
     /// Returns `(new_capacity, new_alloc_size)`.
-    fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize)
+    fn amortized_new_size(&self, used_capacity: usize, needed_extra_capacity: usize)
         -> Result<usize, CollectionAllocErr> {
 
         // Nothing we can really do about these checks :(
-        let required_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
+        let required_cap = used_capacity.checked_add(needed_extra_capacity)
+            .ok_or(CapacityOverflow)?;
         // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
         let double_cap = self.cap * 2;
         // `double_cap` guarantees exponential growth.
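Worked through in plain arithmetic, the amortized rule picks the larger of the exact requirement and the doubled capacity. This is a sketch of the logic as the comments above describe it, not the method itself:

    fn amortized_new_size(cap: usize, used: usize, extra: usize) -> usize {
        let required = used.checked_add(extra).expect("capacity overflow");
        let double = cap * 2; // cannot overflow, since cap <= isize::MAX
        core::cmp::max(double, required)
    }

    assert_eq!(amortized_new_size(4, 4, 1), 8);   // small requests double: 4 -> 8
    assert_eq!(amortized_new_size(4, 4, 20), 24); // large requests win outright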
@@ -430,18 +434,18 @@ impl<T, A: Alloc> RawVec<T, A> {
     }
 
     /// The same as `reserve`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
+    pub fn try_reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize)
         -> Result<(), CollectionAllocErr> {
-        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
+        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized)
     }
 
     /// Ensures that the buffer contains at least enough space to hold
-    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
+    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
     /// enough capacity, will reallocate enough space plus comfortable slack
     /// space to get amortized `O(1)` behavior. Will limit this behavior
     /// if it would needlessly cause itself to panic.
     ///
-    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
     /// the requested space. This is not really unsafe, but the unsafe
     /// code *you* write that relies on the behavior of this function may break.
     ///
@@ -460,7 +464,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// # Examples
     ///
     /// ```
-    /// # #![feature(alloc, raw_vec_internals)]
+    /// # #![feature(raw_vec_internals)]
     /// # extern crate alloc;
     /// # use std::ptr;
     /// # use alloc::raw_vec::RawVec;
@@ -487,20 +491,20 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// #   vector.push_all(&[1, 3, 5, 7, 9]);
     /// # }
     /// ```
-    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
-        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
+    pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
+        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) {
             Err(CapacityOverflow) => capacity_overflow(),
             Err(AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
         }
     }
     /// Attempts to ensure that the buffer contains at least enough space to hold
-    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
+    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
     /// enough capacity, will reallocate in place enough space plus comfortable slack
     /// space to get amortized `O(1)` behavior. Will limit this behaviour
     /// if it would needlessly cause itself to panic.
     ///
-    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
     /// the requested space. This is not really unsafe, but the unsafe
     /// code *you* write that relies on the behavior of this function may break.
     ///
@@ -511,7 +515,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
     /// * Panics on 32-bit platforms if the requested capacity exceeds
     ///   `isize::MAX` bytes.
-    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
+    pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
         unsafe {
             // NOTE: we don't early branch on ZSTs here because we want this
             // to actually catch "asking for more than usize::MAX" in that case.
@@ -520,20 +524,20 @@ impl<T, A: Alloc> RawVec<T, A> {
 
             // Don't actually need any more capacity. If the current `cap` is 0, we can't
             // reallocate in place.
-            // Wrapping in case they give a bad `used_cap`
+            // Wrapping in case they give a bad `used_capacity`
             let old_layout = match self.current_layout() {
                 Some(layout) => layout,
                 None => return false,
             };
-            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
                 return false;
             }
 
-            let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)
+            let new_cap = self.amortized_new_size(used_capacity, needed_extra_capacity)
                 .unwrap_or_else(|_| capacity_overflow());
 
-            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
-            // (regardless of whether `self.cap - used_cap` wrapped).
+            // Here, `cap < used_capacity + needed_extra_capacity <= new_cap`
+            // (regardless of whether `self.cap - used_capacity` wrapped).
             // Therefore we can safely call grow_in_place.
 
             let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
@@ -632,8 +636,8 @@ use ReserveStrategy::*;
 impl<T, A: Alloc> RawVec<T, A> {
     fn reserve_internal(
         &mut self,
-        used_cap: usize,
-        needed_extra_cap: usize,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
         fallibility: Fallibility,
         strategy: ReserveStrategy,
     ) -> Result<(), CollectionAllocErr> {
@@ -646,15 +650,15 @@ impl<T, A: Alloc> RawVec<T, A> {
             // panic.
 
             // Don't actually need any more capacity.
-            // Wrapping in case they gave a bad `used_cap`.
-            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+            // Wrapping in case they gave a bad `used_capacity`.
+            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
                 return Ok(());
             }
 
             // Nothing we can really do about these checks :(
             let new_cap = match strategy {
-                Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?,
-                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
+                Exact => used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?,
+                Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?,
             };
             let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
 
@@ -685,14 +689,16 @@ impl<T, A: Alloc> RawVec<T, A> {
 impl<T> RawVec<T, Global> {
     /// Converts the entire buffer into `Box<[T]>`.
     ///
-    /// While it is not *strictly* Undefined Behavior to call
-    /// this procedure while some of the RawVec is uninitialized,
-    /// it certainly makes it trivial to trigger it.
-    ///
     /// Note that this will correctly reconstitute any `cap` changes
     /// that may have been performed. (see description of type for details)
+    ///
+    /// # Undefined Behavior
+    ///
+    /// All elements of `RawVec<T, Global>` must be initialized. Notice that
+    /// the rules around uninitialized boxed values are not finalized yet,
+    /// but until they are, it is advisable to avoid them.
     pub unsafe fn into_box(self) -> Box<[T]> {
-        // NOTE: not calling `cap()` here, actually using the real `cap` field!
+        // NOTE: not calling `capacity()` here, actually using the real `cap` field!
         let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
         let output: Box<[T]> = Box::from_raw(slice);
         mem::forget(self);
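A hedged sketch of upholding the initialization requirement spelled out above (nightly with feature(raw_vec_internals) and extern crate alloc assumed; every slot up to the private cap field must be written before the call):

    use alloc::raw_vec::RawVec;
    use core::ptr;

    let buf: RawVec<u32> = RawVec::with_capacity(3); // cap is exactly 3 here
    unsafe {
        for i in 0..3 {
            ptr::write(buf.ptr().add(i), i as u32); // initialize all 3 slots
        }
        let boxed = buf.into_box(); // sound only because the loop above ran
        assert_eq!(&*boxed, &[0, 1, 2]);
    }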
@@ -745,82 +751,3 @@ fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
 fn capacity_overflow() -> ! {
     panic!("capacity overflow")
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn allocator_param() {
-        use crate::alloc::AllocErr;
-
-        // Writing a test of integration between third-party
-        // allocators and RawVec is a little tricky because the RawVec
-        // API does not expose fallible allocation methods, so we
-        // cannot check what happens when allocator is exhausted
-        // (beyond detecting a panic).
-        //
-        // Instead, this just checks that the RawVec methods do at
-        // least go through the Allocator API when it reserves
-        // storage.
-
-        // A dumb allocator that consumes a fixed amount of fuel
-        // before allocation attempts start failing.
-        struct BoundedAlloc { fuel: usize }
-        unsafe impl Alloc for BoundedAlloc {
-            unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
-                let size = layout.size();
-                if size > self.fuel {
-                    return Err(AllocErr);
-                }
-                match Global.alloc(layout) {
-                    ok @ Ok(_) => { self.fuel -= size; ok }
-                    err @ Err(_) => err,
-                }
-            }
-            unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
-                Global.dealloc(ptr, layout)
-            }
-        }
-
-        let a = BoundedAlloc { fuel: 500 };
-        let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
-        assert_eq!(v.a.fuel, 450);
-        v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
-        assert_eq!(v.a.fuel, 250);
-    }
-
-    #[test]
-    fn reserve_does_not_overallocate() {
-        {
-            let mut v: RawVec<u32> = RawVec::new();
-            // First `reserve` allocates like `reserve_exact`
-            v.reserve(0, 9);
-            assert_eq!(9, v.cap());
-        }
-
-        {
-            let mut v: RawVec<u32> = RawVec::new();
-            v.reserve(0, 7);
-            assert_eq!(7, v.cap());
-            // 97 if more than double of 7, so `reserve` should work
-            // like `reserve_exact`.
-            v.reserve(7, 90);
-            assert_eq!(97, v.cap());
-        }
-
-        {
-            let mut v: RawVec<u32> = RawVec::new();
-            v.reserve(0, 12);
-            assert_eq!(12, v.cap());
-            v.reserve(12, 3);
-            // 3 is less than half of 12, so `reserve` must grow
-            // exponentially. At the time of writing this test grow
-            // factor is 2, so new capacity is 24, however, grow factor
-            // of 1.5 is OK too. Hence `>= 18` in assert.
-            assert!(v.cap() >= 12 + 12 / 2);
-        }
-    }
-
-
-}
diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs
new file mode 100644
index 00000000000..c389898d1ef
--- /dev/null
+++ b/src/liballoc/raw_vec/tests.rs
@@ -0,0 +1,73 @@
+use super::*;
+
+#[test]
+fn allocator_param() {
+    use crate::alloc::AllocErr;
+
+    // Writing a test of integration between third-party
+    // allocators and RawVec is a little tricky because the RawVec
+    // API does not expose fallible allocation methods, so we
+    // cannot check what happens when the allocator is exhausted
+    // (beyond detecting a panic).
+    //
+    // Instead, this just checks that the RawVec methods do at
+    // least go through the Allocator API when it reserves
+    // storage.
+
+    // A dumb allocator that consumes a fixed amount of fuel
+    // before allocation attempts start failing.
+    struct BoundedAlloc { fuel: usize }
+    unsafe impl Alloc for BoundedAlloc {
+        unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+            let size = layout.size();
+            if size > self.fuel {
+                return Err(AllocErr);
+            }
+            match Global.alloc(layout) {
+                ok @ Ok(_) => { self.fuel -= size; ok }
+                err @ Err(_) => err,
+            }
+        }
+        unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
+            Global.dealloc(ptr, layout)
+        }
+    }
+
+    let a = BoundedAlloc { fuel: 500 };
+    let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
+    assert_eq!(v.a.fuel, 450);
+    v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
+    assert_eq!(v.a.fuel, 250);
+}
+
+#[test]
+fn reserve_does_not_overallocate() {
+    {
+        let mut v: RawVec<u32> = RawVec::new();
+        // First `reserve` allocates like `reserve_exact`
+        v.reserve(0, 9);
+        assert_eq!(9, v.capacity());
+    }
+
+    {
+        let mut v: RawVec<u32> = RawVec::new();
+        v.reserve(0, 7);
+        assert_eq!(7, v.capacity());
+        // 97 is more than double of 7, so `reserve` should work
+        // like `reserve_exact`.
+        v.reserve(7, 90);
+        assert_eq!(97, v.capacity());
+    }
+
+    {
+        let mut v: RawVec<u32> = RawVec::new();
+        v.reserve(0, 12);
+        assert_eq!(12, v.capacity());
+        v.reserve(12, 3);
+        // 3 is less than half of 12, so `reserve` must grow
+        // exponentially. At the time of writing this test, the growth
+        // factor is 2, so the new capacity is 24; however, a growth
+        // factor of 1.5 is OK too. Hence the `>= 18` in the assert.
+        assert!(v.capacity() >= 12 + 12 / 2);
+    }
+}
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 68eecd97ea1..0c406a92029 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -232,25 +232,30 @@ use crate::boxed::Box;
 use std::boxed::Box;
 
 use core::any::Any;
+use core::array::LengthAtMost32;
 use core::borrow;
 use core::cell::Cell;
 use core::cmp::Ordering;
 use core::fmt;
 use core::hash::{Hash, Hasher};
 use core::intrinsics::abort;
+use core::iter;
 use core::marker::{self, Unpin, Unsize, PhantomData};
-use core::mem::{self, align_of_val, forget, size_of_val};
+use core::mem::{self, align_of, align_of_val, forget, size_of_val};
 use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::from_raw_parts_mut;
-use core::convert::From;
+use core::slice::{self, from_raw_parts_mut};
+use core::convert::{From, TryFrom};
 use core::usize;
 
 use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
 use crate::string::String;
 use crate::vec::Vec;
 
+#[cfg(test)]
+mod tests;
+
 struct RcBox<T: ?Sized> {
     strong: Cell<usize>,
     weak: Cell<usize>,
@@ -286,6 +291,19 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
 #[unstable(feature = "dispatch_from_dyn", issue = "0")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> {}
 
+impl<T: ?Sized> Rc<T> {
+    fn from_inner(ptr: NonNull<RcBox<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
+        Self::from_inner(NonNull::new_unchecked(ptr))
+    }
+}
+
 impl<T> Rc<T> {
     /// Constructs a new `Rc<T>`.
     ///
@@ -298,18 +316,15 @@ impl<T> Rc<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new(value: T) -> Rc<T> {
-        Rc {
-            // there is an implicit weak pointer owned by all the strong
-            // pointers, which ensures that the weak destructor never frees
-            // the allocation while the strong destructor is running, even
-            // if the weak pointer is stored inside the strong one.
-            ptr: Box::into_raw_non_null(box RcBox {
-                strong: Cell::new(1),
-                weak: Cell::new(1),
-                value,
-            }),
-            phantom: PhantomData,
-        }
+        // There is an implicit weak pointer owned by all the strong
+        // pointers, which ensures that the weak destructor never frees
+        // the allocation while the strong destructor is running, even
+        // if the weak pointer is stored inside the strong one.
+        Self::from_inner(Box::into_raw_non_null(box RcBox {
+            strong: Cell::new(1),
+            weak: Cell::new(1),
+            value,
+        }))
     }
 
     /// Constructs a new `Pin<Rc<T>>`. If `T` does not implement `Unpin`, then
@@ -375,9 +390,9 @@ impl<T: ?Sized> Rc<T> {
     /// ```
     /// use std::rc::Rc;
     ///
-    /// let x = Rc::new(10);
+    /// let x = Rc::new("hello".to_owned());
     /// let x_ptr = Rc::into_raw(x);
-    /// assert_eq!(unsafe { *x_ptr }, 10);
+    /// assert_eq!(unsafe { &*x_ptr }, "hello");
     /// ```
     #[stable(feature = "rc_raw", since = "1.17.0")]
     pub fn into_raw(this: Self) -> *const T {
@@ -401,13 +416,13 @@ impl<T: ?Sized> Rc<T> {
     /// ```
     /// use std::rc::Rc;
     ///
-    /// let x = Rc::new(10);
+    /// let x = Rc::new("hello".to_owned());
     /// let x_ptr = Rc::into_raw(x);
     ///
     /// unsafe {
     ///     // Convert back to an `Rc` to prevent leak.
     ///     let x = Rc::from_raw(x_ptr);
-    ///     assert_eq!(*x, 10);
+    ///     assert_eq!(&*x, "hello");
     ///
     ///     // Further calls to `Rc::from_raw(x_ptr)` would be memory unsafe.
     /// }
@@ -416,20 +431,13 @@ impl<T: ?Sized> Rc<T> {
     /// ```
     #[stable(feature = "rc_raw", since = "1.17.0")]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
-        // Align the unsized value to the end of the RcBox.
-        // Because it is ?Sized, it will always be the last field in memory.
-        let align = align_of_val(&*ptr);
-        let layout = Layout::new::<RcBox<()>>();
-        let offset = (layout.size() + layout.padding_needed_for(align)) as isize;
+        let offset = data_offset(ptr);
 
         // Reverse the offset to find the original RcBox.
         let fake_ptr = ptr as *mut RcBox<T>;
         let rc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
 
-        Rc {
-            ptr: NonNull::new_unchecked(rc_ptr),
-            phantom: PhantomData,
-        }
+        Self::from_ptr(rc_ptr)
     }
 
     /// Consumes the `Rc`, returning the wrapped pointer as `NonNull<T>`.
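As a back-of-envelope check of the offset that data_offset now computes (assuming a 64-bit target): RcBox<()> holds two Cell<usize> counters, giving size 16 and alignment 8, and for a T with alignment 8 no extra padding is needed, so the data sits 16 bytes past the start of the RcBox and from_raw walks back by exactly that amount.

    use std::rc::Rc;

    let rc = Rc::new(7u64);
    let raw = Rc::into_raw(rc);            // points 16 bytes into the RcBox
    let rc = unsafe { Rc::from_raw(raw) }; // reverses the data offset
    assert_eq!(*rc, 7);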
@@ -441,10 +449,10 @@ impl<T: ?Sized> Rc<T> {
     ///
     /// use std::rc::Rc;
     ///
-    /// let x = Rc::new(10);
+    /// let x = Rc::new("hello".to_owned());
     /// let ptr = Rc::into_raw_non_null(x);
-    /// let deref = unsafe { *ptr.as_ref() };
-    /// assert_eq!(deref, 10);
+    /// let deref = unsafe { ptr.as_ref() };
+    /// assert_eq!(deref, "hello");
     /// ```
     #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
     #[inline]
@@ -584,15 +592,18 @@ impl<T: ?Sized> Rc<T> {
 impl<T: Clone> Rc<T> {
     /// Makes a mutable reference into the given `Rc`.
     ///
-    /// If there are other `Rc` or [`Weak`][weak] pointers to the same value,
-    /// then `make_mut` will invoke [`clone`][clone] on the inner value to
-    /// ensure unique ownership. This is also referred to as clone-on-write.
+    /// If there are other `Rc` pointers to the same value, then `make_mut` will
+    /// [`clone`] the inner value to ensure unique ownership.  This is also
+    /// referred to as clone-on-write.
     ///
-    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
+    /// If there are no other `Rc` pointers to this value, then [`Weak`]
+    /// pointers to this value will be disassociated.
     ///
-    /// [weak]: struct.Weak.html
-    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
-    /// [get_mut]: struct.Rc.html#method.get_mut
+    /// See also [`get_mut`], which will fail rather than cloning.
+    ///
+    /// [`Weak`]: struct.Weak.html
+    /// [`clone`]: ../../std/clone/trait.Clone.html#tymethod.clone
+    /// [`get_mut`]: struct.Rc.html#method.get_mut
     ///
     /// # Examples
     ///
@@ -611,6 +622,23 @@ impl<T: Clone> Rc<T> {
     /// assert_eq!(*data, 8);
     /// assert_eq!(*other_data, 12);
     /// ```
+    ///
+    /// [`Weak`] pointers will be disassociated:
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let mut data = Rc::new(75);
+    /// let weak = Rc::downgrade(&data);
+    ///
+    /// assert!(75 == *data);
+    /// assert!(75 == *weak.upgrade().unwrap());
+    ///
+    /// *Rc::make_mut(&mut data) += 1;
+    ///
+    /// assert!(76 == *data);
+    /// assert!(weak.upgrade().is_none());
+    /// ```
     #[inline]
     #[stable(feature = "rc_unique", since = "1.4.0")]
     pub fn make_mut(this: &mut Self) -> &mut T {
@@ -667,7 +695,7 @@ impl Rc<dyn Any> {
         if (*self).is::<T>() {
             let ptr = self.ptr.cast::<RcBox<T>>();
             forget(self);
-            Ok(Rc { ptr, phantom: PhantomData })
+            Ok(Rc::from_inner(ptr))
         } else {
             Err(self)
         }
@@ -675,21 +703,29 @@ impl Rc<dyn Any> {
 }
 
 impl<T: ?Sized> Rc<T> {
-    // Allocates an `RcBox<T>` with sufficient space for an unsized value
-    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
-        // Calculate layout using the given value.
+    /// Allocates an `RcBox<T>` with sufficient space for
+    /// an unsized value where the value has the layout provided.
+    ///
+    /// The function `mem_to_rcbox` is called with the data pointer
+    /// and must return a (potentially fat) pointer for the `RcBox<T>`.
+    unsafe fn allocate_for_unsized(
+        value_layout: Layout,
+        mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>
+    ) -> *mut RcBox<T> {
+        // Calculate layout using the given value layout.
         // Previously, layout was calculated on the expression
         // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
         // reference (see #54908).
         let layout = Layout::new::<RcBox<()>>()
-            .extend(Layout::for_value(&*ptr)).unwrap().0
+            .extend(value_layout).unwrap().0
             .pad_to_align().unwrap();
 
+        // Allocate for the layout.
         let mem = Global.alloc(layout)
             .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the RcBox
-        let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>;
+        let inner = mem_to_rcbox(mem.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, Cell::new(1));
@@ -698,6 +734,15 @@ impl<T: ?Sized> Rc<T> {
         inner
     }
 
+    /// Allocates an `RcBox<T>` with sufficient space for an unsized value
+    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
+        // Allocate for the `RcBox<T>` using the given value.
+        Self::allocate_for_unsized(
+            Layout::for_value(&*ptr),
+            |mem| set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>,
+        )
+    }
+
     fn from_box(v: Box<T>) -> Rc<T> {
         unsafe {
             let box_unique = Box::into_unique(v);
@@ -715,44 +760,49 @@ impl<T: ?Sized> Rc<T> {
             // Free the allocation without dropping its contents
             box_free(box_unique);
 
-            Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+            Self::from_ptr(ptr)
         }
     }
 }
 
-// Sets the data pointer of a `?Sized` raw pointer.
-//
-// For a slice/trait object, this sets the `data` field and leaves the rest
-// unchanged. For a sized raw pointer, this simply sets the pointer.
+impl<T> Rc<[T]> {
+    /// Allocates an `RcBox<[T]>` with the given length.
+    unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
+        Self::allocate_for_unsized(
+            Layout::array::<T>(len).unwrap(),
+            |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>,
+        )
+    }
+}
+
+/// Sets the data pointer of a `?Sized` raw pointer.
+///
+/// For a slice/trait object, this sets the `data` field and leaves the rest
+/// unchanged. For a sized raw pointer, this simply sets the pointer.
 unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
     ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
     ptr
 }
 
 impl<T> Rc<[T]> {
-    // Copy elements from slice into newly allocated Rc<[T]>
-    //
-    // Unsafe because the caller must either take ownership or bind `T: Copy`
+    /// Copies elements from a slice into a newly allocated `Rc<[T]>`.
+    ///
+    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
     unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
-        let v_ptr = v as *const [T];
-        let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(v.len());
 
         ptr::copy_nonoverlapping(
             v.as_ptr(),
             &mut (*ptr).value as *mut [T] as *mut T,
             v.len());
 
-        Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+        Self::from_ptr(ptr)
     }
-}
-
-trait RcFromSlice<T> {
-    fn from_slice(slice: &[T]) -> Self;
-}
 
-impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
-    #[inline]
-    default fn from_slice(v: &[T]) -> Self {
+    /// Constructs an `Rc<[T]>` from an iterator known to be of a certain size.
+    ///
+    /// Behavior is undefined should the size be wrong.
+    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
         // Panic guard while cloning T elements.
         // In the event of a panic, elements that have been written
         // into the new RcBox will be dropped, then the memory freed.
@@ -769,37 +819,48 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
                     let slice = from_raw_parts_mut(self.elems, self.n_elems);
                     ptr::drop_in_place(slice);
 
-                    Global.dealloc(self.mem, self.layout.clone());
+                    Global.dealloc(self.mem, self.layout);
                 }
             }
         }
 
-        unsafe {
-            let v_ptr = v as *const [T];
-            let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(len);
 
-            let mem = ptr as *mut _ as *mut u8;
-            let layout = Layout::for_value(&*ptr);
+        let mem = ptr as *mut _ as *mut u8;
+        let layout = Layout::for_value(&*ptr);
 
-            // Pointer to first element
-            let elems = &mut (*ptr).value as *mut [T] as *mut T;
+        // Pointer to first element
+        let elems = &mut (*ptr).value as *mut [T] as *mut T;
 
-            let mut guard = Guard{
-                mem: NonNull::new_unchecked(mem),
-                elems: elems,
-                layout: layout,
-                n_elems: 0,
-            };
+        let mut guard = Guard {
+            mem: NonNull::new_unchecked(mem),
+            elems,
+            layout,
+            n_elems: 0,
+        };
 
-            for (i, item) in v.iter().enumerate() {
-                ptr::write(elems.add(i), item.clone());
-                guard.n_elems += 1;
-            }
+        for (i, item) in iter.enumerate() {
+            ptr::write(elems.add(i), item);
+            guard.n_elems += 1;
+        }
 
-            // All clear. Forget the guard so it doesn't free the new RcBox.
-            forget(guard);
+        // All clear. Forget the guard so it doesn't free the new RcBox.
+        forget(guard);
 
-            Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+        Self::from_ptr(ptr)
+    }
+}
+
+/// Specialization trait used for `From<&[T]>`.
+trait RcFromSlice<T> {
+    fn from_slice(slice: &[T]) -> Self;
+}
+
+impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
+    #[inline]
+    default fn from_slice(v: &[T]) -> Self {
+        unsafe {
+            Self::from_iter_exact(v.iter().cloned(), v.len())
         }
     }
 }
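+
+// A sketch of the `T: Copy` specialization this trait enables (the impl itself
+// falls outside this hunk): per-element cloning is skipped in favor of the
+// `memcpy`-based `copy_from_slice` path above.
+//
+// impl<T: Copy> RcFromSlice<T> for Rc<[T]> {
+//     #[inline]
+//     fn from_slice(v: &[T]) -> Self {
+//         unsafe { Rc::copy_from_slice(v) }
+//     }
+// }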
@@ -891,7 +952,7 @@ impl<T: ?Sized> Clone for Rc<T> {
     #[inline]
     fn clone(&self) -> Rc<T> {
         self.inc_strong();
-        Rc { ptr: self.ptr, phantom: PhantomData }
+        Self::from_inner(self.ptr)
     }
 }
 
@@ -932,6 +993,11 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
     }
 }
 
+/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
+/// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
+/// store large values that are slow to clone and also expensive to compare for equality, so the
+/// pointer-identity shortcut is more likely to pay off. It's also more likely for two `Rc` clones
+/// to point to the same value than for two `&T`s.
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
     #[inline]
@@ -1192,6 +1258,114 @@ impl<T> From<Vec<T>> for Rc<[T]> {
     }
 }
 
+#[unstable(feature = "boxed_slice_try_from", issue = "0")]
+impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]>
+where
+    [T; N]: LengthAtMost32,
+{
+    type Error = Rc<[T]>;
+
+    fn try_from(boxed_slice: Rc<[T]>) -> Result<Self, Self::Error> {
+        if boxed_slice.len() == N {
+            Ok(unsafe { Rc::from_raw(Rc::into_raw(boxed_slice) as *mut [T; N]) })
+        } else {
+            Err(boxed_slice)
+        }
+    }
+}
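+
+// Illustrative use (cf. `test_array_from_slice` in rc/tests.rs below): the
+// conversion succeeds only when the runtime length equals `N`.
+//
+// let r: Rc<[u32]> = Rc::from(vec![1, 2, 3]);
+// let ok: Result<Rc<[u32; 3]>, _> = r.clone().try_into(); // Ok(_)
+// let err: Result<Rc<[u32; 2]>, _> = r.try_into(); // Err(_) hands the Rc<[u32]> back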
+
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Rc<[T]> {
+    /// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
+    ///
+    /// # Performance characteristics
+    ///
+    /// ## The general case
+    ///
+    /// In the general case, collecting into `Rc<[T]>` is done by first
+    /// collecting into a `Vec<T>`. That is, when writing the following:
+    ///
+    /// ```rust
+    /// # use std::rc::Rc;
+    /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// this behaves as if we wrote:
+    ///
+    /// ```rust
+    /// # use std::rc::Rc;
+    /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
+    ///     .into(); // A second allocation for `Rc<[T]>` happens here.
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// This will allocate as many times as needed for constructing the `Vec<T>`
+    /// and then it will allocate once for turning the `Vec<T>` into the `Rc<[T]>`.
+    ///
+    /// ## Iterators of known length
+    ///
+    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+    /// a single allocation will be made for the `Rc<[T]>`. For example:
+    ///
+    /// ```rust
+    /// # use std::rc::Rc;
+    /// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+    /// ```
+    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+        RcFromIter::from_iter(iter.into_iter())
+    }
+}
+
+/// Specialization trait used for collecting into `Rc<[T]>`.
+trait RcFromIter<T, I> {
+    fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
+    default fn from_iter(iter: I) -> Self {
+        iter.collect::<Vec<T>>().into()
+    }
+}
+
+impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]>  {
+    default fn from_iter(iter: I) -> Self {
+        // This is the case for a `TrustedLen` iterator.
+        let (low, high) = iter.size_hint();
+        if let Some(high) = high {
+            debug_assert_eq!(
+                low, high,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+
+            unsafe {
+                // SAFETY: `from_iter_exact` requires the length to be exact; the
+                // `TrustedLen` contract guarantees the iterator yields exactly `low` items here.
+                Rc::from_iter_exact(iter, low)
+            }
+        } else {
+            // Fall back to normal implementation.
+            iter.collect::<Vec<T>>().into()
+        }
+    }
+}
+
+impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
+    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
+        // Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
+        //
+        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
+        // which is even more performant.
+        //
+        // In the fall-back case we have `T: Clone`. This is still better
+        // than the `TrustedLen` implementation as slices have a known length
+        // and so we get to avoid calling `size_hint` and avoid the branching.
+        iter.as_slice().into()
+    }
+}
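+
+// Taken together, the impls above form a specialization ladder:
+// 1. any iterator: collect into a `Vec<T>`, then convert (extra allocation);
+// 2. `TrustedLen` iterators: one exact allocation via `from_iter_exact`;
+// 3. `slice::Iter<'a, T>`: delegate to `From<&[T]>`, which can `memcpy` when `T: Copy`.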
+
 /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
 /// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
@@ -1257,6 +1431,143 @@ impl<T> Weak<T> {
             ptr: NonNull::new(usize::MAX as *mut RcBox<T>).expect("MAX is not 0"),
         }
     }
+
+    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
+    ///
+    /// It is up to the caller to ensure that the object is still alive when accessing it through
+    /// the pointer.
+    ///
+    /// The pointer may be [`null`] or dangling if the object has already been destroyed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(weak_into_raw)]
+    ///
+    /// use std::rc::Rc;
+    /// use std::ptr;
+    ///
+    /// let strong = Rc::new("hello".to_owned());
+    /// let weak = Rc::downgrade(&strong);
+    /// // Both point to the same object
+    /// assert!(ptr::eq(&*strong, weak.as_raw()));
+    /// // The strong here keeps it alive, so we can still access the object.
+    /// assert_eq!("hello", unsafe { &*weak.as_raw() });
+    ///
+    /// drop(strong);
+    /// // But not anymore. We can still call `weak.as_raw()`, but accessing the pointer
+    /// // would lead to undefined behavior.
+    /// // assert_eq!("hello", unsafe { &*weak.as_raw() });
+    /// ```
+    ///
+    /// [`null`]: ../../std/ptr/fn.null.html
+    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    pub fn as_raw(&self) -> *const T {
+        match self.inner() {
+            None => ptr::null(),
+            Some(inner) => {
+                let offset = data_offset_sized::<T>();
+                let ptr = inner as *const RcBox<T>;
+                // Note: while the pointer we create may already point to a dropped value, the
+                // allocation still lives (it must hold the weak count as long as we are alive).
+                // Therefore the offset stays within the allocation and is OK to compute.
+                let ptr = unsafe { (ptr as *const u8).offset(offset) };
+                ptr as *const T
+            }
+        }
+    }
+
+    /// Consumes the `Weak<T>` and turns it into a raw pointer.
+    ///
+    /// This converts the weak pointer into a raw pointer, preserving the original weak count. It
+    /// can be turned back into the `Weak<T>` with [`from_raw`].
+    ///
+    /// The same restrictions of accessing the target of the pointer as with
+    /// [`as_raw`] apply.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(weak_into_raw)]
+    ///
+    /// use std::rc::{Rc, Weak};
+    ///
+    /// let strong = Rc::new("hello".to_owned());
+    /// let weak = Rc::downgrade(&strong);
+    /// let raw = weak.into_raw();
+    ///
+    /// assert_eq!(1, Rc::weak_count(&strong));
+    /// assert_eq!("hello", unsafe { &*raw });
+    ///
+    /// drop(unsafe { Weak::from_raw(raw) });
+    /// assert_eq!(0, Rc::weak_count(&strong));
+    /// ```
+    ///
+    /// [`from_raw`]: struct.Weak.html#method.from_raw
+    /// [`as_raw`]: struct.Weak.html#method.as_raw
+    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    pub fn into_raw(self) -> *const T {
+        let result = self.as_raw();
+        mem::forget(self);
+        result
+    }
+
+    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+    ///
+    /// This can be used to safely get a strong reference (by calling [`upgrade`]
+    /// later) or to release the weak count by dropping the `Weak<T>`.
+    ///
+    /// It takes ownership of one weak count. If a [`null`] pointer is passed, a dangling
+    /// [`Weak`] is returned.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must represent one valid weak count. In other words, it must point to `T` which
+    /// is or *was* managed by an [`Rc`] and the weak count of that [`Rc`] must not have reached
+    /// 0. It is allowed for the strong count to be 0.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(weak_into_raw)]
+    ///
+    /// use std::rc::{Rc, Weak};
+    ///
+    /// let strong = Rc::new("hello".to_owned());
+    ///
+    /// let raw_1 = Rc::downgrade(&strong).into_raw();
+    /// let raw_2 = Rc::downgrade(&strong).into_raw();
+    ///
+    /// assert_eq!(2, Rc::weak_count(&strong));
+    ///
+    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+    /// assert_eq!(1, Rc::weak_count(&strong));
+    ///
+    /// drop(strong);
+    ///
+    /// // Decrement the last weak count.
+    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+    /// ```
+    ///
+    /// [`null`]: ../../std/ptr/fn.null.html
+    /// [`into_raw`]: struct.Weak.html#method.into_raw
+    /// [`upgrade`]: struct.Weak.html#method.upgrade
+    /// [`Rc`]: struct.Rc.html
+    /// [`Weak`]: struct.Weak.html
+    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    pub unsafe fn from_raw(ptr: *const T) -> Self {
+        if ptr.is_null() {
+            Self::new()
+        } else {
+            // See Rc::from_raw for details
+            let offset = data_offset(ptr);
+            let fake_ptr = ptr as *mut RcBox<T>;
+            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+            Weak {
+                ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw"),
+            }
+        }
+    }
 }
 
 pub(crate) fn is_dangling<T: ?Sized>(ptr: NonNull<T>) -> bool {
@@ -1298,7 +1609,7 @@ impl<T: ?Sized> Weak<T> {
             None
         } else {
             inner.inc_strong();
-            Some(Rc { ptr: self.ptr, phantom: PhantomData })
+            Some(Rc::from_inner(self.ptr))
         }
     }
 
@@ -1357,18 +1668,18 @@ impl<T: ?Sized> Weak<T> {
     ///
     /// ```
     /// #![feature(weak_ptr_eq)]
-    /// use std::rc::{Rc, Weak};
+    /// use std::rc::Rc;
     ///
     /// let first_rc = Rc::new(5);
     /// let first = Rc::downgrade(&first_rc);
     /// let second = Rc::downgrade(&first_rc);
     ///
-    /// assert!(Weak::ptr_eq(&first, &second));
+    /// assert!(first.ptr_eq(&second));
     ///
     /// let third_rc = Rc::new(5);
     /// let third = Rc::downgrade(&third_rc);
     ///
-    /// assert!(!Weak::ptr_eq(&first, &third));
+    /// assert!(!first.ptr_eq(&third));
     /// ```
     ///
     /// Comparing `Weak::new`.
@@ -1379,16 +1690,16 @@ impl<T: ?Sized> Weak<T> {
     ///
     /// let first = Weak::new();
     /// let second = Weak::new();
-    /// assert!(Weak::ptr_eq(&first, &second));
+    /// assert!(first.ptr_eq(&second));
     ///
     /// let third_rc = Rc::new(());
     /// let third = Rc::downgrade(&third_rc);
-    /// assert!(!Weak::ptr_eq(&first, &third));
+    /// assert!(!first.ptr_eq(&third));
     /// ```
     #[inline]
     #[unstable(feature = "weak_ptr_eq", issue = "55981")]
-    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
-        this.ptr.as_ptr() == other.ptr.as_ptr()
+    pub fn ptr_eq(&self, other: &Self) -> bool {
+        self.ptr.as_ptr() == other.ptr.as_ptr()
     }
 }
 
@@ -1502,14 +1813,16 @@ trait RcBoxPtr<T: ?Sized> {
 
     #[inline]
     fn inc_strong(&self) {
+        let strong = self.strong();
+
         // We want to abort on overflow instead of dropping the value.
         // The reference count will never be zero when this is called;
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
-        if self.strong() == 0 || self.strong() == usize::max_value() {
+        if strong == 0 || strong == usize::max_value() {
             unsafe { abort(); }
         }
-        self.inner().strong.set(self.strong() + 1);
+        self.inner().strong.set(strong + 1);
     }
 
     #[inline]
@@ -1524,14 +1837,16 @@ trait RcBoxPtr<T: ?Sized> {
 
     #[inline]
     fn inc_weak(&self) {
+        let weak = self.weak();
+
         // We want to abort on overflow instead of dropping the value.
         // The reference count will never be zero when this is called;
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
-        if self.weak() == 0 || self.weak() == usize::max_value() {
+        if weak == 0 || weak == usize::max_value() {
             unsafe { abort(); }
         }
-        self.inner().weak.set(self.weak() + 1);
+        self.inner().weak.set(weak + 1);
     }
 
     #[inline]
@@ -1556,436 +1871,6 @@ impl<T: ?Sized> RcBoxPtr<T> for RcBox<T> {
     }
 }
 
-#[cfg(test)]
-mod tests {
-    use super::{Rc, Weak};
-    use std::boxed::Box;
-    use std::cell::RefCell;
-    use std::option::Option::{self, None, Some};
-    use std::result::Result::{Err, Ok};
-    use std::mem::drop;
-    use std::clone::Clone;
-    use std::convert::From;
-
-    #[test]
-    fn test_clone() {
-        let x = Rc::new(RefCell::new(5));
-        let y = x.clone();
-        *x.borrow_mut() = 20;
-        assert_eq!(*y.borrow(), 20);
-    }
-
-    #[test]
-    fn test_simple() {
-        let x = Rc::new(5);
-        assert_eq!(*x, 5);
-    }
-
-    #[test]
-    fn test_simple_clone() {
-        let x = Rc::new(5);
-        let y = x.clone();
-        assert_eq!(*x, 5);
-        assert_eq!(*y, 5);
-    }
-
-    #[test]
-    fn test_destructor() {
-        let x: Rc<Box<_>> = Rc::new(box 5);
-        assert_eq!(**x, 5);
-    }
-
-    #[test]
-    fn test_live() {
-        let x = Rc::new(5);
-        let y = Rc::downgrade(&x);
-        assert!(y.upgrade().is_some());
-    }
-
-    #[test]
-    fn test_dead() {
-        let x = Rc::new(5);
-        let y = Rc::downgrade(&x);
-        drop(x);
-        assert!(y.upgrade().is_none());
-    }
-
-    #[test]
-    fn weak_self_cyclic() {
-        struct Cycle {
-            x: RefCell<Option<Weak<Cycle>>>,
-        }
-
-        let a = Rc::new(Cycle { x: RefCell::new(None) });
-        let b = Rc::downgrade(&a.clone());
-        *a.x.borrow_mut() = Some(b);
-
-        // hopefully we don't double-free (or leak)...
-    }
-
-    #[test]
-    fn is_unique() {
-        let x = Rc::new(3);
-        assert!(Rc::is_unique(&x));
-        let y = x.clone();
-        assert!(!Rc::is_unique(&x));
-        drop(y);
-        assert!(Rc::is_unique(&x));
-        let w = Rc::downgrade(&x);
-        assert!(!Rc::is_unique(&x));
-        drop(w);
-        assert!(Rc::is_unique(&x));
-    }
-
-    #[test]
-    fn test_strong_count() {
-        let a = Rc::new(0);
-        assert!(Rc::strong_count(&a) == 1);
-        let w = Rc::downgrade(&a);
-        assert!(Rc::strong_count(&a) == 1);
-        let b = w.upgrade().expect("upgrade of live rc failed");
-        assert!(Rc::strong_count(&b) == 2);
-        assert!(Rc::strong_count(&a) == 2);
-        drop(w);
-        drop(a);
-        assert!(Rc::strong_count(&b) == 1);
-        let c = b.clone();
-        assert!(Rc::strong_count(&b) == 2);
-        assert!(Rc::strong_count(&c) == 2);
-    }
-
-    #[test]
-    fn test_weak_count() {
-        let a = Rc::new(0);
-        assert!(Rc::strong_count(&a) == 1);
-        assert!(Rc::weak_count(&a) == 0);
-        let w = Rc::downgrade(&a);
-        assert!(Rc::strong_count(&a) == 1);
-        assert!(Rc::weak_count(&a) == 1);
-        drop(w);
-        assert!(Rc::strong_count(&a) == 1);
-        assert!(Rc::weak_count(&a) == 0);
-        let c = a.clone();
-        assert!(Rc::strong_count(&a) == 2);
-        assert!(Rc::weak_count(&a) == 0);
-        drop(c);
-    }
-
-    #[test]
-    fn weak_counts() {
-        assert_eq!(Weak::weak_count(&Weak::<u64>::new()), None);
-        assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
-
-        let a = Rc::new(0);
-        let w = Rc::downgrade(&a);
-        assert_eq!(Weak::strong_count(&w), 1);
-        assert_eq!(Weak::weak_count(&w), Some(1));
-        let w2 = w.clone();
-        assert_eq!(Weak::strong_count(&w), 1);
-        assert_eq!(Weak::weak_count(&w), Some(2));
-        assert_eq!(Weak::strong_count(&w2), 1);
-        assert_eq!(Weak::weak_count(&w2), Some(2));
-        drop(w);
-        assert_eq!(Weak::strong_count(&w2), 1);
-        assert_eq!(Weak::weak_count(&w2), Some(1));
-        let a2 = a.clone();
-        assert_eq!(Weak::strong_count(&w2), 2);
-        assert_eq!(Weak::weak_count(&w2), Some(1));
-        drop(a2);
-        drop(a);
-        assert_eq!(Weak::strong_count(&w2), 0);
-        assert_eq!(Weak::weak_count(&w2), Some(1));
-        drop(w2);
-    }
-
-    #[test]
-    fn try_unwrap() {
-        let x = Rc::new(3);
-        assert_eq!(Rc::try_unwrap(x), Ok(3));
-        let x = Rc::new(4);
-        let _y = x.clone();
-        assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
-        let x = Rc::new(5);
-        let _w = Rc::downgrade(&x);
-        assert_eq!(Rc::try_unwrap(x), Ok(5));
-    }
-
-    #[test]
-    fn into_from_raw() {
-        let x = Rc::new(box "hello");
-        let y = x.clone();
-
-        let x_ptr = Rc::into_raw(x);
-        drop(y);
-        unsafe {
-            assert_eq!(**x_ptr, "hello");
-
-            let x = Rc::from_raw(x_ptr);
-            assert_eq!(**x, "hello");
-
-            assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello"));
-        }
-    }
-
-    #[test]
-    fn test_into_from_raw_unsized() {
-        use std::fmt::Display;
-        use std::string::ToString;
-
-        let rc: Rc<str> = Rc::from("foo");
-
-        let ptr = Rc::into_raw(rc.clone());
-        let rc2 = unsafe { Rc::from_raw(ptr) };
-
-        assert_eq!(unsafe { &*ptr }, "foo");
-        assert_eq!(rc, rc2);
-
-        let rc: Rc<dyn Display> = Rc::new(123);
-
-        let ptr = Rc::into_raw(rc.clone());
-        let rc2 = unsafe { Rc::from_raw(ptr) };
-
-        assert_eq!(unsafe { &*ptr }.to_string(), "123");
-        assert_eq!(rc2.to_string(), "123");
-    }
-
-    #[test]
-    fn get_mut() {
-        let mut x = Rc::new(3);
-        *Rc::get_mut(&mut x).unwrap() = 4;
-        assert_eq!(*x, 4);
-        let y = x.clone();
-        assert!(Rc::get_mut(&mut x).is_none());
-        drop(y);
-        assert!(Rc::get_mut(&mut x).is_some());
-        let _w = Rc::downgrade(&x);
-        assert!(Rc::get_mut(&mut x).is_none());
-    }
-
-    #[test]
-    fn test_cowrc_clone_make_unique() {
-        let mut cow0 = Rc::new(75);
-        let mut cow1 = cow0.clone();
-        let mut cow2 = cow1.clone();
-
-        assert!(75 == *Rc::make_mut(&mut cow0));
-        assert!(75 == *Rc::make_mut(&mut cow1));
-        assert!(75 == *Rc::make_mut(&mut cow2));
-
-        *Rc::make_mut(&mut cow0) += 1;
-        *Rc::make_mut(&mut cow1) += 2;
-        *Rc::make_mut(&mut cow2) += 3;
-
-        assert!(76 == *cow0);
-        assert!(77 == *cow1);
-        assert!(78 == *cow2);
-
-        // none should point to the same backing memory
-        assert!(*cow0 != *cow1);
-        assert!(*cow0 != *cow2);
-        assert!(*cow1 != *cow2);
-    }
-
-    #[test]
-    fn test_cowrc_clone_unique2() {
-        let mut cow0 = Rc::new(75);
-        let cow1 = cow0.clone();
-        let cow2 = cow1.clone();
-
-        assert!(75 == *cow0);
-        assert!(75 == *cow1);
-        assert!(75 == *cow2);
-
-        *Rc::make_mut(&mut cow0) += 1;
-
-        assert!(76 == *cow0);
-        assert!(75 == *cow1);
-        assert!(75 == *cow2);
-
-        // cow1 and cow2 should share the same contents
-        // cow0 should have a unique reference
-        assert!(*cow0 != *cow1);
-        assert!(*cow0 != *cow2);
-        assert!(*cow1 == *cow2);
-    }
-
-    #[test]
-    fn test_cowrc_clone_weak() {
-        let mut cow0 = Rc::new(75);
-        let cow1_weak = Rc::downgrade(&cow0);
-
-        assert!(75 == *cow0);
-        assert!(75 == *cow1_weak.upgrade().unwrap());
-
-        *Rc::make_mut(&mut cow0) += 1;
-
-        assert!(76 == *cow0);
-        assert!(cow1_weak.upgrade().is_none());
-    }
-
-    #[test]
-    fn test_show() {
-        let foo = Rc::new(75);
-        assert_eq!(format!("{:?}", foo), "75");
-    }
-
-    #[test]
-    fn test_unsized() {
-        let foo: Rc<[i32]> = Rc::new([1, 2, 3]);
-        assert_eq!(foo, foo.clone());
-    }
-
-    #[test]
-    fn test_from_owned() {
-        let foo = 123;
-        let foo_rc = Rc::from(foo);
-        assert!(123 == *foo_rc);
-    }
-
-    #[test]
-    fn test_new_weak() {
-        let foo: Weak<usize> = Weak::new();
-        assert!(foo.upgrade().is_none());
-    }
-
-    #[test]
-    fn test_ptr_eq() {
-        let five = Rc::new(5);
-        let same_five = five.clone();
-        let other_five = Rc::new(5);
-
-        assert!(Rc::ptr_eq(&five, &same_five));
-        assert!(!Rc::ptr_eq(&five, &other_five));
-    }
-
-    #[test]
-    fn test_from_str() {
-        let r: Rc<str> = Rc::from("foo");
-
-        assert_eq!(&r[..], "foo");
-    }
-
-    #[test]
-    fn test_copy_from_slice() {
-        let s: &[u32] = &[1, 2, 3];
-        let r: Rc<[u32]> = Rc::from(s);
-
-        assert_eq!(&r[..], [1, 2, 3]);
-    }
-
-    #[test]
-    fn test_clone_from_slice() {
-        #[derive(Clone, Debug, Eq, PartialEq)]
-        struct X(u32);
-
-        let s: &[X] = &[X(1), X(2), X(3)];
-        let r: Rc<[X]> = Rc::from(s);
-
-        assert_eq!(&r[..], s);
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_clone_from_slice_panic() {
-        use std::string::{String, ToString};
-
-        struct Fail(u32, String);
-
-        impl Clone for Fail {
-            fn clone(&self) -> Fail {
-                if self.0 == 2 {
-                    panic!();
-                }
-                Fail(self.0, self.1.clone())
-            }
-        }
-
-        let s: &[Fail] = &[
-            Fail(0, "foo".to_string()),
-            Fail(1, "bar".to_string()),
-            Fail(2, "baz".to_string()),
-        ];
-
-        // Should panic, but not cause memory corruption
-        let _r: Rc<[Fail]> = Rc::from(s);
-    }
-
-    #[test]
-    fn test_from_box() {
-        let b: Box<u32> = box 123;
-        let r: Rc<u32> = Rc::from(b);
-
-        assert_eq!(*r, 123);
-    }
-
-    #[test]
-    fn test_from_box_str() {
-        use std::string::String;
-
-        let s = String::from("foo").into_boxed_str();
-        let r: Rc<str> = Rc::from(s);
-
-        assert_eq!(&r[..], "foo");
-    }
-
-    #[test]
-    fn test_from_box_slice() {
-        let s = vec![1, 2, 3].into_boxed_slice();
-        let r: Rc<[u32]> = Rc::from(s);
-
-        assert_eq!(&r[..], [1, 2, 3]);
-    }
-
-    #[test]
-    fn test_from_box_trait() {
-        use std::fmt::Display;
-        use std::string::ToString;
-
-        let b: Box<dyn Display> = box 123;
-        let r: Rc<dyn Display> = Rc::from(b);
-
-        assert_eq!(r.to_string(), "123");
-    }
-
-    #[test]
-    fn test_from_box_trait_zero_sized() {
-        use std::fmt::Debug;
-
-        let b: Box<dyn Debug> = box ();
-        let r: Rc<dyn Debug> = Rc::from(b);
-
-        assert_eq!(format!("{:?}", r), "()");
-    }
-
-    #[test]
-    fn test_from_vec() {
-        let v = vec![1, 2, 3];
-        let r: Rc<[u32]> = Rc::from(v);
-
-        assert_eq!(&r[..], [1, 2, 3]);
-    }
-
-    #[test]
-    fn test_downcast() {
-        use std::any::Any;
-
-        let r1: Rc<dyn Any> = Rc::new(i32::max_value());
-        let r2: Rc<dyn Any> = Rc::new("abc");
-
-        assert!(r1.clone().downcast::<u32>().is_err());
-
-        let r1i32 = r1.downcast::<i32>();
-        assert!(r1i32.is_ok());
-        assert_eq!(r1i32.unwrap(), Rc::new(i32::max_value()));
-
-        assert!(r2.clone().downcast::<i32>().is_err());
-
-        let r2str = r2.downcast::<&'static str>();
-        assert!(r2str.is_ok());
-        assert_eq!(r2str.unwrap(), Rc::new("abc"));
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> borrow::Borrow<T> for Rc<T> {
     fn borrow(&self) -> &T {
@@ -2002,3 +1887,22 @@ impl<T: ?Sized> AsRef<T> for Rc<T> {
 
 #[stable(feature = "pin", since = "1.33.0")]
 impl<T: ?Sized> Unpin for Rc<T> { }
+
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
+    // Align the unsized value to the end of the `RcBox`.
+    // Because it is ?Sized, it will always be the last field in memory.
+    data_offset_align(align_of_val(&*ptr))
+}
+
+/// Computes the offset of the data field within `RcBox`.
+///
+/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
+fn data_offset_sized<T>() -> isize {
+    data_offset_align(align_of::<T>())
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> isize {
+    let layout = Layout::new::<RcBox<()>>();
+    (layout.size() + layout.padding_needed_for(align)) as isize
+}
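+
+// Worked example (editor's note, assuming a 64-bit target): `RcBox<()>` is just
+// the two `usize` reference counts, i.e. size 16 and align 8. For a `T` with
+// align 8 the data offset is 16; for align 32, `padding_needed_for` adds 16
+// bytes of padding, giving an offset of 32.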
diff --git a/src/liballoc/rc/tests.rs b/src/liballoc/rc/tests.rs
new file mode 100644
index 00000000000..6fd3f909357
--- /dev/null
+++ b/src/liballoc/rc/tests.rs
@@ -0,0 +1,439 @@
+use super::*;
+
+use std::boxed::Box;
+use std::cell::RefCell;
+use std::option::Option::{self, None, Some};
+use std::result::Result::{Err, Ok};
+use std::mem::drop;
+use std::clone::Clone;
+use std::convert::{From, TryInto};
+
+#[test]
+fn test_clone() {
+    let x = Rc::new(RefCell::new(5));
+    let y = x.clone();
+    *x.borrow_mut() = 20;
+    assert_eq!(*y.borrow(), 20);
+}
+
+#[test]
+fn test_simple() {
+    let x = Rc::new(5);
+    assert_eq!(*x, 5);
+}
+
+#[test]
+fn test_simple_clone() {
+    let x = Rc::new(5);
+    let y = x.clone();
+    assert_eq!(*x, 5);
+    assert_eq!(*y, 5);
+}
+
+#[test]
+fn test_destructor() {
+    let x: Rc<Box<_>> = Rc::new(box 5);
+    assert_eq!(**x, 5);
+}
+
+#[test]
+fn test_live() {
+    let x = Rc::new(5);
+    let y = Rc::downgrade(&x);
+    assert!(y.upgrade().is_some());
+}
+
+#[test]
+fn test_dead() {
+    let x = Rc::new(5);
+    let y = Rc::downgrade(&x);
+    drop(x);
+    assert!(y.upgrade().is_none());
+}
+
+#[test]
+fn weak_self_cyclic() {
+    struct Cycle {
+        x: RefCell<Option<Weak<Cycle>>>,
+    }
+
+    let a = Rc::new(Cycle { x: RefCell::new(None) });
+    let b = Rc::downgrade(&a.clone());
+    *a.x.borrow_mut() = Some(b);
+
+    // hopefully we don't double-free (or leak)...
+}
+
+#[test]
+fn is_unique() {
+    let x = Rc::new(3);
+    assert!(Rc::is_unique(&x));
+    let y = x.clone();
+    assert!(!Rc::is_unique(&x));
+    drop(y);
+    assert!(Rc::is_unique(&x));
+    let w = Rc::downgrade(&x);
+    assert!(!Rc::is_unique(&x));
+    drop(w);
+    assert!(Rc::is_unique(&x));
+}
+
+#[test]
+fn test_strong_count() {
+    let a = Rc::new(0);
+    assert!(Rc::strong_count(&a) == 1);
+    let w = Rc::downgrade(&a);
+    assert!(Rc::strong_count(&a) == 1);
+    let b = w.upgrade().expect("upgrade of live rc failed");
+    assert!(Rc::strong_count(&b) == 2);
+    assert!(Rc::strong_count(&a) == 2);
+    drop(w);
+    drop(a);
+    assert!(Rc::strong_count(&b) == 1);
+    let c = b.clone();
+    assert!(Rc::strong_count(&b) == 2);
+    assert!(Rc::strong_count(&c) == 2);
+}
+
+#[test]
+fn test_weak_count() {
+    let a = Rc::new(0);
+    assert!(Rc::strong_count(&a) == 1);
+    assert!(Rc::weak_count(&a) == 0);
+    let w = Rc::downgrade(&a);
+    assert!(Rc::strong_count(&a) == 1);
+    assert!(Rc::weak_count(&a) == 1);
+    drop(w);
+    assert!(Rc::strong_count(&a) == 1);
+    assert!(Rc::weak_count(&a) == 0);
+    let c = a.clone();
+    assert!(Rc::strong_count(&a) == 2);
+    assert!(Rc::weak_count(&a) == 0);
+    drop(c);
+}
+
+#[test]
+fn weak_counts() {
+    assert_eq!(Weak::weak_count(&Weak::<u64>::new()), None);
+    assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
+
+    let a = Rc::new(0);
+    let w = Rc::downgrade(&a);
+    assert_eq!(Weak::strong_count(&w), 1);
+    assert_eq!(Weak::weak_count(&w), Some(1));
+    let w2 = w.clone();
+    assert_eq!(Weak::strong_count(&w), 1);
+    assert_eq!(Weak::weak_count(&w), Some(2));
+    assert_eq!(Weak::strong_count(&w2), 1);
+    assert_eq!(Weak::weak_count(&w2), Some(2));
+    drop(w);
+    assert_eq!(Weak::strong_count(&w2), 1);
+    assert_eq!(Weak::weak_count(&w2), Some(1));
+    let a2 = a.clone();
+    assert_eq!(Weak::strong_count(&w2), 2);
+    assert_eq!(Weak::weak_count(&w2), Some(1));
+    drop(a2);
+    drop(a);
+    assert_eq!(Weak::strong_count(&w2), 0);
+    assert_eq!(Weak::weak_count(&w2), Some(1));
+    drop(w2);
+}
+
+#[test]
+fn try_unwrap() {
+    let x = Rc::new(3);
+    assert_eq!(Rc::try_unwrap(x), Ok(3));
+    let x = Rc::new(4);
+    let _y = x.clone();
+    assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
+    let x = Rc::new(5);
+    let _w = Rc::downgrade(&x);
+    assert_eq!(Rc::try_unwrap(x), Ok(5));
+}
+
+#[test]
+fn into_from_raw() {
+    let x = Rc::new(box "hello");
+    let y = x.clone();
+
+    let x_ptr = Rc::into_raw(x);
+    drop(y);
+    unsafe {
+        assert_eq!(**x_ptr, "hello");
+
+        let x = Rc::from_raw(x_ptr);
+        assert_eq!(**x, "hello");
+
+        assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello"));
+    }
+}
+
+#[test]
+fn test_into_from_raw_unsized() {
+    use std::fmt::Display;
+    use std::string::ToString;
+
+    let rc: Rc<str> = Rc::from("foo");
+
+    let ptr = Rc::into_raw(rc.clone());
+    let rc2 = unsafe { Rc::from_raw(ptr) };
+
+    assert_eq!(unsafe { &*ptr }, "foo");
+    assert_eq!(rc, rc2);
+
+    let rc: Rc<dyn Display> = Rc::new(123);
+
+    let ptr = Rc::into_raw(rc.clone());
+    let rc2 = unsafe { Rc::from_raw(ptr) };
+
+    assert_eq!(unsafe { &*ptr }.to_string(), "123");
+    assert_eq!(rc2.to_string(), "123");
+}
+
+#[test]
+fn get_mut() {
+    let mut x = Rc::new(3);
+    *Rc::get_mut(&mut x).unwrap() = 4;
+    assert_eq!(*x, 4);
+    let y = x.clone();
+    assert!(Rc::get_mut(&mut x).is_none());
+    drop(y);
+    assert!(Rc::get_mut(&mut x).is_some());
+    let _w = Rc::downgrade(&x);
+    assert!(Rc::get_mut(&mut x).is_none());
+}
+
+#[test]
+fn test_cowrc_clone_make_unique() {
+    let mut cow0 = Rc::new(75);
+    let mut cow1 = cow0.clone();
+    let mut cow2 = cow1.clone();
+
+    assert!(75 == *Rc::make_mut(&mut cow0));
+    assert!(75 == *Rc::make_mut(&mut cow1));
+    assert!(75 == *Rc::make_mut(&mut cow2));
+
+    *Rc::make_mut(&mut cow0) += 1;
+    *Rc::make_mut(&mut cow1) += 2;
+    *Rc::make_mut(&mut cow2) += 3;
+
+    assert!(76 == *cow0);
+    assert!(77 == *cow1);
+    assert!(78 == *cow2);
+
+    // none should point to the same backing memory
+    assert!(*cow0 != *cow1);
+    assert!(*cow0 != *cow2);
+    assert!(*cow1 != *cow2);
+}
+
+#[test]
+fn test_cowrc_clone_unique2() {
+    let mut cow0 = Rc::new(75);
+    let cow1 = cow0.clone();
+    let cow2 = cow1.clone();
+
+    assert!(75 == *cow0);
+    assert!(75 == *cow1);
+    assert!(75 == *cow2);
+
+    *Rc::make_mut(&mut cow0) += 1;
+
+    assert!(76 == *cow0);
+    assert!(75 == *cow1);
+    assert!(75 == *cow2);
+
+    // cow1 and cow2 should share the same contents
+    // cow0 should have a unique reference
+    assert!(*cow0 != *cow1);
+    assert!(*cow0 != *cow2);
+    assert!(*cow1 == *cow2);
+}
+
+#[test]
+fn test_cowrc_clone_weak() {
+    let mut cow0 = Rc::new(75);
+    let cow1_weak = Rc::downgrade(&cow0);
+
+    assert!(75 == *cow0);
+    assert!(75 == *cow1_weak.upgrade().unwrap());
+
+    *Rc::make_mut(&mut cow0) += 1;
+
+    assert!(76 == *cow0);
+    assert!(cow1_weak.upgrade().is_none());
+}
+
+#[test]
+fn test_show() {
+    let foo = Rc::new(75);
+    assert_eq!(format!("{:?}", foo), "75");
+}
+
+#[test]
+fn test_unsized() {
+    let foo: Rc<[i32]> = Rc::new([1, 2, 3]);
+    assert_eq!(foo, foo.clone());
+}
+
+#[test]
+fn test_from_owned() {
+    let foo = 123;
+    let foo_rc = Rc::from(foo);
+    assert!(123 == *foo_rc);
+}
+
+#[test]
+fn test_new_weak() {
+    let foo: Weak<usize> = Weak::new();
+    assert!(foo.upgrade().is_none());
+}
+
+#[test]
+fn test_ptr_eq() {
+    let five = Rc::new(5);
+    let same_five = five.clone();
+    let other_five = Rc::new(5);
+
+    assert!(Rc::ptr_eq(&five, &same_five));
+    assert!(!Rc::ptr_eq(&five, &other_five));
+}
+
+#[test]
+fn test_from_str() {
+    let r: Rc<str> = Rc::from("foo");
+
+    assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_copy_from_slice() {
+    let s: &[u32] = &[1, 2, 3];
+    let r: Rc<[u32]> = Rc::from(s);
+
+    assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_clone_from_slice() {
+    #[derive(Clone, Debug, Eq, PartialEq)]
+    struct X(u32);
+
+    let s: &[X] = &[X(1), X(2), X(3)];
+    let r: Rc<[X]> = Rc::from(s);
+
+    assert_eq!(&r[..], s);
+}
+
+#[test]
+#[should_panic]
+fn test_clone_from_slice_panic() {
+    use std::string::{String, ToString};
+
+    struct Fail(u32, String);
+
+    impl Clone for Fail {
+        fn clone(&self) -> Fail {
+            if self.0 == 2 {
+                panic!();
+            }
+            Fail(self.0, self.1.clone())
+        }
+    }
+
+    let s: &[Fail] = &[
+        Fail(0, "foo".to_string()),
+        Fail(1, "bar".to_string()),
+        Fail(2, "baz".to_string()),
+    ];
+
+    // Should panic, but not cause memory corruption
+    let _r: Rc<[Fail]> = Rc::from(s);
+}
+
+#[test]
+fn test_from_box() {
+    let b: Box<u32> = box 123;
+    let r: Rc<u32> = Rc::from(b);
+
+    assert_eq!(*r, 123);
+}
+
+#[test]
+fn test_from_box_str() {
+    use std::string::String;
+
+    let s = String::from("foo").into_boxed_str();
+    let r: Rc<str> = Rc::from(s);
+
+    assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_from_box_slice() {
+    let s = vec![1, 2, 3].into_boxed_slice();
+    let r: Rc<[u32]> = Rc::from(s);
+
+    assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_from_box_trait() {
+    use std::fmt::Display;
+    use std::string::ToString;
+
+    let b: Box<dyn Display> = box 123;
+    let r: Rc<dyn Display> = Rc::from(b);
+
+    assert_eq!(r.to_string(), "123");
+}
+
+#[test]
+fn test_from_box_trait_zero_sized() {
+    use std::fmt::Debug;
+
+    let b: Box<dyn Debug> = box ();
+    let r: Rc<dyn Debug> = Rc::from(b);
+
+    assert_eq!(format!("{:?}", r), "()");
+}
+
+#[test]
+fn test_from_vec() {
+    let v = vec![1, 2, 3];
+    let r: Rc<[u32]> = Rc::from(v);
+
+    assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_downcast() {
+    use std::any::Any;
+
+    let r1: Rc<dyn Any> = Rc::new(i32::max_value());
+    let r2: Rc<dyn Any> = Rc::new("abc");
+
+    assert!(r1.clone().downcast::<u32>().is_err());
+
+    let r1i32 = r1.downcast::<i32>();
+    assert!(r1i32.is_ok());
+    assert_eq!(r1i32.unwrap(), Rc::new(i32::max_value()));
+
+    assert!(r2.clone().downcast::<i32>().is_err());
+
+    let r2str = r2.downcast::<&'static str>();
+    assert!(r2str.is_ok());
+    assert_eq!(r2str.unwrap(), Rc::new("abc"));
+}
+
+#[test]
+fn test_array_from_slice() {
+    let v = vec![1, 2, 3];
+    let r: Rc<[u32]> = Rc::from(v);
+
+    let a: Result<Rc<[u32; 3]>, _> = r.clone().try_into();
+    assert!(a.is_ok());
+
+    let a: Result<Rc<[u32; 2]>, _> = r.clone().try_into();
+    assert!(a.is_err());
+}
diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs
index f4b2d463778..881d499c074 100644
--- a/src/liballoc/slice.rs
+++ b/src/liballoc/slice.rs
@@ -123,12 +123,12 @@ pub use core::slice::{RChunks, RChunksMut, RChunksExact, RChunksExactMut};
 ////////////////////////////////////////////////////////////////////////////////
 
 // HACK(japaric) needed for the implementation of `vec!` macro during testing
-// NB see the hack module in this file for more details
+// N.B., see the `hack` module in this file for more details.
 #[cfg(test)]
 pub use hack::into_vec;
 
 // HACK(japaric) needed for the implementation of `Vec::clone` during testing
-// NB see the hack module in this file for more details
+// N.B., see the `hack` module in this file for more details.
 #[cfg(test)]
 pub use hack::to_vec;
 
@@ -137,17 +137,16 @@ pub use hack::to_vec;
 // `core::slice::SliceExt` - we need to supply these functions for the
 // `test_permutations` test
 mod hack {
-    use core::mem;
-
     use crate::boxed::Box;
     use crate::vec::Vec;
     #[cfg(test)]
     use crate::string::ToString;
 
-    pub fn into_vec<T>(mut b: Box<[T]>) -> Vec<T> {
+    pub fn into_vec<T>(b: Box<[T]>) -> Vec<T> {
         unsafe {
-            let xs = Vec::from_raw_parts(b.as_mut_ptr(), b.len(), b.len());
-            mem::forget(b);
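+            // `Box::into_raw` relinquishes the box's ownership before the
+            // `Vec` is built, so the allocation is never claimed by a `Box`
+            // and a `Vec` at once (as it briefly was under the old
+            // `as_mut_ptr` + `mem::forget` pattern).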
+            let len = b.len();
+            let b = Box::into_raw(b);
+            let xs = Vec::from_raw_parts(b as *mut T, len, len);
             xs
         }
     }
@@ -376,7 +375,7 @@ impl<T> [T] {
     pub fn to_vec(&self) -> Vec<T>
         where T: Clone
     {
-        // NB see hack module in this file
+        // N.B., see the `hack` module in this file for more details.
         hack::to_vec(self)
     }
 
@@ -397,7 +396,7 @@ impl<T> [T] {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn into_vec(self: Box<Self>) -> Vec<T> {
-        // NB see hack module in this file
+        // N.B., see the `hack` module in this file for more details.
         hack::into_vec(self)
     }
 
@@ -485,6 +484,57 @@ impl<T> [T] {
         }
         buf
     }
+
+    /// Flattens a slice of `T` into a single value `Self::Output`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(["hello", "world"].concat(), "helloworld");
+    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
+        where Self: Concat<Item>
+    {
+        Concat::concat(self)
+    }
+
+    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+    /// given separator between each.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(["hello", "world"].join(" "), "hello world");
+    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
+    /// ```
+    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
+    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+        where Self: Join<Separator>
+    {
+        Join::join(self, sep)
+    }
+
+    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+    /// given separator between each.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(deprecated)]
+    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
+    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
+    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+        where Self: Join<Separator>
+    {
+        Join::join(self, sep)
+    }
 }
 
 #[lang = "slice_u8_alloc"]
@@ -528,75 +578,84 @@ impl [u8] {
 ////////////////////////////////////////////////////////////////////////////////
 // Extension traits for slices over specific kinds of data
 ////////////////////////////////////////////////////////////////////////////////
-#[unstable(feature = "slice_concat_ext",
-           reason = "trait should not have to exist",
-           issue = "27747")]
-/// An extension trait for concatenating slices
+
+/// Helper trait for [`[T]::concat`](../../std/primitive.slice.html#method.concat).
+///
+/// Note: the `Item` type parameter is not used in this trait,
+/// but it allows impls to be more generic.
+/// Without it, we get this error:
+///
+/// ```error
+/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
+///    --> src/liballoc/slice.rs:608:6
+///     |
+/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
+///     |      ^ unconstrained type parameter
+/// ```
+///
+/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
+/// such that multiple `T` types would apply:
 ///
-/// While this trait is unstable, the methods are stable. `SliceConcatExt` is
-/// included in the [standard library prelude], so you can use [`join()`] and
-/// [`concat()`] as if they existed on `[T]` itself.
+/// ```
+/// # #[allow(dead_code)]
+/// pub struct Foo(Vec<u32>, Vec<String>);
 ///
-/// [standard library prelude]: ../../std/prelude/index.html
-/// [`join()`]: #tymethod.join
-/// [`concat()`]: #tymethod.concat
-pub trait SliceConcatExt<T: ?Sized> {
-    #[unstable(feature = "slice_concat_ext",
-               reason = "trait should not have to exist",
-               issue = "27747")]
+/// impl std::borrow::Borrow<[u32]> for Foo {
+///     fn borrow(&self) -> &[u32] { &self.0 }
+/// }
+///
+/// impl std::borrow::Borrow<[String]> for Foo {
+///     fn borrow(&self) -> &[String] { &self.1 }
+/// }
+/// ```
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Concat<Item: ?Sized> {
+    #[unstable(feature = "slice_concat_trait", issue = "27747")]
     /// The resulting type after concatenation
     type Output;
 
-    /// Flattens a slice of `T` into a single value `Self::Output`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// assert_eq!(["hello", "world"].concat(), "helloworld");
-    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
-    /// ```
-    #[stable(feature = "rust1", since = "1.0.0")]
-    fn concat(&self) -> Self::Output;
+    /// Implementation of [`[T]::concat`](../../std/primitive.slice.html#method.concat)
+    #[unstable(feature = "slice_concat_trait", issue = "27747")]
+    fn concat(slice: &Self) -> Self::Output;
+}
 
-    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
-    /// given separator between each.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// assert_eq!(["hello", "world"].join(" "), "hello world");
-    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
-    /// ```
-    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
-    fn join(&self, sep: &T) -> Self::Output;
+/// Helper trait for [`[T]::join`](../../std/primitive.slice.html#method.join)
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Join<Separator> {
+    #[unstable(feature = "slice_concat_trait", issue = "27747")]
+    /// The resulting type after concatenation
+    type Output;
 
-    #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
-    fn connect(&self, sep: &T) -> Self::Output;
+    /// Implementation of [`[T]::join`](../../std/primitive.slice.html#method.join)
+    #[unstable(feature = "slice_concat_trait", issue = "27747")]
+    fn join(slice: &Self, sep: Separator) -> Self::Output;
 }
 
-#[unstable(feature = "slice_concat_ext",
-           reason = "trait should not have to exist",
-           issue = "27747")]
-impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
     type Output = Vec<T>;
 
-    fn concat(&self) -> Vec<T> {
-        let size = self.iter().map(|slice| slice.borrow().len()).sum();
+    fn concat(slice: &Self) -> Vec<T> {
+        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
         let mut result = Vec::with_capacity(size);
-        for v in self {
+        for v in slice {
             result.extend_from_slice(v.borrow())
         }
         result
     }
+}
 
-    fn join(&self, sep: &T) -> Vec<T> {
-        let mut iter = self.iter();
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
+    type Output = Vec<T>;
+
+    fn join(slice: &Self, sep: &T) -> Vec<T> {
+        let mut iter = slice.iter();
         let first = match iter.next() {
             Some(first) => first,
             None => return vec![],
         };
-        let size = self.iter().map(|slice| slice.borrow().len()).sum::<usize>() + self.len() - 1;
+        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
         let mut result = Vec::with_capacity(size);
         result.extend_from_slice(first.borrow());
 
@@ -606,9 +665,28 @@ impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
         }
         result
     }
+}
 
-    fn connect(&self, sep: &T) -> Vec<T> {
-        self.join(sep)
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
+    type Output = Vec<T>;
+
+    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
+        let mut iter = slice.iter();
+        let first = match iter.next() {
+            Some(first) => first,
+            None => return vec![],
+        };
+        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() +
+            sep.len() * (slice.len() - 1);
+        let mut result = Vec::with_capacity(size);
+        result.extend_from_slice(first.borrow());
+
+        for v in iter {
+            result.extend_from_slice(sep);
+            result.extend_from_slice(v.borrow())
+        }
+        result
     }
 }
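+
+// Worked size example: joining `[[1, 2], [3, 4], [5]]` with `sep = &[0, 0]`
+// reserves 5 element slots plus 2 * (3 - 1) = 4 separator slots, exactly the
+// 9 elements of `[1, 2, 0, 0, 3, 4, 0, 0, 5]`, so `with_capacity` is precise
+// and no reallocation occurs.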
 
diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs
index a36804bddff..9a1342c30d5 100644
--- a/src/liballoc/str.rs
+++ b/src/liballoc/str.rs
@@ -28,7 +28,7 @@
 // It's cleaner to just turn off the unused_imports warning than to fix them.
 #![allow(unused_imports)]
 
-use core::borrow::Borrow;
+use core::borrow::{Borrow, BorrowMut};
 use core::str::pattern::{Pattern, Searcher, ReverseSearcher, DoubleEndedSearcher};
 use core::mem;
 use core::ptr;
@@ -37,7 +37,7 @@ use core::unicode::conversions;
 
 use crate::borrow::ToOwned;
 use crate::boxed::Box;
-use crate::slice::{SliceConcatExt, SliceIndex};
+use crate::slice::{Concat, Join, SliceIndex};
 use crate::string::String;
 use crate::vec::Vec;
 
@@ -68,26 +68,29 @@ pub use core::str::pattern;
 pub use core::str::EncodeUtf16;
 #[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
 pub use core::str::SplitAsciiWhitespace;
+#[stable(feature = "str_escape", since = "1.34.0")]
+pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
 
-#[unstable(feature = "slice_concat_ext",
-           reason = "trait should not have to exist",
-           issue = "27747")]
-impl<S: Borrow<str>> SliceConcatExt<str> for [S] {
+/// Note: `str` in `Concat<str>` is not meaningful here.
+/// This type parameter of the trait only exists to enable another impl.
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Concat<str> for [S] {
     type Output = String;
 
-    fn concat(&self) -> String {
-        self.join("")
+    fn concat(slice: &Self) -> String {
+        Join::join(slice, "")
     }
+}
+
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Join<&str> for [S] {
+    type Output = String;
 
-    fn join(&self, sep: &str) -> String {
+    fn join(slice: &Self, sep: &str) -> String {
         unsafe {
-            String::from_utf8_unchecked( join_generic_copy(self, sep.as_bytes()) )
+            String::from_utf8_unchecked( join_generic_copy(slice, sep.as_bytes()) )
         }
     }
-
-    fn connect(&self, sep: &str) -> String {
-        self.join(sep)
-    }
 }
 
 macro_rules! spezialize_for_lengths {
@@ -128,7 +131,7 @@ macro_rules! copy_slice_and_advance {
 
 // Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
 // Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
-// For this reason SliceConcatExt<T> is not specialized for T: Copy and SliceConcatExt<str> is the
+// For this reason the slice impls of `Concat`/`Join` are not specialized for `T: Copy` and `Join<&str>` is the
 // only user of this function. It is left in place for the time when that is fixed.
 //
 // the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
@@ -188,6 +191,14 @@ impl Borrow<str> for String {
     }
 }
 
+#[stable(feature = "string_borrow_mut", since = "1.36.0")]
+impl BorrowMut<str> for String {
+    #[inline]
+    fn borrow_mut(&mut self) -> &mut str {
+        &mut self[..]
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl ToOwned for str {
     type Owned = String;
@@ -197,7 +208,7 @@ impl ToOwned for str {
     }
 
     fn clone_into(&self, target: &mut String) {
-        let mut b = mem::replace(target, String::new()).into_bytes();
+        let mut b = mem::take(target).into_bytes();
         self.as_bytes().clone_into(&mut b);
         *target = unsafe { String::from_utf8_unchecked(b) }
     }
@@ -421,6 +432,13 @@ impl str {
     ///
     /// assert_eq!(new_year, new_year.to_uppercase());
     /// ```
+    ///
+    /// One character can become multiple:
+    /// ```
+    /// let s = "tschüß";
+    ///
+    /// assert_eq!("TSCHÜSS", s.to_uppercase());
+    /// ```
     #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
     pub fn to_uppercase(&self) -> String {
         let mut s = String::with_capacity(self.len());
@@ -571,4 +589,3 @@ impl str {
 pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
     Box::from_raw(Box::into_raw(v) as *mut str)
 }
-
diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs
index a3e2098695f..eca726cd410 100644
--- a/src/liballoc/string.rs
+++ b/src/liballoc/string.rs
@@ -552,7 +552,7 @@ impl String {
     /// assert_eq!("Hello �World", output);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn from_utf8_lossy<'a>(v: &'a [u8]) -> Cow<'a, str> {
+    pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
         let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
 
         let (first_valid, first_broken) = if let Some(chunk) = iter.next() {
@@ -1200,8 +1200,8 @@ impl String {
     /// Retains only the characters specified by the predicate.
     ///
     /// In other words, remove all characters `c` such that `f(c)` returns `false`.
-    /// This method operates in place and preserves the order of the retained
-    /// characters.
+    /// This method operates in place, visiting each character exactly once in the
+    /// original order, and preserves the order of the retained characters.
     ///
     /// # Examples
     ///
@@ -1212,6 +1212,16 @@ impl String {
     ///
     /// assert_eq!(s, "foobar");
     /// ```
+    ///
+    /// The exact order may be useful for tracking external state, like an index.
+    ///
+    /// ```
+    /// let mut s = String::from("abcde");
+    /// let keep = [false, true, true, false, true];
+    /// let mut i = 0;
+    /// s.retain(|_| (keep[i], i += 1).0);
+    /// assert_eq!(s, "bce");
+    /// ```
     #[inline]
     #[stable(feature = "string_retain", since = "1.26.0")]
     pub fn retain<F>(&mut self, mut f: F)
@@ -1828,6 +1838,7 @@ impl PartialEq for String {
 macro_rules! impl_eq {
     ($lhs:ty, $rhs: ty) => {
         #[stable(feature = "rust1", since = "1.0.0")]
+        #[allow(unused_lifetimes)]
         impl<'a, 'b> PartialEq<$rhs> for $lhs {
             #[inline]
             fn eq(&self, other: &$rhs) -> bool { PartialEq::eq(&self[..], &other[..]) }
@@ -1836,6 +1847,7 @@ macro_rules! impl_eq {
         }
 
         #[stable(feature = "rust1", since = "1.0.0")]
+        #[allow(unused_lifetimes)]
         impl<'a, 'b> PartialEq<$lhs> for $rhs {
             #[inline]
             fn eq(&self, other: &$lhs) -> bool { PartialEq::eq(&self[..], &other[..]) }
@@ -2179,6 +2191,14 @@ impl From<&str> for String {
     }
 }
 
+#[stable(feature = "from_ref_string", since = "1.35.0")]
+impl From<&String> for String {
+    #[inline]
+    fn from(s: &String) -> String {
+        s.clone()
+    }
+}
+
 // note: test pulls in libstd, which causes errors here
 #[cfg(not(test))]
 #[stable(feature = "string_from_box", since = "1.18.0")]
@@ -2367,6 +2387,11 @@ impl Iterator for Drain<'_> {
     fn size_hint(&self) -> (usize, Option<usize>) {
         self.iter.size_hint()
     }
+
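+    // The underlying char iterator is double-ended, so `last` can jump
+    // straight to the final element instead of advancing through the whole
+    // drained range as the default implementation would.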
+    #[inline]
+    fn last(mut self) -> Option<char> {
+        self.next_back()
+    }
 }
 
 #[stable(feature = "drain", since = "1.6.0")]
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index b7d7995b540..7d3b2656a7b 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -7,21 +7,23 @@
 //! [arc]: struct.Arc.html
 
 use core::any::Any;
+use core::array::LengthAtMost32;
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 use core::borrow;
 use core::fmt;
 use core::cmp::{self, Ordering};
+use core::iter;
 use core::intrinsics::abort;
-use core::mem::{self, align_of_val, size_of_val};
+use core::mem::{self, align_of, align_of_val, size_of_val};
 use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
 use core::marker::{Unpin, Unsize, PhantomData};
 use core::hash::{Hash, Hasher};
 use core::{isize, usize};
-use core::convert::From;
-use core::slice::from_raw_parts_mut;
+use core::convert::{From, TryFrom};
+use core::slice::{self, from_raw_parts_mut};
 
 use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
 use crate::boxed::Box;
@@ -29,6 +31,9 @@ use crate::rc::is_dangling;
 use crate::string::String;
 use crate::vec::Vec;
 
+#[cfg(test)]
+mod tests;
+
 /// A soft limit on the amount of references that may be made to an `Arc`.
 ///
 /// Going above this limit will abort your program (although not
@@ -206,6 +211,19 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
 #[unstable(feature = "dispatch_from_dyn", issue = "0")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
 
+impl<T: ?Sized> Arc<T> {
+    fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
+        Self::from_inner(NonNull::new_unchecked(ptr))
+    }
+}
+
 /// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
 /// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
@@ -290,7 +308,7 @@ impl<T> Arc<T> {
             weak: atomic::AtomicUsize::new(1),
             data,
         };
-        Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData }
+        Self::from_inner(Box::into_raw_non_null(x))
     }
 
     /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
@@ -356,9 +374,9 @@ impl<T: ?Sized> Arc<T> {
     /// ```
     /// use std::sync::Arc;
     ///
-    /// let x = Arc::new(10);
+    /// let x = Arc::new("hello".to_owned());
     /// let x_ptr = Arc::into_raw(x);
-    /// assert_eq!(unsafe { *x_ptr }, 10);
+    /// assert_eq!(unsafe { &*x_ptr }, "hello");
     /// ```
     #[stable(feature = "rc_raw", since = "1.17.0")]
     pub fn into_raw(this: Self) -> *const T {
@@ -382,13 +400,13 @@ impl<T: ?Sized> Arc<T> {
     /// ```
     /// use std::sync::Arc;
     ///
-    /// let x = Arc::new(10);
+    /// let x = Arc::new("hello".to_owned());
     /// let x_ptr = Arc::into_raw(x);
     ///
     /// unsafe {
     ///     // Convert back to an `Arc` to prevent leak.
     ///     let x = Arc::from_raw(x_ptr);
-    ///     assert_eq!(*x, 10);
+    ///     assert_eq!(&*x, "hello");
     ///
     ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe.
     /// }
@@ -397,20 +415,13 @@ impl<T: ?Sized> Arc<T> {
     /// ```
     #[stable(feature = "rc_raw", since = "1.17.0")]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
-        // Align the unsized value to the end of the ArcInner.
-        // Because it is ?Sized, it will always be the last field in memory.
-        let align = align_of_val(&*ptr);
-        let layout = Layout::new::<ArcInner<()>>();
-        let offset = (layout.size() + layout.padding_needed_for(align)) as isize;
+        let offset = data_offset(ptr);
 
         // Reverse the offset to find the original ArcInner.
         let fake_ptr = ptr as *mut ArcInner<T>;
         let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
 
-        Arc {
-            ptr: NonNull::new_unchecked(arc_ptr),
-            phantom: PhantomData,
-        }
+        Self::from_ptr(arc_ptr)
     }
 
     /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
@@ -422,10 +433,10 @@ impl<T: ?Sized> Arc<T> {
     ///
     /// use std::sync::Arc;
     ///
-    /// let x = Arc::new(10);
+    /// let x = Arc::new("hello".to_owned());
     /// let ptr = Arc::into_raw_non_null(x);
-    /// let deref = unsafe { *ptr.as_ref() };
-    /// assert_eq!(deref, 10);
+    /// let deref = unsafe { ptr.as_ref() };
+    /// assert_eq!(deref, "hello");
     /// ```
     #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
     #[inline]
@@ -581,21 +592,28 @@ impl<T: ?Sized> Arc<T> {
 }
 
 impl<T: ?Sized> Arc<T> {
-    // Allocates an `ArcInner<T>` with sufficient space for an unsized value
-    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
-        // Calculate layout using the given value.
+    /// Allocates an `ArcInner<T>` with sufficient space for
+    /// an unsized value where the value has the layout provided.
+    ///
+    /// The function `mem_to_arcinner` is called with the data pointer
+    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
+    unsafe fn allocate_for_unsized(
+        value_layout: Layout,
+        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>
+    ) -> *mut ArcInner<T> {
+        // Calculate layout using the given value layout.
         // Previously, layout was calculated on the expression
         // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
         // reference (see #54908).
         let layout = Layout::new::<ArcInner<()>>()
-            .extend(Layout::for_value(&*ptr)).unwrap().0
+            .extend(value_layout).unwrap().0
             .pad_to_align().unwrap();
 
         let mem = Global.alloc(layout)
             .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the ArcInner
-        let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
+        let inner = mem_to_arcinner(mem.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
@@ -604,6 +622,15 @@ impl<T: ?Sized> Arc<T> {
         inner
     }
 
+    /// Allocates an `ArcInner<T>` with sufficient space for an unsized value.
+    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
+        // Allocate for the `ArcInner<T>` using the given value.
+        Self::allocate_for_unsized(
+            Layout::for_value(&*ptr),
+            |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
+        )
+    }
+
     fn from_box(v: Box<T>) -> Arc<T> {
         unsafe {
             let box_unique = Box::into_unique(v);
@@ -621,45 +648,49 @@ impl<T: ?Sized> Arc<T> {
             // Free the allocation without dropping its contents
             box_free(box_unique);
 
-            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+            Self::from_ptr(ptr)
         }
     }
 }
 
-// Sets the data pointer of a `?Sized` raw pointer.
-//
-// For a slice/trait object, this sets the `data` field and leaves the rest
-// unchanged. For a sized raw pointer, this simply sets the pointer.
+impl<T> Arc<[T]> {
+    /// Allocates an `ArcInner<[T]>` with the given length.
+    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
+        Self::allocate_for_unsized(
+            Layout::array::<T>(len).unwrap(),
+            |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+        )
+    }
+}
+
+/// Sets the data pointer of a `?Sized` raw pointer.
+///
+/// For a slice/trait object, this sets the `data` field and leaves the rest
+/// unchanged. For a sized raw pointer, this simply sets the pointer.
 unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
     ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
     ptr
 }
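
As a sketch of what this does for a fat pointer (hypothetical locals, for illustration only): the length metadata of a slice pointer is preserved while its address half is replaced.

    let mut a = [7u32, 8, 9];
    let mut buf = [0u32; 3];
    let fat: *mut [u32] = &mut a[..];                           // address of `a`, length 3
    let moved = unsafe { set_data_ptr(fat, buf.as_mut_ptr()) };
    assert_eq!(unsafe { &*moved }, &[0u32, 0, 0][..]);          // same length, new address
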
 
 impl<T> Arc<[T]> {
-    // Copy elements from slice into newly allocated Arc<[T]>
-    //
-    // Unsafe because the caller must either take ownership or bind `T: Copy`
+    /// Copies elements from a slice into a newly allocated `Arc<[T]>`.
+    ///
+    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
     unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
-        let v_ptr = v as *const [T];
-        let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(v.len());
 
         ptr::copy_nonoverlapping(
             v.as_ptr(),
             &mut (*ptr).data as *mut [T] as *mut T,
             v.len());
 
-        Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+        Self::from_ptr(ptr)
     }
-}
-
-// Specialization trait used for From<&[T]>
-trait ArcFromSlice<T> {
-    fn from_slice(slice: &[T]) -> Self;
-}
 
-impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
-    #[inline]
-    default fn from_slice(v: &[T]) -> Self {
+    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
+    ///
+    /// Behavior is undefined should the size be wrong.
+    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
         // Panic guard while cloning T elements.
         // In the event of a panic, elements that have been written
         // into the new ArcInner will be dropped, then the memory freed.
@@ -676,37 +707,48 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
                     let slice = from_raw_parts_mut(self.elems, self.n_elems);
                     ptr::drop_in_place(slice);
 
-                    Global.dealloc(self.mem.cast(), self.layout.clone());
+                    Global.dealloc(self.mem.cast(), self.layout);
                 }
             }
         }
 
-        unsafe {
-            let v_ptr = v as *const [T];
-            let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(len);
 
-            let mem = ptr as *mut _ as *mut u8;
-            let layout = Layout::for_value(&*ptr);
+        let mem = ptr as *mut _ as *mut u8;
+        let layout = Layout::for_value(&*ptr);
 
-            // Pointer to first element
-            let elems = &mut (*ptr).data as *mut [T] as *mut T;
+        // Pointer to first element
+        let elems = &mut (*ptr).data as *mut [T] as *mut T;
 
-            let mut guard = Guard{
-                mem: NonNull::new_unchecked(mem),
-                elems: elems,
-                layout: layout,
-                n_elems: 0,
-            };
+        let mut guard = Guard {
+            mem: NonNull::new_unchecked(mem),
+            elems,
+            layout,
+            n_elems: 0,
+        };
 
-            for (i, item) in v.iter().enumerate() {
-                ptr::write(elems.add(i), item.clone());
-                guard.n_elems += 1;
-            }
+        for (i, item) in iter.enumerate() {
+            ptr::write(elems.add(i), item);
+            guard.n_elems += 1;
+        }
+
+        // All clear. Forget the guard so it doesn't free the new ArcInner.
+        mem::forget(guard);
 
-            // All clear. Forget the guard so it doesn't free the new ArcInner.
-            mem::forget(guard);
+        Self::from_ptr(ptr)
+    }
+}
 
-            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+/// Specialization trait used for `From<&[T]>`.
+trait ArcFromSlice<T> {
+    fn from_slice(slice: &[T]) -> Self;
+}
+
+impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
+    #[inline]
+    default fn from_slice(v: &[T]) -> Self {
+        unsafe {
+            Self::from_iter_exact(v.iter().cloned(), v.len())
         }
     }
 }
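
The `Guard` in `from_iter_exact` is the standard partial-initialization drop guard: if the iterator panics mid-loop, only the `n_elems` elements already written are dropped before the memory is freed. A minimal, self-contained sketch of the same idiom, assuming a caller-provided `MaybeUninit` buffer (`PartialInit` and `fill_exact` are illustrative names, not APIs from this patch):

    use std::mem::{self, MaybeUninit};
    use std::ptr;

    /// On unwind, drops only the prefix that was actually initialized.
    struct PartialInit<T> {
        ptr: *mut T,
        initialized: usize,
    }

    impl<T> Drop for PartialInit<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::drop_in_place(std::slice::from_raw_parts_mut(self.ptr, self.initialized));
            }
        }
    }

    fn fill_exact<T>(buf: &mut [MaybeUninit<T>], iter: impl Iterator<Item = T>) {
        let ptr = buf.as_mut_ptr() as *mut T;
        let mut guard = PartialInit { ptr, initialized: 0 };
        for (i, item) in iter.enumerate().take(buf.len()) {
            unsafe { ptr::write(ptr.add(i), item) }; // `iter` may panic here
            guard.initialized += 1;
        }
        mem::forget(guard); // success: disarm the cleanup
    }
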
@@ -764,7 +806,7 @@ impl<T: ?Sized> Clone for Arc<T> {
             }
         }
 
-        Arc { ptr: self.ptr, phantom: PhantomData }
+        Self::from_inner(self.ptr)
     }
 }
 
@@ -1043,7 +1085,7 @@ impl Arc<dyn Any + Send + Sync> {
         if (*self).is::<T>() {
             let ptr = self.ptr.cast::<ArcInner<T>>();
             mem::forget(self);
-            Ok(Arc { ptr, phantom: PhantomData })
+            Ok(Arc::from_inner(ptr))
         } else {
             Err(self)
         }
@@ -1071,6 +1113,144 @@ impl<T> Weak<T> {
             ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"),
         }
     }
+
+    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
+    ///
+    /// It is up to the caller to ensure that the object is still alive when accessing it through
+    /// the pointer.
+    ///
+    /// The pointer may be [`null`] or dangling if the object has already been destroyed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(weak_into_raw)]
+    ///
+    /// use std::sync::Arc;
+    /// use std::ptr;
+    ///
+    /// let strong = Arc::new("hello".to_owned());
+    /// let weak = Arc::downgrade(&strong);
+    /// // Both point to the same object
+    /// assert!(ptr::eq(&*strong, weak.as_raw()));
+    /// // The strong here keeps it alive, so we can still access the object.
+    /// assert_eq!("hello", unsafe { &*weak.as_raw() });
+    ///
+    /// drop(strong);
+    /// // But not anymore. We can still call `weak.as_raw()`, but accessing the pointer
+    /// // would lead to undefined behavior.
+    /// // assert_eq!("hello", unsafe { &*weak.as_raw() });
+    /// ```
+    ///
+    /// [`null`]: ../../std/ptr/fn.null.html
+    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    pub fn as_raw(&self) -> *const T {
+        match self.inner() {
+            None => ptr::null(),
+            Some(inner) => {
+                let offset = data_offset_sized::<T>();
+                let ptr = inner as *const ArcInner<T>;
+                // Note: while the pointer we create may already point to a dropped value, the
+                // allocation still lives (it must hold the weak count as long as this `Weak` is alive).
+                // Therefore the offset is valid: it cannot leave the allocation.
+                let ptr = unsafe { (ptr as *const u8).offset(offset) };
+                ptr as *const T
+            }
+        }
+    }
+
+    /// Consumes the `Weak<T>` and turns it into a raw pointer.
+    ///
+    /// This converts the weak pointer into a raw pointer, preserving the original weak count. It
+    /// can be turned back into the `Weak<T>` with [`from_raw`].
+    ///
+    /// The same restrictions of accessing the target of the pointer as with
+    /// [`as_raw`] apply.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(weak_into_raw)]
+    ///
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// let strong = Arc::new("hello".to_owned());
+    /// let weak = Arc::downgrade(&strong);
+    /// let raw = weak.into_raw();
+    ///
+    /// assert_eq!(1, Arc::weak_count(&strong));
+    /// assert_eq!("hello", unsafe { &*raw });
+    ///
+    /// drop(unsafe { Weak::from_raw(raw) });
+    /// assert_eq!(0, Arc::weak_count(&strong));
+    /// ```
+    ///
+    /// [`from_raw`]: struct.Weak.html#method.from_raw
+    /// [`as_raw`]: struct.Weak.html#method.as_raw
+    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    pub fn into_raw(self) -> *const T {
+        let result = self.as_raw();
+        mem::forget(self);
+        result
+    }
+
+    /// Converts a raw pointer previously created by [`into_raw`] back into
+    /// `Weak<T>`.
+    ///
+    /// This can be used to safely get a strong reference (by calling [`upgrade`]
+    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+    ///
+    /// This takes ownership of one weak count. If a [`null`] pointer is passed, a dangling
+    /// [`Weak`] is returned.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must represent one valid weak count. In other words, it must point to a `T`
+    /// that is or *was* managed by an [`Arc`], and the weak count of that [`Arc`] must not have
+    /// reached 0. The strong count is allowed to be 0.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(weak_into_raw)]
+    ///
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// let strong = Arc::new("hello".to_owned());
+    ///
+    /// let raw_1 = Arc::downgrade(&strong).into_raw();
+    /// let raw_2 = Arc::downgrade(&strong).into_raw();
+    ///
+    /// assert_eq!(2, Arc::weak_count(&strong));
+    ///
+    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+    /// assert_eq!(1, Arc::weak_count(&strong));
+    ///
+    /// drop(strong);
+    ///
+    /// // Decrement the last weak count.
+    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+    /// ```
+    ///
+    /// [`null`]: ../../std/ptr/fn.null.html
+    /// [`into_raw`]: struct.Weak.html#method.into_raw
+    /// [`upgrade`]: struct.Weak.html#method.upgrade
+    /// [`Weak`]: struct.Weak.html
+    /// [`Arc`]: struct.Arc.html
+    #[unstable(feature = "weak_into_raw", issue = "60728")]
+    pub unsafe fn from_raw(ptr: *const T) -> Self {
+        if ptr.is_null() {
+            Self::new()
+        } else {
+            // See Arc::from_raw for details
+            let offset = data_offset(ptr);
+            let fake_ptr = ptr as *mut ArcInner<T>;
+            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+            Weak {
+                ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw"),
+            }
+        }
+    }
 }
 
 impl<T: ?Sized> Weak<T> {
@@ -1126,11 +1306,7 @@ impl<T: ?Sized> Weak<T> {
 
             // Relaxed is valid for the same reason it is on Arc's Clone impl
             match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
-                Ok(_) => return Some(Arc {
-                    // null checked above
-                    ptr: self.ptr,
-                    phantom: PhantomData,
-                }),
+                Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
                 Err(old) => n = old,
             }
         }
@@ -1215,18 +1391,18 @@ impl<T: ?Sized> Weak<T> {
     ///
     /// ```
     /// #![feature(weak_ptr_eq)]
-    /// use std::sync::{Arc, Weak};
+    /// use std::sync::Arc;
     ///
     /// let first_rc = Arc::new(5);
     /// let first = Arc::downgrade(&first_rc);
     /// let second = Arc::downgrade(&first_rc);
     ///
-    /// assert!(Weak::ptr_eq(&first, &second));
+    /// assert!(first.ptr_eq(&second));
     ///
     /// let third_rc = Arc::new(5);
     /// let third = Arc::downgrade(&third_rc);
     ///
-    /// assert!(!Weak::ptr_eq(&first, &third));
+    /// assert!(!first.ptr_eq(&third));
     /// ```
     ///
     /// Comparing `Weak::new`.
@@ -1237,16 +1413,16 @@ impl<T: ?Sized> Weak<T> {
     ///
     /// let first = Weak::new();
     /// let second = Weak::new();
-    /// assert!(Weak::ptr_eq(&first, &second));
+    /// assert!(first.ptr_eq(&second));
     ///
     /// let third_rc = Arc::new(());
     /// let third = Arc::downgrade(&third_rc);
-    /// assert!(!Weak::ptr_eq(&first, &third));
+    /// assert!(!first.ptr_eq(&third));
     /// ```
     #[inline]
     #[unstable(feature = "weak_ptr_eq", issue = "55981")]
-    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
-        this.ptr.as_ptr() == other.ptr.as_ptr()
+    pub fn ptr_eq(&self, other: &Self) -> bool {
+        self.ptr.as_ptr() == other.ptr.as_ptr()
     }
 }
 
@@ -1377,6 +1553,11 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
     }
 }
 
+/// We perform this specialization here, and not as a more general optimization on `&T`, because
+/// it would otherwise add a cost to all equality checks on references. We assume that `Arc`s are
+/// used to store large values that are slow to clone but also expensive to compare for equality,
+/// so this cost pays off more easily. It is also more likely for two `Arc` clones to point to
+/// the same value than for two `&T`s.
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
     #[inline]
@@ -1646,484 +1827,111 @@ impl<T> From<Vec<T>> for Arc<[T]> {
     }
 }
 
-#[cfg(test)]
-mod tests {
-    use std::boxed::Box;
-    use std::clone::Clone;
-    use std::sync::mpsc::channel;
-    use std::mem::drop;
-    use std::ops::Drop;
-    use std::option::Option::{self, None, Some};
-    use std::sync::atomic::{self, Ordering::{Acquire, SeqCst}};
-    use std::thread;
-    use std::sync::Mutex;
-    use std::convert::From;
-
-    use super::{Arc, Weak};
-    use crate::vec::Vec;
-
-    struct Canary(*mut atomic::AtomicUsize);
-
-    impl Drop for Canary {
-        fn drop(&mut self) {
-            unsafe {
-                match *self {
-                    Canary(c) => {
-                        (*c).fetch_add(1, SeqCst);
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    #[cfg_attr(target_os = "emscripten", ignore)]
-    fn manually_share_arc() {
-        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-        let arc_v = Arc::new(v);
-
-        let (tx, rx) = channel();
-
-        let _t = thread::spawn(move || {
-            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
-            assert_eq!((*arc_v)[3], 4);
-        });
-
-        tx.send(arc_v.clone()).unwrap();
-
-        assert_eq!((*arc_v)[2], 3);
-        assert_eq!((*arc_v)[4], 5);
-    }
-
-    #[test]
-    fn test_arc_get_mut() {
-        let mut x = Arc::new(3);
-        *Arc::get_mut(&mut x).unwrap() = 4;
-        assert_eq!(*x, 4);
-        let y = x.clone();
-        assert!(Arc::get_mut(&mut x).is_none());
-        drop(y);
-        assert!(Arc::get_mut(&mut x).is_some());
-        let _w = Arc::downgrade(&x);
-        assert!(Arc::get_mut(&mut x).is_none());
-    }
-
-    #[test]
-    fn weak_counts() {
-        assert_eq!(Weak::weak_count(&Weak::<u64>::new()), None);
-        assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
-
-        let a = Arc::new(0);
-        let w = Arc::downgrade(&a);
-        assert_eq!(Weak::strong_count(&w), 1);
-        assert_eq!(Weak::weak_count(&w), Some(1));
-        let w2 = w.clone();
-        assert_eq!(Weak::strong_count(&w), 1);
-        assert_eq!(Weak::weak_count(&w), Some(2));
-        assert_eq!(Weak::strong_count(&w2), 1);
-        assert_eq!(Weak::weak_count(&w2), Some(2));
-        drop(w);
-        assert_eq!(Weak::strong_count(&w2), 1);
-        assert_eq!(Weak::weak_count(&w2), Some(1));
-        let a2 = a.clone();
-        assert_eq!(Weak::strong_count(&w2), 2);
-        assert_eq!(Weak::weak_count(&w2), Some(1));
-        drop(a2);
-        drop(a);
-        assert_eq!(Weak::strong_count(&w2), 0);
-        assert_eq!(Weak::weak_count(&w2), Some(1));
-        drop(w2);
-    }
-
-    #[test]
-    fn try_unwrap() {
-        let x = Arc::new(3);
-        assert_eq!(Arc::try_unwrap(x), Ok(3));
-        let x = Arc::new(4);
-        let _y = x.clone();
-        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
-        let x = Arc::new(5);
-        let _w = Arc::downgrade(&x);
-        assert_eq!(Arc::try_unwrap(x), Ok(5));
-    }
-
-    #[test]
-    fn into_from_raw() {
-        let x = Arc::new(box "hello");
-        let y = x.clone();
-
-        let x_ptr = Arc::into_raw(x);
-        drop(y);
-        unsafe {
-            assert_eq!(**x_ptr, "hello");
-
-            let x = Arc::from_raw(x_ptr);
-            assert_eq!(**x, "hello");
-
-            assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
-        }
-    }
-
-    #[test]
-    fn test_into_from_raw_unsized() {
-        use std::fmt::Display;
-        use std::string::ToString;
-
-        let arc: Arc<str> = Arc::from("foo");
-
-        let ptr = Arc::into_raw(arc.clone());
-        let arc2 = unsafe { Arc::from_raw(ptr) };
-
-        assert_eq!(unsafe { &*ptr }, "foo");
-        assert_eq!(arc, arc2);
-
-        let arc: Arc<dyn Display> = Arc::new(123);
-
-        let ptr = Arc::into_raw(arc.clone());
-        let arc2 = unsafe { Arc::from_raw(ptr) };
-
-        assert_eq!(unsafe { &*ptr }.to_string(), "123");
-        assert_eq!(arc2.to_string(), "123");
-    }
-
-    #[test]
-    fn test_cowarc_clone_make_mut() {
-        let mut cow0 = Arc::new(75);
-        let mut cow1 = cow0.clone();
-        let mut cow2 = cow1.clone();
-
-        assert!(75 == *Arc::make_mut(&mut cow0));
-        assert!(75 == *Arc::make_mut(&mut cow1));
-        assert!(75 == *Arc::make_mut(&mut cow2));
-
-        *Arc::make_mut(&mut cow0) += 1;
-        *Arc::make_mut(&mut cow1) += 2;
-        *Arc::make_mut(&mut cow2) += 3;
-
-        assert!(76 == *cow0);
-        assert!(77 == *cow1);
-        assert!(78 == *cow2);
-
-        // none should point to the same backing memory
-        assert!(*cow0 != *cow1);
-        assert!(*cow0 != *cow2);
-        assert!(*cow1 != *cow2);
-    }
-
-    #[test]
-    fn test_cowarc_clone_unique2() {
-        let mut cow0 = Arc::new(75);
-        let cow1 = cow0.clone();
-        let cow2 = cow1.clone();
-
-        assert!(75 == *cow0);
-        assert!(75 == *cow1);
-        assert!(75 == *cow2);
-
-        *Arc::make_mut(&mut cow0) += 1;
-        assert!(76 == *cow0);
-        assert!(75 == *cow1);
-        assert!(75 == *cow2);
+#[unstable(feature = "boxed_slice_try_from", issue = "0")]
+impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]>
+where
+    [T; N]: LengthAtMost32,
+{
+    type Error = Arc<[T]>;
 
-        // cow1 and cow2 should share the same contents
-        // cow0 should have a unique reference
-        assert!(*cow0 != *cow1);
-        assert!(*cow0 != *cow2);
-        assert!(*cow1 == *cow2);
-    }
-
-    #[test]
-    fn test_cowarc_clone_weak() {
-        let mut cow0 = Arc::new(75);
-        let cow1_weak = Arc::downgrade(&cow0);
-
-        assert!(75 == *cow0);
-        assert!(75 == *cow1_weak.upgrade().unwrap());
-
-        *Arc::make_mut(&mut cow0) += 1;
-
-        assert!(76 == *cow0);
-        assert!(cow1_weak.upgrade().is_none());
-    }
-
-    #[test]
-    fn test_live() {
-        let x = Arc::new(5);
-        let y = Arc::downgrade(&x);
-        assert!(y.upgrade().is_some());
-    }
-
-    #[test]
-    fn test_dead() {
-        let x = Arc::new(5);
-        let y = Arc::downgrade(&x);
-        drop(x);
-        assert!(y.upgrade().is_none());
-    }
-
-    #[test]
-    fn weak_self_cyclic() {
-        struct Cycle {
-            x: Mutex<Option<Weak<Cycle>>>,
-        }
-
-        let a = Arc::new(Cycle { x: Mutex::new(None) });
-        let b = Arc::downgrade(&a.clone());
-        *a.x.lock().unwrap() = Some(b);
-
-        // hopefully we don't double-free (or leak)...
-    }
-
-    #[test]
-    fn drop_arc() {
-        let mut canary = atomic::AtomicUsize::new(0);
-        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
-        drop(x);
-        assert!(canary.load(Acquire) == 1);
-    }
-
-    #[test]
-    fn drop_arc_weak() {
-        let mut canary = atomic::AtomicUsize::new(0);
-        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
-        let arc_weak = Arc::downgrade(&arc);
-        assert!(canary.load(Acquire) == 0);
-        drop(arc);
-        assert!(canary.load(Acquire) == 1);
-        drop(arc_weak);
-    }
-
-    #[test]
-    fn test_strong_count() {
-        let a = Arc::new(0);
-        assert!(Arc::strong_count(&a) == 1);
-        let w = Arc::downgrade(&a);
-        assert!(Arc::strong_count(&a) == 1);
-        let b = w.upgrade().expect("");
-        assert!(Arc::strong_count(&b) == 2);
-        assert!(Arc::strong_count(&a) == 2);
-        drop(w);
-        drop(a);
-        assert!(Arc::strong_count(&b) == 1);
-        let c = b.clone();
-        assert!(Arc::strong_count(&b) == 2);
-        assert!(Arc::strong_count(&c) == 2);
-    }
-
-    #[test]
-    fn test_weak_count() {
-        let a = Arc::new(0);
-        assert!(Arc::strong_count(&a) == 1);
-        assert!(Arc::weak_count(&a) == 0);
-        let w = Arc::downgrade(&a);
-        assert!(Arc::strong_count(&a) == 1);
-        assert!(Arc::weak_count(&a) == 1);
-        let x = w.clone();
-        assert!(Arc::weak_count(&a) == 2);
-        drop(w);
-        drop(x);
-        assert!(Arc::strong_count(&a) == 1);
-        assert!(Arc::weak_count(&a) == 0);
-        let c = a.clone();
-        assert!(Arc::strong_count(&a) == 2);
-        assert!(Arc::weak_count(&a) == 0);
-        let d = Arc::downgrade(&c);
-        assert!(Arc::weak_count(&c) == 1);
-        assert!(Arc::strong_count(&c) == 2);
-
-        drop(a);
-        drop(c);
-        drop(d);
-    }
-
-    #[test]
-    fn show_arc() {
-        let a = Arc::new(5);
-        assert_eq!(format!("{:?}", a), "5");
-    }
-
-    // Make sure deriving works with Arc<T>
-    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
-    struct Foo {
-        inner: Arc<i32>,
-    }
-
-    #[test]
-    fn test_unsized() {
-        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
-        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
-        let y = Arc::downgrade(&x.clone());
-        drop(x);
-        assert!(y.upgrade().is_none());
-    }
-
-    #[test]
-    fn test_from_owned() {
-        let foo = 123;
-        let foo_arc = Arc::from(foo);
-        assert!(123 == *foo_arc);
-    }
-
-    #[test]
-    fn test_new_weak() {
-        let foo: Weak<usize> = Weak::new();
-        assert!(foo.upgrade().is_none());
-    }
-
-    #[test]
-    fn test_ptr_eq() {
-        let five = Arc::new(5);
-        let same_five = five.clone();
-        let other_five = Arc::new(5);
-
-        assert!(Arc::ptr_eq(&five, &same_five));
-        assert!(!Arc::ptr_eq(&five, &other_five));
-    }
-
-    #[test]
-    #[cfg_attr(target_os = "emscripten", ignore)]
-    fn test_weak_count_locked() {
-        let mut a = Arc::new(atomic::AtomicBool::new(false));
-        let a2 = a.clone();
-        let t = thread::spawn(move || {
-            for _i in 0..1000000 {
-                Arc::get_mut(&mut a);
-            }
-            a.store(true, SeqCst);
-        });
-
-        while !a2.load(SeqCst) {
-            let n = Arc::weak_count(&a2);
-            assert!(n < 2, "bad weak count: {}", n);
+    fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
+        if boxed_slice.len() == N {
+            Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
+        } else {
+            Err(boxed_slice)
         }
-        t.join().unwrap();
     }
+}
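
Usage of the new conversion (a sketch; it requires a nightly where the unstable `boxed_slice_try_from` impls are available, and mirrors the `test_array_from_slice` test added further down):

    use std::convert::TryFrom;

    let dynamic: Arc<[u32]> = Arc::from(vec![1, 2, 3]);
    let fixed = <Arc<[u32; 3]>>::try_from(dynamic.clone()).unwrap(); // lengths match
    assert!(<Arc<[u32; 2]>>::try_from(dynamic).is_err());            // lengths differ
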
 
-    #[test]
-    fn test_from_str() {
-        let r: Arc<str> = Arc::from("foo");
-
-        assert_eq!(&r[..], "foo");
-    }
-
-    #[test]
-    fn test_copy_from_slice() {
-        let s: &[u32] = &[1, 2, 3];
-        let r: Arc<[u32]> = Arc::from(s);
-
-        assert_eq!(&r[..], [1, 2, 3]);
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Arc<[T]> {
+    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
+    ///
+    /// # Performance characteristics
+    ///
+    /// ## The general case
+    ///
+    /// In the general case, collecting into `Arc<[T]>` is done by first
+    /// collecting into a `Vec<T>`. That is, when writing the following:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// this behaves as if we wrote:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
+    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// This will allocate as many times as needed for constructing the `Vec<T>`
+    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
+    ///
+    /// ## Iterators of known length
+    ///
+    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+    /// a single allocation will be made for the `Arc<[T]>`. For example:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+    /// ```
+    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+        ArcFromIter::from_iter(iter.into_iter())
     }
+}
 
-    #[test]
-    fn test_clone_from_slice() {
-        #[derive(Clone, Debug, Eq, PartialEq)]
-        struct X(u32);
-
-        let s: &[X] = &[X(1), X(2), X(3)];
-        let r: Arc<[X]> = Arc::from(s);
+/// Specialization trait used for collecting into `Arc<[T]>`.
+trait ArcFromIter<T, I> {
+    fn from_iter(iter: I) -> Self;
+}
 
-        assert_eq!(&r[..], s);
+impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
+    default fn from_iter(iter: I) -> Self {
+        iter.collect::<Vec<T>>().into()
     }
+}
 
-    #[test]
-    #[should_panic]
-    fn test_clone_from_slice_panic() {
-        use std::string::{String, ToString};
-
-        struct Fail(u32, String);
+impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
+    default fn from_iter(iter: I) -> Self {
+        // This is the case for a `TrustedLen` iterator.
+        let (low, high) = iter.size_hint();
+        if let Some(high) = high {
+            debug_assert_eq!(
+                low, high,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
 
-        impl Clone for Fail {
-            fn clone(&self) -> Fail {
-                if self.0 == 2 {
-                    panic!();
-                }
-                Fail(self.0, self.1.clone())
+            unsafe {
+                // SAFETY: We need the iterator's exact length here; `TrustedLen`
+                // guarantees that `size_hint`'s upper bound, when `Some`, is exact.
+                Arc::from_iter_exact(iter, low)
             }
+        } else {
+            // Fall back to normal implementation.
+            iter.collect::<Vec<T>>().into()
         }
-
-        let s: &[Fail] = &[
-            Fail(0, "foo".to_string()),
-            Fail(1, "bar".to_string()),
-            Fail(2, "baz".to_string()),
-        ];
-
-        // Should panic, but not cause memory corruption
-        let _r: Arc<[Fail]> = Arc::from(s);
-    }
-
-    #[test]
-    fn test_from_box() {
-        let b: Box<u32> = box 123;
-        let r: Arc<u32> = Arc::from(b);
-
-        assert_eq!(*r, 123);
-    }
-
-    #[test]
-    fn test_from_box_str() {
-        use std::string::String;
-
-        let s = String::from("foo").into_boxed_str();
-        let r: Arc<str> = Arc::from(s);
-
-        assert_eq!(&r[..], "foo");
-    }
-
-    #[test]
-    fn test_from_box_slice() {
-        let s = vec![1, 2, 3].into_boxed_slice();
-        let r: Arc<[u32]> = Arc::from(s);
-
-        assert_eq!(&r[..], [1, 2, 3]);
     }
+}
 
-    #[test]
-    fn test_from_box_trait() {
-        use std::fmt::Display;
-        use std::string::ToString;
-
-        let b: Box<dyn Display> = box 123;
-        let r: Arc<dyn Display> = Arc::from(b);
-
-        assert_eq!(r.to_string(), "123");
-    }
-
-    #[test]
-    fn test_from_box_trait_zero_sized() {
-        use std::fmt::Debug;
-
-        let b: Box<dyn Debug> = box ();
-        let r: Arc<dyn Debug> = Arc::from(b);
-
-        assert_eq!(format!("{:?}", r), "()");
-    }
-
-    #[test]
-    fn test_from_vec() {
-        let v = vec![1, 2, 3];
-        let r: Arc<[u32]> = Arc::from(v);
-
-        assert_eq!(&r[..], [1, 2, 3]);
-    }
-
-    #[test]
-    fn test_downcast() {
-        use std::any::Any;
-
-        let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::max_value());
-        let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
-
-        assert!(r1.clone().downcast::<u32>().is_err());
-
-        let r1i32 = r1.downcast::<i32>();
-        assert!(r1i32.is_ok());
-        assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value()));
-
-        assert!(r2.clone().downcast::<i32>().is_err());
-
-        let r2str = r2.downcast::<&'static str>();
-        assert!(r2str.is_ok());
-        assert_eq!(r2str.unwrap(), Arc::new("abc"));
+impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
+    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
+        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
+        //
+        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
+        // which is even more performant.
+        //
+        // In the fallback case we have `T: Clone`. This is still better
+        // than the `TrustedLen` implementation, as slices have a known length,
+        // so we avoid calling `size_hint` and the branching it entails.
+        iter.as_slice().into()
     }
 }
 
@@ -2143,3 +1951,23 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
 
 #[stable(feature = "pin", since = "1.33.0")]
 impl<T: ?Sized> Unpin for Arc<T> { }
+
+/// Computes the offset of the data field within `ArcInner`.
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
+    // Align the unsized value to the end of the `ArcInner`.
+    // Because it is `?Sized`, it will always be the last field in memory.
+    data_offset_align(align_of_val(&*ptr))
+}
+
+/// Computes the offset of the data field within `ArcInner`.
+///
+/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
+fn data_offset_sized<T>() -> isize {
+    data_offset_align(align_of::<T>())
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> isize {
+    let layout = Layout::new::<ArcInner<()>>();
+    (layout.size() + layout.padding_needed_for(align)) as isize
+}
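
The offset arithmetic is small enough to check by hand: `ArcInner<()>` consists of just the two `AtomicUsize` counters, so on a typical 64-bit target its layout is size 16, align 8. A payload whose alignment is at most 16 then sits at offset 16, while a 32-byte-aligned payload needs 16 bytes of padding first. A sketch (module-internal, since `ArcInner` is private; `padding_needed_for` is the unstable `Layout` method used above; exact numbers are target-dependent):

    let counters = Layout::new::<ArcInner<()>>(); // (size = 16, align = 8) on x86_64
    assert_eq!(counters.size() + counters.padding_needed_for(8), 16);
    assert_eq!(counters.size() + counters.padding_needed_for(32), 32);
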
diff --git a/src/liballoc/sync/tests.rs b/src/liballoc/sync/tests.rs
new file mode 100644
index 00000000000..9220f5e0333
--- /dev/null
+++ b/src/liballoc/sync/tests.rs
@@ -0,0 +1,492 @@
+use super::*;
+
+use std::boxed::Box;
+use std::clone::Clone;
+use std::sync::mpsc::channel;
+use std::mem::drop;
+use std::ops::Drop;
+use std::option::Option::{self, None, Some};
+use std::sync::atomic::{self, Ordering::{Acquire, SeqCst}};
+use std::thread;
+use std::sync::Mutex;
+use std::convert::{From, TryInto};
+
+use crate::vec::Vec;
+
+struct Canary(*mut atomic::AtomicUsize);
+
+impl Drop for Canary {
+    fn drop(&mut self) {
+        unsafe {
+            match *self {
+                Canary(c) => {
+                    (*c).fetch_add(1, SeqCst);
+                }
+            }
+        }
+    }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+#[cfg(not(miri))] // Miri does not support threads
+fn manually_share_arc() {
+    let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let arc_v = Arc::new(v);
+
+    let (tx, rx) = channel();
+
+    let _t = thread::spawn(move || {
+        let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
+        assert_eq!((*arc_v)[3], 4);
+    });
+
+    tx.send(arc_v.clone()).unwrap();
+
+    assert_eq!((*arc_v)[2], 3);
+    assert_eq!((*arc_v)[4], 5);
+}
+
+#[test]
+fn test_arc_get_mut() {
+    let mut x = Arc::new(3);
+    *Arc::get_mut(&mut x).unwrap() = 4;
+    assert_eq!(*x, 4);
+    let y = x.clone();
+    assert!(Arc::get_mut(&mut x).is_none());
+    drop(y);
+    assert!(Arc::get_mut(&mut x).is_some());
+    let _w = Arc::downgrade(&x);
+    assert!(Arc::get_mut(&mut x).is_none());
+}
+
+#[test]
+fn weak_counts() {
+    assert_eq!(Weak::weak_count(&Weak::<u64>::new()), None);
+    assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
+
+    let a = Arc::new(0);
+    let w = Arc::downgrade(&a);
+    assert_eq!(Weak::strong_count(&w), 1);
+    assert_eq!(Weak::weak_count(&w), Some(1));
+    let w2 = w.clone();
+    assert_eq!(Weak::strong_count(&w), 1);
+    assert_eq!(Weak::weak_count(&w), Some(2));
+    assert_eq!(Weak::strong_count(&w2), 1);
+    assert_eq!(Weak::weak_count(&w2), Some(2));
+    drop(w);
+    assert_eq!(Weak::strong_count(&w2), 1);
+    assert_eq!(Weak::weak_count(&w2), Some(1));
+    let a2 = a.clone();
+    assert_eq!(Weak::strong_count(&w2), 2);
+    assert_eq!(Weak::weak_count(&w2), Some(1));
+    drop(a2);
+    drop(a);
+    assert_eq!(Weak::strong_count(&w2), 0);
+    assert_eq!(Weak::weak_count(&w2), Some(1));
+    drop(w2);
+}
+
+#[test]
+fn try_unwrap() {
+    let x = Arc::new(3);
+    assert_eq!(Arc::try_unwrap(x), Ok(3));
+    let x = Arc::new(4);
+    let _y = x.clone();
+    assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
+    let x = Arc::new(5);
+    let _w = Arc::downgrade(&x);
+    assert_eq!(Arc::try_unwrap(x), Ok(5));
+}
+
+#[test]
+fn into_from_raw() {
+    let x = Arc::new(box "hello");
+    let y = x.clone();
+
+    let x_ptr = Arc::into_raw(x);
+    drop(y);
+    unsafe {
+        assert_eq!(**x_ptr, "hello");
+
+        let x = Arc::from_raw(x_ptr);
+        assert_eq!(**x, "hello");
+
+        assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
+    }
+}
+
+#[test]
+fn test_into_from_raw_unsized() {
+    use std::fmt::Display;
+    use std::string::ToString;
+
+    let arc: Arc<str> = Arc::from("foo");
+
+    let ptr = Arc::into_raw(arc.clone());
+    let arc2 = unsafe { Arc::from_raw(ptr) };
+
+    assert_eq!(unsafe { &*ptr }, "foo");
+    assert_eq!(arc, arc2);
+
+    let arc: Arc<dyn Display> = Arc::new(123);
+
+    let ptr = Arc::into_raw(arc.clone());
+    let arc2 = unsafe { Arc::from_raw(ptr) };
+
+    assert_eq!(unsafe { &*ptr }.to_string(), "123");
+    assert_eq!(arc2.to_string(), "123");
+}
+
+#[test]
+fn test_cowarc_clone_make_mut() {
+    let mut cow0 = Arc::new(75);
+    let mut cow1 = cow0.clone();
+    let mut cow2 = cow1.clone();
+
+    assert!(75 == *Arc::make_mut(&mut cow0));
+    assert!(75 == *Arc::make_mut(&mut cow1));
+    assert!(75 == *Arc::make_mut(&mut cow2));
+
+    *Arc::make_mut(&mut cow0) += 1;
+    *Arc::make_mut(&mut cow1) += 2;
+    *Arc::make_mut(&mut cow2) += 3;
+
+    assert!(76 == *cow0);
+    assert!(77 == *cow1);
+    assert!(78 == *cow2);
+
+    // none should point to the same backing memory
+    assert!(*cow0 != *cow1);
+    assert!(*cow0 != *cow2);
+    assert!(*cow1 != *cow2);
+}
+
+#[test]
+fn test_cowarc_clone_unique2() {
+    let mut cow0 = Arc::new(75);
+    let cow1 = cow0.clone();
+    let cow2 = cow1.clone();
+
+    assert!(75 == *cow0);
+    assert!(75 == *cow1);
+    assert!(75 == *cow2);
+
+    *Arc::make_mut(&mut cow0) += 1;
+    assert!(76 == *cow0);
+    assert!(75 == *cow1);
+    assert!(75 == *cow2);
+
+    // cow1 and cow2 should share the same contents
+    // cow0 should have a unique reference
+    assert!(*cow0 != *cow1);
+    assert!(*cow0 != *cow2);
+    assert!(*cow1 == *cow2);
+}
+
+#[test]
+fn test_cowarc_clone_weak() {
+    let mut cow0 = Arc::new(75);
+    let cow1_weak = Arc::downgrade(&cow0);
+
+    assert!(75 == *cow0);
+    assert!(75 == *cow1_weak.upgrade().unwrap());
+
+    *Arc::make_mut(&mut cow0) += 1;
+
+    assert!(76 == *cow0);
+    assert!(cow1_weak.upgrade().is_none());
+}
+
+#[test]
+fn test_live() {
+    let x = Arc::new(5);
+    let y = Arc::downgrade(&x);
+    assert!(y.upgrade().is_some());
+}
+
+#[test]
+fn test_dead() {
+    let x = Arc::new(5);
+    let y = Arc::downgrade(&x);
+    drop(x);
+    assert!(y.upgrade().is_none());
+}
+
+#[test]
+fn weak_self_cyclic() {
+    struct Cycle {
+        x: Mutex<Option<Weak<Cycle>>>,
+    }
+
+    let a = Arc::new(Cycle { x: Mutex::new(None) });
+    let b = Arc::downgrade(&a.clone());
+    *a.x.lock().unwrap() = Some(b);
+
+    // hopefully we don't double-free (or leak)...
+}
+
+#[test]
+fn drop_arc() {
+    let mut canary = atomic::AtomicUsize::new(0);
+    let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
+    drop(x);
+    assert!(canary.load(Acquire) == 1);
+}
+
+#[test]
+fn drop_arc_weak() {
+    let mut canary = atomic::AtomicUsize::new(0);
+    let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
+    let arc_weak = Arc::downgrade(&arc);
+    assert!(canary.load(Acquire) == 0);
+    drop(arc);
+    assert!(canary.load(Acquire) == 1);
+    drop(arc_weak);
+}
+
+#[test]
+fn test_strong_count() {
+    let a = Arc::new(0);
+    assert!(Arc::strong_count(&a) == 1);
+    let w = Arc::downgrade(&a);
+    assert!(Arc::strong_count(&a) == 1);
+    let b = w.upgrade().expect("");
+    assert!(Arc::strong_count(&b) == 2);
+    assert!(Arc::strong_count(&a) == 2);
+    drop(w);
+    drop(a);
+    assert!(Arc::strong_count(&b) == 1);
+    let c = b.clone();
+    assert!(Arc::strong_count(&b) == 2);
+    assert!(Arc::strong_count(&c) == 2);
+}
+
+#[test]
+fn test_weak_count() {
+    let a = Arc::new(0);
+    assert!(Arc::strong_count(&a) == 1);
+    assert!(Arc::weak_count(&a) == 0);
+    let w = Arc::downgrade(&a);
+    assert!(Arc::strong_count(&a) == 1);
+    assert!(Arc::weak_count(&a) == 1);
+    let x = w.clone();
+    assert!(Arc::weak_count(&a) == 2);
+    drop(w);
+    drop(x);
+    assert!(Arc::strong_count(&a) == 1);
+    assert!(Arc::weak_count(&a) == 0);
+    let c = a.clone();
+    assert!(Arc::strong_count(&a) == 2);
+    assert!(Arc::weak_count(&a) == 0);
+    let d = Arc::downgrade(&c);
+    assert!(Arc::weak_count(&c) == 1);
+    assert!(Arc::strong_count(&c) == 2);
+
+    drop(a);
+    drop(c);
+    drop(d);
+}
+
+#[test]
+fn show_arc() {
+    let a = Arc::new(5);
+    assert_eq!(format!("{:?}", a), "5");
+}
+
+// Make sure deriving works with Arc<T>
+#[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
+struct Foo {
+    inner: Arc<i32>,
+}
+
+#[test]
+fn test_unsized() {
+    let x: Arc<[i32]> = Arc::new([1, 2, 3]);
+    assert_eq!(format!("{:?}", x), "[1, 2, 3]");
+    let y = Arc::downgrade(&x.clone());
+    drop(x);
+    assert!(y.upgrade().is_none());
+}
+
+#[test]
+fn test_from_owned() {
+    let foo = 123;
+    let foo_arc = Arc::from(foo);
+    assert!(123 == *foo_arc);
+}
+
+#[test]
+fn test_new_weak() {
+    let foo: Weak<usize> = Weak::new();
+    assert!(foo.upgrade().is_none());
+}
+
+#[test]
+fn test_ptr_eq() {
+    let five = Arc::new(5);
+    let same_five = five.clone();
+    let other_five = Arc::new(5);
+
+    assert!(Arc::ptr_eq(&five, &same_five));
+    assert!(!Arc::ptr_eq(&five, &other_five));
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+#[cfg(not(miri))] // Miri does not support threads
+fn test_weak_count_locked() {
+    let mut a = Arc::new(atomic::AtomicBool::new(false));
+    let a2 = a.clone();
+    let t = thread::spawn(move || {
+        for _i in 0..1000000 {
+            Arc::get_mut(&mut a);
+        }
+        a.store(true, SeqCst);
+    });
+
+    while !a2.load(SeqCst) {
+        let n = Arc::weak_count(&a2);
+        assert!(n < 2, "bad weak count: {}", n);
+    }
+    t.join().unwrap();
+}
+
+#[test]
+fn test_from_str() {
+    let r: Arc<str> = Arc::from("foo");
+
+    assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_copy_from_slice() {
+    let s: &[u32] = &[1, 2, 3];
+    let r: Arc<[u32]> = Arc::from(s);
+
+    assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_clone_from_slice() {
+    #[derive(Clone, Debug, Eq, PartialEq)]
+    struct X(u32);
+
+    let s: &[X] = &[X(1), X(2), X(3)];
+    let r: Arc<[X]> = Arc::from(s);
+
+    assert_eq!(&r[..], s);
+}
+
+#[test]
+#[should_panic]
+fn test_clone_from_slice_panic() {
+    use std::string::{String, ToString};
+
+    struct Fail(u32, String);
+
+    impl Clone for Fail {
+        fn clone(&self) -> Fail {
+            if self.0 == 2 {
+                panic!();
+            }
+            Fail(self.0, self.1.clone())
+        }
+    }
+
+    let s: &[Fail] = &[
+        Fail(0, "foo".to_string()),
+        Fail(1, "bar".to_string()),
+        Fail(2, "baz".to_string()),
+    ];
+
+    // Should panic, but not cause memory corruption
+    let _r: Arc<[Fail]> = Arc::from(s);
+}
+
+#[test]
+fn test_from_box() {
+    let b: Box<u32> = box 123;
+    let r: Arc<u32> = Arc::from(b);
+
+    assert_eq!(*r, 123);
+}
+
+#[test]
+fn test_from_box_str() {
+    use std::string::String;
+
+    let s = String::from("foo").into_boxed_str();
+    let r: Arc<str> = Arc::from(s);
+
+    assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_from_box_slice() {
+    let s = vec![1, 2, 3].into_boxed_slice();
+    let r: Arc<[u32]> = Arc::from(s);
+
+    assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_from_box_trait() {
+    use std::fmt::Display;
+    use std::string::ToString;
+
+    let b: Box<dyn Display> = box 123;
+    let r: Arc<dyn Display> = Arc::from(b);
+
+    assert_eq!(r.to_string(), "123");
+}
+
+#[test]
+fn test_from_box_trait_zero_sized() {
+    use std::fmt::Debug;
+
+    let b: Box<dyn Debug> = box ();
+    let r: Arc<dyn Debug> = Arc::from(b);
+
+    assert_eq!(format!("{:?}", r), "()");
+}
+
+#[test]
+fn test_from_vec() {
+    let v = vec![1, 2, 3];
+    let r: Arc<[u32]> = Arc::from(v);
+
+    assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_downcast() {
+    use std::any::Any;
+
+    let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::max_value());
+    let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
+
+    assert!(r1.clone().downcast::<u32>().is_err());
+
+    let r1i32 = r1.downcast::<i32>();
+    assert!(r1i32.is_ok());
+    assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value()));
+
+    assert!(r2.clone().downcast::<i32>().is_err());
+
+    let r2str = r2.downcast::<&'static str>();
+    assert!(r2str.is_ok());
+    assert_eq!(r2str.unwrap(), Arc::new("abc"));
+}
+
+#[test]
+fn test_array_from_slice() {
+    let v = vec![1, 2, 3];
+    let r: Arc<[u32]> = Arc::from(v);
+
+    let a: Result<Arc<[u32; 3]>, _> = r.clone().try_into();
+    assert!(a.is_ok());
+
+    let a: Result<Arc<[u32; 2]>, _> = r.clone().try_into();
+    assert!(a.is_err());
+}
diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/tests.rs
index 654eabd0703..ed46ba8a1b9 100644
--- a/src/liballoc/boxed_test.rs
+++ b/src/liballoc/tests.rs
@@ -1,6 +1,7 @@
 //! Test for `boxed` mod.
 
 use core::any::Any;
+use core::convert::TryInto;
 use core::ops::Deref;
 use core::result::Result::{Err, Ok};
 use core::clone::Clone;
@@ -138,3 +139,15 @@ fn boxed_slice_from_iter() {
     assert_eq!(boxed.len(), 100);
     assert_eq!(boxed[7], 7);
 }
+
+#[test]
+fn test_array_from_slice() {
+    let v = vec![1, 2, 3];
+    let r: Box<[u32]> = v.into_boxed_slice();
+
+    let a: Result<Box<[u32; 3]>, _> = r.clone().try_into();
+    assert!(a.is_ok());
+
+    let a: Result<Box<[u32; 2]>, _> = r.clone().try_into();
+    assert!(a.is_err());
+}
diff --git a/src/liballoc/tests/arc.rs b/src/liballoc/tests/arc.rs
index 2759b1b1cac..cf2ad2a8e60 100644
--- a/src/liballoc/tests/arc.rs
+++ b/src/liballoc/tests/arc.rs
@@ -2,6 +2,8 @@ use std::any::Any;
 use std::sync::{Arc, Weak};
 use std::cell::RefCell;
 use std::cmp::PartialEq;
+use std::iter::TrustedLen;
+use std::mem;
 
 #[test]
 fn uninhabited() {
@@ -85,3 +87,122 @@ fn eq() {
     assert!(!(x != x));
     assert_eq!(*x.0.borrow(), 0);
 }
+
+// The test code below is identical to that in `rc.rs`.
+// For better maintainability we therefore define this type alias.
+type Rc<T> = Arc<T>;
+
+const SHARED_ITER_MAX: u16 = 100;
+
+fn assert_trusted_len<I: TrustedLen>(_: &I) {}
+
+#[test]
+fn shared_from_iter_normal() {
+    // Exercise the base implementation for non-`TrustedLen` iterators.
+    {
+        // `Filter` is never `TrustedLen` since we don't
+        // know statically how many elements will be kept:
+        let iter = (0..SHARED_ITER_MAX).filter(|x| x % 2 == 0).map(Box::new);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+} // Drop what hasn't been dropped here.
+}
+
+#[test]
+fn shared_from_iter_trustedlen_normal() {
+    // Exercise the `TrustedLen` implementation under normal circumstances
+    // where `size_hint()` matches `(_, Some(exact_len))`.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(Box::new);
+        assert_trusted_len(&iter);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop what hasn't been dropped here.
+
+    // Try a ZST to make sure it is handled well.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(|_| ());
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(0, mem::size_of_val(&*rc));
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    }
+}
+
+#[test]
+#[should_panic = "I've almost got 99 problems."]
+fn shared_from_iter_trustedlen_panic() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where `.next()` drops before the last iteration.
+    let iter = (0..SHARED_ITER_MAX)
+        .map(|val| {
+            match val {
+                98 => panic!("I've almost got 99 problems."),
+                _ => Box::new(val),
+            }
+        });
+    assert_trusted_len(&iter);
+    let _ = iter.collect::<Rc<[_]>>();
+
+    panic!("I am unreachable.");
+}
+
+#[test]
+fn shared_from_iter_trustedlen_no_fuse() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where the iterator does not behave in a fused manner.
+    struct Iter(std::vec::IntoIter<Option<Box<u8>>>);
+
+    unsafe impl TrustedLen for Iter {}
+
+    impl Iterator for Iter {
+        fn size_hint(&self) -> (usize, Option<usize>) {
+            (2, Some(2))
+        }
+
+        type Item = Box<u8>;
+
+        fn next(&mut self) -> Option<Self::Item> {
+            self.0.next().flatten()
+        }
+    }
+
+    let vec = vec![
+        Some(Box::new(42)),
+        Some(Box::new(24)),
+        None,
+        Some(Box::new(12)),
+    ];
+    let iter = Iter(vec.into_iter());
+    assert_trusted_len(&iter);
+    assert_eq!(
+        &[Box::new(42), Box::new(24)],
+        &*iter.collect::<Rc<[_]>>()
+    );
+}
diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs
index 1d4a3edc1ac..0685fa943c0 100644
--- a/src/liballoc/tests/binary_heap.rs
+++ b/src/liballoc/tests/binary_heap.rs
@@ -282,7 +282,7 @@ fn assert_covariance() {
 //
 // Destructors must be called exactly once per element.
 #[test]
-#[cfg(not(miri))] // Miri does not support panics
+#[cfg(not(miri))] // Miri does not support catching panics
 fn panic_safe() {
     static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
 
diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs
index f14750089c9..844afe87076 100644
--- a/src/liballoc/tests/btree/map.rs
+++ b/src/liballoc/tests/btree/map.rs
@@ -226,7 +226,6 @@ fn test_range_equal_empty_cases() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_range_equal_excluded() {
     let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
     map.range((Excluded(2), Excluded(2)));
@@ -234,7 +233,6 @@ fn test_range_equal_excluded() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_range_backwards_1() {
     let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
     map.range((Included(3), Included(2)));
@@ -242,7 +240,6 @@ fn test_range_backwards_1() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_range_backwards_2() {
     let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
     map.range((Included(3), Excluded(2)));
@@ -250,7 +247,6 @@ fn test_range_backwards_2() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_range_backwards_3() {
     let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
     map.range((Excluded(3), Included(2)));
@@ -258,7 +254,6 @@ fn test_range_backwards_3() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_range_backwards_4() {
     let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
     map.range((Excluded(3), Excluded(2)));
diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs
index 9e031375949..8c3dacd914f 100644
--- a/src/liballoc/tests/btree/set.rs
+++ b/src/liballoc/tests/btree/set.rs
@@ -69,6 +69,20 @@ fn test_intersection() {
     check_intersection(&[11, 1, 3, 77, 103, 5, -5],
                        &[2, 11, 77, -9, -42, 5, 3],
                        &[3, 5, 11, 77]);
+    let large = (0..1000).collect::<Vec<_>>();
+    check_intersection(&[], &large, &[]);
+    check_intersection(&large, &[], &[]);
+    check_intersection(&[-1], &large, &[]);
+    check_intersection(&large, &[-1], &[]);
+    check_intersection(&[0], &large, &[0]);
+    check_intersection(&large, &[0], &[0]);
+    check_intersection(&[999], &large, &[999]);
+    check_intersection(&large, &[999], &[999]);
+    check_intersection(&[1000], &large, &[]);
+    check_intersection(&large, &[1000], &[]);
+    check_intersection(&[11, 5000, 1, 3, 77, 8924, 103],
+                       &large,
+                       &[1, 3, 11, 77, 103]);
 }
 
 #[test]
@@ -84,6 +98,18 @@ fn test_difference() {
     check_difference(&[-5, 11, 22, 33, 40, 42],
                      &[-12, -5, 14, 23, 34, 38, 39, 50],
                      &[11, 22, 33, 40, 42]);
+    let large = (0..1000).collect::<Vec<_>>();
+    check_difference(&[], &large, &[]);
+    check_difference(&[-1], &large, &[-1]);
+    check_difference(&[0], &large, &[]);
+    check_difference(&[999], &large, &[]);
+    check_difference(&[1000], &large, &[1000]);
+    check_difference(&[11, 5000, 1, 3, 77, 8924, 103],
+                     &large,
+                     &[5000, 8924]);
+    check_difference(&large, &[], &large);
+    check_difference(&large, &[-1], &large);
+    check_difference(&large, &[1000], &large);
 }
 
 #[test]
@@ -115,6 +141,41 @@ fn test_union() {
 }
 
 #[test]
+// Only tests the simple function definition with respect to intersection
+fn test_is_disjoint() {
+    let one = [1].iter().collect::<BTreeSet<_>>();
+    let two = [2].iter().collect::<BTreeSet<_>>();
+    assert!(one.is_disjoint(&two));
+}
+
+#[test]
+// Also tests the trivial function definition of is_superset
+fn test_is_subset() {
+    fn is_subset(a: &[i32], b: &[i32]) -> bool {
+        let set_a = a.iter().collect::<BTreeSet<_>>();
+        let set_b = b.iter().collect::<BTreeSet<_>>();
+        set_a.is_subset(&set_b)
+    }
+
+    assert_eq!(is_subset(&[], &[]), true);
+    assert_eq!(is_subset(&[], &[1, 2]), true);
+    assert_eq!(is_subset(&[0], &[1, 2]), false);
+    assert_eq!(is_subset(&[1], &[1, 2]), true);
+    assert_eq!(is_subset(&[2], &[1, 2]), true);
+    assert_eq!(is_subset(&[3], &[1, 2]), false);
+    assert_eq!(is_subset(&[1, 2], &[1]), false);
+    assert_eq!(is_subset(&[1, 2], &[1, 2]), true);
+    assert_eq!(is_subset(&[1, 2], &[2, 3]), false);
+    let large = (0..1000).collect::<Vec<_>>();
+    assert_eq!(is_subset(&[], &large), true);
+    assert_eq!(is_subset(&large, &[]), false);
+    assert_eq!(is_subset(&[-1], &large), false);
+    assert_eq!(is_subset(&[0], &large), true);
+    assert_eq!(is_subset(&[1, 2], &large), true);
+    assert_eq!(is_subset(&[999, 1000], &large), false);
+}
+
+#[test]
 fn test_zip() {
     let mut x = BTreeSet::new();
     x.insert(5);
diff --git a/src/liballoc/tests/cow_str.rs b/src/liballoc/tests/cow_str.rs
index eb6adb159b0..6f357eda9b8 100644
--- a/src/liballoc/tests/cow_str.rs
+++ b/src/liballoc/tests/cow_str.rs
@@ -7,9 +7,9 @@ fn check_cow_add_cow() {
     let borrowed2 = Cow::Borrowed("World!");
     let borrow_empty = Cow::Borrowed("");
 
-    let owned1: Cow<str> = Cow::Owned(String::from("Hi, "));
-    let owned2: Cow<str> = Cow::Owned(String::from("Rustaceans!"));
-    let owned_empty: Cow<str> = Cow::Owned(String::new());
+    let owned1: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+    let owned2: Cow<'_, str> = Cow::Owned(String::from("Rustaceans!"));
+    let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
 
     assert_eq!("Hello, World!", borrowed1.clone() + borrowed2.clone());
     assert_eq!("Hello, Rustaceans!", borrowed1.clone() + owned2.clone());
@@ -36,8 +36,8 @@ fn check_cow_add_str() {
     let borrowed = Cow::Borrowed("Hello, ");
     let borrow_empty = Cow::Borrowed("");
 
-    let owned: Cow<str> = Cow::Owned(String::from("Hi, "));
-    let owned_empty: Cow<str> = Cow::Owned(String::new());
+    let owned: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+    let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
 
     assert_eq!("Hello, World!", borrowed.clone() + "World!");
 
@@ -60,9 +60,9 @@ fn check_cow_add_assign_cow() {
     let borrowed2 = Cow::Borrowed("World!");
     let borrow_empty = Cow::Borrowed("");
 
-    let mut owned1: Cow<str> = Cow::Owned(String::from("Hi, "));
-    let owned2: Cow<str> = Cow::Owned(String::from("Rustaceans!"));
-    let owned_empty: Cow<str> = Cow::Owned(String::new());
+    let mut owned1: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+    let owned2: Cow<'_, str> = Cow::Owned(String::from("Rustaceans!"));
+    let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
 
     let mut s = borrowed1.clone();
     s += borrow_empty.clone();
@@ -101,8 +101,8 @@ fn check_cow_add_assign_str() {
     let mut borrowed = Cow::Borrowed("Hello, ");
     let borrow_empty = Cow::Borrowed("");
 
-    let mut owned: Cow<str> = Cow::Owned(String::from("Hi, "));
-    let owned_empty: Cow<str> = Cow::Owned(String::new());
+    let mut owned: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+    let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
 
     let mut s = borrowed.clone();
     s += "";
@@ -132,10 +132,10 @@ fn check_cow_add_assign_str() {
 
 #[test]
 fn check_cow_clone_from() {
-    let mut c1: Cow<str> = Cow::Owned(String::with_capacity(25));
+    let mut c1: Cow<'_, str> = Cow::Owned(String::with_capacity(25));
     let s: String = "hi".to_string();
     assert!(s.capacity() < 25);
-    let c2: Cow<str> = Cow::Owned(s);
+    let c2: Cow<'_, str> = Cow::Owned(s);
     c1.clone_from(&c2);
     assert!(c1.into_owned().capacity() >= 25);
 }
diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs
index c225ebfa96b..904b3e7e1b0 100644
--- a/src/liballoc/tests/heap.rs
+++ b/src/liballoc/tests/heap.rs
@@ -1,6 +1,6 @@
 use std::alloc::{Global, Alloc, Layout, System};
 
-/// Issue #45955.
+/// Issue #45955 and #62251.
 #[test]
 fn alloc_system_overaligned_request() {
     check_overalign_requests(System)
@@ -12,21 +12,23 @@ fn std_heap_overaligned_request() {
 }
 
 fn check_overalign_requests<T: Alloc>(mut allocator: T) {
-    let size = 8;
-    let align = 16; // greater than size
-    let iterations = 100;
-    unsafe {
-        let pointers: Vec<_> = (0..iterations).map(|_| {
-            allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
-        }).collect();
-        for &ptr in &pointers {
-            assert_eq!((ptr.as_ptr() as usize) % align, 0,
-                       "Got a pointer less aligned than requested")
-        }
+    for &align in &[4, 8, 16, 32] { // both smaller and larger than `MIN_ALIGN`
+        for &size in &[align / 2, align - 1] { // size less than alignment
+            let iterations = 128;
+            unsafe {
+                let pointers: Vec<_> = (0..iterations).map(|_| {
+                    allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
+                }).collect();
+                for &ptr in &pointers {
+                    assert_eq!((ptr.as_ptr() as usize) % align, 0,
+                               "Got a pointer less aligned than requested")
+                }
 
-        // Clean up
-        for &ptr in &pointers {
-            allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
+                // Clean up
+                for &ptr in &pointers {
+                    allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
+                }
+            }
         }
     }
 }
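+
+// Why sizes *below* the alignment matter here (background, assuming the usual
+// `MIN_ALIGN` fast path in the system allocator): a request is only safe to
+// route through plain `malloc` when the alignment is no larger than both
+// `MIN_ALIGN` and the size; issue #62251 hit exactly the small-size case.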
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index 90921b6af9f..6d774f3fecd 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -2,11 +2,12 @@
 #![feature(box_syntax)]
 #![feature(drain_filter)]
 #![feature(exact_size_is_empty)]
+#![feature(option_flattening)]
 #![feature(pattern)]
 #![feature(repeat_generic_slice)]
+#![feature(trusted_len)]
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
-#![feature(vecdeque_rotate)]
 
 use std::hash::{Hash, Hasher};
 use std::collections::hash_map::DefaultHasher;
diff --git a/src/liballoc/tests/linked_list.rs b/src/liballoc/tests/linked_list.rs
index 0fbfbdccd45..8a26454c389 100644
--- a/src/liballoc/tests/linked_list.rs
+++ b/src/liballoc/tests/linked_list.rs
@@ -40,12 +40,10 @@ fn test_basic() {
     assert_eq!(n.pop_front(), Some(1));
 }
 
-#[cfg(test)]
 fn generate_test() -> LinkedList<i32> {
     list_from(&[0, 1, 2, 3, 4, 5, 6])
 }
 
-#[cfg(test)]
 fn list_from<T: Clone>(v: &[T]) -> LinkedList<T> {
     v.iter().cloned().collect()
 }
diff --git a/src/liballoc/tests/rc.rs b/src/liballoc/tests/rc.rs
index 18f82e80410..7854ca0fc16 100644
--- a/src/liballoc/tests/rc.rs
+++ b/src/liballoc/tests/rc.rs
@@ -2,6 +2,8 @@ use std::any::Any;
 use std::rc::{Rc, Weak};
 use std::cell::RefCell;
 use std::cmp::PartialEq;
+use std::mem;
+use std::iter::TrustedLen;
 
 #[test]
 fn uninhabited() {
@@ -85,3 +87,118 @@ fn eq() {
     assert!(!(x != x));
     assert_eq!(*x.0.borrow(), 0);
 }
+
+const SHARED_ITER_MAX: u16 = 100;
+
+fn assert_trusted_len<I: TrustedLen>(_: &I) {}
+
+#[test]
+fn shared_from_iter_normal() {
+    // Exercise the base implementation for non-`TrustedLen` iterators.
+    {
+        // `Filter` is never `TrustedLen` since we don't
+        // know statically how many elements will be kept:
+        let iter = (0..SHARED_ITER_MAX).filter(|x| x % 2 == 0).map(Box::new);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop whatever hasn't been dropped yet.
+}
+
+#[test]
+fn shared_from_iter_trustedlen_normal() {
+    // Exercise the `TrustedLen` implementation under normal circumstances
+    // where `size_hint()` matches `(_, Some(exact_len))`.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(Box::new);
+        assert_trusted_len(&iter);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop whatever hasn't been dropped yet.
+
+    // Try a ZST to make sure it is handled well.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(|_| ());
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(0, mem::size_of_val(&*rc));
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    }
+}
+
+#[test]
+#[should_panic = "I've almost got 99 problems."]
+fn shared_from_iter_trustedlen_panic() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where `.next()` panics before the last iteration.
+    let iter = (0..SHARED_ITER_MAX)
+        .map(|val| {
+            match val {
+                98 => panic!("I've almost got 99 problems."),
+                _ => Box::new(val),
+            }
+        });
+    assert_trusted_len(&iter);
+    let _ = iter.collect::<Rc<[_]>>();
+
+    panic!("I am unreachable.");
+}
+
+#[test]
+fn shared_from_iter_trustedlen_no_fuse() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where the iterator does not behave in a fused manner.
+    struct Iter(std::vec::IntoIter<Option<Box<u8>>>);
+
+    unsafe impl TrustedLen for Iter {}
+
+    impl Iterator for Iter {
+        fn size_hint(&self) -> (usize, Option<usize>) {
+            (2, Some(2))
+        }
+
+        type Item = Box<u8>;
+
+        fn next(&mut self) -> Option<Self::Item> {
+            self.0.next().flatten()
+        }
+    }
+
+    let vec = vec![
+        Some(Box::new(42)),
+        Some(Box::new(24)),
+        None,
+        Some(Box::new(12)),
+    ];
+    let iter = Iter(vec.into_iter());
+    assert_trusted_len(&iter);
+    assert_eq!(
+        &[Box::new(42), Box::new(24)],
+        &*iter.collect::<Rc<[_]>>()
+    );
+}
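+
+// Background for the test above: a "fused" iterator never yields `Some` again
+// after returning `None`. `Iter` deliberately breaks that guarantee -- it
+// advertises a trusted length of 2 but hides a `None` between its `Some`s --
+// and the assertion checks that collection keeps only what was actually
+// yielded before the first `None`, never reading past it.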
diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs
index feba46b0fad..ad2cd7c95eb 100644
--- a/src/liballoc/tests/slice.rs
+++ b/src/liballoc/tests/slice.rs
@@ -258,7 +258,6 @@ fn test_swap_remove() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_swap_remove_fail() {
     let mut v = vec![1];
     let _ = v.swap_remove(0);
@@ -390,7 +389,7 @@ fn test_reverse() {
 }
 
 #[test]
-#[cfg(not(miri))] // Miri does not support entropy
+#[cfg(not(miri))] // Miri is too slow
 fn test_sort() {
     let mut rng = thread_rng();
 
@@ -467,10 +466,19 @@ fn test_sort() {
 }
 
 #[test]
-#[cfg(not(miri))] // Miri does not support entropy
 fn test_sort_stability() {
-    for len in (2..25).chain(500..510) {
-        for _ in 0..10 {
+    #[cfg(not(miri))] // Miri is too slow
+    let large_range = 500..510;
+    #[cfg(not(miri))] // Miri is too slow
+    let rounds = 10;
+
+    #[cfg(miri)]
+    let large_range = 0..0; // empty range
+    #[cfg(miri)]
+    let rounds = 1;
+
+    for len in (2..25).chain(large_range) {
+        for _ in 0..rounds {
             let mut counts = [0; 10];
 
             // create a vector like [(6, 1), (5, 1), (6, 2), ...],
@@ -632,7 +640,6 @@ fn test_insert() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_insert_oob() {
     let mut a = vec![1, 2, 3];
     a.insert(4, 5);
@@ -657,7 +664,6 @@ fn test_remove() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_remove_fail() {
     let mut a = vec![1];
     let _ = a.remove(0);
@@ -939,7 +945,6 @@ fn test_windowsator() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_windowsator_0() {
     let v = &[1, 2, 3, 4];
     let _it = v.windows(0);
@@ -964,7 +969,6 @@ fn test_chunksator() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_chunksator_0() {
     let v = &[1, 2, 3, 4];
     let _it = v.chunks(0);
@@ -989,7 +993,6 @@ fn test_chunks_exactator() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_chunks_exactator_0() {
     let v = &[1, 2, 3, 4];
     let _it = v.chunks_exact(0);
@@ -1014,7 +1017,6 @@ fn test_rchunksator() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_rchunksator_0() {
     let v = &[1, 2, 3, 4];
     let _it = v.rchunks(0);
@@ -1039,7 +1041,6 @@ fn test_rchunks_exactator() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_rchunks_exactator_0() {
     let v = &[1, 2, 3, 4];
     let _it = v.rchunks_exact(0);
@@ -1092,7 +1093,6 @@ fn test_vec_default() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_overflow_does_not_cause_segfault() {
     let mut v = vec![];
     v.reserve_exact(!0);
@@ -1102,7 +1102,6 @@ fn test_overflow_does_not_cause_segfault() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_overflow_does_not_cause_segfault_managed() {
     let mut v = vec![Rc::new(1)];
     v.reserve_exact(!0);
@@ -1278,7 +1277,6 @@ fn test_mut_chunks_rev() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_mut_chunks_0() {
     let mut v = [1, 2, 3, 4];
     let _it = v.chunks_mut(0);
@@ -1311,7 +1309,6 @@ fn test_mut_chunks_exact_rev() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_mut_chunks_exact_0() {
     let mut v = [1, 2, 3, 4];
     let _it = v.chunks_exact_mut(0);
@@ -1344,7 +1341,6 @@ fn test_mut_rchunks_rev() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_mut_rchunks_0() {
     let mut v = [1, 2, 3, 4];
     let _it = v.rchunks_mut(0);
@@ -1377,7 +1373,6 @@ fn test_mut_rchunks_exact_rev() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_mut_rchunks_exact_0() {
     let mut v = [1, 2, 3, 4];
     let _it = v.rchunks_exact_mut(0);
@@ -1411,7 +1406,7 @@ fn test_box_slice_clone() {
 #[test]
 #[allow(unused_must_use)] // here, we care about the side effects of `.clone()`
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg(not(miri))] // Miri does not support panics
+#[cfg(not(miri))] // Miri does not support threads
 fn test_box_slice_clone_panics() {
     use std::sync::Arc;
     use std::sync::atomic::{AtomicUsize, Ordering};
@@ -1476,7 +1471,6 @@ fn test_copy_from_slice() {
 
 #[test]
 #[should_panic(expected = "destination and source slices have different lengths")]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_copy_from_slice_dst_longer() {
     let src = [0, 1, 2, 3];
     let mut dst = [0; 5];
@@ -1485,7 +1479,6 @@ fn test_copy_from_slice_dst_longer() {
 
 #[test]
 #[should_panic(expected = "destination and source slices have different lengths")]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_copy_from_slice_dst_shorter() {
     let src = [0, 1, 2, 3];
     let mut dst = [0; 3];
@@ -1605,7 +1598,7 @@ thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)] // no threads
-#[cfg(not(miri))] // Miri does not support panics
+#[cfg(not(miri))] // Miri does not support threads
 fn panic_safe() {
     let prev = panic::take_hook();
     panic::set_hook(Box::new(move |info| {
diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs
index f465d67dc93..c5198ca39fe 100644
--- a/src/liballoc/tests/str.rs
+++ b/src/liballoc/tests/str.rs
@@ -351,7 +351,6 @@ mod slice_index {
     //  to be used in `should_panic`)
     #[test]
     #[should_panic(expected = "out of bounds")]
-    #[cfg(not(miri))] // Miri does not support panics
     fn assert_range_eq_can_fail_by_panic() {
         assert_range_eq!("abc", 0..5, "abc");
     }
@@ -361,7 +360,6 @@ mod slice_index {
     //  to be used in `should_panic`)
     #[test]
     #[should_panic(expected = "==")]
-    #[cfg(not(miri))] // Miri does not support panics
     fn assert_range_eq_can_fail_by_inequality() {
         assert_range_eq!("abc", 0..2, "abc");
     }
@@ -409,7 +407,6 @@ mod slice_index {
 
                 #[test]
                 #[should_panic(expected = $expect_msg)]
-                #[cfg(not(miri))] // Miri does not support panics
                 fn index_fail() {
                     let v: String = $data.into();
                     let v: &str = &v;
@@ -418,7 +415,6 @@ mod slice_index {
 
                 #[test]
                 #[should_panic(expected = $expect_msg)]
-                #[cfg(not(miri))] // Miri does not support panics
                 fn index_mut_fail() {
                     let mut v: String = $data.into();
                     let v: &mut str = &mut v;
@@ -514,7 +510,6 @@ mod slice_index {
 
     #[test]
     #[should_panic]
-    #[cfg(not(miri))] // Miri does not support panics
     fn test_slice_fail() {
         &"中华Việt Nam"[0..2];
     }
@@ -666,14 +661,12 @@ mod slice_index {
     // check the panic includes the prefix of the sliced string
     #[test]
     #[should_panic(expected="byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")]
-    #[cfg(not(miri))] // Miri does not support panics
     fn test_slice_fail_truncated_1() {
         &LOREM_PARAGRAPH[..1024];
     }
     // check the truncation in the panic message
     #[test]
     #[should_panic(expected="luctus, im`[...]")]
-    #[cfg(not(miri))] // Miri does not support panics
     fn test_slice_fail_truncated_2() {
         &LOREM_PARAGRAPH[..1024];
     }
@@ -688,7 +681,6 @@ fn test_str_slice_rangetoinclusive_ok() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_str_slice_rangetoinclusive_notok() {
     let s = "abcαβγ";
     &s[..=3];
@@ -704,7 +696,6 @@ fn test_str_slicemut_rangetoinclusive_ok() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_str_slicemut_rangetoinclusive_notok() {
     let mut s = "abcαβγ".to_owned();
     let s: &mut str = &mut s;
@@ -894,7 +885,6 @@ fn test_as_bytes() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_as_bytes_fail() {
     // Don't double free. (I'm not sure if this exercises the
     // original problem code path anymore.)
@@ -984,7 +974,6 @@ fn test_split_at_mut() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_split_at_boundscheck() {
     let s = "ศไทย中华Việt Nam";
     s.split_at(1);
@@ -1120,6 +1109,16 @@ fn test_iterator_last() {
 }
 
 #[test]
+fn test_chars_debug() {
+    let s = "ศไทย中华Việt Nam";
+    let c = s.chars();
+    assert_eq!(
+        format!("{:?}", c),
+        r#"Chars(['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'])"#
+    );
+}
+
+#[test]
 fn test_bytesator() {
     let s = "ศไทย中华Việt Nam";
     let v = [
diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs
index 7e93d84fe3b..765210e5aa6 100644
--- a/src/liballoc/tests/string.rs
+++ b/src/liballoc/tests/string.rs
@@ -54,11 +54,11 @@ fn test_from_utf8() {
 #[test]
 fn test_from_utf8_lossy() {
     let xs = b"hello";
-    let ys: Cow<str> = "hello".into_cow();
+    let ys: Cow<'_, str> = "hello".into_cow();
     assert_eq!(String::from_utf8_lossy(xs), ys);
 
     let xs = "ศไทย中华Việt Nam".as_bytes();
-    let ys: Cow<str> = "ศไทย中华Việt Nam".into_cow();
+    let ys: Cow<'_, str> = "ศไทย中华Việt Nam".into_cow();
     assert_eq!(String::from_utf8_lossy(xs), ys);
 
     let xs = b"Hello\xC2 There\xFF Goodbye";
@@ -231,7 +231,6 @@ fn test_split_off_empty() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_split_off_past_end() {
     let orig = "Hello, world!";
     let mut split = String::from(orig);
@@ -240,7 +239,6 @@ fn test_split_off_past_end() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_split_off_mid_char() {
     let mut orig = String::from("山");
     orig.split_off(1);
@@ -289,7 +287,6 @@ fn test_str_truncate_invalid_len() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_str_truncate_split_codepoint() {
     let mut s = String::from("\u{FC}"); // ü
     s.truncate(1);
@@ -324,7 +321,6 @@ fn remove() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn remove_bad() {
     "ศ".to_string().remove(1);
 }
@@ -360,13 +356,11 @@ fn insert() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn insert_bad1() {
     "".to_string().insert(1, 't');
 }
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn insert_bad2() {
     "ệ".to_string().insert(1, 't');
 }
@@ -447,7 +441,6 @@ fn test_replace_range() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_replace_range_char_boundary() {
     let mut s = "Hello, 世界!".to_owned();
     s.replace_range(..8, "");
@@ -464,7 +457,6 @@ fn test_replace_range_inclusive_range() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_replace_range_out_of_bounds() {
     let mut s = String::from("12345");
     s.replace_range(5..6, "789");
@@ -472,7 +464,6 @@ fn test_replace_range_out_of_bounds() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_replace_range_inclusive_out_of_bounds() {
     let mut s = String::from("12345");
     s.replace_range(5..=5, "789");
diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs
index 6e4ca1d90e6..6e8ffe18522 100644
--- a/src/liballoc/tests/vec.rs
+++ b/src/liballoc/tests/vec.rs
@@ -1,5 +1,3 @@
-#![cfg(not(miri))]
-
 use std::borrow::Cow;
 use std::mem::size_of;
 use std::{usize, isize};
@@ -368,7 +366,6 @@ fn test_vec_truncate_drop() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_vec_truncate_fail() {
     struct BadElem(i32);
     impl Drop for BadElem {
@@ -392,7 +389,6 @@ fn test_index() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_index_out_of_bounds() {
     let vec = vec![1, 2, 3];
     let _ = vec[3];
@@ -400,7 +396,6 @@ fn test_index_out_of_bounds() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_slice_out_of_bounds_1() {
     let x = vec![1, 2, 3, 4, 5];
     &x[!0..];
@@ -408,7 +403,6 @@ fn test_slice_out_of_bounds_1() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_slice_out_of_bounds_2() {
     let x = vec![1, 2, 3, 4, 5];
     &x[..6];
@@ -416,7 +410,6 @@ fn test_slice_out_of_bounds_2() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_slice_out_of_bounds_3() {
     let x = vec![1, 2, 3, 4, 5];
     &x[!0..4];
@@ -424,7 +417,6 @@ fn test_slice_out_of_bounds_3() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_slice_out_of_bounds_4() {
     let x = vec![1, 2, 3, 4, 5];
     &x[1..6];
@@ -432,7 +424,6 @@ fn test_slice_out_of_bounds_4() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_slice_out_of_bounds_5() {
     let x = vec![1, 2, 3, 4, 5];
     &x[3..2];
@@ -440,7 +431,6 @@ fn test_slice_out_of_bounds_5() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_swap_remove_empty() {
     let mut vec = Vec::<i32>::new();
     vec.swap_remove(0);
@@ -511,7 +501,6 @@ fn test_drain_items_zero_sized() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_drain_out_of_bounds() {
     let mut v = vec![1, 2, 3, 4, 5];
     v.drain(5..6);
@@ -585,7 +574,6 @@ fn test_drain_max_vec_size() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_drain_inclusive_out_of_bounds() {
     let mut v = vec![1, 2, 3, 4, 5];
     v.drain(5..=5);
@@ -615,7 +603,6 @@ fn test_splice_inclusive_range() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_splice_out_of_bounds() {
     let mut v = vec![1, 2, 3, 4, 5];
     let a = [10, 11, 12];
@@ -624,7 +611,6 @@ fn test_splice_out_of_bounds() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_splice_inclusive_out_of_bounds() {
     let mut v = vec![1, 2, 3, 4, 5];
     let a = [10, 11, 12];
@@ -959,6 +945,115 @@ fn drain_filter_complex() {
 }
 
 #[test]
+#[cfg(not(miri))] // Miri does not support catching panics
+fn drain_filter_consumed_panic() {
+    use std::rc::Rc;
+    use std::sync::Mutex;
+
+    struct Check {
+        index: usize,
+        drop_counts: Rc<Mutex<Vec<usize>>>,
+    }
+
+    impl Drop for Check {
+        fn drop(&mut self) {
+            self.drop_counts.lock().unwrap()[self.index] += 1;
+            println!("drop: {}", self.index);
+        }
+    }
+
+    let check_count = 10;
+    let drop_counts = Rc::new(Mutex::new(vec![0_usize; check_count]));
+    let mut data: Vec<Check> = (0..check_count)
+        .map(|index| Check { index, drop_counts: Rc::clone(&drop_counts) })
+        .collect();
+
+    let _ = std::panic::catch_unwind(move || {
+        let filter = |c: &mut Check| {
+            if c.index == 2 {
+                panic!("panic at index: {}", c.index);
+            }
+            // Verify that if the filter could panic again on another element
+            // that it would not cause a double panic and all elements of the
+            // vec would still be dropped exactly once.
+            if c.index == 4 {
+                panic!("panic at index: {}", c.index);
+            }
+            c.index < 6
+        };
+        let drain = data.drain_filter(filter);
+
+        // NOTE: The DrainFilter is explicitly consumed
+        drain.for_each(drop);
+    });
+
+    let drop_counts = drop_counts.lock().unwrap();
+    assert_eq!(check_count, drop_counts.len());
+
+    for (index, count) in drop_counts.iter().cloned().enumerate() {
+        assert_eq!(1, count, "unexpected drop count at index: {} (count: {})", index, count);
+    }
+}
+
+#[test]
+#[cfg(not(miri))] // Miri does not support catching panics
+fn drain_filter_unconsumed_panic() {
+    use std::rc::Rc;
+    use std::sync::Mutex;
+
+    struct Check {
+        index: usize,
+        drop_counts: Rc<Mutex<Vec<usize>>>,
+    }
+
+    impl Drop for Check {
+        fn drop(&mut self) {
+            self.drop_counts.lock().unwrap()[self.index] += 1;
+            println!("drop: {}", self.index);
+        }
+    }
+
+    let check_count = 10;
+    let drop_counts = Rc::new(Mutex::new(vec![0_usize; check_count]));
+    let mut data: Vec<Check> = (0..check_count)
+        .map(|index| Check { index, drop_counts: Rc::clone(&drop_counts) })
+        .collect();
+
+    let _ = std::panic::catch_unwind(move || {
+        let filter = |c: &mut Check| {
+            if c.index == 2 {
+                panic!("panic at index: {}", c.index);
+            }
+            // Verify that if the filter could panic again on another element
+            // that it would not cause a double panic and all elements of the
+            // vec would still be dropped exactly once.
+            if c.index == 4 {
+                panic!("panic at index: {}", c.index);
+            }
+            c.index < 6
+        };
+        let _drain = data.drain_filter(filter);
+
+        // NOTE: The DrainFilter is dropped without being consumed
+    });
+
+    let drop_counts = drop_counts.lock().unwrap();
+    assert_eq!(check_count, drop_counts.len());
+
+    for (index, count) in drop_counts.iter().cloned().enumerate() {
+        assert_eq!(1, count, "unexpected drop count at index: {} (count: {})", index, count);
+    }
+}
+
+#[test]
+fn drain_filter_unconsumed() {
+    let mut vec = vec![1, 2, 3, 4];
+    let drain = vec.drain_filter(|&mut x| x % 2 != 0);
+    drop(drain);
+    assert_eq!(vec, [2, 4]);
+}
+
+#[test]
 fn test_reserve_exact() {
     // This is all the same as test_reserve
 
@@ -983,6 +1078,7 @@ fn test_reserve_exact() {
 }
 
 #[test]
+#[cfg(not(miri))] // Miri does not support signalling OOM
 fn test_try_reserve() {
 
     // These are the interesting cases:
@@ -1085,6 +1181,7 @@ fn test_try_reserve() {
 }
 
 #[test]
+#[cfg(not(miri))] // Miri does not support signalling OOM
 fn test_try_reserve_exact() {
 
     // This is exactly the same as test_try_reserve with the method changed.
@@ -1163,3 +1260,24 @@ fn test_try_reserve_exact() {
     }
 
 }
+
+#[test]
+fn test_stable_push_pop() {
+    // Test that, if we reserved enough space, adding and removing elements does not
+    // invalidate references into the vector (such as `v0`).  This test also
+    // runs in Miri, which would detect such problems.
+    let mut v = Vec::with_capacity(10);
+    v.push(13);
+
+    // laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
+    let v0 = unsafe { &*(&v[0] as *const _) };
+
+    // Now do a bunch of things and occasionally use `v0` again to assert it is still valid.
+    v.push(1);
+    v.push(2);
+    v.insert(1, 1);
+    assert_eq!(*v0, 13);
+    v.remove(1);
+    v.pop().unwrap();
+    assert_eq!(*v0, 13);
+}
diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs
index 16ddc1444fc..1bbcca97b3c 100644
--- a/src/liballoc/tests/vec_deque.rs
+++ b/src/liballoc/tests/vec_deque.rs
@@ -44,7 +44,6 @@ fn test_simple() {
     assert_eq!(d[3], 4);
 }
 
-#[cfg(test)]
 fn test_parameterized<T: Clone + PartialEq + Debug>(a: T, b: T, c: T, d: T) {
     let mut deq = VecDeque::new();
     assert_eq!(deq.len(), 0);
@@ -108,7 +107,6 @@ fn test_index() {
 
 #[test]
 #[should_panic]
-#[cfg(not(miri))] // Miri does not support panics
 fn test_index_out_of_bounds() {
     let mut deq = VecDeque::new();
     for i in 1..4 {
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index cd62c3e0524..dac04e4e624 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -56,6 +56,7 @@
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
+use core::array::LengthAtMost32;
 use core::cmp::{self, Ordering};
 use core::fmt;
 use core::hash::{self, Hash};
@@ -432,7 +433,7 @@ impl<T> Vec<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn capacity(&self) -> usize {
-        self.buf.cap()
+        self.buf.capacity()
     }
 
     /// Reserves capacity for at least `additional` more elements to be inserted
@@ -735,6 +736,75 @@ impl<T> Vec<T> {
         self
     }
 
+    /// Returns a raw pointer to the vector's buffer.
+    ///
+    /// The caller must ensure that the vector outlives the pointer this
+    /// function returns, or else it will end up pointing to garbage.
+    /// Modifying the vector may cause its buffer to be reallocated,
+    /// which would also make any pointers to it invalid.
+    ///
+    /// The caller must also ensure that the memory the pointer (non-transitively) points to
+    /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+    /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let x = vec![1, 2, 4];
+    /// let x_ptr = x.as_ptr();
+    ///
+    /// unsafe {
+    ///     for i in 0..x.len() {
+    ///         assert_eq!(*x_ptr.add(i), 1 << i);
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`as_mut_ptr`]: #method.as_mut_ptr
+    #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+    #[inline]
+    pub fn as_ptr(&self) -> *const T {
+        // We shadow the slice method of the same name to avoid going through
+        // `deref`, which creates an intermediate reference.
+        let ptr = self.buf.ptr();
+        unsafe { assume(!ptr.is_null()); }
+        ptr
+    }
+
+    /// Returns an unsafe mutable pointer to the vector's buffer.
+    ///
+    /// The caller must ensure that the vector outlives the pointer this
+    /// function returns, or else it will end up pointing to garbage.
+    /// Modifying the vector may cause its buffer to be reallocated,
+    /// which would also make any pointers to it invalid.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// // Allocate vector big enough for 4 elements.
+    /// let size = 4;
+    /// let mut x: Vec<i32> = Vec::with_capacity(size);
+    /// let x_ptr = x.as_mut_ptr();
+    ///
+    /// // Initialize elements via raw pointer writes, then set length.
+    /// unsafe {
+    ///     for i in 0..size {
+    ///         *x_ptr.add(i) = i as i32;
+    ///     }
+    ///     x.set_len(size);
+    /// }
+    /// assert_eq!(&*x, &[0, 1, 2, 3]);
+    /// ```
+    #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+    #[inline]
+    pub fn as_mut_ptr(&mut self) -> *mut T {
+        // We shadow the slice method of the same name to avoid going through
+        // `deref_mut`, which creates an intermediate reference.
+        let ptr = self.buf.ptr();
+        unsafe { assume(!ptr.is_null()); }
+        ptr
+    }
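+
+    // Background on the shadowing above (not stated in the docs): going
+    // through `deref`/`deref_mut` would materialize an intermediate slice
+    // reference, and under aliasing models such as Stacked Borrows a raw
+    // pointer derived from that temporary would only be valid for as long
+    // as the temporary itself.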
+
     /// Forces the length of the vector to `new_len`.
     ///
     /// This is a low-level operation that maintains none of the normal
@@ -878,7 +948,7 @@ impl<T> Vec<T> {
         assert!(index <= len);
 
         // space for the new element
-        if len == self.buf.cap() {
+        if len == self.buf.capacity() {
             self.reserve(1);
         }
 
@@ -937,8 +1007,8 @@ impl<T> Vec<T> {
     /// Retains only the elements specified by the predicate.
     ///
     /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
-    /// This method operates in place and preserves the order of the retained
-    /// elements.
+    /// This method operates in place, visiting each element exactly once in the
+    /// original order, and preserves the order of the retained elements.
     ///
     /// # Examples
     ///
@@ -947,6 +1017,16 @@ impl<T> Vec<T> {
     /// vec.retain(|&x| x%2 == 0);
     /// assert_eq!(vec, [2, 4]);
     /// ```
+    ///
+    /// The exact order may be useful for tracking external state, like an index.
+    ///
+    /// ```
+    /// let mut vec = vec![1, 2, 3, 4, 5];
+    /// let keep = [false, true, true, false, true];
+    /// let mut i = 0;
+    /// vec.retain(|_| (keep[i], i += 1).0);
+    /// assert_eq!(vec, [2, 3, 5]);
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn retain<F>(&mut self, mut f: F)
         where F: FnMut(&T) -> bool
@@ -1019,7 +1099,7 @@ impl<T> Vec<T> {
     pub fn push(&mut self, value: T) {
         // This will panic or abort if we would allocate > isize::MAX bytes
         // or if the length increment would overflow for zero-sized types.
-        if self.len == self.buf.cap() {
+        if self.len == self.buf.capacity() {
             self.reserve(1);
         }
         unsafe {
@@ -1084,7 +1164,7 @@ impl<T> Vec<T> {
         let count = (*other).len();
         self.reserve(count);
         let len = self.len();
-        ptr::copy_nonoverlapping(other as *const T, self.get_unchecked_mut(len), count);
+        ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count);
         self.len += count;
     }
 
@@ -1288,6 +1368,40 @@ impl<T> Vec<T> {
             self.truncate(new_len);
         }
     }
+
+    /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
+    /// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
+    /// `'a`. If the type has only static references, or none at all, then this
+    /// may be chosen to be `'static`.
+    ///
+    /// This function is similar to the `leak` function on `Box`.
+    ///
+    /// This function is mainly useful for data that lives for the remainder of
+    /// the program's life. Dropping the returned reference will cause a memory
+    /// leak.
+    ///
+    /// # Examples
+    ///
+    /// Simple usage:
+    ///
+    /// ```
+    /// #![feature(vec_leak)]
+    ///
+    /// fn main() {
+    ///     let x = vec![1, 2, 3];
+    ///     let static_ref: &'static mut [usize] = Vec::leak(x);
+    ///     static_ref[0] += 1;
+    ///     assert_eq!(static_ref, &[2, 2, 3]);
+    /// }
+    /// ```
+    #[unstable(feature = "vec_leak", issue = "62195")]
+    #[inline]
+    pub fn leak<'a>(vec: Vec<T>) -> &'a mut [T]
+    where
+        T: 'a // Technically not needed, but kept to be explicit.
+    {
+        Box::leak(vec.into_boxed_slice())
+    }
 }
 
 impl<T: Clone> Vec<T> {
@@ -1696,9 +1810,7 @@ impl<T> ops::Deref for Vec<T> {
 
     fn deref(&self) -> &[T] {
         unsafe {
-            let p = self.buf.ptr();
-            assume(!p.is_null());
-            slice::from_raw_parts(p, self.len)
+            slice::from_raw_parts(self.as_ptr(), self.len)
         }
     }
 }
@@ -1707,9 +1819,7 @@ impl<T> ops::Deref for Vec<T> {
 impl<T> ops::DerefMut for Vec<T> {
     fn deref_mut(&mut self) -> &mut [T] {
         unsafe {
-            let ptr = self.buf.ptr();
-            assume(!ptr.is_null());
-            slice::from_raw_parts_mut(ptr, self.len)
+            slice::from_raw_parts_mut(self.as_mut_ptr(), self.len)
         }
     }
 }
@@ -1744,13 +1854,12 @@ impl<T> IntoIterator for Vec<T> {
     fn into_iter(mut self) -> IntoIter<T> {
         unsafe {
             let begin = self.as_mut_ptr();
-            assume(!begin.is_null());
             let end = if mem::size_of::<T>() == 0 {
                 arith_offset(begin as *const i8, self.len() as isize) as *const T
             } else {
                 begin.add(self.len()) as *const T
             };
-            let cap = self.buf.cap();
+            let cap = self.buf.capacity();
             mem::forget(self);
             IntoIter {
                 buf: NonNull::new_unchecked(begin),
@@ -1944,16 +2053,14 @@ impl<T> Vec<T> {
     /// with the given `replace_with` iterator and yields the removed items.
     /// `replace_with` does not need to be the same length as `range`.
     ///
-    /// Note 1: The element range is removed even if the iterator is not
-    /// consumed until the end.
+    /// The element range is removed even if the iterator is not consumed until the end.
     ///
-    /// Note 2: It is unspecified how many elements are removed from the vector,
+    /// It is unspecified how many elements are removed from the vector
     /// if the `Splice` value is leaked.
     ///
-    /// Note 3: The input iterator `replace_with` is only consumed
-    /// when the `Splice` value is dropped.
+    /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
     ///
-    /// Note 4: This is optimal if:
+    /// This is optimal if:
     ///
     /// * The tail (elements in the vector after `range`) is empty,
     /// * or `replace_with` yields fewer elements than `range`’s length
@@ -2046,6 +2153,7 @@ impl<T> Vec<T> {
             del: 0,
             old_len,
             pred: filter,
+            panic_flag: false,
         }
     }
 }
@@ -2064,47 +2172,36 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for Vec<T> {
 }
 
 macro_rules! __impl_slice_eq1 {
-    ($Lhs: ty, $Rhs: ty) => {
-        __impl_slice_eq1! { $Lhs, $Rhs, Sized }
-    };
-    ($Lhs: ty, $Rhs: ty, $Bound: ident) => {
+    ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> {
+        impl<A, B, $($vars)*> PartialEq<$rhs> for $lhs
+        where
+            A: PartialEq<B>,
+            $($constraints)*
+        {
             #[inline]
-            fn eq(&self, other: &$Rhs) -> bool { self[..] == other[..] }
+            fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
             #[inline]
-            fn ne(&self, other: &$Rhs) -> bool { self[..] != other[..] }
+            fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
         }
     }
 }
 
-__impl_slice_eq1! { Vec<A>, Vec<B> }
-__impl_slice_eq1! { Vec<A>, &'b [B] }
-__impl_slice_eq1! { Vec<A>, &'b mut [B] }
-__impl_slice_eq1! { Cow<'a, [A]>, &'b [B], Clone }
-__impl_slice_eq1! { Cow<'a, [A]>, &'b mut [B], Clone }
-__impl_slice_eq1! { Cow<'a, [A]>, Vec<B>, Clone }
+__impl_slice_eq1! { [] Vec<A>, Vec<B>, }
+__impl_slice_eq1! { [] Vec<A>, &[B], }
+__impl_slice_eq1! { [] Vec<A>, &mut [B], }
+__impl_slice_eq1! { [] Cow<'_, [A]>, &[B], A: Clone }
+__impl_slice_eq1! { [] Cow<'_, [A]>, &mut [B], A: Clone }
+__impl_slice_eq1! { [] Cow<'_, [A]>, Vec<B>, A: Clone }
+__impl_slice_eq1! { [const N: usize] Vec<A>, [B; N], [B; N]: LengthAtMost32 }
+__impl_slice_eq1! { [const N: usize] Vec<A>, &[B; N], [B; N]: LengthAtMost32 }
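+
+// For illustration, assuming direct expansion of the macro above: the first
+// `[const N: usize]` invocation generates roughly this impl, with the
+// array-length bound carried through the trailing constraint list:
+//
+//     impl<A, B, const N: usize> PartialEq<[B; N]> for Vec<A>
+//     where
+//         A: PartialEq<B>,
+//         [B; N]: LengthAtMost32,
+//     {
+//         fn eq(&self, other: &[B; N]) -> bool { self[..] == other[..] }
+//         fn ne(&self, other: &[B; N]) -> bool { self[..] != other[..] }
+//     }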
 
-macro_rules! array_impls {
-    ($($N: expr)+) => {
-        $(
-            // NOTE: some less important impls are omitted to reduce code bloat
-            __impl_slice_eq1! { Vec<A>, [B; $N] }
-            __impl_slice_eq1! { Vec<A>, &'b [B; $N] }
-            // __impl_slice_eq1! { Vec<A>, &'b mut [B; $N] }
-            // __impl_slice_eq1! { Cow<'a, [A]>, [B; $N], Clone }
-            // __impl_slice_eq1! { Cow<'a, [A]>, &'b [B; $N], Clone }
-            // __impl_slice_eq1! { Cow<'a, [A]>, &'b mut [B; $N], Clone }
-        )+
-    }
-}
-
-array_impls! {
-     0  1  2  3  4  5  6  7  8  9
-    10 11 12 13 14 15 16 17 18 19
-    20 21 22 23 24 25 26 27 28 29
-    30 31 32
-}
+// NOTE: some less important impls are omitted to reduce code bloat
+// FIXME(Centril): Reconsider this?
+//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], [B; N]: LengthAtMost32 }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], [B; N]: LengthAtMost32 }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], [B; N]: LengthAtMost32 }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], [B; N]: LengthAtMost32 }
 
 /// Implements comparison of vectors, lexicographically.
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -2673,10 +2770,20 @@ pub struct DrainFilter<'a, T, F>
     where F: FnMut(&mut T) -> bool,
 {
     vec: &'a mut Vec<T>,
+    /// The index of the item that will be inspected by the next call to `next`.
     idx: usize,
+    /// The number of items that have been drained (removed) thus far.
     del: usize,
+    /// The original length of `vec` prior to draining.
     old_len: usize,
+    /// The filter test predicate.
     pred: F,
+    /// A flag that indicates a panic has occurred in the filter test predicate.
+    /// This is used as a hint in the drop implementation to prevent consumption
+    /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+    /// backshifted in the `vec`, but no further items will be dropped or
+    /// tested by the filter predicate.
+    panic_flag: bool,
 }
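+
+// A minimal usage sketch (caller code assumed, not part of this module): after
+// a predicate panic, dropping the `DrainFilter` must not resume filtering.
+//
+//     let mut v = vec![1, 2, 3];
+//     let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+//         // Panics on 2: 1 was already tested and kept, 3 is never visited.
+//         v.drain_filter(|x| if *x == 2 { panic!() } else { false });
+//     }));
+//     assert_eq!(v, [1, 2, 3]); // nothing drained, unprocessed tail preserved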
 
 #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
@@ -2687,20 +2794,23 @@ impl<T, F> Iterator for DrainFilter<'_, T, F>
 
     fn next(&mut self) -> Option<T> {
         unsafe {
-            while self.idx != self.old_len {
+            while self.idx < self.old_len {
                 let i = self.idx;
-                self.idx += 1;
                 let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
-                if (self.pred)(&mut v[i]) {
+                self.panic_flag = true;
+                let drained = (self.pred)(&mut v[i]);
+                self.panic_flag = false;
+                // Update the index *after* the predicate is called. If the
+                // index were updated first and the predicate panicked, the
+                // element at this index would be leaked.
+                self.idx += 1;
+                if drained {
                     self.del += 1;
                     return Some(ptr::read(&v[i]));
                 } else if self.del > 0 {
                     let del = self.del;
                     let src: *const T = &v[i];
                     let dst: *mut T = &mut v[i - del];
-                    // This is safe because self.vec has length 0
-                    // thus its elements will not have Drop::drop
-                    // called on them in the event of a panic.
                     ptr::copy_nonoverlapping(src, dst, 1);
                 }
             }
@@ -2718,9 +2828,46 @@ impl<T, F> Drop for DrainFilter<'_, T, F>
     where F: FnMut(&mut T) -> bool,
 {
     fn drop(&mut self) {
-        self.for_each(drop);
-        unsafe {
-            self.vec.set_len(self.old_len - self.del);
+        struct BackshiftOnDrop<'a, 'b, T, F>
+            where
+                F: FnMut(&mut T) -> bool,
+        {
+            drain: &'b mut DrainFilter<'a, T, F>,
+        }
+
+        impl<'a, 'b, T, F> Drop for BackshiftOnDrop<'a, 'b, T, F>
+            where
+                F: FnMut(&mut T) -> bool
+        {
+            fn drop(&mut self) {
+                unsafe {
+                    if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+                        // This is a pretty messed up state, and there isn't really an
+                        // obviously right thing to do. We don't want to keep trying
+                        // to execute `pred`, so we just backshift all the unprocessed
+                        // elements and tell the vec that they still exist. The backshift
+                        // is required to prevent a double-drop of the last successfully
+                        // drained item prior to a panic in the predicate.
+                        let ptr = self.drain.vec.as_mut_ptr();
+                        let src = ptr.add(self.drain.idx);
+                        let dst = src.sub(self.drain.del);
+                        let tail_len = self.drain.old_len - self.drain.idx;
+                        src.copy_to(dst, tail_len);
+                    }
+                    self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+                }
+            }
+        }
+
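+        // `BackshiftOnDrop` is a drop-guard: its `drop` above runs even if the
+        // `for_each(drop)` below unwinds, so `vec` is always restored to a
+        // coherent length with any unprocessed tail shifted into place.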
+        let backshift = BackshiftOnDrop {
+            drain: self
+        };
+
+        // Attempt to consume any remaining elements if the filter predicate
+        // has not yet panicked. We'll backshift any remaining elements
+        // whether we've already panicked or if the consumption here panics.
+        if !backshift.drain.panic_flag {
+            backshift.drain.for_each(drop);
         }
     }
 }