Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/alloc.rs                        | 106
-rw-r--r--  src/liballoc/alloc/tests.rs                  |   9
-rw-r--r--  src/liballoc/benches/btree/set.rs            |  32
-rw-r--r--  src/liballoc/benches/lib.rs                  |   1
-rw-r--r--  src/liballoc/boxed.rs                        |  65
-rw-r--r--  src/liballoc/collections/btree/map.rs        | 412
-rw-r--r--  src/liballoc/collections/btree/node.rs       | 222
-rw-r--r--  src/liballoc/collections/btree/search.rs     |  17
-rw-r--r--  src/liballoc/collections/btree/set.rs        | 101
-rw-r--r--  src/liballoc/collections/linked_list.rs      |  10
-rw-r--r--  src/liballoc/collections/mod.rs              |  18
-rw-r--r--  src/liballoc/collections/vec_deque.rs        | 208
-rw-r--r--  src/liballoc/collections/vec_deque/tests.rs  |  83
-rw-r--r--  src/liballoc/fmt.rs                          |  15
-rw-r--r--  src/liballoc/lib.rs                          |   5
-rw-r--r--  src/liballoc/macros.rs                       |   6
-rw-r--r--  src/liballoc/raw_vec.rs                      | 657
-rw-r--r--  src/liballoc/raw_vec/tests.rs                |   9
-rw-r--r--  src/liballoc/rc.rs                           |  31
-rw-r--r--  src/liballoc/slice.rs                        |   5
-rw-r--r--  src/liballoc/str.rs                          |   2
-rw-r--r--  src/liballoc/string.rs                       |  29
-rw-r--r--  src/liballoc/sync.rs                         |  56
-rw-r--r--  src/liballoc/task.rs                         |  89
-rw-r--r--  src/liballoc/tests/btree/map.rs              | 376
-rw-r--r--  src/liballoc/tests/btree/set.rs              |  81
-rw-r--r--  src/liballoc/tests/heap.rs                   |  10
-rw-r--r--  src/liballoc/tests/lib.rs                    |   1
-rw-r--r--  src/liballoc/tests/slice.rs                  |   4
-rw-r--r--  src/liballoc/tests/string.rs                 |   4
-rw-r--r--  src/liballoc/vec.rs                          |  42
31 files changed, 1835 insertions, 871 deletions
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index 73e8121868a..66575e3ef55 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -2,7 +2,7 @@
 
 #![stable(feature = "alloc_module", since = "1.28.0")]
 
-use core::intrinsics::{min_align_of_val, size_of_val};
+use core::intrinsics::{self, min_align_of_val, size_of_val};
 use core::ptr::{NonNull, Unique};
 use core::usize;
 
@@ -165,28 +165,97 @@ pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
 #[unstable(feature = "allocator_api", issue = "32838")]
 unsafe impl AllocRef for Global {
     #[inline]
-    unsafe fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
-        NonNull::new(alloc(layout)).ok_or(AllocErr).map(|p| (p, layout.size()))
+    fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
+        unsafe {
+            let size = layout.size();
+            if size == 0 {
+                Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
+            } else {
+                let raw_ptr = match init {
+                    AllocInit::Uninitialized => alloc(layout),
+                    AllocInit::Zeroed => alloc_zeroed(layout),
+                };
+                let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
+                Ok(MemoryBlock { ptr, size })
+            }
+        }
     }
 
     #[inline]
     unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
-        dealloc(ptr.as_ptr(), layout)
+        if layout.size() != 0 {
+            dealloc(ptr.as_ptr(), layout)
+        }
     }
 
     #[inline]
-    unsafe fn realloc(
+    unsafe fn grow(
         &mut self,
         ptr: NonNull<u8>,
         layout: Layout,
         new_size: usize,
-    ) -> Result<(NonNull<u8>, usize), AllocErr> {
-        NonNull::new(realloc(ptr.as_ptr(), layout, new_size)).ok_or(AllocErr).map(|p| (p, new_size))
+        placement: ReallocPlacement,
+        init: AllocInit,
+    ) -> Result<MemoryBlock, AllocErr> {
+        let size = layout.size();
+        debug_assert!(
+            new_size >= size,
+            "`new_size` must be greater than or equal to `memory.size()`"
+        );
+
+        if size == new_size {
+            return Ok(MemoryBlock { ptr, size });
+        }
+
+        match placement {
+            ReallocPlacement::InPlace => Err(AllocErr),
+            ReallocPlacement::MayMove if layout.size() == 0 => {
+                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                self.alloc(new_layout, init)
+            }
+            ReallocPlacement::MayMove => {
+                // `realloc` probably checks for `new_size > size` or something similar.
+                intrinsics::assume(new_size > size);
+                let ptr = realloc(ptr.as_ptr(), layout, new_size);
+                let memory =
+                    MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size };
+                init.init_offset(memory, size);
+                Ok(memory)
+            }
+        }
     }
 
     #[inline]
-    unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
-        NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr).map(|p| (p, layout.size()))
+    unsafe fn shrink(
+        &mut self,
+        ptr: NonNull<u8>,
+        layout: Layout,
+        new_size: usize,
+        placement: ReallocPlacement,
+    ) -> Result<MemoryBlock, AllocErr> {
+        let size = layout.size();
+        debug_assert!(
+            new_size <= size,
+            "`new_size` must be smaller than or equal to `memory.size()`"
+        );
+
+        if size == new_size {
+            return Ok(MemoryBlock { ptr, size });
+        }
+
+        match placement {
+            ReallocPlacement::InPlace => Err(AllocErr),
+            ReallocPlacement::MayMove if new_size == 0 => {
+                self.dealloc(ptr, layout);
+                Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
+            }
+            ReallocPlacement::MayMove => {
+                // `realloc` probably checks for `new_size < size` or something similar.
+                intrinsics::assume(new_size < size);
+                let ptr = realloc(ptr.as_ptr(), layout, new_size);
+                Ok(MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size })
+            }
+        }
     }
 }
 
@@ -196,14 +265,10 @@ unsafe impl AllocRef for Global {
 #[lang = "exchange_malloc"]
 #[inline]
 unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
-    if size == 0 {
-        align as *mut u8
-    } else {
-        let layout = Layout::from_size_align_unchecked(size, align);
-        match Global.alloc(layout) {
-            Ok((ptr, _)) => ptr.as_ptr(),
-            Err(_) => handle_alloc_error(layout),
-        }
+    let layout = Layout::from_size_align_unchecked(size, align);
+    match Global.alloc(layout, AllocInit::Uninitialized) {
+        Ok(memory) => memory.ptr.as_ptr(),
+        Err(_) => handle_alloc_error(layout),
     }
 }
 
@@ -217,11 +282,8 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
 pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
     let size = size_of_val(ptr.as_ref());
     let align = min_align_of_val(ptr.as_ref());
-    // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
-    if size != 0 {
-        let layout = Layout::from_size_align_unchecked(size, align);
-        Global.dealloc(ptr.cast().into(), layout);
-    }
+    let layout = Layout::from_size_align_unchecked(size, align);
+    Global.dealloc(ptr.cast().into(), layout)
 }
 
 /// Abort on memory allocation error or failure.
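The hunks above split the old `alloc`/`alloc_zeroed`/`realloc` methods into `alloc(layout, init)`, `grow`, and `shrink`, and move zero-size handling into the `Global` impl itself. A minimal sketch of how nightly code might exercise the reworked trait, assuming the unstable `allocator_api` items (`AllocRef`, `AllocInit`, `ReallocPlacement`, `MemoryBlock`) are reachable through `std::alloc` on a contemporary nightly; not part of the diff itself:

#![feature(allocator_api)]

use std::alloc::{AllocInit, AllocRef, Global, Layout, ReallocPlacement};

fn main() {
    let layout = Layout::from_size_align(16, 1).unwrap();
    // `alloc` is no longer `unsafe` and takes the initialization policy as an
    // argument; zero-sized layouts are handled here too (dangling pointer, size 0).
    let block = Global.alloc(layout, AllocInit::Zeroed).expect("allocation failed");
    unsafe {
        // Growing may move the block; the added tail is zeroed only when requested.
        let grown = Global
            .grow(block.ptr, layout, 32, ReallocPlacement::MayMove, AllocInit::Zeroed)
            .expect("grow failed");
        Global.dealloc(grown.ptr, Layout::from_size_align(32, layout.align()).unwrap());
    }
}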
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs
index 55944398e16..1ad40eca93b 100644
--- a/src/liballoc/alloc/tests.rs
+++ b/src/liballoc/alloc/tests.rs
@@ -8,16 +8,17 @@ use test::Bencher;
 fn allocate_zeroed() {
     unsafe {
         let layout = Layout::from_size_align(1024, 1).unwrap();
-        let (ptr, _) =
-            Global.alloc_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout));
+        let memory = Global
+            .alloc(layout.clone(), AllocInit::Zeroed)
+            .unwrap_or_else(|_| handle_alloc_error(layout));
 
-        let mut i = ptr.cast::<u8>().as_ptr();
+        let mut i = memory.ptr.cast::<u8>().as_ptr();
         let end = i.add(layout.size());
         while i < end {
             assert_eq!(*i, 0);
             i = i.offset(1);
         }
-        Global.dealloc(ptr, layout);
+        Global.dealloc(memory.ptr, layout);
     }
 }
 
diff --git a/src/liballoc/benches/btree/set.rs b/src/liballoc/benches/btree/set.rs
index d9e75ab7fa4..2518506b9b5 100644
--- a/src/liballoc/benches/btree/set.rs
+++ b/src/liballoc/benches/btree/set.rs
@@ -63,6 +63,22 @@ pub fn clone_100_and_clear(b: &mut Bencher) {
 }
 
 #[bench]
+pub fn clone_100_and_drain_all(b: &mut Bencher) {
+    let src = pos(100);
+    b.iter(|| src.clone().drain_filter(|_| true).count())
+}
+
+#[bench]
+pub fn clone_100_and_drain_half(b: &mut Bencher) {
+    let src = pos(100);
+    b.iter(|| {
+        let mut set = src.clone();
+        assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 100 / 2);
+        assert_eq!(set.len(), 100 / 2);
+    })
+}
+
+#[bench]
 pub fn clone_100_and_into_iter(b: &mut Bencher) {
     let src = pos(100);
     b.iter(|| src.clone().into_iter().count())
@@ -116,6 +132,22 @@ pub fn clone_10k_and_clear(b: &mut Bencher) {
 }
 
 #[bench]
+pub fn clone_10k_and_drain_all(b: &mut Bencher) {
+    let src = pos(10_000);
+    b.iter(|| src.clone().drain_filter(|_| true).count())
+}
+
+#[bench]
+pub fn clone_10k_and_drain_half(b: &mut Bencher) {
+    let src = pos(10_000);
+    b.iter(|| {
+        let mut set = src.clone();
+        assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 10_000 / 2);
+        assert_eq!(set.len(), 10_000 / 2);
+    })
+}
+
+#[bench]
 pub fn clone_10k_and_into_iter(b: &mut Bencher) {
     let src = pos(10_000);
     b.iter(|| src.clone().into_iter().count())
diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs
index 951477a24c8..f31717d9fd5 100644
--- a/src/liballoc/benches/lib.rs
+++ b/src/liballoc/benches/lib.rs
@@ -1,3 +1,4 @@
+#![feature(btree_drain_filter)]
 #![feature(map_first_last)]
 #![feature(repr_simd)]
 #![feature(test)]
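The benchmarks added above exercise `BTreeSet::drain_filter`, gated behind the `btree_drain_filter` feature enabled in the previous hunk. A short usage sketch (nightly-only at this point), mirroring `clone_100_and_drain_half`:

#![feature(btree_drain_filter)]

use std::collections::BTreeSet;

fn main() {
    let mut set: BTreeSet<i32> = (0..100).collect();
    // Remove and collect the even elements; the odd elements stay in `set`.
    let evens: BTreeSet<i32> = set.drain_filter(|i| i % 2 == 0).collect();
    assert_eq!(evens.len(), 50);
    assert_eq!(set.len(), 50);
}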
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 9a7d0d9aeba..5406956a528 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -143,10 +143,9 @@ use core::ops::{
 };
 use core::pin::Pin;
 use core::ptr::{self, NonNull, Unique};
-use core::slice;
 use core::task::{Context, Poll};
 
-use crate::alloc::{self, AllocRef, Global};
+use crate::alloc::{self, AllocInit, AllocRef, Global};
 use crate::raw_vec::RawVec;
 use crate::str::from_boxed_utf8_unchecked;
 use crate::vec::Vec;
@@ -196,14 +195,12 @@ impl<T> Box<T> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
         let layout = alloc::Layout::new::<mem::MaybeUninit<T>>();
-        unsafe {
-            let ptr = if layout.size() == 0 {
-                NonNull::dangling()
-            } else {
-                Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast()
-            };
-            Box::from_raw(ptr.as_ptr())
-        }
+        let ptr = Global
+            .alloc(layout, AllocInit::Uninitialized)
+            .unwrap_or_else(|_| alloc::handle_alloc_error(layout))
+            .ptr
+            .cast();
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 
     /// Constructs a new `Box` with uninitialized contents, with the memory
@@ -226,11 +223,13 @@ impl<T> Box<T> {
     /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
     #[unstable(feature = "new_uninit", issue = "63291")]
     pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
-        unsafe {
-            let mut uninit = Self::new_uninit();
-            ptr::write_bytes::<T>(uninit.as_mut_ptr(), 0, 1);
-            uninit
-        }
+        let layout = alloc::Layout::new::<mem::MaybeUninit<T>>();
+        let ptr = Global
+            .alloc(layout, AllocInit::Zeroed)
+            .unwrap_or_else(|_| alloc::handle_alloc_error(layout))
+            .ptr
+            .cast();
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 
     /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
@@ -265,15 +264,7 @@ impl<T> Box<[T]> {
     /// ```
     #[unstable(feature = "new_uninit", issue = "63291")]
     pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
-        let layout = alloc::Layout::array::<mem::MaybeUninit<T>>(len).unwrap();
-        unsafe {
-            let ptr = if layout.size() == 0 {
-                NonNull::dangling()
-            } else {
-                Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast()
-            };
-            Box::from_raw(slice::from_raw_parts_mut(ptr.as_ptr(), len))
-        }
+        unsafe { RawVec::with_capacity(len).into_box(len) }
     }
 }
 
@@ -778,7 +769,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
         let buf = RawVec::with_capacity(len);
         unsafe {
             ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
-            buf.into_box()
+            buf.into_box(slice.len()).assume_init()
         }
     }
 }
@@ -1105,29 +1096,6 @@ impl<T: ?Sized> AsMut<T> for Box<T> {
 #[stable(feature = "pin", since = "1.33.0")]
 impl<T: ?Sized> Unpin for Box<T> {}
 
-#[cfg(bootstrap)]
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator + Unpin> Generator for Box<G> {
-    type Yield = G::Yield;
-    type Return = G::Return;
-
-    fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
-        G::resume(Pin::new(&mut *self))
-    }
-}
-
-#[cfg(bootstrap)]
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator> Generator for Pin<Box<G>> {
-    type Yield = G::Yield;
-    type Return = G::Return;
-
-    fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
-        G::resume((*self).as_mut())
-    }
-}
-
-#[cfg(not(bootstrap))]
 #[unstable(feature = "generator_trait", issue = "43122")]
 impl<G: ?Sized + Generator<R> + Unpin, R> Generator<R> for Box<G> {
     type Yield = G::Yield;
@@ -1138,7 +1106,6 @@ impl<G: ?Sized + Generator<R> + Unpin, R> Generator<R> for Box<G> {
     }
 }
 
-#[cfg(not(bootstrap))]
 #[unstable(feature = "generator_trait", issue = "43122")]
 impl<G: ?Sized + Generator<R>, R> Generator<R> for Pin<Box<G>> {
     type Yield = G::Yield;
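With the hunks above, `Box::new_uninit` and `Box::new_zeroed` route through `Global.alloc` with an `AllocInit` policy instead of special-casing zero-sized layouts or zeroing memory after an uninitialized allocation. The public-facing behavior is unchanged; a hedged sketch assuming the unstable `new_uninit` API as of this commit:

#![feature(new_uninit)]

use std::mem::MaybeUninit;

fn main() {
    // Internally this now requests an `AllocInit::Zeroed` allocation up front.
    let zeroed: Box<MaybeUninit<u32>> = Box::new_zeroed();
    // SAFETY: an all-zero bit pattern is a valid `u32`.
    let value: Box<u32> = unsafe { zeroed.assume_init() };
    assert_eq!(*value, 0);
}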
diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index 8b9ffdfb49b..bbeced1751d 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -122,7 +122,7 @@ use UnderflowResult::*;
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct BTreeMap<K, V> {
-    root: node::Root<K, V>,
+    root: Option<node::Root<K, V>>,
     length: usize,
 }
 
@@ -147,10 +147,11 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
         {
             match node.force() {
                 Leaf(leaf) => {
-                    let mut out_tree = BTreeMap { root: node::Root::new_leaf(), length: 0 };
+                    let mut out_tree = BTreeMap { root: Some(node::Root::new_leaf()), length: 0 };
 
                     {
-                        let mut out_node = match out_tree.root.as_mut().force() {
+                        let root = out_tree.root.as_mut().unwrap();
+                        let mut out_node = match root.as_mut().force() {
                             Leaf(leaf) => leaf,
                             Internal(_) => unreachable!(),
                         };
@@ -169,9 +170,14 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
                 }
                 Internal(internal) => {
                     let mut out_tree = clone_subtree(internal.first_edge().descend());
+                    out_tree.ensure_root_is_owned();
 
                     {
-                        let mut out_node = out_tree.root.push_level();
+                        // Ideally we'd use the return of ensure_root_is_owned
+                        // instead of re-unwrapping here but unfortunately that
+                        // borrows all of out_tree and we need access to the
+                        // length below.
+                        let mut out_node = out_tree.root.as_mut().unwrap().push_level();
                         let mut in_edge = internal.first_edge();
                         while let Ok(kv) = in_edge.right_kv() {
                             let (k, v) = kv.into_kv();
@@ -190,7 +196,7 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
                                 (root, length)
                             };
 
-                            out_node.push(k, v, subroot);
+                            out_node.push(k, v, subroot.unwrap_or_else(node::Root::new_leaf));
                             out_tree.length += 1 + sublength;
                         }
                     }
@@ -203,9 +209,9 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
         if self.is_empty() {
             // Ideally we'd call `BTreeMap::new` here, but that has the `K:
             // Ord` constraint, which this method lacks.
-            BTreeMap { root: node::Root::shared_empty_root(), length: 0 }
+            BTreeMap { root: None, length: 0 }
         } else {
-            clone_subtree(self.root.as_ref())
+            clone_subtree(self.root.as_ref().unwrap().as_ref())
         }
     }
 
@@ -271,14 +277,14 @@ where
     type Key = K;
 
     fn get(&self, key: &Q) -> Option<&K> {
-        match search::search_tree(self.root.as_ref(), key) {
+        match search::search_tree(self.root.as_ref()?.as_ref(), key) {
             Found(handle) => Some(handle.into_kv().0),
             GoDown(_) => None,
         }
     }
 
     fn take(&mut self, key: &Q) -> Option<K> {
-        match search::search_tree(self.root.as_mut(), key) {
+        match search::search_tree(self.root.as_mut()?.as_mut(), key) {
             Found(handle) => Some(
                 OccupiedEntry { handle, length: &mut self.length, _marker: PhantomData }
                     .remove_kv()
@@ -290,7 +296,7 @@ where
 
     fn replace(&mut self, key: K) -> Option<K> {
         self.ensure_root_is_owned();
-        match search::search_tree::<marker::Mut<'_>, K, (), K>(self.root.as_mut(), &key) {
+        match search::search_tree::<marker::Mut<'_>, K, (), K>(self.root.as_mut()?.as_mut(), &key) {
             Found(handle) => Some(mem::replace(handle.into_kv_mut().0, key)),
             GoDown(handle) => {
                 VacantEntry { key, handle, length: &mut self.length, _marker: PhantomData }
@@ -344,15 +350,18 @@ pub struct IterMut<'a, K: 'a, V: 'a> {
 /// [`BTreeMap`]: struct.BTreeMap.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IntoIter<K, V> {
-    front: Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>,
-    back: Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>,
+    front: Option<Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>>,
+    back: Option<Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>>,
     length: usize,
 }
 
 #[stable(feature = "collection_debug", since = "1.17.0")]
 impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let range = Range { front: self.front.reborrow(), back: self.back.reborrow() };
+        let range = Range {
+            front: self.front.as_ref().map(|f| f.reborrow()),
+            back: self.back.as_ref().map(|b| b.reborrow()),
+        };
         f.debug_list().entries(range).finish()
     }
 }
@@ -417,8 +426,8 @@ pub struct ValuesMut<'a, K: 'a, V: 'a> {
 /// [`BTreeMap`]: struct.BTreeMap.html
 #[stable(feature = "btree_range", since = "1.17.0")]
 pub struct Range<'a, K: 'a, V: 'a> {
-    front: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
-    back: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
+    front: Option<Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+    back: Option<Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>>,
 }
 
 #[stable(feature = "collection_debug", since = "1.17.0")]
@@ -437,8 +446,8 @@ impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Range<'_, K, V> {
 /// [`BTreeMap`]: struct.BTreeMap.html
 #[stable(feature = "btree_range", since = "1.17.0")]
 pub struct RangeMut<'a, K: 'a, V: 'a> {
-    front: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
-    back: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
+    front: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+    back: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
 
     // Be invariant in `K` and `V`
     _marker: PhantomData<&'a mut (K, V)>,
@@ -447,7 +456,10 @@ pub struct RangeMut<'a, K: 'a, V: 'a> {
 #[stable(feature = "collection_debug", since = "1.17.0")]
 impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for RangeMut<'_, K, V> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let range = Range { front: self.front.reborrow(), back: self.back.reborrow() };
+        let range = Range {
+            front: self.front.as_ref().map(|f| f.reborrow()),
+            back: self.back.as_ref().map(|b| b.reborrow()),
+        };
         f.debug_list().entries(range).finish()
     }
 }
@@ -544,7 +556,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new() -> BTreeMap<K, V> {
-        BTreeMap { root: node::Root::shared_empty_root(), length: 0 }
+        BTreeMap { root: None, length: 0 }
     }
 
     /// Clears the map, removing all elements.
@@ -589,7 +601,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         K: Borrow<Q>,
         Q: Ord,
     {
-        match search::search_tree(self.root.as_ref(), key) {
+        match search::search_tree(self.root.as_ref()?.as_ref(), key) {
             Found(handle) => Some(handle.into_kv().1),
             GoDown(_) => None,
         }
@@ -616,7 +628,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         K: Borrow<Q>,
         Q: Ord,
     {
-        match search::search_tree(self.root.as_ref(), k) {
+        match search::search_tree(self.root.as_ref()?.as_ref(), k) {
             Found(handle) => Some(handle.into_kv()),
             GoDown(_) => None,
         }
@@ -645,7 +657,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         T: Ord,
         K: Borrow<T>,
     {
-        let front = self.root.as_ref().first_leaf_edge();
+        let front = self.root.as_ref()?.as_ref().first_leaf_edge();
         front.right_kv().ok().map(Handle::into_kv)
     }
 
@@ -674,7 +686,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         T: Ord,
         K: Borrow<T>,
     {
-        let front = self.root.as_mut().first_leaf_edge();
+        let front = self.root.as_mut()?.as_mut().first_leaf_edge();
         if let Ok(kv) = front.right_kv() {
             Some(OccupiedEntry {
                 handle: kv.forget_node_type(),
@@ -708,7 +720,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         T: Ord,
         K: Borrow<T>,
     {
-        let back = self.root.as_ref().last_leaf_edge();
+        let back = self.root.as_ref()?.as_ref().last_leaf_edge();
         back.left_kv().ok().map(Handle::into_kv)
     }
 
@@ -737,7 +749,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         T: Ord,
         K: Borrow<T>,
     {
-        let back = self.root.as_mut().last_leaf_edge();
+        let back = self.root.as_mut()?.as_mut().last_leaf_edge();
         if let Ok(kv) = back.left_kv() {
             Some(OccupiedEntry {
                 handle: kv.forget_node_type(),
@@ -801,7 +813,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         K: Borrow<Q>,
         Q: Ord,
     {
-        match search::search_tree(self.root.as_mut(), key) {
+        match search::search_tree(self.root.as_mut()?.as_mut(), key) {
             Found(handle) => Some(handle.into_kv_mut().1),
             GoDown(_) => None,
         }
@@ -896,7 +908,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         K: Borrow<Q>,
         Q: Ord,
     {
-        match search::search_tree(self.root.as_mut(), key) {
+        match search::search_tree(self.root.as_mut()?.as_mut(), key) {
             Found(handle) => Some(
                 OccupiedEntry { handle, length: &mut self.length, _marker: PhantomData }
                     .remove_entry(),
@@ -992,11 +1004,15 @@ impl<K: Ord, V> BTreeMap<K, V> {
         K: Borrow<T>,
         R: RangeBounds<T>,
     {
-        let root1 = self.root.as_ref();
-        let root2 = self.root.as_ref();
-        let (f, b) = range_search(root1, root2, range);
+        if let Some(root) = &self.root {
+            let root1 = root.as_ref();
+            let root2 = root.as_ref();
+            let (f, b) = range_search(root1, root2, range);
 
-        Range { front: f, back: b }
+            Range { front: Some(f), back: Some(b) }
+        } else {
+            Range { front: None, back: None }
+        }
     }
 
     /// Constructs a mutable double-ended iterator over a sub-range of elements in the map.
@@ -1036,11 +1052,15 @@ impl<K: Ord, V> BTreeMap<K, V> {
         K: Borrow<T>,
         R: RangeBounds<T>,
     {
-        let root1 = self.root.as_mut();
-        let root2 = unsafe { ptr::read(&root1) };
-        let (f, b) = range_search(root1, root2, range);
+        if let Some(root) = &mut self.root {
+            let root1 = root.as_mut();
+            let root2 = unsafe { ptr::read(&root1) };
+            let (f, b) = range_search(root1, root2, range);
 
-        RangeMut { front: f, back: b, _marker: PhantomData }
+            RangeMut { front: Some(f), back: Some(b), _marker: PhantomData }
+        } else {
+            RangeMut { front: None, back: None, _marker: PhantomData }
+        }
     }
 
     /// Gets the given key's corresponding entry in the map for in-place manipulation.
@@ -1065,7 +1085,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
     pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
         // FIXME(@porglezomp) Avoid allocating if we don't insert
         self.ensure_root_is_owned();
-        match search::search_tree(self.root.as_mut(), &key) {
+        match search::search_tree(self.root.as_mut().unwrap().as_mut(), &key) {
             Found(handle) => {
                 Occupied(OccupiedEntry { handle, length: &mut self.length, _marker: PhantomData })
             }
@@ -1077,7 +1097,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
 
     fn from_sorted_iter<I: Iterator<Item = (K, V)>>(&mut self, iter: I) {
         self.ensure_root_is_owned();
-        let mut cur_node = self.root.as_mut().last_leaf_edge().into_node();
+        let mut cur_node = self.root.as_mut().unwrap().as_mut().last_leaf_edge().into_node();
         // Iterate through all key-value pairs, pushing them into nodes at the right level.
         for (key, value) in iter {
             // Try to push key-value pair into the current leaf node.
@@ -1126,7 +1146,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
 
     fn fix_right_edge(&mut self) {
         // Handle underfull nodes, start from the top.
-        let mut cur_node = self.root.as_mut();
+        let mut cur_node = self.root.as_mut().unwrap().as_mut();
         while let Internal(internal) = cur_node.force() {
             // Check if right-most child is underfull.
             let mut last_edge = internal.last_edge();
@@ -1187,14 +1207,14 @@ impl<K: Ord, V> BTreeMap<K, V> {
         let total_num = self.len();
 
         let mut right = Self::new();
-        right.root = node::Root::new_leaf();
-        for _ in 0..(self.root.as_ref().height()) {
-            right.root.push_level();
+        let right_root = right.ensure_root_is_owned();
+        for _ in 0..(self.root.as_ref().unwrap().as_ref().height()) {
+            right_root.push_level();
         }
 
         {
-            let mut left_node = self.root.as_mut();
-            let mut right_node = right.root.as_mut();
+            let mut left_node = self.root.as_mut().unwrap().as_mut();
+            let mut right_node = right.root.as_mut().unwrap().as_mut();
 
             loop {
                 let mut split_edge = match search::search_node(left_node, key) {
@@ -1223,7 +1243,9 @@ impl<K: Ord, V> BTreeMap<K, V> {
         self.fix_right_border();
         right.fix_left_border();
 
-        if self.root.as_ref().height() < right.root.as_ref().height() {
+        if self.root.as_ref().unwrap().as_ref().height()
+            < right.root.as_ref().unwrap().as_ref().height()
+        {
             self.recalc_length();
             right.length = total_num - self.len();
         } else {
@@ -1234,6 +1256,48 @@ impl<K: Ord, V> BTreeMap<K, V> {
         right
     }
 
+    /// Creates an iterator which uses a closure to determine if an element should be removed.
+    ///
+    /// If the closure returns true, the element is removed from the map and yielded.
+    /// If the closure returns false, or panics, the element remains in the map and will not be
+    /// yielded.
+    ///
+    /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of
+    /// whether you choose to keep or remove it.
+    ///
+    /// If the iterator is only partially consumed or not consumed at all, each of the remaining
+    /// elements will still be subjected to the closure and removed and dropped if it returns true.
+    ///
+    /// It is unspecified how many more elements will be subjected to the closure
+    /// if a panic occurs in the closure, or a panic occurs while dropping an element,
+    /// or if the `DrainFilter` value is leaked.
+    ///
+    /// # Examples
+    ///
+    /// Splitting a map into even and odd keys, reusing the original map:
+    ///
+    /// ```
+    /// #![feature(btree_drain_filter)]
+    /// use std::collections::BTreeMap;
+    ///
+    /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
+    /// let evens: BTreeMap<_, _> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+    /// let odds = map;
+    /// assert_eq!(evens.keys().copied().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
+    /// assert_eq!(odds.keys().copied().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
+    /// ```
+    #[unstable(feature = "btree_drain_filter", issue = "70530")]
+    pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F>
+    where
+        F: FnMut(&K, &mut V) -> bool,
+    {
+        DrainFilter { pred, inner: self.drain_filter_inner() }
+    }
+    pub(super) fn drain_filter_inner(&mut self) -> DrainFilterInner<'_, K, V> {
+        let front = self.root.as_mut().map(|r| r.as_mut().first_leaf_edge());
+        DrainFilterInner { length: &mut self.length, cur_leaf_edge: front }
+    }
+
     /// Calculates the number of elements if it is incorrect.
     fn recalc_length(&mut self) {
         fn dfs<'a, K, V>(node: NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>) -> usize
@@ -1261,19 +1325,19 @@ impl<K: Ord, V> BTreeMap<K, V> {
             res
         }
 
-        self.length = dfs(self.root.as_ref());
+        self.length = dfs(self.root.as_ref().unwrap().as_ref());
     }
 
     /// Removes empty levels on the top.
     fn fix_top(&mut self) {
         loop {
             {
-                let node = self.root.as_ref();
+                let node = self.root.as_ref().unwrap().as_ref();
                 if node.height() == 0 || node.len() > 0 {
                     break;
                 }
             }
-            self.root.pop_level();
+            self.root.as_mut().unwrap().pop_level();
         }
     }
 
@@ -1281,7 +1345,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         self.fix_top();
 
         {
-            let mut cur_node = self.root.as_mut();
+            let mut cur_node = self.root.as_mut().unwrap().as_mut();
 
             while let Internal(node) = cur_node.force() {
                 let mut last_kv = node.last_kv();
@@ -1307,7 +1371,7 @@ impl<K: Ord, V> BTreeMap<K, V> {
         self.fix_top();
 
         {
-            let mut cur_node = self.root.as_mut();
+            let mut cur_node = self.root.as_mut().unwrap().as_mut();
 
             while let Internal(node) = cur_node.force() {
                 let mut first_kv = node.first_kv();
@@ -1326,13 +1390,6 @@ impl<K: Ord, V> BTreeMap<K, V> {
 
         self.fix_top();
     }
-
-    /// If the root node is the shared root node, allocate our own node.
-    fn ensure_root_is_owned(&mut self) {
-        if self.root.is_shared_root() {
-            self.root = node::Root::new_leaf();
-        }
-    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1458,12 +1515,21 @@ impl<K, V> IntoIterator for BTreeMap<K, V> {
     type IntoIter = IntoIter<K, V>;
 
     fn into_iter(self) -> IntoIter<K, V> {
-        let root1 = unsafe { ptr::read(&self.root).into_ref() };
-        let root2 = unsafe { ptr::read(&self.root).into_ref() };
+        if self.root.is_none() {
+            mem::forget(self);
+            return IntoIter { front: None, back: None, length: 0 };
+        }
+
+        let root1 = unsafe { unwrap_unchecked(ptr::read(&self.root)).into_ref() };
+        let root2 = unsafe { unwrap_unchecked(ptr::read(&self.root)).into_ref() };
         let len = self.length;
         mem::forget(self);
 
-        IntoIter { front: root1.first_leaf_edge(), back: root2.last_leaf_edge(), length: len }
+        IntoIter {
+            front: Some(root1.first_leaf_edge()),
+            back: Some(root2.last_leaf_edge()),
+            length: len,
+        }
     }
 }
 
@@ -1477,6 +1543,14 @@ impl<K, V> Drop for IntoIter<K, V> {
                 // Continue the same loop we perform below. This only runs when unwinding, so we
                 // don't have to care about panics this time (they'll abort).
                 while let Some(_) = self.0.next() {}
+
+                unsafe {
+                    let mut node =
+                        unwrap_unchecked(ptr::read(&self.0.front)).into_node().forget_type();
+                    while let Some(parent) = node.deallocate_and_ascend() {
+                        node = parent.into_node().forget_type();
+                    }
+                }
             }
         }
 
@@ -1487,13 +1561,13 @@ impl<K, V> Drop for IntoIter<K, V> {
         }
 
         unsafe {
-            let mut node = ptr::read(&self.front).into_node().forget_type();
-            if node.is_shared_root() {
-                return;
-            }
-
-            while let Some(parent) = node.deallocate_and_ascend() {
-                node = parent.into_node().forget_type();
+            if let Some(front) = ptr::read(&self.front) {
+                let mut node = front.into_node().forget_type();
+                // Most of the nodes have been deallocated while traversing
+                // but one pile from a leaf up to the root is left standing.
+                while let Some(parent) = node.deallocate_and_ascend() {
+                    node = parent.into_node().forget_type();
+                }
             }
         }
     }
@@ -1508,7 +1582,7 @@ impl<K, V> Iterator for IntoIter<K, V> {
             None
         } else {
             self.length -= 1;
-            Some(unsafe { self.front.next_unchecked() })
+            Some(unsafe { self.front.as_mut().unwrap().next_unchecked() })
         }
     }
 
@@ -1524,7 +1598,7 @@ impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
             None
         } else {
             self.length -= 1;
-            Some(unsafe { self.back.next_back_unchecked() })
+            Some(unsafe { self.back.as_mut().unwrap().next_back_unchecked() })
         }
     }
 }
@@ -1621,6 +1695,124 @@ impl<K, V> Clone for Values<'_, K, V> {
     }
 }
 
+/// An iterator produced by calling `drain_filter` on BTreeMap.
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<'a, K, V, F>
+where
+    K: 'a + Ord, // This Ord bound should be removed before stabilization.
+    V: 'a,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    pred: F,
+    inner: DrainFilterInner<'a, K, V>,
+}
+pub(super) struct DrainFilterInner<'a, K, V>
+where
+    K: 'a + Ord,
+    V: 'a,
+{
+    length: &'a mut usize,
+    cur_leaf_edge: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, K, V, F> Drop for DrainFilter<'a, K, V, F>
+where
+    K: 'a + Ord,
+    V: 'a,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    fn drop(&mut self) {
+        self.for_each(drop);
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, K, V, F> fmt::Debug for DrainFilter<'a, K, V, F>
+where
+    K: 'a + fmt::Debug + Ord,
+    V: 'a + fmt::Debug,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("DrainFilter").field(&self.inner.peek()).finish()
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, K, V, F> Iterator for DrainFilter<'a, K, V, F>
+where
+    K: 'a + Ord,
+    V: 'a,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    type Item = (K, V);
+
+    fn next(&mut self) -> Option<(K, V)> {
+        self.inner.next(&mut self.pred)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<'a, K, V> DrainFilterInner<'a, K, V>
+where
+    K: 'a + Ord,
+    V: 'a,
+{
+    /// Allow Debug implementations to predict the next element.
+    pub(super) fn peek(&self) -> Option<(&K, &V)> {
+        let edge = self.cur_leaf_edge.as_ref()?;
+        edge.reborrow().next_kv().ok().map(|kv| kv.into_kv())
+    }
+
+    unsafe fn next_kv(
+        &mut self,
+    ) -> Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>> {
+        let edge = self.cur_leaf_edge.as_ref()?;
+        ptr::read(edge).next_kv().ok()
+    }
+
+    /// Implementation of a typical `DrainFilter::next` method, given the predicate.
+    pub(super) fn next<F>(&mut self, pred: &mut F) -> Option<(K, V)>
+    where
+        F: FnMut(&K, &mut V) -> bool,
+    {
+        while let Some(kv) = unsafe { self.next_kv() } {
+            let (k, v) = unsafe { ptr::read(&kv) }.into_kv_mut();
+            if pred(k, v) {
+                *self.length -= 1;
+                let (k, v, leaf_edge_location) = kv.remove_kv_tracking();
+                // `remove_kv_tracking` has either preserved or invalidated `self.cur_leaf_edge`
+                if let Some(node) = leaf_edge_location {
+                    match search::search_tree(node, &k) {
+                        search::SearchResult::Found(_) => unreachable!(),
+                        search::SearchResult::GoDown(leaf) => self.cur_leaf_edge = Some(leaf),
+                    }
+                };
+                return Some((k, v));
+            }
+            self.cur_leaf_edge = Some(kv.next_leaf_edge());
+        }
+        None
+    }
+
+    /// Implementation of a typical `DrainFilter::size_hint` method.
+    pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, Some(*self.length))
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F>
+where
+    K: Ord,
+    F: FnMut(&K, &mut V) -> bool,
+{
+}
+
 #[stable(feature = "btree_range", since = "1.17.0")]
 impl<'a, K, V> Iterator for Range<'a, K, V> {
     type Item = (&'a K, &'a V);
@@ -1674,7 +1866,7 @@ impl<'a, K, V> Range<'a, K, V> {
     }
 
     unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
-        self.front.next_unchecked()
+        unwrap_unchecked(self.front.as_mut()).next_unchecked()
     }
 }
 
@@ -1687,7 +1879,7 @@ impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
 
 impl<'a, K, V> Range<'a, K, V> {
     unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
-        self.back.next_back_unchecked()
+        unwrap_unchecked(self.back.as_mut()).next_back_unchecked()
     }
 }
 
@@ -1725,7 +1917,7 @@ impl<'a, K, V> RangeMut<'a, K, V> {
     }
 
     unsafe fn next_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
-        self.front.next_unchecked()
+        unwrap_unchecked(self.front.as_mut()).next_unchecked()
     }
 }
 
@@ -1746,7 +1938,7 @@ impl<K, V> FusedIterator for RangeMut<'_, K, V> {}
 
 impl<'a, K, V> RangeMut<'a, K, V> {
     unsafe fn next_back_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
-        self.back.next_back_unchecked()
+        unwrap_unchecked(self.back.as_mut()).next_back_unchecked()
     }
 }
 
@@ -1960,8 +2152,8 @@ impl<K, V> BTreeMap<K, V> {
     pub fn iter(&self) -> Iter<'_, K, V> {
         Iter {
             range: Range {
-                front: self.root.as_ref().first_leaf_edge(),
-                back: self.root.as_ref().last_leaf_edge(),
+                front: self.root.as_ref().map(|r| r.as_ref().first_leaf_edge()),
+                back: self.root.as_ref().map(|r| r.as_ref().last_leaf_edge()),
             },
             length: self.length,
         }
@@ -1990,13 +2182,17 @@ impl<K, V> BTreeMap<K, V> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
-        let root1 = self.root.as_mut();
-        let root2 = unsafe { ptr::read(&root1) };
         IterMut {
-            range: RangeMut {
-                front: root1.first_leaf_edge(),
-                back: root2.last_leaf_edge(),
-                _marker: PhantomData,
+            range: if let Some(root) = &mut self.root {
+                let root1 = root.as_mut();
+                let root2 = unsafe { ptr::read(&root1) };
+                RangeMut {
+                    front: Some(root1.first_leaf_edge()),
+                    back: Some(root2.last_leaf_edge()),
+                    _marker: PhantomData,
+                }
+            } else {
+                RangeMut { front: None, back: None, _marker: PhantomData }
             },
             length: self.length,
         }
@@ -2107,6 +2303,12 @@ impl<K, V> BTreeMap<K, V> {
     pub fn is_empty(&self) -> bool {
         self.len() == 0
     }
+
+    /// If the root node is the empty (non-allocated) root node, allocate our
+    /// own node.
+    fn ensure_root_is_owned(&mut self) -> &mut node::Root<K, V> {
+        self.root.get_or_insert_with(node::Root::new_leaf)
+    }
 }
 
 impl<'a, K: Ord, V> Entry<'a, K, V> {
@@ -2489,12 +2691,31 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
     fn remove_kv(self) -> (K, V) {
         *self.length -= 1;
 
-        let (small_leaf, old_key, old_val) = match self.handle.force() {
+        let (old_key, old_val, _) = self.handle.remove_kv_tracking();
+        (old_key, old_val)
+    }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
+    /// Removes a key/value-pair from the map, and returns that pair, as well as
+    /// the whereabouts of the leaf edge corresponding to that former pair:
+    /// if None is returned, the leaf edge is still the left leaf edge of the KV handle;
+    /// if a node is returned, it heads the subtree where the leaf edge may be found.
+    fn remove_kv_tracking(
+        self,
+    ) -> (K, V, Option<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>>) {
+        let mut levels_down_handled: isize;
+        let (small_leaf, old_key, old_val) = match self.force() {
             Leaf(leaf) => {
+                levels_down_handled = 1; // handled at same level, but affects only the right side
                 let (hole, old_key, old_val) = leaf.remove();
                 (hole.into_node(), old_key, old_val)
             }
             Internal(mut internal) => {
+                // Replace the location freed in the internal node with the next KV,
+                // and remove that next KV from its leaf.
+                levels_down_handled = unsafe { ptr::read(&internal).into_node().height() } as isize;
+
                 let key_loc = internal.kv_mut().0 as *mut K;
                 let val_loc = internal.kv_mut().1 as *mut V;
 
@@ -2514,27 +2735,39 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
         let mut cur_node = small_leaf.forget_type();
         while cur_node.len() < node::MIN_LEN {
             match handle_underfull_node(cur_node) {
-                AtRoot => break,
+                AtRoot(root) => {
+                    cur_node = root;
+                    break;
+                }
                 EmptyParent(_) => unreachable!(),
                 Merged(parent) => {
+                    levels_down_handled -= 1;
                     if parent.len() == 0 {
                         // We must be at the root
-                        parent.into_root_mut().pop_level();
+                        let root = parent.into_root_mut();
+                        root.pop_level();
+                        cur_node = root.as_mut();
                         break;
                     } else {
                         cur_node = parent.forget_type();
                     }
                 }
-                Stole(_) => break,
+                Stole(internal_node) => {
+                    levels_down_handled -= 1;
+                    cur_node = internal_node.forget_type();
+                    // This internal node might be underfull, but only if it's the root.
+                    break;
+                }
             }
         }
 
-        (old_key, old_val)
+        let leaf_edge_location = if levels_down_handled > 0 { None } else { Some(cur_node) };
+        (old_key, old_val, leaf_edge_location)
     }
 }
 
 enum UnderflowResult<'a, K, V> {
-    AtRoot,
+    AtRoot(NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>),
     EmptyParent(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
     Merged(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
     Stole(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
@@ -2543,10 +2776,9 @@ enum UnderflowResult<'a, K, V> {
 fn handle_underfull_node<K, V>(
     node: NodeRef<marker::Mut<'_>, K, V, marker::LeafOrInternal>,
 ) -> UnderflowResult<'_, K, V> {
-    let parent = if let Ok(parent) = node.ascend() {
-        parent
-    } else {
-        return AtRoot;
+    let parent = match node.ascend() {
+        Ok(parent) => parent,
+        Err(root) => return AtRoot(root),
     };
 
     let (is_left, mut handle) = match parent.left_kv() {
diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs
index 362755f8b7f..11c14299573 100644
--- a/src/liballoc/collections/btree/node.rs
+++ b/src/liballoc/collections/btree/node.rs
@@ -44,34 +44,7 @@ const B: usize = 6;
 pub const MIN_LEN: usize = B - 1;
 pub const CAPACITY: usize = 2 * B - 1;
 
-/// The underlying representation of leaf nodes. Note that it is often unsafe to actually store
-/// these, since only the first `len` keys and values are assumed to be initialized. As such,
-/// these should always be put behind pointers, and specifically behind `BoxedNode` in the owned
-/// case.
-///
-/// We have a separate type for the header and rely on it matching the prefix of `LeafNode`, in
-/// order to statically allocate a single dummy node to avoid allocations. This struct is
-/// `repr(C)` to prevent them from being reordered. `LeafNode` does not just contain a
-/// `NodeHeader` because we do not want unnecessary padding between `len` and the keys.
-/// Crucially, `NodeHeader` can be safely transmuted to different K and V. (This is exploited
-/// by `as_header`.)
-#[repr(C)]
-struct NodeHeader<K, V> {
-    /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`.
-    /// This either points to an actual node or is null.
-    parent: *const InternalNode<K, V>,
-
-    /// This node's index into the parent node's `edges` array.
-    /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`.
-    /// This is only guaranteed to be initialized when `parent` is non-null.
-    parent_idx: MaybeUninit<u16>,
-
-    /// The number of keys and values this node stores.
-    ///
-    /// This next to `parent_idx` to encourage the compiler to join `len` and
-    /// `parent_idx` into the same 32-bit word, reducing space overhead.
-    len: u16,
-}
+/// The underlying representation of leaf nodes.
 #[repr(C)]
 struct LeafNode<K, V> {
     /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`.
@@ -111,21 +84,6 @@ impl<K, V> LeafNode<K, V> {
     }
 }
 
-impl<K, V> NodeHeader<K, V> {
-    fn is_shared_root(&self) -> bool {
-        ptr::eq(self, &EMPTY_ROOT_NODE as *const _ as *const _)
-    }
-}
-
-// We need to implement Sync here in order to make a static instance.
-unsafe impl Sync for NodeHeader<(), ()> {}
-
-// An empty node used as a placeholder for the root node, to avoid allocations.
-// We use just a header in order to save space, since no operation on an empty tree will
-// ever take a pointer past the first key.
-static EMPTY_ROOT_NODE: NodeHeader<(), ()> =
-    NodeHeader { parent: ptr::null(), parent_idx: MaybeUninit::uninit(), len: 0 };
-
 /// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden
 /// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an
 /// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the
@@ -153,10 +111,12 @@ impl<K, V> InternalNode<K, V> {
     }
 }
 
-/// An owned pointer to a node. This basically is either `Box<LeafNode<K, V>>` or
-/// `Box<InternalNode<K, V>>`. However, it contains no information as to which of the two types
-/// of nodes is actually behind the box, and, partially due to this lack of information, has no
-/// destructor.
+/// A managed, non-null pointer to a node. This is either an owned pointer to
+/// `LeafNode<K, V>` or an owned pointer to `InternalNode<K, V>`.
+///
+/// However, `BoxedNode` contains no information as to which of the two types
+/// of nodes it actually contains, and, partially due to this lack of information,
+/// has no destructor.
 struct BoxedNode<K, V> {
     ptr: Unique<LeafNode<K, V>>,
 }
@@ -167,9 +127,7 @@ impl<K, V> BoxedNode<K, V> {
     }
 
     fn from_internal(node: Box<InternalNode<K, V>>) -> Self {
-        unsafe {
-            BoxedNode { ptr: Unique::new_unchecked(Box::into_raw(node) as *mut LeafNode<K, V>) }
-        }
+        BoxedNode { ptr: Box::into_unique(node).cast() }
     }
 
     unsafe fn from_ptr(ptr: NonNull<LeafNode<K, V>>) -> Self {
@@ -181,10 +139,12 @@ impl<K, V> BoxedNode<K, V> {
     }
 }
 
-/// An owned tree. Note that despite being owned, this does not have a destructor,
-/// and must be cleaned up manually.
+/// An owned tree.
+///
+/// Note that this does not have a destructor, and must be cleaned up manually.
 pub struct Root<K, V> {
     node: BoxedNode<K, V>,
+    /// The number of levels below the root node.
     height: usize,
 }
 
@@ -192,21 +152,7 @@ unsafe impl<K: Sync, V: Sync> Sync for Root<K, V> {}
 unsafe impl<K: Send, V: Send> Send for Root<K, V> {}
 
 impl<K, V> Root<K, V> {
-    pub fn is_shared_root(&self) -> bool {
-        self.as_ref().is_shared_root()
-    }
-
-    pub fn shared_empty_root() -> Self {
-        Root {
-            node: unsafe {
-                BoxedNode::from_ptr(NonNull::new_unchecked(
-                    &EMPTY_ROOT_NODE as *const _ as *const LeafNode<K, V> as *mut _,
-                ))
-            },
-            height: 0,
-        }
-    }
-
+    /// Returns a new owned tree, with its own root node that is initially empty.
     pub fn new_leaf() -> Self {
         Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), height: 0 }
     }
@@ -241,7 +187,6 @@ impl<K, V> Root<K, V> {
     /// Adds a new internal node with a single edge, pointing to the previous root, and make that
     /// new node the root. This increases the height by 1 and is the opposite of `pop_level`.
     pub fn push_level(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> {
-        debug_assert!(!self.is_shared_root());
         let mut new_node = Box::new(unsafe { InternalNode::new() });
         new_node.edges[0].write(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) });
 
@@ -304,12 +249,8 @@ impl<K, V> Root<K, V> {
 ///   `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the
 ///   `NodeRef` points to an internal node, and when this is `LeafOrInternal` the
 ///   `NodeRef` could be pointing to either type of node.
-///   Note that in case of a leaf node, this might still be the shared root!
-///   Only turn this into a `LeafNode` reference if you know it is not the shared root!
-///   Shared references must be dereferencable *for the entire size of their pointee*,
-///   so '&LeafNode` or `&InternalNode` pointing to the shared root is undefined behavior.
-///   Turning this into a `NodeHeader` reference is always safe.
 pub struct NodeRef<BorrowType, K, V, Type> {
+    /// The number of levels below the node.
     height: usize,
     node: NonNull<LeafNode<K, V>>,
     // `root` is null unless the borrow type is `Mut`
@@ -349,7 +290,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
     /// Note that, despite being safe, calling this function can have the side effect
     /// of invalidating mutable references that unsafe code has created.
     pub fn len(&self) -> usize {
-        self.as_header().len as usize
+        self.as_leaf().len as usize
     }
 
     /// Returns the height of this node in the whole tree. Zero height denotes the
@@ -369,35 +310,24 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
         NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
     }
 
-    /// Exposes the leaf "portion" of any leaf or internal node that is not the shared root.
+    /// Exposes the leaf "portion" of any leaf or internal node.
     /// If the node is a leaf, this function simply opens up its data.
     /// If the node is an internal node, so not a leaf, it does have all the data a leaf has
     /// (header, keys and values), and this function exposes that.
-    /// Unsafe because the node must not be the shared root. For more information,
-    /// see the `NodeRef` comments.
-    unsafe fn as_leaf(&self) -> &LeafNode<K, V> {
-        debug_assert!(!self.is_shared_root());
-        self.node.as_ref()
-    }
-
-    fn as_header(&self) -> &NodeHeader<K, V> {
-        unsafe { &*(self.node.as_ptr() as *const NodeHeader<K, V>) }
-    }
-
-    /// Returns whether the node is the shared, empty root.
-    pub fn is_shared_root(&self) -> bool {
-        self.as_header().is_shared_root()
+    fn as_leaf(&self) -> &LeafNode<K, V> {
+        // The node must be valid for at least the LeafNode portion.
+        // This is not a reference in the NodeRef type because we don't know if
+        // it should be unique or shared.
+        unsafe { self.node.as_ref() }
     }
 
     /// Borrows a view into the keys stored in the node.
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    pub unsafe fn keys(&self) -> &[K] {
+    pub fn keys(&self) -> &[K] {
         self.reborrow().into_key_slice()
     }
 
     /// Borrows a view into the values stored in the node.
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn vals(&self) -> &[V] {
+    fn vals(&self) -> &[V] {
         self.reborrow().into_val_slice()
     }
 
@@ -411,7 +341,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
     pub fn ascend(
         self,
     ) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
-        let parent_as_leaf = self.as_header().parent as *const LeafNode<K, V>;
+        let parent_as_leaf = self.as_leaf().parent as *const LeafNode<K, V>;
         if let Some(non_zero) = NonNull::new(parent_as_leaf as *mut _) {
             Ok(Handle {
                 node: NodeRef {
@@ -420,7 +350,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
                     root: self.root,
                     _marker: PhantomData,
                 },
-                idx: unsafe { usize::from(*self.as_header().parent_idx.as_ptr()) },
+                idx: unsafe { usize::from(*self.as_leaf().parent_idx.as_ptr()) },
                 _marker: PhantomData,
             })
         } else {
@@ -459,7 +389,6 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
     pub unsafe fn deallocate_and_ascend(
         self,
     ) -> Option<Handle<NodeRef<marker::Owned, K, V, marker::Internal>, marker::Edge>> {
-        assert!(!self.is_shared_root());
         let height = self.height;
         let node = self.node;
         let ret = self.ascend().ok();
@@ -502,41 +431,37 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
     /// (header, keys and values), and this function exposes that.
     ///
     /// Returns a raw ptr to avoid asserting exclusive access to the entire node.
-    /// This also implies you can invoke this member on the shared root, but the resulting pointer
-    /// might not be properly aligned and definitely would not allow accessing keys and values.
     fn as_leaf_mut(&mut self) -> *mut LeafNode<K, V> {
         self.node.as_ptr()
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn keys_mut(&mut self) -> &mut [K] {
-        self.reborrow_mut().into_key_slice_mut()
+    fn keys_mut(&mut self) -> &mut [K] {
+        // SAFETY: the caller will not be able to call further methods on self
+        // until the key slice reference is dropped, as we have unique access
+        // for the lifetime of the borrow.
+        unsafe { self.reborrow_mut().into_key_slice_mut() }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn vals_mut(&mut self) -> &mut [V] {
-        self.reborrow_mut().into_val_slice_mut()
+    fn vals_mut(&mut self) -> &mut [V] {
+        // SAFETY: the caller will not be able to call further methods on self
+        // until the value slice reference is dropped, as we have unique access
+        // for the lifetime of the borrow.
+        unsafe { self.reborrow_mut().into_val_slice_mut() }
     }
 }
 
 impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_key_slice(self) -> &'a [K] {
-        debug_assert!(!self.is_shared_root());
-        // We cannot be the shared root, so `as_leaf` is okay.
-        slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().keys), self.len())
+    fn into_key_slice(self) -> &'a [K] {
+        unsafe { slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().keys), self.len()) }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_val_slice(self) -> &'a [V] {
-        debug_assert!(!self.is_shared_root());
-        // We cannot be the shared root, so `as_leaf` is okay.
-        slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().vals), self.len())
+    fn into_val_slice(self) -> &'a [V] {
+        unsafe { slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().vals), self.len()) }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_slices(self) -> (&'a [K], &'a [V]) {
-        let k = ptr::read(&self);
+    fn into_slices(self) -> (&'a [K], &'a [V]) {
+        // SAFETY: equivalent to reborrow() except not requiring Type: 'a
+        let k = unsafe { ptr::read(&self) };
         (k.into_key_slice(), self.into_val_slice())
     }
 }
@@ -548,28 +473,27 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
         unsafe { &mut *(self.root as *mut Root<K, V>) }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_key_slice_mut(mut self) -> &'a mut [K] {
-        debug_assert!(!self.is_shared_root());
-        // We cannot be the shared root, so `as_leaf_mut` is okay.
-        slice::from_raw_parts_mut(
-            MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).keys),
-            self.len(),
-        )
+    fn into_key_slice_mut(mut self) -> &'a mut [K] {
+        // SAFETY: The keys of a node must always be initialized up to length.
+        unsafe {
+            slice::from_raw_parts_mut(
+                MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).keys),
+                self.len(),
+            )
+        }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_val_slice_mut(mut self) -> &'a mut [V] {
-        debug_assert!(!self.is_shared_root());
-        slice::from_raw_parts_mut(
-            MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).vals),
-            self.len(),
-        )
+    fn into_val_slice_mut(mut self) -> &'a mut [V] {
+        // SAFETY: The values of a node must always be initialized up to length.
+        unsafe {
+            slice::from_raw_parts_mut(
+                MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).vals),
+                self.len(),
+            )
+        }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) {
-        debug_assert!(!self.is_shared_root());
+    fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) {
         // We cannot use the getters here, because calling the second one
         // invalidates the reference returned by the first.
         // More precisely, it is the call to `len` that is the culprit,
@@ -577,8 +501,13 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
         // overlap with the keys (and even the values, for ZST keys).
         let len = self.len();
         let leaf = self.as_leaf_mut();
-        let keys = slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).keys), len);
-        let vals = slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).vals), len);
+        // SAFETY: The keys and values of a node must always be initialized up to length.
+        let keys = unsafe {
+            slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).keys), len)
+        };
+        let vals = unsafe {
+            slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).vals), len)
+        };
         (keys, vals)
     }
 }
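The reasoning above (read `len` once, then derive both slices from a single raw pointer, so that no later `&mut self` call invalidates the first slice) can be illustrated outside liballoc. The `Node` type and `slices_mut` helper below are a hypothetical, self-contained sketch of the same shape, not code from this patch; it assumes tiny `u8` keys/values and a fixed capacity of 4.

```rust
use std::mem::MaybeUninit;
use std::slice;

// Toy node with parallel key/value storage; only the first `len` slots of each
// array are initialized, the same invariant the SAFETY comments above rely on.
struct Node {
    keys: [MaybeUninit<u8>; 4],
    vals: [MaybeUninit<u8>; 4],
    len: usize,
}

// Mirrors the shape of `into_slices_mut`: read `len` once, take a single raw
// pointer to the node, and build both slices from it, so no later `&mut self`
// method call can invalidate the slice created first.
fn slices_mut(node: &mut Node) -> (&mut [u8], &mut [u8]) {
    let len = node.len; // read the length before handing out raw pointers
    let node_ptr: *mut Node = node;
    unsafe {
        let keys = slice::from_raw_parts_mut((*node_ptr).keys.as_mut_ptr() as *mut u8, len);
        let vals = slice::from_raw_parts_mut((*node_ptr).vals.as_mut_ptr() as *mut u8, len);
        (keys, vals)
    }
}

fn main() {
    let mut node = Node {
        keys: [MaybeUninit::new(1), MaybeUninit::new(2), MaybeUninit::uninit(), MaybeUninit::uninit()],
        vals: [MaybeUninit::new(10), MaybeUninit::new(20), MaybeUninit::uninit(), MaybeUninit::uninit()],
        len: 2,
    };
    let (keys, vals) = slices_mut(&mut node);
    keys[0] = 7;
    assert_eq!((keys[0], vals[1]), (7, 20));
}
```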
@@ -587,7 +516,6 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
     /// Adds a key/value pair to the end of the node.
     pub fn push(&mut self, key: K, val: V) {
         assert!(self.len() < CAPACITY);
-        debug_assert!(!self.is_shared_root());
 
         let idx = self.len();
 
@@ -602,7 +530,6 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
     /// Adds a key/value pair to the beginning of the node.
     pub fn push_front(&mut self, key: K, val: V) {
         assert!(self.len() < CAPACITY);
-        debug_assert!(!self.is_shared_root());
 
         unsafe {
             slice_insert(self.keys_mut(), 0, key);
@@ -619,7 +546,6 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
     pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) {
         assert!(edge.height == self.height - 1);
         assert!(self.len() < CAPACITY);
-        debug_assert!(!self.is_shared_root());
 
         let idx = self.len();
 
@@ -653,7 +579,6 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
     pub fn push_front(&mut self, key: K, val: V, edge: Root<K, V>) {
         assert!(edge.height == self.height - 1);
         assert!(self.len() < CAPACITY);
-        debug_assert!(!self.is_shared_root());
 
         unsafe {
             slice_insert(self.keys_mut(), 0, key);
@@ -739,8 +664,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
         }
     }
 
-    /// Unsafe because the caller must ensure that the node is not the shared root.
-    unsafe fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) {
+    fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) {
         (self.keys_mut().as_mut_ptr(), self.vals_mut().as_mut_ptr())
     }
 }
@@ -899,7 +823,6 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge
     fn insert_fit(&mut self, key: K, val: V) -> *mut V {
         // Necessary for correctness, but in a private module
         debug_assert!(self.node.len() < CAPACITY);
-        debug_assert!(!self.node.is_shared_root());
 
         unsafe {
             slice_insert(self.node.keys_mut(), self.idx, key);
@@ -1076,7 +999,6 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV>
     /// - All the key/value pairs to the right of this handle are put into a newly
     ///   allocated node.
     pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, K, V, Root<K, V>) {
-        assert!(!self.node.is_shared_root());
         unsafe {
             let mut new_node = Box::new(LeafNode::new());
 
@@ -1108,7 +1030,6 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV>
     pub fn remove(
         mut self,
     ) -> (Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>, K, V) {
-        assert!(!self.node.is_shared_root());
         unsafe {
             let k = slice_remove(self.node.keys_mut(), self.idx);
             let v = slice_remove(self.node.vals_mut(), self.idx);
@@ -1221,7 +1142,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
 
             (*left_node.as_leaf_mut()).len += right_len as u16 + 1;
 
-            if self.node.height > 1 {
+            let layout = if self.node.height > 1 {
                 ptr::copy_nonoverlapping(
                     right_node.cast_unchecked().as_internal().edges.as_ptr(),
                     left_node
@@ -1238,10 +1159,11 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                         .correct_parent_link();
                 }
 
-                Global.dealloc(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
+                Layout::new::<InternalNode<K, V>>()
             } else {
-                Global.dealloc(right_node.node.cast(), Layout::new::<LeafNode<K, V>>());
-            }
+                Layout::new::<LeafNode<K, V>>()
+            };
+            Global.dealloc(right_node.node.cast(), layout);
 
             Handle::new_edge(self.node, self.idx)
         }
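The merge cleanup above folds the two `Global.dealloc` calls into one by selecting the `Layout` in the branch. A minimal sketch of the same pattern against the stable `std::alloc` API; the `Leaf`/`Internal` stand-ins and the `free_node` helper are illustrative only, not the real node types.

```rust
use std::alloc::{alloc, dealloc, Layout};

// Two differently-sized node types standing in for LeafNode and InternalNode.
struct Leaf { _keys: [u32; 4] }
struct Internal { _keys: [u32; 4], _edges: [*const Leaf; 5] }

/// Frees a node given only a type tag: the branch picks the right `Layout`,
/// and a single `dealloc` call follows it, as in the merge cleanup above.
unsafe fn free_node(ptr: *mut u8, is_internal: bool) {
    let layout = if is_internal {
        Layout::new::<Internal>()
    } else {
        Layout::new::<Leaf>()
    };
    dealloc(ptr, layout);
}

fn main() {
    unsafe {
        let leaf = alloc(Layout::new::<Leaf>());
        assert!(!leaf.is_null());
        free_node(leaf, false);
    }
}
```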
diff --git a/src/liballoc/collections/btree/search.rs b/src/liballoc/collections/btree/search.rs
index 2ba5cebbdee..4e80f7f21eb 100644
--- a/src/liballoc/collections/btree/search.rs
+++ b/src/liballoc/collections/btree/search.rs
@@ -67,19 +67,16 @@ where
     Q: Ord,
     K: Borrow<Q>,
 {
-    // This function is defined over all borrow types (immutable, mutable, owned),
-    // and may be called on the shared root in each case.
+    // This function is defined over all borrow types (immutable, mutable, owned).
     // Using `keys()` is fine here even if BorrowType is mutable, as all we return
     // is an index -- not a reference.
     let len = node.len();
-    if len > 0 {
-        let keys = unsafe { node.keys() }; // safe because a non-empty node cannot be the shared root
-        for (i, k) in keys.iter().enumerate() {
-            match key.cmp(k.borrow()) {
-                Ordering::Greater => {}
-                Ordering::Equal => return (i, true),
-                Ordering::Less => return (i, false),
-            }
+    let keys = node.keys();
+    for (i, k) in keys.iter().enumerate() {
+        match key.cmp(k.borrow()) {
+            Ordering::Greater => {}
+            Ordering::Equal => return (i, true),
+            Ordering::Less => return (i, false),
         }
     }
     (len, false)
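For reference, the `(index, found)` contract the simplified loop implements can be written as a standalone function over any sorted slice. The `search_linear` helper below is a hypothetical sketch mirroring the code above, not an export of this module.

```rust
use std::cmp::Ordering;

/// Returns the first index whose key is >= `key`, plus whether it is an exact match.
fn search_linear<K: Ord>(keys: &[K], key: &K) -> (usize, bool) {
    for (i, k) in keys.iter().enumerate() {
        match key.cmp(k) {
            Ordering::Greater => {}
            Ordering::Equal => return (i, true),
            Ordering::Less => return (i, false),
        }
    }
    (keys.len(), false)
}

fn main() {
    let keys = [10, 20, 30];
    assert_eq!(search_linear(&keys, &20), (1, true));  // exact hit
    assert_eq!(search_linear(&keys, &25), (2, false)); // would go before 30
    assert_eq!(search_linear(&keys, &99), (3, false)); // past the end
}
```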
diff --git a/src/liballoc/collections/btree/set.rs b/src/liballoc/collections/btree/set.rs
index b100ce754ca..0b02223def4 100644
--- a/src/liballoc/collections/btree/set.rs
+++ b/src/liballoc/collections/btree/set.rs
@@ -8,8 +8,8 @@ use core::fmt::{self, Debug};
 use core::iter::{FromIterator, FusedIterator, Peekable};
 use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
 
+use super::map::{BTreeMap, Keys};
 use super::Recover;
-use crate::collections::btree_map::{self, BTreeMap, Keys};
 
 // FIXME(conventions): implement bounded iterators
 
@@ -102,7 +102,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 #[derive(Debug)]
 pub struct IntoIter<T> {
-    iter: btree_map::IntoIter<T, ()>,
+    iter: super::map::IntoIter<T, ()>,
 }
 
 /// An iterator over a sub-range of items in a `BTreeSet`.
@@ -115,7 +115,7 @@ pub struct IntoIter<T> {
 #[derive(Debug)]
 #[stable(feature = "btree_range", since = "1.17.0")]
 pub struct Range<'a, T: 'a> {
-    iter: btree_map::Range<'a, T, ()>,
+    iter: super::map::Range<'a, T, ()>,
 }
 
 /// Core of SymmetricDifference and Union.
@@ -944,6 +944,41 @@ impl<T: Ord> BTreeSet<T> {
     {
         BTreeSet { map: self.map.split_off(key) }
     }
+
+    /// Creates an iterator which uses a closure to determine if a value should be removed.
+    ///
+    /// If the closure returns true, then the value is removed and yielded.
+    /// If the closure returns false, the value will remain in the set and will not be yielded
+    /// by the iterator.
+    ///
+    /// If the iterator is only partially consumed or not consumed at all, each of the remaining
+    /// values will still be subjected to the closure and removed and dropped if it returns true.
+    ///
+    /// It is unspecified how many more values will be subjected to the closure
+    /// if a panic occurs in the closure, or if a panic occurs while dropping a value, or if the
+    /// `DrainFilter` itself is leaked.
+    ///
+    /// # Examples
+    ///
+    /// Splitting a set into even and odd values, reusing the original set:
+    ///
+    /// ```
+    /// #![feature(btree_drain_filter)]
+    /// use std::collections::BTreeSet;
+    ///
+    /// let mut set: BTreeSet<i32> = (0..8).collect();
+    /// let evens: BTreeSet<_> = set.drain_filter(|v| v % 2 == 0).collect();
+    /// let odds = set;
+    /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
+    /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
+    /// ```
+    #[unstable(feature = "btree_drain_filter", issue = "70530")]
+    pub fn drain_filter<'a, F>(&'a mut self, pred: F) -> DrainFilter<'a, T, F>
+    where
+        F: 'a + FnMut(&T) -> bool,
+    {
+        DrainFilter { pred, inner: self.map.drain_filter_inner() }
+    }
 }
 
 impl<T> BTreeSet<T> {
@@ -1055,6 +1090,66 @@ impl<'a, T> IntoIterator for &'a BTreeSet<T> {
     }
 }
 
+/// An iterator produced by calling `drain_filter` on BTreeSet.
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + FnMut(&T) -> bool,
+{
+    pred: F,
+    inner: super::map::DrainFilterInner<'a, T, ()>,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F> Drop for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + FnMut(&T) -> bool,
+{
+    fn drop(&mut self) {
+        self.for_each(drop);
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F> fmt::Debug for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord + fmt::Debug,
+    F: 'a + FnMut(&T) -> bool,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("DrainFilter").field(&self.inner.peek().map(|(k, _)| k)).finish()
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, 'f, T, F> Iterator for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + 'f + FnMut(&T) -> bool,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        let pred = &mut self.pred;
+        let mut mapped_pred = |k: &T, _v: &mut ()| pred(k);
+        self.inner.next(&mut mapped_pred).map(|(k, _)| k)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F> FusedIterator for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + FnMut(&T) -> bool,
+{
+}
+
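Two behaviours of the adaptor above are worth seeing in use: `next` maps the set predicate onto the map-level `(key, value)` predicate, and the `Drop` impl drains whatever the caller did not consume. A small check of the latter, using the same nightly `btree_drain_filter` feature as the doc example above:

```rust
#![feature(btree_drain_filter)]
use std::collections::BTreeSet;

fn main() {
    let mut set: BTreeSet<i32> = (0..10).collect();

    // Pull only the first matching value out of the iterator...
    let mut evens = set.drain_filter(|v| v % 2 == 0);
    assert_eq!(evens.next(), Some(0));
    drop(evens);

    // ...dropping the DrainFilter runs the remaining values through the
    // predicate as well, so every even value has been removed.
    assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7, 9]);
}
```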
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Ord> Extend<T> for BTreeSet<T> {
     #[inline]
diff --git a/src/liballoc/collections/linked_list.rs b/src/liballoc/collections/linked_list.rs
index a9b4e3e4706..53d4f7239b7 100644
--- a/src/liballoc/collections/linked_list.rs
+++ b/src/liballoc/collections/linked_list.rs
@@ -841,10 +841,10 @@ impl<T> LinkedList<T> {
     /// d.push_front(2);
     /// d.push_front(3);
     ///
-    /// let mut splitted = d.split_off(2);
+    /// let mut split = d.split_off(2);
     ///
-    /// assert_eq!(splitted.pop_front(), Some(1));
-    /// assert_eq!(splitted.pop_front(), None);
+    /// assert_eq!(split.pop_front(), Some(1));
+    /// assert_eq!(split.pop_front(), None);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn split_off(&mut self, at: usize) -> LinkedList<T> {
@@ -959,7 +959,7 @@ impl<T> LinkedList<T> {
         let it = self.head;
         let old_len = self.len;
 
-        DrainFilter { list: self, it: it, pred: filter, idx: 0, old_len: old_len }
+        DrainFilter { list: self, it, pred: filter, idx: 0, old_len }
     }
 }
 
@@ -1427,7 +1427,7 @@ impl<'a, T> CursorMut<'a, T> {
     /// `CursorMut`, which means it cannot outlive the `CursorMut` and that the
     /// `CursorMut` is frozen for the lifetime of the `Cursor`.
     #[unstable(feature = "linked_list_cursors", issue = "58533")]
-    pub fn as_cursor<'cm>(&'cm self) -> Cursor<'cm, T> {
+    pub fn as_cursor(&self) -> Cursor<'_, T> {
         Cursor { list: self.list, current: self.current, index: self.index }
     }
 }
diff --git a/src/liballoc/collections/mod.rs b/src/liballoc/collections/mod.rs
index 0bb62373fab..6b21e54f66a 100644
--- a/src/liballoc/collections/mod.rs
+++ b/src/liballoc/collections/mod.rs
@@ -42,6 +42,7 @@ pub use linked_list::LinkedList;
 pub use vec_deque::VecDeque;
 
 use crate::alloc::{Layout, LayoutErr};
+use core::fmt::Display;
 
 /// The error type for `try_reserve` methods.
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -77,6 +78,23 @@ impl From<LayoutErr> for TryReserveError {
     }
 }
 
+#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+impl Display for TryReserveError {
+    fn fmt(
+        &self,
+        fmt: &mut core::fmt::Formatter<'_>,
+    ) -> core::result::Result<(), core::fmt::Error> {
+        fmt.write_str("memory allocation failed")?;
+        let reason = match &self {
+            TryReserveError::CapacityOverflow => {
+                " because the computed capacity exceeded the collection's maximum"
+            }
+            TryReserveError::AllocError { .. } => " because the memory allocator returned an error",
+        };
+        fmt.write_str(reason)
+    }
+}
+
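With this impl, the error returned by the `try_reserve` family can be printed directly. A hedged usage sketch, assuming a nightly toolchain with the `try_reserve` feature that gates this API at the time of this change (the method itself is stable in later releases):

```rust
#![feature(try_reserve)]

fn main() {
    let mut v: Vec<u8> = Vec::new();

    // Requesting more than isize::MAX bytes trips the capacity check, and the
    // new Display impl renders it as a readable message, e.g.
    // "memory allocation failed because the computed capacity exceeded the collection's maximum".
    if let Err(e) = v.try_reserve(usize::MAX) {
        println!("{}", e);
    }
}
```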
 /// An intermediate trait for specialization of `Extend`.
 #[doc(hidden)]
 trait SpecExtend<I: IntoIterator> {
diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs
index 85d1d98b8a9..94532521a90 100644
--- a/src/liballoc/collections/vec_deque.rs
+++ b/src/liballoc/collections/vec_deque.rs
@@ -959,6 +959,9 @@ impl<T> VecDeque<T> {
     /// Returns a pair of slices which contain, in order, the contents of the
     /// `VecDeque`.
     ///
+    /// If [`make_contiguous`](#method.make_contiguous) was previously called, all elements
+    /// of the `VecDeque` will be in the first slice and the second slice will be empty.
+    ///
     /// # Examples
     ///
     /// ```
@@ -989,6 +992,9 @@ impl<T> VecDeque<T> {
     /// Returns a pair of slices which contain, in order, the contents of the
     /// `VecDeque`.
     ///
+    /// If [`make_contiguous`](#method.make_contiguous) was previously called, all elements
+    /// of the `VecDeque` will be in the first slice and the second slice will be empty.
+    ///
     /// # Examples
     ///
     /// ```
@@ -1876,6 +1882,7 @@ impl<T> VecDeque<T> {
     /// assert_eq!(buf2, [2, 3]);
     /// ```
     #[inline]
+    #[must_use = "use `.truncate()` if you don't need the other half"]
     #[stable(feature = "split_off", since = "1.4.0")]
     pub fn split_off(&mut self, at: usize) -> Self {
         let len = self.len();
@@ -2043,6 +2050,148 @@ impl<T> VecDeque<T> {
         }
     }
 
+    /// Rearranges the internal storage of this deque so it is one contiguous slice, which is then returned.
+    ///
+    /// This method does not allocate and does not change the order of the inserted elements.
+    /// As it returns a mutable slice, this can be used to sort or binary search a deque.
+    ///
+    /// Once the internal storage is contiguous, the [`as_slices`](#method.as_slices) and
+    /// [`as_mut_slices`](#method.as_mut_slices) methods will return the entire contents of the
+    /// `VecDeque` in a single slice.
+    ///
+    /// # Examples
+    ///
+    /// Sorting the content of a deque.
+    ///
+    /// ```
+    /// #![feature(deque_make_contiguous)]
+    ///
+    /// use std::collections::VecDeque;
+    ///
+    /// let mut buf = VecDeque::with_capacity(15);
+    ///
+    /// buf.push_back(2);
+    /// buf.push_back(1);
+    /// buf.push_front(3);
+    ///
+    /// // sorting the deque
+    /// buf.make_contiguous().sort();
+    /// assert_eq!(buf.as_slices(), (&[1, 2, 3] as &[_], &[] as &[_]));
+    ///
+    /// // sorting it in reverse order
+    /// buf.make_contiguous().sort_by(|a, b| b.cmp(a));
+    /// assert_eq!(buf.as_slices(), (&[3, 2, 1] as &[_], &[] as &[_]));
+    /// ```
+    ///
+    /// Getting immutable access to the contiguous slice.
+    ///
+    /// ```rust
+    /// #![feature(deque_make_contiguous)]
+    ///
+    /// use std::collections::VecDeque;
+    ///
+    /// let mut buf = VecDeque::new();
+    ///
+    /// buf.push_back(2);
+    /// buf.push_back(1);
+    /// buf.push_front(3);
+    ///
+    /// buf.make_contiguous();
+    /// if let (slice, &[]) = buf.as_slices() {
+    ///     // we can now be sure that `slice` contains all elements of the deque,
+    ///     // while still having immutable access to `buf`.
+    ///     assert_eq!(buf.len(), slice.len());
+    ///     assert_eq!(slice, &[3, 2, 1] as &[_]);
+    /// }
+    /// ```
+    #[unstable(feature = "deque_make_contiguous", issue = "none")]
+    pub fn make_contiguous(&mut self) -> &mut [T] {
+        if self.is_contiguous() {
+            let tail = self.tail;
+            let head = self.head;
+            return unsafe { &mut self.buffer_as_mut_slice()[tail..head] };
+        }
+
+        let buf = self.buf.ptr();
+        let cap = self.cap();
+        let len = self.len();
+
+        let free = self.tail - self.head;
+        let tail_len = cap - self.tail;
+
+        if free >= tail_len {
+            // there is enough free space to copy the tail in one go,
+            // this means that we first shift the head backwards, and then
+            // copy the tail to the correct position.
+            //
+            // from: DEFGH....ABC
+            // to:   ABCDEFGH....
+            unsafe {
+                ptr::copy(buf, buf.add(tail_len), self.head);
+                // ...DEFGH.ABC
+                ptr::copy_nonoverlapping(buf.add(self.tail), buf, tail_len);
+                // ABCDEFGH....
+
+                self.tail = 0;
+                self.head = len;
+            }
+        } else if free >= self.head {
+            // there is enough free space to copy the head in one go,
+            // this means that we first shift the tail forwards, and then
+            // copy the head to the correct position.
+            //
+            // from: FGH....ABCDE
+            // to:   ...ABCDEFGH.
+            unsafe {
+                ptr::copy(buf.add(self.tail), buf.add(self.head), tail_len);
+                // FGHABCDE....
+                ptr::copy_nonoverlapping(buf, buf.add(self.head + tail_len), self.head);
+                // ...ABCDEFGH.
+
+                self.tail = self.head;
+                self.head = self.tail + len;
+            }
+        } else {
+            // free is smaller than both head and tail,
+            // this means we have to slowly "swap" the tail and the head.
+            //
+            // from: EFGHI...ABCD or HIJK.ABCDEFG
+            // to:   ABCDEFGHI... or ABCDEFGHIJK.
+            let mut left_edge: usize = 0;
+            let mut right_edge: usize = self.tail;
+            unsafe {
+                // The general problem looks like this
+                // GHIJKLM...ABCDEF - before any swaps
+                // ABCDEFM...GHIJKL - after 1 pass of swaps
+                // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
+                //                  - then restart the algorithm with a new (smaller) store
+                // Sometimes the temp store is reached when the right edge is at the end
+                // of the buffer - this means we've hit the right order with fewer swaps!
+                // E.g.
+                // EF..ABCD
+                // ABCDEF.. - after only four swaps we've finished
+                while left_edge < len && right_edge != cap {
+                    let mut right_offset = 0;
+                    for i in left_edge..right_edge {
+                        right_offset = (i - left_edge) % (cap - right_edge);
+                        let src: isize = (right_edge + right_offset) as isize;
+                        ptr::swap(buf.add(i), buf.offset(src));
+                    }
+                    let n_ops = right_edge - left_edge;
+                    left_edge += n_ops;
+                    right_edge += right_offset + 1;
+                }
+
+                self.tail = 0;
+                self.head = len;
+            }
+        }
+
+        let tail = self.tail;
+        let head = self.head;
+        unsafe { &mut self.buffer_as_mut_slice()[tail..head] }
+    }
+
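Beyond the sorting example in the docs above, the returned slice also makes binary search straightforward, and afterwards `as_slices` reports everything in its first half. A small usage sketch behind the same `deque_make_contiguous` nightly feature:

```rust
#![feature(deque_make_contiguous)]

use std::collections::VecDeque;

fn main() {
    let mut buf: VecDeque<i32> = VecDeque::new();
    buf.push_back(30);
    buf.push_back(10);
    buf.push_front(20); // the ring may now wrap internally

    // One contiguous, mutable view: sort it, then binary search it.
    let slice = buf.make_contiguous();
    slice.sort();
    assert_eq!(slice.binary_search(&20), Ok(1));

    // Everything now lives in the first slice.
    assert_eq!(buf.as_slices(), (&[10, 20, 30] as &[_], &[] as &[_]));
}
```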
     /// Rotates the double-ended queue `mid` places to the left.
     ///
     /// Equivalently,
@@ -2132,7 +2281,7 @@ impl<T> VecDeque<T> {
     // Safety: the following two methods require that the rotation amount
     // be less than half the length of the deque.
     //
-    // `wrap_copy` requres that `min(x, cap() - x) + copy_len <= cap()`,
+    // `wrap_copy` requires that `min(x, cap() - x) + copy_len <= cap()`,
     // but then `min` is never more than half the capacity, regardless of x,
     // so it's sound to call here because we're calling with something
     // less than half the length, which is never above half the capacity.
@@ -2802,63 +2951,16 @@ impl<T> From<VecDeque<T>> for Vec<T> {
     /// assert_eq!(vec, [8, 9, 1, 2, 3, 4]);
     /// assert_eq!(vec.as_ptr(), ptr);
     /// ```
-    fn from(other: VecDeque<T>) -> Self {
+    fn from(mut other: VecDeque<T>) -> Self {
+        other.make_contiguous();
+
         unsafe {
             let buf = other.buf.ptr();
             let len = other.len();
-            let tail = other.tail;
-            let head = other.head;
             let cap = other.cap();
 
-            // Need to move the ring to the front of the buffer, as vec will expect this.
-            if other.is_contiguous() {
-                ptr::copy(buf.add(tail), buf, len);
-            } else {
-                if (tail - head) >= cmp::min(cap - tail, head) {
-                    // There is enough free space in the centre for the shortest block so we can
-                    // do this in at most three copy moves.
-                    if (cap - tail) > head {
-                        // right hand block is the long one; move that enough for the left
-                        ptr::copy(buf.add(tail), buf.add(tail - head), cap - tail);
-                        // copy left in the end
-                        ptr::copy(buf, buf.add(cap - head), head);
-                        // shift the new thing to the start
-                        ptr::copy(buf.add(tail - head), buf, len);
-                    } else {
-                        // left hand block is the long one, we can do it in two!
-                        ptr::copy(buf, buf.add(cap - tail), head);
-                        ptr::copy(buf.add(tail), buf, cap - tail);
-                    }
-                } else {
-                    // Need to use N swaps to move the ring
-                    // We can use the space at the end of the ring as a temp store
-
-                    let mut left_edge: usize = 0;
-                    let mut right_edge: usize = tail;
-
-                    // The general problem looks like this
-                    // GHIJKLM...ABCDEF - before any swaps
-                    // ABCDEFM...GHIJKL - after 1 pass of swaps
-                    // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
-                    //                  - then restart the algorithm with a new (smaller) store
-                    // Sometimes the temp store is reached when the right edge is at the end
-                    // of the buffer - this means we've hit the right order with fewer swaps!
-                    // E.g
-                    // EF..ABCD
-                    // ABCDEF.. - after four only swaps we've finished
-
-                    while left_edge < len && right_edge != cap {
-                        let mut right_offset = 0;
-                        for i in left_edge..right_edge {
-                            right_offset = (i - left_edge) % (cap - right_edge);
-                            let src: isize = (right_edge + right_offset) as isize;
-                            ptr::swap(buf.add(i), buf.offset(src));
-                        }
-                        let n_ops = right_edge - left_edge;
-                        left_edge += n_ops;
-                        right_edge += right_offset + 1;
-                    }
-                }
+            if other.head != 0 {
+                ptr::copy(buf.add(other.tail), buf, len);
             }
             let out = Vec::from_raw_parts(buf, len, cap);
             mem::forget(other);
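The conversion still reuses the deque's buffer; it merely straightens the ring out (now via `make_contiguous`) before handing it to `Vec::from_raw_parts`. A quick check that front-to-back order survives a deque built with `push_front` (whether the internal ring actually wraps here is an implementation detail; the asserted order holds either way):

```rust
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = (1..=4).collect(); // 1 2 3 4
    deque.push_front(0);
    deque.push_front(-1);

    // From<VecDeque<T>> for Vec<T> preserves front-to-back order.
    let vec: Vec<i32> = Vec::from(deque);
    assert_eq!(vec, [-1, 0, 1, 2, 3, 4]);
}
```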
diff --git a/src/liballoc/collections/vec_deque/tests.rs b/src/liballoc/collections/vec_deque/tests.rs
index f2ce5b1d15d..8ef5ec78e05 100644
--- a/src/liballoc/collections/vec_deque/tests.rs
+++ b/src/liballoc/collections/vec_deque/tests.rs
@@ -1,6 +1,6 @@
 use super::*;
 
-use ::test;
+use test;
 
 #[bench]
 #[cfg_attr(miri, ignore)] // Miri does not support benchmarks
@@ -131,6 +131,87 @@ fn test_insert() {
 }
 
 #[test]
+fn make_contiguous_big_tail() {
+    let mut tester = VecDeque::with_capacity(15);
+
+    for i in 0..3 {
+        tester.push_back(i);
+    }
+
+    for i in 3..10 {
+        tester.push_front(i);
+    }
+
+    // 012......9876543
+    assert_eq!(tester.capacity(), 15);
+    assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
+
+    let expected_start = tester.head;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_big_head() {
+    let mut tester = VecDeque::with_capacity(15);
+
+    for i in 0..8 {
+        tester.push_back(i);
+    }
+
+    for i in 8..10 {
+        tester.push_front(i);
+    }
+
+    // 01234567......98
+    let expected_start = 0;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_small_free() {
+    let mut tester = VecDeque::with_capacity(15);
+
+    for i in 'A' as u8..'I' as u8 {
+        tester.push_back(i as char);
+    }
+
+    for i in 'I' as u8..'N' as u8 {
+        tester.push_front(i as char);
+    }
+
+    // ABCDEFGH...MLKJI
+    let expected_start = 0;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!(
+        (&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
+        tester.as_slices()
+    );
+
+    tester.clear();
+    for i in 'I' as u8..'N' as u8 {
+        tester.push_back(i as char);
+    }
+
+    for i in 'A' as u8..'I' as u8 {
+        tester.push_front(i as char);
+    }
+
+    // IJKLM...HGFEDCBA
+    let expected_start = 0;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!(
+        (&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
+        tester.as_slices()
+    );
+}
+
+#[test]
 fn test_remove() {
     // This test checks that every single combination of tail position, length, and
     // removal position is tested. Capacity 15 should be large enough to cover every case.
diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs
index e6162e0f571..13ef2f063f9 100644
--- a/src/liballoc/fmt.rs
+++ b/src/liballoc/fmt.rs
@@ -247,6 +247,21 @@
 //! Hello, `     123` has 3 right-aligned characters
 //! ```
 //!
+//! ## Localization
+//!
+//! In some programming languages, the behavior of string formatting functions
+//! depends on the operating system's locale setting. The format functions
+//! provided by Rust's standard library do not have any concept of locale, and
+//! will produce the same results on all systems regardless of user
+//! configuration.
+//!
+//! For example, the following code will always print `1.5` even if the system
+//! locale uses a decimal separator other than a dot.
+//!
+//! ```
+//! println!("The value is {}", 1.5);
+//! ```
+//!
 //! # Escaping
 //!
 //! The literal characters `{` and `}` may be included in a string by preceding
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index ffa4176cc79..121c1cde548 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -80,6 +80,7 @@
 #![feature(box_into_raw_non_null)]
 #![feature(box_patterns)]
 #![feature(box_syntax)]
+#![feature(cfg_sanitize)]
 #![feature(cfg_target_has_atomic)]
 #![feature(coerce_unsized)]
 #![feature(const_generic_impls_guard)]
@@ -98,6 +99,8 @@
 #![feature(internal_uninit_const)]
 #![feature(lang_items)]
 #![feature(libc)]
+#![cfg_attr(not(bootstrap), feature(negative_impls))]
+#![feature(new_uninit)]
 #![feature(nll)]
 #![feature(optin_builtin_traits)]
 #![feature(pattern)]
@@ -160,6 +163,8 @@ pub mod str;
 pub mod string;
 #[cfg(target_has_atomic = "ptr")]
 pub mod sync;
+#[cfg(target_has_atomic = "ptr")]
+pub mod task;
 #[cfg(test)]
 mod tests;
 pub mod vec;
diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs
index 422d3486f92..4bc0c3a079d 100644
--- a/src/liballoc/macros.rs
+++ b/src/liballoc/macros.rs
@@ -36,6 +36,9 @@
 #[stable(feature = "rust1", since = "1.0.0")]
 #[allow_internal_unstable(box_syntax)]
 macro_rules! vec {
+    () => (
+        $crate::vec::Vec::new()
+    );
     ($elem:expr; $n:expr) => (
         $crate::vec::from_elem($elem, $n)
     );
@@ -51,6 +54,9 @@ macro_rules! vec {
 // NB see the slice::hack module in slice.rs for more information
 #[cfg(test)]
 macro_rules! vec {
+    () => (
+        $crate::vec::Vec::new()
+    );
     ($elem:expr; $n:expr) => (
         $crate::vec::from_elem($elem, $n)
     );
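The new empty arm makes `vec![]` expand directly to `Vec::new()`, so an empty literal performs no allocation. A quick check of the resulting behaviour:

```rust
fn main() {
    let v: Vec<i32> = vec![]; // expands to Vec::new() with the new arm
    assert!(v.is_empty());
    assert_eq!(v.capacity(), 0); // no buffer was allocated
}
```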
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 345834d7daa..2bf40490e78 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -1,13 +1,19 @@
 #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
 #![doc(hidden)]
 
+use core::alloc::MemoryBlock;
 use core::cmp;
-use core::mem;
+use core::mem::{self, MaybeUninit};
 use core::ops::Drop;
-use core::ptr::{self, NonNull, Unique};
+use core::ptr::{NonNull, Unique};
 use core::slice;
 
-use crate::alloc::{handle_alloc_error, AllocErr, AllocRef, Global, Layout};
+use crate::alloc::{
+    handle_alloc_error, AllocErr,
+    AllocInit::{self, *},
+    AllocRef, Global, Layout,
+    ReallocPlacement::{self, *},
+};
 use crate::boxed::Box;
 use crate::collections::TryReserveError::{self, *};
 
@@ -21,83 +27,26 @@ mod tests;
 ///
 /// * Produces `Unique::empty()` on zero-sized types.
 /// * Produces `Unique::empty()` on zero-length allocations.
+/// * Avoids freeing `Unique::empty()`.
 /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
 /// * Guards against 32-bit systems allocating more than isize::MAX bytes.
 /// * Guards against overflowing your length.
-/// * Aborts on OOM or calls `handle_alloc_error` as applicable.
-/// * Avoids freeing `Unique::empty()`.
+/// * Calls `handle_alloc_error` for fallible allocations.
 /// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
+/// * Uses the excess returned from the allocator to expose the largest available capacity.
 ///
 /// This type does not in any way inspect the memory that it manages. When dropped it *will*
 /// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
 /// to handle the actual things *stored* inside of a `RawVec`.
 ///
-/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types.
-/// This enables you to use capacity-growing logic catch the overflows in your length
-/// that might occur with zero-sized types.
-///
-/// The above means that you need to be careful when round-tripping this type with a
-/// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`,
-/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity
-/// field. This allows zero-sized types to not be special-cased by consumers of
-/// this type.
+/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
+/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
+/// `Box<[T]>`, since `capacity()` won't yield the length.
 #[allow(missing_debug_implementations)]
 pub struct RawVec<T, A: AllocRef = Global> {
     ptr: Unique<T>,
     cap: usize,
-    a: A,
-}
-
-impl<T, A: AllocRef> RawVec<T, A> {
-    /// Like `new`, but parameterized over the choice of allocator for
-    /// the returned `RawVec`.
-    pub const fn new_in(a: A) -> Self {
-        let cap = if mem::size_of::<T>() == 0 { core::usize::MAX } else { 0 };
-
-        // `Unique::empty()` doubles as "unallocated" and "zero-sized allocation".
-        RawVec { ptr: Unique::empty(), cap, a }
-    }
-
-    /// Like `with_capacity`, but parameterized over the choice of
-    /// allocator for the returned `RawVec`.
-    #[inline]
-    pub fn with_capacity_in(capacity: usize, a: A) -> Self {
-        RawVec::allocate_in(capacity, false, a)
-    }
-
-    /// Like `with_capacity_zeroed`, but parameterized over the choice
-    /// of allocator for the returned `RawVec`.
-    #[inline]
-    pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self {
-        RawVec::allocate_in(capacity, true, a)
-    }
-
-    fn allocate_in(mut capacity: usize, zeroed: bool, mut a: A) -> Self {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-
-            let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
-            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
-
-            // Handles ZSTs and `capacity == 0` alike.
-            let ptr = if alloc_size == 0 {
-                NonNull::<T>::dangling()
-            } else {
-                let align = mem::align_of::<T>();
-                let layout = Layout::from_size_align(alloc_size, align).unwrap();
-                let result = if zeroed { a.alloc_zeroed(layout) } else { a.alloc(layout) };
-                match result {
-                    Ok((ptr, size)) => {
-                        capacity = size / elem_size;
-                        ptr.cast()
-                    }
-                    Err(_) => handle_alloc_error(layout),
-                }
-            };
-
-            RawVec { ptr: ptr.into(), cap: capacity, a }
-        }
-    }
+    alloc: A,
 }
 
 impl<T> RawVec<T, Global> {
@@ -140,39 +89,26 @@ impl<T> RawVec<T, Global> {
     /// Aborts on OOM.
     #[inline]
     pub fn with_capacity(capacity: usize) -> Self {
-        RawVec::allocate_in(capacity, false, Global)
+        Self::with_capacity_in(capacity, Global)
     }
 
     /// Like `with_capacity`, but guarantees the buffer is zeroed.
     #[inline]
     pub fn with_capacity_zeroed(capacity: usize) -> Self {
-        RawVec::allocate_in(capacity, true, Global)
-    }
-}
-
-impl<T, A: AllocRef> RawVec<T, A> {
-    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
-    ///
-    /// # Undefined Behavior
-    ///
-    /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
-    /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
-    /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
-    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
-        RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a }
+        Self::with_capacity_zeroed_in(capacity, Global)
     }
-}
 
-impl<T> RawVec<T, Global> {
     /// Reconstitutes a `RawVec` from a pointer and capacity.
     ///
-    /// # Undefined Behavior
+    /// # Safety
     ///
     /// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
-    /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
     /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
+    #[inline]
     pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
-        RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a: Global }
+        Self::from_raw_parts_in(ptr, capacity, Global)
     }
 
     /// Converts a `Box<[T]>` into a `RawVec<T>`.
@@ -186,6 +122,56 @@ impl<T> RawVec<T, Global> {
 }
 
 impl<T, A: AllocRef> RawVec<T, A> {
+    /// Like `new`, but parameterized over the choice of allocator for
+    /// the returned `RawVec`.
+    pub const fn new_in(alloc: A) -> Self {
+        // `cap: 0` means "unallocated". Zero-sized types are ignored.
+        Self { ptr: Unique::empty(), cap: 0, alloc }
+    }
+
+    /// Like `with_capacity`, but parameterized over the choice of
+    /// allocator for the returned `RawVec`.
+    #[inline]
+    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+        Self::allocate_in(capacity, Uninitialized, alloc)
+    }
+
+    /// Like `with_capacity_zeroed`, but parameterized over the choice
+    /// of allocator for the returned `RawVec`.
+    #[inline]
+    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+        Self::allocate_in(capacity, Zeroed, alloc)
+    }
+
+    fn allocate_in(capacity: usize, init: AllocInit, mut alloc: A) -> Self {
+        if mem::size_of::<T>() == 0 {
+            Self::new_in(alloc)
+        } else {
+            let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow());
+            alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow());
+
+            let memory = alloc.alloc(layout, init).unwrap_or_else(|_| handle_alloc_error(layout));
+            Self {
+                ptr: memory.ptr.cast().into(),
+                cap: Self::capacity_from_bytes(memory.size),
+                alloc,
+            }
+        }
+    }
+
+    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
+    ///
+    /// # Safety
+    ///
+    /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
+    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+    /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
+    #[inline]
+    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
+        Self { ptr: Unique::new_unchecked(ptr), cap: capacity, alloc: a }
+    }
+
     /// Gets a raw pointer to the start of the allocation. Note that this is
     /// `Unique::empty()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
     /// be careful.
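The shape of `allocate_in` above (skip the allocator for zero-sized types, otherwise build an array layout and treat failure as fatal) can be sketched against the stable `std::alloc` API. `allocate_buffer` is a hypothetical helper for illustration only, and it assumes a nonzero capacity for non-ZSTs.

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use std::mem;
use std::ptr::NonNull;

// Zero-sized types never touch the allocator; everything else goes through
// `Layout::array` and treats an allocation failure as fatal.
fn allocate_buffer<T>(capacity: usize) -> *mut T {
    if mem::size_of::<T>() == 0 {
        // A dangling pointer is enough for ZSTs.
        return NonNull::dangling().as_ptr();
    }
    let layout = Layout::array::<T>(capacity).expect("capacity overflow");
    unsafe {
        let ptr = alloc(layout) as *mut T;
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        ptr
    }
}

fn main() {
    let ptr = allocate_buffer::<u64>(8);
    assert!(!ptr.is_null());
    unsafe { dealloc(ptr as *mut u8, Layout::array::<u64>(8).unwrap()) };
}
```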
@@ -198,21 +184,21 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if mem::size_of::<T>() == 0 { !0 } else { self.cap }
+        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
     }
 
     /// Returns a shared reference to the allocator backing this `RawVec`.
     pub fn alloc(&self) -> &A {
-        &self.a
+        &self.alloc
     }
 
     /// Returns a mutable reference to the allocator backing this `RawVec`.
     pub fn alloc_mut(&mut self) -> &mut A {
-        &mut self.a
+        &mut self.alloc
     }
 
-    fn current_layout(&self) -> Option<Layout> {
-        if self.cap == 0 {
+    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
+        if mem::size_of::<T>() == 0 || self.cap == 0 {
             None
         } else {
             // We have an allocated chunk of memory, so we can bypass runtime
@@ -220,7 +206,8 @@ impl<T, A: AllocRef> RawVec<T, A> {
             unsafe {
                 let align = mem::align_of::<T>();
                 let size = mem::size_of::<T>() * self.cap;
-                Some(Layout::from_size_align_unchecked(size, align))
+                let layout = Layout::from_size_align_unchecked(size, align);
+                Some((self.ptr.cast().into(), layout))
             }
         }
     }
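The `usize::MAX` capacity for zero-sized types is observable through `Vec`, which is backed by `RawVec`; a quick check:

```rust
fn main() {
    // Zero-sized types never allocate, so the capacity is reported as "infinite".
    let units: Vec<()> = Vec::with_capacity(10);
    assert_eq!(units.capacity(), usize::MAX);

    // A sized type reports whatever was actually allocated.
    let bytes: Vec<u8> = Vec::with_capacity(10);
    assert!(bytes.capacity() >= 10);
}
```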
@@ -276,50 +263,10 @@ impl<T, A: AllocRef> RawVec<T, A> {
     #[inline(never)]
     #[cold]
     pub fn double(&mut self) {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-
-            // Since we set the capacity to `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            assert!(elem_size != 0, "capacity overflow");
-
-            let (ptr, new_cap) = match self.current_layout() {
-                Some(cur) => {
-                    // Since we guarantee that we never allocate more than
-                    // `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as
-                    // a precondition, so this can't overflow. Additionally the
-                    // alignment will never be too large as to "not be
-                    // satisfiable", so `Layout::from_size_align` will always
-                    // return `Some`.
-                    //
-                    // TL;DR, we bypass runtime checks due to dynamic assertions
-                    // in this module, allowing us to use
-                    // `from_size_align_unchecked`.
-                    let new_cap = 2 * self.cap;
-                    let new_size = new_cap * elem_size;
-                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
-                    let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, new_size);
-                    match ptr_res {
-                        Ok((ptr, new_size)) => (ptr, new_size / elem_size),
-                        Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
-                            new_size,
-                            cur.align(),
-                        )),
-                    }
-                }
-                None => {
-                    // Skip to 4 because tiny `Vec`'s are dumb; but not if that
-                    // would cause overflow.
-                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
-                    let layout = Layout::array::<T>(new_cap).unwrap();
-                    match self.a.alloc(layout) {
-                        Ok((ptr, new_size)) => (ptr, new_size / elem_size),
-                        Err(_) => handle_alloc_error(layout),
-                    }
-                }
-            };
-            self.ptr = ptr.cast().into();
-            self.cap = new_cap;
+        match self.grow(Double, MayMove, Uninitialized) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
         }
     }
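`double` now routes through the strategy-based `grow`. The amortized rule it replaces, spelled out in `amortized_new_size` (removed in the next hunk) and meant to be preserved by the `Strategy::Amortized` variant at the end of this file, is simply "at least double, and at least enough for the request". A standalone sketch with a hypothetical `amortized_new_cap` helper:

```rust
use std::cmp;

/// At least double the current capacity, and at least enough for the request;
/// the checked add reports a capacity overflow as `None`.
fn amortized_new_cap(cap: usize, used_capacity: usize, needed_extra_capacity: usize) -> Option<usize> {
    let required = used_capacity.checked_add(needed_extra_capacity)?;
    // `cap` never exceeds isize::MAX, so doubling it cannot overflow a usize.
    Some(cmp::max(cap * 2, required))
}

fn main() {
    assert_eq!(amortized_new_cap(4, 4, 1), Some(8));        // doubling wins
    assert_eq!(amortized_new_cap(4, 4, 100), Some(104));    // the request wins
    assert_eq!(amortized_new_cap(0, 1, usize::MAX), None);  // overflow is reported
}
```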
 
@@ -338,99 +285,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
     #[inline(never)]
     #[cold]
     pub fn double_in_place(&mut self) -> bool {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-            let old_layout = match self.current_layout() {
-                Some(layout) => layout,
-                None => return false, // nothing to double
-            };
-
-            // Since we set the capacity to `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            assert!(elem_size != 0, "capacity overflow");
-
-            // Since we guarantee that we never allocate more than `isize::MAX`
-            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
-            // this can't overflow.
-            //
-            // Similarly to with `double` above, we can go straight to
-            // `Layout::from_size_align_unchecked` as we know this won't
-            // overflow and the alignment is sufficiently small.
-            let new_cap = 2 * self.cap;
-            let new_size = new_cap * elem_size;
-            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
-            match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) {
-                Ok(_) => {
-                    // We can't directly divide `size`.
-                    self.cap = new_cap;
-                    true
-                }
-                Err(_) => false,
-            }
-        }
-    }
-
-    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve_exact(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact)
-    }
-
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
-    /// will reallocate the minimum possible amount of memory necessary.
-    /// Generally this will be exactly the amount of memory necessary,
-    /// but in principle the allocator is free to give back more than
-    /// we asked for.
-    ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
-    /// the requested space. This is not really unsafe, but the unsafe
-    /// code *you* write that relies on the behavior of this function may break.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    ///
-    /// # Aborts
-    ///
-    /// Aborts on OOM.
-    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) {
-            Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { .. }) => unreachable!(),
-            Ok(()) => { /* yay */ }
-        }
-    }
-
-    /// Calculates the buffer's new size given that it'll hold `used_capacity +
-    /// needed_extra_capacity` elements. This logic is used in amortized reserve methods.
-    /// Returns `(new_capacity, new_alloc_size)`.
-    fn amortized_new_size(
-        &self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<usize, TryReserveError> {
-        // Nothing we can really do about these checks, sadly.
-        let required_cap =
-            used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
-        let double_cap = self.cap * 2;
-        // `double_cap` guarantees exponential growth.
-        Ok(cmp::max(double_cap, required_cap))
-    }
-
-    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized)
+        self.grow(Double, InPlace, Uninitialized).is_ok()
     }
 
     /// Ensures that the buffer contains at least enough space to hold
@@ -486,12 +341,26 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// # }
     /// ```
     pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) {
+        match self.try_reserve(used_capacity, needed_extra_capacity) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { .. }) => unreachable!(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
         }
     }
+
+    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+    pub fn try_reserve(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+        } else {
+            Ok(())
+        }
+    }
+
     /// Attempts to ensure that the buffer contains at least enough space to hold
     /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
     /// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -510,45 +379,54 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// * Panics on 32-bit platforms if the requested capacity exceeds
     ///   `isize::MAX` bytes.
     pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity. If the current `cap` is 0, we can't
-            // reallocate in place.
-            // Wrapping in case they give a bad `used_capacity`
-            let old_layout = match self.current_layout() {
-                Some(layout) => layout,
-                None => return false,
-            };
-            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
-                return false;
-            }
+        // This is more readable than putting this in one line:
+        // `!self.needs_to_grow(...) || self.grow(...).is_ok()`
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized)
+                .is_ok()
+        } else {
+            true
+        }
+    }
 
-            let new_cap = self
-                .amortized_new_size(used_capacity, needed_extra_capacity)
-                .unwrap_or_else(|_| capacity_overflow());
-
-            // Here, `cap < used_capacity + needed_extra_capacity <= new_cap`
-            // (regardless of whether `self.cap - used_capacity` wrapped).
-            // Therefore, we can safely call `grow_in_place`.
-
-            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
-            // FIXME: may crash and burn on over-reserve
-            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
-            match self.a.grow_in_place(
-                NonNull::from(self.ptr).cast(),
-                old_layout,
-                new_layout.size(),
-            ) {
-                Ok(_) => {
-                    self.cap = new_cap;
-                    true
-                }
-                Err(_) => false,
-            }
+    /// Ensures that the buffer contains at least enough space to hold
+    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
+    /// will reallocate the minimum possible amount of memory necessary.
+    /// Generally this will be exactly the amount of memory necessary,
+    /// but in principle the allocator is free to give back more than
+    /// we asked for.
+    ///
+    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe
+    /// code *you* write that relies on the behavior of this function may break.
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM.
+    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
+        match self.try_reserve_exact(used_capacity, needed_extra_capacity) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
+        }
+    }
+
+    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+    pub fn try_reserve_exact(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+        } else {
+            Ok(())
         }
     }
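As context for the split above between the panicking and fallible reserve paths: in user code this machinery surfaces through `Vec`'s fallible reservation API. A minimal sketch, written against the now-stable `Vec::try_reserve_exact` purely for illustration (the helper name is hypothetical and not part of this diff):

    use std::collections::TryReserveError;

    // Fallible counterpart of the panicking `reserve_exact` path above:
    // the caller decides how to react instead of panicking or aborting.
    fn append_exact(buf: &mut Vec<u8>, data: &[u8]) -> Result<(), TryReserveError> {
        buf.try_reserve_exact(data.len())?;
        buf.extend_from_slice(data);
        Ok(())
    }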
 
@@ -563,166 +441,157 @@ impl<T, A: AllocRef> RawVec<T, A> {
     ///
     /// Aborts on OOM.
     pub fn shrink_to_fit(&mut self, amount: usize) {
-        let elem_size = mem::size_of::<T>();
-
-        // Set the `cap` because they might be about to promote to a `Box<[T]>`
-        if elem_size == 0 {
-            self.cap = amount;
-            return;
-        }
-
-        // This check is my waterloo; it's the only thing `Vec` wouldn't have to do.
-        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
-
-        if amount == 0 {
-            // We want to create a new zero-length vector within the
-            // same allocator. We use `ptr::write` to avoid an
-            // erroneous attempt to drop the contents, and we use
-            // `ptr::read` to sidestep condition against destructuring
-            // types that implement Drop.
-
-            unsafe {
-                let a = ptr::read(&self.a as *const A);
-                self.dealloc_buffer();
-                ptr::write(self, RawVec::new_in(a));
-            }
-        } else if self.cap != amount {
-            unsafe {
-                // We know here that our `amount` is greater than zero. This
-                // implies, via the assert above, that capacity is also greater
-                // than zero, which means that we've got a current layout that
-                // "fits"
-                //
-                // We also know that `self.cap` is greater than `amount`, and
-                // consequently we don't need runtime checks for creating either
-                // layout.
-                let old_size = elem_size * self.cap;
-                let new_size = elem_size * amount;
-                let align = mem::align_of::<T>();
-                let old_layout = Layout::from_size_align_unchecked(old_size, align);
-                match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, new_size) {
-                    Ok((ptr, _)) => self.ptr = ptr.cast().into(),
-                    Err(_) => {
-                        handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
-                    }
-                }
-            }
-            self.cap = amount;
+        match self.shrink(amount, MayMove) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
         }
     }
 }
 
-enum Fallibility {
-    Fallible,
-    Infallible,
+#[derive(Copy, Clone)]
+enum Strategy {
+    Double,
+    Amortized { used_capacity: usize, needed_extra_capacity: usize },
+    Exact { used_capacity: usize, needed_extra_capacity: usize },
 }
+use Strategy::*;
 
-use Fallibility::*;
+impl<T, A: AllocRef> RawVec<T, A> {
+    /// Returns whether the buffer needs to grow to fulfill the needed extra capacity.
+    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
+    fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
+        needed_extra_capacity > self.capacity().wrapping_sub(used_capacity)
+    }
 
-enum ReserveStrategy {
-    Exact,
-    Amortized,
-}
+    fn capacity_from_bytes(excess: usize) -> usize {
+        debug_assert_ne!(mem::size_of::<T>(), 0);
+        excess / mem::size_of::<T>()
+    }
 
-use ReserveStrategy::*;
+    fn set_memory(&mut self, memory: MemoryBlock) {
+        self.ptr = memory.ptr.cast().into();
+        self.cap = Self::capacity_from_bytes(memory.size);
+    }
 
-impl<T, A: AllocRef> RawVec<T, A> {
-    fn reserve_internal(
+    /// Single method to handle all possibilities of growing the buffer.
+    fn grow(
         &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-        fallibility: Fallibility,
-        strategy: ReserveStrategy,
+        strategy: Strategy,
+        placement: ReallocPlacement,
+        init: AllocInit,
     ) -> Result<(), TryReserveError> {
         let elem_size = mem::size_of::<T>();
+        if elem_size == 0 {
+            // Since we return a capacity of `usize::MAX` when `elem_size` is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow);
+        }
+        let new_layout = match strategy {
+            Double => unsafe {
+                // Since we guarantee that we never allocate more than `isize::MAX` bytes,
+                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
+                // Additionally the alignment will never be so large as to "not be satisfiable",
+                // so `Layout::from_size_align` will always return `Some`.
+                //
+                // TL;DR, we bypass runtime checks due to dynamic assertions in this module,
+                // allowing us to use `from_size_align_unchecked`.
+                let cap = if self.cap == 0 {
+                    // Skip to 4 because tiny `Vec`s are dumb; but not if that would cause overflow.
+                    if elem_size > usize::MAX / 8 { 1 } else { 4 }
+                } else {
+                    self.cap * 2
+                };
+                Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>())
+            },
+            Amortized { used_capacity, needed_extra_capacity } => {
+                // Nothing we can really do about these checks, sadly.
+                let required_cap =
+                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+                // Cannot overflow, because `cap <= isize::MAX`, and the type of `cap` is `usize`.
+                let double_cap = self.cap * 2;
+                // `double_cap` guarantees exponential growth.
+                let cap = cmp::max(double_cap, required_cap);
+                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
+            }
+            Exact { used_capacity, needed_extra_capacity } => {
+                let cap =
+                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
+            }
+        };
+        alloc_guard(new_layout.size())?;
 
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they gave a bad `used_capacity`.
-            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
-                return Ok(());
+        let memory = if let Some((ptr, old_layout)) = self.current_memory() {
+            debug_assert_eq!(old_layout.align(), new_layout.align());
+            unsafe {
+                self.alloc
+                    .grow(ptr, old_layout, new_layout.size(), placement, init)
+                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+            }
+        } else {
+            match placement {
+                MayMove => self.alloc.alloc(new_layout, init),
+                InPlace => Err(AllocErr),
             }
+            .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+        };
+        self.set_memory(memory);
+        Ok(())
+    }
 
-            // Nothing we can really do about these checks, sadly.
-            let new_cap = match strategy {
-                Exact => {
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?
-                }
-                Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?,
-            };
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
+    fn shrink(
+        &mut self,
+        amount: usize,
+        placement: ReallocPlacement,
+    ) -> Result<(), TryReserveError> {
+        assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
 
-            alloc_guard(new_layout.size())?;
+        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
+        let new_size = amount * mem::size_of::<T>();
 
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size())
-                }
-                None => self.a.alloc(new_layout),
-            };
-
-            let (ptr, new_cap) = match (res, fallibility) {
-                (Err(AllocErr), Infallible) => handle_alloc_error(new_layout),
-                (Err(AllocErr), Fallible) => {
-                    return Err(TryReserveError::AllocError {
-                        layout: new_layout,
-                        non_exhaustive: (),
-                    });
+        let memory = unsafe {
+            self.alloc.shrink(ptr, layout, new_size, placement).map_err(|_| {
+                TryReserveError::AllocError {
+                    layout: Layout::from_size_align_unchecked(new_size, layout.align()),
+                    non_exhaustive: (),
                 }
-                (Ok((ptr, new_size)), _) => (ptr, new_size / elem_size),
-            };
-
-            self.ptr = ptr.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+            })?
+        };
+        self.set_memory(memory);
+        Ok(())
     }
 }
 
 impl<T> RawVec<T, Global> {
-    /// Converts the entire buffer into `Box<[T]>`.
+    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
     ///
     /// Note that this will correctly reconstitute any `cap` changes
     /// that may have been performed. (See description of type for details.)
     ///
-    /// # Undefined Behavior
+    /// # Safety
     ///
-    /// All elements of `RawVec<T, Global>` must be initialized. Notice that
-    /// the rules around uninitialized boxed values are not finalized yet,
-    /// but until they are, it is advisable to avoid them.
-    pub unsafe fn into_box(self) -> Box<[T]> {
+    /// `shrink_to_fit(len)` must be called immediately prior to calling this function. This
+    /// implies that `len` must be smaller than or equal to `self.capacity()`.
+    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
+        debug_assert!(
+            len <= self.capacity(),
+            "`len` must be smaller than or equal to `self.capacity()`"
+        );
+
         // NOTE: not calling `capacity()` here; actually using the real `cap` field!
-        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
-        let output: Box<[T]> = Box::from_raw(slice);
+        let slice = slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, len);
+        let output = Box::from_raw(slice);
         mem::forget(self);
         output
     }
 }
 
-impl<T, A: AllocRef> RawVec<T, A> {
-    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
-    pub unsafe fn dealloc_buffer(&mut self) {
-        let elem_size = mem::size_of::<T>();
-        if elem_size != 0 {
-            if let Some(layout) = self.current_layout() {
-                self.a.dealloc(NonNull::from(self.ptr).cast(), layout);
-            }
-        }
-    }
-}
-
 unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
     /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
     fn drop(&mut self) {
-        unsafe {
-            self.dealloc_buffer();
+        if let Some((ptr, layout)) = self.current_memory() {
+            unsafe { self.alloc.dealloc(ptr, layout) }
         }
     }
 }
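To make the `Amortized` strategy above concrete, here is a standalone sketch of the capacity computation (illustrative only, not part of the diff): the new capacity is the larger of double the current capacity and the exact requirement, which keeps repeated pushes amortized O(1).

    use std::cmp;

    // Mirrors `Strategy::Amortized` above, ignoring layout and overflow-guard details.
    fn amortized_cap(cap: usize, used_capacity: usize, needed_extra: usize) -> Option<usize> {
        let required_cap = used_capacity.checked_add(needed_extra)?; // CapacityOverflow otherwise
        Some(cmp::max(cap * 2, required_cap))
    }

    fn main() {
        assert_eq!(amortized_cap(10, 10, 5), Some(20));  // doubling wins
        assert_eq!(amortized_cap(10, 10, 25), Some(35)); // exact requirement wins
    }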
diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs
index 860058debe1..e7ab8a305d2 100644
--- a/src/liballoc/raw_vec/tests.rs
+++ b/src/liballoc/raw_vec/tests.rs
@@ -12,6 +12,7 @@ fn allocator_param() {
     //
     // Instead, this just checks that the `RawVec` methods do at
     // least go through the Allocator API when it reserves
+
     // storage.
 
     // A dumb allocator that consumes a fixed amount of fuel
@@ -20,12 +21,12 @@ fn allocator_param() {
         fuel: usize,
     }
     unsafe impl AllocRef for BoundedAlloc {
-        unsafe fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
+        fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
             let size = layout.size();
             if size > self.fuel {
                 return Err(AllocErr);
             }
-            match Global.alloc(layout) {
+            match Global.alloc(layout, init) {
                 ok @ Ok(_) => {
                     self.fuel -= size;
                     ok
@@ -40,9 +41,9 @@ fn allocator_param() {
 
     let a = BoundedAlloc { fuel: 500 };
     let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
-    assert_eq!(v.a.fuel, 450);
+    assert_eq!(v.alloc.fuel, 450);
     v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
-    assert_eq!(v.a.fuel, 250);
+    assert_eq!(v.alloc.fuel, 250);
 }
 
 #[test]
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 9c286298aa6..6a78a7398a6 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -252,13 +252,17 @@ use core::ptr::{self, NonNull};
 use core::slice::{self, from_raw_parts_mut};
 use core::usize;
 
-use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout};
+use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::string::String;
 use crate::vec::Vec;
 
 #[cfg(test)]
 mod tests;
 
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
 struct RcBox<T: ?Sized> {
     strong: Cell<usize>,
     weak: Cell<usize>,
@@ -580,15 +584,24 @@ impl<T: ?Sized> Rc<T> {
         }
     }
 
-    /// Constructs an `Rc` from a raw pointer.
+    /// Constructs an `Rc<T>` from a raw pointer.
     ///
-    /// The raw pointer must have been previously returned by a call to a
-    /// [`Rc::into_raw`][into_raw].
+    /// The raw pointer must have been previously returned by a call to
+    /// [`Rc<U>::into_raw`][into_raw] where `U` must have the same size
+    /// and alignment as `T`. This is trivially true if `U` is `T`.
+    /// Note that if `U` is not `T` but has the same size and alignment, this is
+    /// basically like transmuting references of different types. See
+    /// [`mem::transmute`][transmute] for more information on what
+    /// restrictions apply in this case.
     ///
-    /// This function is unsafe because improper use may lead to memory problems. For example, a
-    /// double-free may occur if the function is called twice on the same raw pointer.
+    /// The user of `from_raw` has to make sure a specific value of `T` is only
+    /// dropped once.
+    ///
+    /// This function is unsafe because improper use may lead to memory unsafety,
+    /// even if the returned `Rc<T>` is never accessed.
     ///
     /// [into_raw]: struct.Rc.html#method.into_raw
+    /// [transmute]: ../../std/mem/fn.transmute.html
     ///
     /// # Examples
     ///
@@ -923,10 +936,12 @@ impl<T: ?Sized> Rc<T> {
         let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
 
         // Allocate for the layout.
-        let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
+        let mem = Global
+            .alloc(layout, AllocInit::Uninitialized)
+            .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the RcBox
-        let inner = mem_to_rcbox(mem.as_ptr());
+        let inner = mem_to_rcbox(mem.ptr.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, Cell::new(1));
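For reference, the round trip the reworded `from_raw` documentation describes; a minimal sketch, sound only because the pointer comes straight from `into_raw` and is reconstructed exactly once:

    use std::rc::Rc;

    fn main() {
        let rc = Rc::new(42_u32);
        let raw: *const u32 = Rc::into_raw(rc);
        // SAFETY: `raw` was produced by `Rc::<u32>::into_raw` and is turned back
        // into an `Rc` exactly once, so the value is dropped exactly once.
        let rc = unsafe { Rc::from_raw(raw) };
        assert_eq!(*rc, 42);
    }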
diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs
index 7b83658fca6..2ce5bc8ed2f 100644
--- a/src/liballoc/slice.rs
+++ b/src/liballoc/slice.rs
@@ -145,8 +145,7 @@ mod hack {
         unsafe {
             let len = b.len();
             let b = Box::into_raw(b);
-            let xs = Vec::from_raw_parts(b as *mut T, len, len);
-            xs
+            Vec::from_raw_parts(b as *mut T, len, len)
         }
     }
 
@@ -433,7 +432,7 @@ impl<T> [T] {
     ///
     /// ```should_panic
     /// // this will panic at runtime
-    /// b"0123456789abcdef".repeat(usize::max_value());
+    /// b"0123456789abcdef".repeat(usize::MAX);
     /// ```
     #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
     pub fn repeat(&self, n: usize) -> Vec<T>
diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs
index 843a2f1f8e9..70860c09a2c 100644
--- a/src/liballoc/str.rs
+++ b/src/liballoc/str.rs
@@ -499,7 +499,7 @@ impl str {
     ///
     /// ```should_panic
     /// // this will panic at runtime
-    /// "0123456789abcdef".repeat(usize::max_value());
+    /// "0123456789abcdef".repeat(usize::MAX);
     /// ```
     #[stable(feature = "repeat_str", since = "1.16.0")]
     pub fn repeat(&self, n: usize) -> String {
diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs
index f5afea15d65..1e5fe125c55 100644
--- a/src/liballoc/string.rs
+++ b/src/liballoc/string.rs
@@ -407,7 +407,7 @@ impl String {
     ///
     /// assert_eq!(s.capacity(), cap);
     ///
-    /// // ...but this may make the vector reallocate
+    /// // ...but this may make the string reallocate
     /// s.push('a');
     /// ```
     #[inline]
@@ -1461,6 +1461,7 @@ impl String {
     /// ```
     #[inline]
     #[stable(feature = "string_split_off", since = "1.16.0")]
+    #[must_use = "use `.truncate()` if you don't need the other half"]
     pub fn split_off(&mut self, at: usize) -> String {
         assert!(self.is_char_boundary(at));
         let other = self.vec.split_off(at);
@@ -1848,6 +1849,21 @@ impl<'a, 'b> Pattern<'a> for &'b String {
     fn is_prefix_of(self, haystack: &'a str) -> bool {
         self[..].is_prefix_of(haystack)
     }
+
+    #[inline]
+    fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+        self[..].strip_prefix_of(haystack)
+    }
+
+    #[inline]
+    fn is_suffix_of(self, haystack: &'a str) -> bool {
+        self[..].is_suffix_of(haystack)
+    }
+
+    #[inline]
+    fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
+        self[..].strip_suffix_of(haystack)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -2225,6 +2241,17 @@ impl From<&str> for String {
     }
 }
 
+#[stable(feature = "from_mut_str_for_string", since = "1.44.0")]
+impl From<&mut str> for String {
+    /// Converts a `&mut str` into a `String`.
+    ///
+    /// The result is allocated on the heap.
+    #[inline]
+    fn from(s: &mut str) -> String {
+        s.to_owned()
+    }
+}
+
 #[stable(feature = "from_ref_string", since = "1.35.0")]
 impl From<&String> for String {
     #[inline]
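Two small usage notes on the `String` changes above (illustrative snippet, not part of the diff): the new `From<&mut str>` impl simply clones into a fresh allocation, and the `#[must_use]` on `split_off` nudges callers toward `truncate` when the returned half is unwanted.

    fn main() {
        // From<&mut str>: allocates a new String from the mutable slice.
        let mut buf = String::from("abc");
        let copy = String::from(buf.as_mut_str());
        assert_eq!(copy, "abc");

        // split_off is #[must_use]: bind the tail, or call truncate if you
        // only want to shorten the string.
        let mut s = String::from("hello world");
        let tail = s.split_off(5);
        assert_eq!((s.as_str(), tail.as_str()), ("hello", " world"));
    }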
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index 4a0cf2984ed..111a7651b5e 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -25,7 +25,7 @@ use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 use core::{isize, usize};
 
-use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout};
+use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::boxed::Box;
 use crate::rc::is_dangling;
 use crate::string::String;
@@ -40,6 +40,23 @@ mod tests;
 /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
 const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 
+#[cfg(not(sanitize = "thread"))]
+macro_rules! acquire {
+    ($x:expr) => {
+        atomic::fence(Acquire)
+    };
+}
+
+// ThreadSanitizer does not support memory fences. To avoid false positive
+// reports in Arc / Weak implementation use atomic loads for synchronization
+// instead.
+#[cfg(sanitize = "thread")]
+macro_rules! acquire {
+    ($x:expr) => {
+        $x.load(Acquire)
+    };
+}
+
 /// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
 /// Reference Counted'.
 ///
@@ -270,6 +287,10 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
     }
 }
 
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
 struct ArcInner<T: ?Sized> {
     strong: atomic::AtomicUsize,
 
@@ -402,7 +423,7 @@ impl<T> Arc<T> {
             return Err(this);
         }
 
-        atomic::fence(Acquire);
+        acquire!(this.inner().strong);
 
         unsafe {
             let elem = ptr::read(&this.ptr.as_ref().data);
@@ -560,15 +581,24 @@ impl<T: ?Sized> Arc<T> {
         }
     }
 
-    /// Constructs an `Arc` from a raw pointer.
+    /// Constructs an `Arc<T>` from a raw pointer.
+    ///
+    /// The raw pointer must have been previously returned by a call to
+    /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
+    /// alignment as `T`. This is trivially true if `U` is `T`.
+    /// Note that if `U` is not `T` but has the same size and alignment, this is
+    /// basically like transmuting references of different types. See
+    /// [`mem::transmute`][transmute] for more information on what
+    /// restrictions apply in this case.
     ///
-    /// The raw pointer must have been previously returned by a call to a
-    /// [`Arc::into_raw`][into_raw].
+    /// The user of `from_raw` has to make sure a specific value of `T` is only
+    /// dropped once.
     ///
-    /// This function is unsafe because improper use may lead to memory problems. For example, a
-    /// double-free may occur if the function is called twice on the same raw pointer.
+    /// This function is unsafe because improper use may lead to memory unsafety,
+    /// even if the returned `Arc<T>` is never accessed.
     ///
     /// [into_raw]: struct.Arc.html#method.into_raw
+    /// [transmute]: ../../std/mem/fn.transmute.html
     ///
     /// # Examples
     ///
@@ -739,7 +769,7 @@ impl<T: ?Sized> Arc<T> {
         ptr::drop_in_place(&mut self.ptr.as_mut().data);
 
         if self.inner().weak.fetch_sub(1, Release) == 1 {
-            atomic::fence(Acquire);
+            acquire!(self.inner().weak);
             Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
         }
     }
@@ -784,10 +814,12 @@ impl<T: ?Sized> Arc<T> {
         // reference (see #54908).
         let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
 
-        let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
+        let mem = Global
+            .alloc(layout, AllocInit::Uninitialized)
+            .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the ArcInner
-        let inner = mem_to_arcinner(mem.as_ptr());
+        let inner = mem_to_arcinner(mem.ptr.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
@@ -1243,7 +1275,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
         // [2]: (https://github.com/rust-lang/rust/pull/41714)
-        atomic::fence(Acquire);
+        acquire!(self.inner().strong);
 
         unsafe {
             self.drop_slow();
@@ -1701,7 +1733,7 @@ impl<T: ?Sized> Drop for Weak<T> {
         let inner = if let Some(inner) = self.inner() { inner } else { return };
 
         if inner.weak.fetch_sub(1, Release) == 1 {
-            atomic::fence(Acquire);
+            acquire!(inner.weak);
             unsafe { Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) }
         }
     }
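The `acquire!` macro above exists because ThreadSanitizer does not model standalone fences; under the sanitizer, an acquire load of the same atomic provides an equivalent synchronizes-with edge. A minimal sketch of the two forms (illustrative only):

    use std::sync::atomic::{fence, AtomicUsize, Ordering};

    fn release_ref(count: &AtomicUsize) {
        if count.fetch_sub(1, Ordering::Release) == 1 {
            // Default build: a standalone acquire fence pairs with the Release RMW above.
            fence(Ordering::Acquire);
            // Under ThreadSanitizer the macro expands to an acquire load instead:
            // count.load(Ordering::Acquire);
            // ...last reference gone; the shared state may be freed here...
        }
    }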
diff --git a/src/liballoc/task.rs b/src/liballoc/task.rs
new file mode 100644
index 00000000000..a64d5d7a63b
--- /dev/null
+++ b/src/liballoc/task.rs
@@ -0,0 +1,89 @@
+#![unstable(feature = "wake_trait", issue = "69912")]
+//! Types and Traits for working with asynchronous tasks.
+use core::mem::{self, ManuallyDrop};
+use core::task::{RawWaker, RawWakerVTable, Waker};
+
+use crate::sync::Arc;
+
+/// The implementation of waking a task on an executor.
+///
+/// This trait can be used to create a [`Waker`]. An executor can define an
+/// implementation of this trait, and use that to construct a Waker to pass
+/// to the tasks that are executed on that executor.
+///
+/// This trait is a memory-safe and ergonomic alternative to constructing a
+/// [`RawWaker`]. It supports the common executor design in which the data used
+/// to wake up a task is stored in an [`Arc`][arc]. Some executors (especially
+/// those for embedded systems) cannot use this API, which is why [`RawWaker`]
+/// exists as an alternative for those systems.
+///
+/// [arc]: ../../std/sync/struct.Arc.html
+#[unstable(feature = "wake_trait", issue = "69912")]
+pub trait Wake {
+    /// Wake this task.
+    #[unstable(feature = "wake_trait", issue = "69912")]
+    fn wake(self: Arc<Self>);
+
+    /// Wake this task without consuming the waker.
+    ///
+    /// If an executor supports a cheaper way to wake without consuming the
+    /// waker, it should override this method. By default, it clones the
+    /// [`Arc`] and calls `wake` on the clone.
+    #[unstable(feature = "wake_trait", issue = "69912")]
+    fn wake_by_ref(self: &Arc<Self>) {
+        self.clone().wake();
+    }
+}
+
+#[unstable(feature = "wake_trait", issue = "69912")]
+impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for Waker {
+    fn from(waker: Arc<W>) -> Waker {
+        // SAFETY: This is safe because raw_waker safely constructs
+        // a RawWaker from Arc<W>.
+        unsafe { Waker::from_raw(raw_waker(waker)) }
+    }
+}
+
+#[unstable(feature = "wake_trait", issue = "69912")]
+impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for RawWaker {
+    fn from(waker: Arc<W>) -> RawWaker {
+        raw_waker(waker)
+    }
+}
+
+// NB: This private function for constructing a RawWaker is used, rather than
+// inlining this into the `From<Arc<W>> for RawWaker` impl, to ensure that
+// the safety of `From<Arc<W>> for Waker` does not depend on the correct
+// trait dispatch - instead both impls call this function directly and
+// explicitly.
+#[inline(always)]
+fn raw_waker<W: Wake + Send + Sync + 'static>(waker: Arc<W>) -> RawWaker {
+    // Increment the reference count of the arc to clone it.
+    unsafe fn clone_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) -> RawWaker {
+        let waker: Arc<W> = Arc::from_raw(waker as *const W);
+        mem::forget(Arc::clone(&waker));
+        raw_waker(waker)
+    }
+
+    // Wake by value, moving the Arc into the Wake::wake function
+    unsafe fn wake<W: Wake + Send + Sync + 'static>(waker: *const ()) {
+        let waker: Arc<W> = Arc::from_raw(waker as *const W);
+        <W as Wake>::wake(waker);
+    }
+
+    // Wake by reference, wrap the waker in ManuallyDrop to avoid dropping it
+    unsafe fn wake_by_ref<W: Wake + Send + Sync + 'static>(waker: *const ()) {
+        let waker: ManuallyDrop<Arc<W>> = ManuallyDrop::new(Arc::from_raw(waker as *const W));
+        <W as Wake>::wake_by_ref(&waker);
+    }
+
+    // Decrement the reference count of the Arc on drop
+    unsafe fn drop_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) {
+        mem::drop(Arc::from_raw(waker as *const W));
+    }
+
+    RawWaker::new(
+        Arc::into_raw(waker) as *const (),
+        &RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
+    )
+}
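A minimal sketch of how an executor might use the new `Wake` trait (written against today's `std::task::Wake` for brevity; in this diff the trait lives in `alloc::task` behind `#![feature(wake_trait)]`):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::task::{Wake, Waker};

    // A toy waker that just records that it was woken; a real executor would
    // re-enqueue the task instead.
    struct Flag(AtomicBool);

    impl Wake for Flag {
        fn wake(self: Arc<Self>) {
            self.0.store(true, Ordering::SeqCst);
        }
    }

    fn main() {
        let flag = Arc::new(Flag(AtomicBool::new(false)));
        // The blanket `From<Arc<W>> for Waker` impl above does the RawWaker plumbing.
        let waker: Waker = Waker::from(flag.clone());
        waker.wake();
        assert!(flag.0.load(Ordering::SeqCst));
    }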
diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs
index fd07a4d3926..14f12ca2d77 100644
--- a/src/liballoc/tests/btree/map.rs
+++ b/src/liballoc/tests/btree/map.rs
@@ -5,19 +5,33 @@ use std::fmt::Debug;
 use std::iter::FromIterator;
 use std::ops::Bound::{self, Excluded, Included, Unbounded};
 use std::ops::RangeBounds;
-use std::panic::catch_unwind;
+use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::rc::Rc;
-use std::sync::atomic::{AtomicU32, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
 
 use super::DeterministicRng;
 
+// Value of node::CAPACITY, thus capacity of a tree with a single level,
+// i.e. a tree whose root is a leaf node at height 0.
+const NODE_CAPACITY: usize = 11;
+
+// Minimum number of elements to insert in order to guarantee a tree with 2 levels,
+// i.e. a tree whose root is an internal node at height 1, with edges to leaf nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
+const MIN_INSERTS_HEIGHT_1: usize = NODE_CAPACITY + 1;
+
+// Minimum number of elements to insert in order to guarantee a tree with 3 levels,
+// i.e. a tree whose root is an internal node at height 2, with edges to more internal nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
+const MIN_INSERTS_HEIGHT_2: usize = NODE_CAPACITY + (NODE_CAPACITY + 1) * NODE_CAPACITY + 1;
+
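Worked out with `NODE_CAPACITY = 11`: `MIN_INSERTS_HEIGHT_1 = 12` and `MIN_INSERTS_HEIGHT_2 = 11 + 12 * 11 + 1 = 144`, matching the magic numbers 12 and 144 that these constants replace below.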
 #[test]
 fn test_basic_large() {
     let mut map = BTreeMap::new();
     #[cfg(not(miri))] // Miri is too slow
     let size = 10000;
     #[cfg(miri)]
-    let size = 144; // to obtain height 3 tree (having edges to both kinds of nodes)
+    let size = MIN_INSERTS_HEIGHT_2;
     assert_eq!(map.len(), 0);
 
     for i in 0..size {
@@ -67,7 +81,7 @@ fn test_basic_large() {
 #[test]
 fn test_basic_small() {
     let mut map = BTreeMap::new();
-    // Empty, shared root:
+    // Empty, root is absent (None):
     assert_eq!(map.remove(&1), None);
     assert_eq!(map.len(), 0);
     assert_eq!(map.get(&1), None);
@@ -123,7 +137,7 @@ fn test_basic_small() {
     assert_eq!(map.values().collect::<Vec<_>>(), vec![&4]);
     assert_eq!(map.remove(&2), Some(4));
 
-    // Empty but private root:
+    // Empty but root is owned (Some(...)):
     assert_eq!(map.len(), 0);
     assert_eq!(map.get(&1), None);
     assert_eq!(map.get_mut(&1), None);
@@ -237,37 +251,26 @@ impl TryFrom<usize> for Align32 {
 
 #[test]
 fn test_iter_mut_mutation() {
-    // Check many alignments because various fields precede array in NodeHeader.
-    // Check with size 0 which should not iterate at all.
-    // Check with size 1 for a tree with one kind of node (root = leaf).
-    // Check with size 12 for a tree with two kinds of nodes (root and leaves).
-    // Check with size 144 for a tree with all kinds of nodes (root, internals and leaves).
+    // Check many alignments and trees with roots at various heights.
     do_test_iter_mut_mutation::<u8>(0);
     do_test_iter_mut_mutation::<u8>(1);
-    do_test_iter_mut_mutation::<u8>(12);
-    do_test_iter_mut_mutation::<u8>(127); // not enough unique values to test 144
+    do_test_iter_mut_mutation::<u8>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u8>(127); // not enough unique values to test MIN_INSERTS_HEIGHT_2
     do_test_iter_mut_mutation::<u16>(1);
-    do_test_iter_mut_mutation::<u16>(12);
-    do_test_iter_mut_mutation::<u16>(144);
+    do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<u32>(1);
-    do_test_iter_mut_mutation::<u32>(12);
-    do_test_iter_mut_mutation::<u32>(144);
+    do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<u64>(1);
-    do_test_iter_mut_mutation::<u64>(12);
-    do_test_iter_mut_mutation::<u64>(144);
+    do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<u128>(1);
-    do_test_iter_mut_mutation::<u128>(12);
-    do_test_iter_mut_mutation::<u128>(144);
+    do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<Align32>(1);
-    do_test_iter_mut_mutation::<Align32>(12);
-    do_test_iter_mut_mutation::<Align32>(144);
-}
-
-#[test]
-fn test_into_key_slice_with_shared_root_past_bounds() {
-    let mut map: BTreeMap<Align32, ()> = BTreeMap::new();
-    assert_eq!(map.get(&Align32(1)), None);
-    assert_eq!(map.get_mut(&Align32(1)), None);
+    do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_2);
 }
 
 #[test]
@@ -383,12 +386,11 @@ fn test_range_small() {
 }
 
 #[test]
-fn test_range_height_2() {
-    // Assuming that node.CAPACITY is 11, having 12 pairs implies a height 2 tree
-    // with 2 leaves. Depending on details we don't want or need to rely upon,
-    // the single key at the root will be 6 or 7.
+fn test_range_height_1() {
+    // Tests tree with a root and 2 leaves. Depending on details we don't want or need
+    // to rely upon, the single key at the root will be 6 or 7.
 
-    let map: BTreeMap<_, _> = (1..=12).map(|i| (i, i)).collect();
+    let map: BTreeMap<_, _> = (1..=MIN_INSERTS_HEIGHT_1 as i32).map(|i| (i, i)).collect();
     for &root in &[6, 7] {
         assert_eq!(range_keys(&map, (Excluded(root), Excluded(root + 1))), vec![]);
         assert_eq!(range_keys(&map, (Excluded(root), Included(root + 1))), vec![root + 1]);
@@ -526,7 +528,7 @@ fn test_range_1000() {
     #[cfg(not(miri))] // Miri is too slow
     let size = 1000;
     #[cfg(miri)]
-    let size = 144; // to obtain height 3 tree (having edges to both kinds of nodes)
+    let size = MIN_INSERTS_HEIGHT_2 as u32;
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     fn test(map: &BTreeMap<u32, u32>, size: u32, min: Bound<&u32>, max: Bound<&u32>) {
@@ -607,6 +609,263 @@ fn test_range_mut() {
     }
 }
 
+mod test_drain_filter {
+    use super::*;
+
+    #[test]
+    fn empty() {
+        let mut map: BTreeMap<i32, i32> = BTreeMap::new();
+        map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn consuming_nothing() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(map.drain_filter(|_, _| false).eq(std::iter::empty()));
+    }
+
+    #[test]
+    fn consuming_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.clone().collect();
+        assert!(map.drain_filter(|_, _| true).eq(pairs));
+    }
+
+    #[test]
+    fn mutating_and_keeping() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(
+            map.drain_filter(|_, v| {
+                *v += 6;
+                false
+            })
+            .eq(std::iter::empty())
+        );
+        assert!(map.keys().copied().eq(0..3));
+        assert!(map.values().copied().eq(6..9));
+    }
+
+    #[test]
+    fn mutating_and_removing() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(
+            map.drain_filter(|_, v| {
+                *v += 6;
+                true
+            })
+            .eq((0..3).map(|i| (i, i + 6)))
+        );
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn underfull_keeping_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| false);
+        assert!(map.keys().copied().eq(0..3));
+    }
+
+    #[test]
+    fn underfull_removing_one() {
+        let pairs = (0..3).map(|i| (i, i));
+        for doomed in 0..3 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), 2);
+        }
+    }
+
+    #[test]
+    fn underfull_keeping_one() {
+        let pairs = (0..3).map(|i| (i, i));
+        for sacred in 0..3 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn underfull_removing_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_0_keeping_all() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| false);
+        assert!(map.keys().copied().eq(0..NODE_CAPACITY));
+    }
+
+    #[test]
+    fn height_0_removing_one() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        for doomed in 0..NODE_CAPACITY {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), NODE_CAPACITY - 1);
+        }
+    }
+
+    #[test]
+    fn height_0_keeping_one() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        for sacred in 0..NODE_CAPACITY {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn height_0_removing_all() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_0_keeping_half() {
+        let mut map: BTreeMap<_, _> = (0..16).map(|i| (i, i)).collect();
+        assert_eq!(map.drain_filter(|i, _| *i % 2 == 0).count(), 8);
+        assert_eq!(map.len(), 8);
+    }
+
+    #[test]
+    fn height_1_removing_all() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_1_removing_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        for doomed in 0..MIN_INSERTS_HEIGHT_1 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1 - 1);
+        }
+    }
+
+    #[test]
+    fn height_1_keeping_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        for sacred in 0..MIN_INSERTS_HEIGHT_1 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    #[test]
+    fn height_2_removing_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        for doomed in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+        }
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    #[test]
+    fn height_2_keeping_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        for sacred in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn height_2_removing_all() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn drop_panic_leak() {
+        static PREDS: AtomicUsize = AtomicUsize::new(0);
+        static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+        struct D;
+        impl Drop for D {
+            fn drop(&mut self) {
+                if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+                    panic!("panic in `drop`");
+                }
+            }
+        }
+
+        let mut map = BTreeMap::new();
+        map.insert(0, D);
+        map.insert(4, D);
+        map.insert(8, D);
+
+        catch_unwind(move || {
+            drop(map.drain_filter(|i, _| {
+                PREDS.fetch_add(1usize << i, Ordering::SeqCst);
+                true
+            }))
+        })
+        .ok();
+
+        assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+        assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+    }
+
+    #[test]
+    fn pred_panic_leak() {
+        static PREDS: AtomicUsize = AtomicUsize::new(0);
+        static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+        struct D;
+        impl Drop for D {
+            fn drop(&mut self) {
+                DROPS.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+
+        let mut map = BTreeMap::new();
+        map.insert(0, D);
+        map.insert(4, D);
+        map.insert(8, D);
+
+        catch_unwind(AssertUnwindSafe(|| {
+            drop(map.drain_filter(|i, _| {
+                PREDS.fetch_add(1usize << i, Ordering::SeqCst);
+                match i {
+                    0 => true,
+                    _ => panic!(),
+                }
+            }))
+        }))
+        .ok();
+
+        assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+        assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+        assert_eq!(map.len(), 2);
+        assert_eq!(map.first_entry().unwrap().key(), &4);
+        assert_eq!(map.last_entry().unwrap().key(), &8);
+    }
+}
+
 #[test]
 fn test_borrow() {
     // make sure these compile -- using the Borrow trait
@@ -762,7 +1021,7 @@ fn test_bad_zst() {
 #[test]
 fn test_clone() {
     let mut map = BTreeMap::new();
-    let size = 12; // to obtain height 2 tree (having edges to leaf nodes)
+    let size = MIN_INSERTS_HEIGHT_1;
     assert_eq!(map.len(), 0);
 
     for i in 0..size {
@@ -790,20 +1049,19 @@ fn test_clone() {
         assert_eq!(map, map.clone());
     }
 
-    // Full 2-level and minimal 3-level tree (sizes 143, 144 -- the only ones we clone for).
-    for i in 1..=144 {
-        assert_eq!(map.insert(i, i), None);
-        assert_eq!(map.len(), i);
-        if i >= 143 {
-            assert_eq!(map, map.clone());
-        }
-    }
+    // Test a tree with 2 chock-full levels and a tree with 3 levels.
+    map = (1..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)).collect();
+    assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+    assert_eq!(map, map.clone());
+    map.insert(0, 0);
+    assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2);
+    assert_eq!(map, map.clone());
 }
 
 #[test]
 fn test_clone_from() {
     let mut map1 = BTreeMap::new();
-    let max_size = 12; // to obtain height 2 tree (having edges to leaf nodes)
+    let max_size = MIN_INSERTS_HEIGHT_1;
 
     // Range to max_size inclusive, because i is the size of map1 being tested.
     for i in 0..=max_size {
@@ -1021,8 +1279,8 @@ fn test_split_off_large_random_sorted() {
 }
 
 #[test]
-fn test_into_iter_drop_leak() {
-    static DROPS: AtomicU32 = AtomicU32::new(0);
+fn test_into_iter_drop_leak_height_0() {
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
 
     struct D;
 
@@ -1045,3 +1303,27 @@ fn test_into_iter_drop_leak() {
 
     assert_eq!(DROPS.load(Ordering::SeqCst), 5);
 }
+
+#[test]
+fn test_into_iter_drop_leak_height_1() {
+    let size = MIN_INSERTS_HEIGHT_1;
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
+    static PANIC_POINT: AtomicUsize = AtomicUsize::new(0);
+
+    struct D;
+    impl Drop for D {
+        fn drop(&mut self) {
+            if DROPS.fetch_add(1, Ordering::SeqCst) == PANIC_POINT.load(Ordering::SeqCst) {
+                panic!("panic in `drop`");
+            }
+        }
+    }
+
+    for panic_point in vec![0, 1, size - 2, size - 1] {
+        DROPS.store(0, Ordering::SeqCst);
+        PANIC_POINT.store(panic_point, Ordering::SeqCst);
+        let map: BTreeMap<_, _> = (0..size).map(|i| (i, D)).collect();
+        catch_unwind(move || drop(map.into_iter())).ok();
+        assert_eq!(DROPS.load(Ordering::SeqCst), size);
+    }
+}
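For orientation on the API these tests exercise (the nightly `btree_drain_filter` feature at the time of this diff): the closure is called once per entry in key order, entries for which it returns `true` are removed and yielded, and the rest stay in the map. A minimal sketch:

    #![feature(btree_drain_filter)]
    use std::collections::BTreeMap;

    fn main() {
        let mut map: BTreeMap<i32, i32> = (0..8).map(|i| (i, i * 10)).collect();
        // Remove and collect the even keys; odd keys remain in `map`.
        let evens: BTreeMap<i32, i32> = map.drain_filter(|k, _v| k % 2 == 0).collect();
        assert_eq!(evens.len(), 4);
        assert_eq!(map.len(), 4);
    }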
diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs
index 1a2b62d026b..136018b9f7d 100644
--- a/src/liballoc/tests/btree/set.rs
+++ b/src/liballoc/tests/btree/set.rs
@@ -1,5 +1,7 @@
 use std::collections::BTreeSet;
 use std::iter::FromIterator;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicU32, Ordering};
 
 use super::DeterministicRng;
 
@@ -303,6 +305,85 @@ fn test_is_subset() {
 }
 
 #[test]
+fn test_drain_filter() {
+    let mut x: BTreeSet<_> = [1].iter().copied().collect();
+    let mut y: BTreeSet<_> = [1].iter().copied().collect();
+
+    x.drain_filter(|_| true);
+    y.drain_filter(|_| false);
+    assert_eq!(x.len(), 0);
+    assert_eq!(y.len(), 1);
+}
+
+#[test]
+fn test_drain_filter_drop_panic_leak() {
+    static PREDS: AtomicU32 = AtomicU32::new(0);
+    static DROPS: AtomicU32 = AtomicU32::new(0);
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    struct D(i32);
+    impl Drop for D {
+        fn drop(&mut self) {
+            if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+                panic!("panic in `drop`");
+            }
+        }
+    }
+
+    let mut set = BTreeSet::new();
+    set.insert(D(0));
+    set.insert(D(4));
+    set.insert(D(8));
+
+    catch_unwind(move || {
+        drop(set.drain_filter(|d| {
+            PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
+            true
+        }))
+    })
+    .ok();
+
+    assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+}
+
+#[test]
+fn test_drain_filter_pred_panic_leak() {
+    static PREDS: AtomicU32 = AtomicU32::new(0);
+    static DROPS: AtomicU32 = AtomicU32::new(0);
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    struct D(i32);
+    impl Drop for D {
+        fn drop(&mut self) {
+            DROPS.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    let mut set = BTreeSet::new();
+    set.insert(D(0));
+    set.insert(D(4));
+    set.insert(D(8));
+
+    catch_unwind(AssertUnwindSafe(|| {
+        drop(set.drain_filter(|d| {
+            PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
+            match d.0 {
+                0 => true,
+                _ => panic!(),
+            }
+        }))
+    }))
+    .ok();
+
+    assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+    assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+    assert_eq!(set.len(), 2);
+    assert_eq!(set.first().unwrap().0, 4);
+    assert_eq!(set.last().unwrap().0, 8);
+}
+
+#[test]
 fn test_clear() {
     let mut x = BTreeSet::new();
     x.insert(1);
diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs
index d159126f426..62f062b83d7 100644
--- a/src/liballoc/tests/heap.rs
+++ b/src/liballoc/tests/heap.rs
@@ -1,4 +1,4 @@
-use std::alloc::{AllocRef, Global, Layout, System};
+use std::alloc::{AllocInit, AllocRef, Global, Layout, System};
 
 /// Issue #45955 and #62251.
 #[test]
@@ -20,7 +20,13 @@ fn check_overalign_requests<T: AllocRef>(mut allocator: T) {
             unsafe {
                 let pointers: Vec<_> = (0..iterations)
                     .map(|_| {
-                        allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap().0
+                        allocator
+                            .alloc(
+                                Layout::from_size_align(size, align).unwrap(),
+                                AllocInit::Uninitialized,
+                            )
+                            .unwrap()
+                            .ptr
                     })
                     .collect();
                 for &ptr in &pointers {
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index ea75f8903c3..ad6feaeebc6 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -1,5 +1,6 @@
 #![feature(allocator_api)]
 #![feature(box_syntax)]
+#![feature(btree_drain_filter)]
 #![feature(drain_filter)]
 #![feature(exact_size_is_empty)]
 #![feature(map_first_last)]
diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs
index 3d6b4bff5e0..8e49e6d8eba 100644
--- a/src/liballoc/tests/slice.rs
+++ b/src/liballoc/tests/slice.rs
@@ -1733,9 +1733,9 @@ fn panic_safe() {
     let moduli = &[5, 20, 50];
 
     #[cfg(miri)]
-    let lens = 1..13;
+    let lens = 1..10;
     #[cfg(miri)]
-    let moduli = &[10];
+    let moduli = &[5];
 
     for len in lens {
         for &modulus in moduli {
diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs
index 08859b2b24b..d2f09eb4a75 100644
--- a/src/liballoc/tests/string.rs
+++ b/src/liballoc/tests/string.rs
@@ -266,14 +266,14 @@ fn test_split_off_empty() {
 fn test_split_off_past_end() {
     let orig = "Hello, world!";
     let mut split = String::from(orig);
-    split.split_off(orig.len() + 1);
+    let _ = split.split_off(orig.len() + 1);
 }
 
 #[test]
 #[should_panic]
 fn test_split_off_mid_char() {
     let mut orig = String::from("山");
-    orig.split_off(1);
+    let _ = orig.split_off(1);
 }
 
 #[test]
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index 3fd7be06fd4..96a6399d051 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -1,3 +1,4 @@
+// ignore-tidy-filelength
 //! A contiguous growable array type with heap-allocated contents, written
 //! `Vec<T>`.
 //!
@@ -317,7 +318,7 @@ impl<T> Vec<T> {
     /// let mut vec: Vec<i32> = Vec::new();
     /// ```
     #[inline]
-    #[rustc_const_stable(feature = "const_vec_new", since = "1.32.0")]
+    #[rustc_const_stable(feature = "const_vec_new", since = "1.39.0")]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub const fn new() -> Vec<T> {
         Vec { buf: RawVec::NEW, len: 0 }
@@ -678,8 +679,9 @@ impl<T> Vec<T> {
         unsafe {
             self.shrink_to_fit();
             let buf = ptr::read(&self.buf);
+            let len = self.len();
             mem::forget(self);
-            buf.into_box()
+            buf.into_box(len).assume_init()
         }
     }
 
@@ -1377,6 +1379,7 @@ impl<T> Vec<T> {
     /// assert_eq!(vec2, [2, 3]);
     /// ```
     #[inline]
+    #[must_use = "use `.truncate()` if you don't need the other half"]
     #[stable(feature = "split_off", since = "1.4.0")]
     pub fn split_off(&mut self, at: usize) -> Self {
         assert!(at <= self.len(), "`at` out of bounds");
@@ -1659,7 +1662,7 @@ struct SetLenOnDrop<'a> {
 impl<'a> SetLenOnDrop<'a> {
     #[inline]
     fn new(len: &'a mut usize) -> Self {
-        SetLenOnDrop { local_len: *len, len: len }
+        SetLenOnDrop { local_len: *len, len }
     }
 
     #[inline]
@@ -2397,6 +2400,21 @@ impl<T: Clone> From<&mut [T]> for Vec<T> {
     }
 }
 
+#[stable(feature = "vec_from_array", since = "1.44.0")]
+impl<T, const N: usize> From<[T; N]> for Vec<T>
+where
+    [T; N]: LengthAtMost32,
+{
+    #[cfg(not(test))]
+    fn from(s: [T; N]) -> Vec<T> {
+        <[T]>::into_vec(box s)
+    }
+    #[cfg(test)]
+    fn from(s: [T; N]) -> Vec<T> {
+        crate::slice::into_vec(box s)
+    }
+}
+
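Usage of the new array conversion above (illustrative; at this point the impl is limited to arrays of length at most 32 via `LengthAtMost32`):

    fn main() {
        let v: Vec<i32> = Vec::from([1, 2, 3]);
        assert_eq!(v, vec![1, 2, 3]);
    }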
 #[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
 impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
 where
@@ -2626,13 +2644,21 @@ impl<T: Clone> Clone for IntoIter<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<#[may_dangle] T> Drop for IntoIter<T> {
     fn drop(&mut self) {
+        struct DropGuard<'a, T>(&'a mut IntoIter<T>);
+
+        impl<T> Drop for DropGuard<'_, T> {
+            fn drop(&mut self) {
+                // RawVec handles deallocation
+                let _ = unsafe { RawVec::from_raw_parts(self.0.buf.as_ptr(), self.0.cap) };
+            }
+        }
+
+        let guard = DropGuard(self);
         // destroy the remaining elements
         unsafe {
-            ptr::drop_in_place(self.as_mut_slice());
+            ptr::drop_in_place(guard.0.as_mut_slice());
         }
-
-        // RawVec handles deallocation
-        let _ = unsafe { RawVec::from_raw_parts(self.buf.as_ptr(), self.cap) };
+        // now `guard` will be dropped and do the rest
     }
 }
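The `DropGuard` introduced above is the usual panic-safety trick: the deallocation is moved into a guard's `Drop` impl so it still runs if dropping an element panics. A generic sketch of the same pattern (names hypothetical, not part of the diff):

    // Runs the stored cleanup even if code after the guard's creation panics.
    struct Guard<F: FnMut()>(F);

    impl<F: FnMut()> Drop for Guard<F> {
        fn drop(&mut self) {
            (self.0)();
        }
    }

    fn with_cleanup() {
        let _guard = Guard(|| println!("cleanup always runs"));
        // ...work that may panic; the closure above still runs during unwinding...
    }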
 
@@ -2894,7 +2920,7 @@ where
     /// The filter test predicate.
     pred: F,
     /// A flag that indicates a panic has occurred in the filter test predicate.
-    /// This is used as a hint in the drop implmentation to prevent consumption
+    /// This is used as a hint in the drop implementation to prevent consumption
     /// of the remainder of the `DrainFilter`. Any unprocessed items will be
     /// backshifted in the `vec`, but no further items will be dropped or
     /// tested by the filter predicate.