author     Stefan Lankes <stlankes@users.noreply.github.com>   2020-04-04 07:41:05 +0200
committer  GitHub <noreply@github.com>                         2020-04-04 07:41:05 +0200
commit     aa223304dc130c5ace18d48c53b192b14088862e (patch)
tree       1971ea5717f0e2ef2dc9468b3a0e96c209d481fe /src/liballoc
parent     9f6b96e461003853bf36052cfaf79b12e1c35413 (diff)
parent     9e55101bb681010c82c3c827305e2665fc8f2aa0 (diff)
Merge branch 'master' into abi
Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/alloc.rs                        | 112
-rw-r--r--  src/liballoc/alloc/tests.rs                  |   9
-rw-r--r--  src/liballoc/benches/btree/set.rs            |  32
-rw-r--r--  src/liballoc/benches/lib.rs                  |   1
-rw-r--r--  src/liballoc/boxed.rs                        |  41
-rw-r--r--  src/liballoc/collections/btree/map.rs        | 210
-rw-r--r--  src/liballoc/collections/btree/node.rs       |   9
-rw-r--r--  src/liballoc/collections/btree/set.rs        | 101
-rw-r--r--  src/liballoc/collections/vec_deque.rs        | 205
-rw-r--r--  src/liballoc/collections/vec_deque/tests.rs  |  83
-rw-r--r--  src/liballoc/lib.rs                          |   1
-rw-r--r--  src/liballoc/macros.rs                       |   6
-rw-r--r--  src/liballoc/raw_vec.rs                      | 655
-rw-r--r--  src/liballoc/raw_vec/tests.rs                |   9
-rw-r--r--  src/liballoc/rc.rs                           |   8
-rw-r--r--  src/liballoc/slice.rs                        |   2
-rw-r--r--  src/liballoc/str.rs                          |   2
-rw-r--r--  src/liballoc/string.rs                       |  15
-rw-r--r--  src/liballoc/sync.rs                         |   8
-rw-r--r--  src/liballoc/task.rs                         |   6
-rw-r--r--  src/liballoc/tests/btree/map.rs              | 261
-rw-r--r--  src/liballoc/tests/btree/set.rs              |  81
-rw-r--r--  src/liballoc/tests/heap.rs                   |  10
-rw-r--r--  src/liballoc/tests/lib.rs                    |   1
-rw-r--r--  src/liballoc/vec.rs                          |   3
25 files changed, 1325 insertions, 546 deletions
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index 9f82b2c6fa6..66575e3ef55 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -2,7 +2,7 @@
 
 #![stable(feature = "alloc_module", since = "1.28.0")]
 
-use core::intrinsics::{min_align_of_val, size_of_val};
+use core::intrinsics::{self, min_align_of_val, size_of_val};
 use core::ptr::{NonNull, Unique};
 use core::usize;
 
@@ -165,11 +165,19 @@ pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
 #[unstable(feature = "allocator_api", issue = "32838")]
 unsafe impl AllocRef for Global {
     #[inline]
-    fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
-        if layout.size() == 0 {
-            Ok((layout.dangling(), 0))
-        } else {
-            unsafe { NonNull::new(alloc(layout)).ok_or(AllocErr).map(|p| (p, layout.size())) }
+    fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
+        unsafe {
+            let size = layout.size();
+            if size == 0 {
+                Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
+            } else {
+                let raw_ptr = match init {
+                    AllocInit::Uninitialized => alloc(layout),
+                    AllocInit::Zeroed => alloc_zeroed(layout),
+                };
+                let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
+                Ok(MemoryBlock { ptr, size })
+            }
         }
     }
 
@@ -181,32 +189,71 @@ unsafe impl AllocRef for Global {
     }
 
     #[inline]
-    unsafe fn realloc(
+    unsafe fn grow(
         &mut self,
         ptr: NonNull<u8>,
         layout: Layout,
         new_size: usize,
-    ) -> Result<(NonNull<u8>, usize), AllocErr> {
-        match (layout.size(), new_size) {
-            (0, 0) => Ok((layout.dangling(), 0)),
-            (0, _) => self.alloc(Layout::from_size_align_unchecked(new_size, layout.align())),
-            (_, 0) => {
-                self.dealloc(ptr, layout);
-                Ok((layout.dangling(), 0))
+        placement: ReallocPlacement,
+        init: AllocInit,
+    ) -> Result<MemoryBlock, AllocErr> {
+        let size = layout.size();
+        debug_assert!(
+            new_size >= size,
+            "`new_size` must be greater than or equal to `memory.size()`"
+        );
+
+        if size == new_size {
+            return Ok(MemoryBlock { ptr, size });
+        }
+
+        match placement {
+            ReallocPlacement::InPlace => Err(AllocErr),
+            ReallocPlacement::MayMove if layout.size() == 0 => {
+                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                self.alloc(new_layout, init)
+            }
+            ReallocPlacement::MayMove => {
+                // `realloc` probably checks for `new_size > size` or something similar.
+                intrinsics::assume(new_size > size);
+                let ptr = realloc(ptr.as_ptr(), layout, new_size);
+                let memory =
+                    MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size };
+                init.init_offset(memory, size);
+                Ok(memory)
             }
-            (_, _) => NonNull::new(realloc(ptr.as_ptr(), layout, new_size))
-                .ok_or(AllocErr)
-                .map(|p| (p, new_size)),
         }
     }
 
     #[inline]
-    fn alloc_zeroed(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
-        if layout.size() == 0 {
-            Ok((layout.dangling(), 0))
-        } else {
-            unsafe {
-                NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr).map(|p| (p, layout.size()))
+    unsafe fn shrink(
+        &mut self,
+        ptr: NonNull<u8>,
+        layout: Layout,
+        new_size: usize,
+        placement: ReallocPlacement,
+    ) -> Result<MemoryBlock, AllocErr> {
+        let size = layout.size();
+        debug_assert!(
+            new_size <= size,
+            "`new_size` must be smaller than or equal to `memory.size()`"
+        );
+
+        if size == new_size {
+            return Ok(MemoryBlock { ptr, size });
+        }
+
+        match placement {
+            ReallocPlacement::InPlace => Err(AllocErr),
+            ReallocPlacement::MayMove if new_size == 0 => {
+                self.dealloc(ptr, layout);
+                Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
+            }
+            ReallocPlacement::MayMove => {
+                // `realloc` probably checks for `new_size < size` or something similar.
+                intrinsics::assume(new_size < size);
+                let ptr = realloc(ptr.as_ptr(), layout, new_size);
+                Ok(MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size })
             }
         }
     }
@@ -218,14 +265,10 @@ unsafe impl AllocRef for Global {
 #[lang = "exchange_malloc"]
 #[inline]
 unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
-    if size == 0 {
-        align as *mut u8
-    } else {
-        let layout = Layout::from_size_align_unchecked(size, align);
-        match Global.alloc(layout) {
-            Ok((ptr, _)) => ptr.as_ptr(),
-            Err(_) => handle_alloc_error(layout),
-        }
+    let layout = Layout::from_size_align_unchecked(size, align);
+    match Global.alloc(layout, AllocInit::Uninitialized) {
+        Ok(memory) => memory.ptr.as_ptr(),
+        Err(_) => handle_alloc_error(layout),
     }
 }
 
@@ -239,11 +282,8 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
 pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
     let size = size_of_val(ptr.as_ref());
     let align = min_align_of_val(ptr.as_ref());
-    // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
-    if size != 0 {
-        let layout = Layout::from_size_align_unchecked(size, align);
-        Global.dealloc(ptr.cast().into(), layout);
-    }
+    let layout = Layout::from_size_align_unchecked(size, align);
+    Global.dealloc(ptr.cast().into(), layout)
 }
 
 /// Abort on memory allocation error or failure.
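The hunks above replace the single `realloc` entry point with separate `grow` and `shrink` methods, parameterized by `ReallocPlacement` and `AllocInit`, and return a `MemoryBlock` carrying the actual usable size. A minimal sketch of driving the new interface, assuming the then-unstable `allocator_api` items are re-exported from `std::alloc` and that a block may be freed using the size its `MemoryBlock` reported:

#![feature(allocator_api)]

use std::alloc::{AllocInit, AllocRef, Global, Layout, ReallocPlacement};

fn main() {
    let layout = Layout::array::<u32>(8).unwrap();
    // Ask for zeroed memory up front; `MemoryBlock` reports the usable size,
    // which may exceed `layout.size()`.
    let memory = Global.alloc(layout, AllocInit::Zeroed).expect("allocation failed");

    unsafe {
        // Grow the block: `MayMove` lets the allocator relocate it, and
        // `AllocInit::Zeroed` asks for the newly added tail to be zeroed.
        let grown = Global
            .grow(memory.ptr, layout, layout.size() * 2,
                  ReallocPlacement::MayMove, AllocInit::Zeroed)
            .expect("grow failed");

        // Assumption: the block may be freed with the size it reported.
        let grown_layout = Layout::from_size_align(grown.size, layout.align()).unwrap();
        Global.dealloc(grown.ptr, grown_layout);
    }
}

Note that, as the hunk shows, `ReallocPlacement::InPlace` simply fails for `Global`; only `MayMove` can succeed.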
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs
index 55944398e16..1ad40eca93b 100644
--- a/src/liballoc/alloc/tests.rs
+++ b/src/liballoc/alloc/tests.rs
@@ -8,16 +8,17 @@ use test::Bencher;
 fn allocate_zeroed() {
     unsafe {
         let layout = Layout::from_size_align(1024, 1).unwrap();
-        let (ptr, _) =
-            Global.alloc_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout));
+        let memory = Global
+            .alloc(layout.clone(), AllocInit::Zeroed)
+            .unwrap_or_else(|_| handle_alloc_error(layout));
 
-        let mut i = ptr.cast::<u8>().as_ptr();
+        let mut i = memory.ptr.cast::<u8>().as_ptr();
         let end = i.add(layout.size());
         while i < end {
             assert_eq!(*i, 0);
             i = i.offset(1);
         }
-        Global.dealloc(ptr, layout);
+        Global.dealloc(memory.ptr, layout);
     }
 }
 
diff --git a/src/liballoc/benches/btree/set.rs b/src/liballoc/benches/btree/set.rs
index d9e75ab7fa4..2518506b9b5 100644
--- a/src/liballoc/benches/btree/set.rs
+++ b/src/liballoc/benches/btree/set.rs
@@ -63,6 +63,22 @@ pub fn clone_100_and_clear(b: &mut Bencher) {
 }
 
 #[bench]
+pub fn clone_100_and_drain_all(b: &mut Bencher) {
+    let src = pos(100);
+    b.iter(|| src.clone().drain_filter(|_| true).count())
+}
+
+#[bench]
+pub fn clone_100_and_drain_half(b: &mut Bencher) {
+    let src = pos(100);
+    b.iter(|| {
+        let mut set = src.clone();
+        assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 100 / 2);
+        assert_eq!(set.len(), 100 / 2);
+    })
+}
+
+#[bench]
 pub fn clone_100_and_into_iter(b: &mut Bencher) {
     let src = pos(100);
     b.iter(|| src.clone().into_iter().count())
@@ -116,6 +132,22 @@ pub fn clone_10k_and_clear(b: &mut Bencher) {
 }
 
 #[bench]
+pub fn clone_10k_and_drain_all(b: &mut Bencher) {
+    let src = pos(10_000);
+    b.iter(|| src.clone().drain_filter(|_| true).count())
+}
+
+#[bench]
+pub fn clone_10k_and_drain_half(b: &mut Bencher) {
+    let src = pos(10_000);
+    b.iter(|| {
+        let mut set = src.clone();
+        assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 10_000 / 2);
+        assert_eq!(set.len(), 10_000 / 2);
+    })
+}
+
+#[bench]
 pub fn clone_10k_and_into_iter(b: &mut Bencher) {
     let src = pos(10_000);
     b.iter(|| src.clone().into_iter().count())
diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs
index 951477a24c8..f31717d9fd5 100644
--- a/src/liballoc/benches/lib.rs
+++ b/src/liballoc/benches/lib.rs
@@ -1,3 +1,4 @@
+#![feature(btree_drain_filter)]
 #![feature(map_first_last)]
 #![feature(repr_simd)]
 #![feature(test)]
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 36641284a76..5406956a528 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -143,10 +143,9 @@ use core::ops::{
 };
 use core::pin::Pin;
 use core::ptr::{self, NonNull, Unique};
-use core::slice;
 use core::task::{Context, Poll};
 
-use crate::alloc::{self, AllocRef, Global};
+use crate::alloc::{self, AllocInit, AllocRef, Global};
 use crate::raw_vec::RawVec;
 use crate::str::from_boxed_utf8_unchecked;
 use crate::vec::Vec;
@@ -196,14 +195,12 @@ impl<T> Box<T> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
         let layout = alloc::Layout::new::<mem::MaybeUninit<T>>();
-        unsafe {
-            let ptr = if layout.size() == 0 {
-                NonNull::dangling()
-            } else {
-                Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast()
-            };
-            Box::from_raw(ptr.as_ptr())
-        }
+        let ptr = Global
+            .alloc(layout, AllocInit::Uninitialized)
+            .unwrap_or_else(|_| alloc::handle_alloc_error(layout))
+            .ptr
+            .cast();
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 
     /// Constructs a new `Box` with uninitialized contents, with the memory
@@ -226,11 +223,13 @@ impl<T> Box<T> {
     /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
     #[unstable(feature = "new_uninit", issue = "63291")]
     pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
-        unsafe {
-            let mut uninit = Self::new_uninit();
-            ptr::write_bytes::<T>(uninit.as_mut_ptr(), 0, 1);
-            uninit
-        }
+        let layout = alloc::Layout::new::<mem::MaybeUninit<T>>();
+        let ptr = Global
+            .alloc(layout, AllocInit::Zeroed)
+            .unwrap_or_else(|_| alloc::handle_alloc_error(layout))
+            .ptr
+            .cast();
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 
     /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
@@ -265,15 +264,7 @@ impl<T> Box<[T]> {
     /// ```
     #[unstable(feature = "new_uninit", issue = "63291")]
     pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
-        let layout = alloc::Layout::array::<mem::MaybeUninit<T>>(len).unwrap();
-        unsafe {
-            let ptr = if layout.size() == 0 {
-                NonNull::dangling()
-            } else {
-                Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast()
-            };
-            Box::from_raw(slice::from_raw_parts_mut(ptr.as_ptr(), len))
-        }
+        unsafe { RawVec::with_capacity(len).into_box(len) }
     }
 }
 
@@ -778,7 +769,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
         let buf = RawVec::with_capacity(len);
         unsafe {
             ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
-            buf.into_box()
+            buf.into_box(slice.len()).assume_init()
         }
     }
 }
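These hunks route `Box::new_uninit`, `new_zeroed`, and `new_uninit_slice` through the new allocator entry points (`AllocInit::Uninitialized`/`Zeroed`, `RawVec::with_capacity`). A small usage sketch of the unstable `new_uninit` surface as it stood at the time; the `assume_init` conversions are the feature's own helpers:

#![feature(new_uninit)]

use std::mem::MaybeUninit;

fn main() {
    // `new_zeroed` now requests zeroed memory from the allocator directly
    // instead of zeroing an uninitialized allocation after the fact.
    let zero = Box::<u32>::new_zeroed();
    // Safety: an all-zero bit pattern is a valid `u32`.
    let zero = unsafe { zero.assume_init() };
    assert_eq!(*zero, 0);

    // `new_uninit_slice` is now a thin wrapper over `RawVec::with_capacity`.
    let mut values = Box::<[u32]>::new_uninit_slice(3);
    for (i, slot) in values.iter_mut().enumerate() {
        *slot = MaybeUninit::new(i as u32);
    }
    // Safety: every element was just initialized above.
    let values = unsafe { values.assume_init() };
    assert_eq!(*values, [0, 1, 2]);
}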
diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index bde66c406af..bbeced1751d 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -1256,6 +1256,48 @@ impl<K: Ord, V> BTreeMap<K, V> {
         right
     }
 
+    /// Creates an iterator which uses a closure to determine if an element should be removed.
+    ///
+    /// If the closure returns true, the element is removed from the map and yielded.
+    /// If the closure returns false, or panics, the element remains in the map and will not be
+    /// yielded.
+    ///
+    /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of
+    /// whether you choose to keep or remove it.
+    ///
+    /// If the iterator is only partially consumed or not consumed at all, each of the remaining
+    /// elements will still be subjected to the closure and removed and dropped if it returns true.
+    ///
+    /// It is unspecified how many more elements will be subjected to the closure
+    /// if a panic occurs in the closure, or a panic occurs while dropping an element,
+    /// or if the `DrainFilter` value is leaked.
+    ///
+    /// # Examples
+    ///
+    /// Splitting a map into even and odd keys, reusing the original map:
+    ///
+    /// ```
+    /// #![feature(btree_drain_filter)]
+    /// use std::collections::BTreeMap;
+    ///
+    /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
+    /// let evens: BTreeMap<_, _> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+    /// let odds = map;
+    /// assert_eq!(evens.keys().copied().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
+    /// assert_eq!(odds.keys().copied().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
+    /// ```
+    #[unstable(feature = "btree_drain_filter", issue = "70530")]
+    pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F>
+    where
+        F: FnMut(&K, &mut V) -> bool,
+    {
+        DrainFilter { pred, inner: self.drain_filter_inner() }
+    }
+    pub(super) fn drain_filter_inner(&mut self) -> DrainFilterInner<'_, K, V> {
+        let front = self.root.as_mut().map(|r| r.as_mut().first_leaf_edge());
+        DrainFilterInner { length: &mut self.length, cur_leaf_edge: front }
+    }
+
     /// Calculates the number of elements if it is incorrect.
     fn recalc_length(&mut self) {
         fn dfs<'a, K, V>(node: NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>) -> usize
@@ -1653,6 +1695,124 @@ impl<K, V> Clone for Values<'_, K, V> {
     }
 }
 
+/// An iterator produced by calling `drain_filter` on BTreeMap.
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<'a, K, V, F>
+where
+    K: 'a + Ord, // This Ord bound should be removed before stabilization.
+    V: 'a,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    pred: F,
+    inner: DrainFilterInner<'a, K, V>,
+}
+pub(super) struct DrainFilterInner<'a, K, V>
+where
+    K: 'a + Ord,
+    V: 'a,
+{
+    length: &'a mut usize,
+    cur_leaf_edge: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, K, V, F> Drop for DrainFilter<'a, K, V, F>
+where
+    K: 'a + Ord,
+    V: 'a,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    fn drop(&mut self) {
+        self.for_each(drop);
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, K, V, F> fmt::Debug for DrainFilter<'a, K, V, F>
+where
+    K: 'a + fmt::Debug + Ord,
+    V: 'a + fmt::Debug,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("DrainFilter").field(&self.inner.peek()).finish()
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, K, V, F> Iterator for DrainFilter<'a, K, V, F>
+where
+    K: 'a + Ord,
+    V: 'a,
+    F: 'a + FnMut(&K, &mut V) -> bool,
+{
+    type Item = (K, V);
+
+    fn next(&mut self) -> Option<(K, V)> {
+        self.inner.next(&mut self.pred)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<'a, K, V> DrainFilterInner<'a, K, V>
+where
+    K: 'a + Ord,
+    V: 'a,
+{
+    /// Allow Debug implementations to predict the next element.
+    pub(super) fn peek(&self) -> Option<(&K, &V)> {
+        let edge = self.cur_leaf_edge.as_ref()?;
+        edge.reborrow().next_kv().ok().map(|kv| kv.into_kv())
+    }
+
+    unsafe fn next_kv(
+        &mut self,
+    ) -> Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>> {
+        let edge = self.cur_leaf_edge.as_ref()?;
+        ptr::read(edge).next_kv().ok()
+    }
+
+    /// Implementation of a typical `DrainFilter::next` method, given the predicate.
+    pub(super) fn next<F>(&mut self, pred: &mut F) -> Option<(K, V)>
+    where
+        F: FnMut(&K, &mut V) -> bool,
+    {
+        while let Some(kv) = unsafe { self.next_kv() } {
+            let (k, v) = unsafe { ptr::read(&kv) }.into_kv_mut();
+            if pred(k, v) {
+                *self.length -= 1;
+                let (k, v, leaf_edge_location) = kv.remove_kv_tracking();
+                // `remove_kv_tracking` has either preserved or invalidated `self.cur_leaf_edge`
+                if let Some(node) = leaf_edge_location {
+                    match search::search_tree(node, &k) {
+                        search::SearchResult::Found(_) => unreachable!(),
+                        search::SearchResult::GoDown(leaf) => self.cur_leaf_edge = Some(leaf),
+                    }
+                };
+                return Some((k, v));
+            }
+            self.cur_leaf_edge = Some(kv.next_leaf_edge());
+        }
+        None
+    }
+
+    /// Implementation of a typical `DrainFilter::size_hint` method.
+    pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, Some(*self.length))
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F>
+where
+    K: Ord,
+    F: FnMut(&K, &mut V) -> bool,
+{
+}
+
 #[stable(feature = "btree_range", since = "1.17.0")]
 impl<'a, K, V> Iterator for Range<'a, K, V> {
     type Item = (&'a K, &'a V);
@@ -2531,12 +2691,31 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
     fn remove_kv(self) -> (K, V) {
         *self.length -= 1;
 
-        let (small_leaf, old_key, old_val) = match self.handle.force() {
+        let (old_key, old_val, _) = self.handle.remove_kv_tracking();
+        (old_key, old_val)
+    }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
+    /// Removes a key/value-pair from the map, and returns that pair, as well as
+    /// the whereabouts of the leaf edge corresponding to that former pair:
+    /// if None is returned, the leaf edge is still the left leaf edge of the KV handle;
+    /// if a node is returned, it heads the subtree where the leaf edge may be found.
+    fn remove_kv_tracking(
+        self,
+    ) -> (K, V, Option<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>>) {
+        let mut levels_down_handled: isize;
+        let (small_leaf, old_key, old_val) = match self.force() {
             Leaf(leaf) => {
+                levels_down_handled = 1; // handled at same level, but affects only the right side
                 let (hole, old_key, old_val) = leaf.remove();
                 (hole.into_node(), old_key, old_val)
             }
             Internal(mut internal) => {
+                // Replace the location freed in the internal node with the next KV,
+                // and remove that next KV from its leaf.
+                levels_down_handled = unsafe { ptr::read(&internal).into_node().height() } as isize;
+
                 let key_loc = internal.kv_mut().0 as *mut K;
                 let val_loc = internal.kv_mut().1 as *mut V;
 
@@ -2556,27 +2735,39 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
         let mut cur_node = small_leaf.forget_type();
         while cur_node.len() < node::MIN_LEN {
             match handle_underfull_node(cur_node) {
-                AtRoot => break,
+                AtRoot(root) => {
+                    cur_node = root;
+                    break;
+                }
                 EmptyParent(_) => unreachable!(),
                 Merged(parent) => {
+                    levels_down_handled -= 1;
                     if parent.len() == 0 {
                         // We must be at the root
-                        parent.into_root_mut().pop_level();
+                        let root = parent.into_root_mut();
+                        root.pop_level();
+                        cur_node = root.as_mut();
                         break;
                     } else {
                         cur_node = parent.forget_type();
                     }
                 }
-                Stole(_) => break,
+                Stole(internal_node) => {
+                    levels_down_handled -= 1;
+                    cur_node = internal_node.forget_type();
+                    // This internal node might be underfull, but only if it's the root.
+                    break;
+                }
             }
         }
 
-        (old_key, old_val)
+        let leaf_edge_location = if levels_down_handled > 0 { None } else { Some(cur_node) };
+        (old_key, old_val, leaf_edge_location)
     }
 }
 
 enum UnderflowResult<'a, K, V> {
-    AtRoot,
+    AtRoot(NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>),
     EmptyParent(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
     Merged(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
     Stole(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
@@ -2585,10 +2776,9 @@ enum UnderflowResult<'a, K, V> {
 fn handle_underfull_node<K, V>(
     node: NodeRef<marker::Mut<'_>, K, V, marker::LeafOrInternal>,
 ) -> UnderflowResult<'_, K, V> {
-    let parent = if let Ok(parent) = node.ascend() {
-        parent
-    } else {
-        return AtRoot;
+    let parent = match node.ascend() {
+        Ok(parent) => parent,
+        Err(root) => return AtRoot(root),
     };
 
     let (is_left, mut handle) = match parent.left_kv() {
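As the doc comment above notes, a `DrainFilter` that is dropped early still runs the remaining entries through the predicate; that is exactly what the `Drop` impl's `self.for_each(drop)` does. A short sketch of that behavior:

#![feature(btree_drain_filter)]

use std::collections::BTreeMap;

fn main() {
    let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
    {
        let mut drain = map.drain_filter(|k, _| k % 2 == 0);
        // Consume only the first matching entry...
        assert_eq!(drain.next(), Some((0, 0)));
        // ...and let `drain` fall out of scope here.
    }
    // The `Drop` impl ran the remaining entries through the predicate, so
    // 2, 4 and 6 are gone even though the iterator was never exhausted.
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
}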
diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs
index 6ebb98c42cd..11c14299573 100644
--- a/src/liballoc/collections/btree/node.rs
+++ b/src/liballoc/collections/btree/node.rs
@@ -1142,7 +1142,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
 
             (*left_node.as_leaf_mut()).len += right_len as u16 + 1;
 
-            if self.node.height > 1 {
+            let layout = if self.node.height > 1 {
                 ptr::copy_nonoverlapping(
                     right_node.cast_unchecked().as_internal().edges.as_ptr(),
                     left_node
@@ -1159,10 +1159,11 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                         .correct_parent_link();
                 }
 
-                Global.dealloc(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
+                Layout::new::<InternalNode<K, V>>()
             } else {
-                Global.dealloc(right_node.node.cast(), Layout::new::<LeafNode<K, V>>());
-            }
+                Layout::new::<LeafNode<K, V>>()
+            };
+            Global.dealloc(right_node.node.cast(), layout);
 
             Handle::new_edge(self.node, self.idx)
         }
diff --git a/src/liballoc/collections/btree/set.rs b/src/liballoc/collections/btree/set.rs
index b100ce754ca..0b02223def4 100644
--- a/src/liballoc/collections/btree/set.rs
+++ b/src/liballoc/collections/btree/set.rs
@@ -8,8 +8,8 @@ use core::fmt::{self, Debug};
 use core::iter::{FromIterator, FusedIterator, Peekable};
 use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
 
+use super::map::{BTreeMap, Keys};
 use super::Recover;
-use crate::collections::btree_map::{self, BTreeMap, Keys};
 
 // FIXME(conventions): implement bounded iterators
 
@@ -102,7 +102,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 #[derive(Debug)]
 pub struct IntoIter<T> {
-    iter: btree_map::IntoIter<T, ()>,
+    iter: super::map::IntoIter<T, ()>,
 }
 
 /// An iterator over a sub-range of items in a `BTreeSet`.
@@ -115,7 +115,7 @@ pub struct IntoIter<T> {
 #[derive(Debug)]
 #[stable(feature = "btree_range", since = "1.17.0")]
 pub struct Range<'a, T: 'a> {
-    iter: btree_map::Range<'a, T, ()>,
+    iter: super::map::Range<'a, T, ()>,
 }
 
 /// Core of SymmetricDifference and Union.
@@ -944,6 +944,41 @@ impl<T: Ord> BTreeSet<T> {
     {
         BTreeSet { map: self.map.split_off(key) }
     }
+
+    /// Creates an iterator which uses a closure to determine if a value should be removed.
+    ///
+    /// If the closure returns true, then the value is removed and yielded.
+    /// If the closure returns false, the value will remain in the set and will not be yielded
+    /// by the iterator.
+    ///
+    /// If the iterator is only partially consumed or not consumed at all, each of the remaining
+    /// values will still be subjected to the closure and removed and dropped if it returns true.
+    ///
+    /// It is unspecified how many more values will be subjected to the closure
+    /// if a panic occurs in the closure, or if a panic occurs while dropping a value, or if the
+    /// `DrainFilter` itself is leaked.
+    ///
+    /// # Examples
+    ///
+    /// Splitting a set into even and odd values, reusing the original set:
+    ///
+    /// ```
+    /// #![feature(btree_drain_filter)]
+    /// use std::collections::BTreeSet;
+    ///
+    /// let mut set: BTreeSet<i32> = (0..8).collect();
+    /// let evens: BTreeSet<_> = set.drain_filter(|v| v % 2 == 0).collect();
+    /// let odds = set;
+    /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
+    /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
+    /// ```
+    #[unstable(feature = "btree_drain_filter", issue = "70530")]
+    pub fn drain_filter<'a, F>(&'a mut self, pred: F) -> DrainFilter<'a, T, F>
+    where
+        F: 'a + FnMut(&T) -> bool,
+    {
+        DrainFilter { pred, inner: self.map.drain_filter_inner() }
+    }
 }
 
 impl<T> BTreeSet<T> {
@@ -1055,6 +1090,66 @@ impl<'a, T> IntoIterator for &'a BTreeSet<T> {
     }
 }
 
+/// An iterator produced by calling `drain_filter` on BTreeSet.
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + FnMut(&T) -> bool,
+{
+    pred: F,
+    inner: super::map::DrainFilterInner<'a, T, ()>,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F> Drop for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + FnMut(&T) -> bool,
+{
+    fn drop(&mut self) {
+        self.for_each(drop);
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F> fmt::Debug for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord + fmt::Debug,
+    F: 'a + FnMut(&T) -> bool,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("DrainFilter").field(&self.inner.peek().map(|(k, _)| k)).finish()
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, 'f, T, F> Iterator for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + 'f + FnMut(&T) -> bool,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        let pred = &mut self.pred;
+        let mut mapped_pred = |k: &T, _v: &mut ()| pred(k);
+        self.inner.next(&mut mapped_pred).map(|(k, _)| k)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F> FusedIterator for DrainFilter<'a, T, F>
+where
+    T: 'a + Ord,
+    F: 'a + FnMut(&T) -> bool,
+{
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Ord> Extend<T> for BTreeSet<T> {
     #[inline]
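`BTreeSet::drain_filter` is a thin adapter over the map's `DrainFilterInner`: it wraps the `FnMut(&T) -> bool` predicate so it fits the map's `FnMut(&K, &mut V)` shape. Combined with the draining `Drop` impl, it can stand in for a `retain`-style operation, as in this sketch:

#![feature(btree_drain_filter)]

use std::collections::BTreeSet;

fn main() {
    let mut set: BTreeSet<i32> = (1..=10).collect();
    // Dropping the iterator immediately still drains everything the
    // predicate selects, so this acts like `retain` with the test inverted.
    drop(set.drain_filter(|v| v % 3 != 0));
    assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![3, 6, 9]);
}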
diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs
index 69284fbf1b3..94532521a90 100644
--- a/src/liballoc/collections/vec_deque.rs
+++ b/src/liballoc/collections/vec_deque.rs
@@ -959,6 +959,9 @@ impl<T> VecDeque<T> {
     /// Returns a pair of slices which contain, in order, the contents of the
     /// `VecDeque`.
     ///
+    /// If [`make_contiguous`](#method.make_contiguous) was previously called, all elements
+    /// of the `VecDeque` will be in the first slice and the second slice will be empty.
+    ///
     /// # Examples
     ///
     /// ```
@@ -989,6 +992,9 @@ impl<T> VecDeque<T> {
     /// Returns a pair of slices which contain, in order, the contents of the
     /// `VecDeque`.
     ///
+    /// If [`make_contiguous`](#method.make_contiguous) was previously called, all elements
+    /// of the `VecDeque` will be in the first slice and the second slice will be empty.
+    ///
     /// # Examples
     ///
     /// ```
@@ -2044,6 +2050,148 @@ impl<T> VecDeque<T> {
         }
     }
 
+    /// Rearranges the internal storage of this deque so it is one contiguous slice, which is then returned.
+    ///
+    /// This method does not allocate and does not change the order of the inserted elements.
+    /// As it returns a mutable slice, this can be used to sort or binary search a deque.
+    ///
+    /// Once the internal storage is contiguous, the [`as_slices`](#method.as_slices) and
+    /// [`as_mut_slices`](#method.as_mut_slices) methods will return the entire contents of the
+    /// `VecDeque` in a single slice.
+    ///
+    /// # Examples
+    ///
+    /// Sorting the content of a deque.
+    ///
+    /// ```
+    /// #![feature(deque_make_contiguous)]
+    ///
+    /// use std::collections::VecDeque;
+    ///
+    /// let mut buf = VecDeque::with_capacity(15);
+    ///
+    /// buf.push_back(2);
+    /// buf.push_back(1);
+    /// buf.push_front(3);
+    ///
+    /// // sorting the deque
+    /// buf.make_contiguous().sort();
+    /// assert_eq!(buf.as_slices(), (&[1, 2, 3] as &[_], &[] as &[_]));
+    ///
+    /// // sorting it in reverse order
+    /// buf.make_contiguous().sort_by(|a, b| b.cmp(a));
+    /// assert_eq!(buf.as_slices(), (&[3, 2, 1] as &[_], &[] as &[_]));
+    /// ```
+    ///
+    /// Getting immutable access to the contiguous slice.
+    ///
+    /// ```rust
+    /// #![feature(deque_make_contiguous)]
+    ///
+    /// use std::collections::VecDeque;
+    ///
+    /// let mut buf = VecDeque::new();
+    ///
+    /// buf.push_back(2);
+    /// buf.push_back(1);
+    /// buf.push_front(3);
+    ///
+    /// buf.make_contiguous();
+    /// if let (slice, &[]) = buf.as_slices() {
+    ///     // we can now be sure that `slice` contains all elements of the deque,
+    ///     // while still having immutable access to `buf`.
+    ///     assert_eq!(buf.len(), slice.len());
+    ///     assert_eq!(slice, &[3, 2, 1] as &[_]);
+    /// }
+    /// ```
+    #[unstable(feature = "deque_make_contiguous", issue = "none")]
+    pub fn make_contiguous(&mut self) -> &mut [T] {
+        if self.is_contiguous() {
+            let tail = self.tail;
+            let head = self.head;
+            return unsafe { &mut self.buffer_as_mut_slice()[tail..head] };
+        }
+
+        let buf = self.buf.ptr();
+        let cap = self.cap();
+        let len = self.len();
+
+        let free = self.tail - self.head;
+        let tail_len = cap - self.tail;
+
+        if free >= tail_len {
+            // there is enough free space to copy the tail in one go,
+            // this means that we first shift the head backwards, and then
+            // copy the tail to the correct position.
+            //
+            // from: DEFGH....ABC
+            // to:   ABCDEFGH....
+            unsafe {
+                ptr::copy(buf, buf.add(tail_len), self.head);
+                // ...DEFGH.ABC
+                ptr::copy_nonoverlapping(buf.add(self.tail), buf, tail_len);
+                // ABCDEFGH....
+
+                self.tail = 0;
+                self.head = len;
+            }
+        } else if free >= self.head {
+            // there is enough free space to copy the head in one go,
+            // this means that we first shift the tail forwards, and then
+            // copy the head to the correct position.
+            //
+            // from: FGH....ABCDE
+            // to:   ...ABCDEFGH.
+            unsafe {
+                ptr::copy(buf.add(self.tail), buf.add(self.head), tail_len);
+                // FGHABCDE....
+                ptr::copy_nonoverlapping(buf, buf.add(self.head + tail_len), self.head);
+                // ...ABCDEFGH.
+
+                self.tail = self.head;
+                self.head = self.tail + len;
+            }
+        } else {
+            // free is smaller than both head and tail,
+            // this means we have to slowly "swap" the tail and the head.
+            //
+            // from: EFGHI...ABCD or HIJK.ABCDEFG
+            // to:   ABCDEFGHI... or ABCDEFGHIJK.
+            let mut left_edge: usize = 0;
+            let mut right_edge: usize = self.tail;
+            unsafe {
+                // The general problem looks like this
+                // GHIJKLM...ABCDEF - before any swaps
+                // ABCDEFM...GHIJKL - after 1 pass of swaps
+                // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
+                //                  - then restart the algorithm with a new (smaller) store
+                // Sometimes the temp store is reached when the right edge is at the end
+                // of the buffer - this means we've hit the right order with fewer swaps!
+                // E.g.
+                // EF..ABCD
+                // ABCDEF.. - after only four swaps we've finished
+                while left_edge < len && right_edge != cap {
+                    let mut right_offset = 0;
+                    for i in left_edge..right_edge {
+                        right_offset = (i - left_edge) % (cap - right_edge);
+                        let src: isize = (right_edge + right_offset) as isize;
+                        ptr::swap(buf.add(i), buf.offset(src));
+                    }
+                    let n_ops = right_edge - left_edge;
+                    left_edge += n_ops;
+                    right_edge += right_offset + 1;
+                }
+
+                self.tail = 0;
+                self.head = len;
+            }
+        }
+
+        let tail = self.tail;
+        let head = self.head;
+        unsafe { &mut self.buffer_as_mut_slice()[tail..head] }
+    }
+
     /// Rotates the double-ended queue `mid` places to the left.
     ///
     /// Equivalently,
@@ -2803,63 +2951,16 @@ impl<T> From<VecDeque<T>> for Vec<T> {
     /// assert_eq!(vec, [8, 9, 1, 2, 3, 4]);
     /// assert_eq!(vec.as_ptr(), ptr);
     /// ```
-    fn from(other: VecDeque<T>) -> Self {
+    fn from(mut other: VecDeque<T>) -> Self {
+        other.make_contiguous();
+
         unsafe {
             let buf = other.buf.ptr();
             let len = other.len();
-            let tail = other.tail;
-            let head = other.head;
             let cap = other.cap();
 
-            // Need to move the ring to the front of the buffer, as vec will expect this.
-            if other.is_contiguous() {
-                ptr::copy(buf.add(tail), buf, len);
-            } else {
-                if (tail - head) >= cmp::min(cap - tail, head) {
-                    // There is enough free space in the centre for the shortest block so we can
-                    // do this in at most three copy moves.
-                    if (cap - tail) > head {
-                        // right hand block is the long one; move that enough for the left
-                        ptr::copy(buf.add(tail), buf.add(tail - head), cap - tail);
-                        // copy left in the end
-                        ptr::copy(buf, buf.add(cap - head), head);
-                        // shift the new thing to the start
-                        ptr::copy(buf.add(tail - head), buf, len);
-                    } else {
-                        // left hand block is the long one, we can do it in two!
-                        ptr::copy(buf, buf.add(cap - tail), head);
-                        ptr::copy(buf.add(tail), buf, cap - tail);
-                    }
-                } else {
-                    // Need to use N swaps to move the ring
-                    // We can use the space at the end of the ring as a temp store
-
-                    let mut left_edge: usize = 0;
-                    let mut right_edge: usize = tail;
-
-                    // The general problem looks like this
-                    // GHIJKLM...ABCDEF - before any swaps
-                    // ABCDEFM...GHIJKL - after 1 pass of swaps
-                    // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
-                    //                  - then restart the algorithm with a new (smaller) store
-                    // Sometimes the temp store is reached when the right edge is at the end
-                    // of the buffer - this means we've hit the right order with fewer swaps!
-                    // E.g
-                    // EF..ABCD
-                    // ABCDEF.. - after four only swaps we've finished
-
-                    while left_edge < len && right_edge != cap {
-                        let mut right_offset = 0;
-                        for i in left_edge..right_edge {
-                            right_offset = (i - left_edge) % (cap - right_edge);
-                            let src: isize = (right_edge + right_offset) as isize;
-                            ptr::swap(buf.add(i), buf.offset(src));
-                        }
-                        let n_ops = right_edge - left_edge;
-                        left_edge += n_ops;
-                        right_edge += right_offset + 1;
-                    }
-                }
+            if other.head != 0 {
+                ptr::copy(buf.add(other.tail), buf, len);
             }
             let out = Vec::from_raw_parts(buf, len, cap);
             mem::forget(other);
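With `make_contiguous` available, the `From<VecDeque<T>> for Vec<T>` impl shrinks to: rotate the elements into one slice, slide them to index 0 if needed, and adopt the same buffer via `Vec::from_raw_parts`. A sketch of the conversion; the wrap-around setup assumes the usual power-of-two ring capacity:

use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<u32> = (1..=4).collect();
    deque.push_front(0); // with a power-of-two ring, the buffer now wraps
    assert!(!deque.as_slices().1.is_empty()); // two discontiguous slices

    // The conversion first calls `make_contiguous`, then adopts the same
    // allocation with `Vec::from_raw_parts`; no new buffer is allocated.
    let vec: Vec<u32> = Vec::from(deque);
    assert_eq!(vec, [0, 1, 2, 3, 4]);
}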
diff --git a/src/liballoc/collections/vec_deque/tests.rs b/src/liballoc/collections/vec_deque/tests.rs
index f2ce5b1d15d..8ef5ec78e05 100644
--- a/src/liballoc/collections/vec_deque/tests.rs
+++ b/src/liballoc/collections/vec_deque/tests.rs
@@ -1,6 +1,6 @@
 use super::*;
 
-use ::test;
+use test;
 
 #[bench]
 #[cfg_attr(miri, ignore)] // Miri does not support benchmarks
@@ -131,6 +131,87 @@ fn test_insert() {
 }
 
 #[test]
+fn make_contiguous_big_tail() {
+    let mut tester = VecDeque::with_capacity(15);
+
+    for i in 0..3 {
+        tester.push_back(i);
+    }
+
+    for i in 3..10 {
+        tester.push_front(i);
+    }
+
+    // 012......9876543
+    assert_eq!(tester.capacity(), 15);
+    assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
+
+    let expected_start = tester.head;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_big_head() {
+    let mut tester = VecDeque::with_capacity(15);
+
+    for i in 0..8 {
+        tester.push_back(i);
+    }
+
+    for i in 8..10 {
+        tester.push_front(i);
+    }
+
+    // 01234567......98
+    let expected_start = 0;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_small_free() {
+    let mut tester = VecDeque::with_capacity(15);
+
+    for i in 'A' as u8..'I' as u8 {
+        tester.push_back(i as char);
+    }
+
+    for i in 'I' as u8..'N' as u8 {
+        tester.push_front(i as char);
+    }
+
+    // ABCDEFGH...MLKJI
+    let expected_start = 0;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!(
+        (&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
+        tester.as_slices()
+    );
+
+    tester.clear();
+    for i in 'I' as u8..'N' as u8 {
+        tester.push_back(i as char);
+    }
+
+    for i in 'A' as u8..'I' as u8 {
+        tester.push_front(i as char);
+    }
+
+    // IJKLM...HGFEDCBA
+    let expected_start = 0;
+    tester.make_contiguous();
+    assert_eq!(tester.tail, expected_start);
+    assert_eq!(
+        (&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
+        tester.as_slices()
+    );
+}
+
+#[test]
 fn test_remove() {
     // This test checks that every single combination of tail position, length, and
     // removal position is tested. Capacity 15 should be large enough to cover every case.
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 5857b79d5ee..121c1cde548 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -100,6 +100,7 @@
 #![feature(lang_items)]
 #![feature(libc)]
 #![cfg_attr(not(bootstrap), feature(negative_impls))]
+#![feature(new_uninit)]
 #![feature(nll)]
 #![feature(optin_builtin_traits)]
 #![feature(pattern)]
diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs
index 422d3486f92..4bc0c3a079d 100644
--- a/src/liballoc/macros.rs
+++ b/src/liballoc/macros.rs
@@ -36,6 +36,9 @@
 #[stable(feature = "rust1", since = "1.0.0")]
 #[allow_internal_unstable(box_syntax)]
 macro_rules! vec {
+    () => (
+        $crate::vec::Vec::new()
+    );
     ($elem:expr; $n:expr) => (
         $crate::vec::from_elem($elem, $n)
     );
@@ -51,6 +54,9 @@ macro_rules! vec {
 // NB see the slice::hack module in slice.rs for more information
 #[cfg(test)]
 macro_rules! vec {
+    () => (
+        $crate::vec::Vec::new()
+    );
     ($elem:expr; $n:expr) => (
         $crate::vec::from_elem($elem, $n)
     );
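The new empty arm makes `vec![]` expand directly to `Vec::new()` rather than falling through to the list arm, which builds the vector from a boxed slice. A tiny illustration:

fn main() {
    // The empty arm expands to `Vec::new()`, which performs no allocation.
    let empty: Vec<i32> = vec![];
    assert!(empty.is_empty());
    assert_eq!(empty.capacity(), 0);
}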
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index b31fec7f037..2bf40490e78 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -1,13 +1,19 @@
 #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
 #![doc(hidden)]
 
+use core::alloc::MemoryBlock;
 use core::cmp;
-use core::mem;
+use core::mem::{self, MaybeUninit};
 use core::ops::Drop;
-use core::ptr::{self, NonNull, Unique};
+use core::ptr::{NonNull, Unique};
 use core::slice;
 
-use crate::alloc::{handle_alloc_error, AllocErr, AllocRef, Global, Layout};
+use crate::alloc::{
+    handle_alloc_error, AllocErr,
+    AllocInit::{self, *},
+    AllocRef, Global, Layout,
+    ReallocPlacement::{self, *},
+};
 use crate::boxed::Box;
 use crate::collections::TryReserveError::{self, *};
 
@@ -21,81 +27,26 @@ mod tests;
 ///
 /// * Produces `Unique::empty()` on zero-sized types.
 /// * Produces `Unique::empty()` on zero-length allocations.
+/// * Avoids freeing `Unique::empty()`.
 /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
 /// * Guards against 32-bit systems allocating more than isize::MAX bytes.
 /// * Guards against overflowing your length.
-/// * Aborts on OOM or calls `handle_alloc_error` as applicable.
-/// * Avoids freeing `Unique::empty()`.
+/// * Calls `handle_alloc_error` for fallible allocations.
 /// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
+/// * Uses the excess returned from the allocator to expose the largest available capacity.
 ///
 /// This type does not in any way inspect the memory that it manages. When dropped it *will*
 /// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
 /// to handle the actual things *stored* inside of a `RawVec`.
 ///
-/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types.
-/// This enables you to use capacity-growing logic catch the overflows in your length
-/// that might occur with zero-sized types.
-///
-/// The above means that you need to be careful when round-tripping this type with a
-/// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`,
-/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity
-/// field. This allows zero-sized types to not be special-cased by consumers of
-/// this type.
+/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
+/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
+/// `Box<[T]>`, since `capacity()` won't yield the length.
 #[allow(missing_debug_implementations)]
 pub struct RawVec<T, A: AllocRef = Global> {
     ptr: Unique<T>,
     cap: usize,
-    a: A,
-}
-
-impl<T, A: AllocRef> RawVec<T, A> {
-    /// Like `new`, but parameterized over the choice of allocator for
-    /// the returned `RawVec`.
-    pub const fn new_in(a: A) -> Self {
-        let cap = if mem::size_of::<T>() == 0 { core::usize::MAX } else { 0 };
-
-        // `Unique::empty()` doubles as "unallocated" and "zero-sized allocation".
-        RawVec { ptr: Unique::empty(), cap, a }
-    }
-
-    /// Like `with_capacity`, but parameterized over the choice of
-    /// allocator for the returned `RawVec`.
-    #[inline]
-    pub fn with_capacity_in(capacity: usize, a: A) -> Self {
-        RawVec::allocate_in(capacity, false, a)
-    }
-
-    /// Like `with_capacity_zeroed`, but parameterized over the choice
-    /// of allocator for the returned `RawVec`.
-    #[inline]
-    pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self {
-        RawVec::allocate_in(capacity, true, a)
-    }
-
-    fn allocate_in(mut capacity: usize, zeroed: bool, mut a: A) -> Self {
-        let elem_size = mem::size_of::<T>();
-
-        let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
-        alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
-
-        // Handles ZSTs and `capacity == 0` alike.
-        let ptr = if alloc_size == 0 {
-            NonNull::<T>::dangling()
-        } else {
-            let align = mem::align_of::<T>();
-            let layout = Layout::from_size_align(alloc_size, align).unwrap();
-            let result = if zeroed { a.alloc_zeroed(layout) } else { a.alloc(layout) };
-            match result {
-                Ok((ptr, size)) => {
-                    capacity = size / elem_size;
-                    ptr.cast()
-                }
-                Err(_) => handle_alloc_error(layout),
-            }
-        };
-
-        RawVec { ptr: ptr.into(), cap: capacity, a }
-    }
+    alloc: A,
 }
 
 impl<T> RawVec<T, Global> {
@@ -138,39 +89,26 @@ impl<T> RawVec<T, Global> {
     /// Aborts on OOM.
     #[inline]
     pub fn with_capacity(capacity: usize) -> Self {
-        RawVec::allocate_in(capacity, false, Global)
+        Self::with_capacity_in(capacity, Global)
     }
 
     /// Like `with_capacity`, but guarantees the buffer is zeroed.
     #[inline]
     pub fn with_capacity_zeroed(capacity: usize) -> Self {
-        RawVec::allocate_in(capacity, true, Global)
+        Self::with_capacity_zeroed_in(capacity, Global)
     }
-}
 
-impl<T, A: AllocRef> RawVec<T, A> {
-    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
-    ///
-    /// # Undefined Behavior
-    ///
-    /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
-    /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
-    /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
-    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
-        RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a }
-    }
-}
-
-impl<T> RawVec<T, Global> {
     /// Reconstitutes a `RawVec` from a pointer and capacity.
     ///
-    /// # Undefined Behavior
+    /// # Safety
     ///
     /// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
-    /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
     /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
+    #[inline]
     pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
-        RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a: Global }
+        Self::from_raw_parts_in(ptr, capacity, Global)
     }
 
     /// Converts a `Box<[T]>` into a `RawVec<T>`.
@@ -184,6 +122,56 @@ impl<T> RawVec<T, Global> {
 }
 
 impl<T, A: AllocRef> RawVec<T, A> {
+    /// Like `new`, but parameterized over the choice of allocator for
+    /// the returned `RawVec`.
+    pub const fn new_in(alloc: A) -> Self {
+        // `cap: 0` means "unallocated". zero-sized types are ignored.
+        Self { ptr: Unique::empty(), cap: 0, alloc }
+    }
+
+    /// Like `with_capacity`, but parameterized over the choice of
+    /// allocator for the returned `RawVec`.
+    #[inline]
+    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+        Self::allocate_in(capacity, Uninitialized, alloc)
+    }
+
+    /// Like `with_capacity_zeroed`, but parameterized over the choice
+    /// of allocator for the returned `RawVec`.
+    #[inline]
+    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+        Self::allocate_in(capacity, Zeroed, alloc)
+    }
+
+    fn allocate_in(capacity: usize, init: AllocInit, mut alloc: A) -> Self {
+        if mem::size_of::<T>() == 0 {
+            Self::new_in(alloc)
+        } else {
+            let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow());
+            alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow());
+
+            let memory = alloc.alloc(layout, init).unwrap_or_else(|_| handle_alloc_error(layout));
+            Self {
+                ptr: memory.ptr.cast().into(),
+                cap: Self::capacity_from_bytes(memory.size),
+                alloc,
+            }
+        }
+    }
+
+    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
+    ///
+    /// # Safety
+    ///
+    /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
+    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+    /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
+    #[inline]
+    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
+        Self { ptr: Unique::new_unchecked(ptr), cap: capacity, alloc: a }
+    }
+
     /// Gets a raw pointer to the start of the allocation. Note that this is
     /// `Unique::empty()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
     /// be careful.
@@ -196,21 +184,21 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if mem::size_of::<T>() == 0 { !0 } else { self.cap }
+        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
     }
 
     /// Returns a shared reference to the allocator backing this `RawVec`.
     pub fn alloc(&self) -> &A {
-        &self.a
+        &self.alloc
     }
 
     /// Returns a mutable reference to the allocator backing this `RawVec`.
     pub fn alloc_mut(&mut self) -> &mut A {
-        &mut self.a
+        &mut self.alloc
     }
 
-    fn current_layout(&self) -> Option<Layout> {
-        if self.cap == 0 {
+    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
+        if mem::size_of::<T>() == 0 || self.cap == 0 {
             None
         } else {
             // We have an allocated chunk of memory, so we can bypass runtime
@@ -218,7 +206,8 @@ impl<T, A: AllocRef> RawVec<T, A> {
             unsafe {
                 let align = mem::align_of::<T>();
                 let size = mem::size_of::<T>() * self.cap;
-                Some(Layout::from_size_align_unchecked(size, align))
+                let layout = Layout::from_size_align_unchecked(size, align);
+                Some((self.ptr.cast().into(), layout))
             }
         }
     }
@@ -274,50 +263,10 @@ impl<T, A: AllocRef> RawVec<T, A> {
     #[inline(never)]
     #[cold]
     pub fn double(&mut self) {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-
-            // Since we set the capacity to `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            assert!(elem_size != 0, "capacity overflow");
-
-            let (ptr, new_cap) = match self.current_layout() {
-                Some(cur) => {
-                    // Since we guarantee that we never allocate more than
-                    // `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as
-                    // a precondition, so this can't overflow. Additionally the
-                    // alignment will never be too large as to "not be
-                    // satisfiable", so `Layout::from_size_align` will always
-                    // return `Some`.
-                    //
-                    // TL;DR, we bypass runtime checks due to dynamic assertions
-                    // in this module, allowing us to use
-                    // `from_size_align_unchecked`.
-                    let new_cap = 2 * self.cap;
-                    let new_size = new_cap * elem_size;
-                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
-                    let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, new_size);
-                    match ptr_res {
-                        Ok((ptr, new_size)) => (ptr, new_size / elem_size),
-                        Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
-                            new_size,
-                            cur.align(),
-                        )),
-                    }
-                }
-                None => {
-                    // Skip to 4 because tiny `Vec`'s are dumb; but not if that
-                    // would cause overflow.
-                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
-                    let layout = Layout::array::<T>(new_cap).unwrap();
-                    match self.a.alloc(layout) {
-                        Ok((ptr, new_size)) => (ptr, new_size / elem_size),
-                        Err(_) => handle_alloc_error(layout),
-                    }
-                }
-            };
-            self.ptr = ptr.cast().into();
-            self.cap = new_cap;
+        match self.grow(Double, MayMove, Uninitialized) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
         }
     }
 
@@ -336,99 +285,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
     #[inline(never)]
     #[cold]
     pub fn double_in_place(&mut self) -> bool {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-            let old_layout = match self.current_layout() {
-                Some(layout) => layout,
-                None => return false, // nothing to double
-            };
-
-            // Since we set the capacity to `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            assert!(elem_size != 0, "capacity overflow");
-
-            // Since we guarantee that we never allocate more than `isize::MAX`
-            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
-            // this can't overflow.
-            //
-            // Similarly to with `double` above, we can go straight to
-            // `Layout::from_size_align_unchecked` as we know this won't
-            // overflow and the alignment is sufficiently small.
-            let new_cap = 2 * self.cap;
-            let new_size = new_cap * elem_size;
-            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
-            match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) {
-                Ok(_) => {
-                    // We can't directly divide `size`.
-                    self.cap = new_cap;
-                    true
-                }
-                Err(_) => false,
-            }
-        }
-    }
-
-    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve_exact(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact)
-    }
-
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
-    /// will reallocate the minimum possible amount of memory necessary.
-    /// Generally this will be exactly the amount of memory necessary,
-    /// but in principle the allocator is free to give back more than
-    /// we asked for.
-    ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
-    /// the requested space. This is not really unsafe, but the unsafe
-    /// code *you* write that relies on the behavior of this function may break.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    ///
-    /// # Aborts
-    ///
-    /// Aborts on OOM.
-    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) {
-            Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { .. }) => unreachable!(),
-            Ok(()) => { /* yay */ }
-        }
-    }
-
-    /// Calculates the buffer's new size given that it'll hold `used_capacity +
-    /// needed_extra_capacity` elements. This logic is used in amortized reserve methods.
-    /// Returns `(new_capacity, new_alloc_size)`.
-    fn amortized_new_size(
-        &self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<usize, TryReserveError> {
-        // Nothing we can really do about these checks, sadly.
-        let required_cap =
-            used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
-        let double_cap = self.cap * 2;
-        // `double_cap` guarantees exponential growth.
-        Ok(cmp::max(double_cap, required_cap))
-    }
-
-    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized)
+        self.grow(Double, InPlace, Uninitialized).is_ok()
     }
 
     /// Ensures that the buffer contains at least enough space to hold
@@ -484,12 +341,26 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// # }
     /// ```
     pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) {
+        match self.try_reserve(used_capacity, needed_extra_capacity) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { .. }) => unreachable!(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
         }
     }
+
+    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+    pub fn try_reserve(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+        } else {
+            Ok(())
+        }
+    }
+
     /// Attempts to ensure that the buffer contains at least enough space to hold
     /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
     /// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -508,45 +379,54 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// * Panics on 32-bit platforms if the requested capacity exceeds
     ///   `isize::MAX` bytes.
     pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity. If the current `cap` is 0, we can't
-            // reallocate in place.
-            // Wrapping in case they give a bad `used_capacity`
-            let old_layout = match self.current_layout() {
-                Some(layout) => layout,
-                None => return false,
-            };
-            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
-                return false;
-            }
+        // This is more readable than putting this in one line:
+        // `!self.needs_to_grow(...) || self.grow(...).is_ok()`
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized)
+                .is_ok()
+        } else {
+            true
+        }
+    }
 
-            let new_cap = self
-                .amortized_new_size(used_capacity, needed_extra_capacity)
-                .unwrap_or_else(|_| capacity_overflow());
-
-            // Here, `cap < used_capacity + needed_extra_capacity <= new_cap`
-            // (regardless of whether `self.cap - used_capacity` wrapped).
-            // Therefore, we can safely call `grow_in_place`.
-
-            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
-            // FIXME: may crash and burn on over-reserve
-            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
-            match self.a.grow_in_place(
-                NonNull::from(self.ptr).cast(),
-                old_layout,
-                new_layout.size(),
-            ) {
-                Ok(_) => {
-                    self.cap = new_cap;
-                    true
-                }
-                Err(_) => false,
-            }
+    /// Ensures that the buffer contains at least enough space to hold
+    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
+    /// will reallocate the minimum possible amount of memory necessary.
+    /// Generally this will be exactly the amount of memory necessary,
+    /// but in principle the allocator is free to give back more than
+    /// we asked for.
+    ///
+    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe
+    /// code *you* write that relies on the behavior of this function may break.
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM.
+    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
+        match self.try_reserve_exact(used_capacity, needed_extra_capacity) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
+        }
+    }
+
+    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+    pub fn try_reserve_exact(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+        } else {
+            Ok(())
         }
     }
 
@@ -561,166 +441,157 @@ impl<T, A: AllocRef> RawVec<T, A> {
     ///
     /// Aborts on OOM.
     pub fn shrink_to_fit(&mut self, amount: usize) {
-        let elem_size = mem::size_of::<T>();
-
-        // Set the `cap` because they might be about to promote to a `Box<[T]>`
-        if elem_size == 0 {
-            self.cap = amount;
-            return;
-        }
-
-        // This check is my waterloo; it's the only thing `Vec` wouldn't have to do.
-        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
-
-        if amount == 0 {
-            // We want to create a new zero-length vector within the
-            // same allocator. We use `ptr::write` to avoid an
-            // erroneous attempt to drop the contents, and we use
-            // `ptr::read` to sidestep condition against destructuring
-            // types that implement Drop.
-
-            unsafe {
-                let a = ptr::read(&self.a as *const A);
-                self.dealloc_buffer();
-                ptr::write(self, RawVec::new_in(a));
-            }
-        } else if self.cap != amount {
-            unsafe {
-                // We know here that our `amount` is greater than zero. This
-                // implies, via the assert above, that capacity is also greater
-                // than zero, which means that we've got a current layout that
-                // "fits"
-                //
-                // We also know that `self.cap` is greater than `amount`, and
-                // consequently we don't need runtime checks for creating either
-                // layout.
-                let old_size = elem_size * self.cap;
-                let new_size = elem_size * amount;
-                let align = mem::align_of::<T>();
-                let old_layout = Layout::from_size_align_unchecked(old_size, align);
-                match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, new_size) {
-                    Ok((ptr, _)) => self.ptr = ptr.cast().into(),
-                    Err(_) => {
-                        handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
-                    }
-                }
-            }
-            self.cap = amount;
+        match self.shrink(amount, MayMove) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
         }
     }
 }
 
-enum Fallibility {
-    Fallible,
-    Infallible,
+#[derive(Copy, Clone)]
+enum Strategy {
+    Double,
+    Amortized { used_capacity: usize, needed_extra_capacity: usize },
+    Exact { used_capacity: usize, needed_extra_capacity: usize },
 }
+use Strategy::*;
 
-use Fallibility::*;
+impl<T, A: AllocRef> RawVec<T, A> {
+    /// Returns `true` if the buffer needs to grow to fulfill the needed extra capacity.
+    /// Mainly used to make inlining of reserve calls possible without inlining `grow`.
+    fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
+        needed_extra_capacity > self.capacity().wrapping_sub(used_capacity)
+    }
 
-enum ReserveStrategy {
-    Exact,
-    Amortized,
-}
+    fn capacity_from_bytes(excess: usize) -> usize {
+        debug_assert_ne!(mem::size_of::<T>(), 0);
+        excess / mem::size_of::<T>()
+    }
 
-use ReserveStrategy::*;
+    fn set_memory(&mut self, memory: MemoryBlock) {
+        self.ptr = memory.ptr.cast().into();
+        self.cap = Self::capacity_from_bytes(memory.size);
+    }
 
-impl<T, A: AllocRef> RawVec<T, A> {
-    fn reserve_internal(
+    /// Single method to handle all possibilities of growing the buffer.
+    fn grow(
         &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-        fallibility: Fallibility,
-        strategy: ReserveStrategy,
+        strategy: Strategy,
+        placement: ReallocPlacement,
+        init: AllocInit,
     ) -> Result<(), TryReserveError> {
         let elem_size = mem::size_of::<T>();
+        if elem_size == 0 {
+            // Since we return a capacity of `usize::MAX` when `elem_size` is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow);
+        }
+        let new_layout = match strategy {
+            Double => unsafe {
+                // Since we guarantee that we never allocate more than `isize::MAX` bytes,
+                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
+                // Additionally the alignment will never be too large as to "not be satisfiable",
+                // so `Layout::from_size_align` will always return `Some`.
+                //
+                // TL;DR, we bypass runtime checks due to dynamic assertions in this module,
+                // allowing us to use `from_size_align_unchecked`.
+                let cap = if self.cap == 0 {
+                    // Skip to 4 because tiny `Vec`s are dumb; but not if that would cause overflow.
+                    if elem_size > usize::MAX / 8 { 1 } else { 4 }
+                } else {
+                    self.cap * 2
+                };
+                Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>())
+            },
+            Amortized { used_capacity, needed_extra_capacity } => {
+                // Nothing we can really do about these checks, sadly.
+                let required_cap =
+                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+                // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
+                let double_cap = self.cap * 2;
+                // `double_cap` guarantees exponential growth.
+                let cap = cmp::max(double_cap, required_cap);
+                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
+            }
+            Exact { used_capacity, needed_extra_capacity } => {
+                let cap =
+                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
+            }
+        };
+        alloc_guard(new_layout.size())?;
 
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they gave a bad `used_capacity`.
-            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
-                return Ok(());
+        let memory = if let Some((ptr, old_layout)) = self.current_memory() {
+            debug_assert_eq!(old_layout.align(), new_layout.align());
+            unsafe {
+                self.alloc
+                    .grow(ptr, old_layout, new_layout.size(), placement, init)
+                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+            }
+        } else {
+            match placement {
+                MayMove => self.alloc.alloc(new_layout, init),
+                InPlace => Err(AllocErr),
             }
+            .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+        };
+        self.set_memory(memory);
+        Ok(())
+    }
 
-            // Nothing we can really do about these checks, sadly.
-            let new_cap = match strategy {
-                Exact => {
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?
-                }
-                Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?,
-            };
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
+    fn shrink(
+        &mut self,
+        amount: usize,
+        placement: ReallocPlacement,
+    ) -> Result<(), TryReserveError> {
+        assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
 
-            alloc_guard(new_layout.size())?;
+        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
+        let new_size = amount * mem::size_of::<T>();
 
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size())
-                }
-                None => self.a.alloc(new_layout),
-            };
-
-            let (ptr, new_cap) = match (res, fallibility) {
-                (Err(AllocErr), Infallible) => handle_alloc_error(new_layout),
-                (Err(AllocErr), Fallible) => {
-                    return Err(TryReserveError::AllocError {
-                        layout: new_layout,
-                        non_exhaustive: (),
-                    });
+        let memory = unsafe {
+            self.alloc.shrink(ptr, layout, new_size, placement).map_err(|_| {
+                TryReserveError::AllocError {
+                    layout: Layout::from_size_align_unchecked(new_size, layout.align()),
+                    non_exhaustive: (),
                 }
-                (Ok((ptr, new_size)), _) => (ptr, new_size / elem_size),
-            };
-
-            self.ptr = ptr.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+            })?
+        };
+        self.set_memory(memory);
+        Ok(())
     }
 }
 
 impl<T> RawVec<T, Global> {
-    /// Converts the entire buffer into `Box<[T]>`.
+    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
     ///
     /// Note that this will correctly reconstitute any `cap` changes
     /// that may have been performed. (See description of type for details.)
     ///
-    /// # Undefined Behavior
+    /// # Safety
     ///
-    /// All elements of `RawVec<T, Global>` must be initialized. Notice that
-    /// the rules around uninitialized boxed values are not finalized yet,
-    /// but until they are, it is advisable to avoid them.
-    pub unsafe fn into_box(self) -> Box<[T]> {
+    /// `shrink_to_fit(len)` must be called immediately prior to calling this function. This
+    /// implies that `len` must be smaller than or equal to `self.capacity()`.
+    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
+        debug_assert!(
+            len <= self.capacity(),
+            "`len` must be smaller than or equal to `self.capacity()`"
+        );
+
         // NOTE: not calling `capacity()` here; actually using the real `cap` field!
-        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
-        let output: Box<[T]> = Box::from_raw(slice);
+        let slice = slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, len);
+        let output = Box::from_raw(slice);
         mem::forget(self);
         output
     }
 }
 
-impl<T, A: AllocRef> RawVec<T, A> {
-    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
-    pub unsafe fn dealloc_buffer(&mut self) {
-        let elem_size = mem::size_of::<T>();
-        if elem_size != 0 {
-            if let Some(layout) = self.current_layout() {
-                self.a.dealloc(NonNull::from(self.ptr).cast(), layout);
-            }
-        }
-    }
-}
-
 unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
     /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
     fn drop(&mut self) {
-        unsafe {
-            self.dealloc_buffer();
+        if let Some((ptr, layout)) = self.current_memory() {
+            unsafe { self.alloc.dealloc(ptr, layout) }
         }
     }
 }
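
The three `Strategy` arms above differ only in how the new capacity is
computed. As a standalone sketch of the `Amortized` arithmetic (the names
are illustrative, not the crate-private API): growth takes the larger of
double the current capacity and the exact requirement, which is what keeps
repeated pushes amortized O(1).

    // Illustrative sketch of the `Amortized` capacity computation;
    // `None` plays the role of `CapacityOverflow`.
    fn amortized_cap(cap: usize, used_capacity: usize, needed_extra_capacity: usize) -> Option<usize> {
        let required_cap = used_capacity.checked_add(needed_extra_capacity)?;
        // `cap * 2` cannot overflow because `cap <= isize::MAX`.
        Some(std::cmp::max(cap * 2, required_cap))
    }

    fn main() {
        assert_eq!(amortized_cap(4, 4, 1), Some(8));     // doubling dominates
        assert_eq!(amortized_cap(4, 4, 100), Some(104)); // exact requirement dominates
    }
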
diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs
index 21a8a76d0a7..e7ab8a305d2 100644
--- a/src/liballoc/raw_vec/tests.rs
+++ b/src/liballoc/raw_vec/tests.rs
@@ -12,6 +12,7 @@ fn allocator_param() {
     //
     // Instead, this just checks that the `RawVec` methods do at
     // least go through the Allocator API when it reserves
+
     // storage.
 
     // A dumb allocator that consumes a fixed amount of fuel
@@ -20,12 +21,12 @@ fn allocator_param() {
         fuel: usize,
     }
     unsafe impl AllocRef for BoundedAlloc {
-        fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
+        fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
             let size = layout.size();
             if size > self.fuel {
                 return Err(AllocErr);
             }
-            match Global.alloc(layout) {
+            match Global.alloc(layout, init) {
                 ok @ Ok(_) => {
                     self.fuel -= size;
                     ok
@@ -40,9 +41,9 @@ fn allocator_param() {
 
     let a = BoundedAlloc { fuel: 500 };
     let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
-    assert_eq!(v.a.fuel, 450);
+    assert_eq!(v.alloc.fuel, 450);
     v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
-    assert_eq!(v.a.fuel, 250);
+    assert_eq!(v.alloc.fuel, 250);
 }
 
 #[test]
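
`BoundedAlloc` above targets the unstable `AllocRef` trait, whose surface is
still in flux. The same wrap-and-count idea can be sketched on stable Rust
against `GlobalAlloc`; this analogue is not part of the diff:

    use std::alloc::{GlobalAlloc, Layout, System};
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Wraps the system allocator and tracks live bytes; usable via
    // `#[global_allocator] static A: Counting = Counting;`.
    static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
    struct Counting;

    unsafe impl GlobalAlloc for Counting {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            ALLOCATED.fetch_add(layout.size(), Ordering::SeqCst);
            System.alloc(layout)
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            ALLOCATED.fetch_sub(layout.size(), Ordering::SeqCst);
            System.dealloc(ptr, layout)
        }
    }
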
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index e7f7608e676..6a78a7398a6 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -252,7 +252,7 @@ use core::ptr::{self, NonNull};
 use core::slice::{self, from_raw_parts_mut};
 use core::usize;
 
-use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout};
+use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::string::String;
 use crate::vec::Vec;
 
@@ -936,10 +936,12 @@ impl<T: ?Sized> Rc<T> {
         let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
 
         // Allocate for the layout.
-        let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
+        let mem = Global
+            .alloc(layout, AllocInit::Uninitialized)
+            .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the RcBox
-        let inner = mem_to_rcbox(mem.as_ptr());
+        let inner = mem_to_rcbox(mem.ptr.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, Cell::new(1));
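
This hunk, and the matching one in `sync.rs` below, switch to the
two-argument `alloc` that returns a `MemoryBlock`. A hedged sketch of the
call shape under the unstable `allocator_api` as it stands at this commit:

    #![feature(allocator_api)]
    use std::alloc::{AllocInit, AllocRef, Global, Layout};

    fn demo() {
        let layout = Layout::new::<u64>();
        // `alloc` now carries the initialization policy and returns a
        // `MemoryBlock { ptr, size }`; `size` may exceed `layout.size()`.
        let memory = Global.alloc(layout, AllocInit::Zeroed).expect("allocation failed");
        unsafe { Global.dealloc(memory.ptr, layout) };
    }
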
diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs
index d8fc1faca3a..2ce5bc8ed2f 100644
--- a/src/liballoc/slice.rs
+++ b/src/liballoc/slice.rs
@@ -432,7 +432,7 @@ impl<T> [T] {
     ///
     /// ```should_panic
     /// // this will panic at runtime
-    /// b"0123456789abcdef".repeat(usize::max_value());
+    /// b"0123456789abcdef".repeat(usize::MAX);
     /// ```
     #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
     pub fn repeat(&self, n: usize) -> Vec<T>
diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs
index 843a2f1f8e9..70860c09a2c 100644
--- a/src/liballoc/str.rs
+++ b/src/liballoc/str.rs
@@ -499,7 +499,7 @@ impl str {
     ///
     /// ```should_panic
     /// // this will panic at runtime
-    /// "0123456789abcdef".repeat(usize::max_value());
+    /// "0123456789abcdef".repeat(usize::MAX);
     /// ```
     #[stable(feature = "repeat_str", since = "1.16.0")]
     pub fn repeat(&self, n: usize) -> String {
diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs
index 7c89d38caa4..1e5fe125c55 100644
--- a/src/liballoc/string.rs
+++ b/src/liballoc/string.rs
@@ -1849,6 +1849,21 @@ impl<'a, 'b> Pattern<'a> for &'b String {
     fn is_prefix_of(self, haystack: &'a str) -> bool {
         self[..].is_prefix_of(haystack)
     }
+
+    #[inline]
+    fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+        self[..].strip_prefix_of(haystack)
+    }
+
+    #[inline]
+    fn is_suffix_of(self, haystack: &'a str) -> bool {
+        self[..].is_suffix_of(haystack)
+    }
+
+    #[inline]
+    fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
+        self[..].strip_suffix_of(haystack)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index e8985e20256..111a7651b5e 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -25,7 +25,7 @@ use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 use core::{isize, usize};
 
-use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout};
+use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::boxed::Box;
 use crate::rc::is_dangling;
 use crate::string::String;
@@ -814,10 +814,12 @@ impl<T: ?Sized> Arc<T> {
         // reference (see #54908).
         let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
 
-        let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
+        let mem = Global
+            .alloc(layout, AllocInit::Uninitialized)
+            .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the ArcInner
-        let inner = mem_to_arcinner(mem.as_ptr());
+        let inner = mem_to_arcinner(mem.ptr.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
diff --git a/src/liballoc/task.rs b/src/liballoc/task.rs
index 981095302c7..a64d5d7a63b 100644
--- a/src/liballoc/task.rs
+++ b/src/liballoc/task.rs
@@ -12,10 +12,12 @@ use crate::sync::Arc;
 /// to the tasks that are executed on that executor.
 ///
 /// This trait is a memory-safe and ergonomic alternative to constructing a
-/// [`RawWaker`]. It supports the common executor design in which the data
-/// used to wake up a task is stored in an [`Arc`]. Some executors (especially
+/// [`RawWaker`]. It supports the common executor design in which the data used
+/// to wake up a task is stored in an [`Arc`][arc]. Some executors (especially
 /// those for embedded systems) cannot use this API, which is why [`RawWaker`]
 /// exists as an alternative for those systems.
+///
+/// [arc]: ../../std/sync/struct.Arc.html
 #[unstable(feature = "wake_trait", issue = "69912")]
 pub trait Wake {
     /// Wake this task.
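
A minimal sketch of implementing the trait this documentation describes,
assuming the unstable `wake_trait` API at this commit (`Wake` plus the
accompanying `From<Arc<W>> for Waker` impl): waking sets a flag stored
behind the `Arc`.

    #![feature(wake_trait)]
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::task::{Wake, Waker};

    struct Flag(AtomicBool);

    impl Wake for Flag {
        fn wake(self: Arc<Self>) {
            self.0.store(true, Ordering::SeqCst);
        }
    }

    fn main() {
        let flag = Arc::new(Flag(AtomicBool::new(false)));
        let waker = Waker::from(flag.clone());
        waker.wake();
        assert!(flag.0.load(Ordering::SeqCst));
    }
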
diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs
index 535b6a9c314..14f12ca2d77 100644
--- a/src/liballoc/tests/btree/map.rs
+++ b/src/liballoc/tests/btree/map.rs
@@ -5,7 +5,7 @@ use std::fmt::Debug;
 use std::iter::FromIterator;
 use std::ops::Bound::{self, Excluded, Included, Unbounded};
 use std::ops::RangeBounds;
-use std::panic::catch_unwind;
+use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::rc::Rc;
 use std::sync::atomic::{AtomicUsize, Ordering};
 
@@ -528,7 +528,7 @@ fn test_range_1000() {
     #[cfg(not(miri))] // Miri is too slow
     let size = 1000;
     #[cfg(miri)]
-    let size = MIN_INSERTS_HEIGHT_2;
+    let size = MIN_INSERTS_HEIGHT_2 as u32;
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     fn test(map: &BTreeMap<u32, u32>, size: u32, min: Bound<&u32>, max: Bound<&u32>) {
@@ -609,6 +609,263 @@ fn test_range_mut() {
     }
 }
 
+mod test_drain_filter {
+    use super::*;
+
+    #[test]
+    fn empty() {
+        let mut map: BTreeMap<i32, i32> = BTreeMap::new();
+        map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn consuming_nothing() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(map.drain_filter(|_, _| false).eq(std::iter::empty()));
+    }
+
+    #[test]
+    fn consuming_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.clone().collect();
+        assert!(map.drain_filter(|_, _| true).eq(pairs));
+    }
+
+    #[test]
+    fn mutating_and_keeping() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(
+            map.drain_filter(|_, v| {
+                *v += 6;
+                false
+            })
+            .eq(std::iter::empty())
+        );
+        assert!(map.keys().copied().eq(0..3));
+        assert!(map.values().copied().eq(6..9));
+    }
+
+    #[test]
+    fn mutating_and_removing() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(
+            map.drain_filter(|_, v| {
+                *v += 6;
+                true
+            })
+            .eq((0..3).map(|i| (i, i + 6)))
+        );
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn underfull_keeping_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| false);
+        assert!(map.keys().copied().eq(0..3));
+    }
+
+    #[test]
+    fn underfull_removing_one() {
+        let pairs = (0..3).map(|i| (i, i));
+        for doomed in 0..3 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), 2);
+        }
+    }
+
+    #[test]
+    fn underfull_keeping_one() {
+        let pairs = (0..3).map(|i| (i, i));
+        for sacred in 0..3 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn underfull_removing_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_0_keeping_all() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| false);
+        assert!(map.keys().copied().eq(0..NODE_CAPACITY));
+    }
+
+    #[test]
+    fn height_0_removing_one() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        for doomed in 0..NODE_CAPACITY {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), NODE_CAPACITY - 1);
+        }
+    }
+
+    #[test]
+    fn height_0_keeping_one() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        for sacred in 0..NODE_CAPACITY {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn height_0_removing_all() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_0_keeping_half() {
+        let mut map: BTreeMap<_, _> = (0..16).map(|i| (i, i)).collect();
+        assert_eq!(map.drain_filter(|i, _| *i % 2 == 0).count(), 8);
+        assert_eq!(map.len(), 8);
+    }
+
+    #[test]
+    fn height_1_removing_all() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_1_removing_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        for doomed in 0..MIN_INSERTS_HEIGHT_1 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1 - 1);
+        }
+    }
+
+    #[test]
+    fn height_1_keeping_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        for sacred in 0..MIN_INSERTS_HEIGHT_1 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    #[test]
+    fn height_2_removing_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        for doomed in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+        }
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    #[test]
+    fn height_2_keeping_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        for sacred in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn height_2_removing_all() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn drop_panic_leak() {
+        static PREDS: AtomicUsize = AtomicUsize::new(0);
+        static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+        struct D;
+        impl Drop for D {
+            fn drop(&mut self) {
+                if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+                    panic!("panic in `drop`");
+                }
+            }
+        }
+
+        let mut map = BTreeMap::new();
+        map.insert(0, D);
+        map.insert(4, D);
+        map.insert(8, D);
+
+        catch_unwind(move || {
+            drop(map.drain_filter(|i, _| {
+                PREDS.fetch_add(1usize << i, Ordering::SeqCst);
+                true
+            }))
+        })
+        .ok();
+
+        assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+        assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+    }
+
+    #[test]
+    fn pred_panic_leak() {
+        static PREDS: AtomicUsize = AtomicUsize::new(0);
+        static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+        struct D;
+        impl Drop for D {
+            fn drop(&mut self) {
+                DROPS.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+
+        let mut map = BTreeMap::new();
+        map.insert(0, D);
+        map.insert(4, D);
+        map.insert(8, D);
+
+        catch_unwind(AssertUnwindSafe(|| {
+            drop(map.drain_filter(|i, _| {
+                PREDS.fetch_add(1usize << i, Ordering::SeqCst);
+                match i {
+                    0 => true,
+                    _ => panic!(),
+                }
+            }))
+        }))
+        .ok();
+
+        assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+        assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+        assert_eq!(map.len(), 2);
+        assert_eq!(map.first_entry().unwrap().key(), &4);
+        assert_eq!(map.last_entry().unwrap().key(), &8);
+    }
+}
+
 #[test]
 fn test_borrow() {
     // make sure these compile -- using the Borrow trait
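
For orientation alongside these tests, a usage sketch of the API under test
(unstable `btree_drain_filter` at this commit): the predicate receives
`(&K, &mut V)`, removed pairs stream out of the returned iterator, and
retained entries stay in the map.

    #![feature(btree_drain_filter)]
    use std::collections::BTreeMap;

    fn main() {
        let mut map: BTreeMap<i32, i32> = (0..8).map(|i| (i, i)).collect();
        let evens: Vec<_> = map.drain_filter(|k, _| k % 2 == 0).collect();
        assert_eq!(evens.len(), 4);
        assert!(map.keys().all(|k| k % 2 == 1));
    }
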
diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs
index 1a2b62d026b..136018b9f7d 100644
--- a/src/liballoc/tests/btree/set.rs
+++ b/src/liballoc/tests/btree/set.rs
@@ -1,5 +1,7 @@
 use std::collections::BTreeSet;
 use std::iter::FromIterator;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicU32, Ordering};
 
 use super::DeterministicRng;
 
@@ -303,6 +305,85 @@ fn test_is_subset() {
 }
 
 #[test]
+fn test_drain_filter() {
+    let mut x: BTreeSet<_> = [1].iter().copied().collect();
+    let mut y: BTreeSet<_> = [1].iter().copied().collect();
+
+    x.drain_filter(|_| true);
+    y.drain_filter(|_| false);
+    assert_eq!(x.len(), 0);
+    assert_eq!(y.len(), 1);
+}
+
+#[test]
+fn test_drain_filter_drop_panic_leak() {
+    static PREDS: AtomicU32 = AtomicU32::new(0);
+    static DROPS: AtomicU32 = AtomicU32::new(0);
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    struct D(i32);
+    impl Drop for D {
+        fn drop(&mut self) {
+            if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+                panic!("panic in `drop`");
+            }
+        }
+    }
+
+    let mut set = BTreeSet::new();
+    set.insert(D(0));
+    set.insert(D(4));
+    set.insert(D(8));
+
+    catch_unwind(move || {
+        drop(set.drain_filter(|d| {
+            PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
+            true
+        }))
+    })
+    .ok();
+
+    assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+}
+
+#[test]
+fn test_drain_filter_pred_panic_leak() {
+    static PREDS: AtomicU32 = AtomicU32::new(0);
+    static DROPS: AtomicU32 = AtomicU32::new(0);
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    struct D(i32);
+    impl Drop for D {
+        fn drop(&mut self) {
+            DROPS.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    let mut set = BTreeSet::new();
+    set.insert(D(0));
+    set.insert(D(4));
+    set.insert(D(8));
+
+    catch_unwind(AssertUnwindSafe(|| {
+        drop(set.drain_filter(|d| {
+            PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
+            match d.0 {
+                0 => true,
+                _ => panic!(),
+            }
+        }))
+    }))
+    .ok();
+
+    assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+    assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+    assert_eq!(set.len(), 2);
+    assert_eq!(set.first().unwrap().0, 4);
+    assert_eq!(set.last().unwrap().0, 8);
+}
+
+#[test]
 fn test_clear() {
     let mut x = BTreeSet::new();
     x.insert(1);
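
These set tests mirror the map tests above, with one API difference worth
noting: the set predicate sees only `&T`, so elements cannot be mutated
while deciding. A usage sketch under the same unstable feature:

    #![feature(btree_drain_filter)]
    use std::collections::BTreeSet;

    fn main() {
        let mut set: BTreeSet<i32> = (0..8).collect();
        let evens: Vec<i32> = set.drain_filter(|v| v % 2 == 0).collect();
        assert_eq!(evens, vec![0, 2, 4, 6]);
        assert_eq!(set.len(), 4);
    }
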
diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs
index d159126f426..62f062b83d7 100644
--- a/src/liballoc/tests/heap.rs
+++ b/src/liballoc/tests/heap.rs
@@ -1,4 +1,4 @@
-use std::alloc::{AllocRef, Global, Layout, System};
+use std::alloc::{AllocInit, AllocRef, Global, Layout, System};
 
 /// Issue #45955 and #62251.
 #[test]
@@ -20,7 +20,13 @@ fn check_overalign_requests<T: AllocRef>(mut allocator: T) {
             unsafe {
                 let pointers: Vec<_> = (0..iterations)
                     .map(|_| {
-                        allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap().0
+                        allocator
+                            .alloc(
+                                Layout::from_size_align(size, align).unwrap(),
+                                AllocInit::Uninitialized,
+                            )
+                            .unwrap()
+                            .ptr
                     })
                     .collect();
                 for &ptr in &pointers {
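
The loop above pushes over-aligned requests through the unstable `AllocRef`
API. The invariant it checks, that the returned pointer honours an alignment
larger than the size, can be demonstrated on stable via `std::alloc`:

    use std::alloc::{alloc, dealloc, Layout};

    fn main() {
        let layout = Layout::from_size_align(8, 256).unwrap();
        unsafe {
            let ptr = alloc(layout);
            assert!(!ptr.is_null());
            assert_eq!(ptr as usize % layout.align(), 0); // over-alignment honoured
            dealloc(ptr, layout);
        }
    }
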
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index ea75f8903c3..ad6feaeebc6 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -1,5 +1,6 @@
 #![feature(allocator_api)]
 #![feature(box_syntax)]
+#![feature(btree_drain_filter)]
 #![feature(drain_filter)]
 #![feature(exact_size_is_empty)]
 #![feature(map_first_last)]
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index e171edef736..96a6399d051 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -679,8 +679,9 @@ impl<T> Vec<T> {
         unsafe {
             self.shrink_to_fit();
             let buf = ptr::read(&self.buf);
+            let len = self.len();
             mem::forget(self);
-            buf.into_box()
+            buf.into_box(len).assume_init()
         }
     }
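
Threading `len` through is what makes the final `assume_init` sound:
`shrink_to_fit` runs first, so the resulting box covers exactly `len`
initialized elements. Observable behavior of `into_boxed_slice` is
unchanged; a quick check:

    fn main() {
        let mut v = Vec::with_capacity(10);
        v.extend(1..=3);
        let b: Box<[i32]> = v.into_boxed_slice();
        assert_eq!(b.len(), 3); // spare capacity was shrunk away
        assert_eq!(&*b, &[1, 2, 3]);
    }
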