about summary refs log tree commit diff
path: root/src/liballoc/tests
diff options
context:
space:
mode:
Diffstat (limited to 'src/liballoc/tests')
-rw-r--r--src/liballoc/tests/arc.rs2
-rw-r--r--src/liballoc/tests/binary_heap.rs16
-rw-r--r--src/liballoc/tests/btree/map.rs422
-rw-r--r--src/liballoc/tests/btree/set.rs87
-rw-r--r--src/liballoc/tests/heap.rs10
-rw-r--r--src/liballoc/tests/lib.rs2
-rw-r--r--src/liballoc/tests/rc.rs2
-rw-r--r--src/liballoc/tests/slice.rs24
-rw-r--r--src/liballoc/tests/string.rs7
-rw-r--r--src/liballoc/tests/vec.rs74
-rw-r--r--src/liballoc/tests/vec_deque.rs17
11 files changed, 536 insertions, 127 deletions
diff --git a/src/liballoc/tests/arc.rs b/src/liballoc/tests/arc.rs
index 34384cfcba9..c02ba267056 100644
--- a/src/liballoc/tests/arc.rs
+++ b/src/liballoc/tests/arc.rs
@@ -50,7 +50,7 @@ fn trait_object() {
 
 #[test]
 fn float_nan_ne() {
-    let x = Arc::new(std::f32::NAN);
+    let x = Arc::new(f32::NAN);
     assert!(x != x);
     assert!(!(x == x));
 }
diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs
index be5516f54f3..62084ccf53c 100644
--- a/src/liballoc/tests/binary_heap.rs
+++ b/src/liballoc/tests/binary_heap.rs
@@ -372,6 +372,14 @@ fn assert_covariance() {
     }
 }
 
+#[test]
+fn test_retain() {
+    let mut a = BinaryHeap::from(vec![-10, -5, 1, 2, 4, 13]);
+    a.retain(|x| x % 2 == 0);
+
+    assert_eq!(a.into_sorted_vec(), [-10, 2, 4])
+}
+
 // old binaryheap failed this test
 //
 // Integrity means that all elements are present after a comparison panics,
@@ -409,16 +417,14 @@ fn panic_safe() {
     }
     let mut rng = thread_rng();
     const DATASZ: usize = 32;
-    #[cfg(not(miri))] // Miri is too slow
-    const NTEST: usize = 10;
-    #[cfg(miri)]
-    const NTEST: usize = 1;
+    // Miri is too slow
+    let ntest = if cfg!(miri) { 1 } else { 10 };
 
     // don't use 0 in the data -- we want to catch the zeroed-out case.
     let data = (1..=DATASZ).collect::<Vec<_>>();
 
     // since it's a fuzzy test, run several tries.
-    for _ in 0..NTEST {
+    for _ in 0..ntest {
         for i in 1..=DATASZ {
             DROP_COUNTER.store(0, Ordering::SeqCst);
 
diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs
index fd07a4d3926..731a1b5f875 100644
--- a/src/liballoc/tests/btree/map.rs
+++ b/src/liballoc/tests/btree/map.rs
@@ -5,19 +5,31 @@ use std::fmt::Debug;
 use std::iter::FromIterator;
 use std::ops::Bound::{self, Excluded, Included, Unbounded};
 use std::ops::RangeBounds;
-use std::panic::catch_unwind;
+use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::rc::Rc;
-use std::sync::atomic::{AtomicU32, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
 
 use super::DeterministicRng;
 
+// Value of node::CAPACITY, thus capacity of a tree with a single level,
+// i.e. a tree whose root is a leaf node at height 0.
+const NODE_CAPACITY: usize = 11;
+
+// Minimum number of elements to insert in order to guarantee a tree with 2 levels,
+// i.e. a tree whose root is an internal node at height 1, with edges to leaf nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
+const MIN_INSERTS_HEIGHT_1: usize = NODE_CAPACITY + 1;
+
+// Minimum number of elements to insert in order to guarantee a tree with 3 levels,
+// i.e. a tree whose root is an internal node at height 2, with edges to more internal nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
+const MIN_INSERTS_HEIGHT_2: usize = NODE_CAPACITY + (NODE_CAPACITY + 1) * NODE_CAPACITY + 1;
+
 #[test]
 fn test_basic_large() {
     let mut map = BTreeMap::new();
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 144; // to obtain height 3 tree (having edges to both kinds of nodes)
+    // Miri is too slow
+    let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 } else { 10000 };
     assert_eq!(map.len(), 0);
 
     for i in 0..size {
@@ -67,7 +79,7 @@ fn test_basic_large() {
 #[test]
 fn test_basic_small() {
     let mut map = BTreeMap::new();
-    // Empty, shared root:
+    // Empty, root is absent (None):
     assert_eq!(map.remove(&1), None);
     assert_eq!(map.len(), 0);
     assert_eq!(map.get(&1), None);
@@ -123,7 +135,7 @@ fn test_basic_small() {
     assert_eq!(map.values().collect::<Vec<_>>(), vec![&4]);
     assert_eq!(map.remove(&2), Some(4));
 
-    // Empty but private root:
+    // Empty but root is owned (Some(...)):
     assert_eq!(map.len(), 0);
     assert_eq!(map.get(&1), None);
     assert_eq!(map.get_mut(&1), None);
@@ -141,10 +153,8 @@ fn test_basic_small() {
 
 #[test]
 fn test_iter() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 200;
+    // Miri is too slow
+    let size = if cfg!(miri) { 200 } else { 10000 };
 
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
@@ -166,10 +176,8 @@ fn test_iter() {
 
 #[test]
 fn test_iter_rev() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 200;
+    // Miri is too slow
+    let size = if cfg!(miri) { 200 } else { 10000 };
 
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
@@ -237,37 +245,26 @@ impl TryFrom<usize> for Align32 {
 
 #[test]
 fn test_iter_mut_mutation() {
-    // Check many alignments because various fields precede array in NodeHeader.
-    // Check with size 0 which should not iterate at all.
-    // Check with size 1 for a tree with one kind of node (root = leaf).
-    // Check with size 12 for a tree with two kinds of nodes (root and leaves).
-    // Check with size 144 for a tree with all kinds of nodes (root, internals and leaves).
+    // Check many alignments and trees with roots at various heights.
     do_test_iter_mut_mutation::<u8>(0);
     do_test_iter_mut_mutation::<u8>(1);
-    do_test_iter_mut_mutation::<u8>(12);
-    do_test_iter_mut_mutation::<u8>(127); // not enough unique values to test 144
+    do_test_iter_mut_mutation::<u8>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u8>(127); // not enough unique values to test MIN_INSERTS_HEIGHT_2
     do_test_iter_mut_mutation::<u16>(1);
-    do_test_iter_mut_mutation::<u16>(12);
-    do_test_iter_mut_mutation::<u16>(144);
+    do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<u32>(1);
-    do_test_iter_mut_mutation::<u32>(12);
-    do_test_iter_mut_mutation::<u32>(144);
+    do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<u64>(1);
-    do_test_iter_mut_mutation::<u64>(12);
-    do_test_iter_mut_mutation::<u64>(144);
+    do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<u128>(1);
-    do_test_iter_mut_mutation::<u128>(12);
-    do_test_iter_mut_mutation::<u128>(144);
+    do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_2);
     do_test_iter_mut_mutation::<Align32>(1);
-    do_test_iter_mut_mutation::<Align32>(12);
-    do_test_iter_mut_mutation::<Align32>(144);
-}
-
-#[test]
-fn test_into_key_slice_with_shared_root_past_bounds() {
-    let mut map: BTreeMap<Align32, ()> = BTreeMap::new();
-    assert_eq!(map.get(&Align32(1)), None);
-    assert_eq!(map.get_mut(&Align32(1)), None);
+    do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_1);
+    do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_2);
 }
 
 #[test]
@@ -286,10 +283,8 @@ fn test_values_mut() {
 
 #[test]
 fn test_iter_mixed() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 10000;
-    #[cfg(miri)]
-    let size = 200;
+    // Miri is too slow
+    let size = if cfg!(miri) { 200 } else { 10000 };
 
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
@@ -383,12 +378,11 @@ fn test_range_small() {
 }
 
 #[test]
-fn test_range_height_2() {
-    // Assuming that node.CAPACITY is 11, having 12 pairs implies a height 2 tree
-    // with 2 leaves. Depending on details we don't want or need to rely upon,
-    // the single key at the root will be 6 or 7.
+fn test_range_height_1() {
+    // Tests tree with a root and 2 leaves. Depending on details we don't want or need
+    // to rely upon, the single key at the root will be 6 or 7.
 
-    let map: BTreeMap<_, _> = (1..=12).map(|i| (i, i)).collect();
+    let map: BTreeMap<_, _> = (1..=MIN_INSERTS_HEIGHT_1 as i32).map(|i| (i, i)).collect();
     for &root in &[6, 7] {
         assert_eq!(range_keys(&map, (Excluded(root), Excluded(root + 1))), vec![]);
         assert_eq!(range_keys(&map, (Excluded(root), Included(root + 1))), vec![root + 1]);
@@ -473,7 +467,7 @@ fn test_range_large() {
 
 #[test]
 fn test_range_inclusive_max_value() {
-    let max = std::usize::MAX;
+    let max = usize::MAX;
     let map: BTreeMap<_, _> = vec![(max, 0)].into_iter().collect();
 
     assert_eq!(map.range(max..=max).collect::<Vec<_>>(), &[(&max, &0)]);
@@ -523,10 +517,8 @@ fn test_range_backwards_4() {
 
 #[test]
 fn test_range_1000() {
-    #[cfg(not(miri))] // Miri is too slow
-    let size = 1000;
-    #[cfg(miri)]
-    let size = 144; // to obtain height 3 tree (having edges to both kinds of nodes)
+    // Miri is too slow
+    let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 as u32 } else { 1000 };
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     fn test(map: &BTreeMap<u32, u32>, size: u32, min: Bound<&u32>, max: Bound<&u32>) {
@@ -564,10 +556,8 @@ fn test_range_borrowed_key() {
 #[test]
 fn test_range() {
     let size = 200;
-    #[cfg(not(miri))] // Miri is too slow
-    let step = 1;
-    #[cfg(miri)]
-    let step = 66;
+    // Miri is too slow
+    let step = if cfg!(miri) { 66 } else { 1 };
     let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     for i in (0..size).step_by(step) {
@@ -587,10 +577,8 @@ fn test_range() {
 #[test]
 fn test_range_mut() {
     let size = 200;
-    #[cfg(not(miri))] // Miri is too slow
-    let step = 1;
-    #[cfg(miri)]
-    let step = 66;
+    // Miri is too slow
+    let step = if cfg!(miri) { 66 } else { 1 };
     let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
 
     for i in (0..size).step_by(step) {
@@ -607,6 +595,263 @@ fn test_range_mut() {
     }
 }
 
+mod test_drain_filter {
+    use super::*;
+
+    #[test]
+    fn empty() {
+        let mut map: BTreeMap<i32, i32> = BTreeMap::new();
+        map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn consuming_nothing() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(map.drain_filter(|_, _| false).eq(std::iter::empty()));
+    }
+
+    #[test]
+    fn consuming_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.clone().collect();
+        assert!(map.drain_filter(|_, _| true).eq(pairs));
+    }
+
+    #[test]
+    fn mutating_and_keeping() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(
+            map.drain_filter(|_, v| {
+                *v += 6;
+                false
+            })
+            .eq(std::iter::empty())
+        );
+        assert!(map.keys().copied().eq(0..3));
+        assert!(map.values().copied().eq(6..9));
+    }
+
+    #[test]
+    fn mutating_and_removing() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        assert!(
+            map.drain_filter(|_, v| {
+                *v += 6;
+                true
+            })
+            .eq((0..3).map(|i| (i, i + 6)))
+        );
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn underfull_keeping_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| false);
+        assert!(map.keys().copied().eq(0..3));
+    }
+
+    #[test]
+    fn underfull_removing_one() {
+        let pairs = (0..3).map(|i| (i, i));
+        for doomed in 0..3 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), 2);
+        }
+    }
+
+    #[test]
+    fn underfull_keeping_one() {
+        let pairs = (0..3).map(|i| (i, i));
+        for sacred in 0..3 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn underfull_removing_all() {
+        let pairs = (0..3).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_0_keeping_all() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| false);
+        assert!(map.keys().copied().eq(0..NODE_CAPACITY));
+    }
+
+    #[test]
+    fn height_0_removing_one() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        for doomed in 0..NODE_CAPACITY {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), NODE_CAPACITY - 1);
+        }
+    }
+
+    #[test]
+    fn height_0_keeping_one() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        for sacred in 0..NODE_CAPACITY {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn height_0_removing_all() {
+        let pairs = (0..NODE_CAPACITY).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_0_keeping_half() {
+        let mut map: BTreeMap<_, _> = (0..16).map(|i| (i, i)).collect();
+        assert_eq!(map.drain_filter(|i, _| *i % 2 == 0).count(), 8);
+        assert_eq!(map.len(), 8);
+    }
+
+    #[test]
+    fn height_1_removing_all() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn height_1_removing_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        for doomed in 0..MIN_INSERTS_HEIGHT_1 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1 - 1);
+        }
+    }
+
+    #[test]
+    fn height_1_keeping_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+        for sacred in 0..MIN_INSERTS_HEIGHT_1 {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    #[test]
+    fn height_2_removing_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        for doomed in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i == doomed);
+            assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+        }
+    }
+
+    #[cfg(not(miri))] // Miri is too slow
+    #[test]
+    fn height_2_keeping_one() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        for sacred in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+            let mut map: BTreeMap<_, _> = pairs.clone().collect();
+            map.drain_filter(|i, _| *i != sacred);
+            assert!(map.keys().copied().eq(sacred..=sacred));
+        }
+    }
+
+    #[test]
+    fn height_2_removing_all() {
+        let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+        let mut map: BTreeMap<_, _> = pairs.collect();
+        map.drain_filter(|_, _| true);
+        assert!(map.is_empty());
+    }
+
+    #[test]
+    fn drop_panic_leak() {
+        static PREDS: AtomicUsize = AtomicUsize::new(0);
+        static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+        struct D;
+        impl Drop for D {
+            fn drop(&mut self) {
+                if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+                    panic!("panic in `drop`");
+                }
+            }
+        }
+
+        let mut map = BTreeMap::new();
+        map.insert(0, D);
+        map.insert(4, D);
+        map.insert(8, D);
+
+        catch_unwind(move || {
+            drop(map.drain_filter(|i, _| {
+                PREDS.fetch_add(1usize << i, Ordering::SeqCst);
+                true
+            }))
+        })
+        .ok();
+
+        assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+        assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+    }
+
+    #[test]
+    fn pred_panic_leak() {
+        static PREDS: AtomicUsize = AtomicUsize::new(0);
+        static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+        struct D;
+        impl Drop for D {
+            fn drop(&mut self) {
+                DROPS.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+
+        let mut map = BTreeMap::new();
+        map.insert(0, D);
+        map.insert(4, D);
+        map.insert(8, D);
+
+        catch_unwind(AssertUnwindSafe(|| {
+            drop(map.drain_filter(|i, _| {
+                PREDS.fetch_add(1usize << i, Ordering::SeqCst);
+                match i {
+                    0 => true,
+                    _ => panic!(),
+                }
+            }))
+        }))
+        .ok();
+
+        assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+        assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+        assert_eq!(map.len(), 2);
+        assert_eq!(map.first_entry().unwrap().key(), &4);
+        assert_eq!(map.last_entry().unwrap().key(), &8);
+    }
+}
+
 #[test]
 fn test_borrow() {
     // make sure these compile -- using the Borrow trait
@@ -762,7 +1007,7 @@ fn test_bad_zst() {
 #[test]
 fn test_clone() {
     let mut map = BTreeMap::new();
-    let size = 12; // to obtain height 2 tree (having edges to leaf nodes)
+    let size = MIN_INSERTS_HEIGHT_1;
     assert_eq!(map.len(), 0);
 
     for i in 0..size {
@@ -790,20 +1035,19 @@ fn test_clone() {
         assert_eq!(map, map.clone());
     }
 
-    // Full 2-level and minimal 3-level tree (sizes 143, 144 -- the only ones we clone for).
-    for i in 1..=144 {
-        assert_eq!(map.insert(i, i), None);
-        assert_eq!(map.len(), i);
-        if i >= 143 {
-            assert_eq!(map, map.clone());
-        }
-    }
+    // Test a tree with 2 chock-full levels and a tree with 3 levels.
+    map = (1..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)).collect();
+    assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+    assert_eq!(map, map.clone());
+    map.insert(0, 0);
+    assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2);
+    assert_eq!(map, map.clone());
 }
 
 #[test]
 fn test_clone_from() {
     let mut map1 = BTreeMap::new();
-    let max_size = 12; // to obtain height 2 tree (having edges to leaf nodes)
+    let max_size = MIN_INSERTS_HEIGHT_1;
 
     // Range to max_size inclusive, because i is the size of map1 being tested.
     for i in 0..=max_size {
@@ -1005,10 +1249,8 @@ fn test_split_off_empty_left() {
 
 #[test]
 fn test_split_off_large_random_sorted() {
-    #[cfg(not(miri))] // Miri is too slow
-    let mut data = rand_data(1529);
-    #[cfg(miri)]
-    let mut data = rand_data(529);
+    // Miri is too slow
+    let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
     // special case with maximum height.
     data.sort();
 
@@ -1021,8 +1263,8 @@ fn test_split_off_large_random_sorted() {
 }
 
 #[test]
-fn test_into_iter_drop_leak() {
-    static DROPS: AtomicU32 = AtomicU32::new(0);
+fn test_into_iter_drop_leak_height_0() {
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
 
     struct D;
 
@@ -1045,3 +1287,27 @@ fn test_into_iter_drop_leak() {
 
     assert_eq!(DROPS.load(Ordering::SeqCst), 5);
 }
+
+#[test]
+fn test_into_iter_drop_leak_height_1() {
+    let size = MIN_INSERTS_HEIGHT_1;
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
+    static PANIC_POINT: AtomicUsize = AtomicUsize::new(0);
+
+    struct D;
+    impl Drop for D {
+        fn drop(&mut self) {
+            if DROPS.fetch_add(1, Ordering::SeqCst) == PANIC_POINT.load(Ordering::SeqCst) {
+                panic!("panic in `drop`");
+            }
+        }
+    }
+
+    for panic_point in vec![0, 1, size - 2, size - 1] {
+        DROPS.store(0, Ordering::SeqCst);
+        PANIC_POINT.store(panic_point, Ordering::SeqCst);
+        let map: BTreeMap<_, _> = (0..size).map(|i| (i, D)).collect();
+        catch_unwind(move || drop(map.into_iter())).ok();
+        assert_eq!(DROPS.load(Ordering::SeqCst), size);
+    }
+}
diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs
index 1a2b62d026b..75251ca0d51 100644
--- a/src/liballoc/tests/btree/set.rs
+++ b/src/liballoc/tests/btree/set.rs
@@ -1,5 +1,7 @@
 use std::collections::BTreeSet;
 use std::iter::FromIterator;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicU32, Ordering};
 
 use super::DeterministicRng;
 
@@ -303,6 +305,85 @@ fn test_is_subset() {
 }
 
 #[test]
+fn test_drain_filter() {
+    let mut x: BTreeSet<_> = [1].iter().copied().collect();
+    let mut y: BTreeSet<_> = [1].iter().copied().collect();
+
+    x.drain_filter(|_| true);
+    y.drain_filter(|_| false);
+    assert_eq!(x.len(), 0);
+    assert_eq!(y.len(), 1);
+}
+
+#[test]
+fn test_drain_filter_drop_panic_leak() {
+    static PREDS: AtomicU32 = AtomicU32::new(0);
+    static DROPS: AtomicU32 = AtomicU32::new(0);
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    struct D(i32);
+    impl Drop for D {
+        fn drop(&mut self) {
+            if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+                panic!("panic in `drop`");
+            }
+        }
+    }
+
+    let mut set = BTreeSet::new();
+    set.insert(D(0));
+    set.insert(D(4));
+    set.insert(D(8));
+
+    catch_unwind(move || {
+        drop(set.drain_filter(|d| {
+            PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
+            true
+        }))
+    })
+    .ok();
+
+    assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+}
+
+#[test]
+fn test_drain_filter_pred_panic_leak() {
+    static PREDS: AtomicU32 = AtomicU32::new(0);
+    static DROPS: AtomicU32 = AtomicU32::new(0);
+
+    #[derive(PartialEq, Eq, PartialOrd, Ord)]
+    struct D(i32);
+    impl Drop for D {
+        fn drop(&mut self) {
+            DROPS.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    let mut set = BTreeSet::new();
+    set.insert(D(0));
+    set.insert(D(4));
+    set.insert(D(8));
+
+    catch_unwind(AssertUnwindSafe(|| {
+        drop(set.drain_filter(|d| {
+            PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
+            match d.0 {
+                0 => true,
+                _ => panic!(),
+            }
+        }))
+    }))
+    .ok();
+
+    assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
+    assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+    assert_eq!(set.len(), 2);
+    assert_eq!(set.first().unwrap().0, 4);
+    assert_eq!(set.last().unwrap().0, 8);
+}
+
+#[test]
 fn test_clear() {
     let mut x = BTreeSet::new();
     x.insert(1);
@@ -540,10 +621,8 @@ fn test_split_off_empty_left() {
 
 #[test]
 fn test_split_off_large_random_sorted() {
-    #[cfg(not(miri))] // Miri is too slow
-    let mut data = rand_data(1529);
-    #[cfg(miri)]
-    let mut data = rand_data(529);
+    // Miri is too slow
+    let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
     // special case with maximum height.
     data.sort();
 
diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs
index 7fcfcf9b294..62f062b83d7 100644
--- a/src/liballoc/tests/heap.rs
+++ b/src/liballoc/tests/heap.rs
@@ -1,4 +1,4 @@
-use std::alloc::{AllocRef, Global, Layout, System};
+use std::alloc::{AllocInit, AllocRef, Global, Layout, System};
 
 /// Issue #45955 and #62251.
 #[test]
@@ -20,7 +20,13 @@ fn check_overalign_requests<T: AllocRef>(mut allocator: T) {
             unsafe {
                 let pointers: Vec<_> = (0..iterations)
                     .map(|_| {
-                        allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
+                        allocator
+                            .alloc(
+                                Layout::from_size_align(size, align).unwrap(),
+                                AllocInit::Uninitialized,
+                            )
+                            .unwrap()
+                            .ptr
                     })
                     .collect();
                 for &ptr in &pointers {
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index ea75f8903c3..78d49558262 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -1,5 +1,6 @@
 #![feature(allocator_api)]
 #![feature(box_syntax)]
+#![feature(btree_drain_filter)]
 #![feature(drain_filter)]
 #![feature(exact_size_is_empty)]
 #![feature(map_first_last)]
@@ -13,6 +14,7 @@
 #![feature(binary_heap_drain_sorted)]
 #![feature(vec_remove_item)]
 #![feature(split_inclusive)]
+#![feature(binary_heap_retain)]
 
 use std::collections::hash_map::DefaultHasher;
 use std::hash::{Hash, Hasher};
diff --git a/src/liballoc/tests/rc.rs b/src/liballoc/tests/rc.rs
index 884856cd1b4..501b4f0f816 100644
--- a/src/liballoc/tests/rc.rs
+++ b/src/liballoc/tests/rc.rs
@@ -50,7 +50,7 @@ fn trait_object() {
 
 #[test]
 fn float_nan_ne() {
-    let x = Rc::new(std::f32::NAN);
+    let x = Rc::new(f32::NAN);
     assert!(x != x);
     assert!(!(x == x));
 }
diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs
index 3d6b4bff5e0..75b76bb73ed 100644
--- a/src/liballoc/tests/slice.rs
+++ b/src/liballoc/tests/slice.rs
@@ -463,15 +463,9 @@ fn test_sort() {
 
 #[test]
 fn test_sort_stability() {
-    #[cfg(not(miri))] // Miri is too slow
-    let large_range = 500..510;
-    #[cfg(not(miri))] // Miri is too slow
-    let rounds = 10;
-
-    #[cfg(miri)]
-    let large_range = 0..0; // empty range
-    #[cfg(miri)]
-    let rounds = 1;
+    // Miri is too slow
+    let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
+    let rounds = if cfg!(miri) { 1 } else { 10 };
 
     for len in (2..25).chain(large_range) {
         for _ in 0..rounds {
@@ -1727,15 +1721,9 @@ fn panic_safe() {
 
     let mut rng = thread_rng();
 
-    #[cfg(not(miri))] // Miri is too slow
-    let lens = (1..20).chain(70..MAX_LEN);
-    #[cfg(not(miri))] // Miri is too slow
-    let moduli = &[5, 20, 50];
-
-    #[cfg(miri)]
-    let lens = 1..13;
-    #[cfg(miri)]
-    let moduli = &[10];
+    // Miri is too slow
+    let lens = if cfg!(miri) { (1..10).chain(20..21) } else { (1..20).chain(70..MAX_LEN) };
+    let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
 
     for len in lens {
         for &modulus in moduli {
diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs
index 08859b2b24b..9ea020d2d19 100644
--- a/src/liballoc/tests/string.rs
+++ b/src/liballoc/tests/string.rs
@@ -1,7 +1,6 @@
 use std::borrow::Cow;
 use std::collections::TryReserveError::*;
 use std::mem::size_of;
-use std::{isize, usize};
 
 pub trait IntoCow<'a, B: ?Sized>
 where
@@ -266,14 +265,14 @@ fn test_split_off_empty() {
 fn test_split_off_past_end() {
     let orig = "Hello, world!";
     let mut split = String::from(orig);
-    split.split_off(orig.len() + 1);
+    let _ = split.split_off(orig.len() + 1);
 }
 
 #[test]
 #[should_panic]
 fn test_split_off_mid_char() {
     let mut orig = String::from("山");
-    orig.split_off(1);
+    let _ = orig.split_off(1);
 }
 
 #[test]
@@ -556,6 +555,7 @@ fn test_reserve_exact() {
 
 #[test]
 #[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve() {
     // These are the interesting cases:
     // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM)
@@ -645,6 +645,7 @@ fn test_try_reserve() {
 
 #[test]
 #[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve_exact() {
     // This is exactly the same as test_try_reserve with the method changed.
     // See that test for comments.
diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs
index 9c4ac52acac..3d76324f7e8 100644
--- a/src/liballoc/tests/vec.rs
+++ b/src/liballoc/tests/vec.rs
@@ -3,7 +3,6 @@ use std::collections::TryReserveError::*;
 use std::mem::size_of;
 use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::vec::{Drain, IntoIter};
-use std::{isize, usize};
 
 struct DropCounter<'a> {
     count: &'a mut u32,
@@ -1138,6 +1137,7 @@ fn test_reserve_exact() {
 
 #[test]
 #[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve() {
     // These are the interesting cases:
     // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM)
@@ -1255,6 +1255,7 @@ fn test_try_reserve() {
 
 #[test]
 #[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve_exact() {
     // This is exactly the same as test_try_reserve with the method changed.
     // See that test for comments.
@@ -1351,17 +1352,26 @@ fn test_try_reserve_exact() {
 }
 
 #[test]
-fn test_stable_push_pop() {
+fn test_stable_pointers() {
+    /// Pull an element from the iterator, then drop it.
+    /// Useful to cover both the `next` and `drop` paths of an iterator.
+    fn next_then_drop<I: Iterator>(mut i: I) {
+        i.next().unwrap();
+        drop(i);
+    }
+
     // Test that, if we reserved enough space, adding and removing elements does not
     // invalidate references into the vector (such as `v0`).  This test also
     // runs in Miri, which would detect such problems.
-    let mut v = Vec::with_capacity(10);
+    let mut v = Vec::with_capacity(128);
     v.push(13);
 
-    // laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
-    let v0 = unsafe { &*(&v[0] as *const _) };
-
+    // Laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
+    let v0 = &mut v[0];
+    let v0 = unsafe { &mut *(v0 as *mut _) };
     // Now do a bunch of things and occasionally use `v0` again to assert it is still valid.
+
+    // Pushing/inserting and popping/removing
     v.push(1);
     v.push(2);
     v.insert(1, 1);
@@ -1369,6 +1379,58 @@ fn test_stable_push_pop() {
     v.remove(1);
     v.pop().unwrap();
     assert_eq!(*v0, 13);
+    v.push(1);
+    v.swap_remove(1);
+    assert_eq!(v.len(), 2);
+    v.swap_remove(1); // swap_remove the last element
+    assert_eq!(*v0, 13);
+
+    // Appending
+    v.append(&mut vec![27, 19]);
+    assert_eq!(*v0, 13);
+
+    // Extending
+    v.extend_from_slice(&[1, 2]);
+    v.extend(&[1, 2]); // `slice::Iter` (with `T: Copy`) specialization
+    v.extend(vec![2, 3]); // `vec::IntoIter` specialization
+    v.extend(std::iter::once(3)); // `TrustedLen` specialization
+    v.extend(std::iter::empty::<i32>()); // `TrustedLen` specialization with empty iterator
+    v.extend(std::iter::once(3).filter(|_| true)); // base case
+    v.extend(std::iter::once(&3)); // `cloned` specialization
+    assert_eq!(*v0, 13);
+
+    // Truncation
+    v.truncate(2);
+    assert_eq!(*v0, 13);
+
+    // Resizing
+    v.resize_with(v.len() + 10, || 42);
+    assert_eq!(*v0, 13);
+    v.resize_with(2, || panic!());
+    assert_eq!(*v0, 13);
+
+    // No-op reservation
+    v.reserve(32);
+    v.reserve_exact(32);
+    assert_eq!(*v0, 13);
+
+    // Partial draining
+    v.resize_with(10, || 42);
+    next_then_drop(v.drain(5..));
+    assert_eq!(*v0, 13);
+
+    // Splicing
+    v.resize_with(10, || 42);
+    next_then_drop(v.splice(5.., vec![1, 2, 3, 4, 5])); // empty tail after range
+    assert_eq!(*v0, 13);
+    next_then_drop(v.splice(5..8, vec![1])); // replacement is smaller than original range
+    assert_eq!(*v0, 13);
+    next_then_drop(v.splice(5..6, vec![1; 10].into_iter().filter(|_| true))); // lower bound not exact
+    assert_eq!(*v0, 13);
+
+    // Smoke test that would fire even outside Miri if an actual relocation happened.
+    *v0 -= 13;
+    assert_eq!(v[0], 0);
 }
 
 // https://github.com/rust-lang/rust/pull/49496 introduced specialization based on:
diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs
index 101dd67d97a..762dc4be44d 100644
--- a/src/liballoc/tests/vec_deque.rs
+++ b/src/liballoc/tests/vec_deque.rs
@@ -3,7 +3,6 @@ use std::collections::{vec_deque::Drain, VecDeque};
 use std::fmt::Debug;
 use std::mem::size_of;
 use std::panic::{catch_unwind, AssertUnwindSafe};
-use std::{isize, usize};
 
 use crate::hash;
 
@@ -955,16 +954,14 @@ fn test_append_permutations() {
         out
     }
 
-    #[cfg(not(miri))] // Miri is too slow
-    const MAX: usize = 5;
-    #[cfg(miri)]
-    const MAX: usize = 3;
+    // Miri is too slow
+    let max = if cfg!(miri) { 3 } else { 5 };
 
     // Many different permutations of both the `VecDeque` getting appended to
     // and the one getting appended are generated to check `append`.
     // This ensures all 6 code paths of `append` are tested.
-    for src_push_back in 0..MAX {
-        for src_push_front in 0..MAX {
+    for src_push_back in 0..max {
+        for src_push_front in 0..max {
             // doesn't pop more values than are pushed
             for src_pop_back in 0..(src_push_back + src_push_front) {
                 for src_pop_front in 0..(src_push_back + src_push_front - src_pop_back) {
@@ -975,8 +972,8 @@ fn test_append_permutations() {
                         src_pop_front,
                     );
 
-                    for dst_push_back in 0..MAX {
-                        for dst_push_front in 0..MAX {
+                    for dst_push_back in 0..max {
+                        for dst_push_front in 0..max {
                             for dst_pop_back in 0..(dst_push_back + dst_push_front) {
                                 for dst_pop_front in
                                     0..(dst_push_back + dst_push_front - dst_pop_back)
@@ -1135,6 +1132,7 @@ fn test_reserve_exact_2() {
 
 #[test]
 #[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve() {
     // These are the interesting cases:
     // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM)
@@ -1249,6 +1247,7 @@ fn test_try_reserve() {
 
 #[test]
 #[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
 fn test_try_reserve_exact() {
     // This is exactly the same as test_try_reserve with the method changed.
     // See that test for comments.