| field | value | date |
|---|---|---|
| author | Corey Farwell <coreyf@rwell.org> | 2020-12-31 23:27:33 -0500 |
| committer | Corey Farwell <coreyf@rwell.org> | 2020-12-31 23:27:33 -0500 |
| commit | d482de30ea70d537dced8ec04a3903e3264cf106 (patch) | |
| tree | 67d6cd380ef7a66e785a54993bb0ca93b07b43ec /library/alloc/src | |
| parent | 26cc060756d0456b17fdc53ac5d34e7f7bdc873d (diff) | |
| parent | 99ad5a1a2824fea1ecf60068fd3636beae7ea2da (diff) | |
Merge remote-tracking branch 'origin/master' into frewsxcv-san
Diffstat (limited to 'library/alloc/src')
39 files changed, 1719 insertions, 1676 deletions
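Among the user-visible API changes in this merge is the stabilization of `BTreeMap`'s `Entry::or_insert_with_key` (marked 1.50.0 in the `entry.rs` hunk below). As context, a minimal usage sketch mirroring the doctest carried in that hunk (editor's example, not part of the commit):

```rust
use std::collections::BTreeMap;

fn main() {
    let mut map: BTreeMap<&str, usize> = BTreeMap::new();

    // The closure receives a reference to the key that was moved into
    // `entry`, so deriving the default value needs no clone of the key.
    map.entry("poneyland").or_insert_with_key(|key| key.chars().count());

    assert_eq!(map["poneyland"], 9);
}
```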
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs index 97ebc12175f..76051d9e1df 100644 --- a/library/alloc/src/collections/binary_heap.rs +++ b/library/alloc/src/collections/binary_heap.rs @@ -915,6 +915,7 @@ impl<T> BinaryHeap<T> { /// /// assert_eq!(heap.len(), 2); /// ``` + #[doc(alias = "length")] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { self.data.len() diff --git a/library/alloc/src/collections/btree/append.rs b/library/alloc/src/collections/btree/append.rs index bd99c4ed2f1..1d6488dd2df 100644 --- a/library/alloc/src/collections/btree/append.rs +++ b/library/alloc/src/collections/btree/append.rs @@ -30,7 +30,7 @@ impl<K, V> Root<K, V> { /// Pushes all key-value pairs to the end of the tree, incrementing a /// `length` variable along the way. The latter makes it easier for the /// caller to avoid a leak when the iterator panicks. - fn bulk_push<I>(&mut self, iter: I, length: &mut usize) + pub fn bulk_push<I>(&mut self, iter: I, length: &mut usize) where I: Iterator<Item = (K, V)>, { @@ -81,15 +81,18 @@ impl<K, V> Root<K, V> { // the appended elements even if advancing the iterator panicks. *length += 1; } - self.fix_right_edge(); + self.fix_right_border_of_plentiful(); } - fn fix_right_edge(&mut self) { - // Handle underfull nodes, start from the top. + /// Stock up any underfull nodes on the right border of the tree. + /// The other nodes, those that are not the root nor a rightmost edge, + /// must have MIN_LEN elements to spare. + fn fix_right_border_of_plentiful(&mut self) { let mut cur_node = self.borrow_mut(); while let Internal(internal) = cur_node.force() { // Check if right-most child is underfull. let mut last_kv = internal.last_kv().consider_for_balancing(); + debug_assert!(last_kv.left_child_len() >= MIN_LEN * 2); let right_child_len = last_kv.right_child_len(); if right_child_len < MIN_LEN { // We need to steal. diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index bd2ad257402..944e0e65cf7 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -248,7 +248,7 @@ where let (map, dormant_map) = DormantMutRef::new(self); let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut(); match search::search_tree::<marker::Mut<'_>, K, (), K>(root_node, &key) { - Found(handle) => Some(mem::replace(handle.into_key_mut(), key)), + Found(mut kv) => Some(mem::replace(kv.key_mut(), key)), GoDown(handle) => { VacantEntry { key, handle, dormant_map, _marker: PhantomData }.insert(()); None @@ -2132,6 +2132,7 @@ impl<K, V> BTreeMap<K, V> { /// a.insert(1, "a"); /// assert_eq!(a.len(), 1); /// ``` + #[doc(alias = "length")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] pub const fn len(&self) -> usize { diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs index c92888cb897..6cc8813bc52 100644 --- a/library/alloc/src/collections/btree/map/entry.rs +++ b/library/alloc/src/collections/btree/map/entry.rs @@ -116,15 +116,16 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { } } - #[unstable(feature = "or_insert_with_key", issue = "71024")] - /// Ensures a value is in the entry by inserting, if empty, the result of the default function, - /// which takes the key as its argument, and returns a mutable reference to the value in the - /// entry. 
+ /// Ensures a value is in the entry by inserting, if empty, the result of the default function. + /// This method allows for generating key-derived values for insertion by providing the default + /// function a reference to the key that was moved during the `.entry(key)` method call. + /// + /// The reference to the moved key is provided so that cloning or copying the key is + /// unnecessary, unlike with `.or_insert_with(|| ... )`. /// /// # Examples /// /// ``` - /// #![feature(or_insert_with_key)] /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); @@ -134,6 +135,7 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// assert_eq!(map["poneyland"], 9); /// ``` #[inline] + #[stable(feature = "or_insert_with_key", since = "1.50.0")] pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V { match self { Occupied(entry) => entry.into_mut(), diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs index 97df8ea07d2..f92aed8ce15 100644 --- a/library/alloc/src/collections/btree/map/tests.rs +++ b/library/alloc/src/collections/btree/map/tests.rs @@ -111,11 +111,23 @@ impl<K, V> BTreeMap<K, V> { } } } + + // Transform the tree to minimize wasted space, obtaining fewer nodes that + // are mostly filled up to their capacity. The same compact tree could have + // been obtained by inserting keys in a shrewd order. + fn compact(&mut self) + where + K: Ord, + { + let iter = mem::take(self).into_iter(); + let root = BTreeMap::ensure_is_owned(&mut self.root); + root.bulk_push(iter, &mut self.length); + } } impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> { fn assert_min_len(self, min_len: usize) { - assert!(self.len() >= min_len, "{} < {}", self.len(), min_len); + assert!(self.len() >= min_len, "node len {} < {}", self.len(), min_len); if let node::ForceResult::Internal(node) = self.force() { for idx in 0..=node.len() { let edge = unsafe { Handle::new_edge(node, idx) }; @@ -1679,17 +1691,29 @@ fn test_first_last_entry() { } #[test] -fn test_insert_into_full_left() { - let mut map: BTreeMap<_, _> = (0..NODE_CAPACITY).map(|i| (i * 2, ())).collect(); - assert!(map.insert(NODE_CAPACITY, ()).is_none()); - map.check(); +fn test_insert_into_full_height_0() { + let size = NODE_CAPACITY; + for pos in 0..=size { + let mut map: BTreeMap<_, _> = (0..size).map(|i| (i * 2 + 1, ())).collect(); + assert!(map.insert(pos * 2, ()).is_none()); + map.check(); + } } #[test] -fn test_insert_into_full_right() { - let mut map: BTreeMap<_, _> = (0..NODE_CAPACITY).map(|i| (i * 2, ())).collect(); - assert!(map.insert(NODE_CAPACITY + 2, ()).is_none()); - map.check(); +fn test_insert_into_full_height_1() { + let size = NODE_CAPACITY + 1 + NODE_CAPACITY; + for pos in 0..=size { + let mut map: BTreeMap<_, _> = (0..size).map(|i| (i * 2 + 1, ())).collect(); + map.compact(); + let root_node = map.root.as_ref().unwrap().reborrow(); + assert_eq!(root_node.len(), 1); + assert_eq!(root_node.first_leaf_edge().into_node().len(), NODE_CAPACITY); + assert_eq!(root_node.last_leaf_edge().into_node().len(), NODE_CAPACITY); + + assert!(map.insert(pos * 2, ()).is_none()); + map.check(); + } } macro_rules! 
create_append_test { @@ -1797,7 +1821,6 @@ fn test_append_ord_chaos() { } fn rand_data(len: usize) -> Vec<(u32, u32)> { - assert!(len * 2 <= 70029); // from that point on numbers repeat let mut rng = DeterministicRng::new(); Vec::from_iter((0..len).map(|_| (rng.next(), rng.next()))) } @@ -1863,6 +1886,25 @@ fn test_split_off_tiny_right_height_2() { } #[test] +fn test_split_off_halfway() { + let mut rng = DeterministicRng::new(); + for &len in &[NODE_CAPACITY, 25, 50, 75, 100] { + let mut data = Vec::from_iter((0..len).map(|_| (rng.next(), ()))); + // Insertion in non-ascending order creates some variation in node length. + let mut map = BTreeMap::from_iter(data.iter().copied()); + data.sort(); + let small_keys = data.iter().take(len / 2).map(|kv| kv.0); + let large_keys = data.iter().skip(len / 2).map(|kv| kv.0); + let split_key = large_keys.clone().next().unwrap(); + let right = map.split_off(&split_key); + map.check(); + right.check(); + assert!(map.keys().copied().eq(small_keys)); + assert!(right.keys().copied().eq(large_keys)); + } +} + +#[test] fn test_split_off_large_random_sorted() { // Miri is too slow let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) }; diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs index ebcbb0e467c..cdb39104047 100644 --- a/library/alloc/src/collections/btree/mod.rs +++ b/library/alloc/src/collections/btree/mod.rs @@ -38,6 +38,7 @@ pub unsafe fn unwrap_unchecked<T>(val: Option<T>) -> T { #[cfg(test)] /// XorShiftRng struct DeterministicRng { + count: usize, x: u32, y: u32, z: u32, @@ -47,11 +48,13 @@ struct DeterministicRng { #[cfg(test)] impl DeterministicRng { fn new() -> Self { - DeterministicRng { x: 0x193a6754, y: 0xa8a7d469, z: 0x97830e05, w: 0x113ba7bb } + DeterministicRng { count: 0, x: 0x193a6754, y: 0xa8a7d469, z: 0x97830e05, w: 0x113ba7bb } } - /// Guarantees that the first 70029 results are unique. + /// Guarantees that each returned number is unique. fn next(&mut self) -> u32 { + self.count += 1; + assert!(self.count <= 70029); let x = self.x; let t = x ^ (x << 11); self.x = self.y; diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs index 31809fde57b..b3641a7a0c6 100644 --- a/library/alloc/src/collections/btree/node.rs +++ b/library/alloc/src/collections/btree/node.rs @@ -35,6 +35,7 @@ use core::cmp::Ordering; use core::marker::PhantomData; use core::mem::{self, MaybeUninit}; use core::ptr::{self, NonNull}; +use core::slice::SliceIndex; use crate::alloc::{Allocator, Global, Layout}; use crate::boxed::Box; @@ -150,7 +151,7 @@ impl<K, V, Type> NodeRef<marker::Owned, K, V, Type> { /// Mutably borrows the owned node. Unlike `reborrow_mut`, this is safe, /// because the return value cannot be used to destroy the node itself, /// and there cannot be other references to the tree (except during the - /// process of `into_iter` or `drop`, but that is a horrific already). + /// process of `into_iter` or `drop`, but that is horrific already). 
pub fn borrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> { NodeRef { height: self.height, node: self.node, _marker: PhantomData } } @@ -192,7 +193,7 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> { let internal_node = NodeRef { height: self.height, node: top, _marker: PhantomData }; *self = internal_node.first_edge().descend(); - self.borrow_mut().clear_parent_link(); + self.clear_parent_link(); unsafe { Global.deallocate(top.cast(), Layout::new::<InternalNode<K, V>>()); @@ -239,17 +240,16 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> { /// - We cannot get implicit coercion from say `Mut<'a>` to `Immut<'a>`. /// Therefore, we have to explicitly call `reborrow` on a more powerfull /// `NodeRef` in order to reach a method like `key_at`. -/// - All methods on `NodeRef` that return some kind of reference, except -/// `reborrow` and `reborrow_mut`, take `self` by value and not by reference. -/// This avoids silently returning a second reference somewhere in the tree. -/// That is irrelevant when `BorrowType` is `Immut<'a>`, but the rule does -/// no harm because we make those `NodeRef` implicitly `Copy`. -/// The rule also avoids implicitly returning the lifetime of `&self`, -/// instead of the lifetime carried by `BorrowType`. -/// An exception to this rule are the insert functions. -/// - Given the above, we need a `reborrow_mut` to explicitly copy a `Mut<'a>` -/// `NodeRef` whenever we want to invoke a method returning an extra reference -/// somewhere in the tree. +/// +/// All methods on `NodeRef` that return some kind of reference, either: +/// - Take `self` by value, and return the lifetime carried by `BorrowType`. +/// Sometimes, to invoke such a method, we need to call `reborrow_mut`. +/// - Take `self` by reference, and (implicitly) return that reference's +/// lifetime, instead of the lifetime carried by `BorrowType`. That way, +/// the borrow checker guarantees that the `NodeRef` remains borrowed as long +/// as the returned reference is used. +/// The methods supporting insert bend this rule by returning a raw pointer, +/// i.e., a reference without any lifetime. pub struct NodeRef<BorrowType, K, V, Type> { /// The number of levels that the node and the level of leaves are apart, a /// constant of the node that cannot be entirely described by `Type`, and that @@ -295,19 +295,10 @@ impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> { } } -impl<'a, K, V> NodeRef<marker::Immut<'a>, K, V, marker::Internal> { - /// Exposes the data of an internal node in an immutable tree. - fn as_internal(this: &Self) -> &'a InternalNode<K, V> { - let ptr = Self::as_internal_ptr(this); - // SAFETY: there can be no mutable references into this tree borrowed as `Immut`. - unsafe { &*ptr } - } -} - impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { - /// Offers exclusive access to the data of an internal node. - fn as_internal_mut(this: &mut Self) -> &'a mut InternalNode<K, V> { - let ptr = Self::as_internal_ptr(this); + /// Borrows exclusive access to the data of an internal node. + fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> { + let ptr = Self::as_internal_ptr(self); unsafe { &mut *ptr } } } @@ -355,7 +346,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { /// The node has more than `idx` initialized elements. 
pub unsafe fn key_at(self, idx: usize) -> &'a K { debug_assert!(idx < self.len()); - unsafe { Self::as_leaf(&self).keys.get_unchecked(idx).assume_init_ref() } + unsafe { self.into_leaf().keys.get_unchecked(idx).assume_init_ref() } } /// Exposes one of the values stored in the node. @@ -364,18 +355,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { /// The node has more than `idx` initialized elements. unsafe fn val_at(self, idx: usize) -> &'a V { debug_assert!(idx < self.len()); - unsafe { Self::as_leaf(&self).vals.get_unchecked(idx).assume_init_ref() } - } -} - -impl<'a, K, V> NodeRef<marker::Immut<'a>, K, V, marker::Internal> { - /// Exposes the contents of one of the edges in the node. - /// - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn edge_at(self, idx: usize) -> &'a BoxedNode<K, V> { - debug_assert!(idx <= self.len()); - unsafe { Self::as_internal(&self).edges.get_unchecked(idx).assume_init_ref() } + unsafe { self.into_leaf().vals.get_unchecked(idx).assume_init_ref() } } } @@ -431,8 +411,8 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> { impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { /// Exposes the leaf portion of any leaf or internal node in an immutable tree. - fn as_leaf(this: &Self) -> &'a LeafNode<K, V> { - let ptr = Self::as_leaf_ptr(this); + fn into_leaf(self) -> &'a LeafNode<K, V> { + let ptr = Self::as_leaf_ptr(&self); // SAFETY: there can be no mutable references into this tree borrowed as `Immut`. unsafe { &*ptr } } @@ -489,98 +469,64 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { NodeRef { height: self.height, node: self.node, _marker: PhantomData } } - /// Offers exclusive access to the leaf portion of any leaf or internal node. - fn as_leaf_mut(this: &mut Self) -> &'a mut LeafNode<K, V> { - let ptr = Self::as_leaf_ptr(this); + /// Borrows exclusive access to the leaf portion of any leaf or internal node. + fn as_leaf_mut(&mut self) -> &mut LeafNode<K, V> { + let ptr = Self::as_leaf_ptr(self); // SAFETY: we have exclusive access to the entire node. unsafe { &mut *ptr } } -} -impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { - /// Offers exclusive access to a part of the key storage area. - /// - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_key_area_mut_at(mut self, idx: usize) -> &'a mut MaybeUninit<K> { - debug_assert!(idx < self.len()); - unsafe { Self::as_leaf_mut(&mut self).keys.get_unchecked_mut(idx) } - } - - /// Offers exclusive access to a part of the value storage area. - /// - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_val_area_mut_at(mut self, idx: usize) -> &'a mut MaybeUninit<V> { - debug_assert!(idx < self.len()); - unsafe { Self::as_leaf_mut(&mut self).vals.get_unchecked_mut(idx) } + /// Offers exclusive access to the leaf portion of any leaf or internal node. + fn into_leaf_mut(mut self) -> &'a mut LeafNode<K, V> { + let ptr = Self::as_leaf_ptr(&mut self); + // SAFETY: we have exclusive access to the entire node. + unsafe { &mut *ptr } } } -impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { - /// Offers exclusive access to a part of the storage area for edge contents. +impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { + /// Borrows exclusive access to an element of the key storage area. /// /// # Safety - /// The node has at least `idx` initialized elements. 
- unsafe fn into_edge_area_mut_at(mut self, idx: usize) -> &'a mut MaybeUninit<BoxedNode<K, V>> { - debug_assert!(idx <= self.len()); - unsafe { Self::as_internal_mut(&mut self).edges.get_unchecked_mut(idx) } - } -} - -impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { - /// Exposes the entire key storage area in the node, - /// regardless of the node's current length, - /// having exclusive access to the entire node. - unsafe fn key_area(self) -> &'a [MaybeUninit<K>] { - Self::as_leaf(&self).keys.as_slice() - } - - /// Exposes the entire value storage area in the node, - /// regardless of the node's current length, - /// having exclusive access to the entire node. - unsafe fn val_area(self) -> &'a [MaybeUninit<V>] { - Self::as_leaf(&self).vals.as_slice() - } -} - -impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::Internal> { - /// Exposes the entire storage area for edge contents in the node, - /// regardless of the node's current length, - /// having exclusive access to the entire node. - unsafe fn edge_area(self) -> &'a [MaybeUninit<BoxedNode<K, V>>] { - Self::as_internal(&self).edges.as_slice() - } -} - -impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { - /// Offers exclusive access to a sized slice of key storage area in the node. - unsafe fn into_key_area_slice(mut self) -> &'a mut [MaybeUninit<K>] { - let len = self.len(); + /// `index` is in bounds of 0..CAPACITY + unsafe fn key_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output + where + I: SliceIndex<[MaybeUninit<K>], Output = Output>, + { // SAFETY: the caller will not be able to call further methods on self // until the key slice reference is dropped, as we have unique access // for the lifetime of the borrow. - unsafe { Self::as_leaf_mut(&mut self).keys.get_unchecked_mut(..len) } + unsafe { self.as_leaf_mut().keys.as_mut_slice().get_unchecked_mut(index) } } - /// Offers exclusive access to a sized slice of value storage area in the node. - unsafe fn into_val_area_slice(mut self) -> &'a mut [MaybeUninit<V>] { - let len = self.len(); + /// Borrows exclusive access to an element or slice of the node's value storage area. + /// + /// # Safety + /// `index` is in bounds of 0..CAPACITY + unsafe fn val_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output + where + I: SliceIndex<[MaybeUninit<V>], Output = Output>, + { // SAFETY: the caller will not be able to call further methods on self // until the value slice reference is dropped, as we have unique access // for the lifetime of the borrow. - unsafe { Self::as_leaf_mut(&mut self).vals.get_unchecked_mut(..len) } + unsafe { self.as_leaf_mut().vals.as_mut_slice().get_unchecked_mut(index) } } } impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { - /// Offers exclusive access to a sized slice of storage area for edge contents in the node. - unsafe fn into_edge_area_slice(mut self) -> &'a mut [MaybeUninit<BoxedNode<K, V>>] { - let len = self.len(); + /// Borrows exclusive access to an element or slice of the node's storage area for edge contents. + /// + /// # Safety + /// `index` is in bounds of 0..CAPACITY + 1 + unsafe fn edge_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output + where + I: SliceIndex<[MaybeUninit<BoxedNode<K, V>>], Output = Output>, + { // SAFETY: the caller will not be able to call further methods on self // until the edge slice reference is dropped, as we have unique access // for the lifetime of the borrow. 
- unsafe { Self::as_internal_mut(&mut self).edges.get_unchecked_mut(..len + 1) } + unsafe { self.as_internal_mut().edges.as_mut_slice().get_unchecked_mut(index) } } } @@ -604,25 +550,27 @@ impl<'a, K, V, Type> NodeRef<marker::ValMut<'a>, K, V, Type> { } impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { - /// Exposes exclusive access to the length of the node. - pub fn into_len_mut(mut self) -> &'a mut u16 { - &mut (*Self::as_leaf_mut(&mut self)).len + /// Borrows exclusive access to the length of the node. + pub fn len_mut(&mut self) -> &mut u16 { + &mut self.as_leaf_mut().len } } impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { - /// Set or clear the node's link to its parent edge, + /// Sets the node's link to its parent edge, /// without invalidating other references to the node. fn set_parent_link(&mut self, parent: NonNull<InternalNode<K, V>>, parent_idx: usize) { let leaf = Self::as_leaf_ptr(self); unsafe { (*leaf).parent = Some(parent) }; unsafe { (*leaf).parent_idx.write(parent_idx as u16) }; } +} - /// Clear the node's link to its parent edge. - /// This only makes sense when there are no other references to the node. +impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> { + /// Clears the root's link to its parent edge. fn clear_parent_link(&mut self) { - let leaf = Self::as_leaf_mut(self); + let mut root_node = self.borrow_mut(); + let leaf = root_node.as_leaf_mut(); leaf.parent = None; } } @@ -630,24 +578,24 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> { /// Adds a key-value pair to the end of the node. pub fn push(&mut self, key: K, val: V) { - let len = unsafe { self.reborrow_mut().into_len_mut() }; + let len = self.len_mut(); let idx = usize::from(*len); assert!(idx < CAPACITY); *len += 1; unsafe { - self.reborrow_mut().into_key_area_mut_at(idx).write(key); - self.reborrow_mut().into_val_area_mut_at(idx).write(val); + self.key_area_mut(idx).write(key); + self.val_area_mut(idx).write(val); } } /// Adds a key-value pair to the beginning of the node. 
fn push_front(&mut self, key: K, val: V) { - assert!(self.len() < CAPACITY); - + let new_len = self.len() + 1; + assert!(new_len <= CAPACITY); unsafe { - *self.reborrow_mut().into_len_mut() += 1; - slice_insert(self.reborrow_mut().into_key_area_slice(), 0, key); - slice_insert(self.reborrow_mut().into_val_area_slice(), 0, val); + slice_insert(self.key_area_mut(..new_len), 0, key); + slice_insert(self.val_area_mut(..new_len), 0, val); + *self.len_mut() = new_len as u16; } } } @@ -674,14 +622,14 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) { assert!(edge.height == self.height - 1); - let len = unsafe { self.reborrow_mut().into_len_mut() }; + let len = self.len_mut(); let idx = usize::from(*len); assert!(idx < CAPACITY); *len += 1; unsafe { - self.reborrow_mut().into_key_area_mut_at(idx).write(key); - self.reborrow_mut().into_val_area_mut_at(idx).write(val); - self.reborrow_mut().into_edge_area_mut_at(idx + 1).write(edge.node); + self.key_area_mut(idx).write(key); + self.val_area_mut(idx).write(val); + self.edge_area_mut(idx + 1).write(edge.node); Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link(); } } @@ -689,14 +637,15 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { /// Adds a key-value pair, and an edge to go to the left of that pair, /// to the beginning of the node. fn push_front(&mut self, key: K, val: V, edge: Root<K, V>) { + let new_len = self.len() + 1; assert!(edge.height == self.height - 1); - assert!(self.len() < CAPACITY); + assert!(new_len <= CAPACITY); unsafe { - *self.reborrow_mut().into_len_mut() += 1; - slice_insert(self.reborrow_mut().into_key_area_slice(), 0, key); - slice_insert(self.reborrow_mut().into_val_area_slice(), 0, val); - slice_insert(self.reborrow_mut().into_edge_area_slice(), 0, edge.node); + slice_insert(self.key_area_mut(..new_len), 0, key); + slice_insert(self.val_area_mut(..new_len), 0, val); + slice_insert(self.edge_area_mut(..new_len + 1), 0, edge.node); + *self.len_mut() = new_len as u16; } self.correct_all_childrens_parent_links(); @@ -713,21 +662,21 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { let idx = self.len() - 1; unsafe { - let key = ptr::read(self.reborrow().key_at(idx)); - let val = ptr::read(self.reborrow().val_at(idx)); + let key = self.key_area_mut(idx).assume_init_read(); + let val = self.val_area_mut(idx).assume_init_read(); let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, - ForceResult::Internal(internal) => { - let node = ptr::read(internal.reborrow().edge_at(idx + 1)); + ForceResult::Internal(mut internal) => { + let node = internal.edge_area_mut(idx + 1).assume_init_read(); let mut edge = Root { node, height: internal.height - 1, _marker: PhantomData }; - // In practice, clearing the parent is a waste of time, because we will + // Currently, clearing the parent link is superfluous, because we will // insert the node elsewhere and set its parent link again. 
- edge.borrow_mut().clear_parent_link(); + edge.clear_parent_link(); Some(edge) } }; - *self.reborrow_mut().into_len_mut() -= 1; + *self.len_mut() -= 1; (key, val, edge) } } @@ -741,16 +690,16 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { let old_len = self.len(); unsafe { - let key = slice_remove(self.reborrow_mut().into_key_area_slice(), 0); - let val = slice_remove(self.reborrow_mut().into_val_area_slice(), 0); + let key = slice_remove(self.key_area_mut(..old_len), 0); + let val = slice_remove(self.val_area_mut(..old_len), 0); let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, ForceResult::Internal(mut internal) => { - let node = slice_remove(internal.reborrow_mut().into_edge_area_slice(), 0); + let node = slice_remove(internal.edge_area_mut(..old_len + 1), 0); let mut edge = Root { node, height: internal.height - 1, _marker: PhantomData }; - // In practice, clearing the parent is a waste of time, because we will + // Currently, clearing the parent link is superfluous, because we will // insert the node elsewhere and set its parent link again. - edge.borrow_mut().clear_parent_link(); + edge.clear_parent_link(); internal.correct_childrens_parent_links(0..old_len); @@ -758,14 +707,14 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { } }; - *self.reborrow_mut().into_len_mut() -= 1; + *self.len_mut() -= 1; (key, val, edge) } } fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) { - let leaf = Self::as_leaf_mut(&mut self); + let leaf = self.as_leaf_mut(); let keys = MaybeUninit::slice_as_mut_ptr(&mut leaf.keys); let vals = MaybeUninit::slice_as_mut_ptr(&mut leaf.vals); (keys, vals) @@ -967,13 +916,14 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark /// The returned pointer points to the inserted value. 
fn insert_fit(&mut self, key: K, val: V) -> *mut V { debug_assert!(self.node.len() < CAPACITY); + let new_len = self.node.len() + 1; unsafe { - *self.node.reborrow_mut().into_len_mut() += 1; - slice_insert(self.node.reborrow_mut().into_key_area_slice(), self.idx, key); - slice_insert(self.node.reborrow_mut().into_val_area_slice(), self.idx, val); + slice_insert(self.node.key_area_mut(..new_len), self.idx, key); + slice_insert(self.node.val_area_mut(..new_len), self.idx, val); + *self.node.len_mut() = new_len as u16; - self.node.reborrow_mut().into_val_area_mut_at(self.idx).assume_init_mut() + self.node.val_area_mut(self.idx).assume_init_mut() } } } @@ -1025,14 +975,15 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, fn insert_fit(&mut self, key: K, val: V, edge: Root<K, V>) { debug_assert!(self.node.len() < CAPACITY); debug_assert!(edge.height == self.node.height - 1); + let new_len = self.node.len() + 1; unsafe { - *self.node.reborrow_mut().into_len_mut() += 1; - slice_insert(self.node.reborrow_mut().into_key_area_slice(), self.idx, key); - slice_insert(self.node.reborrow_mut().into_val_area_slice(), self.idx, val); - slice_insert(self.node.reborrow_mut().into_edge_area_slice(), self.idx + 1, edge.node); + slice_insert(self.node.key_area_mut(..new_len), self.idx, key); + slice_insert(self.node.val_area_mut(..new_len), self.idx, val); + slice_insert(self.node.edge_area_mut(..new_len + 1), self.idx + 1, edge.node); + *self.node.len_mut() = new_len as u16; - self.node.correct_childrens_parent_links((self.idx + 1)..=self.node.len()); + self.node.correct_childrens_parent_links(self.idx + 1..new_len + 1); } } @@ -1133,12 +1084,13 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Immut<'a>, K, V, NodeTyp } impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> { - pub fn into_key_mut(self) -> &'a mut K { - unsafe { self.node.into_key_area_mut_at(self.idx).assume_init_mut() } + pub fn key_mut(&mut self) -> &mut K { + unsafe { self.node.key_area_mut(self.idx).assume_init_mut() } } pub fn into_val_mut(self) -> &'a mut V { - unsafe { self.node.into_val_area_mut_at(self.idx).assume_init_mut() } + let leaf = self.node.into_leaf_mut(); + unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() } } } @@ -1153,43 +1105,43 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType> // We cannot call separate key and value methods, because calling the second one // invalidates the reference returned by the first. unsafe { - let leaf = NodeRef::as_leaf_mut(&mut self.node.reborrow_mut()); + let leaf = self.node.as_leaf_mut(); let key = leaf.keys.get_unchecked_mut(self.idx).assume_init_mut(); let val = leaf.vals.get_unchecked_mut(self.idx).assume_init_mut(); (key, val) } } -} -impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> { - /// Helps implementations of `split` for a particular `NodeType`, - /// by calculating the length of the new node. - fn split_new_node_len(&self) -> usize { - debug_assert!(self.idx < self.node.len()); - self.node.len() - self.idx - 1 + /// Replace the key and value that the KV handle refers to. + pub fn replace_kv(&mut self, k: K, v: V) -> (K, V) { + let (key, val) = self.kv_mut(); + (mem::replace(key, k), mem::replace(val, v)) } +} +impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> { /// Helps implementations of `split` for a particular `NodeType`, /// by taking care of leaf data. 
fn split_leaf_data(&mut self, new_node: &mut LeafNode<K, V>) -> (K, V) { - let new_len = self.split_new_node_len(); + debug_assert!(self.idx < self.node.len()); + let new_len = self.node.len() - self.idx - 1; new_node.len = new_len as u16; unsafe { - let k = ptr::read(self.node.reborrow().key_at(self.idx)); - let v = ptr::read(self.node.reborrow().val_at(self.idx)); + let k = self.node.key_area_mut(self.idx).assume_init_read(); + let v = self.node.val_area_mut(self.idx).assume_init_read(); ptr::copy_nonoverlapping( - self.node.reborrow().key_area().as_ptr().add(self.idx + 1), + self.node.key_area_mut(self.idx + 1..).as_ptr(), new_node.keys.as_mut_ptr(), new_len, ); ptr::copy_nonoverlapping( - self.node.reborrow().val_area().as_ptr().add(self.idx + 1), + self.node.val_area_mut(self.idx + 1..).as_ptr(), new_node.vals.as_mut_ptr(), new_len, ); - *self.node.reborrow_mut().into_len_mut() = self.idx as u16; + *self.node.len_mut() = self.idx as u16; (k, v) } } @@ -1219,10 +1171,11 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark pub fn remove( mut self, ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) { + let old_len = self.node.len(); unsafe { - let k = slice_remove(self.node.reborrow_mut().into_key_area_slice(), self.idx); - let v = slice_remove(self.node.reborrow_mut().into_val_area_slice(), self.idx); - *self.node.reborrow_mut().into_len_mut() -= 1; + let k = slice_remove(self.node.key_area_mut(..old_len), self.idx); + let v = slice_remove(self.node.val_area_mut(..old_len), self.idx); + *self.node.len_mut() = (old_len - 1) as u16; ((k, v), self.left_edge()) } } @@ -1239,14 +1192,13 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, pub fn split(mut self) -> SplitResult<'a, K, V, marker::Internal> { unsafe { let mut new_node = Box::new(InternalNode::new()); - let new_len = self.split_new_node_len(); - // Move edges out before reducing length: + let kv = self.split_leaf_data(&mut new_node.data); + let new_len = usize::from(new_node.data.len); ptr::copy_nonoverlapping( - self.node.reborrow().edge_area().as_ptr().add(self.idx + 1), + self.node.edge_area_mut(self.idx + 1..).as_ptr(), new_node.edges.as_mut_ptr(), new_len + 1, ); - let kv = self.split_leaf_data(&mut new_node.data); let height = self.node.height; let mut right = NodeRef::from_new_internal(new_node, height); @@ -1282,10 +1234,12 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { /// Chooses a balancing context involving the node as a child, thus between /// the KV immediately to the left or to the right in the parent node. /// Returns an `Err` if there is no parent. + /// Panics if the parent is empty. /// - /// This method optimizes for a node that has fewer elements than its left - /// and right siblings, if they exist, by preferring the left parent KV. - /// Merging with the left sibling is faster, since we only need to move + /// Prefers the left side, to be optimal if the given node is somehow + /// underfull, meaning here only that it has fewer elements than its left + /// sibling and than its right sibling, if they exist. In that case, + /// merging with the left sibling is faster, since we only need to move /// the node's N elements, instead of shifting them to the right and moving /// more than N elements in front. 
Stealing from the left sibling is also /// typically faster, since we only need to shift the node's N elements to @@ -1293,19 +1247,19 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { /// the left. pub fn choose_parent_kv(self) -> Result<LeftOrRight<BalancingContext<'a, K, V>>, Self> { match unsafe { ptr::read(&self) }.ascend() { - Ok(parent) => match parent.left_kv() { + Ok(parent_edge) => match parent_edge.left_kv() { Ok(left_parent_kv) => Ok(LeftOrRight::Left(BalancingContext { parent: unsafe { ptr::read(&left_parent_kv) }, left_child: left_parent_kv.left_edge().descend(), right_child: self, })), - Err(parent) => match parent.right_kv() { + Err(parent_edge) => match parent_edge.right_kv() { Ok(right_parent_kv) => Ok(LeftOrRight::Right(BalancingContext { parent: unsafe { ptr::read(&right_parent_kv) }, left_child: self, right_child: right_parent_kv.right_edge().descend(), })), - Err(_) => unreachable!("empty non-root node"), + Err(_) => unreachable!("empty internal node"), }, }, Err(root) => Err(root), @@ -1346,66 +1300,59 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { /// /// Panics unless we `.can_merge()`. pub fn merge( - mut self, + self, track_edge_idx: Option<LeftOrRight<usize>>, ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> { + let Handle { node: mut parent_node, idx: parent_idx, _marker } = self.parent; + let old_parent_len = parent_node.len(); let mut left_node = self.left_child; - let left_len = left_node.len(); - let right_node = self.right_child; + let old_left_len = left_node.len(); + let mut right_node = self.right_child; let right_len = right_node.len(); + let new_left_len = old_left_len + 1 + right_len; - assert!(left_len + right_len < CAPACITY); + assert!(new_left_len <= CAPACITY); assert!(match track_edge_idx { None => true, - Some(LeftOrRight::Left(idx)) => idx <= left_len, + Some(LeftOrRight::Left(idx)) => idx <= old_left_len, Some(LeftOrRight::Right(idx)) => idx <= right_len, }); unsafe { - *left_node.reborrow_mut().into_len_mut() += right_len as u16 + 1; + *left_node.len_mut() = new_left_len as u16; - let parent_key = slice_remove( - self.parent.node.reborrow_mut().into_key_area_slice(), - self.parent.idx, - ); - left_node.reborrow_mut().into_key_area_mut_at(left_len).write(parent_key); + let parent_key = slice_remove(parent_node.key_area_mut(..old_parent_len), parent_idx); + left_node.key_area_mut(old_left_len).write(parent_key); ptr::copy_nonoverlapping( - right_node.reborrow().key_area().as_ptr(), - left_node.reborrow_mut().into_key_area_slice().as_mut_ptr().add(left_len + 1), + right_node.key_area_mut(..).as_ptr(), + left_node.key_area_mut(old_left_len + 1..).as_mut_ptr(), right_len, ); - let parent_val = slice_remove( - self.parent.node.reborrow_mut().into_val_area_slice(), - self.parent.idx, - ); - left_node.reborrow_mut().into_val_area_mut_at(left_len).write(parent_val); + let parent_val = slice_remove(parent_node.val_area_mut(..old_parent_len), parent_idx); + left_node.val_area_mut(old_left_len).write(parent_val); ptr::copy_nonoverlapping( - right_node.reborrow().val_area().as_ptr(), - left_node.reborrow_mut().into_val_area_slice().as_mut_ptr().add(left_len + 1), + right_node.val_area_mut(..).as_ptr(), + left_node.val_area_mut(old_left_len + 1..).as_mut_ptr(), right_len, ); - slice_remove( - &mut self.parent.node.reborrow_mut().into_edge_area_slice(), - self.parent.idx + 1, - ); - let parent_old_len = self.parent.node.len(); - self.parent.node.correct_childrens_parent_links(self.parent.idx 
+ 1..parent_old_len); - *self.parent.node.reborrow_mut().into_len_mut() -= 1; + slice_remove(&mut parent_node.edge_area_mut(..old_parent_len + 1), parent_idx + 1); + parent_node.correct_childrens_parent_links(parent_idx + 1..old_parent_len); + *parent_node.len_mut() -= 1; - if self.parent.node.height > 1 { + if parent_node.height > 1 { // SAFETY: the height of the nodes being merged is one below the height // of the node of this edge, thus above zero, so they are internal. let mut left_node = left_node.reborrow_mut().cast_to_internal_unchecked(); - let right_node = right_node.cast_to_internal_unchecked(); + let mut right_node = right_node.cast_to_internal_unchecked(); ptr::copy_nonoverlapping( - right_node.reborrow().edge_area().as_ptr(), - left_node.reborrow_mut().into_edge_area_slice().as_mut_ptr().add(left_len + 1), + right_node.edge_area_mut(..).as_ptr(), + left_node.edge_area_mut(old_left_len + 1..).as_mut_ptr(), right_len + 1, ); - left_node.correct_childrens_parent_links(left_len + 1..=left_len + 1 + right_len); + left_node.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1); Global.deallocate(right_node.node.cast(), Layout::new::<InternalNode<K, V>>()); } else { @@ -1415,7 +1362,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { let new_idx = match track_edge_idx { None => 0, Some(LeftOrRight::Left(idx)) => idx, - Some(LeftOrRight::Right(idx)) => left_len + 1 + idx, + Some(LeftOrRight::Right(idx)) => old_left_len + 1 + idx, }; Handle::new_edge(left_node, new_idx) } @@ -1432,8 +1379,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { unsafe { let (k, v, edge) = self.left_child.pop(); - let k = mem::replace(self.parent.kv_mut().0, k); - let v = mem::replace(self.parent.kv_mut().1, v); + let (k, v) = self.parent.replace_kv(k, v); match self.right_child.reborrow_mut().force() { ForceResult::Leaf(mut leaf) => leaf.push_front(k, v), @@ -1455,8 +1401,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { unsafe { let (k, v, edge) = self.right_child.pop_front(); - let k = mem::replace(self.parent.kv_mut().0, k); - let v = mem::replace(self.parent.kv_mut().1, v); + let (k, v) = self.parent.replace_kv(k, v); match self.left_child.reborrow_mut().force() { ForceResult::Leaf(mut leaf) => leaf.push(k, v), @@ -1469,6 +1414,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { /// This does stealing similar to `steal_left` but steals multiple elements at once. pub fn bulk_steal_left(&mut self, count: usize) { + assert!(count > 0); unsafe { let left_node = &mut self.left_child; let old_left_len = left_node.len(); @@ -1480,6 +1426,9 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { assert!(old_left_len >= count); let new_left_len = old_left_len - count; + let new_right_len = old_right_len + count; + *left_node.len_mut() = new_left_len as u16; + *right_node.len_mut() = new_right_len as u16; // Move leaf data. { @@ -1504,16 +1453,12 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { move_kv(left_kv, new_left_len, parent_kv, 0, 1); } - *left_node.reborrow_mut().into_len_mut() -= count as u16; - *right_node.reborrow_mut().into_len_mut() += count as u16; - match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) { (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { // Make room for stolen edges. 
- let left = left.reborrow(); - let right_edges = right.reborrow_mut().into_edge_area_slice().as_mut_ptr(); + let right_edges = right.edge_area_mut(..).as_mut_ptr(); ptr::copy(right_edges, right_edges.add(count), old_right_len + 1); - right.correct_childrens_parent_links(count..count + old_right_len + 1); + right.correct_childrens_parent_links(count..new_right_len + 1); // Steal edges. move_edges(left, new_left_len + 1, right, 0, count); @@ -1526,6 +1471,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { /// The symmetric clone of `bulk_steal_left`. pub fn bulk_steal_right(&mut self, count: usize) { + assert!(count > 0); unsafe { let left_node = &mut self.left_child; let old_left_len = left_node.len(); @@ -1536,7 +1482,10 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { assert!(old_left_len + count <= CAPACITY); assert!(old_right_len >= count); + let new_left_len = old_left_len + count; let new_right_len = old_right_len - count; + *left_node.len_mut() = new_left_len as u16; + *right_node.len_mut() = new_right_len as u16; // Move leaf data. { @@ -1561,16 +1510,13 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { ptr::copy(right_kv.1.add(count), right_kv.1, new_right_len); } - *left_node.reborrow_mut().into_len_mut() += count as u16; - *right_node.reborrow_mut().into_len_mut() -= count as u16; - match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) { (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { // Steal edges. - move_edges(right.reborrow(), 0, left, old_left_len + 1, count); + move_edges(right.reborrow_mut(), 0, left, old_left_len + 1, count); // Fill gap where stolen edges used to be. - let right_edges = right.reborrow_mut().into_edge_area_slice().as_mut_ptr(); + let right_edges = right.edge_area_mut(..).as_mut_ptr(); ptr::copy(right_edges.add(count), right_edges, new_right_len + 1); right.correct_childrens_parent_links(0..=new_right_len); } @@ -1596,16 +1542,16 @@ unsafe fn move_kv<K, V>( // Source and destination must have the same height. 
unsafe fn move_edges<'a, K: 'a, V: 'a>( - source: NodeRef<marker::Immut<'a>, K, V, marker::Internal>, + mut source: NodeRef<marker::Mut<'a>, K, V, marker::Internal>, source_offset: usize, mut dest: NodeRef<marker::Mut<'a>, K, V, marker::Internal>, dest_offset: usize, count: usize, ) { unsafe { - let source_ptr = source.edge_area().as_ptr(); - let dest_ptr = dest.reborrow_mut().into_edge_area_slice().as_mut_ptr(); - ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr.add(dest_offset), count); + let source_ptr = source.edge_area_mut(..).as_ptr(); + let dest_ptr = dest.edge_area_mut(dest_offset..).as_mut_ptr(); + ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr, count); dest.correct_childrens_parent_links(dest_offset..dest_offset + count); } } @@ -1700,12 +1646,11 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, ma move_kv(left_kv, new_left_len, right_kv, 0, new_right_len); - *left_node.reborrow_mut().into_len_mut() = new_left_len as u16; - *right_node.reborrow_mut().into_len_mut() = new_right_len as u16; + *left_node.len_mut() = new_left_len as u16; + *right_node.len_mut() = new_right_len as u16; match (left_node.force(), right_node.force()) { (ForceResult::Internal(left), ForceResult::Internal(right)) => { - let left = left.reborrow(); move_edges(left, new_left_len + 1, right, 1, new_right_len); } (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs index 6886962106b..7fe8ff743c0 100644 --- a/library/alloc/src/collections/btree/node/tests.rs +++ b/library/alloc/src/collections/btree/node/tests.rs @@ -30,11 +30,15 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> let depth = self.height(); let indent = " ".repeat(depth); result += &format!("\n{}", indent); - for idx in 0..leaf.len() { - if idx > 0 { - result += ", "; + if leaf.len() == 0 { + result += "(empty node)"; + } else { + for idx in 0..leaf.len() { + if idx > 0 { + result += ", "; + } + result += &format!("{:?}", unsafe { leaf.key_at(idx) }); } - result += &format!("{:?}", unsafe { leaf.key_at(idx) }); } } navigate::Position::Internal(_) => {} diff --git a/library/alloc/src/collections/btree/remove.rs b/library/alloc/src/collections/btree/remove.rs index 5ae1c1fcab8..7aeb39cbc32 100644 --- a/library/alloc/src/collections/btree/remove.rs +++ b/library/alloc/src/collections/btree/remove.rs @@ -1,7 +1,6 @@ use super::map::MIN_LEN; use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef}; use super::unwrap_unchecked; -use core::mem; impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> { /// Removes a key-value pair from the tree, and returns that pair, as well as @@ -55,12 +54,12 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark pos = unsafe { new_pos.cast_to_leaf_unchecked() }; // Only if we merged, the parent (if any) has shrunk, but skipping - // the following step does not pay off in benchmarks. + // the following step otherwise does not pay off in benchmarks. // // SAFETY: We won't destroy or rearrange the leaf where `pos` is at // by handling its parent recursively; at worst we will destroy or // rearrange the parent through the grandparent, thus change the - // leaf's parent pointer. + // link to the parent inside the leaf. 
if let Ok(parent) = unsafe { pos.reborrow_mut() }.into_node().ascend() { parent.into_node().handle_shrunk_node_recursively(handle_emptied_internal_root); } @@ -84,16 +83,15 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, // The internal node may have been stolen from or merged. Go back right // to find where the original KV ended up. let mut internal = unsafe { unwrap_unchecked(left_hole.next_kv().ok()) }; - let old_key = mem::replace(internal.kv_mut().0, left_kv.0); - let old_val = mem::replace(internal.kv_mut().1, left_kv.1); + let old_kv = internal.replace_kv(left_kv.0, left_kv.1); let pos = internal.next_leaf_edge(); - ((old_key, old_val), pos) + (old_kv, pos) } } impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { - /// Stocks up a possibly underfull internal node, recursively. - /// Climbs up until it reaches an ancestor that has elements to spare or the root. + /// Stocks up a possibly underfull internal node and its ancestors, + /// until it reaches an ancestor that has elements to spare or is the root. fn handle_shrunk_node_recursively<F: FnOnce()>(mut self, handle_emptied_internal_root: F) { loop { self = match self.len() { @@ -124,7 +122,7 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { ) -> Option<NodeRef<marker::Mut<'a>, K, V, marker::Internal>> { match self.forget_type().choose_parent_kv() { Ok(Left(left_parent_kv)) => { - debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1); + debug_assert_eq!(left_parent_kv.right_child_len(), MIN_LEN - 1); if left_parent_kv.can_merge() { let pos = left_parent_kv.merge(None); let parent_edge = unsafe { unwrap_unchecked(pos.into_node().ascend().ok()) }; @@ -136,7 +134,7 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { } } Ok(Right(right_parent_kv)) => { - debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1); + debug_assert_eq!(right_parent_kv.left_child_len(), MIN_LEN - 1); if right_parent_kv.can_merge() { let pos = right_parent_kv.merge(None); let parent_edge = unsafe { unwrap_unchecked(pos.into_node().ascend().ok()) }; diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs index d8ce47ed77d..c72e305a1f9 100644 --- a/library/alloc/src/collections/btree/set.rs +++ b/library/alloc/src/collections/btree/set.rs @@ -679,7 +679,7 @@ impl<T: Ord> BTreeSet<T> { /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn pop_first(&mut self) -> Option<T> { - self.map.first_entry().map(|entry| entry.remove_entry().0) + self.map.pop_first().map(|kv| kv.0) } /// Removes the last value from the set and returns it, if any. @@ -701,7 +701,7 @@ impl<T: Ord> BTreeSet<T> { /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn pop_last(&mut self) -> Option<T> { - self.map.last_entry().map(|entry| entry.remove_entry().0) + self.map.pop_last().map(|kv| kv.0) } /// Adds a value to the set. 
@@ -975,6 +975,7 @@ impl<T> BTreeSet<T> { /// v.insert(1); /// assert_eq!(v.len(), 1); /// ``` + #[doc(alias = "length")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] pub const fn len(&self) -> usize { diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs index 4d05bc4ebfa..fd19c0078a7 100644 --- a/library/alloc/src/collections/btree/set/tests.rs +++ b/library/alloc/src/collections/btree/set/tests.rs @@ -696,8 +696,10 @@ fn test_first_last() { assert_eq!(a.pop_last(), None); } +// Unlike the function with the same name in map/tests, returns no values. +// Which also means it returns different predetermined pseudo-random keys, +// and the test cases using this function explore slightly different trees. fn rand_data(len: usize) -> Vec<u32> { - assert!(len <= 70029); // from that point on numbers repeat let mut rng = DeterministicRng::new(); Vec::from_iter((0..len).map(|_| rng.next())) } diff --git a/library/alloc/src/collections/btree/split.rs b/library/alloc/src/collections/btree/split.rs index 6108c139bb3..4561c8eaf47 100644 --- a/library/alloc/src/collections/btree/split.rs +++ b/library/alloc/src/collections/btree/split.rs @@ -53,6 +53,9 @@ impl<K, V> Root<K, V> { } } + /// Stock up or merge away any underfull nodes on the right border of the + /// tree. The other nodes, those that are not the root nor a rightmost edge, + /// must already have at least MIN_LEN elements. fn fix_right_border(&mut self) { self.fix_top(); @@ -72,6 +75,7 @@ impl<K, V> Root<K, V> { } cur_node = last_kv.into_right_child(); } + debug_assert!(cur_node.len() > MIN_LEN); } } @@ -98,6 +102,7 @@ impl<K, V> Root<K, V> { } cur_node = first_kv.into_left_child(); } + debug_assert!(cur_node.len() > MIN_LEN); } } diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs index 412c65681e6..397e774f1a0 100644 --- a/library/alloc/src/collections/linked_list.rs +++ b/library/alloc/src/collections/linked_list.rs @@ -593,6 +593,7 @@ impl<T> LinkedList<T> { /// dl.push_back(3); /// assert_eq!(dl.len(), 3); /// ``` + #[doc(alias = "length")] #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { @@ -1099,68 +1100,6 @@ impl<T> ExactSizeIterator for IterMut<'_, T> {} #[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IterMut<'_, T> {} -impl<T> IterMut<'_, T> { - /// Inserts the given element just after the element most recently returned by `.next()`. - /// The inserted element does not appear in the iteration. - /// - /// This method will be removed soon. - #[inline] - #[unstable( - feature = "linked_list_extras", - reason = "this is probably better handled by a cursor type -- we'll see", - issue = "27794" - )] - #[rustc_deprecated( - reason = "Deprecated in favor of CursorMut methods. 
This method will be removed soon.", - since = "1.47.0" - )] - pub fn insert_next(&mut self, element: T) { - match self.head { - // `push_back` is okay with aliasing `element` references - None => self.list.push_back(element), - Some(head) => unsafe { - let prev = match head.as_ref().prev { - // `push_front` is okay with aliasing nodes - None => return self.list.push_front(element), - Some(prev) => prev, - }; - - let node = Some( - Box::leak(box Node { next: Some(head), prev: Some(prev), element }).into(), - ); - - // Not creating references to entire nodes to not invalidate the - // reference to `element` we handed to the user. - (*prev.as_ptr()).next = node; - (*head.as_ptr()).prev = node; - - self.list.len += 1; - }, - } - } - - /// Provides a reference to the next element, without changing the iterator. - /// - /// This method will be removed soon. - #[inline] - #[unstable( - feature = "linked_list_extras", - reason = "this is probably better handled by a cursor type -- we'll see", - issue = "27794" - )] - #[rustc_deprecated( - reason = "Deprecated in favor of CursorMut methods. This method will be removed soon.", - since = "1.47.0" - )] - pub fn peek_next(&mut self) -> Option<&mut T> { - if self.len == 0 { - None - } else { - unsafe { self.head.as_mut().map(|node| &mut node.as_mut().element) } - } - } -} - /// A cursor over a `LinkedList`. /// /// A `Cursor` is like an iterator, except that it can freely seek back-and-forth. diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 85c809e0d18..f8fad6de1a3 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -1038,6 +1038,7 @@ impl<T> VecDeque<T> { /// v.push_back(1); /// assert_eq!(v.len(), 1); /// ``` + #[doc(alias = "length")] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { count(self.tail, self.head, self.cap()) @@ -1080,8 +1081,6 @@ impl<T> VecDeque<T> { /// # Examples /// /// ``` - /// #![feature(deque_range)] - /// /// use std::collections::VecDeque; /// /// let v: VecDeque<_> = vec![1, 2, 3].into_iter().collect(); @@ -1093,7 +1092,7 @@ impl<T> VecDeque<T> { /// assert_eq!(all.len(), 3); /// ``` #[inline] - #[unstable(feature = "deque_range", issue = "74217")] + #[stable(feature = "deque_range", since = "1.51.0")] pub fn range<R>(&self, range: R) -> Iter<'_, T> where R: RangeBounds<usize>, @@ -1117,8 +1116,6 @@ impl<T> VecDeque<T> { /// # Examples /// /// ``` - /// #![feature(deque_range)] - /// /// use std::collections::VecDeque; /// /// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect(); @@ -1134,7 +1131,7 @@ impl<T> VecDeque<T> { /// assert_eq!(v, vec![2, 4, 12]); /// ``` #[inline] - #[unstable(feature = "deque_range", issue = "74217")] + #[stable(feature = "deque_range", since = "1.51.0")] pub fn range_mut<R>(&mut self, range: R) -> IterMut<'_, T> where R: RangeBounds<usize>, @@ -1469,6 +1466,8 @@ impl<T> VecDeque<T> { #[inline] fn is_contiguous(&self) -> bool { + // FIXME: Should we consider `head == 0` to mean + // that `self` is contiguous? 
self.tail <= self.head } @@ -2198,7 +2197,7 @@ impl<T> VecDeque<T> { if self.is_contiguous() { let tail = self.tail; let head = self.head; - return unsafe { &mut self.buffer_as_mut_slice()[tail..head] }; + return unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0 }; } let buf = self.buf.ptr(); @@ -2224,7 +2223,13 @@ impl<T> VecDeque<T> { self.tail = 0; self.head = len; } - } else if free >= self.head { + } else if free > self.head { + // FIXME: We currently do not consider ....ABCDEFGH + // to be contiguous because `head` would be `0` in this + // case. While we probably want to change this it + // isn't trivial as a few places expect `is_contiguous` + // to mean that we can just slice using `buf[tail..head]`. + // there is enough free space to copy the head in one go, // this means that we first shift the tail forwards, and then // copy the head to the correct position. @@ -2238,7 +2243,7 @@ impl<T> VecDeque<T> { // ...ABCDEFGH. self.tail = self.head; - self.head = self.tail + len; + self.head = self.wrap_add(self.tail, len); } } else { // free is smaller than both head and tail, @@ -2278,7 +2283,7 @@ impl<T> VecDeque<T> { let tail = self.tail; let head = self.head; - unsafe { &mut self.buffer_as_mut_slice()[tail..head] } + unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0 } } /// Rotates the double-ended queue `mid` places to the left. @@ -2785,8 +2790,12 @@ impl<T> From<Vec<T>> for VecDeque<T> { let len = other.len(); // We need to extend the buf if it's not a power of two, too small - // or doesn't have at least one free space - if !buf.capacity().is_power_of_two() + // or doesn't have at least one free space. + // We check if `T` is a ZST in the first condition, + // because `usize::MAX` (the capacity returned by `capacity()` for ZST) + // is not a power of two and thus it'll always try + // to reserve more memory which will panic for ZST (rust-lang/rust#78532) + if (!buf.capacity().is_power_of_two() && mem::size_of::<T>() != 0) || (buf.capacity() < (MINIMUM_CAPACITY + 1)) || (buf.capacity() == len) { @@ -2839,7 +2848,7 @@ impl<T> From<VecDeque<T>> for Vec<T> { let len = other.len(); let cap = other.cap(); - if other.head != 0 { + if other.tail != 0 { ptr::copy(buf.add(other.tail), buf, len); } Vec::from_raw_parts(buf, len, cap) diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs index d74f91c752c..21f52af056b 100644 --- a/library/alloc/src/collections/vec_deque/tests.rs +++ b/library/alloc/src/collections/vec_deque/tests.rs @@ -211,6 +211,20 @@ fn make_contiguous_small_free() { } #[test] +fn make_contiguous_head_to_end() { + let mut dq = VecDeque::with_capacity(3); + dq.push_front('B'); + dq.push_front('A'); + dq.push_back('C'); + dq.make_contiguous(); + let expected_tail = 0; + let expected_head = 3; + assert_eq!(expected_tail, dq.tail); + assert_eq!(expected_head, dq.head); + assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices()); +} + +#[test] fn test_remove() { // This test checks that every single combination of tail position, length, and // removal position is tested. Capacity 15 should be large enough to cover every case. 
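The `vec_deque` hunks above stabilize `VecDeque::range` and `VecDeque::range_mut` (as 1.51.0) alongside the `make_contiguous` and `Vec`/`VecDeque` conversion fixes. A minimal sketch of the stabilized range APIs, adapted from the doctests in the diff:

```rust
use std::collections::VecDeque;

fn main() {
    let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();

    // `range` iterates over the chosen sub-range of the deque.
    let tail: Vec<_> = v.range(2..).copied().collect();
    assert_eq!(tail, vec![3]);

    // `range_mut` yields mutable references into that sub-range.
    for value in v.range_mut(2..) {
        *value += 1;
    }
    assert_eq!(v, vec![1, 2, 4]);
}
```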
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 3ac34c9ae28..e6db66ac571 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -112,8 +112,7 @@ #![feature(never_type)] #![feature(nll)] #![feature(nonnull_slice_from_raw_parts)] -#![cfg_attr(bootstrap, feature(optin_builtin_traits))] -#![cfg_attr(not(bootstrap), feature(auto_traits))] +#![feature(auto_traits)] #![feature(or_patterns)] #![feature(pattern)] #![feature(ptr_internals)] @@ -140,6 +139,7 @@ #![feature(try_trait)] #![feature(type_alias_impl_trait)] #![feature(associated_type_bounds)] +#![feature(slice_group_by)] // Allow testing this library #[cfg(test)] diff --git a/library/alloc/src/macros.rs b/library/alloc/src/macros.rs index a992d768d63..7d4eff6185d 100644 --- a/library/alloc/src/macros.rs +++ b/library/alloc/src/macros.rs @@ -29,6 +29,10 @@ /// to the same boxed integer value, not five references pointing to independently /// boxed integers. /// +/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector. +/// This will still evaluate `expr`, however, and immediately drop the resulting value, so +/// be mindful of side effects. +/// /// [`Vec`]: crate::vec::Vec #[cfg(not(test))] #[macro_export] diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index edee439ad8d..36b7efc33a8 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -465,6 +465,7 @@ impl<T, A: Allocator> RawVec<T, A> { // above `RawVec::grow_amortized` for details. (The `A` parameter isn't // significant, because the number of different `A` types seen in practice is // much smaller than the number of `T` types.) +#[inline(never)] fn finish_grow<A>( new_layout: Result<Layout, LayoutError>, current_memory: Option<(NonNull<u8>, Layout)>, diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index a96be57143d..57df28a15c8 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -1749,7 +1749,7 @@ struct WeakInner<'a> { strong: &'a Cell<usize>, } -impl<T: ?Sized> Weak<T> { +impl<T> Weak<T> { /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`. /// /// The pointer is valid only if there are some strong references. The pointer may be dangling, @@ -1882,7 +1882,9 @@ impl<T: ?Sized> Weak<T> { // SAFETY: we now have recovered the original Weak pointer, so can create the Weak. Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } } } +} +impl<T: ?Sized> Weak<T> { /// Attempts to upgrade the `Weak` pointer to an [`Rc`], delaying /// dropping of the inner value if successful. /// @@ -2040,7 +2042,7 @@ impl<T: ?Sized> Drop for Weak<T> { // the strong pointers have disappeared. 
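// (Editorial note: the switch to `Layout::for_value_raw` just below computes
// the layout from the raw pointer alone, instead of creating a reference into
// the allocation, whose value has already been dropped at this point, as the
// old `Layout::for_value(self.ptr.as_ref())` call did.)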
if inner.weak() == 0 { unsafe { - Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())); + Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())); } } } diff --git a/library/alloc/src/rc/tests.rs b/library/alloc/src/rc/tests.rs index bb5c3f4f904..2d183a8c88c 100644 --- a/library/alloc/src/rc/tests.rs +++ b/library/alloc/src/rc/tests.rs @@ -209,30 +209,6 @@ fn into_from_weak_raw() { } #[test] -fn test_into_from_weak_raw_unsized() { - use std::fmt::Display; - use std::string::ToString; - - let arc: Rc<str> = Rc::from("foo"); - let weak: Weak<str> = Rc::downgrade(&arc); - - let ptr = Weak::into_raw(weak.clone()); - let weak2 = unsafe { Weak::from_raw(ptr) }; - - assert_eq!(unsafe { &*ptr }, "foo"); - assert!(weak.ptr_eq(&weak2)); - - let arc: Rc<dyn Display> = Rc::new(123); - let weak: Weak<dyn Display> = Rc::downgrade(&arc); - - let ptr = Weak::into_raw(weak.clone()); - let weak2 = unsafe { Weak::from_raw(ptr) }; - - assert_eq!(unsafe { &*ptr }.to_string(), "123"); - assert!(weak.ptr_eq(&weak2)); -} - -#[test] fn get_mut() { let mut x = Rc::new(3); *Rc::get_mut(&mut x).unwrap() = 4; diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index 064700fc72c..cb015b94930 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -110,6 +110,8 @@ pub use core::slice::{Chunks, Windows}; pub use core::slice::{ChunksExact, ChunksExactMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{ChunksMut, Split, SplitMut}; +#[unstable(feature = "slice_group_by", issue = "80552")] +pub use core::slice::{GroupBy, GroupByMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{Iter, IterMut}; #[stable(feature = "rchunks", since = "1.31.0")] diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index ce216e5336e..91988928593 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -1388,6 +1388,7 @@ impl String { /// assert_eq!(fancy_f.len(), 4); /// assert_eq!(fancy_f.chars().count(), 3); /// ``` + #[doc(alias = "length")] #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { @@ -1413,7 +1414,7 @@ impl String { self.len() == 0 } - /// Splits the string into two at the given index. + /// Splits the string into two at the given byte index. /// /// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and /// the returned `String` contains bytes `[at, len)`. `at` must be on the diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index 9d478a302e9..85c0a9f0857 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -14,7 +14,7 @@ use core::hint; use core::intrinsics::abort; use core::iter; use core::marker::{PhantomData, Unpin, Unsize}; -use core::mem::{self, align_of_val, size_of_val}; +use core::mem::{self, align_of_val_raw, size_of_val}; use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver}; use core::pin::Pin; use core::ptr::{self, NonNull}; @@ -1535,7 +1535,7 @@ struct WeakInner<'a> { strong: &'a atomic::AtomicUsize, } -impl<T: ?Sized> Weak<T> { +impl<T> Weak<T> { /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`. /// /// The pointer is valid only if there are some strong references. The pointer may be dangling, @@ -1668,7 +1668,9 @@ impl<T: ?Sized> Weak<T> { // SAFETY: we now have recovered the original Weak pointer, so can create the Weak. 
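// (Editorial sketch of the round-trip these two methods give, valid for
// sized `T` now that they live in the plain `impl<T> Weak<T>` block:
//     let weak = Arc::downgrade(&Arc::new(5));
//     let raw = weak.into_raw();
//     let weak = unsafe { Weak::from_raw(raw) };
// )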
unsafe { Weak { ptr: NonNull::new_unchecked(ptr) } } } +} +impl<T: ?Sized> Weak<T> { /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying /// dropping of the inner value if successful. /// @@ -1925,7 +1927,7 @@ impl<T: ?Sized> Drop for Weak<T> { if inner.weak.fetch_sub(1, Release) == 1 { acquire!(inner.weak); - unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) } + unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) } } } } @@ -2364,7 +2366,7 @@ unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize { // Because it is `?Sized`, it will always be the last field in memory. // Note: This is a detail of the current implementation of the compiler, // and is not a guaranteed language detail. Do not rely on it outside of std. - unsafe { data_offset_align(align_of_val(&*ptr)) } + unsafe { data_offset_align(align_of_val_raw(ptr)) } } #[inline] diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs index 77f328d48f9..e8e1e66da5e 100644 --- a/library/alloc/src/sync/tests.rs +++ b/library/alloc/src/sync/tests.rs @@ -159,30 +159,6 @@ fn into_from_weak_raw() { } #[test] -fn test_into_from_weak_raw_unsized() { - use std::fmt::Display; - use std::string::ToString; - - let arc: Arc<str> = Arc::from("foo"); - let weak: Weak<str> = Arc::downgrade(&arc); - - let ptr = Weak::into_raw(weak.clone()); - let weak2 = unsafe { Weak::from_raw(ptr) }; - - assert_eq!(unsafe { &*ptr }, "foo"); - assert!(weak.ptr_eq(&weak2)); - - let arc: Arc<dyn Display> = Arc::new(123); - let weak: Weak<dyn Display> = Arc::downgrade(&arc); - - let ptr = Weak::into_raw(weak.clone()); - let weak2 = unsafe { Weak::from_raw(ptr) }; - - assert_eq!(unsafe { &*ptr }.to_string(), "123"); - assert!(weak.ptr_eq(&weak2)); -} - -#[test] fn test_cowarc_clone_make_mut() { let mut cow0 = Arc::new(75); let mut cow1 = cow0.clone(); diff --git a/library/alloc/src/vec/cow.rs b/library/alloc/src/vec/cow.rs new file mode 100644 index 00000000000..73d15d30647 --- /dev/null +++ b/library/alloc/src/vec/cow.rs @@ -0,0 +1,35 @@ +use crate::borrow::Cow; +use core::iter::FromIterator; + +use super::Vec; + +#[stable(feature = "cow_from_vec", since = "1.8.0")] +impl<'a, T: Clone> From<&'a [T]> for Cow<'a, [T]> { + fn from(s: &'a [T]) -> Cow<'a, [T]> { + Cow::Borrowed(s) + } +} + +#[stable(feature = "cow_from_vec", since = "1.8.0")] +impl<'a, T: Clone> From<Vec<T>> for Cow<'a, [T]> { + fn from(v: Vec<T>) -> Cow<'a, [T]> { + Cow::Owned(v) + } +} + +#[stable(feature = "cow_from_vec_ref", since = "1.28.0")] +impl<'a, T: Clone> From<&'a Vec<T>> for Cow<'a, [T]> { + fn from(v: &'a Vec<T>) -> Cow<'a, [T]> { + Cow::Borrowed(v.as_slice()) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T> FromIterator<T> for Cow<'a, [T]> +where + T: Clone, +{ + fn from_iter<I: IntoIterator<Item = T>>(it: I) -> Cow<'a, [T]> { + Cow::Owned(FromIterator::from_iter(it)) + } +} diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs new file mode 100644 index 00000000000..fb32d144f87 --- /dev/null +++ b/library/alloc/src/vec/drain.rs @@ -0,0 +1,155 @@ +use crate::alloc::{Allocator, Global}; +use core::fmt; +use core::iter::{FusedIterator, TrustedLen}; +use core::mem::{self}; +use core::ptr::{self, NonNull}; +use core::slice::{self}; + +use super::Vec; + +/// A draining iterator for `Vec<T>`. +/// +/// This `struct` is created by [`Vec::drain`]. +/// See its documentation for more. 
+/// +/// # Example +/// +/// ``` +/// let mut v = vec![0, 1, 2]; +/// let iter: std::vec::Drain<_> = v.drain(..); +/// ``` +#[stable(feature = "drain", since = "1.6.0")] +pub struct Drain< + 'a, + T: 'a, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global, +> { + /// Index of tail to preserve + pub(super) tail_start: usize, + /// Length of tail + pub(super) tail_len: usize, + /// Current remaining range to remove + pub(super) iter: slice::Iter<'a, T>, + pub(super) vec: NonNull<Vec<T, A>>, +} + +#[stable(feature = "collection_debug", since = "1.17.0")] +impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() + } +} + +impl<'a, T, A: Allocator> Drain<'a, T, A> { + /// Returns the remaining items of this iterator as a slice. + /// + /// # Examples + /// + /// ``` + /// let mut vec = vec!['a', 'b', 'c']; + /// let mut drain = vec.drain(..); + /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']); + /// let _ = drain.next().unwrap(); + /// assert_eq!(drain.as_slice(), &['b', 'c']); + /// ``` + #[stable(feature = "vec_drain_as_slice", since = "1.46.0")] + pub fn as_slice(&self) -> &[T] { + self.iter.as_slice() + } + + /// Returns a reference to the underlying allocator. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn allocator(&self) -> &A { + unsafe { self.vec.as_ref().allocator() } + } +} + +#[stable(feature = "vec_drain_as_slice", since = "1.46.0")] +impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> { + fn as_ref(&self) -> &[T] { + self.as_slice() + } +} + +#[stable(feature = "drain", since = "1.6.0")] +unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {} +#[stable(feature = "drain", since = "1.6.0")] +unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {} + +#[stable(feature = "drain", since = "1.6.0")] +impl<T, A: Allocator> Iterator for Drain<'_, T, A> { + type Item = T; + + #[inline] + fn next(&mut self) -> Option<T> { + self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) }) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +#[stable(feature = "drain", since = "1.6.0")] +impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> { + #[inline] + fn next_back(&mut self) -> Option<T> { + self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) }) + } +} + +#[stable(feature = "drain", since = "1.6.0")] +impl<T, A: Allocator> Drop for Drain<'_, T, A> { + fn drop(&mut self) { + /// Continues dropping the remaining elements in the `Drain`, then moves back the + /// un-`Drain`ed elements to restore the original `Vec`. + struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>); + + impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> { + fn drop(&mut self) { + // Continue the same loop we have below. If the loop already finished, this does + // nothing. 
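// (Editorial note: because this runs in the guard's `Drop`, the tail is
// restored even if dropping an element in the outer loop panics; on the
// normal path the same code runs via the `DropGuard(self)` built at the
// end of `drop`.)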
+ self.0.for_each(drop); + + if self.0.tail_len > 0 { + unsafe { + let source_vec = self.0.vec.as_mut(); + // memmove back untouched tail, update to new length + let start = source_vec.len(); + let tail = self.0.tail_start; + if tail != start { + let src = source_vec.as_ptr().add(tail); + let dst = source_vec.as_mut_ptr().add(start); + ptr::copy(src, dst, self.0.tail_len); + } + source_vec.set_len(start + self.0.tail_len); + } + } + } + } + + // exhaust self first + while let Some(item) = self.next() { + let guard = DropGuard(self); + drop(item); + mem::forget(guard); + } + + // Drop a `DropGuard` to move back the non-drained tail of `self`. + DropGuard(self); + } +} + +#[stable(feature = "drain", since = "1.6.0")] +impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> { + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {} + +#[stable(feature = "fused", since = "1.26.0")] +impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {} diff --git a/library/alloc/src/vec/drain_filter.rs b/library/alloc/src/vec/drain_filter.rs new file mode 100644 index 00000000000..3c37c92ae44 --- /dev/null +++ b/library/alloc/src/vec/drain_filter.rs @@ -0,0 +1,143 @@ +use crate::alloc::{Allocator, Global}; +use core::ptr::{self}; +use core::slice::{self}; + +use super::Vec; + +/// An iterator which uses a closure to determine if an element should be removed. +/// +/// This struct is created by [`Vec::drain_filter`]. +/// See its documentation for more. +/// +/// # Example +/// +/// ``` +/// #![feature(drain_filter)] +/// +/// let mut v = vec![0, 1, 2]; +/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0); +/// ``` +#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] +#[derive(Debug)] +pub struct DrainFilter< + 'a, + T, + F, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, +> where + F: FnMut(&mut T) -> bool, +{ + pub(super) vec: &'a mut Vec<T, A>, + /// The index of the item that will be inspected by the next call to `next`. + pub(super) idx: usize, + /// The number of items that have been drained (removed) thus far. + pub(super) del: usize, + /// The original length of `vec` prior to draining. + pub(super) old_len: usize, + /// The filter test predicate. + pub(super) pred: F, + /// A flag that indicates a panic has occurred in the filter test predicate. + /// This is used as a hint in the drop implementation to prevent consumption + /// of the remainder of the `DrainFilter`. Any unprocessed items will be + /// backshifted in the `vec`, but no further items will be dropped or + /// tested by the filter predicate. + pub(super) panic_flag: bool, +} + +impl<T, F, A: Allocator> DrainFilter<'_, T, F, A> +where + F: FnMut(&mut T) -> bool, +{ + /// Returns a reference to the underlying allocator. 
+ #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn allocator(&self) -> &A { + self.vec.allocator() + } +} + +#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] +impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A> +where + F: FnMut(&mut T) -> bool, +{ + type Item = T; + + fn next(&mut self) -> Option<T> { + unsafe { + while self.idx < self.old_len { + let i = self.idx; + let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len); + self.panic_flag = true; + let drained = (self.pred)(&mut v[i]); + self.panic_flag = false; + // Update the index *after* the predicate is called. If the index + // is updated prior and the predicate panics, the element at this + // index would be leaked. + self.idx += 1; + if drained { + self.del += 1; + return Some(ptr::read(&v[i])); + } else if self.del > 0 { + let del = self.del; + let src: *const T = &v[i]; + let dst: *mut T = &mut v[i - del]; + ptr::copy_nonoverlapping(src, dst, 1); + } + } + None + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (0, Some(self.old_len - self.idx)) + } +} + +#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] +impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A> +where + F: FnMut(&mut T) -> bool, +{ + fn drop(&mut self) { + struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator> + where + F: FnMut(&mut T) -> bool, + { + drain: &'b mut DrainFilter<'a, T, F, A>, + } + + impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A> + where + F: FnMut(&mut T) -> bool, + { + fn drop(&mut self) { + unsafe { + if self.drain.idx < self.drain.old_len && self.drain.del > 0 { + // This is a pretty messed up state, and there isn't really an + // obviously right thing to do. We don't want to keep trying + // to execute `pred`, so we just backshift all the unprocessed + // elements and tell the vec that they still exist. The backshift + // is required to prevent a double-drop of the last successfully + // drained item prior to a panic in the predicate. + let ptr = self.drain.vec.as_mut_ptr(); + let src = ptr.add(self.drain.idx); + let dst = src.sub(self.drain.del); + let tail_len = self.drain.old_len - self.drain.idx; + src.copy_to(dst, tail_len); + } + self.drain.vec.set_len(self.drain.old_len - self.drain.del); + } + } + } + + let backshift = BackshiftOnDrop { drain: self }; + + // Attempt to consume any remaining elements if the filter predicate + // has not yet panicked. We'll backshift any remaining elements + // whether we've already panicked or if the consumption here panics. + if !backshift.drain.panic_flag { + backshift.drain.for_each(drop); + } + } +} diff --git a/library/alloc/src/vec/in_place_drop.rs b/library/alloc/src/vec/in_place_drop.rs new file mode 100644 index 00000000000..354d25c2389 --- /dev/null +++ b/library/alloc/src/vec/in_place_drop.rs @@ -0,0 +1,24 @@ +use core::ptr::{self}; +use core::slice::{self}; + +// A helper struct for in-place iteration that drops the destination slice of iteration, +// i.e. the head. The source slice (the tail) is dropped by IntoIter. 
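// (Editorial note: `inner..dst` is the prefix of results already written by
// an in-place collect; if the iterator pipeline panics, this guard drops
// exactly that prefix and nothing else.)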
+pub(super) struct InPlaceDrop<T> { + pub(super) inner: *mut T, + pub(super) dst: *mut T, +} + +impl<T> InPlaceDrop<T> { + fn len(&self) -> usize { + unsafe { self.dst.offset_from(self.inner) as usize } + } +} + +impl<T> Drop for InPlaceDrop<T> { + #[inline] + fn drop(&mut self) { + unsafe { + ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len())); + } + } +} diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs new file mode 100644 index 00000000000..f131d06bb18 --- /dev/null +++ b/library/alloc/src/vec/into_iter.rs @@ -0,0 +1,283 @@ +use crate::alloc::{Allocator, Global}; +use crate::raw_vec::RawVec; +use core::fmt; +use core::intrinsics::arith_offset; +use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccess}; +use core::marker::PhantomData; +use core::mem::{self}; +use core::ptr::{self, NonNull}; +use core::slice::{self}; + +/// An iterator that moves out of a vector. +/// +/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec) +/// (provided by the [`IntoIterator`] trait). +/// +/// # Example +/// +/// ``` +/// let v = vec![0, 1, 2]; +/// let iter: std::vec::IntoIter<_> = v.into_iter(); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub struct IntoIter< + T, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, +> { + pub(super) buf: NonNull<T>, + pub(super) phantom: PhantomData<T>, + pub(super) cap: usize, + pub(super) alloc: A, + pub(super) ptr: *const T, + pub(super) end: *const T, +} + +#[stable(feature = "vec_intoiter_debug", since = "1.13.0")] +impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("IntoIter").field(&self.as_slice()).finish() + } +} + +impl<T, A: Allocator> IntoIter<T, A> { + /// Returns the remaining items of this iterator as a slice. + /// + /// # Examples + /// + /// ``` + /// let vec = vec!['a', 'b', 'c']; + /// let mut into_iter = vec.into_iter(); + /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); + /// let _ = into_iter.next().unwrap(); + /// assert_eq!(into_iter.as_slice(), &['b', 'c']); + /// ``` + #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] + pub fn as_slice(&self) -> &[T] { + unsafe { slice::from_raw_parts(self.ptr, self.len()) } + } + + /// Returns the remaining items of this iterator as a mutable slice. + /// + /// # Examples + /// + /// ``` + /// let vec = vec!['a', 'b', 'c']; + /// let mut into_iter = vec.into_iter(); + /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); + /// into_iter.as_mut_slice()[2] = 'z'; + /// assert_eq!(into_iter.next().unwrap(), 'a'); + /// assert_eq!(into_iter.next().unwrap(), 'b'); + /// assert_eq!(into_iter.next().unwrap(), 'z'); + /// ``` + #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] + pub fn as_mut_slice(&mut self) -> &mut [T] { + unsafe { &mut *self.as_raw_mut_slice() } + } + + /// Returns a reference to the underlying allocator. 
+ #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn allocator(&self) -> &A { + &self.alloc + } + + fn as_raw_mut_slice(&mut self) -> *mut [T] { + ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len()) + } + + pub(super) fn drop_remaining(&mut self) { + unsafe { + ptr::drop_in_place(self.as_mut_slice()); + } + self.ptr = self.end; + } + + /// Relinquishes the backing allocation, equivalent to + /// `ptr::write(&mut self, Vec::new().into_iter())` + pub(super) fn forget_allocation(&mut self) { + self.cap = 0; + self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) }; + self.ptr = self.buf.as_ptr(); + self.end = self.buf.as_ptr(); + } +} + +#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")] +impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> { + fn as_ref(&self) -> &[T] { + self.as_slice() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {} +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<T: Sync, A: Allocator> Sync for IntoIter<T, A> {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T, A: Allocator> Iterator for IntoIter<T, A> { + type Item = T; + + #[inline] + fn next(&mut self) -> Option<T> { + if self.ptr as *const _ == self.end { + None + } else if mem::size_of::<T>() == 0 { + // purposefully don't use 'ptr.offset' because for + // vectors with 0-size elements this would return the + // same pointer. + self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T }; + + // Make up a value of this ZST. + Some(unsafe { mem::zeroed() }) + } else { + let old = self.ptr; + self.ptr = unsafe { self.ptr.offset(1) }; + + Some(unsafe { ptr::read(old) }) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let exact = if mem::size_of::<T>() == 0 { + (self.end as usize).wrapping_sub(self.ptr as usize) + } else { + unsafe { self.end.offset_from(self.ptr) as usize } + }; + (exact, Some(exact)) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item + where + Self: TrustedRandomAccess, + { + // SAFETY: the caller must guarantee that `i` is in bounds of the + // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)` + // is guaranteed to pointer to an element of the `Vec<T>` and + // thus guaranteed to be valid to dereference. + // + // Also note the implementation of `Self: TrustedRandomAccess` requires + // that `T: Copy` so reading elements from the buffer doesn't invalidate + // them for `Drop`. + unsafe { + if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { + #[inline] + fn next_back(&mut self) -> Option<T> { + if self.end == self.ptr { + None + } else if mem::size_of::<T>() == 0 { + // See above for why 'ptr.offset' isn't used + self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T }; + + // Make up a value of this ZST. 
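// (Editorial note: an inhabited ZST occupies no bytes and has exactly one
// value, so `mem::zeroed()` conjures it without reading the buffer; the
// `end` adjustment above is all that tracks how many values remain.)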
+ Some(unsafe { mem::zeroed() }) + } else { + self.end = unsafe { self.end.offset(-1) }; + + Some(unsafe { ptr::read(self.end) }) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { + fn is_empty(&self) -> bool { + self.ptr == self.end + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {} + +#[doc(hidden)] +#[unstable(issue = "none", feature = "std_internals")] +// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr +// and thus we can't implement drop-handling +unsafe impl<T, A: Allocator> TrustedRandomAccess for IntoIter<T, A> +where + T: Copy, +{ + fn may_have_side_effect() -> bool { + false + } +} + +#[stable(feature = "vec_into_iter_clone", since = "1.8.0")] +impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> { + #[cfg(not(test))] + fn clone(&self) -> Self { + self.as_slice().to_vec_in(self.alloc.clone()).into_iter() + } + #[cfg(test)] + fn clone(&self) -> Self { + crate::slice::to_vec(self.as_slice(), self.alloc.clone()).into_iter() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> { + fn drop(&mut self) { + struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>); + + impl<T, A: Allocator> Drop for DropGuard<'_, T, A> { + fn drop(&mut self) { + unsafe { + // `IntoIter::alloc` is not used anymore after this + let alloc = ptr::read(&self.0.alloc); + // RawVec handles deallocation + let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); + } + } + } + + let guard = DropGuard(self); + // destroy the remaining elements + unsafe { + ptr::drop_in_place(guard.0.as_raw_mut_slice()); + } + // now `guard` will be dropped and do the rest + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> { + type Source = Self; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut Self::Source { + self + } +} + +// internal helper trait for in-place iteration specialization. +#[rustc_specialization_trait] +pub(crate) trait AsIntoIter { + type Item; + fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item>; +} + +impl<T> AsIntoIter for IntoIter<T> { + type Item = T; + + fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> { + self + } +} diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs new file mode 100644 index 00000000000..b5739970b6e --- /dev/null +++ b/library/alloc/src/vec/is_zero.rs @@ -0,0 +1,71 @@ +use crate::boxed::Box; + +#[rustc_specialization_trait] +pub(super) unsafe trait IsZero { + /// Whether this value is zero + fn is_zero(&self) -> bool; +} + +macro_rules! 
impl_is_zero { + ($t:ty, $is_zero:expr) => { + unsafe impl IsZero for $t { + #[inline] + fn is_zero(&self) -> bool { + $is_zero(*self) + } + } + }; +} + +impl_is_zero!(i16, |x| x == 0); +impl_is_zero!(i32, |x| x == 0); +impl_is_zero!(i64, |x| x == 0); +impl_is_zero!(i128, |x| x == 0); +impl_is_zero!(isize, |x| x == 0); + +impl_is_zero!(u16, |x| x == 0); +impl_is_zero!(u32, |x| x == 0); +impl_is_zero!(u64, |x| x == 0); +impl_is_zero!(u128, |x| x == 0); +impl_is_zero!(usize, |x| x == 0); + +impl_is_zero!(bool, |x| x == false); +impl_is_zero!(char, |x| x == '\0'); + +impl_is_zero!(f32, |x: f32| x.to_bits() == 0); +impl_is_zero!(f64, |x: f64| x.to_bits() == 0); + +unsafe impl<T> IsZero for *const T { + #[inline] + fn is_zero(&self) -> bool { + (*self).is_null() + } +} + +unsafe impl<T> IsZero for *mut T { + #[inline] + fn is_zero(&self) -> bool { + (*self).is_null() + } +} + +// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null. +// For fat pointers, the bytes that would be the pointer metadata in the `Some` +// variant are padding in the `None` variant, so ignoring them and +// zero-initializing instead is ok. +// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of +// `SpecFromElem`. + +unsafe impl<T: ?Sized> IsZero for Option<&T> { + #[inline] + fn is_zero(&self) -> bool { + self.is_none() + } +} + +unsafe impl<T: ?Sized> IsZero for Option<Box<T>> { + #[inline] + fn is_zero(&self) -> bool { + self.is_none() + } +} diff --git a/library/alloc/src/vec.rs b/library/alloc/src/vec/mod.rs index 9fffb47aa59..2a83eb33fe3 100644 --- a/library/alloc/src/vec.rs +++ b/library/alloc/src/vec/mod.rs @@ -1,4 +1,3 @@ -// ignore-tidy-filelength //! A contiguous growable array type with heap-allocated contents, written //! `Vec<T>`. //! @@ -59,9 +58,7 @@ use core::convert::TryFrom; use core::fmt; use core::hash::{Hash, Hasher}; use core::intrinsics::{arith_offset, assume}; -use core::iter::{ - FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccess, -}; +use core::iter::FromIterator; use core::marker::PhantomData; use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ops::{self, Index, IndexMut, Range, RangeBounds}; @@ -74,6 +71,61 @@ use crate::boxed::Box; use crate::collections::TryReserveError; use crate::raw_vec::RawVec; +#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] +pub use self::drain_filter::DrainFilter; + +mod drain_filter; + +#[stable(feature = "vec_splice", since = "1.21.0")] +pub use self::splice::Splice; + +mod splice; + +#[stable(feature = "drain", since = "1.6.0")] +pub use self::drain::Drain; + +mod drain; + +mod cow; + +pub(crate) use self::into_iter::AsIntoIter; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::into_iter::IntoIter; + +mod into_iter; + +use self::is_zero::IsZero; + +mod is_zero; + +mod source_iter_marker; + +mod partial_eq; + +use self::spec_from_elem::SpecFromElem; + +mod spec_from_elem; + +use self::set_len_on_drop::SetLenOnDrop; + +mod set_len_on_drop; + +use self::in_place_drop::InPlaceDrop; + +mod in_place_drop; + +use self::spec_from_iter_nested::SpecFromIterNested; + +mod spec_from_iter_nested; + +use self::spec_from_iter::SpecFromIter; + +mod spec_from_iter; + +use self::spec_extend::SpecExtend; + +mod spec_extend; + /// A contiguous growable array type, written `Vec<T>` but pronounced 'vector'. 
/// /// # Examples @@ -1559,6 +1611,7 @@ impl<T, A: Allocator> Vec<T, A> { /// let a = vec![1, 2, 3]; /// assert_eq!(a.len(), 3); /// ``` + #[doc(alias = "length")] #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { @@ -1875,35 +1928,6 @@ impl<T, A: Allocator> Vec<T, A> { } } -// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. -// -// The idea is: The length field in SetLenOnDrop is a local variable -// that the optimizer will see does not alias with any stores through the Vec's data -// pointer. This is a workaround for alias analysis issue #32155 -struct SetLenOnDrop<'a> { - len: &'a mut usize, - local_len: usize, -} - -impl<'a> SetLenOnDrop<'a> { - #[inline] - fn new(len: &'a mut usize) -> Self { - SetLenOnDrop { local_len: *len, len } - } - - #[inline] - fn increment_len(&mut self, increment: usize) { - self.local_len += increment; - } -} - -impl Drop for SetLenOnDrop<'_> { - #[inline] - fn drop(&mut self) { - *self.len = self.local_len; - } -} - impl<T: PartialEq, A: Allocator> Vec<T, A> { /// Removes consecutive repeated elements in the vector according to the /// [`PartialEq`] trait implementation. @@ -1963,131 +1987,6 @@ pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec< <T as SpecFromElem>::from_elem(elem, n, alloc) } -// Specialization trait used for Vec::from_elem -trait SpecFromElem: Sized { - fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A>; -} - -impl<T: Clone> SpecFromElem for T { - default fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A> { - let mut v = Vec::with_capacity_in(n, alloc); - v.extend_with(n, ExtendElement(elem)); - v - } -} - -impl SpecFromElem for i8 { - #[inline] - fn from_elem<A: Allocator>(elem: i8, n: usize, alloc: A) -> Vec<i8, A> { - if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; - } - unsafe { - let mut v = Vec::with_capacity_in(n, alloc); - ptr::write_bytes(v.as_mut_ptr(), elem as u8, n); - v.set_len(n); - v - } - } -} - -impl SpecFromElem for u8 { - #[inline] - fn from_elem<A: Allocator>(elem: u8, n: usize, alloc: A) -> Vec<u8, A> { - if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; - } - unsafe { - let mut v = Vec::with_capacity_in(n, alloc); - ptr::write_bytes(v.as_mut_ptr(), elem, n); - v.set_len(n); - v - } - } -} - -impl<T: Clone + IsZero> SpecFromElem for T { - #[inline] - fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> { - if elem.is_zero() { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; - } - let mut v = Vec::with_capacity_in(n, alloc); - v.extend_with(n, ExtendElement(elem)); - v - } -} - -#[rustc_specialization_trait] -unsafe trait IsZero { - /// Whether this value is zero - fn is_zero(&self) -> bool; -} - -macro_rules! 
impl_is_zero { - ($t:ty, $is_zero:expr) => { - unsafe impl IsZero for $t { - #[inline] - fn is_zero(&self) -> bool { - $is_zero(*self) - } - } - }; -} - -impl_is_zero!(i16, |x| x == 0); -impl_is_zero!(i32, |x| x == 0); -impl_is_zero!(i64, |x| x == 0); -impl_is_zero!(i128, |x| x == 0); -impl_is_zero!(isize, |x| x == 0); - -impl_is_zero!(u16, |x| x == 0); -impl_is_zero!(u32, |x| x == 0); -impl_is_zero!(u64, |x| x == 0); -impl_is_zero!(u128, |x| x == 0); -impl_is_zero!(usize, |x| x == 0); - -impl_is_zero!(bool, |x| x == false); -impl_is_zero!(char, |x| x == '\0'); - -impl_is_zero!(f32, |x: f32| x.to_bits() == 0); -impl_is_zero!(f64, |x: f64| x.to_bits() == 0); - -unsafe impl<T> IsZero for *const T { - #[inline] - fn is_zero(&self) -> bool { - (*self).is_null() - } -} - -unsafe impl<T> IsZero for *mut T { - #[inline] - fn is_zero(&self) -> bool { - (*self).is_null() - } -} - -// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null. -// For fat pointers, the bytes that would be the pointer metadata in the `Some` -// variant are padding in the `None` variant, so ignoring them and -// zero-initializing instead is ok. -// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of -// `SpecFromElem`. - -unsafe impl<T: ?Sized> IsZero for Option<&T> { - #[inline] - fn is_zero(&self) -> bool { - self.is_none() - } -} - -unsafe impl<T: ?Sized> IsZero for Option<Box<T>> { - #[inline] - fn is_zero(&self) -> bool { - self.is_none() - } -} - //////////////////////////////////////////////////////////////////////////////// // Common trait implementations for Vec //////////////////////////////////////////////////////////////////////////////// @@ -2262,348 +2161,6 @@ impl<T, A: Allocator> Extend<T> for Vec<T, A> { } } -/// Specialization trait used for Vec::from_iter -/// -/// ## The delegation graph: -/// -/// ```text -/// +-------------+ -/// |FromIterator | -/// +-+-----------+ -/// | -/// v -/// +-+-------------------------------+ +---------------------+ -/// |SpecFromIter +---->+SpecFromIterNested | -/// |where I: | | |where I: | -/// | Iterator (default)----------+ | | Iterator (default) | -/// | vec::IntoIter | | | TrustedLen | -/// | SourceIterMarker---fallback-+ | | | -/// | slice::Iter | | | -/// | Iterator<Item = &Clone> | +---------------------+ -/// +---------------------------------+ -/// ``` -trait SpecFromIter<T, I> { - fn from_iter(iter: I) -> Self; -} - -/// Another specialization trait for Vec::from_iter -/// necessary to manually prioritize overlapping specializations -/// see [`SpecFromIter`] for details. -trait SpecFromIterNested<T, I> { - fn from_iter(iter: I) -> Self; -} - -impl<T, I> SpecFromIterNested<T, I> for Vec<T> -where - I: Iterator<Item = T>, -{ - default fn from_iter(mut iterator: I) -> Self { - // Unroll the first iteration, as the vector is going to be - // expanded on this iteration in every case when the iterable is not - // empty, but the loop in extend_desugared() is not going to see the - // vector being full in the few subsequent loop iterations. - // So we get better branch prediction. 
- let mut vector = match iterator.next() { - None => return Vec::new(), - Some(element) => { - let (lower, _) = iterator.size_hint(); - let mut vector = Vec::with_capacity(lower.saturating_add(1)); - unsafe { - ptr::write(vector.as_mut_ptr(), element); - vector.set_len(1); - } - vector - } - }; - // must delegate to spec_extend() since extend() itself delegates - // to spec_from for empty Vecs - <Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator); - vector - } -} - -impl<T, I> SpecFromIterNested<T, I> for Vec<T> -where - I: TrustedLen<Item = T>, -{ - fn from_iter(iterator: I) -> Self { - let mut vector = Vec::new(); - // must delegate to spec_extend() since extend() itself delegates - // to spec_from for empty Vecs - vector.spec_extend(iterator); - vector - } -} - -impl<T, I> SpecFromIter<T, I> for Vec<T> -where - I: Iterator<Item = T>, -{ - default fn from_iter(iterator: I) -> Self { - SpecFromIterNested::from_iter(iterator) - } -} - -// A helper struct for in-place iteration that drops the destination slice of iteration, -// i.e. the head. The source slice (the tail) is dropped by IntoIter. -struct InPlaceDrop<T> { - inner: *mut T, - dst: *mut T, -} - -impl<T> InPlaceDrop<T> { - fn len(&self) -> usize { - unsafe { self.dst.offset_from(self.inner) as usize } - } -} - -impl<T> Drop for InPlaceDrop<T> { - #[inline] - fn drop(&mut self) { - unsafe { - ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len())); - } - } -} - -impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> { - fn from_iter(iterator: IntoIter<T>) -> Self { - // A common case is passing a vector into a function which immediately - // re-collects into a vector. We can short circuit this if the IntoIter - // has not been advanced at all. - // When it has been advanced We can also reuse the memory and move the data to the front. - // But we only do so when the resulting Vec wouldn't have more unused capacity - // than creating it through the generic FromIterator implementation would. That limitation - // is not strictly necessary as Vec's allocation behavior is intentionally unspecified. - // But it is a conservative choice. - let has_advanced = iterator.buf.as_ptr() as *const _ != iterator.ptr; - if !has_advanced || iterator.len() >= iterator.cap / 2 { - unsafe { - let it = ManuallyDrop::new(iterator); - if has_advanced { - ptr::copy(it.ptr, it.buf.as_ptr(), it.len()); - } - return Vec::from_raw_parts(it.buf.as_ptr(), it.len(), it.cap); - } - } - - let mut vec = Vec::new(); - // must delegate to spec_extend() since extend() itself delegates - // to spec_from for empty Vecs - vec.spec_extend(iterator); - vec - } -} - -fn write_in_place_with_drop<T>( - src_end: *const T, -) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> { - move |mut sink, item| { - unsafe { - // the InPlaceIterable contract cannot be verified precisely here since - // try_fold has an exclusive reference to the source pointer - // all we can do is check if it's still in range - debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation"); - ptr::write(sink.dst, item); - sink.dst = sink.dst.add(1); - } - Ok(sink) - } -} - -/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the -/// source allocation, i.e. executing the pipeline in place. -/// -/// The SourceIter parent trait is necessary for the specializing function to access the allocation -/// which is to be reused. But it is not sufficient for the specialization to be valid. See -/// additional bounds on the impl. 
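// (Editorial sketch of a pipeline this in-place specialization targets, as
// ordinary user code — the source is a `vec::IntoIter` and the item size and
// alignment are unchanged, so the original allocation is reused:
//     let src = vec![1u32, 2, 3];
//     let dst: Vec<u32> = src.into_iter().map(|x| x * 2).collect();
// )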
-#[rustc_unsafe_specialization_marker] -trait SourceIterMarker: SourceIter<Source: AsIntoIter> {} - -// The std-internal SourceIter/InPlaceIterable traits are only implemented by chains of -// Adapter<Adapter<Adapter<IntoIter>>> (all owned by core/std). Additional bounds -// on the adapter implementations (beyond `impl<I: Trait> Trait for Adapter<I>`) only depend on other -// traits already marked as specialization traits (Copy, TrustedRandomAccess, FusedIterator). -// I.e. the marker does not depend on lifetimes of user-supplied types. Modulo the Copy hole, which -// several other specializations already depend on. -impl<T> SourceIterMarker for T where T: SourceIter<Source: AsIntoIter> + InPlaceIterable {} - -impl<T, I> SpecFromIter<T, I> for Vec<T> -where - I: Iterator<Item = T> + SourceIterMarker, -{ - default fn from_iter(mut iterator: I) -> Self { - // Additional requirements which cannot expressed via trait bounds. We rely on const eval - // instead: - // a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic - // b) size match as required by Alloc contract - // c) alignments match as required by Alloc contract - if mem::size_of::<T>() == 0 - || mem::size_of::<T>() - != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>() - || mem::align_of::<T>() - != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>() - { - // fallback to more generic implementations - return SpecFromIterNested::from_iter(iterator); - } - - let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe { - let inner = iterator.as_inner().as_into_iter(); - ( - inner.buf.as_ptr(), - inner.ptr, - inner.buf.as_ptr() as *mut T, - inner.end as *const T, - inner.cap, - ) - }; - - // use try-fold since - // - it vectorizes better for some iterator adapters - // - unlike most internal iteration methods, it only takes a &mut self - // - it lets us thread the write pointer through its innards and get it back in the end - let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf }; - let sink = iterator - .try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(dst_end)) - .unwrap(); - // iteration succeeded, don't drop head - let dst = ManuallyDrop::new(sink).dst; - - let src = unsafe { iterator.as_inner().as_into_iter() }; - // check if SourceIter contract was upheld - // caveat: if they weren't we may not even make it to this point - debug_assert_eq!(src_buf, src.buf.as_ptr()); - // check InPlaceIterable contract. This is only possible if the iterator advanced the - // source pointer at all. 
If it uses unchecked access via TrustedRandomAccess - // then the source pointer will stay in its initial position and we can't use it as reference - if src.ptr != src_ptr { - debug_assert!( - dst as *const _ <= src.ptr, - "InPlaceIterable contract violation, write pointer advanced beyond read pointer" - ); - } - - // drop any remaining values at the tail of the source - src.drop_remaining(); - // but prevent drop of the allocation itself once IntoIter goes out of scope - src.forget_allocation(); - - let vec = unsafe { - let len = dst.offset_from(dst_buf) as usize; - Vec::from_raw_parts(dst_buf, len, cap) - }; - - vec - } -} - -impl<'a, T: 'a, I> SpecFromIter<&'a T, I> for Vec<T> -where - I: Iterator<Item = &'a T>, - T: Clone, -{ - default fn from_iter(iterator: I) -> Self { - SpecFromIter::from_iter(iterator.cloned()) - } -} - -// This utilizes `iterator.as_slice().to_vec()` since spec_extend -// must take more steps to reason about the final capacity + length -// and thus do more work. `to_vec()` directly allocates the correct amount -// and fills it exactly. -impl<'a, T: 'a + Clone> SpecFromIter<&'a T, slice::Iter<'a, T>> for Vec<T> { - #[cfg(not(test))] - fn from_iter(iterator: slice::Iter<'a, T>) -> Self { - iterator.as_slice().to_vec() - } - - // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is - // required for this method definition, is not available. Instead use the - // `slice::to_vec` function which is only available with cfg(test) - // NB see the slice::hack module in slice.rs for more information - #[cfg(test)] - fn from_iter(iterator: slice::Iter<'a, T>) -> Self { - crate::slice::to_vec(iterator.as_slice(), Global) - } -} - -// Specialization trait used for Vec::extend -trait SpecExtend<T, I> { - fn spec_extend(&mut self, iter: I); -} - -impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A> -where - I: Iterator<Item = T>, -{ - default fn spec_extend(&mut self, iter: I) { - self.extend_desugared(iter) - } -} - -impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A> -where - I: TrustedLen<Item = T>, -{ - default fn spec_extend(&mut self, iterator: I) { - // This is the case for a TrustedLen iterator. 
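// (Editorial note: the `SetLenOnDrop` used below keeps the length in a local
// the optimizer can prove does not alias the data pointer, and commits it on
// scope exit, so elements already written are not leaked if the iterator
// panics part-way through.)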
- let (low, high) = iterator.size_hint(); - if let Some(high_value) = high { - debug_assert_eq!( - low, - high_value, - "TrustedLen iterator's size hint is not exact: {:?}", - (low, high) - ); - } - if let Some(additional) = high { - self.reserve(additional); - unsafe { - let mut ptr = self.as_mut_ptr().add(self.len()); - let mut local_len = SetLenOnDrop::new(&mut self.len); - iterator.for_each(move |element| { - ptr::write(ptr, element); - ptr = ptr.offset(1); - // NB can't overflow since we would have had to alloc the address space - local_len.increment_len(1); - }); - } - } else { - self.extend_desugared(iterator) - } - } -} - -impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> { - fn spec_extend(&mut self, mut iterator: IntoIter<T>) { - unsafe { - self.append_elements(iterator.as_slice() as _); - } - iterator.ptr = iterator.end; - } -} - -impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A> -where - I: Iterator<Item = &'a T>, - T: Clone, -{ - default fn spec_extend(&mut self, iterator: I) { - self.spec_extend(iterator.cloned()) - } -} - -impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A> -where - T: Copy, -{ - fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) { - let slice = iterator.as_slice(); - unsafe { self.append_elements(slice) }; - } -} - impl<T, A: Allocator> Vec<T, A> { // leaf method to which various SpecFrom/SpecExtend implementations delegate when // they have no further optimizations to apply @@ -2755,45 +2312,6 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> { } } -macro_rules! __impl_slice_eq1 { - ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => { - #[$stability] - impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs - where - T: PartialEq<U>, - $($ty: $bound)? - { - #[inline] - fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] } - #[inline] - fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] } - } - } -} - -__impl_slice_eq1! { [A: Allocator] Vec<T, A>, Vec<U, A>, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } -__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } -__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] } -__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] } -__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] } - -// NOTE: some less important impls are omitted to reduce code bloat -// FIXME(Centril): Reconsider this? -//__impl_slice_eq1! 
{ [const N: usize] Vec<A>, &mut [B; N], } -//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, } -//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, } -//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, } -//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], } -//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], } -//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], } - /// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison). #[stable(feature = "rust1", since = "1.0.0")] impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> { @@ -2993,729 +2511,3 @@ impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] { Ok(array) } } - -//////////////////////////////////////////////////////////////////////////////// -// Clone-on-write -//////////////////////////////////////////////////////////////////////////////// - -#[stable(feature = "cow_from_vec", since = "1.8.0")] -impl<'a, T: Clone> From<&'a [T]> for Cow<'a, [T]> { - fn from(s: &'a [T]) -> Cow<'a, [T]> { - Cow::Borrowed(s) - } -} - -#[stable(feature = "cow_from_vec", since = "1.8.0")] -impl<'a, T: Clone> From<Vec<T>> for Cow<'a, [T]> { - fn from(v: Vec<T>) -> Cow<'a, [T]> { - Cow::Owned(v) - } -} - -#[stable(feature = "cow_from_vec_ref", since = "1.28.0")] -impl<'a, T: Clone> From<&'a Vec<T>> for Cow<'a, [T]> { - fn from(v: &'a Vec<T>) -> Cow<'a, [T]> { - Cow::Borrowed(v.as_slice()) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> FromIterator<T> for Cow<'a, [T]> -where - T: Clone, -{ - fn from_iter<I: IntoIterator<Item = T>>(it: I) -> Cow<'a, [T]> { - Cow::Owned(FromIterator::from_iter(it)) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Iterators -//////////////////////////////////////////////////////////////////////////////// - -/// An iterator that moves out of a vector. -/// -/// This `struct` is created by the `into_iter` method on [`Vec`] (provided -/// by the [`IntoIterator`] trait). -/// -/// # Example -/// -/// ``` -/// let v = vec![0, 1, 2]; -/// let iter: std::vec::IntoIter<_> = v.into_iter(); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub struct IntoIter< - T, - #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, -> { - buf: NonNull<T>, - phantom: PhantomData<T>, - cap: usize, - alloc: A, - ptr: *const T, - end: *const T, -} - -#[stable(feature = "vec_intoiter_debug", since = "1.13.0")] -impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("IntoIter").field(&self.as_slice()).finish() - } -} - -impl<T, A: Allocator> IntoIter<T, A> { - /// Returns the remaining items of this iterator as a slice. - /// - /// # Examples - /// - /// ``` - /// let vec = vec!['a', 'b', 'c']; - /// let mut into_iter = vec.into_iter(); - /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); - /// let _ = into_iter.next().unwrap(); - /// assert_eq!(into_iter.as_slice(), &['b', 'c']); - /// ``` - #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] - pub fn as_slice(&self) -> &[T] { - unsafe { slice::from_raw_parts(self.ptr, self.len()) } - } - - /// Returns the remaining items of this iterator as a mutable slice. 
- /// - /// # Examples - /// - /// ``` - /// let vec = vec!['a', 'b', 'c']; - /// let mut into_iter = vec.into_iter(); - /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); - /// into_iter.as_mut_slice()[2] = 'z'; - /// assert_eq!(into_iter.next().unwrap(), 'a'); - /// assert_eq!(into_iter.next().unwrap(), 'b'); - /// assert_eq!(into_iter.next().unwrap(), 'z'); - /// ``` - #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] - pub fn as_mut_slice(&mut self) -> &mut [T] { - unsafe { &mut *self.as_raw_mut_slice() } - } - - /// Returns a reference to the underlying allocator. - #[unstable(feature = "allocator_api", issue = "32838")] - #[inline] - pub fn allocator(&self) -> &A { - &self.alloc - } - - fn as_raw_mut_slice(&mut self) -> *mut [T] { - ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len()) - } - - fn drop_remaining(&mut self) { - unsafe { - ptr::drop_in_place(self.as_mut_slice()); - } - self.ptr = self.end; - } - - /// Relinquishes the backing allocation, equivalent to - /// `ptr::write(&mut self, Vec::new().into_iter())` - fn forget_allocation(&mut self) { - self.cap = 0; - self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) }; - self.ptr = self.buf.as_ptr(); - self.end = self.buf.as_ptr(); - } -} - -#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")] -impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> { - fn as_ref(&self) -> &[T] { - self.as_slice() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {} -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<T: Sync, A: Allocator> Sync for IntoIter<T, A> {} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T, A: Allocator> Iterator for IntoIter<T, A> { - type Item = T; - - #[inline] - fn next(&mut self) -> Option<T> { - if self.ptr as *const _ == self.end { - None - } else if mem::size_of::<T>() == 0 { - // purposefully don't use 'ptr.offset' because for - // vectors with 0-size elements this would return the - // same pointer. - self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T }; - - // Make up a value of this ZST. - Some(unsafe { mem::zeroed() }) - } else { - let old = self.ptr; - self.ptr = unsafe { self.ptr.offset(1) }; - - Some(unsafe { ptr::read(old) }) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - let exact = if mem::size_of::<T>() == 0 { - (self.end as usize).wrapping_sub(self.ptr as usize) - } else { - unsafe { self.end.offset_from(self.ptr) as usize } - }; - (exact, Some(exact)) - } - - #[inline] - fn count(self) -> usize { - self.len() - } - - unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item - where - Self: TrustedRandomAccess, - { - // SAFETY: the caller must guarantee that `i` is in bounds of the - // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)` - // is guaranteed to pointer to an element of the `Vec<T>` and - // thus guaranteed to be valid to dereference. - // - // Also note the implementation of `Self: TrustedRandomAccess` requires - // that `T: Copy` so reading elements from the buffer doesn't invalidate - // them for `Drop`. 
- unsafe { - if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { - #[inline] - fn next_back(&mut self) -> Option<T> { - if self.end == self.ptr { - None - } else if mem::size_of::<T>() == 0 { - // See above for why 'ptr.offset' isn't used - self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T }; - - // Make up a value of this ZST. - Some(unsafe { mem::zeroed() }) - } else { - self.end = unsafe { self.end.offset(-1) }; - - Some(unsafe { ptr::read(self.end) }) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { - fn is_empty(&self) -> bool { - self.ptr == self.end - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {} - -#[doc(hidden)] -#[unstable(issue = "none", feature = "std_internals")] -// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr -// and thus we can't implement drop-handling -unsafe impl<T, A: Allocator> TrustedRandomAccess for IntoIter<T, A> -where - T: Copy, -{ - fn may_have_side_effect() -> bool { - false - } -} - -#[stable(feature = "vec_into_iter_clone", since = "1.8.0")] -impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> { - #[cfg(not(test))] - fn clone(&self) -> Self { - self.as_slice().to_vec_in(self.alloc.clone()).into_iter() - } - #[cfg(test)] - fn clone(&self) -> Self { - crate::slice::to_vec(self.as_slice(), self.alloc.clone()).into_iter() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> { - fn drop(&mut self) { - struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>); - - impl<T, A: Allocator> Drop for DropGuard<'_, T, A> { - fn drop(&mut self) { - unsafe { - // `IntoIter::alloc` is not used anymore after this - let alloc = ptr::read(&self.0.alloc); - // RawVec handles deallocation - let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); - } - } - } - - let guard = DropGuard(self); - // destroy the remaining elements - unsafe { - ptr::drop_in_place(guard.0.as_raw_mut_slice()); - } - // now `guard` will be dropped and do the rest - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> { - type Source = Self; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut Self::Source { - self - } -} - -// internal helper trait for in-place iteration specialization. -#[rustc_specialization_trait] -pub(crate) trait AsIntoIter { - type Item; - fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item>; -} - -impl<T> AsIntoIter for IntoIter<T> { - type Item = T; - - fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> { - self - } -} - -/// A draining iterator for `Vec<T>`. -/// -/// This `struct` is created by [`Vec::drain`]. -/// See its documentation for more. 
-
-/// A draining iterator for `Vec<T>`.
-///
-/// This `struct` is created by [`Vec::drain`].
-/// See its documentation for more.
-///
-/// # Example
-///
-/// ```
-/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::Drain<_> = v.drain(..);
-/// ```
-#[stable(feature = "drain", since = "1.6.0")]
-pub struct Drain<
-    'a,
-    T: 'a,
-    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
-> {
-    /// Index of tail to preserve
-    tail_start: usize,
-    /// Length of tail
-    tail_len: usize,
-    /// Current remaining range to remove
-    iter: slice::Iter<'a, T>,
-    vec: NonNull<Vec<T, A>>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
-    }
-}
-
-impl<'a, T, A: Allocator> Drain<'a, T, A> {
-    /// Returns the remaining items of this iterator as a slice.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// let mut vec = vec!['a', 'b', 'c'];
-    /// let mut drain = vec.drain(..);
-    /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
-    /// let _ = drain.next().unwrap();
-    /// assert_eq!(drain.as_slice(), &['b', 'c']);
-    /// ```
-    #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
-    pub fn as_slice(&self) -> &[T] {
-        self.iter.as_slice()
-    }
-
-    /// Returns a reference to the underlying allocator.
-    #[unstable(feature = "allocator_api", issue = "32838")]
-    #[inline]
-    pub fn allocator(&self) -> &A {
-        unsafe { self.vec.as_ref().allocator() }
-    }
-}
-
-#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
-impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
-    fn as_ref(&self) -> &[T] {
-        self.as_slice()
-    }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
-#[stable(feature = "drain", since = "1.6.0")]
-unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
-    type Item = T;
-
-    #[inline]
-    fn next(&mut self) -> Option<T> {
-        self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
-    #[inline]
-    fn next_back(&mut self) -> Option<T> {
-        self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
-    }
-}
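Putting the pieces together: `Drain` hands out elements by `ptr::read`, supports inspection through `as_slice`, and is double-ended; its `Drop` (next hunk) then shifts the preserved tail back. A short usage sketch on stable APIs:

```rust
fn main() {
    let mut v = vec!['a', 'b', 'c', 'd'];
    let mut d = v.drain(1..3);
    assert_eq!(d.as_slice(), &['b', 'c']);
    assert_eq!(d.next_back(), Some('c')); // Drain is double-ended
    drop(d); // Drop moves the tail ('d') back next to 'a'
    assert_eq!(v, ['a', 'd']);
}
```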
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> Drop for Drain<'_, T, A> {
-    fn drop(&mut self) {
-        /// Continues dropping the remaining elements in the `Drain`, then moves back the
-        /// un-`Drain`ed elements to restore the original `Vec`.
-        struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
-
-        impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
-            fn drop(&mut self) {
-                // Continue the same loop we have below. If the loop already finished, this does
-                // nothing.
-                self.0.for_each(drop);
-
-                if self.0.tail_len > 0 {
-                    unsafe {
-                        let source_vec = self.0.vec.as_mut();
-                        // memmove back untouched tail, update to new length
-                        let start = source_vec.len();
-                        let tail = self.0.tail_start;
-                        if tail != start {
-                            let src = source_vec.as_ptr().add(tail);
-                            let dst = source_vec.as_mut_ptr().add(start);
-                            ptr::copy(src, dst, self.0.tail_len);
-                        }
-                        source_vec.set_len(start + self.0.tail_len);
-                    }
-                }
-            }
-        }
-
-        // exhaust self first
-        while let Some(item) = self.next() {
-            let guard = DropGuard(self);
-            drop(item);
-            mem::forget(guard);
-        }
-
-        // Drop a `DropGuard` to move back the non-drained tail of `self`.
-        DropGuard(self);
-    }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
-    fn is_empty(&self) -> bool {
-        self.iter.is_empty()
-    }
-}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
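The `while let`/`mem::forget(guard)` loop above is a general panic-safety idiom: arm a guard, perform the step that might panic, and disarm the guard on success so its cleanup runs only on unwind. A minimal standalone sketch of the idiom (the `Cleanup` type and `push_all` function are hypothetical, chosen only to show the shape):

```rust
use std::mem;

// Hypothetical guard: if the fallible step below unwinds, roll the
// destination back to a consistent (here: empty) state.
struct Cleanup<'a>(&'a mut Vec<i32>);
impl Drop for Cleanup<'_> {
    fn drop(&mut self) {
        self.0.clear();
    }
}

fn push_all(dst: &mut Vec<i32>, items: impl IntoIterator<Item = i32>) {
    for item in items {
        let guard = Cleanup(&mut *dst); // arm
        guard.0.push(item);             // the step that could panic
        mem::forget(guard);             // success: skip the cleanup
    }
}

fn main() {
    let mut v = Vec::new();
    push_all(&mut v, [1, 2, 3]);
    assert_eq!(v, [1, 2, 3]);
}
```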
-
-/// A splicing iterator for `Vec`.
-///
-/// This struct is created by [`Vec::splice()`].
-/// See its documentation for more.
-///
-/// # Example
-///
-/// ```
-/// let mut v = vec![0, 1, 2];
-/// let new = [7, 8];
-/// let iter: std::vec::Splice<_> = v.splice(1.., new.iter().cloned());
-/// ```
-#[derive(Debug)]
-#[stable(feature = "vec_splice", since = "1.21.0")]
-pub struct Splice<
-    'a,
-    I: Iterator + 'a,
-    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
-> {
-    drain: Drain<'a, I::Item, A>,
-    replace_with: I,
-}
-
-#[stable(feature = "vec_splice", since = "1.21.0")]
-impl<I: Iterator, A: Allocator> Iterator for Splice<'_, I, A> {
-    type Item = I::Item;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.drain.next()
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.drain.size_hint()
-    }
-}
-
-#[stable(feature = "vec_splice", since = "1.21.0")]
-impl<I: Iterator, A: Allocator> DoubleEndedIterator for Splice<'_, I, A> {
-    fn next_back(&mut self) -> Option<Self::Item> {
-        self.drain.next_back()
-    }
-}
-
-#[stable(feature = "vec_splice", since = "1.21.0")]
-impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
-
-#[stable(feature = "vec_splice", since = "1.21.0")]
-impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
-    fn drop(&mut self) {
-        self.drain.by_ref().for_each(drop);
-
-        unsafe {
-            if self.drain.tail_len == 0 {
-                self.drain.vec.as_mut().extend(self.replace_with.by_ref());
-                return;
-            }
-
-            // First fill the range left by drain().
-            if !self.drain.fill(&mut self.replace_with) {
-                return;
-            }
-
-            // There may be more elements. Use the lower bound as an estimate.
-            // FIXME: Is the upper bound a better guess? Or something else?
-            let (lower_bound, _upper_bound) = self.replace_with.size_hint();
-            if lower_bound > 0 {
-                self.drain.move_tail(lower_bound);
-                if !self.drain.fill(&mut self.replace_with) {
-                    return;
-                }
-            }
-
-            // Collect any remaining elements.
-            // This is a zero-length vector which does not allocate if `lower_bound` was exact.
-            let mut collected = self.replace_with.by_ref().collect::<Vec<I::Item>>().into_iter();
-            // Now we have an exact count.
-            if collected.len() > 0 {
-                self.drain.move_tail(collected.len());
-                let filled = self.drain.fill(&mut collected);
-                debug_assert!(filled);
-                debug_assert_eq!(collected.len(), 0);
-            }
-        }
-        // Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
-    }
-}
-
-/// Private helper methods for `Splice::drop`
-impl<T, A: Allocator> Drain<'_, T, A> {
-    /// The range from `self.vec.len` to `self.tail_start` contains elements
-    /// that have been moved out.
-    /// Fill that range as much as possible with new elements from the `replace_with` iterator.
-    /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.)
-    unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
-        let vec = unsafe { self.vec.as_mut() };
-        let range_start = vec.len;
-        let range_end = self.tail_start;
-        let range_slice = unsafe {
-            slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
-        };
-
-        for place in range_slice {
-            if let Some(new_item) = replace_with.next() {
-                unsafe { ptr::write(place, new_item) };
-                vec.len += 1;
-            } else {
-                return false;
-            }
-        }
-        true
-    }
-
-    /// Makes room for inserting more elements before the tail.
-    unsafe fn move_tail(&mut self, additional: usize) {
-        let vec = unsafe { self.vec.as_mut() };
-        let len = self.tail_start + self.tail_len;
-        vec.buf.reserve(len, additional);
-
-        let new_tail_start = self.tail_start + additional;
-        unsafe {
-            let src = vec.as_ptr().add(self.tail_start);
-            let dst = vec.as_mut_ptr().add(new_tail_start);
-            ptr::copy(src, dst, self.tail_len);
-        }
-        self.tail_start = new_tail_start;
-    }
-}
-
-/// An iterator which uses a closure to determine if an element should be removed.
-///
-/// This struct is created by [`Vec::drain_filter`].
-/// See its documentation for more.
-///
-/// # Example
-///
-/// ```
-/// #![feature(drain_filter)]
-///
-/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
-/// ```
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-#[derive(Debug)]
-pub struct DrainFilter<
-    'a,
-    T,
-    F,
-    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
-> where
-    F: FnMut(&mut T) -> bool,
-{
-    vec: &'a mut Vec<T, A>,
-    /// The index of the item that will be inspected by the next call to `next`.
-    idx: usize,
-    /// The number of items that have been drained (removed) thus far.
-    del: usize,
-    /// The original length of `vec` prior to draining.
-    old_len: usize,
-    /// The filter test predicate.
-    pred: F,
-    /// A flag that indicates a panic has occurred in the filter test predicate.
-    /// This is used as a hint in the drop implementation to prevent consumption
-    /// of the remainder of the `DrainFilter`. Any unprocessed items will be
-    /// backshifted in the `vec`, but no further items will be dropped or
-    /// tested by the filter predicate.
-    panic_flag: bool,
-}
-
-impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
-where
-    F: FnMut(&mut T) -> bool,
-{
-    /// Returns a reference to the underlying allocator.
-    #[unstable(feature = "allocator_api", issue = "32838")]
-    #[inline]
-    pub fn allocator(&self) -> &A {
-        self.vec.allocator()
-    }
-}
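For reference, this is what the API implemented by the hunks that follow looks like from the caller's side (nightly-only behind the `drain_filter` feature at the time of this change):

```rust
#![feature(drain_filter)]

fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];
    // Matching elements are moved out; the rest are backshifted in place.
    let evens: Vec<i32> = v.drain_filter(|x| *x % 2 == 0).collect();
    assert_eq!(evens, [2, 4, 6]);
    assert_eq!(v, [1, 3, 5]); // non-matching elements retained, order preserved
}
```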
-
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
-where
-    F: FnMut(&mut T) -> bool,
-{
-    type Item = T;
-
-    fn next(&mut self) -> Option<T> {
-        unsafe {
-            while self.idx < self.old_len {
-                let i = self.idx;
-                let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
-                self.panic_flag = true;
-                let drained = (self.pred)(&mut v[i]);
-                self.panic_flag = false;
-                // Update the index *after* the predicate is called. If the index
-                // is updated prior and the predicate panics, the element at this
-                // index would be leaked.
-                self.idx += 1;
-                if drained {
-                    self.del += 1;
-                    return Some(ptr::read(&v[i]));
-                } else if self.del > 0 {
-                    let del = self.del;
-                    let src: *const T = &v[i];
-                    let dst: *mut T = &mut v[i - del];
-                    ptr::copy_nonoverlapping(src, dst, 1);
-                }
-            }
-            None
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        (0, Some(self.old_len - self.idx))
-    }
-}
-
-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
-where
-    F: FnMut(&mut T) -> bool,
-{
-    fn drop(&mut self) {
-        struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
-        where
-            F: FnMut(&mut T) -> bool,
-        {
-            drain: &'b mut DrainFilter<'a, T, F, A>,
-        }
-
-        impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
-        where
-            F: FnMut(&mut T) -> bool,
-        {
-            fn drop(&mut self) {
-                unsafe {
-                    if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
-                        // This is a pretty messed up state, and there isn't really an
-                        // obviously right thing to do. We don't want to keep trying
-                        // to execute `pred`, so we just backshift all the unprocessed
-                        // elements and tell the vec that they still exist. The backshift
-                        // is required to prevent a double-drop of the last successfully
-                        // drained item prior to a panic in the predicate.
-                        let ptr = self.drain.vec.as_mut_ptr();
-                        let src = ptr.add(self.drain.idx);
-                        let dst = src.sub(self.drain.del);
-                        let tail_len = self.drain.old_len - self.drain.idx;
-                        src.copy_to(dst, tail_len);
-                    }
-                    self.drain.vec.set_len(self.drain.old_len - self.drain.del);
-                }
-            }
-        }
-
-        let backshift = BackshiftOnDrop { drain: self };
-
-        // Attempt to consume any remaining elements if the filter predicate
-        // has not yet panicked. We'll backshift any remaining elements
-        // whether we've already panicked or if the consumption here panics.
-        if !backshift.drain.panic_flag {
-            backshift.drain.for_each(drop);
-        }
-    }
-}
diff --git a/library/alloc/src/vec/partial_eq.rs b/library/alloc/src/vec/partial_eq.rs
new file mode 100644
index 00000000000..ff90b6caf46
--- /dev/null
+++ b/library/alloc/src/vec/partial_eq.rs
@@ -0,0 +1,43 @@
+use crate::alloc::Allocator;
+use crate::borrow::Cow;
+
+use super::Vec;
+
+macro_rules! __impl_slice_eq1 {
+    ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
+        #[$stability]
+        impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+        where
+            T: PartialEq<U>,
+            $($ty: $bound)?
+        {
+            #[inline]
+            fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+            #[inline]
+            fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
+        }
+    }
+}
+
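Each `__impl_slice_eq1!` invocation below expands to one `PartialEq` impl that defers to slice equality via `self[..] == other[..]`. The net effect is observable from stable code, where vectors compare structurally against slices, arrays, and other vectors:

```rust
fn main() {
    let v = vec![1, 2, 3];
    assert!(v == vec![1, 2, 3]);  // Vec<T, A> == Vec<U, A>
    assert!(v == &[1, 2, 3][..]); // Vec<T, A> == &[U]
    assert!(v == [1, 2, 3]);      // Vec<T, A> == [U; N]
    assert!(&[1, 2, 3][..] == v); // &[T] == Vec<U, A>
}
```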
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, Vec<U, A>, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// FIXME(Centril): Reconsider this?
+//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
+//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
diff --git a/library/alloc/src/vec/set_len_on_drop.rs b/library/alloc/src/vec/set_len_on_drop.rs
new file mode 100644
index 00000000000..8b66bc81212
--- /dev/null
+++ b/library/alloc/src/vec/set_len_on_drop.rs
@@ -0,0 +1,28 @@
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is: The length field in SetLenOnDrop is a local variable
+// that the optimizer will see does not alias with any stores through the Vec's data
+// pointer. This is a workaround for alias analysis issue #32155
+pub(super) struct SetLenOnDrop<'a> {
+    len: &'a mut usize,
+    local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+    #[inline]
+    pub(super) fn new(len: &'a mut usize) -> Self {
+        SetLenOnDrop { local_len: *len, len }
+    }
+
+    #[inline]
+    pub(super) fn increment_len(&mut self, increment: usize) {
+        self.local_len += increment;
+    }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        *self.len = self.local_len;
+    }
+}
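The same deferred-length pattern can be sketched outside `Vec`'s internals. The guard below plays the role of `SetLenOnDrop` for a locally tracked length (the `SetLen` struct and `extend_with_squares` function are invented for the example; the real code hands `&mut self.len` to the guard directly):

```rust
struct SetLen<'a> {
    len: &'a mut usize,
    local_len: usize,
}
impl Drop for SetLen<'_> {
    fn drop(&mut self) {
        // Publish the locally tracked length exactly once.
        *self.len = self.local_len;
    }
}

fn extend_with_squares(v: &mut Vec<u32>, n: usize) {
    v.reserve(n);
    let mut len = v.len();
    unsafe {
        let mut ptr = v.as_mut_ptr().add(len);
        {
            // Writes only touch the local counter inside the loop; the
            // shared `len` is updated when the guard drops (even on unwind).
            let mut guard = SetLen { local_len: len, len: &mut len };
            for i in 0..n as u32 {
                ptr.write(i * i);
                ptr = ptr.add(1);
                guard.local_len += 1;
            }
        }
        v.set_len(len);
    }
}

fn main() {
    let mut v = vec![1, 2];
    extend_with_squares(&mut v, 3);
    assert_eq!(v, [1, 2, 0, 1, 4]);
}
```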
diff --git a/library/alloc/src/vec/source_iter_marker.rs b/library/alloc/src/vec/source_iter_marker.rs
new file mode 100644
index 00000000000..8c0e95559fa
--- /dev/null
+++ b/library/alloc/src/vec/source_iter_marker.rs
@@ -0,0 +1,108 @@
+use core::iter::{InPlaceIterable, SourceIter};
+use core::mem::{self, ManuallyDrop};
+use core::ptr::{self};
+
+use super::{AsIntoIter, InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
+
+/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
+/// source allocation, i.e. executing the pipeline in place.
+///
+/// The SourceIter parent trait is necessary for the specializing function to access the allocation
+/// which is to be reused. But it is not sufficient for the specialization to be valid. See
+/// additional bounds on the impl.
+#[rustc_unsafe_specialization_marker]
+pub(super) trait SourceIterMarker: SourceIter<Source: AsIntoIter> {}
+
+// The std-internal SourceIter/InPlaceIterable traits are only implemented by chains of
+// Adapter<Adapter<Adapter<IntoIter>>> (all owned by core/std). Additional bounds
+// on the adapter implementations (beyond `impl<I: Trait> Trait for Adapter<I>`) only depend on other
+// traits already marked as specialization traits (Copy, TrustedRandomAccess, FusedIterator).
+// I.e. the marker does not depend on lifetimes of user-supplied types. Modulo the Copy hole, which
+// several other specializations already depend on.
+impl<T> SourceIterMarker for T where T: SourceIter<Source: AsIntoIter> + InPlaceIterable {}
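What this marker buys is visible to users only through allocation behavior: a `Vec`-sourced pipeline whose item size and alignment match the source can be collected back into the same buffer. Whether the buffer is actually reused is an implementation detail rather than a stable guarantee, so the sketch below only observes it:

```rust
fn main() {
    let v: Vec<u32> = (0..1000).collect();
    let before = v.as_ptr();
    // `map` keeps item size and alignment identical, and the source is a
    // vec::IntoIter, so the in-place specialization can apply.
    let doubled: Vec<u32> = v.into_iter().map(|x| x * 2).collect();
    // On a compiler with this specialization the pointers compare equal;
    // this is an optimization, not a guarantee, hence only printed.
    println!("allocation reused: {}", before == doubled.as_ptr());
}
```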
+
+impl<T, I> SpecFromIter<T, I> for Vec<T>
+where
+    I: Iterator<Item = T> + SourceIterMarker,
+{
+    default fn from_iter(mut iterator: I) -> Self {
+        // Additional requirements which cannot be expressed via trait bounds. We rely on const eval
+        // instead:
+        // a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic
+        // b) size match as required by Alloc contract
+        // c) alignments match as required by Alloc contract
+        if mem::size_of::<T>() == 0
+            || mem::size_of::<T>()
+                != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+            || mem::align_of::<T>()
+                != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+        {
+            // fallback to more generic implementations
+            return SpecFromIterNested::from_iter(iterator);
+        }
+
+        let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
+            let inner = iterator.as_inner().as_into_iter();
+            (
+                inner.buf.as_ptr(),
+                inner.ptr,
+                inner.buf.as_ptr() as *mut T,
+                inner.end as *const T,
+                inner.cap,
+            )
+        };
+
+        // use try-fold since
+        // - it vectorizes better for some iterator adapters
+        // - unlike most internal iteration methods, it only takes a &mut self
+        // - it lets us thread the write pointer through its innards and get it back in the end
+        let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+        let sink = iterator
+            .try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(dst_end))
+            .unwrap();
+        // iteration succeeded, don't drop head
+        let dst = ManuallyDrop::new(sink).dst;
+
+        let src = unsafe { iterator.as_inner().as_into_iter() };
+        // check if SourceIter contract was upheld
+        // caveat: if they weren't we may not even make it to this point
+        debug_assert_eq!(src_buf, src.buf.as_ptr());
+        // check InPlaceIterable contract. This is only possible if the iterator advanced the
+        // source pointer at all. If it uses unchecked access via TrustedRandomAccess
+        // then the source pointer will stay in its initial position and we can't use it as a reference.
+        if src.ptr != src_ptr {
+            debug_assert!(
+                dst as *const _ <= src.ptr,
+                "InPlaceIterable contract violation, write pointer advanced beyond read pointer"
+            );
+        }
+
+        // drop any remaining values at the tail of the source
+        src.drop_remaining();
+        // but prevent drop of the allocation itself once IntoIter goes out of scope
+        src.forget_allocation();
+
+        let vec = unsafe {
+            let len = dst.offset_from(dst_buf) as usize;
+            Vec::from_raw_parts(dst_buf, len, cap)
+        };
+
+        vec
+    }
+}
+
+fn write_in_place_with_drop<T>(
+    src_end: *const T,
+) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
+    move |mut sink, item| {
+        unsafe {
+            // the InPlaceIterable contract cannot be verified precisely here since
+            // try_fold has an exclusive reference to the source pointer
+            // all we can do is check if it's still in range
+            debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
+            ptr::write(sink.dst, item);
+            sink.dst = sink.dst.add(1);
+        }
+        Ok(sink)
+    }
+}
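Whichever path runs, the `drop_remaining`/`forget_allocation` bookkeeping above preserves the basic contract: every source element is dropped exactly once and the allocation is never freed twice, even when the pipeline produces fewer items than it consumes. A check of that contract (the `D` drop-counting type is made up for the example):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static DROPS: AtomicUsize = AtomicUsize::new(0);

struct D(usize);
impl Drop for D {
    fn drop(&mut self) {
        DROPS.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    let v: Vec<D> = (0..6).map(D).collect();
    // Four elements fail the filter and are dropped during `collect`;
    // the two survivors are dropped when `kept` goes away.
    let kept: Vec<D> = v.into_iter().filter(|d| d.0 < 2).collect();
    assert_eq!(DROPS.load(Ordering::SeqCst), 4);
    drop(kept);
    assert_eq!(DROPS.load(Ordering::SeqCst), 6);
}
```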
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
new file mode 100644
index 00000000000..b6186a7ebaf
--- /dev/null
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -0,0 +1,82 @@
+use crate::alloc::Allocator;
+use core::iter::TrustedLen;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SetLenOnDrop, Vec};
+
+// Specialization trait used for Vec::extend
+pub(super) trait SpecExtend<T, I> {
+    fn spec_extend(&mut self, iter: I);
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+    I: Iterator<Item = T>,
+{
+    default fn spec_extend(&mut self, iter: I) {
+        self.extend_desugared(iter)
+    }
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+    I: TrustedLen<Item = T>,
+{
+    default fn spec_extend(&mut self, iterator: I) {
+        // This is the case for a TrustedLen iterator.
+        let (low, high) = iterator.size_hint();
+        if let Some(high_value) = high {
+            debug_assert_eq!(
+                low,
+                high_value,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+        }
+        if let Some(additional) = high {
+            self.reserve(additional);
+            unsafe {
+                let mut ptr = self.as_mut_ptr().add(self.len());
+                let mut local_len = SetLenOnDrop::new(&mut self.len);
+                iterator.for_each(move |element| {
+                    ptr::write(ptr, element);
+                    ptr = ptr.offset(1);
+                    // NB can't overflow since we would have had to alloc the address space
+                    local_len.increment_len(1);
+                });
+            }
+        } else {
+            self.extend_desugared(iterator)
+        }
+    }
+}
+
+impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
+    fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+        unsafe {
+            self.append_elements(iterator.as_slice() as _);
+        }
+        iterator.ptr = iterator.end;
+    }
+}
+
+impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+where
+    I: Iterator<Item = &'a T>,
+    T: Clone,
+{
+    default fn spec_extend(&mut self, iterator: I) {
+        self.spec_extend(iterator.cloned())
+    }
+}
+
+impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+    T: Copy,
+{
+    fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+        let slice = iterator.as_slice();
+        unsafe { self.append_elements(slice) };
+    }
+}
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs
new file mode 100644
index 00000000000..de610174783
--- /dev/null
+++ b/library/alloc/src/vec/spec_from_elem.rs
@@ -0,0 +1,60 @@
+use crate::alloc::Allocator;
+use crate::raw_vec::RawVec;
+use core::ptr::{self};
+
+use super::{ExtendElement, IsZero, Vec};
+
+// Specialization trait used for Vec::from_elem
+pub(super) trait SpecFromElem: Sized {
+    fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A>;
+}
+
+impl<T: Clone> SpecFromElem for T {
+    default fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A> {
+        let mut v = Vec::with_capacity_in(n, alloc);
+        v.extend_with(n, ExtendElement(elem));
+        v
+    }
+}
+
+impl SpecFromElem for i8 {
+    #[inline]
+    fn from_elem<A: Allocator>(elem: i8, n: usize, alloc: A) -> Vec<i8, A> {
+        if elem == 0 {
+            return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+        }
+        unsafe {
+            let mut v = Vec::with_capacity_in(n, alloc);
+            ptr::write_bytes(v.as_mut_ptr(), elem as u8, n);
+            v.set_len(n);
+            v
+        }
+    }
+}
+
+impl SpecFromElem for u8 {
+    #[inline]
+    fn from_elem<A: Allocator>(elem: u8, n: usize, alloc: A) -> Vec<u8, A> {
+        if elem == 0 {
+            return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+        }
+        unsafe {
+            let mut v = Vec::with_capacity_in(n, alloc);
+            ptr::write_bytes(v.as_mut_ptr(), elem, n);
+            v.set_len(n);
+            v
+        }
+    }
+}
+
+impl<T: Clone + IsZero> SpecFromElem for T {
+    #[inline]
+    fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+        if elem.is_zero() {
+            return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+        }
+        let mut v = Vec::with_capacity_in(n, alloc);
+        v.extend_with(n, ExtendElement(elem));
+        v
+    }
+}
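The practical effect of the zero specializations above: `vec![0u8; n]` and friends can request pre-zeroed memory from the allocator instead of writing `n` zeros, while non-zero fills take the `write_bytes` (memset-style) path. Observable behavior is identical either way:

```rust
fn main() {
    // Zero case: eligible for the `with_capacity_zeroed_in` fast path.
    let zeros = vec![0u8; 1024];
    assert!(zeros.iter().all(|&b| b == 0));

    // Non-zero case: filled via `write_bytes` instead.
    let ones = vec![1u8; 1024];
    assert!(ones.iter().all(|&b| b == 1));
}
```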
diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs
new file mode 100644
index 00000000000..bbfcc68daef
--- /dev/null
+++ b/library/alloc/src/vec/spec_from_iter.rs
@@ -0,0 +1,97 @@
+use core::mem::ManuallyDrop;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec};
+
+/// Specialization trait used for Vec::from_iter
+///
+/// ## The delegation graph:
+///
+/// ```text
+/// +-------------+
+/// |FromIterator |
+/// +-+-----------+
+///   |
+///   v
+/// +-+-------------------------------+  +---------------------+
+/// |SpecFromIter                  +---->+SpecFromIterNested   |
+/// |where I:                      |  |  |where I:             |
+/// |  Iterator (default)----------+  |  |  Iterator (default) |
+/// |  vec::IntoIter               |  |  |  TrustedLen         |
+/// |  SourceIterMarker---fallback-+  |  |                     |
+/// |  slice::Iter                    |  |                     |
+/// |  Iterator<Item = &Clone>        |  +---------------------+
+/// +---------------------------------+
+/// ```
+pub(super) trait SpecFromIter<T, I> {
+    fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I> SpecFromIter<T, I> for Vec<T>
+where
+    I: Iterator<Item = T>,
+{
+    default fn from_iter(iterator: I) -> Self {
+        SpecFromIterNested::from_iter(iterator)
+    }
+}
+
+impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> {
+    fn from_iter(iterator: IntoIter<T>) -> Self {
+        // A common case is passing a vector into a function which immediately
+        // re-collects into a vector. We can short circuit this if the IntoIter
+        // has not been advanced at all.
+        // When it has been advanced, we can also reuse the memory and move the
+        // data to the front. But we only do so when the resulting Vec wouldn't
+        // have more unused capacity than creating it through the generic
+        // FromIterator implementation would. That limitation is not strictly
+        // necessary as Vec's allocation behavior is intentionally unspecified.
+        // But it is a conservative choice.
+        let has_advanced = iterator.buf.as_ptr() as *const _ != iterator.ptr;
+        if !has_advanced || iterator.len() >= iterator.cap / 2 {
+            unsafe {
+                let it = ManuallyDrop::new(iterator);
+                if has_advanced {
+                    ptr::copy(it.ptr, it.buf.as_ptr(), it.len());
+                }
+                return Vec::from_raw_parts(it.buf.as_ptr(), it.len(), it.cap);
+            }
+        }
+
+        let mut vec = Vec::new();
+        // must delegate to spec_extend() since extend() itself delegates
+        // to spec_from for empty Vecs
+        vec.spec_extend(iterator);
+        vec
+    }
+}
+
+impl<'a, T: 'a, I> SpecFromIter<&'a T, I> for Vec<T>
+where
+    I: Iterator<Item = &'a T>,
+    T: Clone,
+{
+    default fn from_iter(iterator: I) -> Self {
+        SpecFromIter::from_iter(iterator.cloned())
+    }
+}
+
+// This utilizes `iterator.as_slice().to_vec()` since spec_extend
+// must take more steps to reason about the final capacity + length
+// and thus do more work. `to_vec()` directly allocates the correct amount
+// and fills it exactly.
+impl<'a, T: 'a + Clone> SpecFromIter<&'a T, slice::Iter<'a, T>> for Vec<T> {
+    #[cfg(not(test))]
+    fn from_iter(iterator: slice::Iter<'a, T>) -> Self {
+        iterator.as_slice().to_vec()
+    }
+
+    // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+    // required for this method definition, is not available. Instead use the
+    // `slice::to_vec` function which is only available with cfg(test)
+    // NB see the slice::hack module in slice.rs for more information
+    #[cfg(test)]
+    fn from_iter(iterator: slice::Iter<'a, T>) -> Self {
+        crate::slice::to_vec(iterator.as_slice(), crate::alloc::Global)
+    }
+}
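The `IntoIter` branch above short-circuits the common round-trip of moving a vector into an iterator and immediately collecting it again. Buffer reuse here is an unspecified optimization, so the example below merely observes whether it happened on the current compiler:

```rust
fn main() {
    let v = vec![1, 2, 3];
    let before = v.as_ptr();
    // No adapters and the iterator is not advanced: eligible for the
    // short circuit that hands the original buffer straight back.
    let round_tripped: Vec<i32> = v.into_iter().collect();
    println!("same buffer: {}", before == round_tripped.as_ptr());
}
```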
diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs
new file mode 100644
index 00000000000..6abd4ff2a3f
--- /dev/null
+++ b/library/alloc/src/vec/spec_from_iter_nested.rs
@@ -0,0 +1,56 @@
+use core::iter::TrustedLen;
+use core::ptr::{self};
+
+use super::{SpecExtend, Vec};
+
+/// Another specialization trait for Vec::from_iter
+/// necessary to manually prioritize overlapping specializations
+/// see [`SpecFromIter`] for details.
+pub(super) trait SpecFromIterNested<T, I> {
+    fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I> SpecFromIterNested<T, I> for Vec<T>
+where
+    I: Iterator<Item = T>,
+{
+    default fn from_iter(mut iterator: I) -> Self {
+        // Unroll the first iteration, as the vector is going to be
+        // expanded on this iteration in every case when the iterable is not
+        // empty, but the loop in extend_desugared() is not going to see the
+        // vector being full in the few subsequent loop iterations.
+        // So we get better branch prediction.
+        let mut vector = match iterator.next() {
+            None => return Vec::new(),
+            Some(element) => {
+                let (lower, _) = iterator.size_hint();
+                let mut vector = Vec::with_capacity(lower.saturating_add(1));
+                unsafe {
+                    ptr::write(vector.as_mut_ptr(), element);
+                    vector.set_len(1);
+                }
+                vector
+            }
+        };
+        // must delegate to spec_extend() since extend() itself delegates
+        // to spec_from for empty Vecs
+        <Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
+        vector
+    }
+}
+
+impl<T, I> SpecFromIterNested<T, I> for Vec<T>
+where
+    I: TrustedLen<Item = T>,
+{
+    fn from_iter(iterator: I) -> Self {
+        let mut vector = match iterator.size_hint() {
+            (_, Some(upper)) => Vec::with_capacity(upper),
+            _ => Vec::new(),
+        };
+        // must delegate to spec_extend() since extend() itself delegates
+        // to spec_from for empty Vecs
+        vector.spec_extend(iterator);
+        vector
+    }
+}
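One consequence of the unrolled first iteration: the initial capacity is `size_hint().0.saturating_add(1)`, so an adapter with a weak lower bound (such as `filter`, which reports 0) starts small and relies on amortized growth. The exact final capacity is unspecified, hence only printed here:

```rust
fn main() {
    // `filter` reports a lower bound of 0, so after the unrolled first
    // element the vector starts at capacity 1 and grows from there.
    let v: Vec<i32> = (0..10).filter(|x| x % 2 == 0).collect();
    assert_eq!(v, [0, 2, 4, 6, 8]);
    println!("final capacity: {}", v.capacity());
}
```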
diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs
new file mode 100644
index 00000000000..0a27b5b62ec
--- /dev/null
+++ b/library/alloc/src/vec/splice.rs
@@ -0,0 +1,133 @@
+use crate::alloc::{Allocator, Global};
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{Drain, Vec};
+
+/// A splicing iterator for `Vec`.
+///
+/// This struct is created by [`Vec::splice()`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let new = [7, 8];
+/// let iter: std::vec::Splice<_> = v.splice(1.., new.iter().cloned());
+/// ```
+#[derive(Debug)]
+#[stable(feature = "vec_splice", since = "1.21.0")]
+pub struct Splice<
+    'a,
+    I: Iterator + 'a,
+    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+> {
+    pub(super) drain: Drain<'a, I::Item, A>,
+    pub(super) replace_with: I,
+}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> Iterator for Splice<'_, I, A> {
+    type Item = I::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.drain.next()
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.drain.size_hint()
+    }
+}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> DoubleEndedIterator for Splice<'_, I, A> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        self.drain.next_back()
+    }
+}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
+    fn drop(&mut self) {
+        self.drain.by_ref().for_each(drop);
+
+        unsafe {
+            if self.drain.tail_len == 0 {
+                self.drain.vec.as_mut().extend(self.replace_with.by_ref());
+                return;
+            }
+
+            // First fill the range left by drain().
+            if !self.drain.fill(&mut self.replace_with) {
+                return;
+            }
+
+            // There may be more elements. Use the lower bound as an estimate.
+            // FIXME: Is the upper bound a better guess? Or something else?
+            let (lower_bound, _upper_bound) = self.replace_with.size_hint();
+            if lower_bound > 0 {
+                self.drain.move_tail(lower_bound);
+                if !self.drain.fill(&mut self.replace_with) {
+                    return;
+                }
+            }
+
+            // Collect any remaining elements.
+            // This is a zero-length vector which does not allocate if `lower_bound` was exact.
+            let mut collected = self.replace_with.by_ref().collect::<Vec<I::Item>>().into_iter();
+            // Now we have an exact count.
+            if collected.len() > 0 {
+                self.drain.move_tail(collected.len());
+                let filled = self.drain.fill(&mut collected);
+                debug_assert!(filled);
+                debug_assert_eq!(collected.len(), 0);
+            }
+        }
+        // Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
+    }
+}
+
+/// Private helper methods for `Splice::drop`
+impl<T, A: Allocator> Drain<'_, T, A> {
+    /// The range from `self.vec.len` to `self.tail_start` contains elements
+    /// that have been moved out.
+    /// Fill that range as much as possible with new elements from the `replace_with` iterator.
+    /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.)
+    unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
+        let vec = unsafe { self.vec.as_mut() };
+        let range_start = vec.len;
+        let range_end = self.tail_start;
+        let range_slice = unsafe {
+            slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
+        };
+
+        for place in range_slice {
+            if let Some(new_item) = replace_with.next() {
+                unsafe { ptr::write(place, new_item) };
+                vec.len += 1;
+            } else {
+                return false;
+            }
+        }
+        true
+    }
+
+    /// Makes room for inserting more elements before the tail.
+    unsafe fn move_tail(&mut self, additional: usize) {
+        let vec = unsafe { self.vec.as_mut() };
+        let len = self.tail_start + self.tail_len;
+        vec.buf.reserve(len, additional);
+
+        let new_tail_start = self.tail_start + additional;
+        unsafe {
+            let src = vec.as_ptr().add(self.tail_start);
+            let dst = vec.as_mut_ptr().add(new_tail_start);
+            ptr::copy(src, dst, self.tail_len);
+        }
+        self.tail_start = new_tail_start;
+    }
+}
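From the caller's side, all of the `fill`/`move_tail` machinery reduces to this: `splice` yields the removed range while the replacement, which may be longer or shorter than it, is stitched in and the tail shifted as needed. A usage sketch on stable APIs:

```rust
fn main() {
    let mut v = vec![0, 1, 2, 3];
    // Replace two elements with three; the tail (`3`) is shifted to make room.
    let removed: Vec<i32> = v.splice(1..3, [7, 8, 9].iter().copied()).collect();
    assert_eq!(removed, [1, 2]);
    assert_eq!(v, [0, 7, 8, 9, 3]);
}
```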
