about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--library/alloc/src/collections/vec_deque/drain.rs79
-rw-r--r--library/alloc/src/collections/vec_deque/iter.rs30
-rw-r--r--library/alloc/src/collections/vec_deque/iter_mut.rs10
-rw-r--r--library/alloc/src/collections/vec_deque/mod.rs182
-rw-r--r--library/alloc/src/collections/vec_deque/spec_extend.rs11
-rw-r--r--library/alloc/src/collections/vec_deque/tests.rs13
6 files changed, 181 insertions, 144 deletions
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index a4a96bad5e0..89feb361ddc 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -53,27 +53,36 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
     }
 
     // Only returns pointers to the slices, as that's
-    // all we need to drop them
-    fn as_slices(&self) -> (*mut [T], *mut [T]) {
+    // all we need to drop them. May only be called if `self.remaining != 0`.
+    unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
         unsafe {
             let deque = self.deque.as_ref();
-            let wrapped_start = deque.wrap_idx(self.idx);
-
-            if self.remaining <= deque.capacity() - wrapped_start {
-                // there's only one contigous slice
-                (
-                    ptr::slice_from_raw_parts_mut(deque.ptr().add(wrapped_start), self.remaining),
-                    &mut [] as *mut [T],
-                )
+            // FIXME: This is doing almost exactly the same thing as the else branch in `VecDeque::slice_ranges`.
+            // Unfortunately, we can't just call `slice_ranges` here, as the deque's `len` is currently
+            // just `drain_start`, so the range check would (almost) always panic. Between temporarily
+            // adjusting the deque's `len` to call `slice_ranges`, and just copy-pasting the `slice_ranges`
+            // implementation, this seemed like the less hacky solution, though it might be good to
+            // find a better one in the future.
+
+            // because `self.remaining != 0`, we know that `self.idx < deque.original_len`, so it's a valid
+            // logical index.
+            let wrapped_start = deque.to_physical_idx(self.idx);
+
+            let head_len = deque.capacity() - wrapped_start;
+
+            let (a_range, b_range) = if head_len >= self.remaining {
+                (wrapped_start..wrapped_start + self.remaining, 0..0)
             } else {
-                let head_len = deque.capacity() - wrapped_start;
-                // this will never overflow due to the if condition
                 let tail_len = self.remaining - head_len;
-                (
-                    ptr::slice_from_raw_parts_mut(deque.ptr().add(wrapped_start), head_len),
-                    ptr::slice_from_raw_parts_mut(deque.ptr(), tail_len),
-                )
-            }
+                (wrapped_start..deque.capacity(), 0..tail_len)
+            };
+
+            // SAFETY: the range `self.idx..self.idx+self.remaining` lies strictly inside
+            // the range `0..deque.original_len`. Because of this, and because of the fact
+            // that we acquire `a_range` and `b_range` exactly like `slice_ranges` would,
+            // it's guaranteed that `a_range` and `b_range` represent valid ranges into
+            // the deque's buffer.
+            (deque.buffer_range(a_range), deque.buffer_range(b_range))
         }
     }
 }
@@ -103,8 +112,9 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
         impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
             fn drop(&mut self) {
                 if self.0.remaining != 0 {
-                    let (front, back) = self.0.as_slices();
                     unsafe {
+                        // SAFETY: We just checked that `self.remaining != 0`.
+                        let (front, back) = self.0.as_slices();
                         ptr::drop_in_place(front);
                         ptr::drop_in_place(back);
                     }
@@ -133,7 +143,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
                         source_deque.len = 0;
                     }
                     (0, _) => {
-                        source_deque.head = source_deque.wrap_idx(drain_len);
+                        source_deque.head = source_deque.to_physical_idx(drain_len);
                         source_deque.len = orig_len - drain_len;
                     }
                     (_, 0) => {
@@ -143,15 +153,15 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
                         if head_len <= tail_len {
                             source_deque.wrap_copy(
                                 source_deque.head,
-                                source_deque.wrap_idx(drain_len),
+                                source_deque.to_physical_idx(drain_len),
                                 head_len,
                             );
-                            source_deque.head = source_deque.wrap_idx(drain_len);
+                            source_deque.head = source_deque.to_physical_idx(drain_len);
                             source_deque.len = orig_len - drain_len;
                         } else {
                             source_deque.wrap_copy(
-                                source_deque.wrap_idx(head_len + drain_len),
-                                source_deque.wrap_idx(head_len),
+                                source_deque.to_physical_idx(head_len + drain_len),
+                                source_deque.to_physical_idx(head_len),
                                 tail_len,
                             );
                             source_deque.len = orig_len - drain_len;
@@ -162,14 +172,17 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
         }
 
         let guard = DropGuard(self);
-        let (front, back) = guard.0.as_slices();
-        unsafe {
-            // since idx is a logical index, we don't need to worry about wrapping.
-            guard.0.idx += front.len();
-            guard.0.remaining -= front.len();
-            ptr::drop_in_place(front);
-            guard.0.remaining = 0;
-            ptr::drop_in_place(back);
+        if guard.0.remaining != 0 {
+            unsafe {
+                // SAFETY: We just checked that `self.remaining != 0`.
+                let (front, back) = guard.0.as_slices();
+                // since idx is a logical index, we don't need to worry about wrapping.
+                guard.0.idx += front.len();
+                guard.0.remaining -= front.len();
+                ptr::drop_in_place(front);
+                guard.0.remaining = 0;
+                ptr::drop_in_place(back);
+            }
         }
 
         // Dropping `guard` handles moving the remaining elements into place.
@@ -185,7 +198,7 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
         if self.remaining == 0 {
             return None;
         }
-        let wrapped_idx = unsafe { self.deque.as_ref().wrap_idx(self.idx) };
+        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
         self.idx += 1;
         self.remaining -= 1;
         Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
@@ -206,7 +219,7 @@ impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
             return None;
         }
         self.remaining -= 1;
-        let wrapped_idx = unsafe { self.deque.as_ref().wrap_idx(self.idx + self.remaining) };
+        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
         Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
     }
 }
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
index d4e66db4903..d9f3937144d 100644
--- a/library/alloc/src/collections/vec_deque/iter.rs
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -39,15 +39,6 @@ impl<T> Clone for Iter<'_, T> {
 impl<'a, T> Iterator for Iter<'a, T> {
     type Item = &'a T;
 
-    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
-        let m = match self.i1.advance_by(n) {
-            Ok(_) => return Ok(()),
-            Err(m) => m,
-        };
-        mem::swap(&mut self.i1, &mut self.i2);
-        self.i1.advance_by(n - m).map_err(|o| o + m)
-    }
-
     #[inline]
     fn next(&mut self) -> Option<&'a T> {
         match self.i1.next() {
@@ -64,6 +55,15 @@ impl<'a, T> Iterator for Iter<'a, T> {
         }
     }
 
+    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+        let m = match self.i1.advance_by(n) {
+            Ok(_) => return Ok(()),
+            Err(m) => m,
+        };
+        mem::swap(&mut self.i1, &mut self.i2);
+        self.i1.advance_by(n - m).map_err(|o| o + m)
+    }
+
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
         let len = self.len();
@@ -75,17 +75,16 @@ impl<'a, T> Iterator for Iter<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i1.fold(accum, &mut f);
-        self.i2.fold(accum, f)
+        self.i2.fold(accum, &mut f)
     }
 
     fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i1.try_fold(init, &mut f)?;
-        self.i2.try_fold(acc, f)
+        self.i2.try_fold(acc, &mut f)
     }
 
     #[inline]
@@ -117,7 +116,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
             None => {
                 // most of the time, the iterator will either always
                 // call next(), or always call next_back(). By swapping
-                // the iterators once the first one is empty, we ensure
+                // the iterators once the second one is empty, we ensure
                 // that the first branch is taken as often as possible,
                 // without sacrificing correctness, as i2 is empty anyways
                 mem::swap(&mut self.i1, &mut self.i2);
@@ -141,17 +140,16 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i2.rfold(accum, &mut f);
-        self.i1.rfold(accum, f)
+        self.i1.rfold(accum, &mut f)
     }
 
     fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i2.try_rfold(init, &mut f)?;
-        self.i1.try_rfold(acc, f)
+        self.i1.try_rfold(acc, &mut f)
     }
 }
 
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
index 7c955663bde..2c59d95cd53 100644
--- a/library/alloc/src/collections/vec_deque/iter_mut.rs
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -67,17 +67,16 @@ impl<'a, T> Iterator for IterMut<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i1.fold(accum, &mut f);
-        self.i2.fold(accum, f)
+        self.i2.fold(accum, &mut f)
     }
 
     fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i1.try_fold(init, &mut f)?;
-        self.i2.try_fold(acc, f)
+        self.i2.try_fold(acc, &mut f)
     }
 
     #[inline]
@@ -133,17 +132,16 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i2.rfold(accum, &mut f);
-        self.i1.rfold(accum, f)
+        self.i1.rfold(accum, &mut f)
     }
 
     fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i2.try_rfold(init, &mut f)?;
-        self.i1.try_rfold(acc, f)
+        self.i1.try_rfold(acc, &mut f)
     }
 }
 
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index bb0e11d6c2d..52b46e448c4 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -91,7 +91,12 @@ pub struct VecDeque<
     T,
     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
 > {
+    // `self[0]`, if it exists, is `buf[head]`.
+    // `head < buf.capacity()`, unless `buf.capacity() == 0`, in which case `head == 0`.
     head: usize,
+    // the number of initialized elements, starting from the one at `head` and potentially wrapping around.
+    // if `len == 0`, the exact value of `head` is unimportant.
+    // if `T` is zero-sized, then `self.len <= usize::MAX`, otherwise `self.len <= isize::MAX as usize`.
     len: usize,
     buf: RawVec<T, A>,
 }
@@ -106,15 +111,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
 
     fn clone_from(&mut self, other: &Self) {
         self.clear();
-        self.head = 0;
-        self.reserve(other.len);
-
-        let (a, b) = other.as_slices();
-        unsafe {
-            self.write_iter(0, a.iter().cloned(), &mut 0);
-            self.write_iter(a.len(), b.iter().cloned(), &mut 0);
-        }
-        self.len = other.len;
+        self.extend(other.iter().cloned());
     }
 }
 
@@ -133,13 +130,11 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for VecDeque<T, A> {
             }
         }
 
-        if mem::needs_drop::<T>() {
-            let (front, back) = self.as_mut_slices();
-            unsafe {
-                let _back_dropper = Dropper(back);
-                // use drop for [T]
-                ptr::drop_in_place(front);
-            }
+        let (front, back) = self.as_mut_slices();
+        unsafe {
+            let _back_dropper = Dropper(back);
+            // use drop for [T]
+            ptr::drop_in_place(front);
         }
         // RawVec handles deallocation
     }
@@ -175,6 +170,15 @@ impl<T, A: Allocator> VecDeque<T, A> {
         }
     }
 
+    /// Returns a slice pointer into the buffer.
+    /// `range` must lie inside `0..self.capacity()`.
+    #[inline]
+    unsafe fn buffer_range(&self, range: Range<usize>) -> *mut [T] {
+        unsafe {
+            ptr::slice_from_raw_parts_mut(self.ptr().add(range.start), range.end - range.start)
+        }
+    }
+
     /// Returns `true` if the buffer is at full capacity.
     #[inline]
     fn is_full(&self) -> bool {
@@ -189,7 +193,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     }
 
     #[inline]
-    fn wrap_idx(&self, idx: usize) -> usize {
+    fn to_physical_idx(&self, idx: usize) -> usize {
         self.wrap_add(self.head, idx)
     }
 
@@ -473,6 +477,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
         debug_assert!(new_capacity >= old_capacity);
 
         // Move the shortest contiguous section of the ring buffer
+        //
+        // H := head
+        // L := last element (`self.to_physical_idx(self.len - 1)`)
+        //
         //    H           L
         //   [o o o o o o o . ]
         //    H           L
@@ -597,7 +605,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get(&self, index: usize) -> Option<&T> {
         if index < self.len {
-            let idx = self.wrap_idx(index);
+            let idx = self.to_physical_idx(index);
             unsafe { Some(&*self.ptr().add(idx)) }
         } else {
             None
@@ -626,7 +634,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
         if index < self.len {
-            let idx = self.wrap_idx(index);
+            let idx = self.to_physical_idx(index);
             unsafe { Some(&mut *self.ptr().add(idx)) }
         } else {
             None
@@ -660,8 +668,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
     pub fn swap(&mut self, i: usize, j: usize) {
         assert!(i < self.len());
         assert!(j < self.len());
-        let ri = self.wrap_idx(i);
-        let rj = self.wrap_idx(j);
+        let ri = self.to_physical_idx(i);
+        let rj = self.to_physical_idx(j);
         unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
     }
 
@@ -913,6 +921,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             if self.len == 0 {
                 self.head = 0;
             } else if self.head >= target_cap && tail_outside {
+                //  H := head
+                //  L := last element
                 //                    H           L
                 //   [. . . . . . . . o o o o o o o . ]
                 //    H           L
@@ -923,6 +933,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
                 }
                 self.head = 0;
             } else if self.head < target_cap && tail_outside {
+                //  H := head
+                //  L := last element
                 //          H           L
                 //   [. . . o o o o o o o . . . . . . ]
                 //      L   H
@@ -932,6 +944,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
                     self.copy_nonoverlapping(target_cap, 0, len);
                 }
             } else if self.head >= target_cap {
+                //  H := head
+                //  L := last element
                 //            L                   H
                 //   [o o o o o . . . . . . . . . o o ]
                 //            L   H
@@ -998,11 +1012,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
                 return;
             }
 
-            if !mem::needs_drop::<T>() {
-                self.len = len;
-                return;
-            }
-
             let (front, back) = self.as_mut_slices();
             if len > front.len() {
                 let begin = len - front.len();
@@ -1102,18 +1111,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[inline]
     #[stable(feature = "deque_extras_15", since = "1.5.0")]
     pub fn as_slices(&self) -> (&[T], &[T]) {
-        if self.is_contiguous() {
-            (unsafe { slice::from_raw_parts(self.ptr().add(self.head), self.len) }, &[])
-        } else {
-            let head_len = self.capacity() - self.head;
-            let tail_len = self.len - head_len;
-            unsafe {
-                (
-                    slice::from_raw_parts(self.ptr().add(self.head), head_len),
-                    slice::from_raw_parts(self.ptr(), tail_len),
-                )
-            }
-        }
+        let (a_range, b_range) = self.slice_ranges(..);
+        // SAFETY: `slice_ranges` always returns valid ranges into
+        // the physical buffer.
+        unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
     }
 
     /// Returns a pair of slices which contain, in order, the contents of the
@@ -1144,18 +1145,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[inline]
     #[stable(feature = "deque_extras_15", since = "1.5.0")]
     pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
-        if self.is_contiguous() {
-            (unsafe { slice::from_raw_parts_mut(self.ptr().add(self.head), self.len) }, &mut [])
-        } else {
-            let head_len = self.capacity() - self.head;
-            let tail_len = self.len - head_len;
-            unsafe {
-                (
-                    slice::from_raw_parts_mut(self.ptr().add(self.head), head_len),
-                    slice::from_raw_parts_mut(self.ptr(), tail_len),
-                )
-            }
-        }
+        let (a_range, b_range) = self.slice_ranges(..);
+        // SAFETY: `slice_ranges` always returns valid ranges into
+        // the physical buffer.
+        unsafe { (&mut *self.buffer_range(a_range), &mut *self.buffer_range(b_range)) }
     }
 
     /// Returns the number of elements in the deque.
@@ -1192,18 +1185,37 @@ impl<T, A: Allocator> VecDeque<T, A> {
         self.len == 0
     }
 
+    /// Given a range into the logical buffer of the deque, this function
+    /// returns two ranges into the physical buffer that correspond to
+    /// the given range.
     fn slice_ranges<R>(&self, range: R) -> (Range<usize>, Range<usize>)
     where
         R: RangeBounds<usize>,
     {
         let Range { start, end } = slice::range(range, ..self.len);
-        let a_len = self.len.min(self.capacity() - self.head);
-        if end <= a_len {
-            (start..end, 0..0)
-        } else if start >= a_len {
-            (0..0, start - a_len..end - a_len)
+        let len = end - start;
+
+        if len == 0 {
+            (0..0, 0..0)
         } else {
-            (start..a_len, 0..end - a_len)
+            // `slice::range` guarantees that `start <= end <= self.len`.
+            // because `len != 0`, we know that `start < end`, so `start < self.len`
+            // and the indexing is valid.
+            let wrapped_start = self.to_physical_idx(start);
+
+            // this subtraction can never overflow because `wrapped_start` is
+            // at most `self.capacity()` (and if `self.capacity() != 0`, then `wrapped_start` is strictly less
+            // than `self.capacity()`).
+            let head_len = self.capacity() - wrapped_start;
+
+            if head_len >= len {
+                // we know that `len + wrapped_start <= self.capacity() <= usize::MAX`, so this addition can't overflow
+                (wrapped_start..wrapped_start + len, 0..0)
+            } else {
+                // can't overflow because of the if condition
+                let tail_len = len - head_len;
+                (wrapped_start..self.capacity(), 0..tail_len)
+            }
         }
     }
 
@@ -1233,9 +1245,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
     where
         R: RangeBounds<usize>,
     {
-        let (a, b) = self.as_slices();
         let (a_range, b_range) = self.slice_ranges(range);
-        Iter::new(a[a_range].iter(), b[b_range].iter())
+        // SAFETY: The ranges returned by `slice_ranges`
+        // are valid ranges into the physical buffer, so
+        // it's ok to pass them to `buffer_range` and
+        // dereference the result.
+        let a = unsafe { &*self.buffer_range(a_range) };
+        let b = unsafe { &*self.buffer_range(b_range) };
+        Iter::new(a.iter(), b.iter())
     }
 
     /// Creates an iterator that covers the specified mutable range in the deque.
@@ -1269,8 +1286,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
         R: RangeBounds<usize>,
     {
         let (a_range, b_range) = self.slice_ranges(range);
-        let (a, b) = self.as_mut_slices();
-        IterMut::new(a[a_range].iter_mut(), b[b_range].iter_mut())
+        // SAFETY: The ranges returned by `slice_ranges`
+        // are valid ranges into the physical buffer, so
+        // it's ok to pass them to `buffer_range` and
+        // dereference the result.
+        let a = unsafe { &mut *self.buffer_range(a_range) };
+        let b = unsafe { &mut *self.buffer_range(b_range) };
+        IterMut::new(a.iter_mut(), b.iter_mut())
     }
 
     /// Removes the specified range from the deque in bulk, returning all
@@ -1509,7 +1531,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             None
         } else {
             let old_head = self.head;
-            self.head = self.wrap_idx(1);
+            self.head = self.to_physical_idx(1);
             self.len -= 1;
             Some(unsafe { self.buffer_read(old_head) })
         }
@@ -1535,7 +1557,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             None
         } else {
             self.len -= 1;
-            Some(unsafe { self.buffer_read(self.wrap_idx(self.len)) })
+            Some(unsafe { self.buffer_read(self.to_physical_idx(self.len)) })
         }
     }
 
@@ -1583,7 +1605,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             self.grow();
         }
 
-        unsafe { self.buffer_write(self.wrap_idx(self.len), value) }
+        unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
         self.len += 1;
     }
 
@@ -1700,8 +1722,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             // and panicked.
             unsafe {
                 // see `remove()` for explanation why this wrap_copy() call is safe.
-                self.wrap_copy(self.wrap_idx(index), self.wrap_idx(index + 1), k);
-                self.buffer_write(self.wrap_idx(index), value);
+                self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k);
+                self.buffer_write(self.to_physical_idx(index), value);
                 self.len += 1;
             }
         } else {
@@ -1709,7 +1731,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             self.head = self.wrap_sub(self.head, 1);
             unsafe {
                 self.wrap_copy(old_head, self.head, index);
-                self.buffer_write(self.wrap_idx(index), value);
+                self.buffer_write(self.to_physical_idx(index), value);
                 self.len += 1;
             }
         }
@@ -1742,7 +1764,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             return None;
         }
 
-        let wrapped_idx = self.wrap_idx(index);
+        let wrapped_idx = self.to_physical_idx(index);
 
         let elem = unsafe { Some(self.buffer_read(wrapped_idx)) };
 
@@ -1755,7 +1777,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             self.len -= 1;
         } else {
             let old_head = self.head;
-            self.head = self.wrap_idx(1);
+            self.head = self.to_physical_idx(1);
             unsafe { self.wrap_copy(old_head, self.head, index) };
             self.len -= 1;
         }
@@ -1866,14 +1888,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
         self.reserve(other.len);
         unsafe {
             let (left, right) = other.as_slices();
-            self.copy_slice(self.wrap_idx(self.len), left);
+            self.copy_slice(self.to_physical_idx(self.len), left);
             // no overflow, because self.capacity() >= old_cap + left.len() >= self.len + left.len()
-            self.copy_slice(self.wrap_idx(self.len + left.len()), right);
+            self.copy_slice(self.to_physical_idx(self.len + left.len()), right);
         }
         // SAFETY: Update pointers after copying to avoid leaving doppelganger
         // in case of panics.
         self.len += other.len;
-        // Silently drop values in `other`.
+        // Now that we own its values, forget everything in `other`.
         other.len = 0;
         other.head = 0;
     }
@@ -1982,8 +2004,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
         // buffer without it being full emerge
         debug_assert!(self.is_full());
         let old_cap = self.capacity();
-        // if the cap was 0, we need to reserve at least 1.
-        self.buf.reserve(old_cap, old_cap.max(1));
+        self.buf.reserve_for_push(old_cap);
         unsafe {
             self.handle_capacity_increase(old_cap);
         }
@@ -2265,16 +2286,16 @@ impl<T, A: Allocator> VecDeque<T, A> {
     unsafe fn rotate_left_inner(&mut self, mid: usize) {
         debug_assert!(mid * 2 <= self.len());
         unsafe {
-            self.wrap_copy(self.head, self.wrap_idx(self.len), mid);
+            self.wrap_copy(self.head, self.to_physical_idx(self.len), mid);
         }
-        self.head = self.wrap_idx(mid);
+        self.head = self.to_physical_idx(mid);
     }
 
     unsafe fn rotate_right_inner(&mut self, k: usize) {
         debug_assert!(k * 2 <= self.len());
         self.head = self.wrap_sub(self.head, k);
         unsafe {
-            self.wrap_copy(self.wrap_idx(self.len), self.head, k);
+            self.wrap_copy(self.to_physical_idx(self.len), self.head, k);
         }
     }
 
@@ -2532,8 +2553,13 @@ impl<T: Clone, A: Allocator> VecDeque<T, A> {
 
 /// Returns the index in the underlying buffer for a given logical element index.
 #[inline]
-fn wrap_index(index: usize, size: usize) -> usize {
-    if index >= size { index - size } else { index }
+fn wrap_index(logical_index: usize, capacity: usize) -> usize {
+    debug_assert!(
+        (logical_index == 0 && capacity == 0)
+            || logical_index < capacity
+            || (logical_index - capacity) < capacity
+    );
+    if logical_index >= capacity { logical_index - capacity } else { logical_index }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/collections/vec_deque/spec_extend.rs b/library/alloc/src/collections/vec_deque/spec_extend.rs
index 0f722ceb082..adcc8862a8c 100644
--- a/library/alloc/src/collections/vec_deque/spec_extend.rs
+++ b/library/alloc/src/collections/vec_deque/spec_extend.rs
@@ -38,7 +38,7 @@ where
                 // and `room == self.capacity() - self.len`
                 //   => `self.len + room <= self.capacity()`
                 self.write_iter_wrapping(
-                    self.wrap_idx(self.len),
+                    self.to_physical_idx(self.len),
                     ByRefSized(&mut iter).take(room),
                     room,
                 );
@@ -63,8 +63,9 @@ where
             );
             self.reserve(additional);
 
-            let written =
-                unsafe { self.write_iter_wrapping(self.wrap_idx(self.len), iter, additional) };
+            let written = unsafe {
+                self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
+            };
 
             debug_assert_eq!(
                 additional, written,
@@ -87,7 +88,7 @@ impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
         self.reserve(slice.len());
 
         unsafe {
-            self.copy_slice(self.wrap_idx(self.len), slice);
+            self.copy_slice(self.to_physical_idx(self.len), slice);
             self.len += slice.len();
         }
         iterator.forget_remaining_elements();
@@ -113,7 +114,7 @@ where
         self.reserve(slice.len());
 
         unsafe {
-            self.copy_slice(self.wrap_idx(self.len), slice);
+            self.copy_slice(self.to_physical_idx(self.len), slice);
             self.len += slice.len();
         }
     }
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index 6995c5b47db..553f8da3e2d 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -257,13 +257,14 @@ fn test_swap_panic() {
 #[test]
 fn test_reserve_exact() {
     let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
-    assert!(tester.capacity() == 1);
+    assert_eq!(tester.capacity(), 1);
     tester.reserve_exact(50);
-    assert!(tester.capacity() >= 50);
+    assert_eq!(tester.capacity(), 50);
     tester.reserve_exact(40);
-    assert!(tester.capacity() >= 40);
+    // reserving won't shrink the buffer
+    assert_eq!(tester.capacity(), 50);
     tester.reserve_exact(200);
-    assert!(tester.capacity() >= 200);
+    assert_eq!(tester.capacity(), 200);
 }
 
 #[test]
@@ -479,7 +480,7 @@ fn make_contiguous_big_tail() {
     assert_eq!(tester.capacity(), 15);
     assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
 
-    let expected_start = tester.wrap_idx(tester.len);
+    let expected_start = tester.to_physical_idx(tester.len);
     tester.make_contiguous();
     assert_eq!(tester.head, expected_start);
     assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
@@ -679,7 +680,7 @@ fn test_drain() {
 
     let cap = tester.capacity();
     for len in 0..=cap {
-        for head in 0..=cap {
+        for head in 0..cap {
             for drain_start in 0..=len {
                 for drain_end in drain_start..=len {
                     tester.head = head;