| author | Bryan Drewery <bryan@shatow.net> | 2018-03-19 13:11:55 -0700 |
|---|---|---|
| committer | Bryan Drewery <bryan@shatow.net> | 2018-03-19 13:11:55 -0700 |
| commit | 00dac20e017d321b9999b04fd6d3132c4a21a388 (patch) | |
| tree | 4a3b8da2afb957823317ffc83a8b0532934e46d0 | /src/liballoc |
| parent | d740083fc8981ee933dc48a6b3dcee21b82c993e (diff) | |
| parent | 57c74c39813c4668d3be5a0c244758f59ab32d9a (diff) | |
| download | rust-00dac20e017d321b9999b04fd6d3132c4a21a388.tar.gz | rust-00dac20e017d321b9999b04fd6d3132c4a21a388.zip |
Merge branch 'update-beta-freebsd' into freebsd-posix-spawn
Diffstat (limited to 'src/liballoc')
| -rw-r--r-- | src/liballoc/Cargo.toml | 2 |
| -rw-r--r-- | src/liballoc/allocator.rs | 18 |
| -rw-r--r-- | src/liballoc/benches/lib.rs | 1 |
| -rw-r--r-- | src/liballoc/binary_heap.rs | 6 |
| -rw-r--r-- | src/liballoc/boxed.rs | 9 |
| -rw-r--r-- | src/liballoc/btree/map.rs | 19 |
| -rw-r--r-- | src/liballoc/btree/set.rs | 14 |
| -rw-r--r-- | src/liballoc/fmt.rs | 2 |
| -rw-r--r-- | src/liballoc/heap.rs | 8 |
| -rw-r--r-- | src/liballoc/lib.rs | 8 |
| -rw-r--r-- | src/liballoc/linked_list.rs | 10 |
| -rw-r--r-- | src/liballoc/range.rs | 4 |
| -rw-r--r-- | src/liballoc/raw_vec.rs | 102 |
| -rw-r--r-- | src/liballoc/slice.rs | 20 |
| -rw-r--r-- | src/liballoc/str.rs | 59 |
| -rw-r--r-- | src/liballoc/string.rs | 86 |
| -rw-r--r-- | src/liballoc/tests/binary_heap.rs | 83 |
| -rw-r--r-- | src/liballoc/tests/lib.rs | 5 |
| -rw-r--r-- | src/liballoc/tests/slice.rs | 165 |
| -rw-r--r-- | src/liballoc/tests/string.rs | 163 |
| -rw-r--r-- | src/liballoc/tests/vec.rs | 209 |
| -rw-r--r-- | src/liballoc/tests/vec_deque.rs | 208 |
| -rw-r--r-- | src/liballoc/vec.rs | 245 |
| -rw-r--r-- | src/liballoc/vec_deque.rs | 100 |
24 files changed, 1283 insertions, 263 deletions
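The bulk of this merge's liballoc delta is the upstream `try_reserve` work (tracking issue #48043), which gives collections fallible reservation methods returning a `CollectionAllocErr` instead of aborting on allocation failure. As a rough sketch of the API the hunks below introduce (nightly-only at this point; the helper name is made up for illustration):

```rust
#![feature(try_reserve)]

use std::collections::CollectionAllocErr;

/// Hypothetical helper: reserve space for `n` bytes up front,
/// reporting failure to the caller instead of aborting the process.
fn checked_buffer(n: usize) -> Result<Vec<u8>, CollectionAllocErr> {
    let mut buf = Vec::new();
    // `try_reserve` surfaces both failure modes introduced below:
    // `CapacityOverflow` when the computed capacity exceeds the
    // collection's maximum, and `AllocErr(_)` when the allocator fails.
    buf.try_reserve(n)?;
    Ok(buf)
}

fn main() {
    match checked_buffer(4096) {
        Ok(buf) => println!("reserved {} bytes", buf.capacity()),
        Err(e) => println!("allocation failed: {:?}", e),
    }
}
```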
diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml index 0a265ee1376..3bf919b0c00 100644 --- a/src/liballoc/Cargo.toml +++ b/src/liballoc/Cargo.toml @@ -12,7 +12,7 @@ core = { path = "../libcore" } std_unicode = { path = "../libstd_unicode" } [dev-dependencies] -rand = "0.3" +rand = "0.4" [[test]] name = "collectionstests" diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs index 55e8c0b430f..fdc4efc66b9 100644 --- a/src/liballoc/allocator.rs +++ b/src/liballoc/allocator.rs @@ -373,6 +373,24 @@ impl fmt::Display for CannotReallocInPlace { } } +/// Augments `AllocErr` with a CapacityOverflow variant. +#[derive(Clone, PartialEq, Eq, Debug)] +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +pub enum CollectionAllocErr { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + /// Error due to the allocator (see the `AllocErr` type's docs). + AllocErr(AllocErr), +} + +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +impl From<AllocErr> for CollectionAllocErr { + fn from(err: AllocErr) -> Self { + CollectionAllocErr::AllocErr(err) + } +} + /// An implementation of `Alloc` can allocate, reallocate, and /// deallocate arbitrary blocks of data described via `Layout`. /// diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs index 174628ccd07..2de0ffb4b26 100644 --- a/src/liballoc/benches/lib.rs +++ b/src/liballoc/benches/lib.rs @@ -13,7 +13,6 @@ #![feature(i128_type)] #![feature(rand)] #![feature(repr_simd)] -#![feature(slice_rotate)] #![feature(test)] extern crate rand; diff --git a/src/liballoc/binary_heap.rs b/src/liballoc/binary_heap.rs index 3041f85cd4c..8aaac5d6e08 100644 --- a/src/liballoc/binary_heap.rs +++ b/src/liballoc/binary_heap.rs @@ -964,7 +964,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} /// An owning iterator over the elements of a `BinaryHeap`. @@ -1019,7 +1019,7 @@ impl<T> ExactSizeIterator for IntoIter<T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} /// A draining iterator over the elements of a `BinaryHeap`. 
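The `From<AllocErr>` impl added to allocator.rs above is what lets the new collection code apply `?` to raw allocator results. A minimal sketch of how a caller might tell the two variants apart (illustrative, not from the commit):

```rust
#![feature(try_reserve)]

use std::collections::CollectionAllocErr;
use std::usize;

// Illustrative helper: map each error variant to its cause.
fn describe(err: CollectionAllocErr) -> &'static str {
    match err {
        // The computed capacity exceeded the collection's maximum.
        CollectionAllocErr::CapacityOverflow => "capacity overflow",
        // The allocator itself failed (e.g. out of memory).
        CollectionAllocErr::AllocErr(_) => "allocator failure",
    }
}

fn main() {
    let mut v: Vec<u64> = Vec::new();
    // usize::MAX elements of u64 overflows the capacity computation
    // long before the allocator is ever consulted.
    if let Err(e) = v.try_reserve(usize::MAX) {
        println!("{}", describe(e));
    }
}
```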
@@ -1065,7 +1065,7 @@ impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} #[stable(feature = "binary_heap_extras_15", since = "1.5.0")] diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index cdaad973a71..b776556d59f 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -359,8 +359,6 @@ impl<T: ?Sized> Box<T> { /// Simple usage: /// /// ``` - /// #![feature(box_leak)] - /// /// fn main() { /// let x = Box::new(41); /// let static_ref: &'static mut usize = Box::leak(x); @@ -372,8 +370,6 @@ impl<T: ?Sized> Box<T> { /// Unsized data: /// /// ``` - /// #![feature(box_leak)] - /// /// fn main() { /// let x = vec![1, 2, 3].into_boxed_slice(); /// let static_ref = Box::leak(x); @@ -381,8 +377,7 @@ impl<T: ?Sized> Box<T> { /// assert_eq!(*static_ref, [4, 2, 3]); /// } /// ``` - #[unstable(feature = "box_leak", reason = "needs an FCP to stabilize", - issue = "46179")] + #[stable(feature = "box_leak", since = "1.26.0")] #[inline] pub fn leak<'a>(b: Box<T>) -> &'a mut T where @@ -727,7 +722,7 @@ impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {} diff --git a/src/liballoc/btree/map.rs b/src/liballoc/btree/map.rs index b320bed5432..ed9c8c18f0d 100644 --- a/src/liballoc/btree/map.rs +++ b/src/liballoc/btree/map.rs @@ -1156,7 +1156,7 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Iter<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1235,7 +1235,7 @@ impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1365,7 +1365,7 @@ impl<K, V> ExactSizeIterator for IntoIter<K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for IntoIter<K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1395,7 +1395,7 @@ impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Keys<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1432,7 +1432,7 @@ impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Values<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1482,7 +1482,7 @@ impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {} @@ -1561,7 +1561,7 @@ impl<'a, K, V> Range<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Range<'a, K, V> {} #[stable(feature = "btree_range", since = "1.17.0")] @@ -1630,7 +1630,7 @@ impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> { } } -#[unstable(feature 
= "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {} impl<'a, K, V> RangeMut<'a, K, V> { @@ -2114,7 +2114,6 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// # Examples /// /// ``` - /// #![feature(entry_and_modify)] /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); @@ -2129,7 +2128,7 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// .or_insert(42); /// assert_eq!(map["poneyland"], 43); /// ``` - #[unstable(feature = "entry_and_modify", issue = "44733")] + #[stable(feature = "entry_and_modify", since = "1.26.0")] pub fn and_modify<F>(self, mut f: F) -> Self where F: FnMut(&mut V) { diff --git a/src/liballoc/btree/set.rs b/src/liballoc/btree/set.rs index 327eaaf4651..2e3157147a0 100644 --- a/src/liballoc/btree/set.rs +++ b/src/liballoc/btree/set.rs @@ -946,7 +946,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { fn len(&self) -> usize { self.iter.len() } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -971,7 +971,7 @@ impl<T> ExactSizeIterator for IntoIter<T> { fn len(&self) -> usize { self.iter.len() } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} #[stable(feature = "btree_range", since = "1.17.0")] @@ -997,7 +997,7 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Range<'a, T> {} /// Compare `x` and `y`, but return `short` if x is None and `long` if y is None @@ -1044,7 +1044,7 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Difference<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1078,7 +1078,7 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1116,7 +1116,7 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1150,5 +1150,5 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Union<'a, T> {} diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs index a092bfb3b0a..2c4cdef03b0 100644 --- a/src/liballoc/fmt.rs +++ b/src/liballoc/fmt.rs @@ -113,6 +113,8 @@ //! //! * *nothing* ⇒ [`Display`] //! * `?` ⇒ [`Debug`] +//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers +//! * `X?` ⇒ [`Debug`] with lower-case hexadecimal integers //! * `o` ⇒ [`Octal`](trait.Octal.html) //! * `x` ⇒ [`LowerHex`](trait.LowerHex.html) //! 
* `X` ⇒ [`UpperHex`](trait.UpperHex.html) diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 372d606e457..c13ad39e5e1 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -228,14 +228,6 @@ unsafe impl Alloc for Heap { } } -/// An arbitrary non-null address to represent zero-size allocations. -/// -/// This preserves the non-null invariant for types like `Box<T>`. The address -/// may overlap with non-zero-size memory allocations. -#[rustc_deprecated(since = "1.19.0", reason = "Use Unique/NonNull::empty() instead")] -#[unstable(feature = "heap_api", issue = "27700")] -pub const EMPTY: *mut () = 1 as *mut (); - /// The allocator for unique pointers. // This function must not unwind. If it does, MIR trans will fail. #[cfg(not(test))] diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 5139e54b560..2727bcaa28a 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -79,7 +79,6 @@ #![cfg_attr(test, feature(placement_in))] #![cfg_attr(not(test), feature(core_float))] #![cfg_attr(not(test), feature(exact_size_is_empty))] -#![cfg_attr(not(test), feature(slice_rotate))] #![cfg_attr(not(test), feature(generator_trait))] #![cfg_attr(test, feature(rand, test))] #![feature(allow_internal_unstable)] @@ -97,10 +96,8 @@ #![feature(fmt_internals)] #![feature(from_ref)] #![feature(fundamental)] -#![feature(fused)] #![feature(generic_param_attrs)] #![feature(i128_type)] -#![feature(inclusive_range)] #![feature(iter_rfold)] #![feature(lang_items)] #![feature(needs_allocator)] @@ -119,14 +116,17 @@ #![feature(staged_api)] #![feature(str_internals)] #![feature(trusted_len)] +#![feature(try_reserve)] #![feature(unboxed_closures)] #![feature(unicode)] #![feature(unsize)] #![feature(allocator_internals)] #![feature(on_unimplemented)] #![feature(exact_chunks)] +#![feature(pointer_methods)] +#![feature(inclusive_range_fields)] -#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol, swap_with_slice, i128))] +#![cfg_attr(not(test), feature(fn_traits, placement_new_protocol, swap_with_slice, i128))] #![cfg_attr(test, feature(test, box_heap))] // Allow testing this library diff --git a/src/liballoc/linked_list.rs b/src/liballoc/linked_list.rs index 65be087b35e..097d2e414f5 100644 --- a/src/liballoc/linked_list.rs +++ b/src/liballoc/linked_list.rs @@ -747,8 +747,8 @@ impl<T> LinkedList<T> { /// Creates an iterator which uses a closure to determine if an element should be removed. /// /// If the closure returns true, then the element is removed and yielded. - /// If the closure returns false, it will try again, and call the closure on the next element, - /// seeing if it passes the test. + /// If the closure returns false, the element will remain in the list and will not be yielded + /// by the iterator. /// /// Note that `drain_filter` lets you mutate every element in the filter closure, regardless of /// whether you choose to keep or remove it. 
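The reworded `drain_filter` documentation above states the contract plainly: `true` removes and yields the element, `false` leaves it in place. A short usage sketch (the method is still feature-gated at this commit):

```rust
#![feature(drain_filter)]

use std::collections::LinkedList;

fn main() {
    let mut list: LinkedList<u32> = (1..7).collect();

    // Elements for which the closure returns true are removed and
    // yielded; the rest remain in the list, as the new docs say.
    let evens: Vec<u32> = list.drain_filter(|x| *x % 2 == 0).collect();

    assert_eq!(evens, [2, 4, 6]);
    assert_eq!(list.into_iter().collect::<Vec<_>>(), [1, 3, 5]);
}
```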
@@ -897,7 +897,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -946,7 +946,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for IterMut<'a, T> {} impl<'a, T> IterMut<'a, T> { @@ -1117,7 +1117,7 @@ impl<T> DoubleEndedIterator for IntoIter<T> { #[stable(feature = "rust1", since = "1.0.0")] impl<T> ExactSizeIterator for IntoIter<T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc/range.rs b/src/liballoc/range.rs index f862da0d61e..b03abc85180 100644 --- a/src/liballoc/range.rs +++ b/src/liballoc/range.rs @@ -103,7 +103,7 @@ impl<T> RangeArgument<T> for Range<T> { } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl<T> RangeArgument<T> for RangeInclusive<T> { fn start(&self) -> Bound<&T> { Included(&self.start) @@ -113,7 +113,7 @@ impl<T> RangeArgument<T> for RangeInclusive<T> { } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl<T> RangeArgument<T> for RangeToInclusive<T> { fn start(&self) -> Bound<&T> { Unbounded diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index 621e1906961..229ae54d747 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -15,6 +15,8 @@ use core::ptr::{self, Unique}; use core::slice; use heap::{Alloc, Layout, Heap}; use super::boxed::Box; +use super::allocator::CollectionAllocErr; +use super::allocator::CollectionAllocErr::*; /// A low-level utility for more ergonomically allocating, reallocating, and deallocating /// a buffer of memory on the heap without having to worry about all the corner cases @@ -84,7 +86,7 @@ impl<T, A: Alloc> RawVec<T, A> { let elem_size = mem::size_of::<T>(); let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow"); - alloc_guard(alloc_size); + alloc_guard(alloc_size).expect("capacity overflow"); // handles ZSTs and `cap = 0` alike let ptr = if alloc_size == 0 { @@ -308,7 +310,7 @@ impl<T, A: Alloc> RawVec<T, A> { let new_cap = 2 * self.cap; let new_size = new_cap * elem_size; let new_layout = Layout::from_size_align_unchecked(new_size, cur.align()); - alloc_guard(new_size); + alloc_guard(new_size).expect("capacity overflow"); let ptr_res = self.a.realloc(self.ptr.as_ptr() as *mut u8, cur, new_layout); @@ -367,7 +369,7 @@ impl<T, A: Alloc> RawVec<T, A> { // overflow and the alignment is sufficiently small. 
let new_cap = 2 * self.cap; let new_size = new_cap * elem_size; - alloc_guard(new_size); + alloc_guard(new_size).expect("capacity overflow"); let ptr = self.ptr() as *mut _; let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); match self.a.grow_in_place(ptr, old_layout, new_layout) { @@ -403,7 +405,9 @@ impl<T, A: Alloc> RawVec<T, A> { /// # Aborts /// /// Aborts on OOM - pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { + pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) + -> Result<(), CollectionAllocErr> { + unsafe { // NOTE: we don't early branch on ZSTs here because we want this // to actually catch "asking for more than usize::MAX" in that case. @@ -413,16 +417,15 @@ impl<T, A: Alloc> RawVec<T, A> { // Don't actually need any more capacity. // Wrapping in case they gave a bad `used_cap`. if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { - return; + return Ok(()); } // Nothing we can really do about these checks :( - let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow"); - let new_layout = match Layout::array::<T>(new_cap) { - Some(layout) => layout, - None => panic!("capacity overflow"), - }; - alloc_guard(new_layout.size()); + let new_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?; + let new_layout = Layout::array::<T>(new_cap).ok_or(CapacityOverflow)?; + + alloc_guard(new_layout.size())?; + let res = match self.current_layout() { Some(layout) => { let old_ptr = self.ptr.as_ptr() as *mut u8; @@ -430,26 +433,34 @@ impl<T, A: Alloc> RawVec<T, A> { } None => self.a.alloc(new_layout), }; - let uniq = match res { - Ok(ptr) => Unique::new_unchecked(ptr as *mut T), - Err(e) => self.a.oom(e), - }; - self.ptr = uniq; + + self.ptr = Unique::new_unchecked(res? as *mut T); self.cap = new_cap; + + Ok(()) } } + pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { + match self.try_reserve_exact(used_cap, needed_extra_cap) { + Err(CapacityOverflow) => panic!("capacity overflow"), + Err(AllocErr(e)) => self.a.oom(e), + Ok(()) => { /* yay */ } + } + } + /// Calculates the buffer's new size given that it'll hold `used_cap + /// needed_extra_cap` elements. This logic is used in amortized reserve methods. /// Returns `(new_capacity, new_alloc_size)`. - fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> usize { + fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) + -> Result<usize, CollectionAllocErr> { + // Nothing we can really do about these checks :( - let required_cap = used_cap.checked_add(needed_extra_cap) - .expect("capacity overflow"); + let required_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?; // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. let double_cap = self.cap * 2; // `double_cap` guarantees exponential growth. - cmp::max(double_cap, required_cap) + Ok(cmp::max(double_cap, required_cap)) } /// Ensures that the buffer contains at least enough space to hold @@ -504,8 +515,9 @@ impl<T, A: Alloc> RawVec<T, A> { /// # vector.push_all(&[1, 3, 5, 7, 9]); /// # } /// ``` - pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { - unsafe { + pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize) + -> Result<(), CollectionAllocErr> { + unsafe { // NOTE: we don't early branch on ZSTs here because we want this // to actually catch "asking for more than usize::MAX" in that case. 
// If we make it past the first branch then we are guaranteed to @@ -514,17 +526,15 @@ impl<T, A: Alloc> RawVec<T, A> { // Don't actually need any more capacity. // Wrapping in case they give a bad `used_cap` if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { - return; + return Ok(()); } - let new_cap = self.amortized_new_size(used_cap, needed_extra_cap); + let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)?; + let new_layout = Layout::array::<T>(new_cap).ok_or(CapacityOverflow)?; + + // FIXME: may crash and burn on over-reserve + alloc_guard(new_layout.size())?; - let new_layout = match Layout::array::<T>(new_cap) { - Some(layout) => layout, - None => panic!("capacity overflow"), - }; - // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()); let res = match self.current_layout() { Some(layout) => { let old_ptr = self.ptr.as_ptr() as *mut u8; @@ -532,15 +542,22 @@ impl<T, A: Alloc> RawVec<T, A> { } None => self.a.alloc(new_layout), }; - let uniq = match res { - Ok(ptr) => Unique::new_unchecked(ptr as *mut T), - Err(e) => self.a.oom(e), - }; - self.ptr = uniq; + + self.ptr = Unique::new_unchecked(res? as *mut T); self.cap = new_cap; + + Ok(()) } } + /// The same as try_reserve, but errors are lowered to a call to oom(). + pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { + match self.try_reserve(used_cap, needed_extra_cap) { + Err(CapacityOverflow) => panic!("capacity overflow"), + Err(AllocErr(e)) => self.a.oom(e), + Ok(()) => { /* yay */ } + } + } /// Attempts to ensure that the buffer contains at least enough space to hold /// `used_cap + needed_extra_cap` elements. If it doesn't already have /// enough capacity, will reallocate in place enough space plus comfortable slack @@ -576,7 +593,8 @@ impl<T, A: Alloc> RawVec<T, A> { return false; } - let new_cap = self.amortized_new_size(used_cap, needed_extra_cap); + let new_cap = self.amortized_new_size(used_cap, needed_extra_cap) + .expect("capacity overflow"); // Here, `cap < used_cap + needed_extra_cap <= new_cap` // (regardless of whether `self.cap - used_cap` wrapped). @@ -585,7 +603,7 @@ impl<T, A: Alloc> RawVec<T, A> { let ptr = self.ptr() as *mut _; let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0; // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()); + alloc_guard(new_layout.size()).expect("capacity overflow"); match self.a.grow_in_place(ptr, old_layout, new_layout) { Ok(_) => { self.cap = new_cap; @@ -709,14 +727,14 @@ unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> { // all 4GB in user-space. e.g. 
PAE or x32 #[inline] -fn alloc_guard(alloc_size: usize) { - if mem::size_of::<usize>() < 8 { - assert!(alloc_size <= ::core::isize::MAX as usize, - "capacity overflow"); +fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> { + if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize { + Err(CapacityOverflow) + } else { + Ok(()) } } - #[cfg(test)] mod tests { use super::*; diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index 028983de556..dc40062ef13 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -1460,8 +1460,6 @@ impl<T> [T] { /// # Examples /// /// ``` - /// #![feature(slice_rotate)] - /// /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; /// a.rotate_left(2); /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']); @@ -1470,23 +1468,15 @@ impl<T> [T] { /// Rotating a subslice: /// /// ``` - /// #![feature(slice_rotate)] - /// /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; /// a[1..5].rotate_left(1); /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']); - /// ``` - #[unstable(feature = "slice_rotate", issue = "41891")] + /// ``` + #[stable(feature = "slice_rotate", since = "1.26.0")] pub fn rotate_left(&mut self, mid: usize) { core_slice::SliceExt::rotate_left(self, mid); } - #[unstable(feature = "slice_rotate", issue = "41891")] - #[rustc_deprecated(since = "", reason = "renamed to `rotate_left`")] - pub fn rotate(&mut self, mid: usize) { - core_slice::SliceExt::rotate_left(self, mid); - } - /// Rotates the slice in-place such that the first `self.len() - k` /// elements of the slice move to the end while the last `k` elements move /// to the front. After calling `rotate_right`, the element previously at @@ -1505,8 +1495,6 @@ impl<T> [T] { /// # Examples /// /// ``` - /// #![feature(slice_rotate)] - /// /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; /// a.rotate_right(2); /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']); @@ -1515,13 +1503,11 @@ impl<T> [T] { /// Rotate a subslice: /// /// ``` - /// #![feature(slice_rotate)] - /// /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; /// a[1..5].rotate_right(1); /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']); /// ``` - #[unstable(feature = "slice_rotate", issue = "41891")] + #[stable(feature = "slice_rotate", since = "1.26.0")] pub fn rotate_right(&mut self, k: usize) { core_slice::SliceExt::rotate_right(self, k); } diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index a00e3d17dd0..14d5e96d2e7 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -43,6 +43,7 @@ use core::str as core_str; use core::str::pattern::Pattern; use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; +use core::ptr; use core::iter::FusedIterator; use std_unicode::str::{UnicodeStr, Utf16Encoder}; @@ -171,7 +172,7 @@ impl<'a> Iterator for EncodeUtf16<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for EncodeUtf16<'a> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -2066,9 +2067,59 @@ impl str { /// ``` #[stable(feature = "repeat_str", since = "1.16.0")] pub fn repeat(&self, n: usize) -> String { - let mut s = String::with_capacity(self.len() * n); - s.extend((0..n).map(|_| self)); - s + if n == 0 { + return String::new(); + } + + // If `n` is larger than zero, it can be split as + // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`. + // `2^expn` is the number represented by the leftmost '1' bit of `n`, + // and `rem` is the remaining part of `n`. 
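The comment block above describes the `n = 2^expn + rem` decomposition driving the new `repeat`. Below is a safe sketch of the same doubling strategy; the actual implementation that follows performs these copies with `ptr::copy_nonoverlapping` and `set_len` to avoid the intermediate allocations this version makes:

```rust
// Safe illustration of the strategy: repeat `s` n times by doubling
// the buffer `expn` times, then copying the remaining `rem`
// repetitions from the front of the buffer itself.
fn repeat_by_doubling(s: &str, n: usize) -> String {
    if n == 0 {
        return String::new();
    }
    let target = s.len() * n;
    let mut buf = String::with_capacity(target);
    buf.push_str(s);

    // Keep doubling while another full doubling still fits `target`.
    while buf.len() <= target / 2 {
        let so_far = buf.clone();
        buf.push_str(&so_far);
    }

    // `rem` repetitions: copy from the front of `buf`. In the real
    // code this copy is non-overlapping because `2^expn > rem`.
    let rem_len = target - buf.len();
    let head = buf[..rem_len].to_string();
    buf.push_str(&head);
    buf
}

fn main() {
    assert_eq!(repeat_by_doubling("ab", 5), "ababababab");
    assert_eq!(repeat_by_doubling("xyz", 1), "xyz");
    assert_eq!(repeat_by_doubling("q", 0), "");
}
```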
+ + // Using `Vec` to access `set_len()`. + let mut buf = Vec::with_capacity(self.len() * n); + + // `2^expn` repetition is done by doubling `buf` `expn`-times. + buf.extend(self.as_bytes()); + { + let mut m = n >> 1; + // If `m > 0`, there are remaining bits up to the leftmost '1'. + while m > 0 { + // `buf.extend(buf)`: + unsafe { + ptr::copy_nonoverlapping( + buf.as_ptr(), + (buf.as_mut_ptr() as *mut u8).add(buf.len()), + buf.len(), + ); + // `buf` has capacity of `self.len() * n`. + let buf_len = buf.len(); + buf.set_len(buf_len * 2); + } + + m >>= 1; + } + } + + // `rem` (`= n - 2^expn`) repetition is done by copying + // first `rem` repetitions from `buf` itself. + let rem_len = self.len() * n - buf.len(); // `self.len() * rem` + if rem_len > 0 { + // `buf.extend(buf[0 .. rem_len])`: + unsafe { + // This is non-overlapping since `2^expn > rem`. + ptr::copy_nonoverlapping( + buf.as_ptr(), + (buf.as_mut_ptr() as *mut u8).add(buf.len()), + rem_len, + ); + // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`). + let buf_cap = buf.capacity(); + buf.set_len(buf_cap); + } + } + + unsafe { String::from_utf8_unchecked(buf) } } /// Checks if all characters in this string are within the ASCII range. diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs index 8d99d0bc8f4..9fec9091498 100644 --- a/src/liballoc/string.rs +++ b/src/liballoc/string.rs @@ -71,6 +71,7 @@ use Bound::{Excluded, Included, Unbounded}; use str::{self, from_boxed_utf8_unchecked, FromStr, Utf8Error, Chars}; use vec::Vec; use boxed::Box; +use super::allocator::CollectionAllocErr; /// A UTF-8 encoded, growable string. /// @@ -364,7 +365,7 @@ impl String { /// /// Given that the `String` is empty, this will not allocate any initial /// buffer. While that means that this initial operation is very - /// inexpensive, but may cause excessive allocation later, when you add + /// inexpensive, it may cause excessive allocation later when you add /// data. If you have an idea of how much data the `String` will hold, /// consider the [`with_capacity`] method to prevent excessive /// re-allocation. @@ -920,6 +921,79 @@ impl String { self.vec.reserve_exact(additional) } + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `String`. The collection may reserve more space to avoid + /// frequent reallocations. After calling `try_reserve`, capacity will be + /// greater than or equal to `self.len() + additional`. Does nothing if + /// capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &str) -> Result<String, CollectionAllocErr> { + /// let mut output = String::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.push_str(data); + /// + /// Ok(output) + /// } + /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.vec.try_reserve(additional) + } + + /// Tries to reserve the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `String`.
After calling `try_reserve_exact`, + /// capacity will be greater than or equal to `self.len() + additional`. + /// Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity cannot be relied upon to be precisely + /// minimal. Prefer `try_reserve` if future insertions are expected. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &str) -> Result<String, CollectionAllocErr> { + /// let mut output = String::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve_exact(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.push_str(data); + /// + /// Ok(output) + /// } + /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.vec.try_reserve_exact(additional) + } + /// Shrinks the capacity of this `String` to match its length. /// /// # Examples @@ -1876,7 +1950,7 @@ impl ops::Index<ops::RangeFull> for String { unsafe { str::from_utf8_unchecked(&self.vec) } } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index<ops::RangeInclusive<usize>> for String { type Output = str; @@ -1885,7 +1959,7 @@ impl ops::Index<ops::RangeInclusive<usize>> for String { Index::index(&**self, index) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index<ops::RangeToInclusive<usize>> for String { type Output = str; @@ -1923,14 +1997,14 @@ impl ops::IndexMut<ops::RangeFull> for String { unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) } } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut<ops::RangeInclusive<usize>> for String { #[inline] fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str { IndexMut::index_mut(&mut **self, index) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut<ops::RangeToInclusive<usize>> for String { #[inline] fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str { @@ -2254,5 +2328,5 @@ impl<'a> DoubleEndedIterator for Drain<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for Drain<'a> {} diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs index 06d585f8ea8..5c979d82e55 100644 --- a/src/liballoc/tests/binary_heap.rs +++ b/src/liballoc/tests/binary_heap.rs @@ -8,9 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms.
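This hunk also stabilizes the `RangeInclusive` and `RangeToInclusive` `Index`/`IndexMut` impls for `String` (feature `inclusive_range`, 1.26.0). A quick illustration, assuming the `a..=b` range syntax that shipped in the same release:

```rust
fn main() {
    let s = String::from("hello world");

    // Inclusive-range indexing, backed by the impls stabilized above.
    assert_eq!(&s[0..=4], "hello");
    assert_eq!(&s[..=4], "hello");

    // The IndexMut impls allow in-place mutation through `&mut str`.
    let mut t = String::from("hello");
    t[0..=0].make_ascii_uppercase();
    assert_eq!(t, "Hello");
}
```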
-use std::panic; +use std::cmp; use std::collections::BinaryHeap; use std::collections::binary_heap::{Drain, PeekMut}; +use std::panic::{self, AssertUnwindSafe}; +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + +use rand::{thread_rng, Rng}; #[test] fn test_iterator() { @@ -300,3 +304,80 @@ fn assert_covariance() { d } } + +// old binaryheap failed this test +// +// Integrity means that all elements are present after a comparison panics, +// even if the order may not be correct. +// +// Destructors must be called exactly once per element. +#[test] +fn panic_safe() { + static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; + + #[derive(Eq, PartialEq, Ord, Clone, Debug)] + struct PanicOrd<T>(T, bool); + + impl<T> Drop for PanicOrd<T> { + fn drop(&mut self) { + // update global drop count + DROP_COUNTER.fetch_add(1, Ordering::SeqCst); + } + } + + impl<T: PartialOrd> PartialOrd for PanicOrd<T> { + fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { + if self.1 || other.1 { + panic!("Panicking comparison"); + } + self.0.partial_cmp(&other.0) + } + } + let mut rng = thread_rng(); + const DATASZ: usize = 32; + const NTEST: usize = 10; + + // don't use 0 in the data -- we want to catch the zeroed-out case. + let data = (1..DATASZ + 1).collect::<Vec<_>>(); + + // since it's a fuzzy test, run several tries. + for _ in 0..NTEST { + for i in 1..DATASZ + 1 { + DROP_COUNTER.store(0, Ordering::SeqCst); + + let mut panic_ords: Vec<_> = data.iter() + .filter(|&&x| x != i) + .map(|&x| PanicOrd(x, false)) + .collect(); + let panic_item = PanicOrd(i, true); + + // heapify the sane items + rng.shuffle(&mut panic_ords); + let mut heap = BinaryHeap::from(panic_ords); + let inner_data; + + { + // push the panicking item to the heap and catch the panic + let thread_result = { + let mut heap_ref = AssertUnwindSafe(&mut heap); + panic::catch_unwind(move || { + heap_ref.push(panic_item); + }) + }; + assert!(thread_result.is_err()); + + // Assert no elements were dropped + let drops = DROP_COUNTER.load(Ordering::SeqCst); + assert!(drops == 0, "Must not drop items. drops={}", drops); + inner_data = heap.clone().into_vec(); + drop(heap); + } + let drops = DROP_COUNTER.load(Ordering::SeqCst); + assert_eq!(drops, DATASZ); + + let mut data_sorted = inner_data.into_iter().map(|p| p.0).collect::<Vec<_>>(); + data_sorted.sort(); + assert_eq!(data_sorted, data); + } + } +} diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index 427a7adcbde..bcd2ef27605 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -14,7 +14,7 @@ #![feature(alloc_system)] #![feature(attr_literals)] #![feature(box_syntax)] -#![feature(inclusive_range_syntax)] +#![cfg_attr(stage0, feature(inclusive_range_syntax))] #![feature(collection_placement)] #![feature(const_fn)] #![feature(drain_filter)] @@ -23,13 +23,14 @@ #![feature(pattern)] #![feature(placement_in_syntax)] #![feature(rand)] -#![feature(slice_rotate)] #![feature(splice)] #![feature(str_escape)] #![feature(string_retain)] +#![feature(try_reserve)] #![feature(unboxed_closures)] #![feature(unicode)] #![feature(exact_chunks)] +#![feature(inclusive_range_fields)] extern crate alloc_system; extern crate std_unicode; diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs index 1a9d26fd1a2..d9e9d91cea8 100644 --- a/src/liballoc/tests/slice.rs +++ b/src/liballoc/tests/slice.rs @@ -8,9 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
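The `panic_safe` test above needs to catch a panic raised mid-operation and then inspect the heap afterwards. The core pattern it uses, moving a mutable borrow across `catch_unwind` via `AssertUnwindSafe`, shown in isolation:

```rust
use std::panic::{self, AssertUnwindSafe};

fn main() {
    let mut data = vec![1, 2, 3];

    // AssertUnwindSafe vouches that observing `data` after the panic
    // is acceptable, which lets the closure capture `&mut data`.
    let result = {
        let mut data_ref = AssertUnwindSafe(&mut data);
        panic::catch_unwind(move || {
            data_ref.push(4);
            panic!("simulated comparison panic");
        })
    };

    assert!(result.is_err());
    // The mutation made before the panic is still visible, just as
    // the heap test asserts that no elements were dropped.
    assert_eq!(data, [1, 2, 3, 4]);
}
```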
+use std::cell::Cell; use std::cmp::Ordering::{Equal, Greater, Less}; +use std::cmp::Ordering; use std::mem; +use std::panic; use std::rc::Rc; +use std::sync::atomic::Ordering::Relaxed; +use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize}; +use std::thread; use rand::{Rng, thread_rng}; @@ -1341,3 +1347,162 @@ fn test_copy_from_slice_dst_shorter() { let mut dst = [0; 3]; dst.copy_from_slice(&src); } + +const MAX_LEN: usize = 80; + +static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [ + // FIXME #5244: AtomicUsize is not Copy. + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), +]; + +static VERSIONS: AtomicUsize = ATOMIC_USIZE_INIT; + +#[derive(Clone, Eq)] +struct DropCounter { + x: u32, + id: usize, + version: Cell<usize>, +} + +impl PartialEq for DropCounter { + fn eq(&self, other: &Self) -> bool { + self.partial_cmp(other) == Some(Ordering::Equal) + } +} + +impl PartialOrd for DropCounter { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + self.version.set(self.version.get() + 1); + other.version.set(other.version.get() + 1); + VERSIONS.fetch_add(2, Relaxed); + self.x.partial_cmp(&other.x) + } +} + +impl Ord for DropCounter { + fn cmp(&self, other: &Self) -> Ordering { + self.partial_cmp(other).unwrap() + } +} + +impl Drop for DropCounter { + fn drop(&mut self) { + DROP_COUNTS[self.id].fetch_add(1, Relaxed); + VERSIONS.fetch_sub(self.version.get(), Relaxed); + } +} + +macro_rules! test { + ($input:ident, $func:ident) => { + let len = $input.len(); + + // Work out the total number of comparisons required to sort + // this array... + let mut count = 0usize; + $input.to_owned().$func(|a, b| { count += 1; a.cmp(b) }); + + // ... and then panic on each and every single one. + for panic_countdown in 0..count { + // Refresh the counters. 
+ VERSIONS.store(0, Relaxed); + for i in 0..len { + DROP_COUNTS[i].store(0, Relaxed); + } + + let v = $input.to_owned(); + let _ = thread::spawn(move || { + let mut v = v; + let mut panic_countdown = panic_countdown; + v.$func(|a, b| { + if panic_countdown == 0 { + SILENCE_PANIC.with(|s| s.set(true)); + panic!(); + } + panic_countdown -= 1; + a.cmp(b) + }) + }).join(); + + // Check that the number of things dropped is exactly + // what we expect (i.e. the contents of `v`). + for (i, c) in DROP_COUNTS.iter().enumerate().take(len) { + let count = c.load(Relaxed); + assert!(count == 1, + "found drop count == {} for i == {}, len == {}", + count, i, len); + } + + // Check that the most recent versions of values were dropped. + assert_eq!(VERSIONS.load(Relaxed), 0); + } + } +} + +thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false)); + +#[test] +#[cfg_attr(target_os = "emscripten", ignore)] // no threads +fn panic_safe() { + let prev = panic::take_hook(); + panic::set_hook(Box::new(move |info| { + if !SILENCE_PANIC.with(|s| s.get()) { + prev(info); + } + })); + + let mut rng = thread_rng(); + + for len in (1..20).chain(70..MAX_LEN) { + for &modulus in &[5, 20, 50] { + for &has_runs in &[false, true] { + let mut input = (0..len) + .map(|id| { + DropCounter { + x: rng.next_u32() % modulus, + id: id, + version: Cell::new(0), + } + }) + .collect::<Vec<_>>(); + + if has_runs { + for c in &mut input { + c.x = c.id as u32; + } + + for _ in 0..5 { + let a = rng.gen::<usize>() % len; + let b = rng.gen::<usize>() % len; + if a < b { + input[a..b].reverse(); + } else { + input.swap(a, b); + } + } + } + + test!(input, sort_by); + test!(input, sort_unstable_by); + } + } + } +} diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs index ef6f5e10a72..d1e746ea43b 100644 --- a/src/liballoc/tests/string.rs +++ b/src/liballoc/tests/string.rs @@ -9,6 +9,9 @@ // except according to those terms. use std::borrow::Cow; +use std::collections::CollectionAllocErr::*; +use std::mem::size_of; +use std::{usize, isize}; pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { fn into_cow(self) -> Cow<'a, B>; @@ -504,3 +507,163 @@ fn test_into_boxed_str() { let ys = xs.into_boxed_str(); assert_eq!(&*ys, "hello my name is bob"); } + +#[test] +fn test_reserve_exact() { + // This is all the same as test_reserve + + let mut s = String::new(); + assert_eq!(s.capacity(), 0); + + s.reserve_exact(2); + assert!(s.capacity() >= 2); + + for _i in 0..16 { + s.push('0'); + } + + assert!(s.capacity() >= 16); + s.reserve_exact(16); + assert!(s.capacity() >= 32); + + s.push('0'); + + s.reserve_exact(16); + assert!(s.capacity() >= 33) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. 
+ let guards_against_isize = size_of::<usize>() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_string: String = String::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX + 1 is an OOM + if let Err(AllocErr(_)) = empty_string.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + // Check usize::MAX is an OOM + if let Err(AllocErr(_)) = empty_string.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: String = String::from("0123456789"); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. 
+ + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::<usize>() < 8; + + { + let mut empty_string: String = String::new(); + + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + if let Err(AllocErr(_)) = empty_string.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + if let Err(AllocErr(_)) = empty_string.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: String = String::from("0123456789"); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs index 9cfde5dcc73..3c17a401bba 100644 --- a/src/liballoc/tests/vec.rs +++ b/src/liballoc/tests/vec.rs @@ -10,8 +10,9 @@ use std::borrow::Cow; use std::mem::size_of; -use std::panic; +use std::{usize, isize, panic}; use std::vec::{Drain, IntoIter}; +use std::collections::CollectionAllocErr::*; struct DropCounter<'a> { count: &'a mut u32, @@ -965,3 +966,209 @@ fn drain_filter_complex() { assert_eq!(vec, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]); } } + +#[test] +fn test_reserve_exact() { + // This is all the same as test_reserve + + let mut v = Vec::new(); + assert_eq!(v.capacity(), 0); + + v.reserve_exact(2); + assert!(v.capacity() >= 2); + + for i in 0..16 { + v.push(i); + } + + assert!(v.capacity() >= 16); + v.reserve_exact(16); + assert!(v.capacity() >= 32); + + v.push(16); + + v.reserve_exact(16); + assert!(v.capacity() >= 33) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. 
+ // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. + let guards_against_isize = size_of::<usize>() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_bytes: Vec<u8> = Vec::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX + 1 is an OOM + if let Err(AllocErr(_)) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + // Check usize::MAX is an OOM + if let Err(AllocErr(_)) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + // Same basic idea, but with interesting type size + let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should fail in the mul-by-size + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. 
+ + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::<usize>() < 8; + + { + let mut empty_bytes: Vec<u8> = Vec::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + if let Err(AllocErr(_)) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + if let Err(AllocErr(_)) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs index f2935c05d4f..fc1a0b624a5 100644 --- a/src/liballoc/tests/vec_deque.rs +++ b/src/liballoc/tests/vec_deque.rs @@ -11,6 +11,9 @@ use std::collections::VecDeque; use std::fmt::Debug; use std::collections::vec_deque::{Drain}; +use std::collections::CollectionAllocErr::*; +use std::mem::size_of; +use std::{usize, isize}; use self::Taggy::*; use self::Taggypar::*; @@ -1022,3 +1025,208 @@ fn test_placement_in() { } assert_eq!(buf, [5,4,3,1,2,6]); } + +#[test] +fn test_reserve_exact_2() { + // This is all the same as test_reserve + + let mut v = VecDeque::new(); + + v.reserve_exact(2); + assert!(v.capacity() >= 2); + + for i in 0..16 { + v.push_back(i); + } + + assert!(v.capacity() >= 16); + v.reserve_exact(16); + assert!(v.capacity() >= 32); + + v.push_back(16); + + 
v.reserve_exact(16); + assert!(v.capacity() >= 48) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) + + const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. + let guards_against_isize = size_of::<usize>() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_bytes: VecDeque<u8> = VecDeque::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX is an OOM + // VecDeque starts with capacity 7, always adds 1 to the capacity + // and also rounds the number to next power of 2 so this is the + // furthest we can go without triggering CapacityOverflow + if let Err(AllocErr(_)) = empty_bytes.try_reserve(MAX_CAP) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr(_)) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + // Same basic idea, but with interesting type size + let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + 
            } else { panic!("isize::MAX + 1 should trigger an overflow!"); }
+        } else {
+            if let Err(AllocErr(_)) = ten_u32s.try_reserve(MAX_CAP/4 - 9) {
+            } else { panic!("isize::MAX + 1 should trigger an OOM!") }
+        }
+        // Should fail in the mul-by-size
+        if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) {
+        } else {
+            panic!("usize::MAX should trigger an overflow!");
+        }
+    }
+
+}
+
+#[test]
+fn test_try_reserve_exact() {
+
+    // This is exactly the same as test_try_reserve with the method changed.
+    // See that test for comments.
+
+    const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+    const MAX_USIZE: usize = usize::MAX;
+
+    let guards_against_isize = size_of::<usize>() < 8;
+
+    {
+        let mut empty_bytes: VecDeque<u8> = VecDeque::new();
+
+        if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
+            panic!("isize::MAX shouldn't trigger an overflow!");
+        }
+        if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
+            panic!("isize::MAX shouldn't trigger an overflow!");
+        }
+
+        if guards_against_isize {
+            if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
+            } else { panic!("isize::MAX + 1 should trigger an overflow!") }
+
+            if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) {
+            } else { panic!("usize::MAX should trigger an overflow!") }
+        } else {
+            // Check isize::MAX is an OOM
+            // VecDeque starts with capacity 7, always adds 1 to the capacity
+            // and also rounds the number to next power of 2 so this is the
+            // furthest we can go without triggering CapacityOverflow
+            if let Err(AllocErr(_)) = empty_bytes.try_reserve_exact(MAX_CAP) {
+            } else { panic!("isize::MAX + 1 should trigger an OOM!") }
+        }
+    }
+
+
+    {
+        let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+        if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+            panic!("isize::MAX shouldn't trigger an overflow!");
+        }
+        if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+            panic!("isize::MAX shouldn't trigger an overflow!");
+        }
+        if guards_against_isize {
+            if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+            } else { panic!("isize::MAX + 1 should trigger an overflow!"); }
+        } else {
+            if let Err(AllocErr(_)) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+            } else { panic!("isize::MAX + 1 should trigger an OOM!") }
+        }
+        if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
+        } else { panic!("usize::MAX should trigger an overflow!") }
+    }
+
+
+    {
+        let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+        if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) {
+            panic!("isize::MAX shouldn't trigger an overflow!");
+        }
+        if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) {
+            panic!("isize::MAX shouldn't trigger an overflow!");
+        }
+        if guards_against_isize {
+            if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) {
+            } else { panic!("isize::MAX + 1 should trigger an overflow!"); }
+        } else {
+            if let Err(AllocErr(_)) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) {
+            } else { panic!("isize::MAX + 1 should trigger an OOM!") }
+        }
+        if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) {
+        } else { panic!("usize::MAX should trigger an overflow!") }
+    }
+
+}
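The tests above tell the two `CollectionAllocErr` variants apart by pattern matching. A minimal sketch of the same pattern outside the test harness, assuming the unstable `try_reserve` feature and the `std::collections` re-export that the doc examples below also rely on (the `describe` helper is hypothetical, for illustration only):

```
#![feature(try_reserve)]
use std::collections::CollectionAllocErr::{self, AllocErr, CapacityOverflow};
use std::collections::VecDeque;

// Map a reservation result to a human-readable outcome, mirroring the
// `if let Err(...)` checks in test_try_reserve/test_try_reserve_exact.
fn describe(result: Result<(), CollectionAllocErr>) -> &'static str {
    match result {
        Ok(()) => "reserved",
        Err(CapacityOverflow) => "requested capacity exceeded the collection's maximum",
        Err(AllocErr(_)) => "the allocator reported a failure (OOM)",
    }
}

fn main() {
    let mut deque: VecDeque<u8> = VecDeque::new();
    assert_eq!(describe(deque.try_reserve(10)), "reserved");
}
```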
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index 39a4d271bd6..953f95876be 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -86,6 +86,7 @@ use borrow::Cow;
 use boxed::Box;
 use raw_vec::RawVec;
 use super::range::RangeArgument;
+use super::allocator::CollectionAllocErr;
 use Bound::{Excluded, Included, Unbounded};
 
 /// A contiguous growable array type, written `Vec<T>` but pronounced 'vector'.
@@ -489,6 +490,83 @@ impl<T> Vec<T> {
         self.buf.reserve_exact(self.len, additional);
     }
 
+    /// Tries to reserve capacity for at least `additional` more elements to be inserted
+    /// in the given `Vec<T>`. The collection may reserve more space to avoid
+    /// frequent reallocations. After calling `try_reserve`, capacity will be
+    /// greater than or equal to `self.len() + additional`. Does nothing if
+    /// capacity is already sufficient.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::CollectionAllocErr;
+    ///
+    /// fn process_data(data: &[u32]) -> Result<Vec<u32>, CollectionAllocErr> {
+    ///     let mut output = Vec::new();
+    ///
+    ///     // Pre-reserve the memory, exiting if we can't
+    ///     output.try_reserve(data.len())?;
+    ///
+    ///     // Now we know this can't OOM in the middle of our complex work
+    ///     output.extend(data.iter().map(|&val| {
+    ///         val * 2 + 5 // very complicated
+    ///     }));
+    ///
+    ///     Ok(output)
+    /// }
+    /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.buf.try_reserve(self.len, additional)
+    }
+
+    /// Tries to reserve the minimum capacity for exactly `additional` more elements to
+    /// be inserted in the given `Vec<T>`. After calling `try_reserve_exact`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if the capacity is already sufficient.
+    ///
+    /// Note that the allocator may give the collection more space than it
+    /// requests. Therefore capacity cannot be relied upon to be precisely
+    /// minimal. Prefer `try_reserve` if future insertions are expected.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::CollectionAllocErr;
+    ///
+    /// fn process_data(data: &[u32]) -> Result<Vec<u32>, CollectionAllocErr> {
+    ///     let mut output = Vec::new();
+    ///
+    ///     // Pre-reserve the memory, exiting if we can't
+    ///     output.try_reserve_exact(data.len())?;
+    ///
+    ///     // Now we know this can't OOM in the middle of our complex work
+    ///     output.extend(data.iter().map(|&val| {
+    ///         val * 2 + 5 // very complicated
+    ///     }));
+    ///
+    ///     Ok(output)
+    /// }
+    /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.buf.try_reserve_exact(self.len, additional)
+    }
+
     /// Shrinks the capacity of the vector as much as possible.
     ///
     /// It will drop down as close as possible to the length but the allocator
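The new methods make allocation failure recoverable instead of aborting the process. A minimal sketch of a fallible push built on `try_reserve` (the `try_push` helper is hypothetical, not part of this patch):

```
#![feature(try_reserve)]
use std::collections::CollectionAllocErr;

// Hypothetical helper: reserve one slot fallibly, then push. The push
// itself can no longer allocate, because len < capacity is guaranteed.
fn try_push<T>(v: &mut Vec<T>, value: T) -> Result<(), CollectionAllocErr> {
    v.try_reserve(1)?;
    v.push(value);
    Ok(())
}

fn main() {
    let mut v = Vec::new();
    try_push(&mut v, 42u32).expect("allocation failed");
    assert_eq!(v, [42]);
}
```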
@@ -1212,8 +1290,9 @@ impl<T: Clone> Vec<T> {
     /// difference, with each additional slot filled with `value`.
     /// If `new_len` is less than `len`, the `Vec` is simply truncated.
     ///
-    /// This method requires `Clone` to clone the passed value. If you'd
-    /// rather create a value with `Default` instead, see [`resize_default`].
+    /// This method requires [`Clone`] to be able to clone the passed value. If
+    /// you'd rather create a value with [`Default`] instead, see
+    /// [`resize_default`].
     ///
     /// # Examples
     ///
@@ -1227,6 +1306,8 @@ impl<T: Clone> Vec<T> {
     /// assert_eq!(vec, [1, 2]);
     /// ```
     ///
+    /// [`Clone`]: ../../std/clone/trait.Clone.html
+    /// [`Default`]: ../../std/default/trait.Default.html
     /// [`resize_default`]: #method.resize_default
     #[stable(feature = "vec_resize", since = "1.5.0")]
     pub fn resize(&mut self, new_len: usize, value: T) {
@@ -1244,7 +1325,7 @@ impl<T: Clone> Vec<T> {
     /// Iterates over the slice `other`, clones each element, and then appends
     /// it to this `Vec`. The `other` vector is traversed in-order.
     ///
-    /// Note that this function is same as `extend` except that it is
+    /// Note that this function is the same as [`extend`] except that it is
     /// specialized to work with slices instead. If and when Rust gets
     /// specialization this function will likely be deprecated (but still
     /// available).
@@ -1256,6 +1337,8 @@ impl<T: Clone> Vec<T> {
     /// vec.extend_from_slice(&[2, 3, 4]);
     /// assert_eq!(vec, [1, 2, 3, 4]);
     /// ```
+    ///
+    /// [`extend`]: #method.extend
     #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
     pub fn extend_from_slice(&mut self, other: &[T]) {
         self.spec_extend(other.iter())
@@ -1266,12 +1349,11 @@ impl<T: Default> Vec<T> {
     /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
     ///
     /// If `new_len` is greater than `len`, the `Vec` is extended by the
-    /// difference, with each additional slot filled with `Default::default()`.
+    /// difference, with each additional slot filled with [`Default::default()`].
     /// If `new_len` is less than `len`, the `Vec` is simply truncated.
     ///
-    /// This method uses `Default` to create new values on every push. If
-    /// you'd rather `Clone` a given value, use [`resize`].
-    ///
+    /// This method uses [`Default`] to create new values on every push. If
+    /// you'd rather [`Clone`] a given value, use [`resize`].
     ///
     /// # Examples
     ///
@@ -1288,6 +1370,9 @@ impl<T: Default> Vec<T> {
     /// ```
     ///
     /// [`resize`]: #method.resize
+    /// [`Default::default()`]: ../../std/default/trait.Default.html#tymethod.default
+    /// [`Default`]: ../../std/default/trait.Default.html
+    /// [`Clone`]: ../../std/clone/trait.Clone.html
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     pub fn resize_default(&mut self, new_len: usize) {
         let len = self.len();
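For a quick side-by-side of the two sibling methods whose docs are cross-linked above, a short sketch (`resize_default` is unstable behind `vec_resize_default`; values chosen for illustration):

```
#![feature(vec_resize_default)]

fn main() {
    let mut v = vec![1, 2, 3];
    v.resize(5, 9);       // pads with clones of the given value
    assert_eq!(v, [1, 2, 3, 9, 9]);
    v.resize_default(7);  // pads with Default::default(), i.e. 0 for i32
    assert_eq!(v, [1, 2, 3, 9, 9, 0, 0]);
    v.resize_default(2);  // shrinking simply truncates
    assert_eq!(v, [1, 2]);
}
```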
@@ -1527,142 +1612,26 @@ impl<T: Hash> Hash for Vec<T> {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> Index<usize> for Vec<T> {
-    type Output = T;
-
-    #[inline]
-    fn index(&self, index: usize) -> &T {
-        // NB built-in indexing via `&[T]`
-        &(**self)[index]
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> IndexMut<usize> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, index: usize) -> &mut T {
-        // NB built-in indexing via `&mut [T]`
-        &mut (**self)[index]
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::Index<ops::Range<usize>> for Vec<T> {
-    type Output = [T];
-
-    #[inline]
-    fn index(&self, index: ops::Range<usize>) -> &[T] {
-        Index::index(&**self, index)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::Index<ops::RangeTo<usize>> for Vec<T> {
-    type Output = [T];
-
-    #[inline]
-    fn index(&self, index: ops::RangeTo<usize>) -> &[T] {
-        Index::index(&**self, index)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::Index<ops::RangeFrom<usize>> for Vec<T> {
-    type Output = [T];
-
-    #[inline]
-    fn index(&self, index: ops::RangeFrom<usize>) -> &[T] {
-        Index::index(&**self, index)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::Index<ops::RangeFull> for Vec<T> {
-    type Output = [T];
-
-    #[inline]
-    fn index(&self, _index: ops::RangeFull) -> &[T] {
-        self
-    }
-}
-
-#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::Index<ops::RangeInclusive<usize>> for Vec<T> {
-    type Output = [T];
-
-    #[inline]
-    fn index(&self, index: ops::RangeInclusive<usize>) -> &[T] {
-        Index::index(&**self, index)
-    }
-}
-
-#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::Index<ops::RangeToInclusive<usize>> for Vec<T> {
-    type Output = [T];
+impl<T, I> Index<I> for Vec<T>
+where
+    I: ::core::slice::SliceIndex<[T]>,
+{
+    type Output = I::Output;
 
     #[inline]
-    fn index(&self, index: ops::RangeToInclusive<usize>) -> &[T] {
+    fn index(&self, index: I) -> &Self::Output {
         Index::index(&**self, index)
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::IndexMut<ops::Range<usize>> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [T] {
-        IndexMut::index_mut(&mut **self, index)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::IndexMut<ops::RangeTo<usize>> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut [T] {
-        IndexMut::index_mut(&mut **self, index)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::IndexMut<ops::RangeFrom<usize>> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut [T] {
-        IndexMut::index_mut(&mut **self, index)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::IndexMut<ops::RangeFull> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [T] {
-        self
-    }
-}
-
-#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::IndexMut<ops::RangeInclusive<usize>> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut [T] {
-        IndexMut::index_mut(&mut **self, index)
-    }
-}
-
-#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
-#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"]
-impl<T> ops::IndexMut<ops::RangeToInclusive<usize>> for Vec<T> {
+impl<T, I> IndexMut<I> for Vec<T>
+where
+    I: ::core::slice::SliceIndex<[T]>,
+{
     #[inline]
-    fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut [T] {
+    fn index_mut(&mut self, index: I) -> &mut Self::Output {
         IndexMut::index_mut(&mut **self, index)
     }
 }
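The fourteen concrete `Index`/`IndexMut` impls removed above collapse into the two generic ones, so every `I: SliceIndex<[T]>` now routes through the same pair. A small sketch of what that single pair covers, which compiles against either version of the code:

```
fn main() {
    let mut v = vec![10, 20, 30, 40];
    {
        let first: &i32 = &v[0];     // I = usize            => Output = i32
        assert_eq!(*first, 10);
        let tail: &[i32] = &v[1..];  // I = RangeFrom<usize> => Output = [i32]
        assert_eq!(tail, [20, 30, 40]);
    }
    v[..2].sort();                   // IndexMut with a range goes the same way
    assert_eq!(v, [10, 20, 30, 40]);
}
```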
@@ -1966,8 +1935,8 @@ impl<T> Vec<T> {
     /// Creates an iterator which uses a closure to determine if an element should be removed.
     ///
     /// If the closure returns true, then the element is removed and yielded.
-    /// If the closure returns false, it will try again, and call the closure
-    /// on the next element, seeing if it passes the test.
+    /// If the closure returns false, the element will remain in the vector and will not be yielded
+    /// by the iterator.
     ///
     /// Using this method is equivalent to the following code:
     ///
@@ -2389,7 +2358,7 @@ impl<T> ExactSizeIterator for IntoIter<T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<T> FusedIterator for IntoIter<T> {}
 
 #[unstable(feature = "trusted_len", issue = "37572")]
@@ -2495,7 +2464,7 @@ impl<'a, T> ExactSizeIterator for Drain<'a, T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<'a, T> FusedIterator for Drain<'a, T> {}
 
 /// A place for insertion at the back of a `Vec`.
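The reworded paragraph sits in the docs of what appears to be the unstable `drain_filter` method. A brief sketch of the behaviour the new wording promises (requires the `drain_filter` feature):

```
#![feature(drain_filter)]

fn main() {
    let mut numbers = vec![1, 2, 3, 4, 5, 6];
    // true  => removed and yielded; false => stays in `numbers`
    let evens: Vec<i32> = numbers.drain_filter(|n| *n % 2 == 0).collect();
    assert_eq!(evens, [2, 4, 6]);
    assert_eq!(numbers, [1, 3, 5]);
}
```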
diff --git a/src/liballoc/vec_deque.rs b/src/liballoc/vec_deque.rs
index 8b686365e69..0658777f0a0 100644
--- a/src/liballoc/vec_deque.rs
+++ b/src/liballoc/vec_deque.rs
@@ -31,6 +31,7 @@ use core::cmp;
 
 use raw_vec::RawVec;
 
+use super::allocator::CollectionAllocErr;
 use super::range::RangeArgument;
 use Bound::{Excluded, Included, Unbounded};
 use super::vec::Vec;
@@ -566,6 +567,97 @@ impl<T> VecDeque<T> {
         }
     }
 
+    /// Tries to reserve the minimum capacity for exactly `additional` more elements to
+    /// be inserted in the given `VecDeque<T>`. After calling `try_reserve_exact`,
+    /// capacity will be greater than or equal to `self.len() + additional`.
+    /// Does nothing if the capacity is already sufficient.
+    ///
+    /// Note that the allocator may give the collection more space than it
+    /// requests. Therefore capacity cannot be relied upon to be precisely
+    /// minimal. Prefer `try_reserve` if future insertions are expected.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::CollectionAllocErr;
+    /// use std::collections::VecDeque;
+    ///
+    /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, CollectionAllocErr> {
+    ///     let mut output = VecDeque::new();
+    ///
+    ///     // Pre-reserve the memory, exiting if we can't
+    ///     output.try_reserve_exact(data.len())?;
+    ///
+    ///     // Now we know this can't OOM in the middle of our complex work
+    ///     output.extend(data.iter().map(|&val| {
+    ///         val * 2 + 5 // very complicated
+    ///     }));
+    ///
+    ///     Ok(output)
+    /// }
+    /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.try_reserve(additional)
+    }
+
+    /// Tries to reserve capacity for at least `additional` more elements to be inserted
+    /// in the given `VecDeque<T>`. The collection may reserve more space to avoid
+    /// frequent reallocations. After calling `try_reserve`, capacity will be
+    /// greater than or equal to `self.len() + additional`. Does nothing if
+    /// capacity is already sufficient.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::CollectionAllocErr;
+    /// use std::collections::VecDeque;
+    ///
+    /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, CollectionAllocErr> {
+    ///     let mut output = VecDeque::new();
+    ///
+    ///     // Pre-reserve the memory, exiting if we can't
+    ///     output.try_reserve(data.len())?;
+    ///
+    ///     // Now we know this can't OOM in the middle of our complex work
+    ///     output.extend(data.iter().map(|&val| {
+    ///         val * 2 + 5 // very complicated
+    ///     }));
+    ///
+    ///     Ok(output)
+    /// }
+    /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        let old_cap = self.cap();
+        let used_cap = self.len() + 1;
+        let new_cap = used_cap.checked_add(additional)
+            .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+            .ok_or(CollectionAllocErr::CapacityOverflow)?;
+
+        if new_cap > old_cap {
+            self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
+            unsafe {
+                self.handle_cap_increase(old_cap);
+            }
+        }
+        Ok(())
+    }
+
     /// Shrinks the capacity of the `VecDeque` as much as possible.
     ///
     /// It will drop down as close as possible to the length but the allocator may still inform the
@@ -1991,7 +2083,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<'a, T> FusedIterator for Iter<'a, T> {}
 
 
@@ -2084,7 +2176,7 @@ impl<'a, T> ExactSizeIterator for IterMut<'a, T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<'a, T> FusedIterator for IterMut<'a, T> {}
 
 /// An owning iterator over the elements of a `VecDeque`.
@@ -2140,7 +2232,7 @@ impl<T> ExactSizeIterator for IntoIter<T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<T> FusedIterator for IntoIter<T> {}
 
 /// A draining iterator over the elements of a `VecDeque`.
@@ -2247,7 +2339,7 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
 #[stable(feature = "drain", since = "1.6.0")]
 impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
 
 #[stable(feature = "rust1", since = "1.0.0")]
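One detail of the `VecDeque::try_reserve` body above is worth a worked example: the ring buffer keeps one slot unused and rounds its capacity to a power of two, so the requested capacity is `next_power_of_two(len + 1 + additional)`, and the overflow check has to run on that rounded figure. A sketch of the arithmetic (numbers chosen for illustration):

```
fn main() {
    let used_cap = 5usize;    // len + 1: one slot always stays unused
    let additional = 12usize;
    let new_cap = used_cap.checked_add(additional)
        .and_then(|needed| needed.checked_next_power_of_two())
        .expect("capacity overflow");
    assert_eq!(new_cap, 32);  // next_power_of_two(17) == 32
}
```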
