author     snf <snf@users.noreply.github.com>  2018-03-08 14:36:43 +0000
committer  snf <snf@users.noreply.github.com>  2018-03-14 03:48:42 -0700
commit     92bfcd2b192e59d12d64acf6f46c1897a3273b3e (patch)
tree       001462078f9c699aa3cf33322be05cc6f7b9b3d5 /src/libstd
parent     fab632f9759af4f3d96c6ec69e24e5428060dba4 (diff)
implementing fallible allocation API (try_reserve) for Vec, String and HashMap
Diffstat (limited to 'src/libstd')
-rw-r--r--  src/libstd/collections/hash/map.rs    | 96
-rw-r--r--  src/libstd/collections/hash/table.rs  | 57
-rw-r--r--  src/libstd/collections/mod.rs         |  3
-rw-r--r--  src/libstd/lib.rs                     |  1
4 files changed, 128 insertions, 29 deletions
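
Before the diff, a quick orientation sketch (illustrative, not part of the commit): `reserve` keeps its panic-or-abort behaviour, while the new `try_reserve` reports both failure modes through a `Result`, using the `CollectionAllocErr` re-export added below. A hypothetical caller might look like:

    #![feature(try_reserve)] // nightly gate at the time of this commit
    use std::collections::{CollectionAllocErr, HashMap};

    // Hypothetical caller: degrade gracefully instead of aborting on OOM.
    fn cache_all(items: &[(u32, u32)]) -> Result<HashMap<u32, u32>, CollectionAllocErr> {
        let mut map = HashMap::new();
        map.try_reserve(items.len())?; // Err(CapacityOverflow) or Err(AllocErr(_))
        map.extend(items.iter().cloned());
        Ok(map)
    }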
diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index 6f4528a0e24..b18b38ec302 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -11,6 +11,8 @@
 use self::Entry::*;
 use self::VacantEntryState::*;
 
+use alloc::heap::{Heap, Alloc};
+use alloc::allocator::CollectionAllocErr;
 use cell::Cell;
 use borrow::Borrow;
 use cmp::max;
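
For reference, the `CollectionAllocErr` imported above was defined in `alloc::allocator` at the time of this commit; a sketch of its shape, reconstructed from the match arms later in this diff (`AllocErr` shown as an opaque stand-in):

    pub struct AllocErr; // stand-in; the real type carried allocation details

    pub enum CollectionAllocErr {
        // The capacity arithmetic itself overflowed usize.
        CapacityOverflow,
        // The arithmetic was fine, but the allocator refused the request.
        AllocErr(AllocErr),
    }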
@@ -42,21 +44,28 @@ impl DefaultResizePolicy {
     /// provide that capacity, accounting for maximum loading. The raw capacity
     /// is always zero or a power of two.
     #[inline]
-    fn raw_capacity(&self, len: usize) -> usize {
+    fn try_raw_capacity(&self, len: usize) -> Result<usize, CollectionAllocErr> {
         if len == 0 {
-            0
+            Ok(0)
         } else {
             // 1. Account for loading: `raw_capacity >= len * 1.1`.
             // 2. Ensure it is a power of two.
             // 3. Ensure it is at least the minimum size.
-            let mut raw_cap = len * 11 / 10;
-            assert!(raw_cap >= len, "raw_cap overflow");
-            raw_cap = raw_cap.checked_next_power_of_two().expect("raw_capacity overflow");
+            let mut raw_cap = len.checked_mul(11)
+                .map(|l| l / 10)
+                .and_then(|l| l.checked_next_power_of_two())
+                .ok_or(CollectionAllocErr::CapacityOverflow)?;
+
             raw_cap = max(MIN_NONZERO_RAW_CAPACITY, raw_cap);
-            raw_cap
+            Ok(raw_cap)
         }
     }
 
+    #[inline]
+    fn raw_capacity(&self, len: usize) -> usize {
+        self.try_raw_capacity(len).expect("raw_capacity overflow")
+    }
+
     /// The capacity of the given raw capacity.
     #[inline]
     fn capacity(&self, raw_cap: usize) -> usize {
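
To make the policy above concrete, a standalone sketch of the same arithmetic (illustrative: 32 stands in for MIN_NONZERO_RAW_CAPACITY, and `()` for CollectionAllocErr::CapacityOverflow):

    fn try_raw_capacity_sketch(len: usize) -> Result<usize, ()> {
        if len == 0 { return Ok(0); }
        len.checked_mul(11)
            .map(|l| l / 10)                             // raw_cap >= len * 1.1
            .and_then(|l| l.checked_next_power_of_two()) // round up to a power of two
            .map(|cap| cap.max(32))                      // enforce the minimum size
            .ok_or(())
    }

    fn main() {
        assert_eq!(try_raw_capacity_sketch(100), Ok(128));        // 110 rounds up to 128
        assert_eq!(try_raw_capacity_sketch(usize::MAX), Err(())); // len * 11 overflows
    }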
@@ -775,17 +784,45 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn reserve(&mut self, additional: usize) {
+        match self.try_reserve(additional) {
+            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
+            Err(CollectionAllocErr::AllocErr(e)) => Heap.oom(e),
+            Ok(()) => { /* yay */ }
+        }
+    }
+
+    /// Tries to reserve capacity for at least `additional` more elements to be inserted
+    /// in the given `HashMap<K,V>`. The collection may reserve more space to avoid
+    /// frequent reallocations.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::HashMap;
+    /// let mut map: HashMap<&str, isize> = HashMap::new();
+    /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
         let remaining = self.capacity() - self.len(); // this can't overflow
         if remaining < additional {
-            let min_cap = self.len().checked_add(additional).expect("reserve overflow");
-            let raw_cap = self.resize_policy.raw_capacity(min_cap);
-            self.resize(raw_cap);
+            let min_cap = self.len().checked_add(additional)
+                .ok_or(CollectionAllocErr::CapacityOverflow)?;
+            let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
+            self.try_resize(raw_cap)?;
         } else if self.table.tag() && remaining <= self.len() {
             // Probe sequence is too long and table is half full,
             // resize early to reduce probing length.
             let new_capacity = self.table.capacity() * 2;
-            self.resize(new_capacity);
+            self.try_resize(new_capacity)?;
         }
+        Ok(())
     }
 
     /// Resizes the internal vectors to a new capacity. It's your
@@ -795,15 +832,15 @@ impl<K, V, S> HashMap<K, V, S>
     ///   2) Ensure `new_raw_cap` is a power of two or zero.
     #[inline(never)]
     #[cold]
-    fn resize(&mut self, new_raw_cap: usize) {
+    fn try_resize(&mut self, new_raw_cap: usize) -> Result<(), CollectionAllocErr> {
         assert!(self.table.size() <= new_raw_cap);
         assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
 
-        let mut old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
+        let mut old_table = replace(&mut self.table, RawTable::try_new(new_raw_cap)?);
         let old_size = old_table.size();
 
         if old_table.size() == 0 {
-            return;
+            return Ok(());
         }
 
         let mut bucket = Bucket::head_bucket(&mut old_table);
@@ -838,6 +875,7 @@ impl<K, V, S> HashMap<K, V, S>
         }
 
         assert_eq!(self.table.size(), old_size);
+        Ok(())
     }
 
     /// Shrinks the capacity of the map as much as possible. It will drop
@@ -2717,6 +2755,9 @@ mod test_map {
     use cell::RefCell;
     use rand::{thread_rng, Rng};
     use panic;
+    use realstd::collections::CollectionAllocErr::*;
+    use realstd::mem::size_of;
+    use realstd::usize;
 
     #[test]
     fn test_zero_capacities() {
@@ -3651,4 +3692,33 @@ mod test_map {
         let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { hm.entry(0) <- makepanic(); }));
         assert_eq!(hm.len(), 0);
     }
+
+    #[test]
+    fn test_try_reserve() {
+
+        let mut empty_bytes: HashMap<u8,u8> = HashMap::new();
+
+        const MAX_USIZE: usize = usize::MAX;
+
+        // HashMap and RawTable use complicated size calculations:
+        // hashes_size is sizeof(HashUint) * capacity;
+        // pairs_size is sizeof((K, V)) * capacity;
+        // alignment_hashes_size is 8
+        // alignment_pairs_size is 4
+        let size_of_multiplier = (size_of::<usize>() + size_of::<(u8, u8)>()).next_power_of_two();
+        // A request sized to pass the `len * 11` capacity math on 64-bit
+        // targets while exceeding what the allocator can possibly satisfy:
+        let max_no_ovf = ((MAX_USIZE / 11) * 10) / size_of_multiplier - 1;
+
+        if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
+        } else { panic!("usize::MAX should trigger an overflow!"); }
+
+        if size_of::<usize>() < 8 {
+            if let Err(CapacityOverflow) = empty_bytes.try_reserve(max_no_ovf) {
+            } else { panic!("isize::MAX + 1 should trigger a CapacityOverflow!") }
+        } else {
+            if let Err(AllocErr(_)) = empty_bytes.try_reserve(max_no_ovf) {
+            } else { panic!("isize::MAX + 1 should trigger an OOM!") }
+        }
+    }
+
 }
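
Unpacking the constants that test relies on (a reading of the code on a 64-bit target; illustrative, not part of the commit):

    fn main() {
        // Mirrors test_try_reserve: 8 + 2 = 10, rounded up to 16.
        let size_of_multiplier =
            (std::mem::size_of::<usize>() + std::mem::size_of::<(u8, u8)>()).next_power_of_two();
        let max_no_ovf = ((usize::MAX / 11) * 10) / size_of_multiplier - 1;

        // This length survives the `len * 11` step in try_raw_capacity,
        // so the capacity math succeeds...
        assert!(max_no_ovf.checked_mul(11).is_some());
        // ...and the failure comes from the allocator instead (AllocErr),
        // since the bucket array would need more bytes than the address
        // space holds. On 32-bit targets `len * 11` overflows first,
        // hence the CapacityOverflow branch.
        println!("max_no_ovf = {}", max_no_ovf);
    }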
diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs
index 73bd5747c10..8e78dc546c6 100644
--- a/src/libstd/collections/hash/table.rs
+++ b/src/libstd/collections/hash/table.rs
@@ -17,6 +17,7 @@ use mem::{align_of, size_of, needs_drop};
 use mem;
 use ops::{Deref, DerefMut};
 use ptr::{self, Unique, NonNull};
+use alloc::allocator::CollectionAllocErr;
 
 use self::BucketState::*;
 
@@ -741,14 +742,15 @@ fn test_offset_calculation() {
 impl<K, V> RawTable<K, V> {
     /// Does not initialize the buckets. The caller should ensure they,
     /// at the very least, set every hash to EMPTY_BUCKET.
-    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
+    /// Returns an error if it cannot allocate or capacity overflows.
+    unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
         if capacity == 0 {
-            return RawTable {
+            return Ok(RawTable {
                 size: 0,
                 capacity_mask: capacity.wrapping_sub(1),
                 hashes: TaggedHashUintPtr::new(EMPTY as *mut HashUint),
                 marker: marker::PhantomData,
-            };
+            });
         }
 
         // No need for `checked_mul` before a more restrictive check performed
@@ -768,25 +770,38 @@ impl<K, V> RawTable<K, V> {
                                                            align_of::<HashUint>(),
                                                            pairs_size,
                                                            align_of::<(K, V)>());
-        assert!(!oflo, "capacity overflow");
+        if oflo {
+            return Err(CollectionAllocErr::CapacityOverflow);
+        }
 
         // One check for overflow that covers calculation and rounding of size.
-        let size_of_bucket = size_of::<HashUint>().checked_add(size_of::<(K, V)>()).unwrap();
-        assert!(size >=
-                capacity.checked_mul(size_of_bucket)
-                    .expect("capacity overflow"),
-                "capacity overflow");
+        let size_of_bucket = size_of::<HashUint>().checked_add(size_of::<(K, V)>())
+            .ok_or(CollectionAllocErr::CapacityOverflow)?;
+        let capacity_mul_size_of_bucket = capacity.checked_mul(size_of_bucket);
+        if capacity_mul_size_of_bucket.is_none() || size < capacity_mul_size_of_bucket.unwrap() {
+            return Err(CollectionAllocErr::CapacityOverflow);
+        }
 
-        let buffer = Heap.alloc(Layout::from_size_align(size, alignment).unwrap())
-            .unwrap_or_else(|e| Heap.oom(e));
+        let buffer = Heap.alloc(Layout::from_size_align(size, alignment)
+            .ok_or(CollectionAllocErr::CapacityOverflow)?)?;
 
         let hashes = buffer as *mut HashUint;
 
-        RawTable {
+        Ok(RawTable {
             capacity_mask: capacity.wrapping_sub(1),
             size: 0,
             hashes: TaggedHashUintPtr::new(hashes),
             marker: marker::PhantomData,
+        })
+    }
+
+    /// Does not initialize the buckets. The caller should ensure they,
+    /// at the very least, set every hash to EMPTY_BUCKET.
+    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
+        match Self::try_new_uninitialized(capacity) {
+            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
+            Err(CollectionAllocErr::AllocErr(e)) => Heap.oom(e),
+            Ok(table) => { table }
         }
     }
 
@@ -809,13 +824,23 @@ impl<K, V> RawTable<K, V> {
         }
     }
 
+    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
+    /// it returns an AllocErr.
+    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+        unsafe {
+            let ret = RawTable::try_new_uninitialized(capacity)?;
+            ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
+            Ok(ret)
+        }
+    }
+
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> RawTable<K, V> {
-        unsafe {
-            let ret = RawTable::new_uninitialized(capacity);
-            ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
-            ret
+        match Self::try_new(capacity) {
+            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
+            Err(CollectionAllocErr::AllocErr(e)) => Heap.oom(e),
+            Ok(table) => { table }
         }
     }
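
The guard order in try_new_uninitialized above, reduced to a standalone sketch (a hypothetical helper, not the commit's code: `()` stands in for CollectionAllocErr::CapacityOverflow, usize plays the role of HashUint, and today's Layout::from_size_align returns a Result where the 2018 code matched on an Option):

    use std::alloc::Layout;
    use std::mem::{align_of, size_of};

    // Every arithmetic step that can overflow is checked before a Layout is
    // built, so the allocator is only ever handed a well-formed request.
    fn try_buckets_layout<K, V>(capacity: usize) -> Result<Layout, ()> {
        let size_of_bucket = size_of::<usize>()   // the hash part of a bucket
            .checked_add(size_of::<(K, V)>())     // plus the key/value pair
            .ok_or(())?;
        let size = capacity.checked_mul(size_of_bucket).ok_or(())?;
        let align = align_of::<usize>().max(align_of::<(K, V)>());
        Layout::from_size_align(size, align).map_err(|_| ())
    }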
 
diff --git a/src/libstd/collections/mod.rs b/src/libstd/collections/mod.rs
index e9a150f34a5..be88f4e268a 100644
--- a/src/libstd/collections/mod.rs
+++ b/src/libstd/collections/mod.rs
@@ -438,6 +438,9 @@ pub use self::hash_set::HashSet;
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use alloc::range;
 
+#[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+pub use alloc::allocator::CollectionAllocErr;
+
 mod hash;
 
 #[stable(feature = "rust1", since = "1.0.0")]
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index da15941374d..ccc5373acc7 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -314,6 +314,7 @@
 #![feature(thread_local)]
 #![feature(toowned_clone_into)]
 #![feature(try_from)]
+#![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![feature(unicode)]
 #![feature(untagged_unions)]
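
Together with the liballoc half of the change referenced in the commit message, Vec, String and HashMap all gain the same surface; a closing usage sketch under that assumption:

    #![feature(try_reserve)]
    use std::collections::{CollectionAllocErr, HashMap};

    fn reserve_everything() -> Result<(), CollectionAllocErr> {
        let mut v: Vec<u8> = Vec::new();
        v.try_reserve(1024)?;                  // Vec (liballoc side of the change)

        let mut s = String::new();
        s.try_reserve(1024)?;                  // String (liballoc side of the change)

        let mut m: HashMap<u32, u32> = HashMap::new();
        m.try_reserve(1024)?;                  // HashMap (this diff)
        Ok(())
    }

    fn main() {
        match reserve_everything() {
            Ok(()) => println!("all reservations succeeded"),
            Err(e) => eprintln!("allocation failed: {:?}", e),
        }
    }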