author     arthurprs <arthurprs@gmail.com>    2016-11-27 00:57:09 +0100
committer  arthurprs <arthurprs@gmail.com>    2016-11-27 21:38:46 +0100
commit     178e29df7d2927641ddbadaaa92e1f1c17fa5c92 (patch)
tree       3ce2f24dd05674cab258402606487acbe8d55809 /src/libstd
parent     73e98a0210f0afdec28b4f5bc0f7327d6a5a8555 (diff)
download   rust-178e29df7d2927641ddbadaaa92e1f1c17fa5c92.tar.gz
           rust-178e29df7d2927641ddbadaaa92e1f1c17fa5c92.zip
Use displacement instead of initial bucket in HashMap code
Diffstat (limited to 'src/libstd')
-rw-r--r--  src/libstd/collections/hash/map.rs | 39
1 file changed, 21 insertions, 18 deletions
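
The "displacement" the commit title refers to is the distance between the bucket an entry currently occupies and the bucket its hash originally maps to. Before this patch the probing code compared initial-bucket indices ("ib"); afterwards it tracks the displacement directly. As a hedged illustration only (a hypothetical helper, not libstd code, assuming the power-of-two capacity std's HashMap uses):

// Hypothetical helper for illustration; not part of libstd.
// Computes how far the entry at `current_index` sits from its ideal slot,
// assuming a power-of-two table capacity as in std's HashMap.
fn displacement(current_index: usize, hash: u64, capacity: usize) -> usize {
    debug_assert!(capacity.is_power_of_two());
    let initial_bucket = (hash as usize) & (capacity - 1); // ideal slot for this hash
    // Distance probed past the ideal slot, with wraparound.
    current_index.wrapping_sub(initial_bucket) & (capacity - 1)
}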
diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index ece51d6d826..f102a1bf630 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -371,9 +371,9 @@ fn search_hashed<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F) -> Inter
         return InternalEntry::TableIsEmpty;
     }
 
-    let size = table.size() as isize;
+    let size = table.size();
     let mut probe = Bucket::new(table, hash);
-    let ib = probe.index() as isize;
+    let mut displacement = 0;
 
     loop {
         let full = match probe.peek() {
@@ -387,15 +387,15 @@ fn search_hashed<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F) -> Inter
             Full(bucket) => bucket,
         };
 
-        let robin_ib = full.index() as isize - full.displacement() as isize;
+        let probe_displacement = full.displacement();
 
-        if ib < robin_ib {
+        if probe_displacement < displacement {
             // Found a luckier bucket than me.
             // We can finish the search early if we hit any bucket
             // with a lower distance to initial bucket than we've probed.
             return InternalEntry::Vacant {
                 hash: hash,
-                elem: NeqElem(full, robin_ib as usize),
+                elem: NeqElem(full, probe_displacement),
             };
         }
 
@@ -406,9 +406,9 @@ fn search_hashed<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F) -> Inter
                 return InternalEntry::Occupied { elem: full };
             }
         }
-
+        displacement += 1;
         probe = full.next();
-        debug_assert!(probe.index() as isize != ib + size + 1);
+        debug_assert!(displacement <= size);
     }
 }
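
With the hunks above applied, search_hashed stops on an empty bucket, on a key match, or as soon as it meets an occupant whose displacement is smaller than the distance already probed. A standalone sketch of that loop, using a hypothetical slice of Option<(hash, key, value)> slots instead of the real Bucket/RawTable machinery (assumptions: power-of-two capacity, hashes precomputed):

fn search<K: PartialEq, V>(slots: &[Option<(u64, K, V)>], hash: u64, key: &K) -> Option<usize> {
    let capacity = slots.len();
    debug_assert!(capacity.is_power_of_two());
    let mut index = (hash as usize) & (capacity - 1);
    let mut displacement = 0;
    loop {
        match &slots[index] {
            // An empty bucket ends the probe sequence: the key is absent.
            None => return None,
            Some((slot_hash, slot_key, _)) => {
                // Displacement of the element already stored in this bucket.
                let probe_displacement =
                    index.wrapping_sub((*slot_hash as usize) & (capacity - 1)) & (capacity - 1);
                if probe_displacement < displacement {
                    // A "richer" occupant: under the Robin Hood invariant our key
                    // would have been placed no later than here, so stop early.
                    return None;
                }
                if *slot_hash == hash && slot_key == key {
                    return Some(index);
                }
            }
        }
        displacement += 1;
        index = (index + 1) & (capacity - 1);
        debug_assert!(displacement <= capacity);
    }
}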
 
@@ -431,12 +431,11 @@ fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> (K, V) {
 }
 
 /// Perform robin hood bucket stealing at the given `bucket`. You must
-/// also pass the position of that bucket's initial bucket so we don't have
-/// to recalculate it.
+/// also pass that bucket's displacement so we don't have to recalculate it.
 ///
 /// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable.
 fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>,
-                                mut ib: usize,
+                                mut displacement: usize,
                                 mut hash: SafeHash,
                                 mut key: K,
                                 mut val: V)
@@ -457,6 +456,7 @@ fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>,
         val = old_val;
 
         loop {
+            displacement += 1;
             let probe = bucket.next();
             debug_assert!(probe.index() != idx_end);
 
@@ -476,13 +476,13 @@ fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>,
                 Full(bucket) => bucket,
             };
 
-            let probe_ib = full_bucket.index() - full_bucket.displacement();
+            let probe_displacement = full_bucket.displacement();
 
             bucket = full_bucket;
 
             // Robin hood! Steal the spot.
-            if ib < probe_ib {
-                ib = probe_ib;
+            if probe_displacement < displacement {
+                displacement = probe_displacement;
                 break;
             }
         }
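
The robin_hood changes mirror the search: the element being inserted carries its own displacement and steals a bucket once it finds an occupant with a smaller one. A hedged sketch of a complete Robin Hood insert over the same hypothetical slot layout as the lookup sketch above (not the stdlib code path; it assumes a free slot exists and that the key is not already present):

fn insert_robin_hood<K, V>(slots: &mut [Option<(u64, K, V)>],
                           mut hash: u64,
                           mut key: K,
                           mut val: V) {
    let capacity = slots.len();
    debug_assert!(capacity.is_power_of_two());
    let mut index = (hash as usize) & (capacity - 1);
    let mut displacement = 0;
    loop {
        match slots[index].take() {
            None => {
                // Empty bucket: the (possibly displaced) element lands here.
                slots[index] = Some((hash, key, val));
                return;
            }
            Some((occupant_hash, occupant_key, occupant_val)) => {
                let occupant_displacement =
                    index.wrapping_sub((occupant_hash as usize) & (capacity - 1)) & (capacity - 1);
                if occupant_displacement < displacement {
                    // Robin hood: the occupant is richer, so steal its bucket
                    // and keep probing on behalf of the displaced element.
                    slots[index] = Some((hash, key, val));
                    hash = occupant_hash;
                    key = occupant_key;
                    val = occupant_val;
                    displacement = occupant_displacement;
                } else {
                    slots[index] = Some((occupant_hash, occupant_key, occupant_val));
                }
            }
        }
        displacement += 1;
        index = (index + 1) & (capacity - 1);
        debug_assert!(displacement <= capacity, "caller must ensure spare capacity");
    }
}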
@@ -520,13 +520,16 @@ impl<K, V, S> HashMap<K, V, S>
         search_hashed(&mut self.table, hash, |k| q.eq(k.borrow()))
     }
 
-    // The caller should ensure that invariants by Robin Hood Hashing hold.
+    // The caller should ensure that invariants by Robin Hood Hashing hold
+    // and that there's space in the underlying table.
     fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
         let raw_cap = self.raw_capacity();
         let mut buckets = Bucket::new(&mut self.table, hash);
-        let ib = buckets.index();
+        // note that buckets.index() keeps increasing
+        // even if the pointer wraps back to the first bucket.
+        let limit_bucket = buckets.index() + raw_cap;
 
-        while buckets.index() != ib + raw_cap {
+        loop {
             // We don't need to compare hashes for value swap.
             // Not even DIBs for Robin Hood.
             buckets = match buckets.peek() {
@@ -537,8 +540,8 @@ impl<K, V, S> HashMap<K, V, S>
                 Full(b) => b.into_bucket(),
             };
             buckets.next();
+            debug_assert!(buckets.index() < limit_bucket);
         }
-        panic!("Internal HashMap error: Out of space.");
     }
 }
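
insert_hashed_ordered now probes until it hits an empty bucket and trusts the caller-guaranteed spare capacity, so the out-of-space panic becomes a debug assertion against limit_bucket. A small hedged sketch of that bounded-probe pattern with hypothetical names (Cursor, probe_for_empty), mirroring how buckets.index() keeps increasing past the wraparound:

// Hypothetical names for illustration; not the libstd types.
struct Cursor {
    index: usize,    // keeps growing even after the probe wraps around
    capacity: usize,
}

impl Cursor {
    fn slot(&self) -> usize {
        self.index % self.capacity
    }
    fn advance(&mut self) {
        self.index += 1;
    }
}

fn probe_for_empty(occupied: &[bool], start: usize) -> usize {
    let capacity = occupied.len();
    let mut cursor = Cursor { index: start, capacity };
    // At most one full pass; the caller must guarantee a free slot,
    // so the bound is only checked in debug builds, as in the patch.
    let limit_bucket = cursor.index + capacity;
    loop {
        if !occupied[cursor.slot()] {
            return cursor.slot();
        }
        cursor.advance();
        debug_assert!(cursor.index < limit_bucket, "caller must ensure spare capacity");
    }
}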
 
@@ -1959,7 +1962,7 @@ impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn insert(self, value: V) -> &'a mut V {
         match self.elem {
-            NeqElem(bucket, ib) => robin_hood(bucket, ib, self.hash, self.key, value),
+            NeqElem(bucket, disp) => robin_hood(bucket, disp, self.hash, self.key, value),
             NoElem(bucket) => bucket.put(self.hash, self.key, value).into_mut_refs().1,
         }
     }