| author | Manish Goregaokar <manishsmail@gmail.com> | 2015-02-06 05:40:28 +0530 |
|---|---|---|
| committer | Manish Goregaokar <manishsmail@gmail.com> | 2015-02-06 16:21:09 +0530 |
| commit | f6d08b0b175afb64157f466dd2aea7fb14d97339 (patch) | |
| tree | 05def53b3d42d6ff5de1f0fcfd3c2492240678be /src/libstd | |
| parent | 60cb151be061f859feaf6cf0246f0189b3ad35a9 (diff) | |
| parent | 15fb06d73008d868f10707f32c5caebff41b3026 (diff) | |
Rollup merge of #21969 - Gankro:collections-cleanup, r=alexcrichton
This is 99% burning ints to the ground, but I also got rid of useless annotations or made code more "idiomatic" as I went along. Mostly changes in tests.
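Concretely, the cleanup swaps the old machine-sized `int`/`uint` types for explicitly sized or pointer-sized ones (`i32`, `isize`, `usize`) and drops the now-redundant literal suffixes (`0u`, `0us`). The snippet below is a standalone illustration of that pattern on current Rust, not code taken from the patch:

```rust
use std::collections::HashMap;

fn main() {
    // Old test code spelled out machine-sized types, e.g.
    // `let m: HashMap<int, int> = HashMap::new();` with loops like `for i in 0u..200`.
    // The rewrite uses explicitly sized / pointer-sized types instead:
    let mut m: HashMap<i32, i32> = HashMap::new();
    assert_eq!(m.len(), 0);

    // Lengths, capacities and indices are `usize`.
    let cap: usize = m.capacity();
    assert!(cap >= m.len());

    // Unsuffixed literals (`0` rather than `0u` / `0us`) are inferred from use.
    for i in 0..50 {
        m.insert(i, i * 2);
    }
    assert_eq!(m.len(), 50);
}
```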
Diffstat (limited to 'src/libstd')
| -rw-r--r-- | src/libstd/collections/hash/bench.rs | 2 |
| -rw-r--r-- | src/libstd/collections/hash/map.rs | 112 |
| -rw-r--r-- | src/libstd/collections/hash/set.rs | 66 |
| -rw-r--r-- | src/libstd/collections/hash/table.rs | 68 |
4 files changed, 124 insertions, 124 deletions
diff --git a/src/libstd/collections/hash/bench.rs b/src/libstd/collections/hash/bench.rs index ce02648b8f2..ca506e8c36f 100644 --- a/src/libstd/collections/hash/bench.rs +++ b/src/libstd/collections/hash/bench.rs @@ -21,7 +21,7 @@ fn new_drop(b : &mut Bencher) { use super::map::HashMap; b.iter(|| { - let m : HashMap<int, int> = HashMap::new(); + let m : HashMap<i32, i32> = HashMap::new(); assert_eq!(m.len(), 0); }) } diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs index 42f365b4e1b..283210ed644 100644 --- a/src/libstd/collections/hash/map.rs +++ b/src/libstd/collections/hash/map.rs @@ -45,9 +45,9 @@ use super::table::BucketState::{ }; use super::state::HashState; -const INITIAL_LOG2_CAP: uint = 5; +const INITIAL_LOG2_CAP: usize = 5; #[unstable(feature = "std_misc")] -pub const INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5 +pub const INITIAL_CAPACITY: usize = 1 << INITIAL_LOG2_CAP; // 2^5 /// The default behavior of HashMap implements a load factor of 90.9%. /// This behavior is characterized by the following condition: @@ -62,7 +62,7 @@ impl DefaultResizePolicy { } #[inline] - fn min_capacity(&self, usable_size: uint) -> uint { + fn min_capacity(&self, usable_size: usize) -> usize { // Here, we are rephrasing the logic by specifying the lower limit // on capacity: // @@ -72,7 +72,7 @@ impl DefaultResizePolicy { /// An inverse of `min_capacity`, approximately. #[inline] - fn usable_capacity(&self, cap: uint) -> uint { + fn usable_capacity(&self, cap: usize) -> usize { // As the number of entries approaches usable capacity, // min_capacity(size) must be smaller than the internal capacity, // so that the map is not resized: @@ -369,7 +369,7 @@ fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> (K, V) { /// /// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable. fn robin_hood<'a, K: 'a, V: 'a>(mut bucket: FullBucketMut<'a, K, V>, - mut ib: uint, + mut ib: usize, mut hash: SafeHash, mut k: K, mut v: V) @@ -515,7 +515,7 @@ impl<K: Hash<Hasher> + Eq, V> HashMap<K, V, RandomState> { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomState> { + pub fn with_capacity(capacity: usize) -> HashMap<K, V, RandomState> { HashMap::with_capacity_and_hash_state(capacity, Default::default()) } } @@ -569,7 +569,7 @@ impl<K, V, S, H> HashMap<K, V, S> /// ``` #[inline] #[unstable(feature = "std_misc", reason = "hasher stuff is unclear")] - pub fn with_capacity_and_hash_state(capacity: uint, hash_state: S) + pub fn with_capacity_and_hash_state(capacity: usize, hash_state: S) -> HashMap<K, V, S> { let resize_policy = DefaultResizePolicy::new(); let min_cap = max(INITIAL_CAPACITY, resize_policy.min_capacity(capacity)); @@ -593,7 +593,7 @@ impl<K, V, S, H> HashMap<K, V, S> /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn capacity(&self) -> uint { + pub fn capacity(&self) -> usize { self.resize_policy.usable_capacity(self.table.capacity()) } @@ -603,7 +603,7 @@ impl<K, V, S, H> HashMap<K, V, S> /// /// # Panics /// - /// Panics if the new allocation size overflows `uint`. + /// Panics if the new allocation size overflows `usize`. 
/// /// # Example /// @@ -613,7 +613,7 @@ impl<K, V, S, H> HashMap<K, V, S> /// map.reserve(10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: uint) { + pub fn reserve(&mut self, additional: usize) { let new_size = self.len().checked_add(additional).expect("capacity overflow"); let min_cap = self.resize_policy.min_capacity(new_size); @@ -631,7 +631,7 @@ impl<K, V, S, H> HashMap<K, V, S> /// 1) Make sure the new capacity is enough for all the elements, accounting /// for the load factor. /// 2) Ensure new_capacity is a power of two or zero. - fn resize(&mut self, new_capacity: uint) { + fn resize(&mut self, new_capacity: usize) { assert!(self.table.size() <= new_capacity); assert!(new_capacity.is_power_of_two() || new_capacity == 0); @@ -793,7 +793,7 @@ impl<K, V, S, H> HashMap<K, V, S> if (ib as int) < robin_ib { // Found a luckier bucket than me. Better steal his spot. - return robin_hood(bucket, robin_ib as uint, hash, k, v); + return robin_hood(bucket, robin_ib as usize, hash, k, v); } probe = bucket.next(); @@ -951,7 +951,7 @@ impl<K, V, S, H> HashMap<K, V, S> /// assert_eq!(a.len(), 1); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn len(&self) -> uint { self.table.size() } + pub fn len(&self) -> usize { self.table.size() } /// Returns true if the map contains no elements. /// @@ -1186,7 +1186,7 @@ fn search_entry_hashed<'a, K: Eq, V>(table: &'a mut RawTable<K,V>, hash: SafeHas return Vacant(VacantEntry { hash: hash, key: k, - elem: NeqElem(bucket, robin_ib as uint), + elem: NeqElem(bucket, robin_ib as usize), }); } @@ -1369,7 +1369,7 @@ pub enum Entry<'a, K: 'a, V: 'a> { enum VacantEntryState<K, V, M> { /// The index is occupied, but the key to insert has precedence, /// and will kick the current one out on insertion. - NeqElem(FullBucket<K, V, M>, uint), + NeqElem(FullBucket<K, V, M>, usize), /// The index is genuinely vacant. 
NoElem(EmptyBucket<K, V, M>), } @@ -1672,11 +1672,11 @@ mod test_map { #[derive(Hash, PartialEq, Eq)] struct Dropable { - k: uint + k: usize } impl Dropable { - fn new(k: uint) -> Dropable { + fn new(k: usize) -> Dropable { DROP_VECTOR.with(|slot| { slot.borrow_mut()[k] += 1; }); @@ -1709,24 +1709,24 @@ mod test_map { let mut m = HashMap::new(); DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 0); } }); - for i in 0u..100 { + for i in 0..100 { let d1 = Dropable::new(i); let d2 = Dropable::new(i+100); m.insert(d1, d2); } DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 1); } }); - for i in 0u..50 { + for i in 0..50 { let k = Dropable::new(i); let v = m.remove(&k); @@ -1739,12 +1739,12 @@ mod test_map { } DROP_VECTOR.with(|v| { - for i in 0u..50 { + for i in 0..50 { assert_eq!(v.borrow()[i], 0); assert_eq!(v.borrow()[i+100], 0); } - for i in 50u..100 { + for i in 50..100 { assert_eq!(v.borrow()[i], 1); assert_eq!(v.borrow()[i+100], 1); } @@ -1752,7 +1752,7 @@ mod test_map { } DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 0); } }); @@ -1768,19 +1768,19 @@ mod test_map { let mut hm = HashMap::new(); DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 0); } }); - for i in 0u..100 { + for i in 0..100 { let d1 = Dropable::new(i); let d2 = Dropable::new(i+100); hm.insert(d1, d2); } DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 1); } }); @@ -1795,7 +1795,7 @@ mod test_map { let mut half = hm.into_iter().take(50); DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 1); } }); @@ -1803,11 +1803,11 @@ mod test_map { for _ in half.by_ref() {} DROP_VECTOR.with(|v| { - let nk = (0u..100).filter(|&i| { + let nk = (0..100).filter(|&i| { v.borrow()[i] == 1 }).count(); - let nv = (0u..100).filter(|&i| { + let nv = (0..100).filter(|&i| { v.borrow()[i+100] == 1 }).count(); @@ -1817,7 +1817,7 @@ mod test_map { }; DROP_VECTOR.with(|v| { - for i in 0u..200 { + for i in 0..200 { assert_eq!(v.borrow()[i], 0); } }); @@ -1962,7 +1962,7 @@ mod test_map { #[test] fn test_iterate() { let mut m = HashMap::with_capacity(4); - for i in 0u..32 { + for i in 0..32 { assert!(m.insert(i, i*2).is_none()); } assert_eq!(m.len(), 32); @@ -1979,8 +1979,8 @@ mod test_map { #[test] fn test_keys() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map = vec.into_iter().collect::<HashMap<int, char>>(); - let keys = map.keys().map(|&k| k).collect::<Vec<int>>(); + let map: HashMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.keys().cloned().collect(); assert_eq!(keys.len(), 3); assert!(keys.contains(&1)); assert!(keys.contains(&2)); @@ -1990,8 +1990,8 @@ mod test_map { #[test] fn test_values() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map = vec.into_iter().collect::<HashMap<int, char>>(); - let values = map.values().map(|&v| v).collect::<Vec<char>>(); + let map: HashMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.values().cloned().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&'a')); assert!(values.contains(&'b')); @@ -2029,8 +2029,8 @@ mod test_map { #[test] fn test_show() { - let mut map: HashMap<int, int> = HashMap::new(); - let empty: HashMap<int, int> = HashMap::new(); + let mut map = HashMap::new(); + let empty: HashMap<i32, i32> = HashMap::new(); map.insert(1, 2); map.insert(3, 4); @@ -2049,7 +2049,7 @@ mod test_map { 
assert_eq!(m.len(), 0); assert!(m.is_empty()); - let mut i = 0u; + let mut i = 0; let old_cap = m.table.capacity(); while old_cap == m.table.capacity() { m.insert(i, i); @@ -2077,7 +2077,7 @@ mod test_map { assert_eq!(cap, initial_cap * 2); - let mut i = 0u; + let mut i = 0; for _ in 0..cap * 3 / 4 { m.insert(i, i); i += 1; @@ -2119,21 +2119,21 @@ mod test_map { #[test] fn test_reserve_shrink_to_fit() { let mut m = HashMap::new(); - m.insert(0u, 0u); + m.insert(0, 0); m.remove(&0); assert!(m.capacity() >= m.len()); - for i in 0us..128 { + for i in 0..128 { m.insert(i, i); } m.reserve(256); let usable_cap = m.capacity(); - for i in 128us..128+256 { + for i in 128..(128 + 256) { m.insert(i, i); assert_eq!(m.capacity(), usable_cap); } - for i in 100us..128+256 { + for i in 100..(128 + 256) { assert_eq!(m.remove(&i), Some(i)); } m.shrink_to_fit(); @@ -2142,7 +2142,7 @@ mod test_map { assert!(!m.is_empty()); assert!(m.capacity() >= m.len()); - for i in 0us..100 { + for i in 0..100 { assert_eq!(m.remove(&i), Some(i)); } m.shrink_to_fit(); @@ -2157,7 +2157,7 @@ mod test_map { fn test_from_iter() { let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - let map: HashMap<int, int> = xs.iter().map(|&x| x).collect(); + let map: HashMap<_, _> = xs.iter().cloned().collect(); for &(k, v) in &xs { assert_eq!(map.get(&k), Some(&v)); @@ -2168,7 +2168,7 @@ mod test_map { fn test_size_hint() { let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - let map: HashMap<int, int> = xs.iter().map(|&x| x).collect(); + let map: HashMap<_, _> = xs.iter().cloned().collect(); let mut iter = map.iter(); @@ -2181,7 +2181,7 @@ mod test_map { fn test_iter_len() { let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - let map: HashMap<int, int> = xs.iter().map(|&x| x).collect(); + let map: HashMap<_, _> = xs.iter().cloned().collect(); let mut iter = map.iter(); @@ -2194,7 +2194,7 @@ mod test_map { fn test_mut_size_hint() { let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect(); + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); let mut iter = map.iter_mut(); @@ -2207,7 +2207,7 @@ mod test_map { fn test_iter_mut_len() { let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect(); + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); let mut iter = map.iter_mut(); @@ -2218,7 +2218,7 @@ mod test_map { #[test] fn test_index() { - let mut map: HashMap<int, int> = HashMap::new(); + let mut map = HashMap::new(); map.insert(1, 2); map.insert(2, 1); @@ -2230,7 +2230,7 @@ mod test_map { #[test] #[should_fail] fn test_index_nonexistent() { - let mut map: HashMap<int, int> = HashMap::new(); + let mut map = HashMap::new(); map.insert(1, 2); map.insert(2, 1); @@ -2243,7 +2243,7 @@ mod test_map { fn test_entry(){ let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; - let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect(); + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); // Existing key (insert) match map.entry(1) { @@ -2294,7 +2294,7 @@ mod test_map { #[test] fn test_entry_take_doesnt_corrupt() { // Test for #19292 - fn check(m: &HashMap<int, ()>) { + fn check(m: &HashMap<isize, ()>) { for k in m.keys() { assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); @@ -2305,12 +2305,12 @@ mod test_map { let mut rng = weak_rng(); // Populate the map with some items. 
- for _ in 0u..50 { + for _ in 0..50 { let x = rng.gen_range(-10, 10); m.insert(x, ()); } - for i in 0u..1000 { + for i in 0..1000 { let x = rng.gen_range(-10, 10); match m.entry(x) { Vacant(_) => {}, diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index f5877e1dd99..a4641f14e30 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -76,7 +76,7 @@ use super::state::HashState; /// #[derive(Hash, Eq, PartialEq, Debug)] /// struct Viking<'a> { /// name: &'a str, -/// power: uint, +/// power: usize, /// } /// /// let mut vikings = HashSet::new(); @@ -123,7 +123,7 @@ impl<T: Hash<Hasher> + Eq> HashSet<T, RandomState> { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn with_capacity(capacity: uint) -> HashSet<T, RandomState> { + pub fn with_capacity(capacity: usize) -> HashSet<T, RandomState> { HashSet { map: HashMap::with_capacity(capacity) } } } @@ -174,7 +174,7 @@ impl<T, S, H> HashSet<T, S> /// ``` #[inline] #[unstable(feature = "std_misc", reason = "hasher stuff is unclear")] - pub fn with_capacity_and_hash_state(capacity: uint, hash_state: S) + pub fn with_capacity_and_hash_state(capacity: usize, hash_state: S) -> HashSet<T, S> { HashSet { map: HashMap::with_capacity_and_hash_state(capacity, hash_state), @@ -192,7 +192,7 @@ impl<T, S, H> HashSet<T, S> /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn capacity(&self) -> uint { + pub fn capacity(&self) -> usize { self.map.capacity() } @@ -202,7 +202,7 @@ impl<T, S, H> HashSet<T, S> /// /// # Panics /// - /// Panics if the new allocation size overflows `uint`. + /// Panics if the new allocation size overflows `usize`. /// /// # Example /// @@ -212,7 +212,7 @@ impl<T, S, H> HashSet<T, S> /// set.reserve(10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: uint) { + pub fn reserve(&mut self, additional: usize) { self.map.reserve(additional) } @@ -402,7 +402,7 @@ impl<T, S, H> HashSet<T, S> /// assert_eq!(v.len(), 1); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn len(&self) -> uint { self.map.len() } + pub fn len(&self) -> usize { self.map.len() } /// Returns true if the set contains no elements /// @@ -456,7 +456,7 @@ impl<T, S, H> HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let set: HashSet<uint> = [1, 2, 3].iter().map(|&x| x).collect(); + /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); /// assert_eq!(set.contains(&1), true); /// assert_eq!(set.contains(&4), false); /// ``` @@ -475,8 +475,8 @@ impl<T, S, H> HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let a: HashSet<uint> = [1, 2, 3].iter().map(|&x| x).collect(); - /// let mut b: HashSet<uint> = HashSet::new(); + /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let mut b = HashSet::new(); /// /// assert_eq!(a.is_disjoint(&b), true); /// b.insert(4); @@ -496,8 +496,8 @@ impl<T, S, H> HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let sup: HashSet<uint> = [1, 2, 3].iter().map(|&x| x).collect(); - /// let mut set: HashSet<uint> = HashSet::new(); + /// let sup: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let mut set = HashSet::new(); /// /// assert_eq!(set.is_subset(&sup), true); /// set.insert(2); @@ -517,8 +517,8 @@ impl<T, S, H> HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let sub: HashSet<uint> = [1, 2].iter().map(|&x| x).collect(); - /// let mut set: HashSet<uint> = HashSet::new(); + 
/// let sub: HashSet<_> = [1, 2].iter().cloned().collect(); + /// let mut set = HashSet::new(); /// /// assert_eq!(set.is_superset(&sub), false); /// @@ -670,10 +670,10 @@ impl<'a, 'b, T, S, H> BitOr<&'b HashSet<T, S>> for &'a HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let a: HashSet<int> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<int> = vec![3, 4, 5].into_iter().collect(); + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); /// - /// let set: HashSet<int> = &a | &b; + /// let set = &a | &b; /// /// let mut i = 0; /// let expected = [1, 2, 3, 4, 5]; @@ -703,10 +703,10 @@ impl<'a, 'b, T, S, H> BitAnd<&'b HashSet<T, S>> for &'a HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let a: HashSet<int> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<int> = vec![2, 3, 4].into_iter().collect(); + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![2, 3, 4].into_iter().collect(); /// - /// let set: HashSet<int> = &a & &b; + /// let set = &a & &b; /// /// let mut i = 0; /// let expected = [2, 3]; @@ -736,10 +736,10 @@ impl<'a, 'b, T, S, H> BitXor<&'b HashSet<T, S>> for &'a HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let a: HashSet<int> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<int> = vec![3, 4, 5].into_iter().collect(); + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); /// - /// let set: HashSet<int> = &a ^ &b; + /// let set = &a ^ &b; /// /// let mut i = 0; /// let expected = [1, 2, 4, 5]; @@ -769,10 +769,10 @@ impl<'a, 'b, T, S, H> Sub<&'b HashSet<T, S>> for &'a HashSet<T, S> /// ``` /// use std::collections::HashSet; /// - /// let a: HashSet<int> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<int> = vec![3, 4, 5].into_iter().collect(); + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); /// - /// let set: HashSet<int> = &a - &b; + /// let set = &a - &b; /// /// let mut i = 0; /// let expected = [1, 2]; @@ -1029,7 +1029,7 @@ mod test_set { #[test] fn test_iterate() { let mut a = HashSet::new(); - for i in 0u..32 { + for i in 0..32 { assert!(a.insert(i)); } let mut observed: u32 = 0; @@ -1152,7 +1152,7 @@ mod test_set { fn test_from_iter() { let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let set: HashSet<int> = xs.iter().map(|&x| x).collect(); + let set: HashSet<_> = xs.iter().cloned().collect(); for x in &xs { assert!(set.contains(x)); @@ -1198,8 +1198,8 @@ mod test_set { #[test] fn test_show() { - let mut set: HashSet<int> = HashSet::new(); - let empty: HashSet<int> = HashSet::new(); + let mut set = HashSet::new(); + let empty = HashSet::<i32>::new(); set.insert(1); set.insert(2); @@ -1212,19 +1212,19 @@ mod test_set { #[test] fn test_trivial_drain() { - let mut s = HashSet::<int>::new(); + let mut s = HashSet::<i32>::new(); for _ in s.drain() {} assert!(s.is_empty()); drop(s); - let mut s = HashSet::<int>::new(); + let mut s = HashSet::<i32>::new(); drop(s.drain()); assert!(s.is_empty()); } #[test] fn test_drain() { - let mut s: HashSet<i32> = (1..100).collect(); + let mut s: HashSet<_> = (1..100).collect(); // try this a bunch of times to make sure we don't screw up internal state. 
for _ in 0..20 { diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs index 8952b816901..0bb6bd4cf35 100644 --- a/src/libstd/collections/hash/table.rs +++ b/src/libstd/collections/hash/table.rs @@ -67,8 +67,8 @@ const EMPTY_BUCKET: u64 = 0u64; /// but in general is just a tricked out `Vec<Option<u64, K, V>>`. #[unsafe_no_drop_flag] pub struct RawTable<K, V> { - capacity: uint, - size: uint, + capacity: usize, + size: usize, hashes: *mut u64, // Because K/V do not appear directly in any of the types in the struct, // inform rustc that in fact instances of K and V are reachable from here. @@ -88,7 +88,7 @@ impl<K,V> Copy for RawBucket<K,V> {} pub struct Bucket<K, V, M> { raw: RawBucket<K, V>, - idx: uint, + idx: usize, table: M } @@ -96,13 +96,13 @@ impl<K,V,M:Copy> Copy for Bucket<K,V,M> {} pub struct EmptyBucket<K, V, M> { raw: RawBucket<K, V>, - idx: uint, + idx: usize, table: M } pub struct FullBucket<K, V, M> { raw: RawBucket<K, V>, - idx: uint, + idx: usize, table: M } @@ -190,7 +190,7 @@ impl<K, V, M> FullBucket<K, V, M> { self.table } /// Get the raw index. - pub fn index(&self) -> uint { + pub fn index(&self) -> usize { self.idx } } @@ -212,21 +212,21 @@ impl<K, V, M> Bucket<K, V, M> { self.table } /// Get the raw index. - pub fn index(&self) -> uint { + pub fn index(&self) -> usize { self.idx } } impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> { pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> { - Bucket::at_index(table, hash.inspect() as uint) + Bucket::at_index(table, hash.inspect() as usize) } - pub fn at_index(table: M, ib_index: uint) -> Bucket<K, V, M> { + pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> { let ib_index = ib_index & (table.capacity() - 1); Bucket { raw: unsafe { - table.first_bucket_raw().offset(ib_index as int) + table.first_bucket_raw().offset(ib_index as isize) }, idx: ib_index, table: table @@ -276,7 +276,7 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> { // ... and it's zero at all other times. let maybe_wraparound_dist = (self.idx ^ (self.idx + 1)) & self.table.capacity(); // Finally, we obtain the offset 1 or the offset -cap + 1. - let dist = 1 - (maybe_wraparound_dist as int); + let dist = 1 - (maybe_wraparound_dist as isize); self.idx += 1; @@ -366,11 +366,11 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> { /// /// In the cited blog posts above, this is called the "distance to /// initial bucket", or DIB. Also known as "probe count". - pub fn distance(&self) -> uint { + pub fn distance(&self) -> usize { // Calculates the distance one has to travel when going from // `hash mod capacity` onwards to `idx mod capacity`, wrapping around // if the destination is not reached before the end of the table. - (self.idx - self.hash().inspect() as uint) & (self.table.capacity() - 1) + (self.idx - self.hash().inspect() as usize) & (self.table.capacity() - 1) } #[inline] @@ -503,7 +503,7 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> GapThenFull<K, V, M> { /// # Panics /// /// Panics if `target_alignment` is not a power of two. -fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint { +fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize { assert!(target_alignment.is_power_of_two()); (unrounded + target_alignment - 1) & !(target_alignment - 1) } @@ -520,10 +520,10 @@ fn test_rounding() { // Returns a tuple of (key_offset, val_offset), // from the start of a mallocated array. 
-fn calculate_offsets(hashes_size: uint, - keys_size: uint, keys_align: uint, - vals_align: uint) - -> (uint, uint) { +fn calculate_offsets(hashes_size: usize, + keys_size: usize, keys_align: usize, + vals_align: usize) + -> (usize, usize) { let keys_offset = round_up_to_next(hashes_size, keys_align); let end_of_keys = keys_offset + keys_size; @@ -534,10 +534,10 @@ fn calculate_offsets(hashes_size: uint, // Returns a tuple of (minimum required malloc alignment, hash_offset, // array_size), from the start of a mallocated array. -fn calculate_allocation(hash_size: uint, hash_align: uint, - keys_size: uint, keys_align: uint, - vals_size: uint, vals_align: uint) - -> (uint, uint, uint) { +fn calculate_allocation(hash_size: usize, hash_align: usize, + keys_size: usize, keys_align: usize, + vals_size: usize, vals_align: usize) + -> (usize, usize, usize) { let hash_offset = 0; let (_, vals_offset) = calculate_offsets(hash_size, keys_size, keys_align, @@ -562,7 +562,7 @@ fn test_offset_calculation() { impl<K, V> RawTable<K, V> { /// Does not initialize the buckets. The caller should ensure they, /// at the very least, set every hash to EMPTY_BUCKET. - unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> { + unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> { if capacity == 0 { return RawTable { size: 0, @@ -601,7 +601,7 @@ impl<K, V> RawTable<K, V> { let buffer = allocate(size, malloc_alignment); if buffer.is_null() { ::alloc::oom() } - let hashes = buffer.offset(hash_offset as int) as *mut u64; + let hashes = buffer.offset(hash_offset as isize) as *mut u64; RawTable { capacity: capacity, @@ -623,15 +623,15 @@ impl<K, V> RawTable<K, V> { unsafe { RawBucket { hash: self.hashes, - key: buffer.offset(keys_offset as int) as *mut K, - val: buffer.offset(vals_offset as int) as *mut V + key: buffer.offset(keys_offset as isize) as *mut K, + val: buffer.offset(vals_offset as isize) as *mut V } } } /// Creates a new raw table from a given capacity. All buckets are /// initially empty. - pub fn new(capacity: uint) -> RawTable<K, V> { + pub fn new(capacity: usize) -> RawTable<K, V> { unsafe { let ret = RawTable::new_uninitialized(capacity); zero_memory(ret.hashes, capacity); @@ -640,13 +640,13 @@ impl<K, V> RawTable<K, V> { } /// The hashtable's capacity, similar to a vector's. - pub fn capacity(&self) -> uint { + pub fn capacity(&self) -> usize { self.capacity } /// The number of elements ever `put` in the hashtable, minus the number /// of elements ever `take`n. 
- pub fn size(&self) -> uint { + pub fn size(&self) -> usize { self.size } @@ -654,7 +654,7 @@ impl<K, V> RawTable<K, V> { RawBuckets { raw: self.first_bucket_raw(), hashes_end: unsafe { - self.hashes.offset(self.capacity as int) + self.hashes.offset(self.capacity as isize) }, marker: marker::ContravariantLifetime, } @@ -705,7 +705,7 @@ impl<K, V> RawTable<K, V> { unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> { let raw_bucket = self.first_bucket_raw(); RevMoveBuckets { - raw: raw_bucket.offset(self.capacity as int), + raw: raw_bucket.offset(self.capacity as isize), hashes_end: raw_bucket.hash, elems_left: self.size, marker: marker::ContravariantLifetime, @@ -758,7 +758,7 @@ impl<'a, K, V> Iterator for RawBuckets<'a, K, V> { struct RevMoveBuckets<'a, K, V> { raw: RawBucket<K, V>, hashes_end: *mut u64, - elems_left: uint, + elems_left: usize, marker: marker::ContravariantLifetime<'a>, } @@ -791,7 +791,7 @@ impl<'a, K, V> Iterator for RevMoveBuckets<'a, K, V> { /// Iterator over shared references to entries in a table. pub struct Iter<'a, K: 'a, V: 'a> { iter: RawBuckets<'a, K, V>, - elems_left: uint, + elems_left: usize, } // FIXME(#19839) Remove in favor of `#[derive(Clone)]` @@ -808,7 +808,7 @@ impl<'a, K, V> Clone for Iter<'a, K, V> { /// Iterator over mutable references to entries in a table. pub struct IterMut<'a, K: 'a, V: 'a> { iter: RawBuckets<'a, K, V>, - elems_left: uint, + elems_left: usize, } /// Iterator over the entries in a table, consuming the table. |
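The test changes above also lean on two idioms repeated throughout the diff: `cloned()` instead of `map(|&x| x)` for copying values out of an iterator of references, and `HashMap<_, _>` / `HashSet<_>` so that `collect()` infers the element types. A minimal self-contained sketch of both (illustrative only, not part of the tree):

```rust
use std::collections::{HashMap, HashSet};

fn main() {
    let xs = [(1, 1), (2, 2), (3, 3)];

    // Instead of `xs.iter().map(|&x| x).collect::<HashMap<int, int>>()`,
    // copy the pairs with `cloned()` and let the key/value types be inferred.
    let map: HashMap<_, _> = xs.iter().cloned().collect();
    assert_eq!(map.get(&2), Some(&2));

    // The same shape works for sets; `HashSet<_>` avoids restating the element type.
    let set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
    assert!(set.contains(&3));
}
```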
