| author | bors <bors@rust-lang.org> | 2020-05-14 23:22:47 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2020-05-14 23:22:47 +0000 |
| commit | 85f0da67ff31923955f7fb107fb097835bb3b6ff (patch) | |
| tree | 341c7d6dbfebd78f1864d5d5048ea3b33c44e30d /src/liballoc | |
| parent | a74d1862d4d87a56244958416fd05976c58ca1a8 (diff) | |
| parent | cb2703945ca3c6c9664a5a9ec606430cb79ba2c8 (diff) | |
Auto merge of #71321 - matthewjasper:alloc-min-spec, r=sfackler
Use `min_specialization` in liballoc

- Remove a type parameter from `[A]RcFromIter`.
- Remove an implementation of `[A]RcFromIter` that didn't actually specialize anything.
- Remove the unused implementation of `IsZero` for `Option<&mut T>`.
- Change the specializations of `[A]RcEqIdent` to use a marker-trait version of `Eq`.
- Remove `BTreeClone`; I couldn't find a way to make this work with `min_specialization`.
- Add `rustc_unsafe_specialization_marker` to `Copy` and `TrustedLen`.

After this, libcore is the only standard library crate still using `feature(specialization)`. The pattern behind the `[A]RcEqIdent` change is sketched below.

cc #31844
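The trick in miniature: `min_specialization` only lets a specializing impl add bounds on marker traits, and `Eq` has methods (inherited from `PartialEq`), so the patch wraps it in a method-less marker trait. The following is a minimal nightly-only sketch of that shape, not code from the patch: `MarkerEq` mirrors the trait added in `rc.rs`, `Describe` is a hypothetical stand-in for `[A]RcEqIdent`, and it assumes the internal attribute is reachable via `feature(rustc_attrs)`.

```rust
// Nightly-only sketch; `Describe` is hypothetical, `MarkerEq` mirrors rc.rs.
#![feature(min_specialization)]
#![feature(rustc_attrs)]

// `Eq` cannot be used as the bound of a specializing impl because it has
// methods, so it is wrapped in a marker trait with no items:
#[rustc_unsafe_specialization_marker]
trait MarkerEq: PartialEq<Self> {}

impl<T: Eq> MarkerEq for T {}

trait Describe {
    fn describe(&self) -> &'static str;
}

impl<T: PartialEq> Describe for T {
    // Base impl: the comparison may be deliberately irreflexive.
    default fn describe(&self) -> &'static str {
        "PartialEq only"
    }
}

impl<T: MarkerEq> Describe for T {
    // Specializing impl: accepted because `MarkerEq` carries no methods.
    fn describe(&self) -> &'static str {
        "full equivalence relation"
    }
}

fn main() {
    // f64 is PartialEq but not Eq (NaN != NaN), so it takes the base impl.
    assert_eq!(1.0_f64.describe(), "PartialEq only");
    assert_eq!(1_u32.describe(), "full equivalence relation");
}
```

This is why the `Rc::ptr_eq` shortcut is gated on `MarkerEq`: for a merely `PartialEq` type, pointer equality does not imply value equality.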
Diffstat (limited to 'src/liballoc')
| -rw-r--r-- | src/liballoc/collections/btree/map.rs | 53 |
|---|---|---|
| -rw-r--r-- | src/liballoc/lib.rs | 2 |
| -rw-r--r-- | src/liballoc/rc.rs | 46 |
| -rw-r--r-- | src/liballoc/sync.rs | 40 |
| -rw-r--r-- | src/liballoc/vec.rs | 21 |
5 files changed, 42 insertions, 120 deletions
```diff
diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index 113df80d0c2..c6cb39b1bf5 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -215,59 +215,6 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
             clone_subtree(self.root.as_ref().unwrap().as_ref())
         }
     }
-
-    fn clone_from(&mut self, other: &Self) {
-        BTreeClone::clone_from(self, other);
-    }
-}
-
-trait BTreeClone {
-    fn clone_from(&mut self, other: &Self);
-}
-
-impl<K: Clone, V: Clone> BTreeClone for BTreeMap<K, V> {
-    default fn clone_from(&mut self, other: &Self) {
-        *self = other.clone();
-    }
-}
-
-impl<K: Clone + Ord, V: Clone> BTreeClone for BTreeMap<K, V> {
-    fn clone_from(&mut self, other: &Self) {
-        // This truncates `self` to `other.len()` by calling `split_off` on
-        // the first key after `other.len()` elements if it exists.
-        let split_off_key = if self.len() > other.len() {
-            let diff = self.len() - other.len();
-            if diff <= other.len() {
-                self.iter().nth_back(diff - 1).map(|pair| (*pair.0).clone())
-            } else {
-                self.iter().nth(other.len()).map(|pair| (*pair.0).clone())
-            }
-        } else {
-            None
-        };
-        if let Some(key) = split_off_key {
-            self.split_off(&key);
-        }
-
-        let mut siter = self.range_mut(..);
-        let mut oiter = other.iter();
-        // After truncation, `self` is at most as long as `other` so this loop
-        // replaces every key-value pair in `self`. Since `oiter` is in sorted
-        // order and the structure of the `BTreeMap` stays the same,
-        // the BTree invariants are maintained at the end of the loop.
-        while !siter.is_empty() {
-            if let Some((ok, ov)) = oiter.next() {
-                // SAFETY: This is safe because `siter` is nonempty.
-                let (sk, sv) = unsafe { siter.next_unchecked() };
-                sk.clone_from(ok);
-                sv.clone_from(ov);
-            } else {
-                break;
-            }
-        }
-        // If `other` is longer than `self`, the remaining elements are inserted.
-        self.extend(oiter.map(|(k, v)| ((*k).clone(), (*v).clone())));
-    }
 }
 
 impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 5365c9d0168..7aaa91ee10d 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -109,7 +109,7 @@
 #![feature(ptr_offset_from)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
-#![feature(specialization)]
+#![feature(min_specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
 #![feature(str_internals)]
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 2f9505ec79f..307f6714f32 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -249,7 +249,7 @@ use core::mem::{self, align_of, align_of_val, forget, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 
 use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::string::String;
@@ -1221,6 +1221,12 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
     }
 }
 
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+
+impl<T: Eq> MarkerEq for T {}
+
 /// We're doing this specialization here, and not as a more general optimization on `&T`, because it
 /// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
 /// store large values, that are slow to clone, but also heavy to check for equality, causing this
@@ -1229,7 +1235,7 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
     #[inline]
     fn eq(&self, other: &Rc<T>) -> bool {
         Rc::ptr_eq(self, other) || **self == **other
@@ -1548,25 +1554,25 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        RcFromIter::from_iter(iter.into_iter())
+        ToRcSlice::to_rc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Rc<[T]>`.
-trait RcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_rc_slice(self) -> Rc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+    default fn to_rc_slice(self) -> Rc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+    fn to_rc_slice(self) -> Rc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -1577,29 +1583,15 @@ impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
 
             unsafe {
                 // SAFETY: We need to ensure that the iterator has an exact length and we have.
-                Rc::from_iter_exact(iter, low)
+                Rc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
 /// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index dbee9d27d8a..2bcf7633542 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -20,7 +20,7 @@ use core::mem::{self, align_of, align_of_val, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 
@@ -1854,7 +1854,7 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
     #[inline]
     fn eq(&self, other: &Arc<T>) -> bool {
         Arc::ptr_eq(self, other) || **self == **other
@@ -2180,25 +2180,25 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        ArcFromIter::from_iter(iter.into_iter())
+        ToArcSlice::to_arc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Arc<[T]>`.
-trait ArcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_arc_slice(self) -> Arc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+    default fn to_arc_slice(self) -> Arc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+    fn to_arc_slice(self) -> Arc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -2209,29 +2209,15 @@ impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
 
             unsafe {
                 // SAFETY: We need to ensure that the iterator has an exact length and we have.
-                Arc::from_iter_exact(iter, low)
+                Arc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
     fn borrow(&self) -> &T {
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index cbfbf4d1cd3..d26cd77aae4 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -1619,8 +1619,8 @@ impl<T: Default> Vec<T> {
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     #[rustc_deprecated(
         reason = "This is moving towards being removed in favor \
-        of `.resize_with(Default::default)`. If you disagree, please comment \
-        in the tracking issue.",
+                  of `.resize_with(Default::default)`. If you disagree, please comment \
+                  in the tracking issue.",
         since = "1.33.0"
     )]
     pub fn resize_default(&mut self, new_len: usize) {
@@ -1825,6 +1825,7 @@ impl<T: Clone + IsZero> SpecFromElem for T {
     }
 }
 
+#[rustc_specialization_trait]
 unsafe trait IsZero {
     /// Whether this value is zero
     fn is_zero(&self) -> bool;
 }
@@ -1874,9 +1875,12 @@ unsafe impl<T> IsZero for *mut T {
     }
 }
 
-// `Option<&T>`, `Option<&mut T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
-// For fat pointers, the bytes that would be the pointer metadata in the `Some` variant
-// are padding in the `None` variant, so ignoring them and zero-initializing instead is ok.
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
 
 unsafe impl<T: ?Sized> IsZero for Option<&T> {
     #[inline]
@@ -1885,13 +1889,6 @@ unsafe impl<T: ?Sized> IsZero for Option<&T> {
     fn is_zero(&self) -> bool {
         self.is_none()
     }
 }
 
-unsafe impl<T: ?Sized> IsZero for Option<&mut T> {
-    #[inline]
-    fn is_zero(&self) -> bool {
-        self.is_none()
-    }
-}
-
 unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
     #[inline]
     fn is_zero(&self) -> bool {
```
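The `[A]RcFromIter` to `To[A]rcSlice` rewrite above works by making the iterator itself the implementing type, so the `TrustedLen` case specializes on a plain `Self` bound rather than on a second type parameter, which is the shape `min_specialization` rejects. Here is a self-contained sketch of that pattern with hypothetical names (`KnownLen`, `ToRcSliceDemo`); it uses full `feature(specialization)` because `TrustedLen` and the internal marker attributes are not usable outside the standard library:

```rust
// Nightly-only sketch of the `ToRcSlice` shape; all names are hypothetical
// stand-ins for the std-internal ones.
#![allow(incomplete_features)]
#![feature(specialization)]

use std::rc::Rc;

// Stand-in for `iter::TrustedLen`: a promise that `size_hint()` is exact.
unsafe trait KnownLen: Iterator {}
unsafe impl KnownLen for std::ops::Range<usize> {}

// The trait is implemented for the *iterator* (`Self`), not for `Rc<[T]>`
// with a free iterator parameter `I`; that is the refactor in this patch.
trait ToRcSliceDemo<T>: Iterator<Item = T> + Sized {
    fn to_rc_slice(self) -> Rc<[T]>;
}

impl<T, I: Iterator<Item = T>> ToRcSliceDemo<T> for I {
    default fn to_rc_slice(self) -> Rc<[T]> {
        // Generic fallback: buffer into a Vec, then convert.
        self.collect::<Vec<T>>().into()
    }
}

impl<T, I: Iterator<Item = T> + KnownLen> ToRcSliceDemo<T> for I {
    fn to_rc_slice(self) -> Rc<[T]> {
        // std can allocate the slice once up front here via the private
        // `Rc::from_iter_exact`; the Vec fallback stands in for it.
        let (low, high) = self.size_hint();
        debug_assert_eq!(Some(low), high, "KnownLen size hint must be exact");
        self.collect::<Vec<T>>().into()
    }
}

fn main() {
    // `Range<usize>` takes the specialized path...
    let nums: Rc<[usize]> = (0..5).to_rc_slice();
    // ...while an adapter without a `KnownLen` impl takes the fallback.
    let evens: Rc<[usize]> = (0..5).map(|i| i * 2).to_rc_slice();
    assert_eq!(&*nums, &[0, 1, 2, 3, 4]);
    assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
}
```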

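One last note, on the final `IsZero` hunk: the deleted `Option<&mut T>` impl was unreachable, because `SpecFromElem` is bounded on `T: Clone + IsZero` and `&mut T` is never `Clone`, exactly as the new comment says. A quick stable-Rust check of that claim:

```rust
// Checks the justification in the new vec.rs comment: `Option<&T>` is
// `Clone` (shared references are `Copy`), while `Option<&mut T>` is not,
// so an `IsZero` impl for it could never be reached through `SpecFromElem`.
fn assert_clone<T: Clone>() {}

fn main() {
    assert_clone::<Option<&u32>>(); // compiles: `&T` is `Copy`
    // assert_clone::<Option<&mut u32>>(); // error: `&mut u32` is not `Clone`
}
```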