| field | value | date |
|---|---|---|
| author | csmoe <csmoe@msn.com> | 2020-05-18 08:46:24 +0800 |
| committer | csmoe <csmoe@msn.com> | 2020-05-19 11:02:29 +0800 |
| commit | 2f311b07c8d95b1192e585e983535de89bcbdfaa (patch) | |
| tree | 0e12c995dfdd9352eaa61d542c6f604aca23b456 /src/liballoc | |
| parent | 8841ede3648b5f12284dae850ec065374fd3af46 (diff) | |
| parent | d79f1bd31a1401b5d08096fcdf9a9eb23ddf95df (diff) | |
| download | rust-2f311b07c8d95b1192e585e983535de89bcbdfaa.tar.gz, rust-2f311b07c8d95b1192e585e983535de89bcbdfaa.zip | |
Merge branch 'master' into issue-69276
Diffstat (limited to 'src/liballoc')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/liballoc/alloc/tests.rs | 2 |
| -rw-r--r-- | src/liballoc/collections/btree/map.rs | 53 |
| -rw-r--r-- | src/liballoc/collections/linked_list/tests.rs | 1 |
| -rw-r--r-- | src/liballoc/collections/vec_deque.rs | 20 |
| -rw-r--r-- | src/liballoc/collections/vec_deque/tests.rs | 8 |
| -rw-r--r-- | src/liballoc/lib.rs | 2 |
| -rw-r--r-- | src/liballoc/raw_vec.rs | 224 |
| -rw-r--r-- | src/liballoc/rc.rs | 50 |
| -rw-r--r-- | src/liballoc/sync.rs | 53 |
| -rw-r--r-- | src/liballoc/sync/tests.rs | 8 |
| -rw-r--r-- | src/liballoc/vec.rs | 21 |
11 files changed, 161 insertions, 281 deletions
```diff
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs
index 1ad40eca93b..1c003983df9 100644
--- a/src/liballoc/alloc/tests.rs
+++ b/src/liballoc/alloc/tests.rs
@@ -23,7 +23,7 @@ fn allocate_zeroed() {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn alloc_owned_small(b: &mut Bencher) {
     b.iter(|| {
         let _: Box<_> = box 10;
diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index 113df80d0c2..c6cb39b1bf5 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -215,59 +215,6 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
             clone_subtree(self.root.as_ref().unwrap().as_ref())
         }
     }
-
-    fn clone_from(&mut self, other: &Self) {
-        BTreeClone::clone_from(self, other);
-    }
-}
-
-trait BTreeClone {
-    fn clone_from(&mut self, other: &Self);
-}
-
-impl<K: Clone, V: Clone> BTreeClone for BTreeMap<K, V> {
-    default fn clone_from(&mut self, other: &Self) {
-        *self = other.clone();
-    }
-}
-
-impl<K: Clone + Ord, V: Clone> BTreeClone for BTreeMap<K, V> {
-    fn clone_from(&mut self, other: &Self) {
-        // This truncates `self` to `other.len()` by calling `split_off` on
-        // the first key after `other.len()` elements if it exists.
-        let split_off_key = if self.len() > other.len() {
-            let diff = self.len() - other.len();
-            if diff <= other.len() {
-                self.iter().nth_back(diff - 1).map(|pair| (*pair.0).clone())
-            } else {
-                self.iter().nth(other.len()).map(|pair| (*pair.0).clone())
-            }
-        } else {
-            None
-        };
-        if let Some(key) = split_off_key {
-            self.split_off(&key);
-        }
-
-        let mut siter = self.range_mut(..);
-        let mut oiter = other.iter();
-        // After truncation, `self` is at most as long as `other` so this loop
-        // replaces every key-value pair in `self`. Since `oiter` is in sorted
-        // order and the structure of the `BTreeMap` stays the same,
-        // the BTree invariants are maintained at the end of the loop.
-        while !siter.is_empty() {
-            if let Some((ok, ov)) = oiter.next() {
-                // SAFETY: This is safe because `siter` is nonempty.
-                let (sk, sv) = unsafe { siter.next_unchecked() };
-                sk.clone_from(ok);
-                sv.clone_from(ov);
-            } else {
-                break;
-            }
-        }
-        // If `other` is longer than `self`, the remaining elements are inserted.
-        self.extend(oiter.map(|(k, v)| ((*k).clone(), (*v).clone())));
-    }
 }
 
 impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
```
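Note on the `BTreeMap` change: the removed `clone_from` used `default fn` specialization on an extra `Ord` bound, which falls outside the `min_specialization` subset that `lib.rs` switches to below, so the map now falls back to the generic `*self = other.clone()`. The idea the removed impl implemented, overwriting existing entries in place instead of rebuilding the tree, can be sketched against a plain `Vec` (illustration only, not the `BTreeMap` internals):

```rust
// Element-wise clone_from that reuses the destination's storage, the way
// the removed BTreeMap impl reused existing tree nodes.
fn clone_from_reusing<T: Clone>(dst: &mut Vec<T>, src: &[T]) {
    dst.truncate(src.len()); // drop surplus elements first
    let reused = dst.len();
    for (d, s) in dst.iter_mut().zip(src) {
        d.clone_from(s); // overwrite in place; no fresh allocation
    }
    dst.extend(src[reused..].iter().cloned()); // append whatever remains
}

fn main() {
    let mut dst = vec![9; 5];
    clone_from_reusing(&mut dst, &[1, 2, 3]);
    assert_eq!(dst, [1, 2, 3]);
}
```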
```diff
diff --git a/src/liballoc/collections/linked_list/tests.rs b/src/liballoc/collections/linked_list/tests.rs
index 085f734ed91..b8c93a28bba 100644
--- a/src/liballoc/collections/linked_list/tests.rs
+++ b/src/liballoc/collections/linked_list/tests.rs
@@ -182,7 +182,6 @@ fn test_insert_prev() {
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
 fn test_send() {
     let n = list_from(&[1, 2, 3]);
     thread::spawn(move || {
diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs
index 2f50234b6d5..540649c61b3 100644
--- a/src/liballoc/collections/vec_deque.rs
+++ b/src/liballoc/collections/vec_deque.rs
@@ -1354,7 +1354,9 @@ impl<T> VecDeque<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn push_front(&mut self, value: T) {
-        self.grow_if_necessary();
+        if self.is_full() {
+            self.grow();
+        }
 
         self.tail = self.wrap_sub(self.tail, 1);
         let tail = self.tail;
@@ -1377,7 +1379,9 @@ impl<T> VecDeque<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn push_back(&mut self, value: T) {
-        self.grow_if_necessary();
+        if self.is_full() {
+            self.grow();
+        }
 
         let head = self.head;
         self.head = self.wrap_add(self.head, 1);
@@ -1485,7 +1489,9 @@ impl<T> VecDeque<T> {
     #[stable(feature = "deque_extras_15", since = "1.5.0")]
     pub fn insert(&mut self, index: usize, value: T) {
         assert!(index <= self.len(), "index out of bounds");
-        self.grow_if_necessary();
+        if self.is_full() {
+            self.grow();
+        }
 
         // Move the least number of elements in the ring buffer and insert
         // the given object
@@ -2003,11 +2009,13 @@ impl<T> VecDeque<T> {
     }
 
     // This may panic or abort
-    #[inline]
-    fn grow_if_necessary(&mut self) {
+    #[inline(never)]
+    fn grow(&mut self) {
         if self.is_full() {
             let old_cap = self.cap();
-            self.buf.double();
+            // Double the buffer size.
+            self.buf.reserve_exact(old_cap, old_cap);
+            assert!(self.cap() == old_cap * 2);
             unsafe {
                 self.handle_capacity_increase(old_cap);
             }
```
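The `VecDeque` change splits each push into an inlinable fullness check plus a cold, out-of-line `grow`, and replaces the dedicated `RawVec::double` (deleted in `raw_vec.rs` below) with `reserve_exact`. A self-contained sketch of that fast-path/slow-path shape, using a hypothetical counter type rather than the real ring buffer:

```rust
struct Ring {
    len: usize,
    cap: usize,
}

impl Ring {
    fn is_full(&self) -> bool {
        self.len == self.cap
    }

    #[inline]
    fn push(&mut self) {
        // Fast path: small enough to be inlined at every call site.
        if self.is_full() {
            self.grow();
        }
        self.len += 1;
    }

    #[inline(never)]
    fn grow(&mut self) {
        // Cold path, kept out of line so it doesn't bloat callers.
        // Doubling keeps a sequence of pushes amortized O(1).
        self.cap = if self.cap == 0 { 4 } else { self.cap * 2 };
    }
}

fn main() {
    let mut r = Ring { len: 0, cap: 0 };
    for _ in 0..100 {
        r.push();
    }
    assert!(r.cap >= r.len && r.cap <= 2 * r.len);
}
```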
```diff
diff --git a/src/liballoc/collections/vec_deque/tests.rs b/src/liballoc/collections/vec_deque/tests.rs
index 0a3f3300323..fc2ec7908e8 100644
--- a/src/liballoc/collections/vec_deque/tests.rs
+++ b/src/liballoc/collections/vec_deque/tests.rs
@@ -3,7 +3,7 @@ use super::*;
 use test;
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_push_back_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::with_capacity(101);
     b.iter(|| {
@@ -16,7 +16,7 @@ fn bench_push_back_100(b: &mut test::Bencher) {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_push_front_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::with_capacity(101);
     b.iter(|| {
@@ -29,7 +29,7 @@ fn bench_push_front_100(b: &mut test::Bencher) {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_pop_back_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::<i32>::with_capacity(101);
 
@@ -43,7 +43,7 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
 }
 
 #[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
 fn bench_pop_front_100(b: &mut test::Bencher) {
     let mut deq = VecDeque::<i32>::with_capacity(101);
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 5365c9d0168..7aaa91ee10d 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -109,7 +109,7 @@
 #![feature(ptr_offset_from)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
-#![feature(specialization)]
+#![feature(min_specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
 #![feature(str_internals)]
```
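`min_specialization` is the deliberately restricted subset of `specialization`: roughly, a `default` impl may only be specialized by impls that are "always applicable", which rules out most bound-based tricks like the `Ord`-specialized `clone_from` deleted above. A nightly-only sketch of what remains allowed (illustrative trait, not from this patch):

```rust
#![feature(min_specialization)]

trait Describe {
    fn describe(&self) -> &'static str;
}

impl<T> Describe for T {
    default fn describe(&self) -> &'static str {
        "some value"
    }
}

// Specializing the blanket impl for a fully concrete type stays inside the
// subset; specializing on an extra trait bound generally does not.
impl Describe for u32 {
    fn describe(&self) -> &'static str {
        "a u32"
    }
}

fn main() {
    assert_eq!(0u32.describe(), "a u32");
    assert_eq!("x".describe(), "some value");
}
```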
```diff
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index a8e19c9cbaa..d46bf81f996 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -1,7 +1,7 @@
 #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
 #![doc(hidden)]
 
-use core::alloc::MemoryBlock;
+use core::alloc::{LayoutErr, MemoryBlock};
 use core::cmp;
 use core::mem::{self, ManuallyDrop, MaybeUninit};
 use core::ops::Drop;
@@ -211,82 +211,6 @@ impl<T, A: AllocRef> RawVec<T, A> {
         }
     }
 
-    /// Doubles the size of the type's backing allocation. This is common enough
-    /// to want to do that it's easiest to just have a dedicated method. Slightly
-    /// more efficient logic can be provided for this than the general case.
-    ///
-    /// This function is ideal for when pushing elements one-at-a-time because
-    /// you don't need to incur the costs of the more general computations
-    /// reserve needs to do to guard against overflow. You do however need to
-    /// manually check if your `len == capacity`.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-    ///   all `usize::MAX` slots in your imaginary buffer.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    ///
-    /// # Aborts
-    ///
-    /// Aborts on OOM
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # #![feature(raw_vec_internals)]
-    /// # extern crate alloc;
-    /// # use std::ptr;
-    /// # use alloc::raw_vec::RawVec;
-    /// struct MyVec<T> {
-    ///     buf: RawVec<T>,
-    ///     len: usize,
-    /// }
-    ///
-    /// impl<T> MyVec<T> {
-    ///     pub fn push(&mut self, elem: T) {
-    ///         if self.len == self.buf.capacity() { self.buf.double(); }
-    ///         // double would have aborted or panicked if the len exceeded
-    ///         // `isize::MAX` so this is safe to do unchecked now.
-    ///         unsafe {
-    ///             ptr::write(self.buf.ptr().add(self.len), elem);
-    ///         }
-    ///         self.len += 1;
-    ///     }
-    /// }
-    /// # fn main() {
-    /// #     let mut vec = MyVec { buf: RawVec::new(), len: 0 };
-    /// #     vec.push(1);
-    /// # }
-    /// ```
-    #[inline(never)]
-    #[cold]
-    pub fn double(&mut self) {
-        match self.grow(Double, MayMove, Uninitialized) {
-            Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
-            Ok(()) => { /* yay */ }
-        }
-    }
-
-    /// Attempts to double the size of the type's backing allocation in place. This is common
-    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
-    /// more efficient logic can be provided for this than the general case.
-    ///
-    /// Returns `true` if the reallocation attempt has succeeded.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-    ///   all `usize::MAX` slots in your imaginary buffer.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    #[inline(never)]
-    #[cold]
-    pub fn double_in_place(&mut self) -> bool {
-        self.grow(Double, InPlace, Uninitialized).is_ok()
-    }
-
     /// Ensures that the buffer contains at least enough space to hold
     /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
     /// enough capacity, will reallocate enough space plus comfortable slack
@@ -354,7 +278,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
         needed_extra_capacity: usize,
     ) -> Result<(), TryReserveError> {
         if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+            self.grow_amortized(used_capacity, needed_extra_capacity, MayMove)
         } else {
             Ok(())
         }
@@ -381,8 +305,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
         // This is more readable than putting this in one line:
         // `!self.needs_to_grow(...) || self.grow(...).is_ok()`
         if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized)
-                .is_ok()
+            self.grow_amortized(used_capacity, needed_extra_capacity, InPlace).is_ok()
         } else {
             true
         }
@@ -423,7 +346,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
         needed_extra_capacity: usize,
     ) -> Result<(), TryReserveError> {
         if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+            self.grow_exact(used_capacity, needed_extra_capacity)
         } else {
             Ok(())
         }
@@ -448,14 +371,6 @@ impl<T, A: AllocRef> RawVec<T, A> {
     }
 }
 
-#[derive(Copy, Clone)]
-enum Strategy {
-    Double,
-    Amortized { used_capacity: usize, needed_extra_capacity: usize },
-    Exact { used_capacity: usize, needed_extra_capacity: usize },
-}
-use Strategy::*;
-
 impl<T, A: AllocRef> RawVec<T, A> {
     /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
     /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
@@ -473,68 +388,59 @@ impl<T, A: AllocRef> RawVec<T, A> {
         self.cap = Self::capacity_from_bytes(memory.size);
     }
 
-    /// Single method to handle all possibilities of growing the buffer.
-    fn grow(
+    // This method is usually instantiated many times. So we want it to be as
+    // small as possible, to improve compile times. But we also want as much of
+    // its contents to be statically computable as possible, to make the
+    // generated code run faster. Therefore, this method is carefully written
+    // so that all of the code that depends on `T` is within it, while as much
+    // of the code that doesn't depend on `T` as possible is in functions that
+    // are non-generic over `T`.
+    fn grow_amortized(
         &mut self,
-        strategy: Strategy,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
         placement: ReallocPlacement,
-        init: AllocInit,
     ) -> Result<(), TryReserveError> {
-        let elem_size = mem::size_of::<T>();
-        if elem_size == 0 {
+        if mem::size_of::<T>() == 0 {
             // Since we return a capacity of `usize::MAX` when `elem_size` is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow);
         }
-        let new_layout = match strategy {
-            Double => unsafe {
-                // Since we guarantee that we never allocate more than `isize::MAX` bytes,
-                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
-                // Additionally the alignment will never be too large as to "not be satisfiable",
-                // so `Layout::from_size_align` will always return `Some`.
-                //
-                // TL;DR, we bypass runtime checks due to dynamic assertions in this module,
-                // allowing us to use `from_size_align_unchecked`.
-                let cap = if self.cap == 0 {
-                    // Skip to 4 because tiny `Vec`'s are dumb; but not if that would cause overflow.
-                    if elem_size > usize::MAX / 8 { 1 } else { 4 }
-                } else {
-                    self.cap * 2
-                };
-                Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>())
-            },
-            Amortized { used_capacity, needed_extra_capacity } => {
-                // Nothing we can really do about these checks, sadly.
-                let required_cap =
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-                // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
-                let double_cap = self.cap * 2;
-                // `double_cap` guarantees exponential growth.
-                let cap = cmp::max(double_cap, required_cap);
-                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
-            }
-            Exact { used_capacity, needed_extra_capacity } => {
-                let cap =
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-                Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
-            }
-        };
-        alloc_guard(new_layout.size())?;
-
-        let memory = if let Some((ptr, old_layout)) = self.current_memory() {
-            debug_assert_eq!(old_layout.align(), new_layout.align());
-            unsafe {
-                self.alloc
-                    .grow(ptr, old_layout, new_layout.size(), placement, init)
-                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
-            }
-        } else {
-            match placement {
-                MayMove => self.alloc.alloc(new_layout, init),
-                InPlace => Err(AllocErr),
-            }
-            .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
-        };
+        // Nothing we can really do about these checks, sadly.
+        let required_cap =
+            used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
+        let double_cap = self.cap * 2;
+        // `double_cap` guarantees exponential growth.
+        let cap = cmp::max(double_cap, required_cap);
+        let new_layout = Layout::array::<T>(cap);
+
+        // `finish_grow` is non-generic over `T`.
+        let memory = finish_grow(new_layout, placement, self.current_memory(), &mut self.alloc)?;
+        self.set_memory(memory);
+        Ok(())
+    }
+
+    // The constraints on this method are much the same as those on
+    // `grow_amortized`, but this method is usually instantiated less often so
+    // it's less critical.
+    fn grow_exact(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if mem::size_of::<T>() == 0 {
+            // Since we return a capacity of `usize::MAX` when the type size is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow);
+        }
+
+        let cap = used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+        let new_layout = Layout::array::<T>(cap);
+
+        // `finish_grow` is non-generic over `T`.
+        let memory = finish_grow(new_layout, MayMove, self.current_memory(), &mut self.alloc)?;
         self.set_memory(memory);
         Ok(())
     }
@@ -562,6 +468,38 @@ impl<T, A: AllocRef> RawVec<T, A> {
     }
 }
 
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+fn finish_grow<A>(
+    new_layout: Result<Layout, LayoutErr>,
+    placement: ReallocPlacement,
+    current_memory: Option<(NonNull<u8>, Layout)>,
+    alloc: &mut A,
+) -> Result<MemoryBlock, TryReserveError>
+where
+    A: AllocRef,
+{
+    // Check for the error here to minimize the size of `RawVec::grow_*`.
+    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+
+    alloc_guard(new_layout.size())?;
+
+    let memory = if let Some((ptr, old_layout)) = current_memory {
+        debug_assert_eq!(old_layout.align(), new_layout.align());
+        unsafe { alloc.grow(ptr, old_layout, new_layout.size(), placement, Uninitialized) }
+    } else {
+        match placement {
+            MayMove => alloc.alloc(new_layout, Uninitialized),
+            InPlace => Err(AllocErr),
+        }
+    }
+    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?;
+
+    Ok(memory)
+}
+
 impl<T> RawVec<T, Global> {
     /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
     ///
```
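The `raw_vec.rs` rewrite is mainly a monomorphization diet: `grow_amortized::<T>` is instantiated for every element type a program ever puts in a `Vec`, so everything that doesn't depend on `T` is pushed into `finish_grow`, which is generic only over the allocator. A standalone sketch of the same split, with the allocator work stubbed out (note that `LayoutErr` in the patch is that era's name for today's `LayoutError`):

```rust
use std::alloc::{Layout, LayoutError};
use std::cmp;

// Generic front end: one copy per element type, so keep it tiny.
// Only the `Layout::array::<T>` call actually depends on `T`.
fn grow_amortized<T>(cap: usize, needed: usize) -> Result<Layout, ()> {
    let required = cap.checked_add(needed).ok_or(())?;
    let new_cap = cmp::max(cap * 2, required); // exponential growth
    finish_grow(Layout::array::<T>(new_cap))
}

// Non-generic back end: compiled once and shared by every `T`.
fn finish_grow(new_layout: Result<Layout, LayoutError>) -> Result<Layout, ()> {
    // The overflow check lives here, off the generic fast path.
    let layout = new_layout.map_err(|_| ())?;
    // ... the real code calls the allocator's grow/alloc here ...
    Ok(layout)
}

fn main() {
    let l = grow_amortized::<u64>(4, 1).unwrap();
    assert_eq!(l.size(), 64); // max(2 * 4, 4 + 1) = 8 elements of 8 bytes
}
```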
```diff
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 2f9505ec79f..76213e65bab 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -249,7 +249,7 @@ use core::mem::{self, align_of, align_of_val, forget, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 
 use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
 use crate::string::String;
@@ -1221,6 +1221,12 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
     }
 }
 
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+
+impl<T: Eq> MarkerEq for T {}
+
 /// We're doing this specialization here, and not as a more general optimization on `&T`, because it
 /// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
 /// store large values, that are slow to clone, but also heavy to check for equality, causing this
@@ -1229,7 +1235,7 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
     #[inline]
     fn eq(&self, other: &Rc<T>) -> bool {
         Rc::ptr_eq(self, other) || **self == **other
@@ -1548,25 +1554,25 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        RcFromIter::from_iter(iter.into_iter())
+        ToRcSlice::to_rc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Rc<[T]>`.
-trait RcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_rc_slice(self) -> Rc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+    default fn to_rc_slice(self) -> Rc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+    fn to_rc_slice(self) -> Rc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -1577,29 +1583,15 @@ impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
 
             unsafe {
                 // SAFETY: We need to ensure that the iterator has an exact length and we have.
-                Rc::from_iter_exact(iter, low)
+                Rc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
 /// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
@@ -2035,6 +2027,8 @@ trait RcBoxPtr<T: ?Sized> {
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
         if strong == 0 || strong == usize::max_value() {
+            // remove `unsafe` on bootstrap bump
+            #[cfg_attr(not(bootstrap), allow(unused_unsafe))]
             unsafe {
                 abort();
             }
@@ -2061,6 +2055,8 @@ trait RcBoxPtr<T: ?Sized> {
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
         if weak == 0 || weak == usize::max_value() {
+            // remove `unsafe` on bootstrap bump
+            #[cfg_attr(not(bootstrap), allow(unused_unsafe))]
             unsafe {
                 abort();
            }
```
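`#[rustc_unsafe_specialization_marker]` is the internal escape hatch that lets the `Rc`/`Arc` equality shortcut survive `min_specialization`: specializing directly on `Eq` is forbidden because `Eq` carries a method, so a method-less marker trait stands in for it. The payoff is visible from ordinary code: when `T: Eq`, equality is reflexive, so two handles to one allocation may report equal without reading the contents. A user-level analogue (hypothetical helper, not the std impl):

```rust
use std::rc::Rc;

// Requires `Eq`, not just `PartialEq`: with an irreflexive `PartialEq`
// (think NaN) the pointer shortcut would change observable behavior.
fn rc_eq<T: Eq>(a: &Rc<T>, b: &Rc<T>) -> bool {
    Rc::ptr_eq(a, b) || **a == **b
}

fn main() {
    let a = Rc::new(vec![1, 2, 3]);
    let b = Rc::clone(&a);
    assert!(rc_eq(&a, &b)); // short-circuits on the pointer comparison
    assert!(!rc_eq(&a, &Rc::new(vec![4])));
}
```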
```diff
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index 19d289c87fd..2c6d130826b 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -20,7 +20,7 @@ use core::mem::{self, align_of, align_of_val, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 
@@ -835,12 +835,14 @@ impl<T: ?Sized> Arc<T> {
     ///
     /// unsafe {
     ///     let ptr = Arc::into_raw(five);
-    ///     Arc::decr_strong_count(ptr);
+    ///     Arc::incr_strong_count(ptr);
     ///
-    ///     // This assertion is deterministic because we haven't shared
+    ///     // Those assertions are deterministic because we haven't shared
     ///     // the `Arc` between threads.
     ///     let five = Arc::from_raw(ptr);
-    ///     assert_eq!(0, Arc::strong_count(&five));
+    ///     assert_eq!(2, Arc::strong_count(&five));
+    ///     Arc::decr_strong_count(ptr);
+    ///     assert_eq!(1, Arc::strong_count(&five));
     /// }
     /// ```
     #[inline]
@@ -1094,6 +1096,8 @@ impl<T: ?Sized> Clone for Arc<T> {
         // We abort because such a program is incredibly degenerate, and we
         // don't care to support it.
         if old_size > MAX_REFCOUNT {
+            // remove `unsafe` on bootstrap bump
+            #[cfg_attr(not(bootstrap), allow(unused_unsafe))]
             unsafe {
                 abort();
             }
@@ -1612,6 +1616,8 @@ impl<T: ?Sized> Weak<T> {
 
         // See comments in `Arc::clone` for why we do this (for `mem::forget`).
         if n > MAX_REFCOUNT {
+            // remove `unsafe` on bootstrap bump
+            #[cfg_attr(not(bootstrap), allow(unused_unsafe))]
             unsafe {
                 abort();
             }
@@ -1751,6 +1757,7 @@ impl<T: ?Sized> Clone for Weak<T> {
 
         // See comments in Arc::clone() for why we do this (for mem::forget).
         if old_size > MAX_REFCOUNT {
+            #[cfg_attr(not(bootstrap), allow(unused_unsafe))] // remove `unsafe` on bootstrap bump
             unsafe {
                 abort();
             }
@@ -1852,7 +1859,7 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
     #[inline]
     fn eq(&self, other: &Arc<T>) -> bool {
         Arc::ptr_eq(self, other) || **self == **other
@@ -2178,25 +2185,25 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        ArcFromIter::from_iter(iter.into_iter())
+        ToArcSlice::to_arc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Arc<[T]>`.
-trait ArcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_arc_slice(self) -> Arc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+    default fn to_arc_slice(self) -> Arc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+    fn to_arc_slice(self) -> Arc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -2207,29 +2214,15 @@ impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
 
             unsafe {
                 // SAFETY: We need to ensure that the iterator has an exact length and we have.
-                Arc::from_iter_exact(iter, low)
+                Arc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
     fn borrow(&self) -> &T {
diff --git a/src/liballoc/sync/tests.rs b/src/liballoc/sync/tests.rs
index edc2820ee22..a2bb651e2b7 100644
--- a/src/liballoc/sync/tests.rs
+++ b/src/liballoc/sync/tests.rs
@@ -32,7 +32,6 @@ impl Drop for Canary {
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
 fn manually_share_arc() {
     let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
     let arc_v = Arc::new(v);
@@ -337,12 +336,13 @@ fn test_ptr_eq() {
 
 #[test]
 #[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
 fn test_weak_count_locked() {
     let mut a = Arc::new(atomic::AtomicBool::new(false));
     let a2 = a.clone();
     let t = thread::spawn(move || {
-        for _i in 0..1000000 {
+        // Miri is too slow
+        let count = if cfg!(miri) { 1000 } else { 1000000 };
+        for _i in 0..count {
             Arc::get_mut(&mut a);
         }
         a.store(true, SeqCst);
@@ -351,6 +351,8 @@ fn test_weak_count_locked() {
     while !a2.load(SeqCst) {
         let n = Arc::weak_count(&a2);
         assert!(n < 2, "bad weak count: {}", n);
+        #[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
+        atomic::spin_loop_hint();
     }
     t.join().unwrap();
 }
```
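The test changes are the standard recipe for running thread tests under Miri instead of skipping them: scale the workload down with `cfg!(miri)`, and put a spin-loop hint in any busy-wait so Miri's scheduler eventually runs the writer (`atomic::spin_loop_hint` has since been renamed `std::hint::spin_loop`). A minimal sketch of the same pattern:

```rust
use std::hint;
use std::sync::atomic::{AtomicBool, Ordering::SeqCst};
use std::sync::Arc;
use std::thread;

fn main() {
    let done = Arc::new(AtomicBool::new(false));
    let done2 = Arc::clone(&done);

    // Scale the workload down under Miri, as the test above does.
    let iters = if cfg!(miri) { 1_000 } else { 1_000_000 };
    let t = thread::spawn(move || {
        for i in 0..iters {
            hint::black_box(i); // stand-in for real work
        }
        done2.store(true, SeqCst);
    });

    while !done.load(SeqCst) {
        // A pause on real hardware; under Miri it also yields so the
        // spawned thread can make progress (liveness).
        hint::spin_loop();
    }
    t.join().unwrap();
}
```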
```diff
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index cbfbf4d1cd3..d26cd77aae4 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -1619,8 +1619,8 @@ impl<T: Default> Vec<T> {
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     #[rustc_deprecated(
         reason = "This is moving towards being removed in favor \
-        of `.resize_with(Default::default)`. If you disagree, please comment \
-        in the tracking issue.",
+                  of `.resize_with(Default::default)`. If you disagree, please comment \
+                  in the tracking issue.",
         since = "1.33.0"
     )]
     pub fn resize_default(&mut self, new_len: usize) {
@@ -1825,6 +1825,7 @@ impl<T: Clone + IsZero> SpecFromElem for T {
     }
 }
 
+#[rustc_specialization_trait]
 unsafe trait IsZero {
     /// Whether this value is zero
     fn is_zero(&self) -> bool;
@@ -1874,9 +1875,12 @@ unsafe impl<T> IsZero for *mut T {
     }
 }
 
-// `Option<&T>`, `Option<&mut T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
-// For fat pointers, the bytes that would be the pointer metadata in the `Some` variant
-// are padding in the `None` variant, so ignoring them and zero-initializing instead is ok.
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
 
 unsafe impl<T: ?Sized> IsZero for Option<&T> {
     #[inline]
@@ -1885,13 +1889,6 @@ unsafe impl<T: ?Sized> IsZero for Option<&T> {
     }
 }
 
-unsafe impl<T: ?Sized> IsZero for Option<&mut T> {
-    #[inline]
-    fn is_zero(&self) -> bool {
-        self.is_none()
-    }
-}
-
 unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
     #[inline]
     fn is_zero(&self) -> bool {
```
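`IsZero` is what lets `vec![x; n]` lower to a single zeroed allocation when `x` is all-zero bytes, and `#[rustc_specialization_trait]` re-blesses it under `min_specialization`. The `Option<&T>` impl leans on the guaranteed null-pointer niche, which can be checked from stable code (illustrative check, not from the patch); the `Option<&mut T>` impl could go because `&mut T` is never `Clone`, so `vec![elem; n]` cannot be called with it:

```rust
use std::mem;

fn main() {
    // `Option<&T>` is pointer-sized and `None` is guaranteed to be the
    // all-zero (null) representation, which is the property `IsZero` needs.
    assert_eq!(mem::size_of::<Option<&u8>>(), mem::size_of::<usize>());
    let none: Option<&u8> = None;
    let bits: usize = unsafe { mem::transmute(none) };
    assert_eq!(bits, 0);
}
```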
