| field | value | date |
|---|---|---|
| author | Tim Diekmann <tim.diekmann@3dvision.de> | 2020-03-24 11:45:38 +0100 |
| committer | Tim Diekmann <tim.diekmann@3dvision.de> | 2020-03-26 17:10:54 +0100 |
| commit | 56cbf2f22aeb6448acd7eb49e9b2554c80bdbf79 (patch) | |
| tree | af24a0972cda1bd07560e36c5edd5aa14b53fc7d /src/liballoc | |
| parent | 2fbb07525e2f07a815e780a4268b11916248b5a9 (diff) | |
Overhaul of the `AllocRef` trait to match allocator-wg's latest consensus
Diffstat (limited to 'src/liballoc')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/liballoc/alloc.rs | 111 |
| -rw-r--r-- | src/liballoc/alloc/tests.rs | 5 |
| -rw-r--r-- | src/liballoc/boxed.rs | 44 |
| -rw-r--r-- | src/liballoc/raw_vec.rs | 568 |
| -rw-r--r-- | src/liballoc/raw_vec/tests.rs | 9 |
| -rw-r--r-- | src/liballoc/rc.rs | 6 |
| -rw-r--r-- | src/liballoc/sync.rs | 6 |
| -rw-r--r-- | src/liballoc/tests/heap.rs | 10 |
| -rw-r--r-- | src/liballoc/vec.rs | 2 |

9 files changed, 347 insertions(+), 414 deletions(-)
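The hunks below adapt `liballoc` to the reworked allocator interface: the separate `alloc`/`alloc_zeroed`/`realloc` methods give way to a single `alloc` that takes an `AllocInit`, plus dedicated `grow` and `shrink` methods that take a `ReallocPlacement`. For orientation, here is a condensed sketch of that interface as it can be read off the diff; the authoritative definitions live in `core::alloc` and carry more provided methods and safety documentation than shown here.

```rust
// Condensed sketch, reconstructed from this commit's hunks; not the exact
// `core::alloc` source.
use std::alloc::Layout;
use std::ptr::NonNull;

/// Error returned by the fallible allocation methods.
pub struct AllocErr;

/// How newly allocated (or newly grown) memory is initialized.
pub enum AllocInit {
    Uninitialized,
    Zeroed,
}

/// Whether a reallocation is allowed to move the block.
pub enum ReallocPlacement {
    MayMove,
    InPlace,
}

pub unsafe trait AllocRef {
    /// Allocates a block for `layout`, initialized per `init`; returns the
    /// pointer and the actually usable size ("excess") in bytes.
    fn alloc(&mut self, layout: Layout, init: AllocInit)
        -> Result<(NonNull<u8>, usize), AllocErr>;

    /// Deallocates a block previously returned by this allocator.
    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout);

    /// Grows a block to `new_size` bytes (`new_size >= layout.size()`).
    unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        new_size: usize,
        placement: ReallocPlacement,
        init: AllocInit,
    ) -> Result<(NonNull<u8>, usize), AllocErr>;

    /// Shrinks a block to `new_size` bytes (`new_size <= layout.size()`).
    unsafe fn shrink(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        new_size: usize,
        placement: ReallocPlacement,
    ) -> Result<(NonNull<u8>, usize), AllocErr>;
}
```

Every method reports the usable size alongside the pointer, which `RawVec` below uses (via `capacity_from_bytes`) to derive its capacity from the allocator's excess.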
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs index 9f82b2c6fa6..26524f62962 100644 --- a/src/liballoc/alloc.rs +++ b/src/liballoc/alloc.rs @@ -2,7 +2,7 @@ #![stable(feature = "alloc_module", since = "1.28.0")] -use core::intrinsics::{min_align_of_val, size_of_val}; +use core::intrinsics::{self, min_align_of_val, size_of_val}; use core::ptr::{NonNull, Unique}; use core::usize; @@ -165,11 +165,19 @@ pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 { #[unstable(feature = "allocator_api", issue = "32838")] unsafe impl AllocRef for Global { #[inline] - fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> { - if layout.size() == 0 { + fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<(NonNull<u8>, usize), AllocErr> { + let new_size = layout.size(); + if new_size == 0 { Ok((layout.dangling(), 0)) } else { - unsafe { NonNull::new(alloc(layout)).ok_or(AllocErr).map(|p| (p, layout.size())) } + unsafe { + let raw_ptr = match init { + AllocInit::Uninitialized => alloc(layout), + AllocInit::Zeroed => alloc_zeroed(layout), + }; + let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?; + Ok((ptr, new_size)) + } } } @@ -181,33 +189,77 @@ unsafe impl AllocRef for Global { } #[inline] - unsafe fn realloc( + unsafe fn grow( &mut self, ptr: NonNull<u8>, layout: Layout, new_size: usize, + placement: ReallocPlacement, + init: AllocInit, ) -> Result<(NonNull<u8>, usize), AllocErr> { - match (layout.size(), new_size) { - (0, 0) => Ok((layout.dangling(), 0)), - (0, _) => self.alloc(Layout::from_size_align_unchecked(new_size, layout.align())), - (_, 0) => { - self.dealloc(ptr, layout); - Ok((layout.dangling(), 0)) + let old_size = layout.size(); + debug_assert!( + new_size >= old_size, + "`new_size` must be greater than or equal to `layout.size()`" + ); + + if old_size == new_size { + return Ok((ptr, new_size)); + } + + match placement { + ReallocPlacement::MayMove => { + if old_size == 0 { + self.alloc(Layout::from_size_align_unchecked(new_size, layout.align()), init) + } else { + // `realloc` probably checks for `new_size > old_size` or something similar. 
+ // `new_size` must be greater than or equal to `old_size` due to the safety constraint, + // and `new_size` == `old_size` was caught before + intrinsics::assume(new_size > old_size); + let ptr = + NonNull::new(realloc(ptr.as_ptr(), layout, new_size)).ok_or(AllocErr)?; + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); + init.initialize_offset(ptr, new_layout, old_size); + Ok((ptr, new_size)) + } } - (_, _) => NonNull::new(realloc(ptr.as_ptr(), layout, new_size)) - .ok_or(AllocErr) - .map(|p| (p, new_size)), + ReallocPlacement::InPlace => Err(AllocErr), } } #[inline] - fn alloc_zeroed(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> { - if layout.size() == 0 { - Ok((layout.dangling(), 0)) - } else { - unsafe { - NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr).map(|p| (p, layout.size())) + unsafe fn shrink( + &mut self, + ptr: NonNull<u8>, + layout: Layout, + new_size: usize, + placement: ReallocPlacement, + ) -> Result<(NonNull<u8>, usize), AllocErr> { + let old_size = layout.size(); + debug_assert!( + new_size <= old_size, + "`new_size` must be smaller than or equal to `layout.size()`" + ); + + if old_size == new_size { + return Ok((ptr, new_size)); + } + + match placement { + ReallocPlacement::MayMove => { + let ptr = if new_size == 0 { + self.dealloc(ptr, layout); + layout.dangling() + } else { + // `realloc` probably checks for `new_size > old_size` or something similar. + // `new_size` must be smaller than or equal to `old_size` due to the safety constraint, + // and `new_size` == `old_size` was caught before + intrinsics::assume(new_size < old_size); + NonNull::new(realloc(ptr.as_ptr(), layout, new_size)).ok_or(AllocErr)? + }; + Ok((ptr, new_size)) } + ReallocPlacement::InPlace => Err(AllocErr), } } } @@ -218,14 +270,10 @@ unsafe impl AllocRef for Global { #[lang = "exchange_malloc"] #[inline] unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { - if size == 0 { - align as *mut u8 - } else { - let layout = Layout::from_size_align_unchecked(size, align); - match Global.alloc(layout) { - Ok((ptr, _)) => ptr.as_ptr(), - Err(_) => handle_alloc_error(layout), - } + let layout = Layout::from_size_align_unchecked(size, align); + match Global.alloc(layout, AllocInit::Uninitialized) { + Ok((ptr, _)) => ptr.as_ptr(), + Err(_) => handle_alloc_error(layout), } } @@ -239,11 +287,8 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) { let size = size_of_val(ptr.as_ref()); let align = min_align_of_val(ptr.as_ref()); - // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary. - if size != 0 { - let layout = Layout::from_size_align_unchecked(size, align); - Global.dealloc(ptr.cast().into(), layout); - } + let layout = Layout::from_size_align_unchecked(size, align); + Global.dealloc(ptr.cast().into(), layout) } /// Abort on memory allocation error or failure. 
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs index 55944398e16..6a2130a7192 100644 --- a/src/liballoc/alloc/tests.rs +++ b/src/liballoc/alloc/tests.rs @@ -8,8 +8,9 @@ use test::Bencher; fn allocate_zeroed() { unsafe { let layout = Layout::from_size_align(1024, 1).unwrap(); - let (ptr, _) = - Global.alloc_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout)); + let (ptr, _) = Global + .alloc(layout.clone(), AllocInit::Zeroed) + .unwrap_or_else(|_| handle_alloc_error(layout)); let mut i = ptr.cast::<u8>().as_ptr(); let end = i.add(layout.size()); diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 36641284a76..9690e311e96 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -146,7 +146,7 @@ use core::ptr::{self, NonNull, Unique}; use core::slice; use core::task::{Context, Poll}; -use crate::alloc::{self, AllocRef, Global}; +use crate::alloc::{self, AllocInit, AllocRef, Global}; use crate::raw_vec::RawVec; use crate::str::from_boxed_utf8_unchecked; use crate::vec::Vec; @@ -196,14 +196,12 @@ impl<T> Box<T> { #[unstable(feature = "new_uninit", issue = "63291")] pub fn new_uninit() -> Box<mem::MaybeUninit<T>> { let layout = alloc::Layout::new::<mem::MaybeUninit<T>>(); - unsafe { - let ptr = if layout.size() == 0 { - NonNull::dangling() - } else { - Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast() - }; - Box::from_raw(ptr.as_ptr()) - } + let ptr = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| alloc::handle_alloc_error(layout)) + .0 + .cast(); + unsafe { Box::from_raw(ptr.as_ptr()) } } /// Constructs a new `Box` with uninitialized contents, with the memory @@ -226,11 +224,13 @@ impl<T> Box<T> { /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed #[unstable(feature = "new_uninit", issue = "63291")] pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> { - unsafe { - let mut uninit = Self::new_uninit(); - ptr::write_bytes::<T>(uninit.as_mut_ptr(), 0, 1); - uninit - } + let layout = alloc::Layout::new::<mem::MaybeUninit<T>>(); + let ptr = Global + .alloc(layout, AllocInit::Zeroed) + .unwrap_or_else(|_| alloc::handle_alloc_error(layout)) + .0 + .cast(); + unsafe { Box::from_raw(ptr.as_ptr()) } } /// Constructs a new `Pin<Box<T>>`. 
If `T` does not implement `Unpin`, then @@ -266,14 +266,12 @@ impl<T> Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> { let layout = alloc::Layout::array::<mem::MaybeUninit<T>>(len).unwrap(); - unsafe { - let ptr = if layout.size() == 0 { - NonNull::dangling() - } else { - Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast() - }; - Box::from_raw(slice::from_raw_parts_mut(ptr.as_ptr(), len)) - } + let ptr = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| alloc::handle_alloc_error(layout)) + .0 + .cast(); + unsafe { Box::from_raw(slice::from_raw_parts_mut(ptr.as_ptr(), len)) } } } @@ -778,7 +776,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> { let buf = RawVec::with_capacity(len); unsafe { ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); - buf.into_box() + buf.into_box().assume_init() } } } diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index b31fec7f037..3a108adb218 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -2,12 +2,17 @@ #![doc(hidden)] use core::cmp; -use core::mem; +use core::mem::{self, MaybeUninit}; use core::ops::Drop; -use core::ptr::{self, NonNull, Unique}; +use core::ptr::Unique; use core::slice; -use crate::alloc::{handle_alloc_error, AllocErr, AllocRef, Global, Layout}; +use crate::alloc::{ + handle_alloc_error, AllocErr, + AllocInit::{self, *}, + AllocRef, Global, Layout, + ReallocPlacement::{self, *}, +}; use crate::boxed::Box; use crate::collections::TryReserveError::{self, *}; @@ -19,28 +24,22 @@ mod tests; /// involved. This type is excellent for building your own data structures like Vec and VecDeque. /// In particular: /// -/// * Produces `Unique::empty()` on zero-sized types. -/// * Produces `Unique::empty()` on zero-length allocations. /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics). /// * Guards against 32-bit systems allocating more than isize::MAX bytes. /// * Guards against overflowing your length. -/// * Aborts on OOM or calls `handle_alloc_error` as applicable. -/// * Avoids freeing `Unique::empty()`. +/// * Calls `handle_alloc_error` for fallible allocations. /// * Contains a `ptr::Unique` and thus endows the user with all related benefits. +/// * Uses the excess returned from the allocator to use the largest available capacity. /// /// This type does not in anyway inspect the memory that it manages. When dropped it *will* /// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec` /// to handle the actual things *stored* inside of a `RawVec`. /// -/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types. -/// This enables you to use capacity-growing logic catch the overflows in your length -/// that might occur with zero-sized types. -/// -/// The above means that you need to be careful when round-tripping this type with a +/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns +/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a /// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`, -/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity -/// field. This allows zero-sized types to not be special-cased by consumers of -/// this type. +/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity field. 
This allows +/// zero-sized types to not be special-cased by consumers of this type. #[allow(missing_debug_implementations)] pub struct RawVec<T, A: AllocRef = Global> { ptr: Unique<T>, @@ -52,49 +51,30 @@ impl<T, A: AllocRef> RawVec<T, A> { /// Like `new`, but parameterized over the choice of allocator for /// the returned `RawVec`. pub const fn new_in(a: A) -> Self { - let cap = if mem::size_of::<T>() == 0 { core::usize::MAX } else { 0 }; - - // `Unique::empty()` doubles as "unallocated" and "zero-sized allocation". - RawVec { ptr: Unique::empty(), cap, a } + // `cap: 0` means "unallocated". zero-sized allocations are handled by `AllocRef` + Self { ptr: Unique::empty(), cap: 0, a } } /// Like `with_capacity`, but parameterized over the choice of /// allocator for the returned `RawVec`. #[inline] pub fn with_capacity_in(capacity: usize, a: A) -> Self { - RawVec::allocate_in(capacity, false, a) + Self::allocate_in(capacity, Uninitialized, a) } /// Like `with_capacity_zeroed`, but parameterized over the choice /// of allocator for the returned `RawVec`. #[inline] pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self { - RawVec::allocate_in(capacity, true, a) + Self::allocate_in(capacity, Zeroed, a) } - fn allocate_in(mut capacity: usize, zeroed: bool, mut a: A) -> Self { - let elem_size = mem::size_of::<T>(); - - let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow()); - alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow()); + fn allocate_in(capacity: usize, init: AllocInit, mut a: A) -> Self { + let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow()); + alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow()); - // Handles ZSTs and `capacity == 0` alike. - let ptr = if alloc_size == 0 { - NonNull::<T>::dangling() - } else { - let align = mem::align_of::<T>(); - let layout = Layout::from_size_align(alloc_size, align).unwrap(); - let result = if zeroed { a.alloc_zeroed(layout) } else { a.alloc(layout) }; - match result { - Ok((ptr, size)) => { - capacity = size / elem_size; - ptr.cast() - } - Err(_) => handle_alloc_error(layout), - } - }; - - RawVec { ptr: ptr.into(), cap: capacity, a } + let (ptr, excess) = a.alloc(layout, init).unwrap_or_else(|_| handle_alloc_error(layout)); + Self { ptr: ptr.cast().into(), cap: Self::capacity_from_bytes(excess), a } } } @@ -138,13 +118,13 @@ impl<T> RawVec<T, Global> { /// Aborts on OOM. #[inline] pub fn with_capacity(capacity: usize) -> Self { - RawVec::allocate_in(capacity, false, Global) + Self::with_capacity_in(capacity, Global) } /// Like `with_capacity`, but guarantees the buffer is zeroed. #[inline] pub fn with_capacity_zeroed(capacity: usize) -> Self { - RawVec::allocate_in(capacity, true, Global) + Self::with_capacity_zeroed_in(capacity, Global) } } @@ -156,8 +136,9 @@ impl<T, A: AllocRef> RawVec<T, A> { /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`. /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems). /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed. + #[inline] pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self { - RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a } + Self { ptr: Unique::new_unchecked(ptr), cap: capacity, a } } } @@ -169,8 +150,9 @@ impl<T> RawVec<T, Global> { /// The `ptr` must be allocated (on the system heap), and with the given `capacity`. 
/// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems). /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed. + #[inline] pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self { - RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a: Global } + Self::from_raw_parts_in(ptr, capacity, Global) } /// Converts a `Box<[T]>` into a `RawVec<T>`. @@ -196,7 +178,7 @@ impl<T, A: AllocRef> RawVec<T, A> { /// This will always be `usize::MAX` if `T` is zero-sized. #[inline(always)] pub fn capacity(&self) -> usize { - if mem::size_of::<T>() == 0 { !0 } else { self.cap } + if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap } } /// Returns a shared reference to the allocator backing this `RawVec`. @@ -274,50 +256,10 @@ impl<T, A: AllocRef> RawVec<T, A> { #[inline(never)] #[cold] pub fn double(&mut self) { - unsafe { - let elem_size = mem::size_of::<T>(); - - // Since we set the capacity to `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - assert!(elem_size != 0, "capacity overflow"); - - let (ptr, new_cap) = match self.current_layout() { - Some(cur) => { - // Since we guarantee that we never allocate more than - // `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as - // a precondition, so this can't overflow. Additionally the - // alignment will never be too large as to "not be - // satisfiable", so `Layout::from_size_align` will always - // return `Some`. - // - // TL;DR, we bypass runtime checks due to dynamic assertions - // in this module, allowing us to use - // `from_size_align_unchecked`. - let new_cap = 2 * self.cap; - let new_size = new_cap * elem_size; - alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); - let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, new_size); - match ptr_res { - Ok((ptr, new_size)) => (ptr, new_size / elem_size), - Err(_) => handle_alloc_error(Layout::from_size_align_unchecked( - new_size, - cur.align(), - )), - } - } - None => { - // Skip to 4 because tiny `Vec`'s are dumb; but not if that - // would cause overflow. - let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; - let layout = Layout::array::<T>(new_cap).unwrap(); - match self.a.alloc(layout) { - Ok((ptr, new_size)) => (ptr, new_size / elem_size), - Err(_) => handle_alloc_error(layout), - } - } - }; - self.ptr = ptr.cast().into(); - self.cap = new_cap; + match self.grow(Double, MayMove, Uninitialized) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } } } @@ -336,99 +278,7 @@ impl<T, A: AllocRef> RawVec<T, A> { #[inline(never)] #[cold] pub fn double_in_place(&mut self) -> bool { - unsafe { - let elem_size = mem::size_of::<T>(); - let old_layout = match self.current_layout() { - Some(layout) => layout, - None => return false, // nothing to double - }; - - // Since we set the capacity to `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - assert!(elem_size != 0, "capacity overflow"); - - // Since we guarantee that we never allocate more than `isize::MAX` - // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so - // this can't overflow. - // - // Similarly to with `double` above, we can go straight to - // `Layout::from_size_align_unchecked` as we know this won't - // overflow and the alignment is sufficiently small. 
- let new_cap = 2 * self.cap; - let new_size = new_cap * elem_size; - alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); - match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) { - Ok(_) => { - // We can't directly divide `size`. - self.cap = new_cap; - true - } - Err(_) => false, - } - } - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub fn try_reserve_exact( - &mut self, - used_capacity: usize, - needed_extra_capacity: usize, - ) -> Result<(), TryReserveError> { - self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact) - } - - /// Ensures that the buffer contains at least enough space to hold - /// `used_capacity + needed_extra_capacity` elements. If it doesn't already, - /// will reallocate the minimum possible amount of memory necessary. - /// Generally this will be exactly the amount of memory necessary, - /// but in principle the allocator is free to give back more than - /// we asked for. - /// - /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe - /// code *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// * Panics if the requested capacity exceeds `usize::MAX` bytes. - /// * Panics on 32-bit platforms if the requested capacity exceeds - /// `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) { - match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) { - Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { .. }) => unreachable!(), - Ok(()) => { /* yay */ } - } - } - - /// Calculates the buffer's new size given that it'll hold `used_capacity + - /// needed_extra_capacity` elements. This logic is used in amortized reserve methods. - /// Returns `(new_capacity, new_alloc_size)`. - fn amortized_new_size( - &self, - used_capacity: usize, - needed_extra_capacity: usize, - ) -> Result<usize, TryReserveError> { - // Nothing we can really do about these checks, sadly. - let required_cap = - used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?; - // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. - let double_cap = self.cap * 2; - // `double_cap` guarantees exponential growth. - Ok(cmp::max(double_cap, required_cap)) - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub fn try_reserve( - &mut self, - used_capacity: usize, - needed_extra_capacity: usize, - ) -> Result<(), TryReserveError> { - self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized) + self.grow(Double, InPlace, Uninitialized).is_ok() } /// Ensures that the buffer contains at least enough space to hold @@ -484,12 +334,26 @@ impl<T, A: AllocRef> RawVec<T, A> { /// # } /// ``` pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) { - match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) { + match self.try_reserve(used_capacity, needed_extra_capacity) { Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { .. }) => unreachable!(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), Ok(()) => { /* yay */ } } } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. 
+ pub fn try_reserve( + &mut self, + used_capacity: usize, + needed_extra_capacity: usize, + ) -> Result<(), TryReserveError> { + if self.needs_to_grow(used_capacity, needed_extra_capacity) { + self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized) + } else { + Ok(()) + } + } + /// Attempts to ensure that the buffer contains at least enough space to hold /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have /// enough capacity, will reallocate in place enough space plus comfortable slack @@ -508,45 +372,54 @@ impl<T, A: AllocRef> RawVec<T, A> { /// * Panics on 32-bit platforms if the requested capacity exceeds /// `isize::MAX` bytes. pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool { - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. If the current `cap` is 0, we can't - // reallocate in place. - // Wrapping in case they give a bad `used_capacity` - let old_layout = match self.current_layout() { - Some(layout) => layout, - None => return false, - }; - if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity { - return false; - } + // This is more readable than putting this in one line: + // `!self.needs_to_grow(...) || self.grow(...).is_ok()` + if self.needs_to_grow(used_capacity, needed_extra_capacity) { + self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized) + .is_ok() + } else { + true + } + } - let new_cap = self - .amortized_new_size(used_capacity, needed_extra_capacity) - .unwrap_or_else(|_| capacity_overflow()); - - // Here, `cap < used_capacity + needed_extra_capacity <= new_cap` - // (regardless of whether `self.cap - used_capacity` wrapped). - // Therefore, we can safely call `grow_in_place`. - - let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0; - // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow()); - match self.a.grow_in_place( - NonNull::from(self.ptr).cast(), - old_layout, - new_layout.size(), - ) { - Ok(_) => { - self.cap = new_cap; - true - } - Err(_) => false, - } + /// Ensures that the buffer contains at least enough space to hold + /// `used_capacity + needed_extra_capacity` elements. If it doesn't already, + /// will reallocate the minimum possible amount of memory necessary. + /// Generally this will be exactly the amount of memory necessary, + /// but in principle the allocator is free to give back more than + /// we asked for. + /// + /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// # Panics + /// + /// * Panics if the requested capacity exceeds `usize::MAX` bytes. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) { + match self.try_reserve_exact(used_capacity, needed_extra_capacity) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. 
}) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } + } + } + + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + pub fn try_reserve_exact( + &mut self, + used_capacity: usize, + needed_extra_capacity: usize, + ) -> Result<(), TryReserveError> { + if self.needs_to_grow(used_capacity, needed_extra_capacity) { + self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized) + } else { + Ok(()) } } @@ -561,126 +434,134 @@ impl<T, A: AllocRef> RawVec<T, A> { /// /// Aborts on OOM. pub fn shrink_to_fit(&mut self, amount: usize) { - let elem_size = mem::size_of::<T>(); - - // Set the `cap` because they might be about to promote to a `Box<[T]>` - if elem_size == 0 { - self.cap = amount; - return; - } - - // This check is my waterloo; it's the only thing `Vec` wouldn't have to do. - assert!(self.cap >= amount, "Tried to shrink to a larger capacity"); - - if amount == 0 { - // We want to create a new zero-length vector within the - // same allocator. We use `ptr::write` to avoid an - // erroneous attempt to drop the contents, and we use - // `ptr::read` to sidestep condition against destructuring - // types that implement Drop. - - unsafe { - let a = ptr::read(&self.a as *const A); - self.dealloc_buffer(); - ptr::write(self, RawVec::new_in(a)); - } - } else if self.cap != amount { - unsafe { - // We know here that our `amount` is greater than zero. This - // implies, via the assert above, that capacity is also greater - // than zero, which means that we've got a current layout that - // "fits" - // - // We also know that `self.cap` is greater than `amount`, and - // consequently we don't need runtime checks for creating either - // layout. - let old_size = elem_size * self.cap; - let new_size = elem_size * amount; - let align = mem::align_of::<T>(); - let old_layout = Layout::from_size_align_unchecked(old_size, align); - match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, new_size) { - Ok((ptr, _)) => self.ptr = ptr.cast().into(), - Err(_) => { - handle_alloc_error(Layout::from_size_align_unchecked(new_size, align)) - } - } - } - self.cap = amount; + match self.shrink(amount, MayMove) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } } } } -enum Fallibility { - Fallible, - Infallible, +#[derive(Copy, Clone)] +enum Strategy { + Double, + Amortized { used_capacity: usize, needed_extra_capacity: usize }, + Exact { used_capacity: usize, needed_extra_capacity: usize }, } +use Strategy::*; -use Fallibility::*; - -enum ReserveStrategy { - Exact, - Amortized, -} +impl<T, A: AllocRef> RawVec<T, A> { + /// Returns if the buffer needs to grow to fulfill the needed extra capacity. + /// Mainly used to make inlining reserve-calls possible without inlining `grow`. + fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool { + needed_extra_capacity > self.capacity().wrapping_sub(used_capacity) + } -use ReserveStrategy::*; + fn capacity_from_bytes(excess: usize) -> usize { + match mem::size_of::<T>() { + 0 => usize::MAX, + elem_size => excess / elem_size, + } + } -impl<T, A: AllocRef> RawVec<T, A> { - fn reserve_internal( + /// Single method to handle all possibilities of growing the buffer. 
+ fn grow( &mut self, - used_capacity: usize, - needed_extra_capacity: usize, - fallibility: Fallibility, - strategy: ReserveStrategy, + strategy: Strategy, + placement: ReallocPlacement, + init: AllocInit, ) -> Result<(), TryReserveError> { let elem_size = mem::size_of::<T>(); + let new_layout = match strategy { + Double => unsafe { + if elem_size == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. + return Err(CapacityOverflow); + } + // Since we guarantee that we never allocate more than `isize::MAX` bytes, + // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow. + // Additionally the alignment will never be too large as to "not be satisfiable", + // so `Layout::from_size_align` will always return `Some`. + // + // TL;DR, we bypass runtime checks due to dynamic assertions in this module, + // allowing us to use `from_size_align_unchecked`. + let cap = if self.cap == 0 { + // Skip to 4 because tiny `Vec`'s are dumb; but not if that would cause overflow. + if elem_size > usize::MAX / 8 { 1 } else { 4 } + } else { + self.cap * 2 + }; + Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>()) + }, + Amortized { used_capacity, needed_extra_capacity } => { + // Nothing we can really do about these checks, sadly. + let required_cap = + used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?; + // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. + let double_cap = self.cap * 2; + // `double_cap` guarantees exponential growth. + let cap = cmp::max(double_cap, required_cap); + Layout::array::<T>(cap).map_err(|_| CapacityOverflow)? + } + Exact { used_capacity, needed_extra_capacity } => { + let cap = + used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?; + Layout::array::<T>(cap).map_err(|_| CapacityOverflow)? + } + }; - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. - // Wrapping in case they gave a bad `used_capacity`. - if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity { - return Ok(()); + let allocation = if let Some(old_layout) = self.current_layout() { + debug_assert!(old_layout.align() == new_layout.align()); + unsafe { + self.a.grow(self.ptr.cast().into(), old_layout, new_layout.size(), placement, init) } + } else { + match placement { + MayMove => self.a.alloc(new_layout, init), + InPlace => Err(AllocErr), + } + }; - // Nothing we can really do about these checks, sadly. - let new_cap = match strategy { - Exact => { - used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)? 
- } - Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?, - }; - let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?; + allocation + .map(|(ptr, excess)| { + self.ptr = ptr.cast().into(); + self.cap = Self::capacity_from_bytes(excess); + }) + .map_err(|_| TryReserveError::AllocError { layout: new_layout, non_exhaustive: () }) + } - alloc_guard(new_layout.size())?; + fn shrink( + &mut self, + amount: usize, + placement: ReallocPlacement, + ) -> Result<(), TryReserveError> { + assert!(amount <= self.cap, "Tried to shrink to a larger capacity"); - let res = match self.current_layout() { - Some(layout) => { - debug_assert!(new_layout.align() == layout.align()); - self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size()) - } - None => self.a.alloc(new_layout), - }; - - let (ptr, new_cap) = match (res, fallibility) { - (Err(AllocErr), Infallible) => handle_alloc_error(new_layout), - (Err(AllocErr), Fallible) => { - return Err(TryReserveError::AllocError { - layout: new_layout, - non_exhaustive: (), - }); - } - (Ok((ptr, new_size)), _) => (ptr, new_size / elem_size), - }; + let elem_size = mem::size_of::<T>(); + let old_layout = + if let Some(layout) = self.current_layout() { layout } else { return Ok(()) }; + let old_ptr = self.ptr.cast().into(); + let new_size = amount * elem_size; - self.ptr = ptr.cast().into(); - self.cap = new_cap; + let allocation = unsafe { + if amount == 0 && placement == MayMove { + self.dealloc_buffer(); + Ok((old_layout.dangling(), 0)) + } else { + self.a.shrink(old_ptr, old_layout, new_size, placement) + } + }; - Ok(()) - } + allocation + .map(|(ptr, excess)| { + self.ptr = ptr.cast().into(); + self.cap = Self::capacity_from_bytes(excess); + }) + .map_err(|_| TryReserveError::AllocError { + layout: unsafe { Layout::from_size_align_unchecked(new_size, old_layout.align()) }, + non_exhaustive: (), + }) } } @@ -689,29 +570,24 @@ impl<T> RawVec<T, Global> { /// /// Note that this will correctly reconstitute any `cap` changes /// that may have been performed. (See description of type for details.) - /// - /// # Undefined Behavior - /// - /// All elements of `RawVec<T, Global>` must be initialized. Notice that - /// the rules around uninitialized boxed values are not finalized yet, - /// but until they are, it is advisable to avoid them. - pub unsafe fn into_box(self) -> Box<[T]> { - // NOTE: not calling `capacity()` here; actually using the real `cap` field! - let slice = slice::from_raw_parts_mut(self.ptr(), self.cap); - let output: Box<[T]> = Box::from_raw(slice); - mem::forget(self); - output + pub fn into_box(self) -> Box<[MaybeUninit<T>]> { + unsafe { + // NOTE: not calling `capacity()` here; actually using the real `cap` field! + let slice = slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, self.cap); + let output = Box::from_raw(slice); + mem::forget(self); + output + } } } impl<T, A: AllocRef> RawVec<T, A> { /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. 
pub unsafe fn dealloc_buffer(&mut self) { - let elem_size = mem::size_of::<T>(); - if elem_size != 0 { - if let Some(layout) = self.current_layout() { - self.a.dealloc(NonNull::from(self.ptr).cast(), layout); - } + if let Some(layout) = self.current_layout() { + self.a.dealloc(self.ptr.cast().into(), layout); + self.ptr = Unique::empty(); + self.cap = 0; } } } @@ -719,9 +595,7 @@ impl<T, A: AllocRef> RawVec<T, A> { unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> { /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. fn drop(&mut self) { - unsafe { - self.dealloc_buffer(); - } + unsafe { self.dealloc_buffer() } } } diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs index 21a8a76d0a7..a2d6cc63c92 100644 --- a/src/liballoc/raw_vec/tests.rs +++ b/src/liballoc/raw_vec/tests.rs @@ -1,4 +1,5 @@ use super::*; +use core::ptr::NonNull; #[test] fn allocator_param() { @@ -20,12 +21,16 @@ fn allocator_param() { fuel: usize, } unsafe impl AllocRef for BoundedAlloc { - fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> { + fn alloc( + &mut self, + layout: Layout, + init: AllocInit, + ) -> Result<(NonNull<u8>, usize), AllocErr> { let size = layout.size(); if size > self.fuel { return Err(AllocErr); } - match Global.alloc(layout) { + match Global.alloc(layout, init) { ok @ Ok(_) => { self.fuel -= size; ok diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index e7f7608e676..495e196df40 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -252,7 +252,7 @@ use core::ptr::{self, NonNull}; use core::slice::{self, from_raw_parts_mut}; use core::usize; -use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout}; +use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout}; use crate::string::String; use crate::vec::Vec; @@ -936,7 +936,9 @@ impl<T: ?Sized> Rc<T> { let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align(); // Allocate for the layout. - let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout)); + let (mem, _) = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the RcBox let inner = mem_to_rcbox(mem.as_ptr()); diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index e8985e20256..048c89d1280 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -25,7 +25,7 @@ use core::sync::atomic; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use core::{isize, usize}; -use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout}; +use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout}; use crate::boxed::Box; use crate::rc::is_dangling; use crate::string::String; @@ -814,7 +814,9 @@ impl<T: ?Sized> Arc<T> { // reference (see #54908). 
let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align(); - let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout)); + let (mem, _) = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the ArcInner let inner = mem_to_arcinner(mem.as_ptr()); diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs index d159126f426..690ae84a5df 100644 --- a/src/liballoc/tests/heap.rs +++ b/src/liballoc/tests/heap.rs @@ -1,4 +1,4 @@ -use std::alloc::{AllocRef, Global, Layout, System}; +use std::alloc::{AllocInit, AllocRef, Global, Layout, System}; /// Issue #45955 and #62251. #[test] @@ -20,7 +20,13 @@ fn check_overalign_requests<T: AllocRef>(mut allocator: T) { unsafe { let pointers: Vec<_> = (0..iterations) .map(|_| { - allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap().0 + allocator + .alloc( + Layout::from_size_align(size, align).unwrap(), + AllocInit::Uninitialized, + ) + .unwrap() + .0 }) .collect(); for &ptr in &pointers { diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index 4769091183a..528a4f73293 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -679,7 +679,7 @@ impl<T> Vec<T> { self.shrink_to_fit(); let buf = ptr::read(&self.buf); mem::forget(self); - buf.into_box() + buf.into_box().assume_init() } } |
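To make the call-site changes concrete, a hypothetical before/after migration might look as follows. It targets the unstable `allocator_api` surface exactly as introduced by this commit (later nightlies renamed and reshaped it again), so treat it as a sketch rather than code that builds on a current toolchain.

```rust
// Hypothetical migration example against the API as of this commit
// (nightly-only `allocator_api`).
#![feature(allocator_api)]

use std::alloc::{handle_alloc_error, AllocInit, AllocRef, Global, Layout, ReallocPlacement};

fn main() {
    unsafe {
        let layout = Layout::from_size_align(64, 8).unwrap();

        // Previously `Global.alloc_zeroed(layout)`; zero-initialization is now
        // requested through `AllocInit::Zeroed` on the single `alloc` method.
        let (ptr, _size) = Global
            .alloc(layout, AllocInit::Zeroed)
            .unwrap_or_else(|_| handle_alloc_error(layout));

        // Previously `Global.realloc(ptr, layout, 128)`; growing and shrinking
        // are separate methods now, and the caller states whether the block may
        // move (`ReallocPlacement`) and how the new tail is initialized.
        let new_size = 128;
        let (ptr, _size) = Global
            .grow(ptr, layout, new_size, ReallocPlacement::MayMove, AllocInit::Uninitialized)
            .unwrap_or_else(|_| handle_alloc_error(layout));

        let new_layout = Layout::from_size_align(new_size, 8).unwrap();
        Global.dealloc(ptr, new_layout);
    }
}
```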
