| author | bors <bors@rust-lang.org> | 2020-04-02 06:08:35 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2020-04-02 06:08:35 +0000 |
| commit | 127a11a344eb59b5aea1464e98257c262dcba967 (patch) | |
| tree | 2bc294b4383cc4446add6e4a96f57161eea9f78c /src/liballoc | |
| parent | b793f403bdfbcc0ff3e15ed8177a81d79ba4a29b (diff) | |
| parent | 89ed59d8841a2b6057f61a3469c10bb2e6242160 (diff) | |
Auto merge of #70362 - TimDiekmann:alloc-overhaul, r=Amanieu
Overhaul of the `AllocRef` trait to match allocator-wg's latest consensus; Take 2

GitHub won't let me reopen #69889, so I'm making a new PR.

In addition to #69889, this fixes the unsoundness of `RawVec::into_box` when used with allocators that support over-allocation. It also uses `MemoryBlock` in `AllocRef` to unify the `_in_place` methods by passing `&mut MemoryBlock`. Additionally, `RawVec` now checks `size_of::<T>()` again and ignores every ZST: the internal capacity of `RawVec` is no longer used for ZSTs, as `into_box` now requires a length to be specified.

r? @Amanieu

fixes rust-lang/wg-allocators#38
fixes rust-lang/wg-allocators#41
fixes rust-lang/wg-allocators#44
fixes rust-lang/wg-allocators#51
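For readers skimming the patch below, here is a rough, non-normative sketch of what calling code against the revised trait could look like, based on the signatures visible in the hunks that follow. It assumes a nightly toolchain from around this commit with `#![feature(allocator_api)]`, and that `AllocInit`, `AllocRef`, and `ReallocPlacement` are re-exported from `std::alloc` the way the in-tree heap tests use them; this API is unstable and has continued to change since.

```rust
#![feature(allocator_api)]

use std::alloc::{handle_alloc_error, AllocInit, AllocRef, Global, Layout, ReallocPlacement};

fn main() {
    let layout = Layout::array::<u32>(8).unwrap();
    unsafe {
        // `alloc` now takes an `AllocInit` and returns a `MemoryBlock { ptr, size }`;
        // `size` may exceed `layout.size()` if the allocator over-allocates.
        let memory = Global
            .alloc(layout, AllocInit::Zeroed)
            .unwrap_or_else(|_| handle_alloc_error(layout));
        assert_eq!(*memory.ptr.as_ptr(), 0); // zero-initialized as requested

        // `grow` replaces the growing half of `realloc`: the placement says whether the
        // block may move, and the `AllocInit` controls the newly added bytes.
        let grown = Global
            .grow(
                memory.ptr,
                layout,
                layout.size() * 2,
                ReallocPlacement::MayMove,
                AllocInit::Uninitialized,
            )
            .unwrap_or_else(|_| handle_alloc_error(layout));

        // Deallocate with a layout that fits the block actually returned.
        let final_layout = Layout::from_size_align(grown.size, layout.align()).unwrap();
        Global.dealloc(grown.ptr, final_layout);
    }
}
```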
Diffstat (limited to 'src/liballoc')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/liballoc/alloc.rs | 112 |
| -rw-r--r-- | src/liballoc/alloc/tests.rs | 9 |
| -rw-r--r-- | src/liballoc/boxed.rs | 41 |
| -rw-r--r-- | src/liballoc/collections/btree/node.rs | 9 |
| -rw-r--r-- | src/liballoc/lib.rs | 1 |
| -rw-r--r-- | src/liballoc/raw_vec.rs | 655 |
| -rw-r--r-- | src/liballoc/raw_vec/tests.rs | 9 |
| -rw-r--r-- | src/liballoc/rc.rs | 8 |
| -rw-r--r-- | src/liballoc/sync.rs | 8 |
| -rw-r--r-- | src/liballoc/tests/heap.rs | 10 |
| -rw-r--r-- | src/liballoc/vec.rs | 3 |
11 files changed, 391 insertions, 474 deletions
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs index 9f82b2c6fa6..66575e3ef55 100644 --- a/src/liballoc/alloc.rs +++ b/src/liballoc/alloc.rs @@ -2,7 +2,7 @@ #![stable(feature = "alloc_module", since = "1.28.0")] -use core::intrinsics::{min_align_of_val, size_of_val}; +use core::intrinsics::{self, min_align_of_val, size_of_val}; use core::ptr::{NonNull, Unique}; use core::usize; @@ -165,11 +165,19 @@ pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 { #[unstable(feature = "allocator_api", issue = "32838")] unsafe impl AllocRef for Global { #[inline] - fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> { - if layout.size() == 0 { - Ok((layout.dangling(), 0)) - } else { - unsafe { NonNull::new(alloc(layout)).ok_or(AllocErr).map(|p| (p, layout.size())) } + fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> { + unsafe { + let size = layout.size(); + if size == 0 { + Ok(MemoryBlock { ptr: layout.dangling(), size: 0 }) + } else { + let raw_ptr = match init { + AllocInit::Uninitialized => alloc(layout), + AllocInit::Zeroed => alloc_zeroed(layout), + }; + let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?; + Ok(MemoryBlock { ptr, size }) + } } } @@ -181,32 +189,71 @@ unsafe impl AllocRef for Global { } #[inline] - unsafe fn realloc( + unsafe fn grow( &mut self, ptr: NonNull<u8>, layout: Layout, new_size: usize, - ) -> Result<(NonNull<u8>, usize), AllocErr> { - match (layout.size(), new_size) { - (0, 0) => Ok((layout.dangling(), 0)), - (0, _) => self.alloc(Layout::from_size_align_unchecked(new_size, layout.align())), - (_, 0) => { - self.dealloc(ptr, layout); - Ok((layout.dangling(), 0)) + placement: ReallocPlacement, + init: AllocInit, + ) -> Result<MemoryBlock, AllocErr> { + let size = layout.size(); + debug_assert!( + new_size >= size, + "`new_size` must be greater than or equal to `memory.size()`" + ); + + if size == new_size { + return Ok(MemoryBlock { ptr, size }); + } + + match placement { + ReallocPlacement::InPlace => Err(AllocErr), + ReallocPlacement::MayMove if layout.size() == 0 => { + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); + self.alloc(new_layout, init) + } + ReallocPlacement::MayMove => { + // `realloc` probably checks for `new_size > size` or something similar. 
+ intrinsics::assume(new_size > size); + let ptr = realloc(ptr.as_ptr(), layout, new_size); + let memory = + MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size }; + init.init_offset(memory, size); + Ok(memory) } - (_, _) => NonNull::new(realloc(ptr.as_ptr(), layout, new_size)) - .ok_or(AllocErr) - .map(|p| (p, new_size)), } } #[inline] - fn alloc_zeroed(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> { - if layout.size() == 0 { - Ok((layout.dangling(), 0)) - } else { - unsafe { - NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr).map(|p| (p, layout.size())) + unsafe fn shrink( + &mut self, + ptr: NonNull<u8>, + layout: Layout, + new_size: usize, + placement: ReallocPlacement, + ) -> Result<MemoryBlock, AllocErr> { + let size = layout.size(); + debug_assert!( + new_size <= size, + "`new_size` must be smaller than or equal to `memory.size()`" + ); + + if size == new_size { + return Ok(MemoryBlock { ptr, size }); + } + + match placement { + ReallocPlacement::InPlace => Err(AllocErr), + ReallocPlacement::MayMove if new_size == 0 => { + self.dealloc(ptr, layout); + Ok(MemoryBlock { ptr: layout.dangling(), size: 0 }) + } + ReallocPlacement::MayMove => { + // `realloc` probably checks for `new_size < size` or something similar. + intrinsics::assume(new_size < size); + let ptr = realloc(ptr.as_ptr(), layout, new_size); + Ok(MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size }) } } } @@ -218,14 +265,10 @@ unsafe impl AllocRef for Global { #[lang = "exchange_malloc"] #[inline] unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { - if size == 0 { - align as *mut u8 - } else { - let layout = Layout::from_size_align_unchecked(size, align); - match Global.alloc(layout) { - Ok((ptr, _)) => ptr.as_ptr(), - Err(_) => handle_alloc_error(layout), - } + let layout = Layout::from_size_align_unchecked(size, align); + match Global.alloc(layout, AllocInit::Uninitialized) { + Ok(memory) => memory.ptr.as_ptr(), + Err(_) => handle_alloc_error(layout), } } @@ -239,11 +282,8 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) { let size = size_of_val(ptr.as_ref()); let align = min_align_of_val(ptr.as_ref()); - // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary. - if size != 0 { - let layout = Layout::from_size_align_unchecked(size, align); - Global.dealloc(ptr.cast().into(), layout); - } + let layout = Layout::from_size_align_unchecked(size, align); + Global.dealloc(ptr.cast().into(), layout) } /// Abort on memory allocation error or failure. 
diff --git a/src/liballoc/alloc/tests.rs b/src/liballoc/alloc/tests.rs index 55944398e16..1ad40eca93b 100644 --- a/src/liballoc/alloc/tests.rs +++ b/src/liballoc/alloc/tests.rs @@ -8,16 +8,17 @@ use test::Bencher; fn allocate_zeroed() { unsafe { let layout = Layout::from_size_align(1024, 1).unwrap(); - let (ptr, _) = - Global.alloc_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout)); + let memory = Global + .alloc(layout.clone(), AllocInit::Zeroed) + .unwrap_or_else(|_| handle_alloc_error(layout)); - let mut i = ptr.cast::<u8>().as_ptr(); + let mut i = memory.ptr.cast::<u8>().as_ptr(); let end = i.add(layout.size()); while i < end { assert_eq!(*i, 0); i = i.offset(1); } - Global.dealloc(ptr, layout); + Global.dealloc(memory.ptr, layout); } } diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 36641284a76..5406956a528 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -143,10 +143,9 @@ use core::ops::{ }; use core::pin::Pin; use core::ptr::{self, NonNull, Unique}; -use core::slice; use core::task::{Context, Poll}; -use crate::alloc::{self, AllocRef, Global}; +use crate::alloc::{self, AllocInit, AllocRef, Global}; use crate::raw_vec::RawVec; use crate::str::from_boxed_utf8_unchecked; use crate::vec::Vec; @@ -196,14 +195,12 @@ impl<T> Box<T> { #[unstable(feature = "new_uninit", issue = "63291")] pub fn new_uninit() -> Box<mem::MaybeUninit<T>> { let layout = alloc::Layout::new::<mem::MaybeUninit<T>>(); - unsafe { - let ptr = if layout.size() == 0 { - NonNull::dangling() - } else { - Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast() - }; - Box::from_raw(ptr.as_ptr()) - } + let ptr = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| alloc::handle_alloc_error(layout)) + .ptr + .cast(); + unsafe { Box::from_raw(ptr.as_ptr()) } } /// Constructs a new `Box` with uninitialized contents, with the memory @@ -226,11 +223,13 @@ impl<T> Box<T> { /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed #[unstable(feature = "new_uninit", issue = "63291")] pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> { - unsafe { - let mut uninit = Self::new_uninit(); - ptr::write_bytes::<T>(uninit.as_mut_ptr(), 0, 1); - uninit - } + let layout = alloc::Layout::new::<mem::MaybeUninit<T>>(); + let ptr = Global + .alloc(layout, AllocInit::Zeroed) + .unwrap_or_else(|_| alloc::handle_alloc_error(layout)) + .ptr + .cast(); + unsafe { Box::from_raw(ptr.as_ptr()) } } /// Constructs a new `Pin<Box<T>>`. 
If `T` does not implement `Unpin`, then @@ -265,15 +264,7 @@ impl<T> Box<[T]> { /// ``` #[unstable(feature = "new_uninit", issue = "63291")] pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> { - let layout = alloc::Layout::array::<mem::MaybeUninit<T>>(len).unwrap(); - unsafe { - let ptr = if layout.size() == 0 { - NonNull::dangling() - } else { - Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast() - }; - Box::from_raw(slice::from_raw_parts_mut(ptr.as_ptr(), len)) - } + unsafe { RawVec::with_capacity(len).into_box(len) } } } @@ -778,7 +769,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> { let buf = RawVec::with_capacity(len); unsafe { ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); - buf.into_box() + buf.into_box(slice.len()).assume_init() } } } diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs index 6ebb98c42cd..11c14299573 100644 --- a/src/liballoc/collections/btree/node.rs +++ b/src/liballoc/collections/btree/node.rs @@ -1142,7 +1142,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker:: (*left_node.as_leaf_mut()).len += right_len as u16 + 1; - if self.node.height > 1 { + let layout = if self.node.height > 1 { ptr::copy_nonoverlapping( right_node.cast_unchecked().as_internal().edges.as_ptr(), left_node @@ -1159,10 +1159,11 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker:: .correct_parent_link(); } - Global.dealloc(right_node.node.cast(), Layout::new::<InternalNode<K, V>>()); + Layout::new::<InternalNode<K, V>>() } else { - Global.dealloc(right_node.node.cast(), Layout::new::<LeafNode<K, V>>()); - } + Layout::new::<LeafNode<K, V>>() + }; + Global.dealloc(right_node.node.cast(), layout); Handle::new_edge(self.node, self.idx) } diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 5857b79d5ee..121c1cde548 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -100,6 +100,7 @@ #![feature(lang_items)] #![feature(libc)] #![cfg_attr(not(bootstrap), feature(negative_impls))] +#![feature(new_uninit)] #![feature(nll)] #![feature(optin_builtin_traits)] #![feature(pattern)] diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index b31fec7f037..2bf40490e78 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -1,13 +1,19 @@ #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")] #![doc(hidden)] +use core::alloc::MemoryBlock; use core::cmp; -use core::mem; +use core::mem::{self, MaybeUninit}; use core::ops::Drop; -use core::ptr::{self, NonNull, Unique}; +use core::ptr::{NonNull, Unique}; use core::slice; -use crate::alloc::{handle_alloc_error, AllocErr, AllocRef, Global, Layout}; +use crate::alloc::{ + handle_alloc_error, AllocErr, + AllocInit::{self, *}, + AllocRef, Global, Layout, + ReallocPlacement::{self, *}, +}; use crate::boxed::Box; use crate::collections::TryReserveError::{self, *}; @@ -21,81 +27,26 @@ mod tests; /// /// * Produces `Unique::empty()` on zero-sized types. /// * Produces `Unique::empty()` on zero-length allocations. +/// * Avoids freeing `Unique::empty()`. /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics). /// * Guards against 32-bit systems allocating more than isize::MAX bytes. /// * Guards against overflowing your length. -/// * Aborts on OOM or calls `handle_alloc_error` as applicable. -/// * Avoids freeing `Unique::empty()`. +/// * Calls `handle_alloc_error` for fallible allocations. 
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits. +/// * Uses the excess returned from the allocator to use the largest available capacity. /// /// This type does not in anyway inspect the memory that it manages. When dropped it *will* /// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec` /// to handle the actual things *stored* inside of a `RawVec`. /// -/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types. -/// This enables you to use capacity-growing logic catch the overflows in your length -/// that might occur with zero-sized types. -/// -/// The above means that you need to be careful when round-tripping this type with a -/// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`, -/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity -/// field. This allows zero-sized types to not be special-cased by consumers of -/// this type. +/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns +/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a +/// `Box<[T]>`, since `capacity()` won't yield the length. #[allow(missing_debug_implementations)] pub struct RawVec<T, A: AllocRef = Global> { ptr: Unique<T>, cap: usize, - a: A, -} - -impl<T, A: AllocRef> RawVec<T, A> { - /// Like `new`, but parameterized over the choice of allocator for - /// the returned `RawVec`. - pub const fn new_in(a: A) -> Self { - let cap = if mem::size_of::<T>() == 0 { core::usize::MAX } else { 0 }; - - // `Unique::empty()` doubles as "unallocated" and "zero-sized allocation". - RawVec { ptr: Unique::empty(), cap, a } - } - - /// Like `with_capacity`, but parameterized over the choice of - /// allocator for the returned `RawVec`. - #[inline] - pub fn with_capacity_in(capacity: usize, a: A) -> Self { - RawVec::allocate_in(capacity, false, a) - } - - /// Like `with_capacity_zeroed`, but parameterized over the choice - /// of allocator for the returned `RawVec`. - #[inline] - pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self { - RawVec::allocate_in(capacity, true, a) - } - - fn allocate_in(mut capacity: usize, zeroed: bool, mut a: A) -> Self { - let elem_size = mem::size_of::<T>(); - - let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow()); - alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow()); - - // Handles ZSTs and `capacity == 0` alike. - let ptr = if alloc_size == 0 { - NonNull::<T>::dangling() - } else { - let align = mem::align_of::<T>(); - let layout = Layout::from_size_align(alloc_size, align).unwrap(); - let result = if zeroed { a.alloc_zeroed(layout) } else { a.alloc(layout) }; - match result { - Ok((ptr, size)) => { - capacity = size / elem_size; - ptr.cast() - } - Err(_) => handle_alloc_error(layout), - } - }; - - RawVec { ptr: ptr.into(), cap: capacity, a } - } + alloc: A, } impl<T> RawVec<T, Global> { @@ -138,39 +89,26 @@ impl<T> RawVec<T, Global> { /// Aborts on OOM. #[inline] pub fn with_capacity(capacity: usize) -> Self { - RawVec::allocate_in(capacity, false, Global) + Self::with_capacity_in(capacity, Global) } /// Like `with_capacity`, but guarantees the buffer is zeroed. 
#[inline] pub fn with_capacity_zeroed(capacity: usize) -> Self { - RawVec::allocate_in(capacity, true, Global) + Self::with_capacity_zeroed_in(capacity, Global) } -} -impl<T, A: AllocRef> RawVec<T, A> { - /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator. - /// - /// # Undefined Behavior - /// - /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`. - /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems). - /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed. - pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self { - RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a } - } -} - -impl<T> RawVec<T, Global> { /// Reconstitutes a `RawVec` from a pointer and capacity. /// - /// # Undefined Behavior + /// # Safety /// /// The `ptr` must be allocated (on the system heap), and with the given `capacity`. - /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems). + /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit + /// systems). ZST vectors may have a capacity up to `usize::MAX`. /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed. + #[inline] pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self { - RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a: Global } + Self::from_raw_parts_in(ptr, capacity, Global) } /// Converts a `Box<[T]>` into a `RawVec<T>`. @@ -184,6 +122,56 @@ impl<T> RawVec<T, Global> { } impl<T, A: AllocRef> RawVec<T, A> { + /// Like `new`, but parameterized over the choice of allocator for + /// the returned `RawVec`. + pub const fn new_in(alloc: A) -> Self { + // `cap: 0` means "unallocated". zero-sized types are ignored. + Self { ptr: Unique::empty(), cap: 0, alloc } + } + + /// Like `with_capacity`, but parameterized over the choice of + /// allocator for the returned `RawVec`. + #[inline] + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self::allocate_in(capacity, Uninitialized, alloc) + } + + /// Like `with_capacity_zeroed`, but parameterized over the choice + /// of allocator for the returned `RawVec`. + #[inline] + pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self { + Self::allocate_in(capacity, Zeroed, alloc) + } + + fn allocate_in(capacity: usize, init: AllocInit, mut alloc: A) -> Self { + if mem::size_of::<T>() == 0 { + Self::new_in(alloc) + } else { + let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow()); + alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow()); + + let memory = alloc.alloc(layout, init).unwrap_or_else(|_| handle_alloc_error(layout)); + Self { + ptr: memory.ptr.cast().into(), + cap: Self::capacity_from_bytes(memory.size), + alloc, + } + } + } + + /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator. + /// + /// # Safety + /// + /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`. + /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit + /// systems). ZST vectors may have a capacity up to `usize::MAX`. + /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed. + #[inline] + pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self { + Self { ptr: Unique::new_unchecked(ptr), cap: capacity, alloc: a } + } + /// Gets a raw pointer to the start of the allocation. 
Note that this is /// `Unique::empty()` if `capacity == 0` or `T` is zero-sized. In the former case, you must /// be careful. @@ -196,21 +184,21 @@ impl<T, A: AllocRef> RawVec<T, A> { /// This will always be `usize::MAX` if `T` is zero-sized. #[inline(always)] pub fn capacity(&self) -> usize { - if mem::size_of::<T>() == 0 { !0 } else { self.cap } + if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap } } /// Returns a shared reference to the allocator backing this `RawVec`. pub fn alloc(&self) -> &A { - &self.a + &self.alloc } /// Returns a mutable reference to the allocator backing this `RawVec`. pub fn alloc_mut(&mut self) -> &mut A { - &mut self.a + &mut self.alloc } - fn current_layout(&self) -> Option<Layout> { - if self.cap == 0 { + fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> { + if mem::size_of::<T>() == 0 || self.cap == 0 { None } else { // We have an allocated chunk of memory, so we can bypass runtime @@ -218,7 +206,8 @@ impl<T, A: AllocRef> RawVec<T, A> { unsafe { let align = mem::align_of::<T>(); let size = mem::size_of::<T>() * self.cap; - Some(Layout::from_size_align_unchecked(size, align)) + let layout = Layout::from_size_align_unchecked(size, align); + Some((self.ptr.cast().into(), layout)) } } } @@ -274,50 +263,10 @@ impl<T, A: AllocRef> RawVec<T, A> { #[inline(never)] #[cold] pub fn double(&mut self) { - unsafe { - let elem_size = mem::size_of::<T>(); - - // Since we set the capacity to `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - assert!(elem_size != 0, "capacity overflow"); - - let (ptr, new_cap) = match self.current_layout() { - Some(cur) => { - // Since we guarantee that we never allocate more than - // `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as - // a precondition, so this can't overflow. Additionally the - // alignment will never be too large as to "not be - // satisfiable", so `Layout::from_size_align` will always - // return `Some`. - // - // TL;DR, we bypass runtime checks due to dynamic assertions - // in this module, allowing us to use - // `from_size_align_unchecked`. - let new_cap = 2 * self.cap; - let new_size = new_cap * elem_size; - alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); - let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, new_size); - match ptr_res { - Ok((ptr, new_size)) => (ptr, new_size / elem_size), - Err(_) => handle_alloc_error(Layout::from_size_align_unchecked( - new_size, - cur.align(), - )), - } - } - None => { - // Skip to 4 because tiny `Vec`'s are dumb; but not if that - // would cause overflow. - let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; - let layout = Layout::array::<T>(new_cap).unwrap(); - match self.a.alloc(layout) { - Ok((ptr, new_size)) => (ptr, new_size / elem_size), - Err(_) => handle_alloc_error(layout), - } - } - }; - self.ptr = ptr.cast().into(); - self.cap = new_cap; + match self.grow(Double, MayMove, Uninitialized) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } } } @@ -336,99 +285,7 @@ impl<T, A: AllocRef> RawVec<T, A> { #[inline(never)] #[cold] pub fn double_in_place(&mut self) -> bool { - unsafe { - let elem_size = mem::size_of::<T>(); - let old_layout = match self.current_layout() { - Some(layout) => layout, - None => return false, // nothing to double - }; - - // Since we set the capacity to `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. 
- assert!(elem_size != 0, "capacity overflow"); - - // Since we guarantee that we never allocate more than `isize::MAX` - // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so - // this can't overflow. - // - // Similarly to with `double` above, we can go straight to - // `Layout::from_size_align_unchecked` as we know this won't - // overflow and the alignment is sufficiently small. - let new_cap = 2 * self.cap; - let new_size = new_cap * elem_size; - alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); - match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) { - Ok(_) => { - // We can't directly divide `size`. - self.cap = new_cap; - true - } - Err(_) => false, - } - } - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub fn try_reserve_exact( - &mut self, - used_capacity: usize, - needed_extra_capacity: usize, - ) -> Result<(), TryReserveError> { - self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact) - } - - /// Ensures that the buffer contains at least enough space to hold - /// `used_capacity + needed_extra_capacity` elements. If it doesn't already, - /// will reallocate the minimum possible amount of memory necessary. - /// Generally this will be exactly the amount of memory necessary, - /// but in principle the allocator is free to give back more than - /// we asked for. - /// - /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe - /// code *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// * Panics if the requested capacity exceeds `usize::MAX` bytes. - /// * Panics on 32-bit platforms if the requested capacity exceeds - /// `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) { - match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) { - Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { .. }) => unreachable!(), - Ok(()) => { /* yay */ } - } - } - - /// Calculates the buffer's new size given that it'll hold `used_capacity + - /// needed_extra_capacity` elements. This logic is used in amortized reserve methods. - /// Returns `(new_capacity, new_alloc_size)`. - fn amortized_new_size( - &self, - used_capacity: usize, - needed_extra_capacity: usize, - ) -> Result<usize, TryReserveError> { - // Nothing we can really do about these checks, sadly. - let required_cap = - used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?; - // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. - let double_cap = self.cap * 2; - // `double_cap` guarantees exponential growth. - Ok(cmp::max(double_cap, required_cap)) - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. 
- pub fn try_reserve( - &mut self, - used_capacity: usize, - needed_extra_capacity: usize, - ) -> Result<(), TryReserveError> { - self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized) + self.grow(Double, InPlace, Uninitialized).is_ok() } /// Ensures that the buffer contains at least enough space to hold @@ -484,12 +341,26 @@ impl<T, A: AllocRef> RawVec<T, A> { /// # } /// ``` pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) { - match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) { + match self.try_reserve(used_capacity, needed_extra_capacity) { Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { .. }) => unreachable!(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), Ok(()) => { /* yay */ } } } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. + pub fn try_reserve( + &mut self, + used_capacity: usize, + needed_extra_capacity: usize, + ) -> Result<(), TryReserveError> { + if self.needs_to_grow(used_capacity, needed_extra_capacity) { + self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized) + } else { + Ok(()) + } + } + /// Attempts to ensure that the buffer contains at least enough space to hold /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have /// enough capacity, will reallocate in place enough space plus comfortable slack @@ -508,45 +379,54 @@ impl<T, A: AllocRef> RawVec<T, A> { /// * Panics on 32-bit platforms if the requested capacity exceeds /// `isize::MAX` bytes. pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool { - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. If the current `cap` is 0, we can't - // reallocate in place. - // Wrapping in case they give a bad `used_capacity` - let old_layout = match self.current_layout() { - Some(layout) => layout, - None => return false, - }; - if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity { - return false; - } + // This is more readable than putting this in one line: + // `!self.needs_to_grow(...) || self.grow(...).is_ok()` + if self.needs_to_grow(used_capacity, needed_extra_capacity) { + self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized) + .is_ok() + } else { + true + } + } - let new_cap = self - .amortized_new_size(used_capacity, needed_extra_capacity) - .unwrap_or_else(|_| capacity_overflow()); - - // Here, `cap < used_capacity + needed_extra_capacity <= new_cap` - // (regardless of whether `self.cap - used_capacity` wrapped). - // Therefore, we can safely call `grow_in_place`. - - let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0; - // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow()); - match self.a.grow_in_place( - NonNull::from(self.ptr).cast(), - old_layout, - new_layout.size(), - ) { - Ok(_) => { - self.cap = new_cap; - true - } - Err(_) => false, - } + /// Ensures that the buffer contains at least enough space to hold + /// `used_capacity + needed_extra_capacity` elements. If it doesn't already, + /// will reallocate the minimum possible amount of memory necessary. 
+ /// Generally this will be exactly the amount of memory necessary, + /// but in principle the allocator is free to give back more than + /// we asked for. + /// + /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// # Panics + /// + /// * Panics if the requested capacity exceeds `usize::MAX` bytes. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) { + match self.try_reserve_exact(used_capacity, needed_extra_capacity) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } + } + } + + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + pub fn try_reserve_exact( + &mut self, + used_capacity: usize, + needed_extra_capacity: usize, + ) -> Result<(), TryReserveError> { + if self.needs_to_grow(used_capacity, needed_extra_capacity) { + self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized) + } else { + Ok(()) } } @@ -561,166 +441,157 @@ impl<T, A: AllocRef> RawVec<T, A> { /// /// Aborts on OOM. pub fn shrink_to_fit(&mut self, amount: usize) { - let elem_size = mem::size_of::<T>(); - - // Set the `cap` because they might be about to promote to a `Box<[T]>` - if elem_size == 0 { - self.cap = amount; - return; - } - - // This check is my waterloo; it's the only thing `Vec` wouldn't have to do. - assert!(self.cap >= amount, "Tried to shrink to a larger capacity"); - - if amount == 0 { - // We want to create a new zero-length vector within the - // same allocator. We use `ptr::write` to avoid an - // erroneous attempt to drop the contents, and we use - // `ptr::read` to sidestep condition against destructuring - // types that implement Drop. - - unsafe { - let a = ptr::read(&self.a as *const A); - self.dealloc_buffer(); - ptr::write(self, RawVec::new_in(a)); - } - } else if self.cap != amount { - unsafe { - // We know here that our `amount` is greater than zero. This - // implies, via the assert above, that capacity is also greater - // than zero, which means that we've got a current layout that - // "fits" - // - // We also know that `self.cap` is greater than `amount`, and - // consequently we don't need runtime checks for creating either - // layout. - let old_size = elem_size * self.cap; - let new_size = elem_size * amount; - let align = mem::align_of::<T>(); - let old_layout = Layout::from_size_align_unchecked(old_size, align); - match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, new_size) { - Ok((ptr, _)) => self.ptr = ptr.cast().into(), - Err(_) => { - handle_alloc_error(Layout::from_size_align_unchecked(new_size, align)) - } - } - } - self.cap = amount; + match self.shrink(amount, MayMove) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. 
}) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } } } } -enum Fallibility { - Fallible, - Infallible, +#[derive(Copy, Clone)] +enum Strategy { + Double, + Amortized { used_capacity: usize, needed_extra_capacity: usize }, + Exact { used_capacity: usize, needed_extra_capacity: usize }, } +use Strategy::*; -use Fallibility::*; +impl<T, A: AllocRef> RawVec<T, A> { + /// Returns if the buffer needs to grow to fulfill the needed extra capacity. + /// Mainly used to make inlining reserve-calls possible without inlining `grow`. + fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool { + needed_extra_capacity > self.capacity().wrapping_sub(used_capacity) + } -enum ReserveStrategy { - Exact, - Amortized, -} + fn capacity_from_bytes(excess: usize) -> usize { + debug_assert_ne!(mem::size_of::<T>(), 0); + excess / mem::size_of::<T>() + } -use ReserveStrategy::*; + fn set_memory(&mut self, memory: MemoryBlock) { + self.ptr = memory.ptr.cast().into(); + self.cap = Self::capacity_from_bytes(memory.size); + } -impl<T, A: AllocRef> RawVec<T, A> { - fn reserve_internal( + /// Single method to handle all possibilities of growing the buffer. + fn grow( &mut self, - used_capacity: usize, - needed_extra_capacity: usize, - fallibility: Fallibility, - strategy: ReserveStrategy, + strategy: Strategy, + placement: ReallocPlacement, + init: AllocInit, ) -> Result<(), TryReserveError> { let elem_size = mem::size_of::<T>(); + if elem_size == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. + return Err(CapacityOverflow); + } + let new_layout = match strategy { + Double => unsafe { + // Since we guarantee that we never allocate more than `isize::MAX` bytes, + // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow. + // Additionally the alignment will never be too large as to "not be satisfiable", + // so `Layout::from_size_align` will always return `Some`. + // + // TL;DR, we bypass runtime checks due to dynamic assertions in this module, + // allowing us to use `from_size_align_unchecked`. + let cap = if self.cap == 0 { + // Skip to 4 because tiny `Vec`'s are dumb; but not if that would cause overflow. + if elem_size > usize::MAX / 8 { 1 } else { 4 } + } else { + self.cap * 2 + }; + Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>()) + }, + Amortized { used_capacity, needed_extra_capacity } => { + // Nothing we can really do about these checks, sadly. + let required_cap = + used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?; + // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. + let double_cap = self.cap * 2; + // `double_cap` guarantees exponential growth. + let cap = cmp::max(double_cap, required_cap); + Layout::array::<T>(cap).map_err(|_| CapacityOverflow)? + } + Exact { used_capacity, needed_extra_capacity } => { + let cap = + used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?; + Layout::array::<T>(cap).map_err(|_| CapacityOverflow)? + } + }; + alloc_guard(new_layout.size())?; - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. - // Wrapping in case they gave a bad `used_capacity`. 
- if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity { - return Ok(()); + let memory = if let Some((ptr, old_layout)) = self.current_memory() { + debug_assert_eq!(old_layout.align(), new_layout.align()); + unsafe { + self.alloc + .grow(ptr, old_layout, new_layout.size(), placement, init) + .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? + } + } else { + match placement { + MayMove => self.alloc.alloc(new_layout, init), + InPlace => Err(AllocErr), } + .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? + }; + self.set_memory(memory); + Ok(()) + } - // Nothing we can really do about these checks, sadly. - let new_cap = match strategy { - Exact => { - used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)? - } - Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?, - }; - let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?; + fn shrink( + &mut self, + amount: usize, + placement: ReallocPlacement, + ) -> Result<(), TryReserveError> { + assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity"); - alloc_guard(new_layout.size())?; + let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; + let new_size = amount * mem::size_of::<T>(); - let res = match self.current_layout() { - Some(layout) => { - debug_assert!(new_layout.align() == layout.align()); - self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size()) - } - None => self.a.alloc(new_layout), - }; - - let (ptr, new_cap) = match (res, fallibility) { - (Err(AllocErr), Infallible) => handle_alloc_error(new_layout), - (Err(AllocErr), Fallible) => { - return Err(TryReserveError::AllocError { - layout: new_layout, - non_exhaustive: (), - }); + let memory = unsafe { + self.alloc.shrink(ptr, layout, new_size, placement).map_err(|_| { + TryReserveError::AllocError { + layout: Layout::from_size_align_unchecked(new_size, layout.align()), + non_exhaustive: (), } - (Ok((ptr, new_size)), _) => (ptr, new_size / elem_size), - }; - - self.ptr = ptr.cast().into(); - self.cap = new_cap; - - Ok(()) - } + })? + }; + self.set_memory(memory); + Ok(()) } } impl<T> RawVec<T, Global> { - /// Converts the entire buffer into `Box<[T]>`. + /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`. /// /// Note that this will correctly reconstitute any `cap` changes /// that may have been performed. (See description of type for details.) /// - /// # Undefined Behavior + /// # Safety /// - /// All elements of `RawVec<T, Global>` must be initialized. Notice that - /// the rules around uninitialized boxed values are not finalized yet, - /// but until they are, it is advisable to avoid them. - pub unsafe fn into_box(self) -> Box<[T]> { + /// `shrink_to_fit(len)` must be called immediately prior to calling this function. This + /// implies, that `len` must be smaller than or equal to `self.capacity()`. + pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> { + debug_assert!( + len <= self.capacity(), + "`len` must be smaller than or equal to `self.capacity()`" + ); + // NOTE: not calling `capacity()` here; actually using the real `cap` field! 
- let slice = slice::from_raw_parts_mut(self.ptr(), self.cap); - let output: Box<[T]> = Box::from_raw(slice); + let slice = slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, len); + let output = Box::from_raw(slice); mem::forget(self); output } } -impl<T, A: AllocRef> RawVec<T, A> { - /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. - pub unsafe fn dealloc_buffer(&mut self) { - let elem_size = mem::size_of::<T>(); - if elem_size != 0 { - if let Some(layout) = self.current_layout() { - self.a.dealloc(NonNull::from(self.ptr).cast(), layout); - } - } - } -} - unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> { /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. fn drop(&mut self) { - unsafe { - self.dealloc_buffer(); + if let Some((ptr, layout)) = self.current_memory() { + unsafe { self.alloc.dealloc(ptr, layout) } } } } diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs index 21a8a76d0a7..e7ab8a305d2 100644 --- a/src/liballoc/raw_vec/tests.rs +++ b/src/liballoc/raw_vec/tests.rs @@ -12,6 +12,7 @@ fn allocator_param() { // // Instead, this just checks that the `RawVec` methods do at // least go through the Allocator API when it reserves + // storage. // A dumb allocator that consumes a fixed amount of fuel @@ -20,12 +21,12 @@ fn allocator_param() { fuel: usize, } unsafe impl AllocRef for BoundedAlloc { - fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> { + fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> { let size = layout.size(); if size > self.fuel { return Err(AllocErr); } - match Global.alloc(layout) { + match Global.alloc(layout, init) { ok @ Ok(_) => { self.fuel -= size; ok @@ -40,9 +41,9 @@ fn allocator_param() { let a = BoundedAlloc { fuel: 500 }; let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a); - assert_eq!(v.a.fuel, 450); + assert_eq!(v.alloc.fuel, 450); v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) - assert_eq!(v.a.fuel, 250); + assert_eq!(v.alloc.fuel, 250); } #[test] diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index e7f7608e676..6a78a7398a6 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -252,7 +252,7 @@ use core::ptr::{self, NonNull}; use core::slice::{self, from_raw_parts_mut}; use core::usize; -use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout}; +use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout}; use crate::string::String; use crate::vec::Vec; @@ -936,10 +936,12 @@ impl<T: ?Sized> Rc<T> { let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align(); // Allocate for the layout. 
- let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout)); + let mem = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the RcBox - let inner = mem_to_rcbox(mem.as_ptr()); + let inner = mem_to_rcbox(mem.ptr.as_ptr()); debug_assert_eq!(Layout::for_value(&*inner), layout); ptr::write(&mut (*inner).strong, Cell::new(1)); diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index e8985e20256..111a7651b5e 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -25,7 +25,7 @@ use core::sync::atomic; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use core::{isize, usize}; -use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout}; +use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout}; use crate::boxed::Box; use crate::rc::is_dangling; use crate::string::String; @@ -814,10 +814,12 @@ impl<T: ?Sized> Arc<T> { // reference (see #54908). let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align(); - let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout)); + let mem = Global + .alloc(layout, AllocInit::Uninitialized) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the ArcInner - let inner = mem_to_arcinner(mem.as_ptr()); + let inner = mem_to_arcinner(mem.ptr.as_ptr()); debug_assert_eq!(Layout::for_value(&*inner), layout); ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs index d159126f426..62f062b83d7 100644 --- a/src/liballoc/tests/heap.rs +++ b/src/liballoc/tests/heap.rs @@ -1,4 +1,4 @@ -use std::alloc::{AllocRef, Global, Layout, System}; +use std::alloc::{AllocInit, AllocRef, Global, Layout, System}; /// Issue #45955 and #62251. #[test] @@ -20,7 +20,13 @@ fn check_overalign_requests<T: AllocRef>(mut allocator: T) { unsafe { let pointers: Vec<_> = (0..iterations) .map(|_| { - allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap().0 + allocator + .alloc( + Layout::from_size_align(size, align).unwrap(), + AllocInit::Uninitialized, + ) + .unwrap() + .ptr }) .collect(); for &ptr in &pointers { diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index e171edef736..96a6399d051 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -679,8 +679,9 @@ impl<T> Vec<T> { unsafe { self.shrink_to_fit(); let buf = ptr::read(&self.buf); + let len = self.len(); mem::forget(self); - buf.into_box() + buf.into_box(len).assume_init() } } |
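Not part of the patch, but as a minimal sketch of the caller-visible effect of the `into_box(len)` change: `Box::new_uninit_slice` is now implemented as `RawVec::with_capacity(len).into_box(len)`, and the returned `Box<[MaybeUninit<T>]>` has to be initialized before `assume_init`, mirroring what `Vec::into_boxed_slice` now does internally. This assumes the unstable `new_uninit` feature that the patch enables inside liballoc.

```rust
#![feature(new_uninit)]

fn main() {
    // Uninitialized storage for three u32s; nothing has been written yet.
    let mut slots = Box::<[u32]>::new_uninit_slice(3);
    for (i, slot) in slots.iter_mut().enumerate() {
        *slot = std::mem::MaybeUninit::new(i as u32);
    }
    // SAFETY: every element was initialized in the loop above.
    let values: Box<[u32]> = unsafe { slots.assume_init() };
    assert_eq!(&*values, &[0, 1, 2]);
}
```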
