Diffstat (limited to 'src/liballoc')
43 files changed, 4221 insertions, 6489 deletions
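The headline change below is the new `src/liballoc/alloc.rs`, which provides the stable free functions `alloc`, `dealloc`, `realloc`, and `alloc_zeroed` forwarding to the registered `#[global_allocator]`, plus `handle_alloc_error` for aborting on failure. As a minimal sketch of the stabilized entry points, adapted from the doc examples added in the diff (the explicit null check is our addition; the diff's own example omits it):

    use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

    fn main() {
        unsafe {
            // Layout carries the size and alignment the allocator must honor.
            let layout = Layout::new::<u16>();
            let ptr = alloc(layout);
            if ptr.is_null() {
                // Aborts through the #[lang = "oom"] hook installed by std.
                handle_alloc_error(layout);
            }
            *(ptr as *mut u16) = 42;
            assert_eq!(*(ptr as *mut u16), 42);
            dealloc(ptr, layout);
        }
    }
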
diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml index 0a265ee1376..1dad323769a 100644 --- a/src/liballoc/Cargo.toml +++ b/src/liballoc/Cargo.toml @@ -2,6 +2,8 @@ authors = ["The Rust Project Developers"] name = "alloc" version = "0.0.0" +autotests = false +autobenches = false [lib] name = "alloc" @@ -9,10 +11,10 @@ path = "lib.rs" [dependencies] core = { path = "../libcore" } -std_unicode = { path = "../libstd_unicode" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } [dev-dependencies] -rand = "0.3" +rand = "0.4" [[test]] name = "collectionstests" @@ -21,3 +23,8 @@ path = "../liballoc/tests/lib.rs" [[bench]] name = "collectionsbenches" path = "../liballoc/benches/lib.rs" + +[[bench]] +name = "vec_deque_append_bench" +path = "../liballoc/benches/vec_deque_append.rs" +harness = false diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs new file mode 100644 index 00000000000..c69b2fb5e1c --- /dev/null +++ b/src/liballoc/alloc.rs @@ -0,0 +1,263 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Memory allocation APIs + +#![stable(feature = "alloc_module", since = "1.28.0")] + +use core::intrinsics::{min_align_of_val, size_of_val}; +use core::ptr::{NonNull, Unique}; +use core::usize; + +#[stable(feature = "alloc_module", since = "1.28.0")] +#[doc(inline)] +pub use core::alloc::*; + +extern "Rust" { + #[allocator] + #[rustc_allocator_nounwind] + fn __rust_alloc(size: usize, align: usize) -> *mut u8; + #[rustc_allocator_nounwind] + fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize); + #[rustc_allocator_nounwind] + fn __rust_realloc(ptr: *mut u8, + old_size: usize, + align: usize, + new_size: usize) -> *mut u8; + #[rustc_allocator_nounwind] + fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8; +} + +/// The global memory allocator. +/// +/// This type implements the [`Alloc`] trait by forwarding calls +/// to the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Copy, Clone, Default, Debug)] +pub struct Global; + +/// Allocate memory with the global allocator. +/// +/// This function forwards calls to the [`GlobalAlloc::alloc`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `alloc` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::alloc`]. +/// +/// # Examples +/// +/// ``` +/// use std::alloc::{alloc, dealloc, Layout}; +/// +/// unsafe { +/// let layout = Layout::new::<u16>(); +/// let ptr = alloc(layout); +/// +/// *(ptr as *mut u16) = 42; +/// assert_eq!(*(ptr as *mut u16), 42); +/// +/// dealloc(ptr, layout); +/// } +/// ``` +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn alloc(layout: Layout) -> *mut u8 { + __rust_alloc(layout.size(), layout.align()) +} + +/// Deallocate memory with the global allocator. 
+/// +/// This function forwards calls to the [`GlobalAlloc::dealloc`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `dealloc` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::dealloc`]. +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) { + __rust_dealloc(ptr, layout.size(), layout.align()) +} + +/// Reallocate memory with the global allocator. +/// +/// This function forwards calls to the [`GlobalAlloc::realloc`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `realloc` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::realloc`]. +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + __rust_realloc(ptr, layout.size(), layout.align(), new_size) +} + +/// Allocate zero-initialized memory with the global allocator. +/// +/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `alloc_zeroed` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::alloc_zeroed`]. +/// +/// # Examples +/// +/// ``` +/// use std::alloc::{alloc_zeroed, dealloc, Layout}; +/// +/// unsafe { +/// let layout = Layout::new::<u16>(); +/// let ptr = alloc_zeroed(layout); +/// +/// assert_eq!(*(ptr as *mut u16), 0); +/// +/// dealloc(ptr, layout); +/// } +/// ``` +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 { + __rust_alloc_zeroed(layout.size(), layout.align()) +} + +#[unstable(feature = "allocator_api", issue = "32838")] +unsafe impl Alloc for Global { + #[inline] + unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { + NonNull::new(alloc(layout)).ok_or(AllocErr) + } + + #[inline] + unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) { + dealloc(ptr.as_ptr(), layout) + } + + #[inline] + unsafe fn realloc(&mut self, + ptr: NonNull<u8>, + layout: Layout, + new_size: usize) + -> Result<NonNull<u8>, AllocErr> + { + NonNull::new(realloc(ptr.as_ptr(), layout, new_size)).ok_or(AllocErr) + } + + #[inline] + unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { + NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr) + } +} + +/// The allocator for unique pointers. +// This function must not unwind. If it does, MIR codegen will fail. 
+#[cfg(not(test))] +#[lang = "exchange_malloc"] +#[inline] +unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { + if size == 0 { + align as *mut u8 + } else { + let layout = Layout::from_size_align_unchecked(size, align); + let ptr = alloc(layout); + if !ptr.is_null() { + ptr + } else { + handle_alloc_error(layout) + } + } +} + +#[cfg_attr(not(test), lang = "box_free")] +#[inline] +pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) { + let ptr = ptr.as_ptr(); + let size = size_of_val(&*ptr); + let align = min_align_of_val(&*ptr); + // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary. + if size != 0 { + let layout = Layout::from_size_align_unchecked(size, align); + dealloc(ptr as *mut u8, layout); + } +} + +/// Abort on memory allocation error or failure. +/// +/// Callers of memory allocation APIs wishing to abort computation +/// in response to an allocation error are encouraged to call this function, +/// rather than directly invoking `panic!` or similar. +/// +/// The default behavior of this function is to print a message to standard error +/// and abort the process. +/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`]. +/// +/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html +/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html +#[stable(feature = "global_alloc", since = "1.28.0")] +#[rustc_allocator_nounwind] +pub fn handle_alloc_error(layout: Layout) -> ! { + #[allow(improper_ctypes)] + extern "Rust" { + #[lang = "oom"] + fn oom_impl(layout: Layout) -> !; + } + unsafe { oom_impl(layout) } +} + +#[cfg(test)] +mod tests { + extern crate test; + use self::test::Bencher; + use boxed::Box; + use alloc::{Global, Alloc, Layout, handle_alloc_error}; + + #[test] + fn allocate_zeroed() { + unsafe { + let layout = Layout::from_size_align(1024, 1).unwrap(); + let ptr = Global.alloc_zeroed(layout.clone()) + .unwrap_or_else(|_| handle_alloc_error(layout)); + + let mut i = ptr.cast::<u8>().as_ptr(); + let end = i.offset(layout.size() as isize); + while i < end { + assert_eq!(*i, 0); + i = i.offset(1); + } + Global.dealloc(ptr, layout); + } + } + + #[bench] + fn alloc_owned_small(b: &mut Bencher) { + b.iter(|| { + let _: Box<_> = box 10; + }) + } +} diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs deleted file mode 100644 index c2a8f5f8ff9..00000000000 --- a/src/liballoc/allocator.rs +++ /dev/null @@ -1,1064 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![unstable(feature = "allocator_api", - reason = "the precise API and guarantees it provides may be tweaked \ - slightly, especially to possibly take into account the \ - types being stored to make room for a future \ - tracing garbage collector", - issue = "32838")] - -use core::cmp; -use core::fmt; -use core::mem; -use core::usize; -use core::ptr::{self, Unique}; - -/// Represents the combination of a starting address and -/// a total capacity of the returned block. 
-#[derive(Debug)] -pub struct Excess(pub *mut u8, pub usize); - -fn size_align<T>() -> (usize, usize) { - (mem::size_of::<T>(), mem::align_of::<T>()) -} - -/// Layout of a block of memory. -/// -/// An instance of `Layout` describes a particular layout of memory. -/// You build a `Layout` up as an input to give to an allocator. -/// -/// All layouts have an associated non-negative size and a -/// power-of-two alignment. -/// -/// (Note however that layouts are *not* required to have positive -/// size, even though many allocators require that all memory -/// requests have positive size. A caller to the `Alloc::alloc` -/// method must either ensure that conditions like this are met, or -/// use specific allocators with looser requirements.) -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Layout { - // size of the requested block of memory, measured in bytes. - size: usize, - - // alignment of the requested block of memory, measured in bytes. - // we ensure that this is always a power-of-two, because API's - // like `posix_memalign` require it and it is a reasonable - // constraint to impose on Layout constructors. - // - // (However, we do not analogously require `align >= sizeof(void*)`, - // even though that is *also* a requirement of `posix_memalign`.) - align: usize, -} - - -// FIXME: audit default implementations for overflow errors, -// (potentially switching to overflowing_add and -// overflowing_mul as necessary). - -impl Layout { - /// Constructs a `Layout` from a given `size` and `align`, - /// or returns `None` if any of the following conditions - /// are not met: - /// - /// * `align` must be a power of two, - /// - /// * `align` must not exceed 2<sup>31</sup> (i.e. `1 << 31`), - /// - /// * `size`, when rounded up to the nearest multiple of `align`, - /// must not overflow (i.e. the rounded value must be less than - /// `usize::MAX`). - #[inline] - pub fn from_size_align(size: usize, align: usize) -> Option<Layout> { - if !align.is_power_of_two() { - return None; - } - - if align > (1 << 31) { - return None; - } - - // (power-of-two implies align != 0.) - - // Rounded up size is: - // size_rounded_up = (size + align - 1) & !(align - 1); - // - // We know from above that align != 0. If adding (align - 1) - // does not overflow, then rounding up will be fine. - // - // Conversely, &-masking with !(align - 1) will subtract off - // only low-order-bits. Thus if overflow occurs with the sum, - // the &-mask cannot subtract enough to undo that overflow. - // - // Above implies that checking for summation overflow is both - // necessary and sufficient. - if size > usize::MAX - (align - 1) { - return None; - } - - unsafe { - Some(Layout::from_size_align_unchecked(size, align)) - } - } - - /// Creates a layout, bypassing all checks. - /// - /// # Safety - /// - /// This function is unsafe as it does not verify that `align` is - /// a power-of-two that is also less than or equal to 2<sup>31</sup>, nor - /// that `size` aligned to `align` fits within the address space - /// (i.e. the `Layout::from_size_align` preconditions). - #[inline] - pub unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Layout { - Layout { size: size, align: align } - } - - /// The minimum size in bytes for a memory block of this layout. - #[inline] - pub fn size(&self) -> usize { self.size } - - /// The minimum byte alignment for a memory block of this layout. - #[inline] - pub fn align(&self) -> usize { self.align } - - /// Constructs a `Layout` suitable for holding a value of type `T`. 
- pub fn new<T>() -> Self { - let (size, align) = size_align::<T>(); - Layout::from_size_align(size, align).unwrap() - } - - /// Produces layout describing a record that could be used to - /// allocate backing structure for `T` (which could be a trait - /// or other unsized type like a slice). - pub fn for_value<T: ?Sized>(t: &T) -> Self { - let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); - Layout::from_size_align(size, align).unwrap() - } - - /// Creates a layout describing the record that can hold a value - /// of the same layout as `self`, but that also is aligned to - /// alignment `align` (measured in bytes). - /// - /// If `self` already meets the prescribed alignment, then returns - /// `self`. - /// - /// Note that this method does not add any padding to the overall - /// size, regardless of whether the returned layout has a different - /// alignment. In other words, if `K` has size 16, `K.align_to(32)` - /// will *still* have size 16. - /// - /// # Panics - /// - /// Panics if the combination of `self.size` and the given `align` - /// violates the conditions listed in `from_size_align`. - #[inline] - pub fn align_to(&self, align: usize) -> Self { - Layout::from_size_align(self.size, cmp::max(self.align, align)).unwrap() - } - - /// Returns the amount of padding we must insert after `self` - /// to ensure that the following address will satisfy `align` - /// (measured in bytes). - /// - /// E.g. if `self.size` is 9, then `self.padding_needed_for(4)` - /// returns 3, because that is the minimum number of bytes of - /// padding required to get a 4-aligned address (assuming that the - /// corresponding memory block starts at a 4-aligned address). - /// - /// The return value of this function has no meaning if `align` is - /// not a power-of-two. - /// - /// Note that the utility of the returned value requires `align` - /// to be less than or equal to the alignment of the starting - /// address for the whole allocated block of memory. One way to - /// satisfy this constraint is to ensure `align <= self.align`. - #[inline] - pub fn padding_needed_for(&self, align: usize) -> usize { - let len = self.size(); - - // Rounded up value is: - // len_rounded_up = (len + align - 1) & !(align - 1); - // and then we return the padding difference: `len_rounded_up - len`. - // - // We use modular arithmetic throughout: - // - // 1. align is guaranteed to be > 0, so align - 1 is always - // valid. - // - // 2. `len + align - 1` can overflow by at most `align - 1`, - // so the &-mask wth `!(align - 1)` will ensure that in the - // case of overflow, `len_rounded_up` will itself be 0. - // Thus the returned padding, when added to `len`, yields 0, - // which trivially satisfies the alignment `align`. - // - // (Of course, attempts to allocate blocks of memory whose - // size and padding overflow in the above manner should cause - // the allocator to yield an error anyway.) - - let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); - return len_rounded_up.wrapping_sub(len); - } - - /// Creates a layout describing the record for `n` instances of - /// `self`, with a suitable amount of padding between each to - /// ensure that each instance is given its requested size and - /// alignment. On success, returns `(k, offs)` where `k` is the - /// layout of the array and `offs` is the distance between the start - /// of each element in the array. - /// - /// On arithmetic overflow, returns `None`. 
- #[inline] - pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { - let padded_size = self.size.checked_add(self.padding_needed_for(self.align))?; - let alloc_size = padded_size.checked_mul(n)?; - - // We can assume that `self.align` is a power-of-two that does - // not exceed 2<sup>31</sup>. Furthermore, `alloc_size` has already been - // rounded up to a multiple of `self.align`; therefore, the - // call to `Layout::from_size_align` below should never panic. - Some((Layout::from_size_align(alloc_size, self.align).unwrap(), padded_size)) - } - - /// Creates a layout describing the record for `self` followed by - /// `next`, including any necessary padding to ensure that `next` - /// will be properly aligned. Note that the result layout will - /// satisfy the alignment properties of both `self` and `next`. - /// - /// Returns `Some((k, offset))`, where `k` is layout of the concatenated - /// record and `offset` is the relative location, in bytes, of the - /// start of the `next` embedded within the concatenated record - /// (assuming that the record itself starts at offset 0). - /// - /// On arithmetic overflow, returns `None`. - pub fn extend(&self, next: Self) -> Option<(Self, usize)> { - let new_align = cmp::max(self.align, next.align); - let realigned = Layout::from_size_align(self.size, new_align)?; - - let pad = realigned.padding_needed_for(next.align); - - let offset = self.size.checked_add(pad)?; - let new_size = offset.checked_add(next.size)?; - - let layout = Layout::from_size_align(new_size, new_align)?; - Some((layout, offset)) - } - - /// Creates a layout describing the record for `n` instances of - /// `self`, with no padding between each instance. - /// - /// Note that, unlike `repeat`, `repeat_packed` does not guarantee - /// that the repeated instances of `self` will be properly - /// aligned, even if a given instance of `self` is properly - /// aligned. In other words, if the layout returned by - /// `repeat_packed` is used to allocate an array, it is not - /// guaranteed that all elements in the array will be properly - /// aligned. - /// - /// On arithmetic overflow, returns `None`. - pub fn repeat_packed(&self, n: usize) -> Option<Self> { - let size = self.size().checked_mul(n)?; - Layout::from_size_align(size, self.align) - } - - /// Creates a layout describing the record for `self` followed by - /// `next` with no additional padding between the two. Since no - /// padding is inserted, the alignment of `next` is irrelevant, - /// and is not incorporated *at all* into the resulting layout. - /// - /// Returns `(k, offset)`, where `k` is layout of the concatenated - /// record and `offset` is the relative location, in bytes, of the - /// start of the `next` embedded within the concatenated record - /// (assuming that the record itself starts at offset 0). - /// - /// (The `offset` is always the same as `self.size()`; we use this - /// signature out of convenience in matching the signature of - /// `extend`.) - /// - /// On arithmetic overflow, returns `None`. - pub fn extend_packed(&self, next: Self) -> Option<(Self, usize)> { - let new_size = self.size().checked_add(next.size())?; - let layout = Layout::from_size_align(new_size, self.align)?; - Some((layout, self.size())) - } - - /// Creates a layout describing the record for a `[T; n]`. - /// - /// On arithmetic overflow, returns `None`. 
- pub fn array<T>(n: usize) -> Option<Self> { - Layout::new::<T>() - .repeat(n) - .map(|(k, offs)| { - debug_assert!(offs == mem::size_of::<T>()); - k - }) - } -} - -/// The `AllocErr` error specifies whether an allocation failure is -/// specifically due to resource exhaustion or if it is due to -/// something wrong when combining the given input arguments with this -/// allocator. -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum AllocErr { - /// Error due to hitting some resource limit or otherwise running - /// out of memory. This condition strongly implies that *some* - /// series of deallocations would allow a subsequent reissuing of - /// the original allocation request to succeed. - Exhausted { request: Layout }, - - /// Error due to allocator being fundamentally incapable of - /// satisfying the original request. This condition implies that - /// such an allocation request will never succeed on the given - /// allocator, regardless of environment, memory pressure, or - /// other contextual conditions. - /// - /// For example, an allocator that does not support requests for - /// large memory blocks might return this error variant. - Unsupported { details: &'static str }, -} - -impl AllocErr { - #[inline] - pub fn invalid_input(details: &'static str) -> Self { - AllocErr::Unsupported { details: details } - } - #[inline] - pub fn is_memory_exhausted(&self) -> bool { - if let AllocErr::Exhausted { .. } = *self { true } else { false } - } - #[inline] - pub fn is_request_unsupported(&self) -> bool { - if let AllocErr::Unsupported { .. } = *self { true } else { false } - } - #[inline] - pub fn description(&self) -> &str { - match *self { - AllocErr::Exhausted { .. } => "allocator memory exhausted", - AllocErr::Unsupported { .. } => "unsupported allocator request", - } - } -} - -// (we need this for downstream impl of trait Error) -impl fmt::Display for AllocErr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} - -/// The `CannotReallocInPlace` error is used when `grow_in_place` or -/// `shrink_in_place` were unable to reuse the given memory block for -/// a requested layout. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct CannotReallocInPlace; - -impl CannotReallocInPlace { - pub fn description(&self) -> &str { - "cannot reallocate allocator's memory in place" - } -} - -// (we need this for downstream impl of trait Error) -impl fmt::Display for CannotReallocInPlace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} - -/// An implementation of `Alloc` can allocate, reallocate, and -/// deallocate arbitrary blocks of data described via `Layout`. -/// -/// Some of the methods require that a memory block be *currently -/// allocated* via an allocator. This means that: -/// -/// * the starting address for that memory block was previously -/// returned by a previous call to an allocation method (`alloc`, -/// `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or -/// reallocation method (`realloc`, `realloc_excess`, or -/// `realloc_array`), and -/// -/// * the memory block has not been subsequently deallocated, where -/// blocks are deallocated either by being passed to a deallocation -/// method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being -/// passed to a reallocation method (see above) that returns `Ok`. 
-/// -/// A note regarding zero-sized types and zero-sized layouts: many -/// methods in the `Alloc` trait state that allocation requests -/// must be non-zero size, or else undefined behavior can result. -/// -/// * However, some higher-level allocation methods (`alloc_one`, -/// `alloc_array`) are well-defined on zero-sized types and can -/// optionally support them: it is left up to the implementor -/// whether to return `Err`, or to return `Ok` with some pointer. -/// -/// * If an `Alloc` implementation chooses to return `Ok` in this -/// case (i.e. the pointer denotes a zero-sized inaccessible block) -/// then that returned pointer must be considered "currently -/// allocated". On such an allocator, *all* methods that take -/// currently-allocated pointers as inputs must accept these -/// zero-sized pointers, *without* causing undefined behavior. -/// -/// * In other words, if a zero-sized pointer can flow out of an -/// allocator, then that allocator must likewise accept that pointer -/// flowing back into its deallocation and reallocation methods. -/// -/// Some of the methods require that a layout *fit* a memory block. -/// What it means for a layout to "fit" a memory block means (or -/// equivalently, for a memory block to "fit" a layout) is that the -/// following two conditions must hold: -/// -/// 1. The block's starting address must be aligned to `layout.align()`. -/// -/// 2. The block's size must fall in the range `[use_min, use_max]`, where: -/// -/// * `use_min` is `self.usable_size(layout).0`, and -/// -/// * `use_max` is the capacity that was (or would have been) -/// returned when (if) the block was allocated via a call to -/// `alloc_excess` or `realloc_excess`. -/// -/// Note that: -/// -/// * the size of the layout most recently used to allocate the block -/// is guaranteed to be in the range `[use_min, use_max]`, and -/// -/// * a lower-bound on `use_max` can be safely approximated by a call to -/// `usable_size`. -/// -/// * if a layout `k` fits a memory block (denoted by `ptr`) -/// currently allocated via an allocator `a`, then it is legal to -/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`. -/// -/// # Unsafety -/// -/// The `Alloc` trait is an `unsafe` trait for a number of reasons, and -/// implementors must ensure that they adhere to these contracts: -/// -/// * Pointers returned from allocation functions must point to valid memory and -/// retain their validity until at least the instance of `Alloc` is dropped -/// itself. -/// -/// * It's undefined behavior if global allocators unwind. This restriction may -/// be lifted in the future, but currently a panic from any of these -/// functions may lead to memory unsafety. Note that as of the time of this -/// writing allocators *not* intending to be global allocators can still panic -/// in their implementation without violating memory safety. -/// -/// * `Layout` queries and calculations in general must be correct. Callers of -/// this trait are allowed to rely on the contracts defined on each method, -/// and implementors must ensure such contracts remain true. -/// -/// Note that this list may get tweaked over time as clarifications are made in -/// the future. Additionally global allocators may gain unique requirements for -/// how to safely implement one in the future as well. -pub unsafe trait Alloc { - - // (Note: existing allocators have unspecified but well-defined - // behavior in response to a zero size allocation request ; - // e.g. 
in C, `malloc` of 0 will either return a null pointer or a - // unique pointer, but will not have arbitrary undefined - // behavior. Rust should consider revising the alloc::heap crate - // to reflect this reality.) - - /// Returns a pointer meeting the size and alignment guarantees of - /// `layout`. - /// - /// If this method returns an `Ok(addr)`, then the `addr` returned - /// will be non-null address pointing to a block of storage - /// suitable for holding an instance of `layout`. - /// - /// The returned block of storage may or may not have its contents - /// initialized. (Extension subtraits might restrict this - /// behavior, e.g. to ensure initialization to particular sets of - /// bit patterns.) - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure that `layout` has non-zero size. - /// - /// (Extension subtraits might provide more specific bounds on - /// behavior, e.g. guarantee a sentinel address or a null pointer - /// in response to a zero-size allocation request.) - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints. - /// - /// Implementations are encouraged to return `Err` on memory - /// exhaustion rather than panicking or aborting, but this is not - /// a strict requirement. (Specifically: it is *legal* to - /// implement this trait atop an underlying native allocation - /// library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>; - - /// Deallocate the memory referenced by `ptr`. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must denote a block of memory currently allocated via - /// this allocator, - /// - /// * `layout` must *fit* that block of memory, - /// - /// * In addition to fitting the block of memory `layout`, the - /// alignment of the `layout` must match the alignment used - /// to allocate that block of memory. - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout); - - /// Allocator-specific method for signaling an out-of-memory - /// condition. - /// - /// `oom` aborts the thread or process, optionally performing - /// cleanup or logging diagnostic information before panicking or - /// aborting. - /// - /// `oom` is meant to be used by clients unable to cope with an - /// unsatisfied allocation request (signaled by an error such as - /// `AllocErr::Exhausted`), and wish to abandon computation rather - /// than attempt to recover locally. Such clients should pass the - /// signaling error value back into `oom`, where the allocator - /// may incorporate that error value into its diagnostic report - /// before aborting. - /// - /// Implementations of the `oom` method are discouraged from - /// infinitely regressing in nested calls to `oom`. In - /// practice this means implementors should eschew allocating, - /// especially from `self` (directly or indirectly). - /// - /// Implementations of the allocation and reallocation methods - /// (e.g. 
`alloc`, `alloc_one`, `realloc`) are discouraged from - /// panicking (or aborting) in the event of memory exhaustion; - /// instead they should return an appropriate error from the - /// invoked method, and let the client decide whether to invoke - /// this `oom` method in response. - fn oom(&mut self, _: AllocErr) -> ! { - unsafe { ::core::intrinsics::abort() } - } - - // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS == - // usable_size - - /// Returns bounds on the guaranteed usable size of a successful - /// allocation created with the specified `layout`. - /// - /// In particular, if one has a memory block allocated via a given - /// allocator `a` and layout `k` where `a.usable_size(k)` returns - /// `(l, u)`, then one can pass that block to `a.dealloc()` with a - /// layout in the size range [l, u]. - /// - /// (All implementors of `usable_size` must ensure that - /// `l <= k.size() <= u`) - /// - /// Both the lower- and upper-bounds (`l` and `u` respectively) - /// are provided, because an allocator based on size classes could - /// misbehave if one attempts to deallocate a block without - /// providing a correct value for its size (i.e., one within the - /// range `[l, u]`). - /// - /// Clients who wish to make use of excess capacity are encouraged - /// to use the `alloc_excess` and `realloc_excess` instead, as - /// this method is constrained to report conservative values that - /// serve as valid bounds for *all possible* allocation method - /// calls. - /// - /// However, for clients that do not wish to track the capacity - /// returned by `alloc_excess` locally, this method is likely to - /// produce useful results. - #[inline] - fn usable_size(&self, layout: &Layout) -> (usize, usize) { - (layout.size(), layout.size()) - } - - // == METHODS FOR MEMORY REUSE == - // realloc. alloc_excess, realloc_excess - - /// Returns a pointer suitable for holding data described by - /// `new_layout`, meeting its size and alignment guarantees. To - /// accomplish this, this may extend or shrink the allocation - /// referenced by `ptr` to fit `new_layout`. - /// - /// If this returns `Ok`, then ownership of the memory block - /// referenced by `ptr` has been transferred to this - /// allocator. The memory may or may not have been freed, and - /// should be considered unusable (unless of course it was - /// transferred back to the caller again via the return value of - /// this method). - /// - /// If this method returns `Err`, then ownership of the memory - /// block has not been transferred to this allocator, and the - /// contents of the memory block are unaltered. - /// - /// For best results, `new_layout` should not impose a different - /// alignment constraint than `layout`. (In other words, - /// `new_layout.align()` should equal `layout.align()`.) However, - /// behavior is well-defined (though underspecified) when this - /// constraint is violated; further discussion below. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * `layout` must *fit* the `ptr` (see above). (The `new_layout` - /// argument need not fit it.) - /// - /// * `new_layout` must have size greater than zero. - /// - /// * the alignment of `new_layout` is non-zero. - /// - /// (Extension subtraits might provide more specific bounds on - /// behavior, e.g. 
guarantee a sentinel address or a null pointer - /// in response to a zero-size allocation request.) - /// - /// # Errors - /// - /// Returns `Err` only if `new_layout` does not match the - /// alignment of `layout`, or does not meet the allocator's size - /// and alignment constraints of the allocator, or if reallocation - /// otherwise fails. - /// - /// (Note the previous sentence did not say "if and only if" -- in - /// particular, an implementation of this method *can* return `Ok` - /// if `new_layout.align() != old_layout.align()`; or it can - /// return `Err` in that scenario, depending on whether this - /// allocator can dynamically adjust the alignment constraint for - /// the block.) - /// - /// Implementations are encouraged to return `Err` on memory - /// exhaustion rather than panicking or aborting, but this is not - /// a strict requirement. (Specifically: it is *legal* to - /// implement this trait atop an underlying native allocation - /// library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an - /// reallocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn realloc(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<*mut u8, AllocErr> { - let new_size = new_layout.size(); - let old_size = layout.size(); - let aligns_match = layout.align == new_layout.align; - - if new_size >= old_size && aligns_match { - if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_layout.clone()) { - return Ok(ptr); - } - } else if new_size < old_size && aligns_match { - if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_layout.clone()) { - return Ok(ptr); - } - } - - // otherwise, fall back on alloc + copy + dealloc. - let result = self.alloc(new_layout); - if let Ok(new_ptr) = result { - ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size)); - self.dealloc(ptr, layout); - } - result - } - - /// Behaves like `alloc`, but also ensures that the contents - /// are set to zero before being returned. - /// - /// # Safety - /// - /// This function is unsafe for the same reasons that `alloc` is. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints, just as in `alloc`. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let size = layout.size(); - let p = self.alloc(layout); - if let Ok(p) = p { - ptr::write_bytes(p, 0, size); - } - p - } - - /// Behaves like `alloc`, but also returns the whole size of - /// the returned block. For some `layout` inputs, like arrays, this - /// may include extra storage usable for additional data. - /// - /// # Safety - /// - /// This function is unsafe for the same reasons that `alloc` is. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints, just as in `alloc`. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. 
- unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> { - let usable_size = self.usable_size(&layout); - self.alloc(layout).map(|p| Excess(p, usable_size.1)) - } - - /// Behaves like `realloc`, but also returns the whole size of - /// the returned block. For some `layout` inputs, like arrays, this - /// may include extra storage usable for additional data. - /// - /// # Safety - /// - /// This function is unsafe for the same reasons that `realloc` is. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints, just as in `realloc`. - /// - /// Clients wishing to abort computation in response to an - /// reallocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn realloc_excess(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<Excess, AllocErr> { - let usable_size = self.usable_size(&new_layout); - self.realloc(ptr, layout, new_layout) - .map(|p| Excess(p, usable_size.1)) - } - - /// Attempts to extend the allocation referenced by `ptr` to fit `new_layout`. - /// - /// If this returns `Ok`, then the allocator has asserted that the - /// memory block referenced by `ptr` now fits `new_layout`, and thus can - /// be used to carry data of that layout. (The allocator is allowed to - /// expend effort to accomplish this, such as extending the memory block to - /// include successor blocks, or virtual memory tricks.) - /// - /// Regardless of what this method returns, ownership of the - /// memory block referenced by `ptr` has not been transferred, and - /// the contents of the memory block are unaltered. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * `layout` must *fit* the `ptr` (see above); note the - /// `new_layout` argument need not fit it, - /// - /// * `new_layout.size()` must not be less than `layout.size()`, - /// - /// * `new_layout.align()` must equal `layout.align()`. - /// - /// # Errors - /// - /// Returns `Err(CannotReallocInPlace)` when the allocator is - /// unable to assert that the memory block referenced by `ptr` - /// could fit `layout`. - /// - /// Note that one cannot pass `CannotReallocInPlace` to the `oom` - /// method; clients are expected either to be able to recover from - /// `grow_in_place` failures without aborting, or to fall back on - /// another reallocation method before resorting to an abort. - unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - let _ = ptr; // this default implementation doesn't care about the actual address. - debug_assert!(new_layout.size >= layout.size); - debug_assert!(new_layout.align == layout.align); - let (_l, u) = self.usable_size(&layout); - // _l <= layout.size() [guaranteed by usable_size()] - // layout.size() <= new_layout.size() [required by this method] - if new_layout.size <= u { - return Ok(()); - } else { - return Err(CannotReallocInPlace); - } - } - - /// Attempts to shrink the allocation referenced by `ptr` to fit `new_layout`. 
- /// - /// If this returns `Ok`, then the allocator has asserted that the - /// memory block referenced by `ptr` now fits `new_layout`, and - /// thus can only be used to carry data of that smaller - /// layout. (The allocator is allowed to take advantage of this, - /// carving off portions of the block for reuse elsewhere.) The - /// truncated contents of the block within the smaller layout are - /// unaltered, and ownership of block has not been transferred. - /// - /// If this returns `Err`, then the memory block is considered to - /// still represent the original (larger) `layout`. None of the - /// block has been carved off for reuse elsewhere, ownership of - /// the memory block has not been transferred, and the contents of - /// the memory block are unaltered. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * `layout` must *fit* the `ptr` (see above); note the - /// `new_layout` argument need not fit it, - /// - /// * `new_layout.size()` must not be greater than `layout.size()` - /// (and must be greater than zero), - /// - /// * `new_layout.align()` must equal `layout.align()`. - /// - /// # Errors - /// - /// Returns `Err(CannotReallocInPlace)` when the allocator is - /// unable to assert that the memory block referenced by `ptr` - /// could fit `layout`. - /// - /// Note that one cannot pass `CannotReallocInPlace` to the `oom` - /// method; clients are expected either to be able to recover from - /// `shrink_in_place` failures without aborting, or to fall back - /// on another reallocation method before resorting to an abort. - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - let _ = ptr; // this default implementation doesn't care about the actual address. - debug_assert!(new_layout.size <= layout.size); - debug_assert!(new_layout.align == layout.align); - let (l, _u) = self.usable_size(&layout); - // layout.size() <= _u [guaranteed by usable_size()] - // new_layout.size() <= layout.size() [required by this method] - if l <= new_layout.size { - return Ok(()); - } else { - return Err(CannotReallocInPlace); - } - } - - - // == COMMON USAGE PATTERNS == - // alloc_one, dealloc_one, alloc_array, realloc_array. dealloc_array - - /// Allocates a block suitable for holding an instance of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// The returned block is suitable for passing to the - /// `alloc`/`realloc` methods of this allocator. - /// - /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` - /// must be considered "currently allocated" and must be - /// acceptable input to methods such as `realloc` or `dealloc`, - /// *even if* `T` is a zero-sized type. In other words, if your - /// `Alloc` implementation overrides this method in a manner - /// that can return a zero-sized `ptr`, then all reallocation and - /// deallocation methods need to be similarly overridden to accept - /// such values as input. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `T` does not meet allocator's size or alignment constraints. - /// - /// For zero-sized `T`, may return either of `Ok` or `Err`, but - /// will *not* yield undefined behavior. 
- /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - fn alloc_one<T>(&mut self) -> Result<Unique<T>, AllocErr> - where Self: Sized - { - let k = Layout::new::<T>(); - if k.size() > 0 { - unsafe { self.alloc(k).map(|p| Unique::new_unchecked(p as *mut T)) } - } else { - Err(AllocErr::invalid_input("zero-sized type invalid for alloc_one")) - } - } - - /// Deallocates a block suitable for holding an instance of `T`. - /// - /// The given block must have been produced by this allocator, - /// and must be suitable for storing a `T` (in terms of alignment - /// as well as minimum and maximum size); otherwise yields - /// undefined behavior. - /// - /// Captures a common usage pattern for allocators. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure both: - /// - /// * `ptr` must denote a block of memory currently allocated via this allocator - /// - /// * the layout of `T` must *fit* that block of memory. - unsafe fn dealloc_one<T>(&mut self, ptr: Unique<T>) - where Self: Sized - { - let raw_ptr = ptr.as_ptr() as *mut u8; - let k = Layout::new::<T>(); - if k.size() > 0 { - self.dealloc(raw_ptr, k); - } - } - - /// Allocates a block suitable for holding `n` instances of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// The returned block is suitable for passing to the - /// `alloc`/`realloc` methods of this allocator. - /// - /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` - /// must be considered "currently allocated" and must be - /// acceptable input to methods such as `realloc` or `dealloc`, - /// *even if* `T` is a zero-sized type. In other words, if your - /// `Alloc` implementation overrides this method in a manner - /// that can return a zero-sized `ptr`, then all reallocation and - /// deallocation methods need to be similarly overridden to accept - /// such values as input. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `[T; n]` does not meet allocator's size or alignment - /// constraints. - /// - /// For zero-sized `T` or `n == 0`, may return either of `Ok` or - /// `Err`, but will *not* yield undefined behavior. - /// - /// Always returns `Err` on arithmetic overflow. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - fn alloc_array<T>(&mut self, n: usize) -> Result<Unique<T>, AllocErr> - where Self: Sized - { - match Layout::array::<T>(n) { - Some(ref layout) if layout.size() > 0 => { - unsafe { - self.alloc(layout.clone()) - .map(|p| { - Unique::new_unchecked(p as *mut T) - }) - } - } - _ => Err(AllocErr::invalid_input("invalid layout for alloc_array")), - } - } - - /// Reallocates a block previously suitable for holding `n_old` - /// instances of `T`, returning a block suitable for holding - /// `n_new` instances of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// The returned block is suitable for passing to the - /// `alloc`/`realloc` methods of this allocator. 
- /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * the layout of `[T; n_old]` must *fit* that block of memory. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `[T; n_new]` does not meet allocator's size or alignment - /// constraints. - /// - /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or - /// `Err`, but will *not* yield undefined behavior. - /// - /// Always returns `Err` on arithmetic overflow. - /// - /// Clients wishing to abort computation in response to an - /// reallocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn realloc_array<T>(&mut self, - ptr: Unique<T>, - n_old: usize, - n_new: usize) -> Result<Unique<T>, AllocErr> - where Self: Sized - { - match (Layout::array::<T>(n_old), Layout::array::<T>(n_new), ptr.as_ptr()) { - (Some(ref k_old), Some(ref k_new), ptr) if k_old.size() > 0 && k_new.size() > 0 => { - self.realloc(ptr as *mut u8, k_old.clone(), k_new.clone()) - .map(|p|Unique::new_unchecked(p as *mut T)) - } - _ => { - Err(AllocErr::invalid_input("invalid layout for realloc_array")) - } - } - } - - /// Deallocates a block suitable for holding `n` instances of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure both: - /// - /// * `ptr` must denote a block of memory currently allocated via this allocator - /// - /// * the layout of `[T; n]` must *fit* that block of memory. - /// - /// # Errors - /// - /// Returning `Err` indicates that either `[T; n]` or the given - /// memory block does not meet allocator's size or alignment - /// constraints. - /// - /// Always returns `Err` on arithmetic overflow. - unsafe fn dealloc_array<T>(&mut self, ptr: Unique<T>, n: usize) -> Result<(), AllocErr> - where Self: Sized - { - let raw_ptr = ptr.as_ptr() as *mut u8; - match Layout::array::<T>(n) { - Some(ref k) if k.size() > 0 => { - Ok(self.dealloc(raw_ptr, k.clone())) - } - _ => { - Err(AllocErr::invalid_input("invalid layout for dealloc_array")) - } - } - } -} diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs index 174628ccd07..b4f4fd74f3a 100644 --- a/src/liballoc/benches/lib.rs +++ b/src/liballoc/benches/lib.rs @@ -8,12 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(warnings)] - -#![feature(i128_type)] -#![feature(rand)] #![feature(repr_simd)] -#![feature(slice_rotate)] +#![feature(slice_sort_by_cached_key)] #![feature(test)] extern crate rand; diff --git a/src/liballoc/benches/slice.rs b/src/liballoc/benches/slice.rs index ee5182a1d46..a699ff9c0a7 100644 --- a/src/liballoc/benches/slice.rs +++ b/src/liballoc/benches/slice.rs @@ -284,6 +284,17 @@ macro_rules! sort_expensive { } } +macro_rules! 
sort_lexicographic { + ($f:ident, $name:ident, $gen:expr, $len:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let v = $gen($len); + b.iter(|| v.clone().$f(|x| x.to_string())); + b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; + } + } +} + sort!(sort, sort_small_ascending, gen_ascending, 10); sort!(sort, sort_small_descending, gen_descending, 10); sort!(sort, sort_small_random, gen_random, 10); @@ -312,6 +323,10 @@ sort!(sort_unstable, sort_unstable_large_big, gen_big_random, 10000); sort_strings!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000); sort_expensive!(sort_unstable_by, sort_unstable_large_expensive, gen_random, 10000); +sort_lexicographic!(sort_by_key, sort_by_key_lexicographic, gen_random, 10000); +sort_lexicographic!(sort_unstable_by_key, sort_unstable_by_key_lexicographic, gen_random, 10000); +sort_lexicographic!(sort_by_cached_key, sort_by_cached_key_lexicographic, gen_random, 10000); + macro_rules! reverse { ($name:ident, $ty:ty, $f:expr) => { #[bench] diff --git a/src/liballoc/benches/vec_deque_append.rs b/src/liballoc/benches/vec_deque_append.rs new file mode 100644 index 00000000000..bd335651137 --- /dev/null +++ b/src/liballoc/benches/vec_deque_append.rs @@ -0,0 +1,48 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(duration_as_u128)] +use std::{collections::VecDeque, time::Instant}; + +const VECDEQUE_LEN: i32 = 100000; +const WARMUP_N: usize = 100; +const BENCH_N: usize = 1000; + +fn main() { + let a: VecDeque<i32> = (0..VECDEQUE_LEN).collect(); + let b: VecDeque<i32> = (0..VECDEQUE_LEN).collect(); + + for _ in 0..WARMUP_N { + let mut c = a.clone(); + let mut d = b.clone(); + c.append(&mut d); + } + + let mut durations = Vec::with_capacity(BENCH_N); + + for _ in 0..BENCH_N { + let mut c = a.clone(); + let mut d = b.clone(); + let before = Instant::now(); + c.append(&mut d); + let after = Instant::now(); + durations.push(after.duration_since(before)); + } + + let l = durations.len(); + durations.sort(); + + assert!(BENCH_N % 2 == 0); + let median = (durations[(l / 2) - 1] + durations[l / 2]) / 2; + println!( + "\ncustom-bench vec_deque_append {:?} ns/iter\n", + median.as_nanos() + ); +} diff --git a/src/liballoc/borrow.rs b/src/liballoc/borrow.rs index acae0daa86b..c6741ddb822 100644 --- a/src/liballoc/borrow.rs +++ b/src/liballoc/borrow.rs @@ -59,6 +59,7 @@ pub trait ToOwned { /// let vv: Vec<i32> = v.to_owned(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "cloning is often expensive and is not expected to have side effects"] fn to_owned(&self) -> Self::Owned; /// Uses borrowed data to replace owned data, usually by cloning. 
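The `borrow.rs` hunk just above tags `ToOwned::to_owned` with `#[must_use]`, since an ignored return value means a clone was allocated and immediately dropped. A minimal sketch of the case the lint now flags (hypothetical caller code, not from this diff):

    fn main() {
        let s: &str = "hello";
        s.to_owned(); // warns under #[must_use]: the fresh String is dropped at once
        let owned: String = s.to_owned(); // intended use: keep the owned copy
        assert_eq!(owned, "hello");
    }
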
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 6f125cdba81..08db5136d04 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -55,53 +55,22 @@ #![stable(feature = "rust1", since = "1.0.0")] -use heap::{Heap, Layout, Alloc}; -use raw_vec::RawVec; - use core::any::Any; use core::borrow; use core::cmp::Ordering; +use core::convert::From; use core::fmt; -use core::hash::{self, Hash, Hasher}; +use core::future::{Future, FutureObj, LocalFutureObj, UnsafeFutureObj}; +use core::hash::{Hash, Hasher}; use core::iter::FusedIterator; -use core::marker::{self, Unsize}; -use core::mem; +use core::marker::{Unpin, Unsize}; +use core::mem::{self, PinMut}; use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState}; -use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer}; -use core::ptr::{self, Unique}; -use core::convert::From; -use str::from_boxed_utf8_unchecked; +use core::ptr::{self, NonNull, Unique}; +use core::task::{Context, Poll, Spawn, SpawnErrorKind, SpawnObjError}; -/// A value that represents the heap. This is the default place that the `box` -/// keyword allocates into when no place is supplied. -/// -/// The following two examples are equivalent: -/// -/// ``` -/// #![feature(box_heap)] -/// -/// #![feature(box_syntax, placement_in_syntax)] -/// use std::boxed::HEAP; -/// -/// fn main() { -/// let foo: Box<i32> = in HEAP { 5 }; -/// let foo = box 5; -/// } -/// ``` -#[unstable(feature = "box_heap", - reason = "may be renamed; uncertain about custom allocator design", - issue = "27779")] -pub const HEAP: ExchangeHeapSingleton = ExchangeHeapSingleton { _force_singleton: () }; - -/// This the singleton type used solely for `boxed::HEAP`. -#[unstable(feature = "box_heap", - reason = "may be renamed; uncertain about custom allocator design", - issue = "27779")] -#[allow(missing_debug_implementations)] -#[derive(Copy, Clone)] -pub struct ExchangeHeapSingleton { - _force_singleton: (), -} +use raw_vec::RawVec; +use str::from_boxed_utf8_unchecked; /// A pointer type for heap allocation. /// @@ -111,121 +80,6 @@ pub struct ExchangeHeapSingleton { #[stable(feature = "rust1", since = "1.0.0")] pub struct Box<T: ?Sized>(Unique<T>); -/// `IntermediateBox` represents uninitialized backing storage for `Box`. -/// -/// FIXME (pnkfelix): Ideally we would just reuse `Box<T>` instead of -/// introducing a separate `IntermediateBox<T>`; but then you hit -/// issues when you e.g. attempt to destructure an instance of `Box`, -/// since it is a lang item and so it gets special handling by the -/// compiler. Easier just to make this parallel type for now. -/// -/// FIXME (pnkfelix): Currently the `box` protocol only supports -/// creating instances of sized types. This IntermediateBox is -/// designed to be forward-compatible with a future protocol that -/// supports creating instances of unsized types; that is why the type -/// parameter has the `?Sized` generalization marker, and is also why -/// this carries an explicit size. However, it probably does not need -/// to carry the explicit alignment; that is just a work-around for -/// the fact that the `align_of` intrinsic currently requires the -/// input type to be Sized (which I do not think is strictly -/// necessary). 
-#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -#[allow(missing_debug_implementations)] -pub struct IntermediateBox<T: ?Sized> { - ptr: *mut u8, - layout: Layout, - marker: marker::PhantomData<*mut T>, -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl<T> Place<T> for IntermediateBox<T> { - fn pointer(&mut self) -> *mut T { - self.ptr as *mut T - } -} - -unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> { - let p = b.ptr as *mut T; - mem::forget(b); - Box::from_raw(p) -} - -fn make_place<T>() -> IntermediateBox<T> { - let layout = Layout::new::<T>(); - - let p = if layout.size() == 0 { - mem::align_of::<T>() as *mut u8 - } else { - unsafe { - Heap.alloc(layout.clone()).unwrap_or_else(|err| { - Heap.oom(err) - }) - } - }; - - IntermediateBox { - ptr: p, - layout, - marker: marker::PhantomData, - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl<T> BoxPlace<T> for IntermediateBox<T> { - fn make_place() -> IntermediateBox<T> { - make_place() - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl<T> InPlace<T> for IntermediateBox<T> { - type Owner = Box<T>; - unsafe fn finalize(self) -> Box<T> { - finalize(self) - } -} - -#[unstable(feature = "placement_new_protocol", issue = "27779")] -impl<T> Boxed for Box<T> { - type Data = T; - type Place = IntermediateBox<T>; - unsafe fn finalize(b: IntermediateBox<T>) -> Box<T> { - finalize(b) - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl<T> Placer<T> for ExchangeHeapSingleton { - type Place = IntermediateBox<T>; - - fn make_place(self) -> IntermediateBox<T> { - make_place() - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl<T: ?Sized> Drop for IntermediateBox<T> { - fn drop(&mut self) { - if self.layout.size() > 0 { - unsafe { - Heap.dealloc(self.ptr, self.layout.clone()) - } - } - } -} - impl<T> Box<T> { /// Allocates memory on the heap and then places `x` into it. /// @@ -269,38 +123,7 @@ impl<T: ?Sized> Box<T> { #[stable(feature = "box_raw", since = "1.4.0")] #[inline] pub unsafe fn from_raw(raw: *mut T) -> Self { - Box::from_unique(Unique::new_unchecked(raw)) - } - - /// Constructs a `Box` from a `Unique<T>` pointer. - /// - /// After calling this function, the memory is owned by a `Box` and `T` can - /// then be destroyed and released upon drop. - /// - /// # Safety - /// - /// A `Unique<T>` can be safely created via [`Unique::new`] and thus doesn't - /// necessarily own the data pointed to nor is the data guaranteed to live - /// as long as the pointer. - /// - /// [`Unique::new`]: ../../core/ptr/struct.Unique.html#method.new - /// - /// # Examples - /// - /// ``` - /// #![feature(unique)] - /// - /// fn main() { - /// let x = Box::new(5); - /// let ptr = Box::into_unique(x); - /// let x = unsafe { Box::from_unique(ptr) }; - /// } - /// ``` - #[unstable(feature = "unique", reason = "needs an RFC to flesh out design", - issue = "27730")] - #[inline] - pub unsafe fn from_unique(u: Unique<T>) -> Self { - Box(u) + Box(Unique::new_unchecked(raw)) } /// Consumes the `Box`, returning the wrapped raw pointer. 
@@ -326,41 +149,44 @@ impl<T: ?Sized> Box<T> { #[stable(feature = "box_raw", since = "1.4.0")] #[inline] pub fn into_raw(b: Box<T>) -> *mut T { - Box::into_unique(b).as_ptr() + Box::into_raw_non_null(b).as_ptr() } - /// Consumes the `Box`, returning the wrapped pointer as `Unique<T>`. + /// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`. /// /// After calling this function, the caller is responsible for the /// memory previously managed by the `Box`. In particular, the /// caller should properly destroy `T` and release the memory. The - /// proper way to do so is to either convert the `Unique<T>` pointer: - /// - /// - Into a `Box` with the [`Box::from_unique`] function. - /// - /// - Into a raw pointer and back into a `Box` with the [`Box::from_raw`] - /// function. + /// proper way to do so is to convert the `NonNull<T>` pointer + /// into a raw pointer and back into a `Box` with the [`Box::from_raw`] + /// function. /// /// Note: this is an associated function, which means that you have - /// to call it as `Box::into_unique(b)` instead of `b.into_unique()`. This + /// to call it as `Box::into_raw_non_null(b)` + /// instead of `b.into_raw_non_null()`. This /// is so that there is no conflict with a method on the inner type. /// - /// [`Box::from_unique`]: struct.Box.html#method.from_unique /// [`Box::from_raw`]: struct.Box.html#method.from_raw /// /// # Examples /// /// ``` - /// #![feature(unique)] + /// #![feature(box_into_raw_non_null)] /// /// fn main() { /// let x = Box::new(5); - /// let ptr = Box::into_unique(x); + /// let ptr = Box::into_raw_non_null(x); /// } /// ``` - #[unstable(feature = "unique", reason = "needs an RFC to flesh out design", - issue = "27730")] + #[unstable(feature = "box_into_raw_non_null", issue = "47336")] #[inline] + pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> { + Box::into_unique(b).into() + } + + #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")] + #[inline] + #[doc(hidden)] pub fn into_unique(b: Box<T>) -> Unique<T> { let unique = b.0; mem::forget(b); @@ -368,7 +194,9 @@ impl<T: ?Sized> Box<T> { } /// Consumes and leaks the `Box`, returning a mutable reference, - /// `&'a mut T`. Here, the lifetime `'a` may be chosen to be `'static`. + /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime + /// `'a`. If the type has only static references, or none at all, then this + /// may be chosen to be `'static`. /// /// This function is mainly useful for data that lives for the remainder of /// the program's life. 
Dropping the returned reference will cause a memory @@ -388,8 +216,6 @@ impl<T: ?Sized> Box<T> { /// Simple usage: /// /// ``` - /// #![feature(box_leak)] - /// /// fn main() { /// let x = Box::new(41); /// let static_ref: &'static mut usize = Box::leak(x); @@ -401,8 +227,6 @@ impl<T: ?Sized> Box<T> { /// Unsized data: /// /// ``` - /// #![feature(box_leak)] - /// /// fn main() { /// let x = vec![1, 2, 3].into_boxed_slice(); /// let static_ref = Box::leak(x); @@ -410,8 +234,7 @@ impl<T: ?Sized> Box<T> { /// assert_eq!(*static_ref, [4, 2, 3]); /// } /// ``` - #[unstable(feature = "box_leak", reason = "needs an FCP to stabilize", - issue = "46179")] + #[stable(feature = "box_leak", since = "1.26.0")] #[inline] pub fn leak<'a>(b: Box<T>) -> &'a mut T where @@ -542,7 +365,7 @@ impl<T: ?Sized + Eq> Eq for Box<T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized + Hash> Hash for Box<T> { - fn hash<H: hash::Hasher>(&self, state: &mut H) { + fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state); } } @@ -611,6 +434,7 @@ impl<'a, T: Copy> From<&'a [T]> for Box<[T]> { #[stable(feature = "box_from_slice", since = "1.17.0")] impl<'a> From<&'a str> for Box<str> { + #[inline] fn from(s: &'a str) -> Box<str> { unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) } } @@ -618,12 +442,13 @@ impl<'a> From<&'a str> for Box<str> { #[stable(feature = "boxed_str_conv", since = "1.19.0")] impl From<Box<str>> for Box<[u8]> { + #[inline] fn from(s: Box<str>) -> Self { unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) } } } -impl Box<Any> { +impl Box<dyn Any> { #[inline] #[stable(feature = "rust1", since = "1.0.0")] /// Attempt to downcast the box to a concrete type. @@ -645,10 +470,10 @@ impl Box<Any> { /// print_if_string(Box::new(0i8)); /// } /// ``` - pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any>> { + pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any>> { if self.is::<T>() { unsafe { - let raw: *mut Any = Box::into_raw(self); + let raw: *mut dyn Any = Box::into_raw(self); Ok(Box::from_raw(raw as *mut T)) } } else { @@ -657,7 +482,7 @@ impl Box<Any> { } } -impl Box<Any + Send> { +impl Box<dyn Any + Send> { #[inline] #[stable(feature = "rust1", since = "1.0.0")] /// Attempt to downcast the box to a concrete type. 
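The `Box<Any>` to `Box<dyn Any>` changes are syntactic only, but `downcast` deserves a standalone illustration: on failure it hands the box back unchanged in the `Err`, so several concrete types can be tried in turn. A sketch; the `describe` helper is illustrative, not part of the patch:

```
use std::any::Any;

fn describe(value: Box<dyn Any>) -> String {
    // `downcast` consumes the box; on failure the original box comes back
    // in the `Err`, so further attempts are possible.
    match value.downcast::<String>() {
        Ok(s) => format!("string: {}", s),
        Err(value) => match value.downcast::<i32>() {
            Ok(n) => format!("i32: {}", n),
            Err(_) => String::from("unknown"),
        },
    }
}

fn main() {
    assert_eq!(describe(Box::new(String::from("hi"))), "string: hi");
    assert_eq!(describe(Box::new(3i32)), "i32: 3");
    assert_eq!(describe(Box::new(2.5f64)), "unknown");
}
```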
@@ -679,10 +504,10 @@ impl Box<Any + Send> { /// print_if_string(Box::new(0i8)); /// } /// ``` - pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any + Send>> { - <Box<Any>>::downcast(self).map_err(|s| unsafe { + pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any + Send>> { + <Box<dyn Any>>::downcast(self).map_err(|s| unsafe { // reapply the Send marker - Box::from_raw(Box::into_raw(s) as *mut (Any + Send)) + Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send)) }) } } @@ -756,7 +581,7 @@ impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {} @@ -820,7 +645,7 @@ impl<A, F> FnBox<A> for F #[unstable(feature = "fnbox", reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")] -impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + 'a> { +impl<'a, A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + 'a> { type Output = R; extern "rust-call" fn call_once(self, args: A) -> R { @@ -830,7 +655,7 @@ impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + 'a> { #[unstable(feature = "fnbox", reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")] -impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + Send + 'a> { +impl<'a, A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + Send + 'a> { type Output = R; extern "rust-call" fn call_once(self, args: A) -> R { @@ -926,7 +751,267 @@ impl<T> Generator for Box<T> { type Yield = T::Yield; type Return = T::Return; - fn resume(&mut self) -> GeneratorState<Self::Yield, Self::Return> { + unsafe fn resume(&mut self) -> GeneratorState<Self::Yield, Self::Return> { (**self).resume() } } + +/// A pinned, heap allocated reference. +#[unstable(feature = "pin", issue = "49150")] +#[fundamental] +#[repr(transparent)] +pub struct PinBox<T: ?Sized> { + inner: Box<T>, +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T> PinBox<T> { + /// Allocate memory on the heap, move the data into it and pin it. + #[unstable(feature = "pin", issue = "49150")] + pub fn new(data: T) -> PinBox<T> { + PinBox { inner: Box::new(data) } + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: ?Sized> PinBox<T> { + /// Get a pinned reference to the data in this PinBox. + #[inline] + pub fn as_pin_mut<'a>(&'a mut self) -> PinMut<'a, T> { + unsafe { PinMut::new_unchecked(&mut *self.inner) } + } + + /// Constructs a `PinBox` from a raw pointer. + /// + /// After calling this function, the raw pointer is owned by the + /// resulting `PinBox`. Specifically, the `PinBox` destructor will call + /// the destructor of `T` and free the allocated memory. Since the + /// way `PinBox` allocates and releases memory is unspecified, the + /// only valid pointer to pass to this function is the one taken + /// from another `PinBox` via the [`PinBox::into_raw`] function. + /// + /// This function is unsafe because improper use may lead to + /// memory problems. For example, a double-free may occur if the + /// function is called twice on the same raw pointer. 
+ /// + /// [`PinBox::into_raw`]: struct.PinBox.html#method.into_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(pin)] + /// use std::boxed::PinBox; + /// let x = PinBox::new(5); + /// let ptr = PinBox::into_raw(x); + /// let x = unsafe { PinBox::from_raw(ptr) }; + /// ``` + #[inline] + pub unsafe fn from_raw(raw: *mut T) -> Self { + PinBox { inner: Box::from_raw(raw) } + } + + /// Consumes the `PinBox`, returning the wrapped raw pointer. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `PinBox`. In particular, the + /// caller should properly destroy `T` and release the memory. The + /// proper way to do so is to convert the raw pointer back into a + /// `PinBox` with the [`PinBox::from_raw`] function. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `PinBox::into_raw(b)` instead of `b.into_raw()`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// [`PinBox::from_raw`]: struct.PinBox.html#method.from_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(pin)] + /// use std::boxed::PinBox; + /// let x = PinBox::new(5); + /// let ptr = PinBox::into_raw(x); + /// ``` + #[inline] + pub fn into_raw(b: PinBox<T>) -> *mut T { + Box::into_raw(b.inner) + } + + /// Get a mutable reference to the data inside this PinBox. + /// + /// This function is unsafe. Users must guarantee that the data is never + /// moved out of this reference. + #[inline] + pub unsafe fn get_mut<'a>(this: &'a mut PinBox<T>) -> &'a mut T { + &mut *this.inner + } + + /// Convert this PinBox into an unpinned Box. + /// + /// This function is unsafe. Users must guarantee that the data is never + /// moved out of the box. + #[inline] + pub unsafe fn unpin(this: PinBox<T>) -> Box<T> { + this.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: ?Sized> From<Box<T>> for PinBox<T> { + fn from(boxed: Box<T>) -> PinBox<T> { + PinBox { inner: boxed } + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: Unpin + ?Sized> From<PinBox<T>> for Box<T> { + fn from(pinned: PinBox<T>) -> Box<T> { + pinned.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: ?Sized> Deref for PinBox<T> { + type Target = T; + + fn deref(&self) -> &T { + &*self.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: Unpin + ?Sized> DerefMut for PinBox<T> { + fn deref_mut(&mut self) -> &mut T { + &mut *self.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: fmt::Display + ?Sized> fmt::Display for PinBox<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&*self.inner, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: fmt::Debug + ?Sized> fmt::Debug for PinBox<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&*self.inner, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: ?Sized> fmt::Pointer for PinBox<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // It's not possible to extract the inner Uniq directly from the Box, + // instead we cast it to a *const which aliases the Unique + let ptr: *const T = &*self.inner; + fmt::Pointer::fmt(&ptr, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<PinBox<U>> for PinBox<T> {} + +#[unstable(feature = "pin", issue = "49150")] +impl<T: ?Sized> Unpin for PinBox<T> {} + +#[unstable(feature = "futures_api", issue = 
"50547")] +impl<F: ?Sized + Future + Unpin> Future for Box<F> { + type Output = F::Output; + + fn poll(mut self: PinMut<Self>, cx: &mut Context) -> Poll<Self::Output> { + PinMut::new(&mut **self).poll(cx) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<F: ?Sized + Future> Future for PinBox<F> { + type Output = F::Output; + + fn poll(mut self: PinMut<Self>, cx: &mut Context) -> Poll<Self::Output> { + self.as_pin_mut().poll(cx) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Box<F> + where F: Future<Output = T> + 'a +{ + fn into_raw(self) -> *mut () { + Box::into_raw(self) as *mut () + } + + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll<T> { + let ptr = ptr as *mut F; + let pin: PinMut<F> = PinMut::new_unchecked(&mut *ptr); + pin.poll(cx) + } + + unsafe fn drop(ptr: *mut ()) { + drop(Box::from_raw(ptr as *mut F)) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for PinBox<F> + where F: Future<Output = T> + 'a +{ + fn into_raw(self) -> *mut () { + PinBox::into_raw(self) as *mut () + } + + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll<T> { + let ptr = ptr as *mut F; + let pin: PinMut<F> = PinMut::new_unchecked(&mut *ptr); + pin.poll(cx) + } + + unsafe fn drop(ptr: *mut ()) { + drop(PinBox::from_raw(ptr as *mut F)) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<Sp> Spawn for Box<Sp> + where Sp: Spawn + ?Sized +{ + fn spawn_obj( + &mut self, + future: FutureObj<'static, ()>, + ) -> Result<(), SpawnObjError> { + (**self).spawn_obj(future) + } + + fn status(&self) -> Result<(), SpawnErrorKind> { + (**self).status() + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future<Output = ()> + Send + 'a> From<PinBox<F>> for FutureObj<'a, ()> { + fn from(boxed: PinBox<F>) -> Self { + FutureObj::new(boxed) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future<Output = ()> + Send + 'a> From<Box<F>> for FutureObj<'a, ()> { + fn from(boxed: Box<F>) -> Self { + FutureObj::new(boxed) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future<Output = ()> + 'a> From<PinBox<F>> for LocalFutureObj<'a, ()> { + fn from(boxed: PinBox<F>) -> Self { + LocalFutureObj::new(boxed) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future<Output = ()> + 'a> From<Box<F>> for LocalFutureObj<'a, ()> { + fn from(boxed: Box<F>) -> Self { + LocalFutureObj::new(boxed) + } +} diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/boxed_test.rs index 837f8dfaca1..55995742a4a 100644 --- a/src/liballoc/boxed_test.rs +++ b/src/liballoc/boxed_test.rs @@ -31,8 +31,8 @@ struct Test; #[test] fn any_move() { - let a = Box::new(8) as Box<Any>; - let b = Box::new(Test) as Box<Any>; + let a = Box::new(8) as Box<dyn Any>; + let b = Box::new(Test) as Box<dyn Any>; match a.downcast::<i32>() { Ok(a) => { @@ -47,8 +47,8 @@ fn any_move() { Err(..) 
=> panic!(), } - let a = Box::new(8) as Box<Any>; - let b = Box::new(Test) as Box<Any>; + let a = Box::new(8) as Box<dyn Any>; + let b = Box::new(Test) as Box<dyn Any>; assert!(a.downcast::<Box<Test>>().is_err()); assert!(b.downcast::<Box<i32>>().is_err()); @@ -56,8 +56,8 @@ fn any_move() { #[test] fn test_show() { - let a = Box::new(8) as Box<Any>; - let b = Box::new(Test) as Box<Any>; + let a = Box::new(8) as Box<dyn Any>; + let b = Box::new(Test) as Box<dyn Any>; let a_str = format!("{:?}", a); let b_str = format!("{:?}", b); assert_eq!(a_str, "Any"); @@ -65,8 +65,8 @@ fn test_show() { static EIGHT: usize = 8; static TEST: Test = Test; - let a = &EIGHT as &Any; - let b = &TEST as &Any; + let a = &EIGHT as &dyn Any; + let b = &TEST as &dyn Any; let s = format!("{:?}", a); assert_eq!(s, "Any"); let s = format!("{:?}", b); @@ -110,12 +110,12 @@ fn raw_trait() { } } - let x: Box<Foo> = Box::new(Bar(17)); + let x: Box<dyn Foo> = Box::new(Bar(17)); let p = Box::into_raw(x); unsafe { assert_eq!(17, (*p).get()); (*p).set(19); - let y: Box<Foo> = Box::from_raw(p); + let y: Box<dyn Foo> = Box::from_raw(p); assert_eq!(19, y.get()); } } diff --git a/src/liballoc/binary_heap.rs b/src/liballoc/collections/binary_heap.rs index 94bbaf92ce9..fcadcb544c4 100644 --- a/src/liballoc/binary_heap.rs +++ b/src/liballoc/collections/binary_heap.rs @@ -155,9 +155,9 @@ #![allow(missing_docs)] #![stable(feature = "rust1", since = "1.0.0")] -use core::ops::{Deref, DerefMut, Place, Placer, InPlace}; +use core::ops::{Deref, DerefMut}; use core::iter::{FromIterator, FusedIterator}; -use core::mem::{swap, size_of}; +use core::mem::{swap, size_of, ManuallyDrop}; use core::ptr; use core::fmt; @@ -509,6 +509,31 @@ impl<T: Ord> BinaryHeap<T> { self.data.shrink_to_fit(); } + /// Discards capacity with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// use std::collections::BinaryHeap; + /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100); + /// + /// assert!(heap.capacity() >= 100); + /// heap.shrink_to(10); + /// assert!(heap.capacity() >= 10); + /// ``` + #[inline] + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.data.shrink_to(min_capacity) + } + /// Removes the greatest item from the binary heap and returns it, or `None` if it /// is empty. /// @@ -839,8 +864,7 @@ impl<T: Ord> BinaryHeap<T> { /// position with the value that was originally removed. struct Hole<'a, T: 'a> { data: &'a mut [T], - /// `elt` is always `Some` from new until drop. - elt: Option<T>, + elt: ManuallyDrop<T>, pos: usize, } @@ -854,7 +878,7 @@ impl<'a, T> Hole<'a, T> { let elt = ptr::read(&data[pos]); Hole { data, - elt: Some(elt), + elt: ManuallyDrop::new(elt), pos, } } @@ -867,7 +891,7 @@ impl<'a, T> Hole<'a, T> { /// Returns a reference to the element removed. #[inline] fn element(&self) -> &T { - self.elt.as_ref().unwrap() + &self.elt } /// Returns a reference to the element at `index`. 
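The `Hole` rewrite trades `Option<T>` plus `take().unwrap()` for `ManuallyDrop<T>` plus a raw copy in `Drop`, removing a discriminant check from the sift hot path. A self-contained sketch of the pattern, condensed from the hunks above:

```
use std::mem::ManuallyDrop;
use std::ptr;

/// A toy hole: the element at `pos` is moved out logically, and `Drop`
/// is guaranteed to move it back, even on unwind.
struct Hole<'a, T: 'a> {
    data: &'a mut [T],
    elt: ManuallyDrop<T>,
    pos: usize,
}

impl<'a, T> Hole<'a, T> {
    /// Unsafe because a `T` is duplicated until the hole is dropped.
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        let elt = ptr::read(&data[pos]);
        Hole { data, elt: ManuallyDrop::new(elt), pos }
    }
}

impl<'a, T> Drop for Hole<'a, T> {
    fn drop(&mut self) {
        // Fill the hole again with a raw copy; no `Option` branch needed.
        unsafe {
            let pos = self.pos;
            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
        }
    }
}

fn main() {
    let mut v = vec![1, 2, 3];
    unsafe { drop(Hole::new(&mut v, 1)) };
    assert_eq!(v, [1, 2, 3]);
}
```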
@@ -900,7 +924,7 @@ impl<'a, T> Drop for Hole<'a, T> { // fill the hole again unsafe { let pos = self.pos; - ptr::write(self.data.get_unchecked_mut(pos), self.elt.take().unwrap()); + ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1); } } } @@ -964,7 +988,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} /// An owning iterator over the elements of a `BinaryHeap`. @@ -1019,7 +1043,7 @@ impl<T> ExactSizeIterator for IntoIter<T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} /// A draining iterator over the elements of a `BinaryHeap`. @@ -1065,7 +1089,7 @@ impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} #[stable(feature = "binary_heap_extras_15", since = "1.5.0")] @@ -1170,67 +1194,3 @@ impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> { self.extend(iter.into_iter().cloned()); } } - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -pub struct BinaryHeapPlace<'a, T: 'a> -where T: Clone + Ord { - heap: *mut BinaryHeap<T>, - place: vec::PlaceBack<'a, T>, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T: Clone + Ord + fmt::Debug> fmt::Debug for BinaryHeapPlace<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("BinaryHeapPlace") - .field(&self.place) - .finish() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T: 'a> Placer<T> for &'a mut BinaryHeap<T> -where T: Clone + Ord { - type Place = BinaryHeapPlace<'a, T>; - - fn make_place(self) -> Self::Place { - let ptr = self as *mut BinaryHeap<T>; - let place = Placer::make_place(self.data.place_back()); - BinaryHeapPlace { - heap: ptr, - place, - } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place<T> for BinaryHeapPlace<'a, T> -where T: Clone + Ord { - fn pointer(&mut self) -> *mut T { - self.place.pointer() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace<T> for BinaryHeapPlace<'a, T> -where T: Clone + Ord { - type Owner = &'a T; - - unsafe fn finalize(self) -> &'a T { - self.place.finalize(); - - let heap: &mut BinaryHeap<T> = &mut *self.heap; - let len = heap.len(); - let i = heap.sift_up(0, len - 1); - heap.data.get_unchecked(i) - } -} diff --git a/src/liballoc/btree/map.rs b/src/liballoc/collections/btree/map.rs index b114dc640fb..8c950cd06d9 100644 --- a/src/liballoc/btree/map.rs +++ b/src/liballoc/collections/btree/map.rs @@ -13,12 +13,12 @@ use core::fmt::Debug; use core::hash::{Hash, Hasher}; use core::iter::{FromIterator, Peekable, FusedIterator}; use core::marker::PhantomData; +use core::ops::Bound::{Excluded, Included, Unbounded}; use core::ops::Index; +use core::ops::RangeBounds; use core::{fmt, intrinsics, mem, ptr}; use borrow::Borrow; -use Bound::{Excluded, Included, Unbounded}; -use range::RangeArgument; use super::node::{self, Handle, 
NodeRef, marker}; use super::search; @@ -149,12 +149,11 @@ unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for BTreeMap<K, V> { #[stable(feature = "rust1", since = "1.0.0")] impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> { fn clone(&self) -> BTreeMap<K, V> { - fn clone_subtree<K: Clone, V: Clone>(node: node::NodeRef<marker::Immut, - K, - V, - marker::LeafOrInternal>) - -> BTreeMap<K, V> { - + fn clone_subtree<'a, K: Clone, V: Clone>( + node: node::NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> + ) -> BTreeMap<K, V> + where K: 'a, V: 'a, + { match node.force() { Leaf(leaf) => { let mut out_tree = BTreeMap { @@ -213,7 +212,16 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> { } } - clone_subtree(self.root.as_ref()) + if self.len() == 0 { + // Ideally we'd call `BTreeMap::new` here, but that has the `K: + // Ord` constraint, which this method lacks. + BTreeMap { + root: node::Root::shared_empty_root(), + length: 0, + } + } else { + clone_subtree(self.root.as_ref()) + } } } @@ -246,6 +254,7 @@ impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()> } fn replace(&mut self, key: K) -> Option<K> { + self.ensure_root_is_owned(); match search::search_tree::<marker::Mut, K, (), K>(self.root.as_mut(), &key) { Found(handle) => Some(mem::replace(handle.into_kv_mut().0, key)), GoDown(handle) => { @@ -523,7 +532,7 @@ impl<K: Ord, V> BTreeMap<K, V> { #[stable(feature = "rust1", since = "1.0.0")] pub fn new() -> BTreeMap<K, V> { BTreeMap { - root: node::Root::new_leaf(), + root: node::Root::shared_empty_root(), length: 0, } } @@ -544,7 +553,6 @@ impl<K: Ord, V> BTreeMap<K, V> { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { - // FIXME(gereeter) .clear() allocates *self = BTreeMap::new(); } @@ -576,6 +584,33 @@ impl<K: Ord, V> BTreeMap<K, V> { } } + /// Returns the key-value pair corresponding to the supplied key. + /// + /// The supplied key may be any borrowed form of the map's key type, but the ordering + /// on the borrowed form *must* match the ordering on the key type. + /// + /// # Examples + /// + /// ``` + /// #![feature(map_get_key_value)] + /// use std::collections::BTreeMap; + /// + /// let mut map = BTreeMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); + /// assert_eq!(map.get_key_value(&2), None); + /// ``` + #[unstable(feature = "map_get_key_value", issue = "49347")] + pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)> + where K: Borrow<Q>, + Q: Ord + { + match search::search_tree(self.root.as_ref(), k) { + Found(handle) => Some(handle.into_kv()), + GoDown(_) => None, + } + } + /// Returns `true` if the map contains a value for the specified key. 
/// /// The key may be any borrowed form of the map's key type, but the ordering @@ -777,7 +812,7 @@ impl<K: Ord, V> BTreeMap<K, V> { /// /// ``` /// use std::collections::BTreeMap; - /// use std::collections::Bound::Included; + /// use std::ops::Bound::Included; /// /// let mut map = BTreeMap::new(); /// map.insert(3, "a"); @@ -790,7 +825,7 @@ impl<K: Ord, V> BTreeMap<K, V> { /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range<T: ?Sized, R>(&self, range: R) -> Range<K, V> - where T: Ord, K: Borrow<T>, R: RangeArgument<T> + where T: Ord, K: Borrow<T>, R: RangeBounds<T> { let root1 = self.root.as_ref(); let root2 = self.root.as_ref(); @@ -830,7 +865,7 @@ impl<K: Ord, V> BTreeMap<K, V> { /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range_mut<T: ?Sized, R>(&mut self, range: R) -> RangeMut<K, V> - where T: Ord, K: Borrow<T>, R: RangeArgument<T> + where T: Ord, K: Borrow<T>, R: RangeBounds<T> { let root1 = self.root.as_mut(); let root2 = unsafe { ptr::read(&root1) }; @@ -863,6 +898,8 @@ impl<K: Ord, V> BTreeMap<K, V> { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn entry(&mut self, key: K) -> Entry<K, V> { + // FIXME(@porglezomp) Avoid allocating if we don't insert + self.ensure_root_is_owned(); match search::search_tree(self.root.as_mut(), &key) { Found(handle) => { Occupied(OccupiedEntry { @@ -883,6 +920,7 @@ impl<K: Ord, V> BTreeMap<K, V> { } fn from_sorted_iter<I: Iterator<Item = (K, V)>>(&mut self, iter: I) { + self.ensure_root_is_owned(); let mut cur_node = last_leaf_edge(self.root.as_mut()).into_node(); // Iterate through all key-value pairs, pushing them into nodes at the right level. for (key, value) in iter { @@ -992,6 +1030,7 @@ impl<K: Ord, V> BTreeMap<K, V> { let total_num = self.len(); let mut right = Self::new(); + right.root = node::Root::new_leaf(); for _ in 0..(self.root.as_ref().height()) { right.root.push_level(); } @@ -1040,7 +1079,11 @@ impl<K: Ord, V> BTreeMap<K, V> { /// Calculates the number of elements if it is incorrect. fn recalc_length(&mut self) { - fn dfs<K, V>(node: NodeRef<marker::Immut, K, V, marker::LeafOrInternal>) -> usize { + fn dfs<'a, K, V>( + node: NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> + ) -> usize + where K: 'a, V: 'a + { let mut res = node.len(); if let Internal(node) = node.force() { @@ -1126,6 +1169,13 @@ impl<K: Ord, V> BTreeMap<K, V> { self.fix_top(); } + + /// If the root node is the shared root node, allocate our own node. 
+ fn ensure_root_is_owned(&mut self) { + if self.root.is_shared_root() { + self.root = node::Root::new_leaf(); + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1156,7 +1206,7 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Iter<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1235,7 +1285,7 @@ impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1260,10 +1310,13 @@ impl<K, V> IntoIterator for BTreeMap<K, V> { #[stable(feature = "btree_drop", since = "1.7.0")] impl<K, V> Drop for IntoIter<K, V> { fn drop(&mut self) { - for _ in &mut *self { - } + self.for_each(drop); unsafe { let leaf_node = ptr::read(&self.front).into_node(); + if leaf_node.is_shared_root() { + return; + } + if let Some(first_parent) = leaf_node.deallocate_and_ascend() { let mut cur_node = first_parent.into_node(); while let Some(parent) = cur_node.deallocate_and_ascend() { @@ -1365,7 +1418,7 @@ impl<K, V> ExactSizeIterator for IntoIter<K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for IntoIter<K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1395,7 +1448,7 @@ impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Keys<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1432,7 +1485,7 @@ impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Values<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1482,7 +1535,7 @@ impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {} @@ -1561,7 +1614,7 @@ impl<'a, K, V> Range<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Range<'a, K, V> {} #[stable(feature = "btree_range", since = "1.17.0")] @@ -1630,7 +1683,7 @@ impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {} impl<'a, K, V> RangeMut<'a, K, V> { @@ -1748,6 +1801,11 @@ impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap<K, V> { type Output = V; + /// Returns a reference to the value corresponding to the supplied key. + /// + /// # Panics + /// + /// Panics if the key is not present in the `BTreeMap`. 
#[inline] fn index(&self, key: &Q) -> &V { self.get(key).expect("no entry found for key") @@ -1780,7 +1838,7 @@ fn last_leaf_edge<BorrowType, K, V> } } -fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeArgument<Q>>( +fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeBounds<Q>>( root1: NodeRef<BorrowType, K, V, marker::LeafOrInternal>, root2: NodeRef<BorrowType, K, V, marker::LeafOrInternal>, range: R @@ -1788,7 +1846,7 @@ fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeArgument<Q>>( Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>) where Q: Ord, K: Borrow<Q> { - match (range.start(), range.end()) { + match (range.start_bound(), range.end_bound()) { (Excluded(s), Excluded(e)) if s==e => panic!("range start and end are equal and excluded in BTreeMap"), (Included(s), Included(e)) | @@ -1806,7 +1864,7 @@ fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeArgument<Q>>( let mut diverged = false; loop { - let min_edge = match (min_found, range.start()) { + let min_edge = match (min_found, range.start_bound()) { (false, Included(key)) => match search::search_linear(&min_node, key) { (i, true) => { min_found = true; i }, (i, false) => i, @@ -1820,7 +1878,7 @@ fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeArgument<Q>>( (true, Excluded(_)) => 0, }; - let max_edge = match (max_found, range.end()) { + let max_edge = match (max_found, range.end_bound()) { (false, Included(key)) => match search::search_linear(&max_node, key) { (i, true) => { max_found = true; i+1 }, (i, false) => i, @@ -2109,7 +2167,6 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// # Examples /// /// ``` - /// #![feature(entry_and_modify)] /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); @@ -2124,9 +2181,9 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// .or_insert(42); /// assert_eq!(map["poneyland"], 43); /// ``` - #[unstable(feature = "entry_and_modify", issue = "44733")] - pub fn and_modify<F>(self, mut f: F) -> Self - where F: FnMut(&mut V) + #[stable(feature = "entry_and_modify", since = "1.26.0")] + pub fn and_modify<F>(self, f: F) -> Self + where F: FnOnce(&mut V) { match self { Occupied(mut entry) => { @@ -2139,14 +2196,13 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { } impl<'a, K: Ord, V: Default> Entry<'a, K, V> { - #[unstable(feature = "entry_or_default", issue = "44324")] + #[stable(feature = "entry_or_default", since = "1.28.0")] /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// /// # Examples /// /// ``` - /// #![feature(entry_or_default)] /// # fn main() { /// use std::collections::BTreeMap; /// @@ -2324,6 +2380,11 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// Gets a mutable reference to the value in the entry. /// + /// If you need a reference to the `OccupiedEntry` which may outlive the + /// destruction of the `Entry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// /// # Examples /// /// ``` @@ -2335,9 +2396,13 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map["poneyland"], 12); /// if let Entry::Occupied(mut o) = map.entry("poneyland") { - /// *o.get_mut() += 10; + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same Entry multiple times. 
+ /// *o.get_mut() += 2; /// } - /// assert_eq!(map["poneyland"], 22); + /// assert_eq!(map["poneyland"], 24); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut(&mut self) -> &mut V { @@ -2346,6 +2411,10 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// Converts the entry into a mutable reference to its value. /// + /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. + /// + /// [`get_mut`]: #method.get_mut + /// /// # Examples /// /// ``` diff --git a/src/liballoc/btree/mod.rs b/src/liballoc/collections/btree/mod.rs index 087c9f228d4..087c9f228d4 100644 --- a/src/liballoc/btree/mod.rs +++ b/src/liballoc/collections/btree/mod.rs diff --git a/src/liballoc/btree/node.rs b/src/liballoc/collections/btree/node.rs index c1618043ce6..0ae45b31232 100644 --- a/src/liballoc/btree/node.rs +++ b/src/liballoc/collections/btree/node.rs @@ -43,12 +43,11 @@ use core::marker::PhantomData; use core::mem; -use core::nonzero::NonZero; -use core::ptr::{self, Unique}; +use core::ptr::{self, Unique, NonNull}; use core::slice; +use alloc::{Global, Alloc, Layout}; use boxed::Box; -use heap::{Heap, Alloc, Layout}; const B: usize = 6; pub const MIN_LEN: usize = B - 1; @@ -61,12 +60,12 @@ pub const CAPACITY: usize = 2 * B - 1; /// /// See also rust-lang/rfcs#197, which would make this structure significantly more safe by /// avoiding accidentally dropping unused and uninitialized keys and values. +/// +/// We put the metadata first so that its position is the same for every `K` and `V`, in order +/// to statically allocate a single dummy node to avoid allocations. This struct is `repr(C)` to +/// prevent them from being reordered. +#[repr(C)] struct LeafNode<K, V> { - /// The arrays storing the actual data of the node. Only the first `len` elements of each - /// array are initialized and valid. - keys: [K; CAPACITY], - vals: [V; CAPACITY], - /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`. /// This either points to an actual node or is null. parent: *const InternalNode<K, V>, @@ -78,10 +77,14 @@ struct LeafNode<K, V> { /// The number of keys and values this node stores. /// - /// This is at the end of the node's representation and next to `parent_idx` to encourage - /// the compiler to join `len` and `parent_idx` into the same 32-bit word, reducing space - /// overhead. + /// This next to `parent_idx` to encourage the compiler to join `len` and + /// `parent_idx` into the same 32-bit word, reducing space overhead. len: u16, + + /// The arrays storing the actual data of the node. Only the first `len` elements of each + /// array are initialized and valid. + keys: [K; CAPACITY], + vals: [V; CAPACITY], } impl<K, V> LeafNode<K, V> { @@ -98,8 +101,26 @@ impl<K, V> LeafNode<K, V> { len: 0 } } + + fn is_shared_root(&self) -> bool { + ptr::eq(self, &EMPTY_ROOT_NODE as *const _ as *const _) + } } +// We need to implement Sync here in order to make a static instance. +unsafe impl Sync for LeafNode<(), ()> {} + +// An empty node used as a placeholder for the root node, to avoid allocations. +// We use () in order to save space, since no operation on an empty tree will +// ever take a pointer past the first key. +static EMPTY_ROOT_NODE: LeafNode<(), ()> = LeafNode { + parent: ptr::null(), + parent_idx: 0, + len: 0, + keys: [(); CAPACITY], + vals: [(); CAPACITY], +}; + /// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden /// behind `BoxedNode`s to prevent dropping uninitialized keys and values. 
Any pointer to an /// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the @@ -149,14 +170,12 @@ impl<K, V> BoxedNode<K, V> { } } - unsafe fn from_ptr(ptr: NonZero<*const LeafNode<K, V>>) -> Self { - BoxedNode { ptr: Unique::new_unchecked(ptr.get() as *mut LeafNode<K, V>) } + unsafe fn from_ptr(ptr: NonNull<LeafNode<K, V>>) -> Self { + BoxedNode { ptr: Unique::from(ptr) } } - fn as_ptr(&self) -> NonZero<*const LeafNode<K, V>> { - unsafe { - NonZero::from(self.ptr.as_ref()) - } + fn as_ptr(&self) -> NonNull<LeafNode<K, V>> { + NonNull::from(self.ptr) } } @@ -171,6 +190,21 @@ unsafe impl<K: Sync, V: Sync> Sync for Root<K, V> { } unsafe impl<K: Send, V: Send> Send for Root<K, V> { } impl<K, V> Root<K, V> { + pub fn is_shared_root(&self) -> bool { + self.as_ref().is_shared_root() + } + + pub fn shared_empty_root() -> Self { + Root { + node: unsafe { + BoxedNode::from_ptr(NonNull::new_unchecked( + &EMPTY_ROOT_NODE as *const _ as *const LeafNode<K, V> as *mut _ + )) + }, + height: 0, + } + } + pub fn new_leaf() -> Self { Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), @@ -212,6 +246,7 @@ impl<K, V> Root<K, V> { /// new node the root. This increases the height by 1 and is the opposite of `pop_level`. pub fn push_level(&mut self) -> NodeRef<marker::Mut, K, V, marker::Internal> { + debug_assert!(!self.is_shared_root()); let mut new_node = Box::new(unsafe { InternalNode::new() }); new_node.edges[0] = unsafe { BoxedNode::from_ptr(self.node.as_ptr()) }; @@ -239,7 +274,7 @@ impl<K, V> Root<K, V> { pub fn pop_level(&mut self) { debug_assert!(self.height > 0); - let top = self.node.ptr.as_ptr() as *mut u8; + let top = self.node.ptr; self.node = unsafe { BoxedNode::from_ptr(self.as_mut() @@ -252,7 +287,7 @@ impl<K, V> Root<K, V> { self.as_mut().as_leaf_mut().parent = ptr::null(); unsafe { - Heap.dealloc(top, Layout::new::<InternalNode<K, V>>()); + Global.dealloc(NonNull::from(top).cast(), Layout::new::<InternalNode<K, V>>()); } } } @@ -276,7 +311,7 @@ impl<K, V> Root<K, V> { /// `NodeRef` could be pointing to either type of node. pub struct NodeRef<BorrowType, K, V, Type> { height: usize, - node: NonZero<*const LeafNode<K, V>>, + node: NonNull<LeafNode<K, V>>, // This is null unless the borrow type is `Mut` root: *const Root<K, V>, _marker: PhantomData<(BorrowType, Type)> @@ -302,7 +337,7 @@ unsafe impl<K: Send, V: Send, Type> Send impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> { fn as_internal(&self) -> &InternalNode<K, V> { unsafe { - &*(self.node.get() as *const InternalNode<K, V>) + &*(self.node.as_ptr() as *mut InternalNode<K, V>) } } } @@ -310,7 +345,7 @@ impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> { impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> { unsafe { - &mut *(self.node.get() as *mut InternalNode<K, V>) + &mut *(self.node.as_ptr() as *mut InternalNode<K, V>) } } } @@ -352,16 +387,20 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> { fn as_leaf(&self) -> &LeafNode<K, V> { unsafe { - &*self.node.get() + self.node.as_ref() } } + pub fn is_shared_root(&self) -> bool { + self.as_leaf().is_shared_root() + } + pub fn keys(&self) -> &[K] { - self.reborrow().into_slices().0 + self.reborrow().into_key_slice() } - pub fn vals(&self) -> &[V] { - self.reborrow().into_slices().1 + fn vals(&self) -> &[V] { + self.reborrow().into_val_slice() } /// Finds the parent of the current node. 
Returns `Ok(handle)` if the current @@ -382,7 +421,8 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> { >, Self > { - if let Some(non_zero) = NonZero::new(self.as_leaf().parent as *const LeafNode<K, V>) { + let parent_as_leaf = self.as_leaf().parent as *const LeafNode<K, V>; + if let Some(non_zero) = NonNull::new(parent_as_leaf as *mut _) { Ok(Handle { node: NodeRef { height: self.height + 1, @@ -435,9 +475,10 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> { marker::Edge > > { - let ptr = self.as_leaf() as *const LeafNode<K, V> as *const u8 as *mut u8; + debug_assert!(!self.is_shared_root()); + let node = self.node; let ret = self.ascend().ok(); - Heap.dealloc(ptr, Layout::new::<LeafNode<K, V>>()); + Global.dealloc(node.cast(), Layout::new::<LeafNode<K, V>>()); ret } } @@ -456,9 +497,9 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::Internal> { marker::Edge > > { - let ptr = self.as_internal() as *const InternalNode<K, V> as *const u8 as *mut u8; + let node = self.node; let ret = self.ascend().ok(); - Heap.dealloc(ptr, Layout::new::<InternalNode<K, V>>()); + Global.dealloc(node.cast(), Layout::new::<InternalNode<K, V>>()); ret } } @@ -498,34 +539,55 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { fn as_leaf_mut(&mut self) -> &mut LeafNode<K, V> { unsafe { - &mut *(self.node.get() as *mut LeafNode<K, V>) + self.node.as_mut() } } - pub fn keys_mut(&mut self) -> &mut [K] { - unsafe { self.reborrow_mut().into_slices_mut().0 } + fn keys_mut(&mut self) -> &mut [K] { + unsafe { self.reborrow_mut().into_key_slice_mut() } } - pub fn vals_mut(&mut self) -> &mut [V] { - unsafe { self.reborrow_mut().into_slices_mut().1 } + fn vals_mut(&mut self) -> &mut [V] { + unsafe { self.reborrow_mut().into_val_slice_mut() } } } impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { - pub fn into_slices(self) -> (&'a [K], &'a [V]) { - unsafe { - ( + fn into_key_slice(self) -> &'a [K] { + // When taking a pointer to the keys, if our key has a stricter + // alignment requirement than the shared root does, then the pointer + // would be out of bounds, which LLVM assumes will not happen. If the + // alignment is more strict, we need to make an empty slice that doesn't + // use an out of bounds pointer. + if mem::align_of::<K>() > mem::align_of::<LeafNode<(), ()>>() && self.is_shared_root() { + &[] + } else { + // Here either it's not the root, or the alignment is less strict, + // in which case the keys pointer will point "one-past-the-end" of + // the node, which is allowed by LLVM. 
+ unsafe { slice::from_raw_parts( self.as_leaf().keys.as_ptr(), self.len() - ), - slice::from_raw_parts( - self.as_leaf().vals.as_ptr(), - self.len() ) + } + } + } + + fn into_val_slice(self) -> &'a [V] { + debug_assert!(!self.is_shared_root()); + unsafe { + slice::from_raw_parts( + self.as_leaf().vals.as_ptr(), + self.len() ) } } + + fn into_slices(self) -> (&'a [K], &'a [V]) { + let k = unsafe { ptr::read(&self) }; + (k.into_key_slice(), self.into_val_slice()) + } } impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { @@ -537,20 +599,33 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { } } - pub fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) { - unsafe { - ( + fn into_key_slice_mut(mut self) -> &'a mut [K] { + if mem::align_of::<K>() > mem::align_of::<LeafNode<(), ()>>() && self.is_shared_root() { + &mut [] + } else { + unsafe { slice::from_raw_parts_mut( &mut self.as_leaf_mut().keys as *mut [K] as *mut K, self.len() - ), - slice::from_raw_parts_mut( - &mut self.as_leaf_mut().vals as *mut [V] as *mut V, - self.len() ) + } + } + } + + fn into_val_slice_mut(mut self) -> &'a mut [V] { + debug_assert!(!self.is_shared_root()); + unsafe { + slice::from_raw_parts_mut( + &mut self.as_leaf_mut().vals as *mut [V] as *mut V, + self.len() ) } } + + fn into_slices_mut(self) -> (&'a mut [K], &'a mut [V]) { + let k = unsafe { ptr::read(&self) }; + (k.into_key_slice_mut(), self.into_val_slice_mut()) + } } impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> { @@ -558,6 +633,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> { pub fn push(&mut self, key: K, val: V) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() < CAPACITY); + debug_assert!(!self.is_shared_root()); let idx = self.len(); @@ -573,6 +649,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> { pub fn push_front(&mut self, key: K, val: V) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() < CAPACITY); + debug_assert!(!self.is_shared_root()); unsafe { slice_insert(self.keys_mut(), 0, key); @@ -886,6 +963,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge fn insert_fit(&mut self, key: K, val: V) -> *mut V { // Necessary for correctness, but in a private module debug_assert!(self.node.len() < CAPACITY); + debug_assert!(!self.node.is_shared_root()); unsafe { slice_insert(self.node.keys_mut(), self.idx, key); @@ -1063,6 +1141,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> /// allocated node. pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, K, V, Root<K, V>) { + debug_assert!(!self.node.is_shared_root()); unsafe { let mut new_node = Box::new(LeafNode::new()); @@ -1100,6 +1179,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> /// now adjacent key/value pairs to the left and right of this handle. 
pub fn remove(mut self) -> (Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>, K, V) { + debug_assert!(!self.node.is_shared_root()); unsafe { let k = slice_remove(self.node.keys_mut(), self.idx); let v = slice_remove(self.node.vals_mut(), self.idx); @@ -1240,13 +1320,13 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker:: ).correct_parent_link(); } - Heap.dealloc( - right_node.node.get() as *mut u8, + Global.dealloc( + right_node.node.cast(), Layout::new::<InternalNode<K, V>>(), ); } else { - Heap.dealloc( - right_node.node.get() as *mut u8, + Global.dealloc( + right_node.node.cast(), Layout::new::<LeafNode<K, V>>(), ); } diff --git a/src/liballoc/btree/search.rs b/src/liballoc/collections/btree/search.rs index bc1272fbc78..bc1272fbc78 100644 --- a/src/liballoc/btree/search.rs +++ b/src/liballoc/collections/btree/search.rs diff --git a/src/liballoc/btree/set.rs b/src/liballoc/collections/btree/set.rs index e094070fc3d..af9a7074e4a 100644 --- a/src/liballoc/btree/set.rs +++ b/src/liballoc/collections/btree/set.rs @@ -16,12 +16,11 @@ use core::cmp::{min, max}; use core::fmt::Debug; use core::fmt; use core::iter::{Peekable, FromIterator, FusedIterator}; -use core::ops::{BitOr, BitAnd, BitXor, Sub}; +use core::ops::{BitOr, BitAnd, BitXor, Sub, RangeBounds}; use borrow::Borrow; -use btree_map::{BTreeMap, Keys}; +use collections::btree_map::{self, BTreeMap, Keys}; use super::Recover; -use range::RangeArgument; // FIXME(conventions): implement bounded iterators @@ -105,7 +104,7 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct IntoIter<T> { - iter: ::btree_map::IntoIter<T, ()>, + iter: btree_map::IntoIter<T, ()>, } /// An iterator over a sub-range of items in a `BTreeSet`. @@ -118,7 +117,7 @@ pub struct IntoIter<T> { #[derive(Debug)] #[stable(feature = "btree_range", since = "1.17.0")] pub struct Range<'a, T: 'a> { - iter: ::btree_map::Range<'a, T, ()>, + iter: btree_map::Range<'a, T, ()>, } /// A lazy iterator producing elements in the difference of `BTreeSet`s. 
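Node deallocation now passes a typed `NonNull` through `cast()` to `Global.dealloc` rather than hand-casting to `*mut u8`. The `Global`/`Alloc` machinery is still unstable, so this sketch approximates the same shape with the free functions stabilized earlier in this series; the `Node` stand-in type is illustrative:

```
use std::alloc::{alloc, dealloc, Layout};
use std::ptr::NonNull;

// Stand-in for LeafNode<K, V>; the real layout comes from the node types.
struct Node {
    _len: u16,
    _keys: [u64; 11],
}

fn main() {
    let layout = Layout::new::<Node>();
    unsafe {
        // Keep the allocation as a typed NonNull, as the tree code does.
        let node: NonNull<Node> = NonNull::new(alloc(layout) as *mut Node)
            .expect("allocation failed");
        // `NonNull::cast` recovers the untyped pointer without integer casts.
        dealloc(node.cast::<u8>().as_ptr(), layout);
    }
}
```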
@@ -240,7 +239,7 @@ impl<T: Ord> BTreeSet<T> { /// /// ``` /// use std::collections::BTreeSet; - /// use std::collections::Bound::Included; + /// use std::ops::Bound::Included; /// /// let mut set = BTreeSet::new(); /// set.insert(3); @@ -253,7 +252,7 @@ impl<T: Ord> BTreeSet<T> { /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range<K: ?Sized, R>(&self, range: R) -> Range<T> - where K: Ord, T: Borrow<K>, R: RangeArgument<K> + where K: Ord, T: Borrow<K>, R: RangeBounds<K> { Range { iter: self.map.range(range) } } @@ -658,26 +657,26 @@ impl<T: Ord> BTreeSet<T> { /// Basic usage: /// /// ``` - /// use std::collections::BTreeMap; + /// use std::collections::BTreeSet; /// - /// let mut a = BTreeMap::new(); - /// a.insert(1, "a"); - /// a.insert(2, "b"); - /// a.insert(3, "c"); - /// a.insert(17, "d"); - /// a.insert(41, "e"); + /// let mut a = BTreeSet::new(); + /// a.insert(1); + /// a.insert(2); + /// a.insert(3); + /// a.insert(17); + /// a.insert(41); /// /// let b = a.split_off(&3); /// /// assert_eq!(a.len(), 2); /// assert_eq!(b.len(), 3); /// - /// assert_eq!(a[&1], "a"); - /// assert_eq!(a[&2], "b"); + /// assert!(a.contains(&1)); + /// assert!(a.contains(&2)); /// - /// assert_eq!(b[&3], "c"); - /// assert_eq!(b[&17], "d"); - /// assert_eq!(b[&41], "e"); + /// assert!(b.contains(&3)); + /// assert!(b.contains(&17)); + /// assert!(b.contains(&41)); /// ``` #[stable(feature = "btree_split_off", since = "1.11.0")] pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self where T: Borrow<Q> { @@ -946,7 +945,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { fn len(&self) -> usize { self.iter.len() } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -971,7 +970,7 @@ impl<T> ExactSizeIterator for IntoIter<T> { fn len(&self) -> usize { self.iter.len() } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} #[stable(feature = "btree_range", since = "1.17.0")] @@ -997,7 +996,7 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Range<'a, T> {} /// Compare `x` and `y`, but return `short` if x is None and `long` if y is None @@ -1044,7 +1043,7 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Difference<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1078,7 +1077,7 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1116,7 +1115,7 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1150,5 +1149,5 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Union<'a, T> {} diff --git a/src/liballoc/linked_list.rs 
b/src/liballoc/collections/linked_list.rs index 3ac5a85d721..9844de9a57d 100644 --- a/src/liballoc/linked_list.rs +++ b/src/liballoc/collections/linked_list.rs @@ -28,10 +28,9 @@ use core::hash::{Hasher, Hash}; use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem; -use core::ops::{BoxPlace, InPlace, Place, Placer}; -use core::ptr::{self, Shared}; +use core::ptr::NonNull; -use boxed::{Box, IntermediateBox}; +use boxed::Box; use super::SpecExtend; /// A doubly-linked list with owned nodes. @@ -44,15 +43,15 @@ use super::SpecExtend; /// more memory efficient and make better use of CPU cache. #[stable(feature = "rust1", since = "1.0.0")] pub struct LinkedList<T> { - head: Option<Shared<Node<T>>>, - tail: Option<Shared<Node<T>>>, + head: Option<NonNull<Node<T>>>, + tail: Option<NonNull<Node<T>>>, len: usize, marker: PhantomData<Box<Node<T>>>, } struct Node<T> { - next: Option<Shared<Node<T>>>, - prev: Option<Shared<Node<T>>>, + next: Option<NonNull<Node<T>>>, + prev: Option<NonNull<Node<T>>>, element: T, } @@ -65,8 +64,8 @@ struct Node<T> { /// [`LinkedList`]: struct.LinkedList.html #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { - head: Option<Shared<Node<T>>>, - tail: Option<Shared<Node<T>>>, + head: Option<NonNull<Node<T>>>, + tail: Option<NonNull<Node<T>>>, len: usize, marker: PhantomData<&'a Node<T>>, } @@ -98,8 +97,8 @@ impl<'a, T> Clone for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { list: &'a mut LinkedList<T>, - head: Option<Shared<Node<T>>>, - tail: Option<Shared<Node<T>>>, + head: Option<NonNull<Node<T>>>, + tail: Option<NonNull<Node<T>>>, len: usize, } @@ -157,7 +156,7 @@ impl<T> LinkedList<T> { unsafe { node.next = self.head; node.prev = None; - let node = Some(Shared::from(Box::into_unique(node))); + let node = Some(Box::into_raw_non_null(node)); match self.head { None => self.tail = node, @@ -192,7 +191,7 @@ impl<T> LinkedList<T> { unsafe { node.next = None; node.prev = self.tail; - let node = Some(Shared::from(Box::into_unique(node))); + let node = Some(Box::into_raw_non_null(node)); match self.tail { None => self.head = node, @@ -225,7 +224,7 @@ impl<T> LinkedList<T> { /// /// Warning: this will not check that the provided node belongs to the current list. #[inline] - unsafe fn unlink_node(&mut self, mut node: Shared<Node<T>>) { + unsafe fn unlink_node(&mut self, mut node: NonNull<Node<T>>) { let node = node.as_mut(); match node.prev { @@ -747,8 +746,8 @@ impl<T> LinkedList<T> { /// Creates an iterator which uses a closure to determine if an element should be removed. /// /// If the closure returns true, then the element is removed and yielded. - /// If the closure returns false, it will try again, and call the closure on the next element, - /// seeing if it passes the test. + /// If the closure returns false, the element will remain in the list and will not be yielded + /// by the iterator. /// /// Note that `drain_filter` lets you mutate every element in the filter closure, regardless of /// whether you choose to keep or remove it. @@ -786,62 +785,6 @@ impl<T> LinkedList<T> { old_len: old_len, } } - - /// Returns a place for insertion at the front of the list. - /// - /// Using this method with placement syntax is equivalent to - /// [`push_front`](#method.push_front), but may be more efficient. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::LinkedList; - /// - /// let mut list = LinkedList::new(); - /// list.front_place() <- 2; - /// list.front_place() <- 4; - /// assert!(list.iter().eq(&[4, 2])); - /// ``` - #[unstable(feature = "collection_placement", - reason = "method name and placement protocol are subject to change", - issue = "30172")] - pub fn front_place(&mut self) -> FrontPlace<T> { - FrontPlace { - list: self, - node: IntermediateBox::make_place(), - } - } - - /// Returns a place for insertion at the back of the list. - /// - /// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back), - /// but may be more efficient. - /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::LinkedList; - /// - /// let mut list = LinkedList::new(); - /// list.back_place() <- 2; - /// list.back_place() <- 4; - /// assert!(list.iter().eq(&[2, 4])); - /// ``` - #[unstable(feature = "collection_placement", - reason = "method name and placement protocol are subject to change", - issue = "30172")] - pub fn back_place(&mut self) -> BackPlace<T> { - BackPlace { - list: self, - node: IntermediateBox::make_place(), - } - } } #[stable(feature = "rust1", since = "1.0.0")] @@ -897,7 +840,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -946,7 +889,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for IterMut<'a, T> {} impl<'a, T> IterMut<'a, T> { @@ -986,11 +929,11 @@ impl<'a, T> IterMut<'a, T> { Some(prev) => prev, }; - let node = Some(Shared::from(Box::into_unique(box Node { + let node = Some(Box::into_raw_non_null(box Node { next: Some(head), prev: Some(prev), element, - }))); + })); prev.as_mut().next = node; head.as_mut().prev = node; @@ -1038,7 +981,7 @@ pub struct DrainFilter<'a, T: 'a, F: 'a> where F: FnMut(&mut T) -> bool, { list: &'a mut LinkedList<T>, - it: Option<Shared<Node<T>>>, + it: Option<NonNull<Node<T>>>, pred: F, idx: usize, old_len: usize, @@ -1076,7 +1019,7 @@ impl<'a, T, F> Drop for DrainFilter<'a, T, F> where F: FnMut(&mut T) -> bool, { fn drop(&mut self) { - for _ in self { } + self.for_each(drop); } } @@ -1117,7 +1060,7 @@ impl<T> DoubleEndedIterator for IntoIter<T> { #[stable(feature = "rust1", since = "1.0.0")] impl<T> ExactSizeIterator for IntoIter<T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1242,123 +1185,6 @@ impl<T: Hash> Hash for LinkedList<T> { } } -unsafe fn finalize<T>(node: IntermediateBox<Node<T>>) -> Box<Node<T>> { - let mut node = node.finalize(); - ptr::write(&mut node.next, None); - ptr::write(&mut node.prev, None); - node -} - -/// A place for insertion at the front of a `LinkedList`. 
-/// -/// See [`LinkedList::front_place`](struct.LinkedList.html#method.front_place) for details. -#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -pub struct FrontPlace<'a, T: 'a> { - list: &'a mut LinkedList<T>, - node: IntermediateBox<Node<T>>, -} - -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for FrontPlace<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("FrontPlace") - .field(&self.list) - .finish() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer<T> for FrontPlace<'a, T> { - type Place = Self; - - fn make_place(self) -> Self { - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place<T> for FrontPlace<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { &mut (*self.node.pointer()).element } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace<T> for FrontPlace<'a, T> { - type Owner = (); - - unsafe fn finalize(self) { - let FrontPlace { list, node } = self; - list.push_front_node(finalize(node)); - } -} - -/// A place for insertion at the back of a `LinkedList`. -/// -/// See [`LinkedList::back_place`](struct.LinkedList.html#method.back_place) for details. -#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -pub struct BackPlace<'a, T: 'a> { - list: &'a mut LinkedList<T>, - node: IntermediateBox<Node<T>>, -} - -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for BackPlace<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("BackPlace") - .field(&self.list) - .finish() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer<T> for BackPlace<'a, T> { - type Place = Self; - - fn make_place(self) -> Self { - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place<T> for BackPlace<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { &mut (*self.node.pointer()).element } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace<T> for BackPlace<'a, T> { - type Owner = (); - - unsafe fn finalize(self) { - let BackPlace { list, node } = self; - list.push_back_node(finalize(node)); - } -} - // Ensure that `LinkedList` and its read-only iterators are covariant in their type parameters. #[allow(dead_code)] fn assert_covariance() { diff --git a/src/liballoc/collections/mod.rs b/src/liballoc/collections/mod.rs new file mode 100644 index 00000000000..96e0eb633b2 --- /dev/null +++ b/src/liballoc/collections/mod.rs @@ -0,0 +1,88 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Collection types. + +#![stable(feature = "rust1", since = "1.0.0")] + +pub mod binary_heap; +mod btree; +pub mod linked_list; +pub mod vec_deque; + +#[stable(feature = "rust1", since = "1.0.0")] +pub mod btree_map { + //! A map based on a B-Tree. + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::btree::map::*; +} + +#[stable(feature = "rust1", since = "1.0.0")] +pub mod btree_set { + //! A set based on a B-Tree. + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::btree::set::*; +} + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::binary_heap::BinaryHeap; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::btree_map::BTreeMap; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::btree_set::BTreeSet; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::linked_list::LinkedList; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::vec_deque::VecDeque; + +use alloc::{AllocErr, LayoutErr}; + +/// Augments `AllocErr` with a CapacityOverflow variant. +#[derive(Clone, PartialEq, Eq, Debug)] +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +pub enum CollectionAllocErr { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + /// Error due to the allocator (see the `AllocErr` type's docs). + AllocErr, +} + +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +impl From<AllocErr> for CollectionAllocErr { + #[inline] + fn from(AllocErr: AllocErr) -> Self { + CollectionAllocErr::AllocErr + } +} + +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +impl From<LayoutErr> for CollectionAllocErr { + #[inline] + fn from(_: LayoutErr) -> Self { + CollectionAllocErr::CapacityOverflow + } +} + +/// An intermediate trait for specialization of `Extend`. +#[doc(hidden)] +trait SpecExtend<I: IntoIterator> { + /// Extends `self` with the contents of the given iterator. 
+ fn spec_extend(&mut self, iter: I); } diff --git a/src/liballoc/vec_deque.rs b/src/liballoc/collections/vec_deque.rs index f56aa23a4eb..0f759bb8f0b 100644 --- a/src/liballoc/vec_deque.rs +++ b/src/liballoc/collections/vec_deque.rs @@ -21,19 +21,18 @@ use core::cmp::Ordering; use core::fmt; use core::iter::{repeat, FromIterator, FusedIterator}; use core::mem; -use core::ops::{Index, IndexMut, Place, Placer, InPlace}; +use core::ops::Bound::{Excluded, Included, Unbounded}; +use core::ops::{Index, IndexMut, RangeBounds}; use core::ptr; -use core::ptr::Shared; +use core::ptr::NonNull; use core::slice; use core::hash::{Hash, Hasher}; use core::cmp; +use collections::CollectionAllocErr; use raw_vec::RawVec; - -use super::range::RangeArgument; -use Bound::{Excluded, Included, Unbounded}; -use super::vec::Vec; +use vec::Vec; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 @@ -203,6 +202,23 @@ impl<T> VecDeque<T> { len); } + /// Returns a pair of slices which contain the contents of the buffer not used by the VecDeque. + #[inline] + unsafe fn unused_as_mut_slices<'a>(&'a mut self) -> (&'a mut [T], &'a mut [T]) { + let head = self.head; + let tail = self.tail; + let buf = self.buffer_as_mut_slice(); + if head != tail { + // In buf, head..tail contains the VecDeque and tail..head is unused. + // So calling `ring_slices` with tail and head swapped returns unused slices. + RingSlices::ring_slices(buf, tail, head) + } else { + // Swapping doesn't help when head == tail. + let (before, after) = buf.split_at_mut(head); + (after, before) + } + } + /// Copies a potentially wrapping block of memory len long from src to dest. /// (abs(dst - src) + len) must be no larger than cap() (There must be at /// most one continuous overlapping region between src and dest). @@ -566,6 +582,97 @@ impl<T> VecDeque<T> { } } + /// Tries to reserve the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `VecDeque<T>`. After calling `reserve_exact`, + /// capacity will be greater than or equal to `self.len() + additional`. + /// Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity cannot be relied upon to be precisely + /// minimal. Prefer `reserve` if future insertions are expected. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// use std::collections::VecDeque; + /// + /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, CollectionAllocErr> { + /// let mut output = VecDeque::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve_exact(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.extend(data.iter().map(|&val| { + /// val * 2 + 5 // very complicated + /// })); + /// + /// Ok(output) + /// } + /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.try_reserve(additional) + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `VecDeque<T>`.
The collection may reserve more space to avoid + /// frequent reallocations. After calling `reserve`, capacity will be + /// greater than or equal to `self.len() + additional`. Does nothing if + /// capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// use std::collections::VecDeque; + /// + /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, CollectionAllocErr> { + /// let mut output = VecDeque::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.extend(data.iter().map(|&val| { + /// val * 2 + 5 // very complicated + /// })); + /// + /// Ok(output) + /// } + /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + let old_cap = self.cap(); + let used_cap = self.len() + 1; + let new_cap = used_cap.checked_add(additional) + .and_then(|needed_cap| needed_cap.checked_next_power_of_two()) + .ok_or(CollectionAllocErr::CapacityOverflow)?; + + if new_cap > old_cap { + self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?; + unsafe { + self.handle_cap_increase(old_cap); + } + } + Ok(()) + } + /// Shrinks the capacity of the `VecDeque` as much as possible. /// /// It will drop down as close as possible to the length but the allocator may still inform the @@ -584,9 +691,42 @@ impl<T> VecDeque<T> { /// ``` #[stable(feature = "deque_extras_15", since = "1.5.0")] pub fn shrink_to_fit(&mut self) { + self.shrink_to(0); + } + + /// Shrinks the capacity of the `VecDeque` with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// use std::collections::VecDeque; + /// + /// let mut buf = VecDeque::with_capacity(15); + /// buf.extend(0..4); + /// assert_eq!(buf.capacity(), 15); + /// buf.shrink_to(6); + /// assert!(buf.capacity() >= 6); + /// buf.shrink_to(0); + /// assert!(buf.capacity() >= 4); + /// ``` + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity"); + // +1 since the ringbuffer always leaves one space empty // len + 1 can't overflow for an existing, well-formed ringbuffer. - let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); + let target_cap = cmp::max( + cmp::max(min_capacity, self.len()) + 1, + MINIMUM_CAPACITY + 1 + ).next_power_of_two(); + if target_cap < self.cap() { // There are three cases of interest: // All elements are out of desired bounds @@ -844,7 +984,7 @@ impl<T> VecDeque<T> { #[inline] #[stable(feature = "drain", since = "1.6.0")] pub fn drain<R>(&mut self, range: R) -> Drain<T> - where R: RangeArgument<usize> + where R: RangeBounds<usize> { // Memory safety // @@ -857,12 +997,12 @@ impl<T> VecDeque<T> { // and the head/tail values will be restored correctly. 
// let len = self.len(); - let start = match range.start() { + let start = match range.start_bound() { Included(&n) => n, Excluded(&n) => n + 1, Unbounded => 0, }; - let end = match range.end() { + let end = match range.end_bound() { Included(&n) => n + 1, Excluded(&n) => n, Unbounded => len, @@ -895,7 +1035,7 @@ impl<T> VecDeque<T> { self.head = drain_tail; Drain { - deque: Shared::from(&mut *self), + deque: NonNull::from(&mut *self), after_tail: drain_head, after_head: head, iter: Iter { @@ -906,7 +1046,7 @@ impl<T> VecDeque<T> { } } - /// Clears the buffer, removing all values. + /// Clears the `VecDeque`, removing all values. /// /// # Examples /// @@ -1624,10 +1764,10 @@ impl<T> VecDeque<T> { return elem; } - /// Splits the collection into two at the given index. + /// Splits the `VecDeque` into two at the given index. /// - /// Returns a newly allocated `Self`. `self` contains elements `[0, at)`, - /// and the returned `Self` contains elements `[at, len)`. + /// Returns a newly allocated `VecDeque`. `self` contains elements `[0, at)`, + /// and the returned `VecDeque` contains elements `[at, len)`. /// /// Note that the capacity of `self` does not change. /// @@ -1635,7 +1775,7 @@ impl<T> VecDeque<T> { /// /// # Panics /// - /// Panics if `at > len` + /// Panics if `at > len`. /// /// # Examples /// @@ -1711,8 +1851,148 @@ impl<T> VecDeque<T> { #[inline] #[stable(feature = "append", since = "1.4.0")] pub fn append(&mut self, other: &mut Self) { - // naive impl - self.extend(other.drain(..)); + // Copies all values from `src_slice` to the start of `dst_slice`. + unsafe fn copy_whole_slice<T>(src_slice: &[T], dst_slice: &mut [T]) { + let len = src_slice.len(); + ptr::copy_nonoverlapping(src_slice.as_ptr(), dst_slice[..len].as_mut_ptr(), len); + } + + let src_total = other.len(); + + // Guarantees there is space in `self` for `other`. + self.reserve(src_total); + + self.head = { + let original_head = self.head; + + // The goal is to copy all values from `other` into `self`. To avoid any + // mismatch, all valid values in `other` are retrieved... + let (src_high, src_low) = other.as_slices(); + // and unoccupied parts of self are retrieved. + let (dst_high, dst_low) = unsafe { self.unused_as_mut_slices() }; + + // Then all that is needed is to copy all values from + // src (src_high and src_low) to dst (dst_high and dst_low). + // + // other [o o o . . . . . o o o o] + // [5 6 7] [1 2 3 4] + // src_low src_high + // + // self [. . . . . . o o o o . .] + // [3 4 5 6 7 .] [1 2] + // dst_low dst_high + // + // Values are not copied one by one but as slices in `copy_whole_slice`. + // What slices are used depends on various properties of src and dst. + // There are 6 cases in total: + // 1. `src` is contiguous and fits in dst_high + // 2. `src` is contiguous and does not fit in dst_high + // 3. `src` is discontiguous and fits in dst_high + // 4. `src` is discontiguous and does not fit in dst_high + // + src_high is smaller than dst_high + // 5. `src` is discontiguous and does not fit in dst_high + // + dst_high is smaller than src_high + // 6. `src` is discontiguous and does not fit in dst_high + // + dst_high is the same size as src_high + let src_contiguous = src_low.is_empty(); + let dst_high_fits_src = dst_high.len() >= src_total; + match (src_contiguous, dst_high_fits_src) { + (true, true) => { + // 1. + // other [. . . o o o . . . . . .] + // [] [1 1 1] + // + // self [. o o o o o . . . . . .] + // [.] [1 1 1 . . .] 
+ + unsafe { + copy_whole_slice(src_high, dst_high); + } + original_head + src_total + } + (true, false) => { + // 2. + // other [. . . o o o o o . . . .] + // [] [1 1 2 2 2] + // + // self [. . . . . . . o o o . .] + // [2 2 2 . . . .] [1 1] + + let (src_1, src_2) = src_high.split_at(dst_high.len()); + unsafe { + copy_whole_slice(src_1, dst_high); + copy_whole_slice(src_2, dst_low); + } + src_total - dst_high.len() + } + (false, true) => { + // 3. + // other [o o . . . . . . . o o o] + // [2 2] [1 1 1] + // + // self [. o o . . . . . . . . .] + // [.] [1 1 1 2 2 . . . .] + + let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len()); + unsafe { + copy_whole_slice(src_high, dst_1); + copy_whole_slice(src_low, dst_2); + } + original_head + src_total + } + (false, false) => { + if src_high.len() < dst_high.len() { + // 4. + // other [o o o . . . . . . o o o] + // [2 3 3] [1 1 1] + // + // self [. . . . . . o o . . . .] + // [3 3 . . . .] [1 1 1 2] + + let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len()); + let (src_2, src_3) = src_low.split_at(dst_2.len()); + unsafe { + copy_whole_slice(src_high, dst_1); + copy_whole_slice(src_2, dst_2); + copy_whole_slice(src_3, dst_low); + } + src_3.len() + } else if src_high.len() > dst_high.len() { + // 5. + // other [o o o . . . . . o o o o] + // [3 3 3] [1 1 2 2] + // + // self [. . . . . . o o o o . .] + // [2 2 3 3 3 .] [1 1] + + let (src_1, src_2) = src_high.split_at(dst_high.len()); + let (dst_2, dst_3) = dst_low.split_at_mut(src_2.len()); + unsafe { + copy_whole_slice(src_1, dst_high); + copy_whole_slice(src_2, dst_2); + copy_whole_slice(src_low, dst_3); + } + dst_2.len() + src_low.len() + } else { + // 6. + // other [o o . . . . . . . o o o] + // [2 2] [1 1 1] + // + // self [. . . . . . . o o . . .] + // [2 2 . . . . .] [1 1 1] + + unsafe { + copy_whole_slice(src_high, dst_high); + copy_whole_slice(src_low, dst_low); + } + src_low.len() + } + } + } + }; + + // Some values now exist in both `other` and `self` but are made inaccessible in `other`. + other.tail = other.head; } /// Retains only the elements specified by the predicate. @@ -1761,61 +2041,12 @@ impl<T> VecDeque<T> { debug_assert!(!self.is_full()); } } - - /// Returns a place for insertion at the back of the `VecDeque`. - /// - /// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back), - /// but may be more efficient. - /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::VecDeque; - /// - /// let mut buf = VecDeque::new(); - /// buf.place_back() <- 3; - /// buf.place_back() <- 4; - /// assert_eq!(&buf, &[3, 4]); - /// ``` - #[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] - pub fn place_back(&mut self) -> PlaceBack<T> { - PlaceBack { vec_deque: self } - } - - /// Returns a place for insertion at the front of the `VecDeque`. - /// - /// Using this method with placement syntax is equivalent to [`push_front`](#method.push_front), - /// but may be more efficient. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::VecDeque; - /// - /// let mut buf = VecDeque::new(); - /// buf.place_front() <- 3; - /// buf.place_front() <- 4; - /// assert_eq!(&buf, &[4, 3]); - /// ``` - #[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] - pub fn place_front(&mut self) -> PlaceFront<T> { - PlaceFront { vec_deque: self } - } } impl<T: Clone> VecDeque<T> { /// Modifies the `VecDeque` in-place so that `len()` is equal to new_len, - /// either by removing excess elements or by appending clones of `value` to the back. + /// either by removing excess elements from the back or by appending clones of `value` + /// to the back. /// /// # Examples /// @@ -1990,7 +2221,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} @@ -2083,7 +2314,7 @@ impl<'a, T> ExactSizeIterator for IterMut<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for IterMut<'a, T> {} /// An owning iterator over the elements of a `VecDeque`. @@ -2139,7 +2370,7 @@ impl<T> ExactSizeIterator for IntoIter<T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<T> FusedIterator for IntoIter<T> {} /// A draining iterator over the elements of a `VecDeque`. @@ -2154,7 +2385,7 @@ pub struct Drain<'a, T: 'a> { after_tail: usize, after_head: usize, iter: Iter<'a, T>, - deque: Shared<VecDeque<T>>, + deque: NonNull<VecDeque<T>>, } #[stable(feature = "collection_debug", since = "1.17.0")] @@ -2176,7 +2407,7 @@ unsafe impl<'a, T: Send> Send for Drain<'a, T> {} #[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> Drop for Drain<'a, T> { fn drop(&mut self) { - for _ in self.by_ref() {} + self.for_each(drop); let source_deque = unsafe { self.deque.as_mut() }; @@ -2246,7 +2477,7 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { #[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -2296,7 +2527,7 @@ macro_rules! __impl_slice_eq1 { __impl_slice_eq1! { $Lhs, $Rhs, Sized } }; ($Lhs: ty, $Rhs: ty, $Bound: ident) => { - #[stable(feature = "vec-deque-partial-eq-slice", since = "1.17.0")] + #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")] impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> { fn eq(&self, other: &$Rhs) -> bool { if self.len() != other.len() { @@ -2390,7 +2621,7 @@ impl<T> IntoIterator for VecDeque<T> { type Item = T; type IntoIter = IntoIter<T>; - /// Consumes the list into a front-to-back iterator yielding elements by + /// Consumes the `VecDeque` into a front-to-back iterator yielding elements by /// value. 
fn into_iter(self) -> IntoIter<T> { IntoIter { inner: self } @@ -2480,7 +2711,7 @@ impl<T> From<VecDeque<T>> for Vec<T> { if other.is_contiguous() { ptr::copy(buf.offset(tail as isize), buf, len); } else { - if (tail - head) >= cmp::min((cap - tail), head) { + if (tail - head) >= cmp::min(cap - tail, head) { // There is enough free space in the centre for the shortest block so we can // do this in at most three copy moves. if (cap - tail) > head { @@ -2537,98 +2768,6 @@ impl<T> From<VecDeque<T>> for Vec<T> { } } -/// A place for insertion at the back of a `VecDeque`. -/// -/// See [`VecDeque::place_back`](struct.VecDeque.html#method.place_back) for details. -#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -#[derive(Debug)] -pub struct PlaceBack<'a, T: 'a> { - vec_deque: &'a mut VecDeque<T>, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer<T> for PlaceBack<'a, T> { - type Place = PlaceBack<'a, T>; - - fn make_place(self) -> Self { - self.vec_deque.grow_if_necessary(); - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place<T> for PlaceBack<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { self.vec_deque.ptr().offset(self.vec_deque.head as isize) } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace<T> for PlaceBack<'a, T> { - type Owner = &'a mut T; - - unsafe fn finalize(self) -> &'a mut T { - let head = self.vec_deque.head; - self.vec_deque.head = self.vec_deque.wrap_add(head, 1); - &mut *(self.vec_deque.ptr().offset(head as isize)) - } -} - -/// A place for insertion at the front of a `VecDeque`. -/// -/// See [`VecDeque::place_front`](struct.VecDeque.html#method.place_front) for details. 
-#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -#[derive(Debug)] -pub struct PlaceFront<'a, T: 'a> { - vec_deque: &'a mut VecDeque<T>, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer<T> for PlaceFront<'a, T> { - type Place = PlaceFront<'a, T>; - - fn make_place(self) -> Self { - self.vec_deque.grow_if_necessary(); - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place<T> for PlaceFront<'a, T> { - fn pointer(&mut self) -> *mut T { - let tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1); - unsafe { self.vec_deque.ptr().offset(tail as isize) } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace<T> for PlaceFront<'a, T> { - type Owner = &'a mut T; - - unsafe fn finalize(self) -> &'a mut T { - self.vec_deque.tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1); - &mut *(self.vec_deque.ptr().offset(self.vec_deque.tail as isize)) - } -} - #[cfg(test)] mod tests { use test; @@ -2909,7 +3048,7 @@ mod tests { #[test] fn test_from_vec() { - use super::super::vec::Vec; + use vec::Vec; for cap in 0..35 { for len in 0..cap + 1 { let mut vec = Vec::with_capacity(cap); @@ -2925,7 +3064,7 @@ mod tests { #[test] fn test_vec_from_vecdeque() { - use super::super::vec::Vec; + use vec::Vec; fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) { let mut vd = VecDeque::with_capacity(cap); diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs index a092bfb3b0a..b49ec0ae252 100644 --- a/src/liballoc/fmt.rs +++ b/src/liballoc/fmt.rs @@ -113,6 +113,8 @@ //! //! * *nothing* ⇒ [`Display`] //! * `?` ⇒ [`Debug`] +//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers +//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers //! * `o` ⇒ [`Octal`](trait.Octal.html) //! * `x` ⇒ [`LowerHex`](trait.LowerHex.html) //! * `X` ⇒ [`UpperHex`](trait.UpperHex.html) @@ -324,7 +326,7 @@ //! sign := '+' | '-' //! width := count //! precision := count | '*' -//! type := identifier | '' +//! type := identifier | '?' | '' //! count := parameter | integer //! parameter := argument '$' //! ``` @@ -338,7 +340,8 @@ //! //! ## Fill/Alignment //! -//! The fill character is provided normally in conjunction with the `width` +//! The fill character is provided normally in conjunction with the +//! [`width`](#width) //! parameter. This indicates that if the value being formatted is smaller than //! `width` some extra characters will be printed around it. The extra //! characters are specified by `fill`, and the alignment can be one of the @@ -386,7 +389,8 @@ //! padding specified by fill/alignment will be used to take up the required //! space. //! -//! The default fill/alignment for non-numerics is a space and left-aligned. The +//! The default [fill/alignment](#fillalignment) for non-numerics is a space and +//! left-aligned. The //! defaults for numeric formatters is also a space but with right-alignment. If //! the `0` flag is specified for numerics, then the implicit fill character is //! `0`. 
@@ -514,19 +518,21 @@ pub use core::fmt::rt; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{Formatter, Result, Write}; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{Octal, Binary}; +pub use core::fmt::{Binary, Octal}; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{Display, Debug}; +pub use core::fmt::{Debug, Display}; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{LowerHex, UpperHex, Pointer}; +pub use core::fmt::{LowerHex, Pointer, UpperHex}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{LowerExp, UpperExp}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::Error; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{ArgumentV1, Arguments, write}; +pub use core::fmt::{write, ArgumentV1, Arguments}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple}; +#[stable(feature = "fmt_flags_align", since = "1.28.0")] +pub use core::fmt::{Alignment}; use string; @@ -561,7 +567,8 @@ use string; pub fn format(args: Arguments) -> string::String { let capacity = args.estimated_capacity(); let mut output = string::String::with_capacity(capacity); - output.write_fmt(args) - .expect("a formatting trait implementation returned an error"); + output + .write_fmt(args) + .expect("a formatting trait implementation returned an error"); output } diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs deleted file mode 100644 index b2bd9d7d8fa..00000000000 --- a/src/liballoc/heap.rs +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
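As context for the `format` function whose body was reflowed at the end of the fmt.rs hunk above: the `format!` macro lowers to roughly this call, which is why the `Arguments` value can pre-size the output string. A minimal sketch:

```rust
use std::fmt;

fn main() {
    // `format!(...)` expands to a call like this one; `estimated_capacity`
    // on the Arguments lets the String be sized before anything is written.
    let s = fmt::format(format_args!("{} + {} = {}", 1, 2, 1 + 2));
    assert_eq!(s, "1 + 2 = 3");
}
```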
- -#![unstable(feature = "allocator_api", - reason = "the precise API and guarantees it provides may be tweaked \ - slightly, especially to possibly take into account the \ - types being stored to make room for a future \ - tracing garbage collector", - issue = "32838")] - -use core::intrinsics::{min_align_of_val, size_of_val}; -use core::mem::{self, ManuallyDrop}; -use core::usize; - -pub use allocator::*; -#[doc(hidden)] -pub mod __core { - pub use core::*; -} - -extern "Rust" { - #[allocator] - #[rustc_allocator_nounwind] - fn __rust_alloc(size: usize, align: usize, err: *mut u8) -> *mut u8; - #[cold] - #[rustc_allocator_nounwind] - fn __rust_oom(err: *const u8) -> !; - #[rustc_allocator_nounwind] - fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize); - #[rustc_allocator_nounwind] - fn __rust_usable_size(layout: *const u8, - min: *mut usize, - max: *mut usize); - #[rustc_allocator_nounwind] - fn __rust_realloc(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize, - err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_alloc_zeroed(size: usize, align: usize, err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_alloc_excess(size: usize, - align: usize, - excess: *mut usize, - err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_realloc_excess(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize, - excess: *mut usize, - err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_grow_in_place(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize) -> u8; - #[rustc_allocator_nounwind] - fn __rust_shrink_in_place(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize) -> u8; -} - -#[derive(Copy, Clone, Default, Debug)] -pub struct Heap; - -unsafe impl Alloc for Heap { - #[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>()); - let ptr = __rust_alloc(layout.size(), - layout.align(), - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(ptr) - } - } - - #[inline] - #[cold] - fn oom(&mut self, err: AllocErr) -> ! 
{ - unsafe { - __rust_oom(&err as *const AllocErr as *const u8) - } - } - - #[inline] - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - __rust_dealloc(ptr, layout.size(), layout.align()) - } - - #[inline] - fn usable_size(&self, layout: &Layout) -> (usize, usize) { - let mut min = 0; - let mut max = 0; - unsafe { - __rust_usable_size(layout as *const Layout as *const u8, - &mut min, - &mut max); - } - (min, max) - } - - #[inline] - unsafe fn realloc(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) - -> Result<*mut u8, AllocErr> - { - let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>()); - let ptr = __rust_realloc(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align(), - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - mem::forget(err); - Ok(ptr) - } - } - - #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>()); - let ptr = __rust_alloc_zeroed(layout.size(), - layout.align(), - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(ptr) - } - } - - #[inline] - unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> { - let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>()); - let mut size = 0; - let ptr = __rust_alloc_excess(layout.size(), - layout.align(), - &mut size, - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(Excess(ptr, size)) - } - } - - #[inline] - unsafe fn realloc_excess(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<Excess, AllocErr> { - let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>()); - let mut size = 0; - let ptr = __rust_realloc_excess(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align(), - &mut size, - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(Excess(ptr, size)) - } - } - - #[inline] - unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) - -> Result<(), CannotReallocInPlace> - { - debug_assert!(new_layout.size() >= layout.size()); - debug_assert!(new_layout.align() == layout.align()); - let ret = __rust_grow_in_place(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align()); - if ret != 0 { - Ok(()) - } else { - Err(CannotReallocInPlace) - } - } - - #[inline] - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - debug_assert!(new_layout.size() <= layout.size()); - debug_assert!(new_layout.align() == layout.align()); - let ret = __rust_shrink_in_place(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align()); - if ret != 0 { - Ok(()) - } else { - Err(CannotReallocInPlace) - } - } -} - -/// An arbitrary non-null address to represent zero-size allocations. -/// -/// This preserves the non-null invariant for types like `Box<T>`. The address -/// may overlap with non-zero-size memory allocations. -#[rustc_deprecated(since = "1.19", reason = "Use Unique/Shared::empty() instead")] -#[unstable(feature = "heap_api", issue = "27700")] -pub const EMPTY: *mut () = 1 as *mut (); - -/// The allocator for unique pointers. -// This function must not unwind. 
If it does, MIR trans will fail. -#[cfg(not(test))] -#[lang = "exchange_malloc"] -#[inline] -unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { - if size == 0 { - align as *mut u8 - } else { - let layout = Layout::from_size_align_unchecked(size, align); - Heap.alloc(layout).unwrap_or_else(|err| { - Heap.oom(err) - }) - } -} - -#[cfg_attr(not(test), lang = "box_free")] -#[inline] -pub(crate) unsafe fn box_free<T: ?Sized>(ptr: *mut T) { - let size = size_of_val(&*ptr); - let align = min_align_of_val(&*ptr); - // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary. - if size != 0 { - let layout = Layout::from_size_align_unchecked(size, align); - Heap.dealloc(ptr as *mut u8, layout); - } -} - -#[cfg(test)] -mod tests { - extern crate test; - use self::test::Bencher; - use boxed::Box; - use heap::{Heap, Alloc, Layout}; - - #[test] - fn allocate_zeroed() { - unsafe { - let layout = Layout::from_size_align(1024, 1).unwrap(); - let ptr = Heap.alloc_zeroed(layout.clone()) - .unwrap_or_else(|e| Heap.oom(e)); - - let end = ptr.offset(layout.size() as isize); - let mut i = ptr; - while i < end { - assert_eq!(*i, 0); - i = i.offset(1); - } - Heap.dealloc(ptr, layout); - } - } - - #[bench] - fn alloc_owned_small(b: &mut Bencher) { - b.iter(|| { - let _: Box<_> = box 10; - }) - } -} diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 6ee4f802802..bcdfd8c9aa5 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -13,10 +13,10 @@ //! This library provides smart pointers and collections for managing //! heap-allocated values. //! -//! This library, like libcore, is not intended for general usage, but rather as -//! a building block of other libraries. The types and interfaces in this -//! library are re-exported through the [standard library](../std/index.html), -//! and should not be used through this library. +//! This library, like libcore, normally doesn’t need to be used directly +//! since its contents are re-exported in the [`std` crate](../std/index.html). +//! Crates that use the `#![no_std]` attribute however will typically +//! not depend on `std`, so they’d use this crate instead. //! //! ## Boxed values //! @@ -40,7 +40,7 @@ //! //! ## Atomically reference counted pointers //! -//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc` +//! The [`Arc`](sync/index.html) type is the threadsafe equivalent of the `Rc` //! type. It provides all the same functionality of `Rc`, except it requires //! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself //! sendable while `Rc<T>` is not. @@ -57,7 +57,7 @@ //! //! ## Heap interfaces //! -//! The [`heap`](heap/index.html) module defines the low-level interface to the +//! The [`alloc`](alloc/index.html) module defines the low-level interface to the //! default global allocator. It is not compatible with the libc allocator API. 
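The crate docs above hinge on `Arc<T>` being sendable where `Rc<T>` is not; a small illustration of that distinction, written against the std-facing paths that re-export these types:

```rust
use std::sync::Arc;
use std::thread;

fn main() {
    let shared = Arc::new(vec![1, 2, 3]);
    let worker = Arc::clone(&shared);

    // `Arc<Vec<i32>>` is Send, so the clone may move into another thread;
    // swapping in `std::rc::Rc` here is rejected at compile time.
    let handle = thread::spawn(move || worker.iter().sum::<i32>());

    assert_eq!(handle.join().unwrap(), 6);
    assert_eq!(shared.len(), 3); // the original handle remains usable
}
```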
#![allow(unused_attributes)] @@ -72,18 +72,17 @@ test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))] #![no_std] #![needs_allocator] -#![deny(warnings)] #![deny(missing_debug_implementations)] -#![cfg_attr(test, allow(deprecated))] // rand -#![cfg_attr(test, feature(placement_in))] -#![cfg_attr(not(test), feature(core_float))] -#![cfg_attr(not(test), feature(exact_size_is_empty))] -#![cfg_attr(not(test), feature(slice_rotate))] +#![cfg_attr(not(test), feature(fn_traits))] #![cfg_attr(not(test), feature(generator_trait))] -#![cfg_attr(test, feature(rand, test))] +#![cfg_attr(not(stage0), feature(nll))] +#![cfg_attr(test, feature(test))] + +#![feature(allocator_api)] #![feature(allow_internal_unstable)] -#![feature(ascii_ctype)] +#![feature(arbitrary_self_types)] +#![feature(box_into_raw_non_null)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(cfg_target_has_atomic)] @@ -94,40 +93,31 @@ #![feature(dropck_eyepatch)] #![feature(exact_size_is_empty)] #![feature(fmt_internals)] -#![feature(from_ref)] #![feature(fundamental)] -#![feature(fused)] -#![feature(generic_param_attrs)] -#![feature(i128_type)] -#![feature(inclusive_range)] -#![feature(iter_rfold)] +#![feature(futures_api)] #![feature(lang_items)] +#![feature(libc)] #![feature(needs_allocator)] -#![feature(nonzero)] -#![feature(offset_to)] #![feature(optin_builtin_traits)] #![feature(pattern)] -#![feature(placement_in_syntax)] -#![feature(placement_new_protocol)] +#![feature(pin)] +#![feature(ptr_internals)] +#![feature(ptr_offset_from)] #![feature(rustc_attrs)] -#![feature(shared)] -#![feature(slice_get_slice)] -#![feature(slice_patterns)] -#![feature(slice_rsplit)] #![feature(specialization)] +#![feature(split_ascii_whitespace)] #![feature(staged_api)] #![feature(str_internals)] #![feature(trusted_len)] +#![feature(try_reserve)] #![feature(unboxed_closures)] -#![feature(unicode)] -#![feature(unique)] +#![feature(unicode_internals)] #![feature(unsize)] #![feature(allocator_internals)] #![feature(on_unimplemented)] #![feature(exact_chunks)] - -#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol, swap_with_slice, i128))] -#![cfg_attr(test, feature(test, box_heap))] +#![feature(rustc_const_unstable)] +#![feature(const_vec_new)] // Allow testing this library @@ -139,140 +129,45 @@ extern crate test; #[cfg(test)] extern crate rand; -extern crate std_unicode; - // Module with internal macros used by other modules (needs to be included before other modules). #[macro_use] mod macros; -// Allocator trait and helper struct definitions - -pub mod allocator; - // Heaps provided for low-level allocation strategies -pub mod heap; +pub mod alloc; +#[unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] +pub mod task; // Primitive types using the heaps above // Need to conditionally define the mod from `boxed.rs` to avoid // duplicating the lang-items when building in test cfg; but also need -// to allow code to have `use boxed::HEAP;` -// and `use boxed::Box;` declarations. +// to allow code to have `use boxed::Box;` declarations. 
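Since the reworded crate docs point `#![no_std]` users at this crate, here is a sketch of how such a crate consumed liballoc in this period. The `join` helper is hypothetical; the `alloc` feature gate was still required on nightly at the time, and the final binary must supply a global allocator:

```rust
#![no_std]
#![feature(alloc)] // nightly-only gate in this era

extern crate alloc;

use alloc::string::String;

/// Joins `parts` with `sep`, using only liballoc types (no libstd).
pub fn join(parts: &[&str], sep: &str) -> String {
    let mut out = String::new();
    for (i, part) in parts.iter().enumerate() {
        if i > 0 {
            out.push_str(sep);
        }
        out.push_str(part);
    }
    out
}
```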
#[cfg(not(test))] pub mod boxed; #[cfg(test)] mod boxed { - pub use std::boxed::{Box, IntermediateBox, HEAP}; + pub use std::boxed::Box; } #[cfg(test)] mod boxed_test; -#[cfg(target_has_atomic = "ptr")] -pub mod arc; +pub mod collections; +#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] +pub mod sync; pub mod rc; pub mod raw_vec; - -// collections modules -pub mod binary_heap; -mod btree; +pub mod prelude; pub mod borrow; pub mod fmt; -pub mod linked_list; -pub mod range; pub mod slice; pub mod str; pub mod string; pub mod vec; -pub mod vec_deque; - -#[stable(feature = "rust1", since = "1.0.0")] -pub mod btree_map { - //! A map based on a B-Tree. - #[stable(feature = "rust1", since = "1.0.0")] - pub use btree::map::*; -} - -#[stable(feature = "rust1", since = "1.0.0")] -pub mod btree_set { - //! A set based on a B-Tree. - #[stable(feature = "rust1", since = "1.0.0")] - pub use btree::set::*; -} #[cfg(not(test))] mod std { pub use core::ops; // RangeFull } - -/// An endpoint of a range of keys. -/// -/// # Examples -/// -/// `Bound`s are range endpoints: -/// -/// ``` -/// #![feature(collections_range)] -/// -/// use std::collections::range::RangeArgument; -/// use std::collections::Bound::*; -/// -/// assert_eq!((..100).start(), Unbounded); -/// assert_eq!((1..12).start(), Included(&1)); -/// assert_eq!((1..12).end(), Excluded(&12)); -/// ``` -/// -/// Using a tuple of `Bound`s as an argument to [`BTreeMap::range`]. -/// Note that in most cases, it's better to use range syntax (`1..5`) instead. -/// -/// ``` -/// use std::collections::BTreeMap; -/// use std::collections::Bound::{Excluded, Included, Unbounded}; -/// -/// let mut map = BTreeMap::new(); -/// map.insert(3, "a"); -/// map.insert(5, "b"); -/// map.insert(8, "c"); -/// -/// for (key, value) in map.range((Excluded(3), Included(8))) { -/// println!("{}: {}", key, value); -/// } -/// -/// assert_eq!(Some((&3, &"a")), map.range((Unbounded, Included(5))).next()); -/// ``` -/// -/// [`BTreeMap::range`]: btree_map/struct.BTreeMap.html#method.range -#[stable(feature = "collections_bound", since = "1.17.0")] -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -pub enum Bound<T> { - /// An inclusive bound. - #[stable(feature = "collections_bound", since = "1.17.0")] - Included(#[stable(feature = "collections_bound", since = "1.17.0")] T), - /// An exclusive bound. - #[stable(feature = "collections_bound", since = "1.17.0")] - Excluded(#[stable(feature = "collections_bound", since = "1.17.0")] T), - /// An infinite endpoint. Indicates that there is no bound in this direction. - #[stable(feature = "collections_bound", since = "1.17.0")] - Unbounded, -} - -/// An intermediate trait for specialization of `Extend`. -#[doc(hidden)] -trait SpecExtend<I: IntoIterator> { - /// Extends `self` with the contents of the given iterator. - fn spec_extend(&mut self, iter: I); -} - -#[doc(no_inline)] -pub use binary_heap::BinaryHeap; -#[doc(no_inline)] -pub use btree_map::BTreeMap; -#[doc(no_inline)] -pub use btree_set::BTreeSet; -#[doc(no_inline)] -pub use linked_list::LinkedList; -#[doc(no_inline)] -pub use vec_deque::VecDeque; -#[doc(no_inline)] -pub use string::String; -#[doc(no_inline)] -pub use vec::Vec; diff --git a/src/liballoc/prelude.rs b/src/liballoc/prelude.rs new file mode 100644 index 00000000000..53b5e93a66e --- /dev/null +++ b/src/liballoc/prelude.rs @@ -0,0 +1,29 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The alloc Prelude +//! +//! The purpose of this module is to alleviate imports of commonly-used +//! items of the `alloc` crate by adding a glob import to the top of modules: +//! +//! ``` +//! # #![allow(unused_imports)] +//! # #![feature(alloc)] +//! extern crate alloc; +//! use alloc::prelude::*; +//! ``` + +#![unstable(feature = "alloc", issue = "27783")] + +#[unstable(feature = "alloc", issue = "27783")] pub use borrow::ToOwned; +#[unstable(feature = "alloc", issue = "27783")] pub use boxed::Box; +#[unstable(feature = "alloc", issue = "27783")] pub use slice::SliceConcatExt; +#[unstable(feature = "alloc", issue = "27783")] pub use string::{String, ToString}; +#[unstable(feature = "alloc", issue = "27783")] pub use vec::Vec; diff --git a/src/liballoc/range.rs b/src/liballoc/range.rs deleted file mode 100644 index f862da0d61e..00000000000 --- a/src/liballoc/range.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![unstable(feature = "collections_range", - reason = "waiting for dust to settle on inclusive ranges", - issue = "30877")] - -//! Range syntax. - -use core::ops::{RangeFull, Range, RangeTo, RangeFrom, RangeInclusive, RangeToInclusive}; -use Bound::{self, Excluded, Included, Unbounded}; - -/// `RangeArgument` is implemented by Rust's built-in range types, produced -/// by range syntax like `..`, `a..`, `..b` or `c..d`. -pub trait RangeArgument<T: ?Sized> { - /// Start index bound. - /// - /// Returns the start value as a `Bound`. - /// - /// # Examples - /// - /// ``` - /// #![feature(alloc)] - /// #![feature(collections_range)] - /// - /// extern crate alloc; - /// - /// # fn main() { - /// use alloc::range::RangeArgument; - /// use alloc::Bound::*; - /// - /// assert_eq!((..10).start(), Unbounded); - /// assert_eq!((3..10).start(), Included(&3)); - /// # } - /// ``` - fn start(&self) -> Bound<&T>; - - /// End index bound. - /// - /// Returns the end value as a `Bound`. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(alloc)] - /// #![feature(collections_range)] - /// - /// extern crate alloc; - /// - /// # fn main() { - /// use alloc::range::RangeArgument; - /// use alloc::Bound::*; - /// - /// assert_eq!((3..).end(), Unbounded); - /// assert_eq!((3..10).end(), Excluded(&10)); - /// # } - /// ``` - fn end(&self) -> Bound<&T>; -} - -// FIXME add inclusive ranges to RangeArgument - -impl<T: ?Sized> RangeArgument<T> for RangeFull { - fn start(&self) -> Bound<&T> { - Unbounded - } - fn end(&self) -> Bound<&T> { - Unbounded - } -} - -impl<T> RangeArgument<T> for RangeFrom<T> { - fn start(&self) -> Bound<&T> { - Included(&self.start) - } - fn end(&self) -> Bound<&T> { - Unbounded - } -} - -impl<T> RangeArgument<T> for RangeTo<T> { - fn start(&self) -> Bound<&T> { - Unbounded - } - fn end(&self) -> Bound<&T> { - Excluded(&self.end) - } -} - -impl<T> RangeArgument<T> for Range<T> { - fn start(&self) -> Bound<&T> { - Included(&self.start) - } - fn end(&self) -> Bound<&T> { - Excluded(&self.end) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -impl<T> RangeArgument<T> for RangeInclusive<T> { - fn start(&self) -> Bound<&T> { - Included(&self.start) - } - fn end(&self) -> Bound<&T> { - Included(&self.end) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -impl<T> RangeArgument<T> for RangeToInclusive<T> { - fn start(&self) -> Bound<&T> { - Unbounded - } - fn end(&self) -> Bound<&T> { - Included(&self.end) - } -} - -impl<T> RangeArgument<T> for (Bound<T>, Bound<T>) { - fn start(&self) -> Bound<&T> { - match *self { - (Included(ref start), _) => Included(start), - (Excluded(ref start), _) => Excluded(start), - (Unbounded, _) => Unbounded, - } - } - - fn end(&self) -> Bound<&T> { - match *self { - (_, Included(ref end)) => Included(end), - (_, Excluded(ref end)) => Excluded(end), - (_, Unbounded) => Unbounded, - } - } -} - -impl<'a, T: ?Sized + 'a> RangeArgument<T> for (Bound<&'a T>, Bound<&'a T>) { - fn start(&self) -> Bound<&T> { - self.0 - } - - fn end(&self) -> Bound<&T> { - self.1 - } -} diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index dbf1fb1367d..4f2686abf45 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -8,13 +8,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![unstable(feature = "raw_vec_internals", reason = "implemention detail", issue = "0")] +#![doc(hidden)] + use core::cmp; use core::mem; use core::ops::Drop; -use core::ptr::{self, Unique}; +use core::ptr::{self, NonNull, Unique}; use core::slice; -use heap::{Alloc, Layout, Heap}; -use super::boxed::Box; + +use alloc::{Alloc, Layout, Global, handle_alloc_error}; +use collections::CollectionAllocErr; +use collections::CollectionAllocErr::*; +use boxed::Box; /// A low-level utility for more ergonomically allocating, reallocating, and deallocating /// a buffer of memory on the heap without having to worry about all the corner cases @@ -44,7 +50,7 @@ use super::boxed::Box; /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. 
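The doc examples in this file (partially visible in the hunks below) show the intended usage pattern; condensed here as a sketch of a minimal vector built on `RawVec`. `MyVec` is a hypothetical type, the feature gates are the era-specific internal ones from this file's own doctests, and a real container would also need a `Drop` impl to free the buffer and its contents:

```rust
#![feature(alloc, raw_vec_internals)] // internal, era-specific gates
extern crate alloc;

use alloc::raw_vec::RawVec;
use std::ptr;

struct MyVec<T> {
    buf: RawVec<T>, // handles allocation, growth, and capacity corner cases
    len: usize,
}

impl<T> MyVec<T> {
    fn push(&mut self, elem: T) {
        // Make room for one more element, growing amortized if required.
        self.buf.reserve(self.len, 1);
        unsafe {
            ptr::write(self.buf.ptr().offset(self.len as isize), elem);
        }
        self.len += 1;
    }
}
```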
#[allow(missing_debug_implementations)] -pub struct RawVec<T, A: Alloc = Heap> { +pub struct RawVec<T, A: Alloc = Global> { ptr: Unique<T>, cap: usize, a: A, @@ -53,14 +59,16 @@ pub struct RawVec<T, A: Alloc = Heap> { impl<T, A: Alloc> RawVec<T, A> { /// Like `new` but parameterized over the choice of allocator for /// the returned RawVec. - pub fn new_in(a: A) -> Self { + pub const fn new_in(a: A) -> Self { // !0 is usize::MAX. This branch should be stripped at compile time. - let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 }; + // FIXME(mark-i-m): use this line when `if`s are allowed in `const` + //let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 }; // Unique::empty() doubles as "unallocated" and "zero-sized allocation" RawVec { ptr: Unique::empty(), - cap, + // FIXME(mark-i-m): use `cap` when ifs are allowed in const + cap: [0, !0][(mem::size_of::<T>() == 0) as usize], a, } } @@ -83,27 +91,28 @@ impl<T, A: Alloc> RawVec<T, A> { unsafe { let elem_size = mem::size_of::<T>(); - let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow"); - alloc_guard(alloc_size); + let alloc_size = cap.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow()); + alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow()); // handles ZSTs and `cap = 0` alike let ptr = if alloc_size == 0 { - mem::align_of::<T>() as *mut u8 + NonNull::<T>::dangling() } else { let align = mem::align_of::<T>(); + let layout = Layout::from_size_align(alloc_size, align).unwrap(); let result = if zeroed { - a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap()) + a.alloc_zeroed(layout) } else { - a.alloc(Layout::from_size_align(alloc_size, align).unwrap()) + a.alloc(layout) }; match result { - Ok(ptr) => ptr, - Err(err) => a.oom(err), + Ok(ptr) => ptr.cast(), + Err(_) => handle_alloc_error(layout), } }; RawVec { - ptr: Unique::new_unchecked(ptr as *mut _), + ptr: ptr.into(), cap, a, } @@ -111,14 +120,14 @@ impl<T, A: Alloc> RawVec<T, A> { } } -impl<T> RawVec<T, Heap> { +impl<T> RawVec<T, Global> { /// Creates the biggest possible RawVec (on the system heap) /// without allocating. If T has positive size, then this makes a /// RawVec with capacity 0. If T has 0 size, then it makes a /// RawVec with capacity `usize::MAX`. Useful for implementing /// delayed allocation. - pub fn new() -> Self { - Self::new_in(Heap) + pub const fn new() -> Self { + Self::new_in(Global) } /// Creates a RawVec (on the system heap) with exactly the @@ -138,13 +147,13 @@ impl<T> RawVec<T, Heap> { /// Aborts on OOM #[inline] pub fn with_capacity(cap: usize) -> Self { - RawVec::allocate_in(cap, false, Heap) + RawVec::allocate_in(cap, false, Global) } /// Like `with_capacity` but guarantees the buffer is zeroed. #[inline] pub fn with_capacity_zeroed(cap: usize) -> Self { - RawVec::allocate_in(cap, true, Heap) + RawVec::allocate_in(cap, true, Global) } } @@ -165,7 +174,7 @@ impl<T, A: Alloc> RawVec<T, A> { } } -impl<T> RawVec<T, Heap> { +impl<T> RawVec<T, Global> { /// Reconstitutes a RawVec from a pointer, capacity. /// /// # Undefined Behavior @@ -177,7 +186,7 @@ impl<T> RawVec<T, Heap> { RawVec { ptr: Unique::new_unchecked(ptr), cap, - a: Heap, + a: Global, } } @@ -258,7 +267,7 @@ impl<T, A: Alloc> RawVec<T, A> { /// # Examples /// /// ``` - /// # #![feature(alloc)] + /// # #![feature(alloc, raw_vec_internals)] /// # extern crate alloc; /// # use std::ptr; /// # use alloc::raw_vec::RawVec; @@ -307,14 +316,15 @@ impl<T, A: Alloc> RawVec<T, A> { // `from_size_align_unchecked`. 
let new_cap = 2 * self.cap; let new_size = new_cap * elem_size; - let new_layout = Layout::from_size_align_unchecked(new_size, cur.align()); - alloc_guard(new_size); - let ptr_res = self.a.realloc(self.ptr.as_ptr() as *mut u8, + alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); + let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, - new_layout); + new_size); match ptr_res { - Ok(ptr) => (new_cap, Unique::new_unchecked(ptr as *mut T)), - Err(e) => self.a.oom(e), + Ok(ptr) => (new_cap, ptr.cast().into()), + Err(_) => handle_alloc_error( + Layout::from_size_align_unchecked(new_size, cur.align()) + ), } } None => { @@ -322,8 +332,8 @@ impl<T, A: Alloc> RawVec<T, A> { // would cause overflow let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; match self.a.alloc_array::<T>(new_cap) { - Ok(ptr) => (new_cap, ptr), - Err(e) => self.a.oom(e), + Ok(ptr) => (new_cap, ptr.into()), + Err(_) => handle_alloc_error(Layout::array::<T>(new_cap).unwrap()), } } }; @@ -367,10 +377,8 @@ impl<T, A: Alloc> RawVec<T, A> { // overflow and the alignment is sufficiently small. let new_cap = 2 * self.cap; let new_size = new_cap * elem_size; - alloc_guard(new_size); - let ptr = self.ptr() as *mut _; - let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); - match self.a.grow_in_place(ptr, old_layout, new_layout) { + alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); + match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) { Ok(_) => { // We can't directly divide `size`. self.cap = new_cap; @@ -383,6 +391,13 @@ impl<T, A: Alloc> RawVec<T, A> { } } + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) + -> Result<(), CollectionAllocErr> { + + self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact) + } + /// Ensures that the buffer contains at least enough space to hold /// `used_cap + needed_extra_cap` elements. If it doesn't already, /// will reallocate the minimum possible amount of memory necessary. @@ -404,52 +419,31 @@ impl<T, A: Alloc> RawVec<T, A> { /// /// Aborts on OOM pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. - // Wrapping in case they gave a bad `used_cap`. 
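The `2 * self.cap` in `double` and the `max(double_cap, required_cap)` rule in `amortized_new_size` are what keep repeated pushes amortized O(1): capacity at least doubles on each reallocation, so n pushes cost O(log n) reallocations and O(n) element copies in total. A small illustration (the function name here is invented for the sketch; the bootstrap capacity of 4 matches the small-element branch of `double`):

```rust
use std::cmp;

// Capacity growth rule used by the amortized reserve path: at least
// double, but never less than what the caller actually needs.
fn amortized_new_cap(cap: usize, required_cap: usize) -> usize {
    cmp::max(cap * 2, required_cap)
}

fn main() {
    let mut cap = 0;
    let mut reallocs = 0;
    for len in 0..1_000 {
        if len == cap {
            // The first allocation bootstraps to 4 elements, as in `double`.
            cap = cmp::max(amortized_new_cap(cap, len + 1), 4);
            reallocs += 1;
        }
    }
    assert_eq!(cap, 1024);
    assert!(reallocs <= 10, "O(log n) reallocations for 1000 pushes");
}
```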
- if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { - return; - } - - // Nothing we can really do about these checks :( - let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow"); - let new_layout = match Layout::array::<T>(new_cap) { - Some(layout) => layout, - None => panic!("capacity overflow"), - }; - alloc_guard(new_layout.size()); - let res = match self.current_layout() { - Some(layout) => { - let old_ptr = self.ptr.as_ptr() as *mut u8; - self.a.realloc(old_ptr, layout, new_layout) - } - None => self.a.alloc(new_layout), - }; - let uniq = match res { - Ok(ptr) => Unique::new_unchecked(ptr as *mut T), - Err(e) => self.a.oom(e), - }; - self.ptr = uniq; - self.cap = new_cap; - } - } + match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocErr) => unreachable!(), + Ok(()) => { /* yay */ } + } + } /// Calculates the buffer's new size given that it'll hold `used_cap + /// needed_extra_cap` elements. This logic is used in amortized reserve methods. /// Returns `(new_capacity, new_alloc_size)`. - fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> usize { + fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) + -> Result<usize, CollectionAllocErr> { + // Nothing we can really do about these checks :( - let required_cap = used_cap.checked_add(needed_extra_cap) - .expect("capacity overflow"); + let required_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?; // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. let double_cap = self.cap * 2; // `double_cap` guarantees exponential growth. - cmp::max(double_cap, required_cap) + Ok(cmp::max(double_cap, required_cap)) + } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. + pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize) + -> Result<(), CollectionAllocErr> { + self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized) } /// Ensures that the buffer contains at least enough space to hold @@ -477,7 +471,7 @@ impl<T, A: Alloc> RawVec<T, A> { /// # Examples /// /// ``` - /// # #![feature(alloc)] + /// # #![feature(alloc, raw_vec_internals)] /// # extern crate alloc; /// # use std::ptr; /// # use alloc::raw_vec::RawVec; @@ -505,42 +499,12 @@ impl<T, A: Alloc> RawVec<T, A> { /// # } /// ``` pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. 
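All four entry points (`reserve`, `reserve_exact`, `try_reserve`, `try_reserve_exact`) now funnel into a single `reserve_internal`, with `Fallibility` choosing between returning an error and panicking/aborting, and `ReserveStrategy` choosing exact versus amortized growth. A reduced sketch of that shape, operating on plain `usize` capacities with no allocator, to show how the infallible wrapper merely interprets the fallible core's `Result`:

```rust
use std::cmp;

#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum CollectionAllocErr { CapacityOverflow, AllocErr }
use CollectionAllocErr::*;

enum ReserveStrategy { Exact, Amortized }

// The fallible core: all capacity arithmetic is checked and reported
// through the Result, never by panicking.
fn reserve_internal(cap: usize, used: usize, extra: usize, strategy: ReserveStrategy)
    -> Result<usize, CollectionAllocErr>
{
    let required = used.checked_add(extra).ok_or(CapacityOverflow)?;
    Ok(match strategy {
        ReserveStrategy::Exact => required,
        ReserveStrategy::Amortized => cmp::max(cap * 2, required),
    })
}

// The infallible wrapper only decides what each error means.
fn reserve(cap: usize, used: usize, extra: usize) -> usize {
    match reserve_internal(cap, used, extra, ReserveStrategy::Amortized) {
        Err(CapacityOverflow) => panic!("capacity overflow"),
        Err(AllocErr) => unreachable!(),
        Ok(new_cap) => new_cap,
    }
}

fn main() {
    assert_eq!(reserve(8, 8, 1), 16);
    assert_eq!(
        reserve_internal(8, usize::MAX, 1, ReserveStrategy::Exact),
        Err(CapacityOverflow),
    );
}
```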
- // Wrapping in case they give a bad `used_cap` - if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { - return; - } - - let new_cap = self.amortized_new_size(used_cap, needed_extra_cap); - - let new_layout = match Layout::array::<T>(new_cap) { - Some(layout) => layout, - None => panic!("capacity overflow"), - }; - // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()); - let res = match self.current_layout() { - Some(layout) => { - let old_ptr = self.ptr.as_ptr() as *mut u8; - self.a.realloc(old_ptr, layout, new_layout) - } - None => self.a.alloc(new_layout), - }; - let uniq = match res { - Ok(ptr) => Unique::new_unchecked(ptr as *mut T), - Err(e) => self.a.oom(e), - }; - self.ptr = uniq; - self.cap = new_cap; + match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocErr) => unreachable!(), + Ok(()) => { /* yay */ } } } - /// Attempts to ensure that the buffer contains at least enough space to hold /// `used_cap + needed_extra_cap` elements. If it doesn't already have /// enough capacity, will reallocate in place enough space plus comfortable slack @@ -576,17 +540,19 @@ impl<T, A: Alloc> RawVec<T, A> { return false; } - let new_cap = self.amortized_new_size(used_cap, needed_extra_cap); + let new_cap = self.amortized_new_size(used_cap, needed_extra_cap) + .unwrap_or_else(|_| capacity_overflow()); // Here, `cap < used_cap + needed_extra_cap <= new_cap` // (regardless of whether `self.cap - used_cap` wrapped). // Therefore we can safely call grow_in_place. - let ptr = self.ptr() as *mut _; let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0; // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()); - match self.a.grow_in_place(ptr, old_layout, new_layout) { + alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow()); + match self.a.grow_in_place( + NonNull::from(self.ptr).cast(), old_layout, new_layout.size(), + ) { Ok(_) => { self.cap = new_cap; true @@ -646,12 +612,13 @@ impl<T, A: Alloc> RawVec<T, A> { let new_size = elem_size * amount; let align = mem::align_of::<T>(); let old_layout = Layout::from_size_align_unchecked(old_size, align); - let new_layout = Layout::from_size_align_unchecked(new_size, align); - match self.a.realloc(self.ptr.as_ptr() as *mut u8, + match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, - new_layout) { - Ok(p) => self.ptr = Unique::new_unchecked(p as *mut T), - Err(err) => self.a.oom(err), + new_size) { + Ok(p) => self.ptr = p.cast().into(), + Err(_) => handle_alloc_error( + Layout::from_size_align_unchecked(new_size, align) + ), } } self.cap = amount; @@ -659,7 +626,74 @@ impl<T, A: Alloc> RawVec<T, A> { } } -impl<T> RawVec<T, Heap> { +enum Fallibility { + Fallible, + Infallible, +} + +use self::Fallibility::*; + +enum ReserveStrategy { + Exact, + Amortized, +} + +use self::ReserveStrategy::*; + +impl<T, A: Alloc> RawVec<T, A> { + fn reserve_internal( + &mut self, + used_cap: usize, + needed_extra_cap: usize, + fallibility: Fallibility, + strategy: ReserveStrategy, + ) -> Result<(), CollectionAllocErr> { + unsafe { + use alloc::AllocErr; + + // NOTE: we don't early branch on ZSTs here because we want this + // to actually catch "asking for more than usize::MAX" in that case. + // If we make it past the first branch then we are guaranteed to + // panic. + + // Don't actually need any more capacity. + // Wrapping in case they gave a bad `used_cap`. 
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { + return Ok(()); + } + + // Nothing we can really do about these checks :( + let new_cap = match strategy { + Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?, + Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?, + }; + let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?; + + alloc_guard(new_layout.size())?; + + let res = match self.current_layout() { + Some(layout) => { + debug_assert!(new_layout.align() == layout.align()); + self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size()) + } + None => self.a.alloc(new_layout), + }; + + match (&res, fallibility) { + (Err(AllocErr), Infallible) => handle_alloc_error(new_layout), + _ => {} + } + + self.ptr = res?.cast().into(); + self.cap = new_cap; + + Ok(()) + } + } + +} + +impl<T> RawVec<T, Global> { /// Converts the entire buffer into `Box<[T]>`. /// /// While it is not *strictly* Undefined Behavior to call @@ -683,8 +717,7 @@ impl<T, A: Alloc> RawVec<T, A> { let elem_size = mem::size_of::<T>(); if elem_size != 0 { if let Some(layout) = self.current_layout() { - let ptr = self.ptr() as *mut u8; - self.a.dealloc(ptr, layout); + self.a.dealloc(NonNull::from(self.ptr).cast(), layout); } } } @@ -709,13 +742,20 @@ unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> { // all 4GB in user-space. e.g. PAE or x32 #[inline] -fn alloc_guard(alloc_size: usize) { - if mem::size_of::<usize>() < 8 { - assert!(alloc_size <= ::core::isize::MAX as usize, - "capacity overflow"); +fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> { + if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize { + Err(CapacityOverflow) + } else { + Ok(()) } } +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +fn capacity_overflow() -> ! { + panic!("capacity overflow") +} #[cfg(test)] mod tests { @@ -723,7 +763,7 @@ mod tests { #[test] fn allocator_param() { - use allocator::{Alloc, AllocErr}; + use alloc::AllocErr; // Writing a test of integration between third-party // allocators and RawVec is a little tricky because the RawVec @@ -739,18 +779,18 @@ mod tests { // before allocation attempts start failing. 
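The `Err(_) => handle_alloc_error(layout)` arms above replace the old `a.oom(e)` calls with the global allocation-failure hook, which aborts by default. A minimal sketch of the same pattern against today's stable `std::alloc` API (note that `Layout::array` was stabilized later than this commit):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

fn main() {
    let layout = Layout::array::<u64>(1024).unwrap();
    unsafe {
        let ptr = alloc(layout);
        if ptr.is_null() {
            // Diverges: reports the failed layout and aborts by default.
            handle_alloc_error(layout);
        }
        dealloc(ptr, layout);
    }
}
```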
struct BoundedAlloc { fuel: usize } unsafe impl Alloc for BoundedAlloc { - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> { let size = layout.size(); if size > self.fuel { - return Err(AllocErr::Unsupported { details: "fuel exhausted" }); + return Err(AllocErr); } - match Heap.alloc(layout) { + match Global.alloc(layout) { ok @ Ok(_) => { self.fuel -= size; ok } err @ Err(_) => err, } } - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - Heap.dealloc(ptr, layout) + unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) { + Global.dealloc(ptr, layout) } } diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 59079f9ba76..be049eb6e5e 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -253,13 +253,14 @@ use core::hash::{Hash, Hasher}; use core::intrinsics::abort; use core::marker; use core::marker::{Unsize, PhantomData}; -use core::mem::{self, align_of_val, forget, size_of_val, uninitialized}; +use core::mem::{self, align_of_val, forget, size_of_val}; use core::ops::Deref; use core::ops::CoerceUnsized; -use core::ptr::{self, Shared}; +use core::ptr::{self, NonNull}; use core::convert::From; +use core::usize; -use heap::{Heap, Alloc, Layout, box_free}; +use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; use string::String; use vec::Vec; @@ -282,7 +283,7 @@ struct RcBox<T: ?Sized> { /// [get_mut]: #method.get_mut #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc<T: ?Sized> { - ptr: Shared<RcBox<T>>, + ptr: NonNull<RcBox<T>>, phantom: PhantomData<T>, } @@ -311,11 +312,11 @@ impl<T> Rc<T> { // pointers, which ensures that the weak destructor never frees // the allocation while the strong destructor is running, even // if the weak pointer is stored inside the strong one. - ptr: Shared::from(Box::into_unique(box RcBox { + ptr: Box::into_raw_non_null(box RcBox { strong: Cell::new(1), weak: Cell::new(1), value, - })), + }), phantom: PhantomData, } } @@ -428,7 +429,7 @@ impl<T: ?Sized> Rc<T> { let rc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); Rc { - ptr: Shared::new_unchecked(rc_ptr), + ptr: NonNull::new_unchecked(rc_ptr), phantom: PhantomData, } } @@ -449,6 +450,8 @@ impl<T: ?Sized> Rc<T> { #[stable(feature = "rc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak<T> { this.inc_weak(); + // Make sure we do not create a dangling Weak + debug_assert!(!is_dangling(this.ptr)); Weak { ptr: this.ptr } } @@ -618,15 +621,14 @@ impl<T: Clone> Rc<T> { } } -impl Rc<Any> { +impl Rc<dyn Any> { #[inline] - #[unstable(feature = "rc_downcast", issue = "44608")] + #[stable(feature = "rc_downcast", since = "1.29.0")] /// Attempt to downcast the `Rc<Any>` to a concrete type. 
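One concrete payoff of the `Shared`-to-`NonNull` migration running through this file is the niche optimization: because the pointer is known to be non-null, `Option` of a `NonNull`-based handle needs no extra discriminant word. A quick check:

```rust
use std::mem::size_of;
use std::ptr::NonNull;
use std::rc::Rc;

// NonNull<T> guarantees a non-null pointer, so the compiler can use the
// null value itself as the `None` discriminant.
fn main() {
    assert_eq!(size_of::<Option<NonNull<u32>>>(), size_of::<*mut u32>());
    assert_eq!(size_of::<Option<Rc<u32>>>(), size_of::<*mut u32>());
}
```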
/// /// # Examples /// /// ``` - /// #![feature(rc_downcast)] /// use std::any::Any; /// use std::rc::Rc; /// @@ -642,17 +644,11 @@ impl Rc<Any> { /// print_if_string(Rc::new(0i8)); /// } /// ``` - pub fn downcast<T: Any>(self) -> Result<Rc<T>, Rc<Any>> { + pub fn downcast<T: Any>(self) -> Result<Rc<T>, Rc<dyn Any>> { if (*self).is::<T>() { - // avoid the pointer arithmetic in from_raw - unsafe { - let raw: *const RcBox<Any> = self.ptr.as_ptr(); - forget(self); - Ok(Rc { - ptr: Shared::new_unchecked(raw as *const RcBox<T> as *mut _), - phantom: PhantomData, - }) - } + let ptr = self.ptr.cast::<RcBox<T>>(); + forget(self); + Ok(Rc { ptr, phantom: PhantomData }) } else { Err(self) } @@ -667,11 +663,11 @@ impl<T: ?Sized> Rc<T> { let layout = Layout::for_value(&*fake_ptr); - let mem = Heap.alloc(layout) - .unwrap_or_else(|e| Heap.oom(e)); + let mem = Global.alloc(layout) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the real RcBox - let inner = set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>; + let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>; ptr::write(&mut (*inner).strong, Cell::new(1)); ptr::write(&mut (*inner).weak, Cell::new(1)); @@ -681,7 +677,8 @@ impl<T: ?Sized> Rc<T> { fn from_box(v: Box<T>) -> Rc<T> { unsafe { - let bptr = Box::into_raw(v); + let box_unique = Box::into_unique(v); + let bptr = box_unique.as_ptr(); let value_size = size_of_val(&*bptr); let ptr = Self::allocate_for_ptr(bptr); @@ -693,9 +690,9 @@ impl<T: ?Sized> Rc<T> { value_size); // Free the allocation without dropping its contents - box_free(bptr); + box_free(box_unique); - Rc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } } @@ -722,7 +719,7 @@ impl<T> Rc<[T]> { &mut (*ptr).value as *mut [T] as *mut T, v.len()); - Rc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } @@ -737,7 +734,7 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> { // In the event of a panic, elements that have been written // into the new RcBox will be dropped, then the memory freed. struct Guard<T> { - mem: *mut u8, + mem: NonNull<u8>, elems: *mut T, layout: Layout, n_elems: usize, @@ -751,7 +748,7 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> { let slice = from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); - Heap.dealloc(self.mem, self.layout.clone()); + Global.dealloc(self.mem, self.layout.clone()); } } } @@ -767,7 +764,7 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> { let elems = &mut (*ptr).value as *mut [T] as *mut T; let mut guard = Guard{ - mem: mem, + mem: NonNull::new_unchecked(mem), elems: elems, layout: layout, n_elems: 0, @@ -781,7 +778,7 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> { // All clear. Forget the guard so it doesn't free the new RcBox. 
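The `Guard` above is a drop-guard for panic safety while cloning elements into the new `RcBox`: if a `clone` panics partway through, only the `n_elems` elements written so far are dropped. A reduced sketch of the same pattern (the real guard also deallocates the buffer, which is omitted here):

```rust
use std::{mem, ptr, slice};

// Tracks how many elements of an uninitialized buffer have been
// written; on unwind, drops exactly those elements and no others.
struct Guard<T> {
    elems: *mut T,
    n_elems: usize,
}

impl<T> Drop for Guard<T> {
    fn drop(&mut self) {
        unsafe {
            let partial = slice::from_raw_parts_mut(self.elems, self.n_elems);
            ptr::drop_in_place(partial);
        }
    }
}

// Clone `src` into the uninitialized buffer at `dst` (caller guarantees
// capacity), leaking nothing even if T::clone panics midway.
fn clone_into<T: Clone>(src: &[T], dst: *mut T) {
    let mut guard = Guard { elems: dst, n_elems: 0 };
    for (i, item) in src.iter().enumerate() {
        unsafe { ptr::write(dst.add(i), item.clone()) };
        guard.n_elems += 1;
    }
    mem::forget(guard); // all clear: keep the new elements alive
}

fn main() {
    let src = vec![String::from("a"), String::from("b")];
    let mut dst: Vec<String> = Vec::with_capacity(src.len());
    clone_into(&src, dst.as_mut_ptr());
    unsafe { dst.set_len(src.len()) };
    assert_eq!(dst, src);
}
```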
forget(guard); - Rc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } } @@ -834,8 +831,6 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> { /// ``` fn drop(&mut self) { unsafe { - let ptr = self.ptr.as_ptr(); - self.dec_strong(); if self.strong() == 0 { // destroy the contained object @@ -846,7 +841,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> { self.dec_weak(); if self.weak() == 0 { - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)); + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())); } } } @@ -1160,7 +1155,12 @@ impl<T> From<Vec<T>> for Rc<[T]> { /// [`None`]: ../../std/option/enum.Option.html#variant.None #[stable(feature = "rc_weak", since = "1.4.0")] pub struct Weak<T: ?Sized> { - ptr: Shared<RcBox<T>>, + // This is a `NonNull` to allow optimizing the size of this type in enums, + // but it is not necessarily a valid pointer. + // `Weak::new` sets this to `usize::MAX` so that it doesn’t need + // to allocate space on the heap. That's not a value a real pointer + // will ever have because RcBox has alignment at least 2. + ptr: NonNull<RcBox<T>>, } #[stable(feature = "rc_weak", since = "1.4.0")] @@ -1172,8 +1172,8 @@ impl<T: ?Sized> !marker::Sync for Weak<T> {} impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {} impl<T> Weak<T> { - /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`] on the return value always gives [`None`]. + /// Constructs a new `Weak<T>`, without allocating any memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`upgrade`]: struct.Weak.html#method.upgrade /// [`None`]: ../../std/option/enum.Option.html @@ -1188,18 +1188,17 @@ impl<T> Weak<T> { /// ``` #[stable(feature = "downgraded_weak", since = "1.10.0")] pub fn new() -> Weak<T> { - unsafe { - Weak { - ptr: Shared::from(Box::into_unique(box RcBox { - strong: Cell::new(0), - weak: Cell::new(1), - value: uninitialized(), - })), - } + Weak { + ptr: NonNull::new(usize::MAX as *mut RcBox<T>).expect("MAX is not 0"), } } } +pub(crate) fn is_dangling<T: ?Sized>(ptr: NonNull<T>) -> bool { + let address = ptr.as_ptr() as *mut () as usize; + address == usize::MAX +} + impl<T: ?Sized> Weak<T> { /// Attempts to upgrade the `Weak` pointer to an [`Rc`], extending /// the lifetime of the value if successful. @@ -1229,13 +1228,25 @@ impl<T: ?Sized> Weak<T> { /// ``` #[stable(feature = "rc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option<Rc<T>> { - if self.strong() == 0 { + let inner = self.inner()?; + if inner.strong() == 0 { None } else { - self.inc_strong(); + inner.inc_strong(); Some(Rc { ptr: self.ptr, phantom: PhantomData }) } } + + /// Return `None` when the pointer is dangling and there is no allocated `RcBox`, + /// i.e. this `Weak` was created by `Weak::new` + #[inline] + fn inner(&self) -> Option<&RcBox<T>> { + if is_dangling(self.ptr) { + None + } else { + Some(unsafe { self.ptr.as_ref() }) + } + } } #[stable(feature = "rc_weak", since = "1.4.0")] @@ -1265,14 +1276,14 @@ impl<T: ?Sized> Drop for Weak<T> { /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { - unsafe { - let ptr = self.ptr.as_ptr(); - - self.dec_weak(); + if let Some(inner) = self.inner() { + inner.dec_weak(); // the weak count starts at 1, and will only go to zero if all // the strong pointers have disappeared. 
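`Weak::new` now avoids allocating by storing `usize::MAX` as the pointer, and `is_dangling` recognizes that sentinel. This is sound because `RcBox` has alignment of at least 2, so no real allocation can ever sit at address `usize::MAX`. A standalone sketch of the sentinel check:

```rust
use std::ptr::NonNull;

// Mirrors the crate-private is_dangling added in this commit: the
// address usize::MAX doubles as a "no allocation" sentinel.
fn is_dangling<T: ?Sized>(ptr: NonNull<T>) -> bool {
    ptr.as_ptr() as *mut () as usize == usize::MAX
}

fn main() {
    let dangling = NonNull::new(usize::MAX as *mut u64).expect("MAX is not 0");
    assert!(is_dangling(dangling));

    let real: &u64 = &42;
    assert!(!is_dangling(NonNull::from(real)));
}
```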
- if self.weak() == 0 { - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)); + if inner.weak() == 0 { + unsafe { + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())); + } } } } @@ -1293,7 +1304,9 @@ impl<T: ?Sized> Clone for Weak<T> { /// ``` #[inline] fn clone(&self) -> Weak<T> { - self.inc_weak(); + if let Some(inner) = self.inner() { + inner.inc_weak() + } Weak { ptr: self.ptr } } } @@ -1326,7 +1339,7 @@ impl<T> Default for Weak<T> { } } -// NOTE: We checked_add here to deal with mem::forget safety. In particular +// NOTE: We checked_add here to deal with mem::forget safely. In particular // if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then // you can free the allocation while outstanding Rcs (or Weaks) exist. // We abort because this is such a degenerate scenario that we don't care about @@ -1379,12 +1392,10 @@ impl<T: ?Sized> RcBoxPtr<T> for Rc<T> { } } -impl<T: ?Sized> RcBoxPtr<T> for Weak<T> { +impl<T: ?Sized> RcBoxPtr<T> for RcBox<T> { #[inline(always)] fn inner(&self) -> &RcBox<T> { - unsafe { - self.ptr.as_ref() - } + self } } @@ -1546,7 +1557,7 @@ mod tests { assert_eq!(unsafe { &*ptr }, "foo"); assert_eq!(rc, rc2); - let rc: Rc<Display> = Rc::new(123); + let rc: Rc<dyn Display> = Rc::new(123); let ptr = Rc::into_raw(rc.clone()); let rc2 = unsafe { Rc::from_raw(ptr) }; @@ -1747,8 +1758,8 @@ mod tests { use std::fmt::Display; use std::string::ToString; - let b: Box<Display> = box 123; - let r: Rc<Display> = Rc::from(b); + let b: Box<dyn Display> = box 123; + let r: Rc<dyn Display> = Rc::from(b); assert_eq!(r.to_string(), "123"); } @@ -1757,8 +1768,8 @@ mod tests { fn test_from_box_trait_zero_sized() { use std::fmt::Debug; - let b: Box<Debug> = box (); - let r: Rc<Debug> = Rc::from(b); + let b: Box<dyn Debug> = box (); + let r: Rc<dyn Debug> = Rc::from(b); assert_eq!(format!("{:?}", r), "()"); } @@ -1775,8 +1786,8 @@ mod tests { fn test_downcast() { use std::any::Any; - let r1: Rc<Any> = Rc::new(i32::max_value()); - let r2: Rc<Any> = Rc::new("abc"); + let r1: Rc<dyn Any> = Rc::new(i32::max_value()); + let r2: Rc<dyn Any> = Rc::new("abc"); assert!(r1.clone().downcast::<u32>().is_err()); diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index 861f72bcf88..c27c596e797 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -10,6 +10,8 @@ //! A dynamically-sized view into a contiguous sequence, `[T]`. //! +//! *[See also the slice primitive type](../../std/primitive.slice.html).* +//! //! Slices are a view into a block of memory represented as a pointer and a //! length. //! @@ -78,8 +80,6 @@ //! * Further methods that return iterators are [`.split`], [`.splitn`], //! [`.chunks`], [`.windows`] and more. //! -//! *[See also the slice primitive type](../../std/primitive.slice.html).* -//! //! [`Clone`]: ../../std/clone/trait.Clone.html //! [`Eq`]: ../../std/cmp/trait.Eq.html //! 
[`Ord`]: ../../std/cmp/trait.Ord.html @@ -101,7 +101,7 @@ use core::cmp::Ordering::{self, Less}; use core::mem::size_of; use core::mem; use core::ptr; -use core::slice as core_slice; +use core::{u8, u16, u32}; use borrow::{Borrow, BorrowMut, ToOwned}; use boxed::Box; @@ -115,13 +115,13 @@ pub use core::slice::{Iter, IterMut}; pub use core::slice::{SplitMut, ChunksMut, Split}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut}; -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] pub use core::slice::{RSplit, RSplitMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{from_raw_parts, from_raw_parts_mut}; -#[unstable(feature = "from_ref", issue = "45703")] -pub use core::slice::{from_ref, from_ref_mut}; -#[unstable(feature = "slice_get_slice", issue = "35729")] +#[stable(feature = "from_ref", since = "1.28.0")] +pub use core::slice::{from_ref, from_mut}; +#[stable(feature = "slice_get_slice", since = "1.28.0")] pub use core::slice::SliceIndex; #[unstable(feature = "exact_chunks", issue = "47115")] pub use core::slice::{ExactChunks, ExactChunksMut}; @@ -170,1057 +170,9 @@ mod hack { } } -#[lang = "slice"] +#[lang = "slice_alloc"] #[cfg(not(test))] impl<T> [T] { - /// Returns the number of elements in the slice. - /// - /// # Examples - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert_eq!(a.len(), 3); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn len(&self) -> usize { - core_slice::SliceExt::len(self) - } - - /// Returns `true` if the slice has a length of 0. - /// - /// # Examples - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert!(!a.is_empty()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn is_empty(&self) -> bool { - core_slice::SliceExt::is_empty(self) - } - - /// Returns the first element of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert_eq!(Some(&10), v.first()); - /// - /// let w: &[i32] = &[]; - /// assert_eq!(None, w.first()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn first(&self) -> Option<&T> { - core_slice::SliceExt::first(self) - } - - /// Returns a mutable pointer to the first element of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some(first) = x.first_mut() { - /// *first = 5; - /// } - /// assert_eq!(x, &[5, 1, 2]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn first_mut(&mut self) -> Option<&mut T> { - core_slice::SliceExt::first_mut(self) - } - - /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &[0, 1, 2]; - /// - /// if let Some((first, elements)) = x.split_first() { - /// assert_eq!(first, &0); - /// assert_eq!(elements, &[1, 2]); - /// } - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_first(&self) -> Option<(&T, &[T])> { - core_slice::SliceExt::split_first(self) - } - - /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. 
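Among the reexports stabilized above, `from_ref` and the renamed `from_mut` (formerly `from_ref_mut`) give a one-element slice view of a single value. A short usage sketch:

```rust
use std::slice;

// View a single value as a one-element slice without copying it.
fn main() {
    let x = 5u32;
    let s: &[u32] = slice::from_ref(&x);
    assert_eq!(s, &[5]);

    let mut y = 1u32;
    slice::from_mut(&mut y)[0] += 1;
    assert_eq!(y, 2);
}
```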
- /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some((first, elements)) = x.split_first_mut() { - /// *first = 3; - /// elements[0] = 4; - /// elements[1] = 5; - /// } - /// assert_eq!(x, &[3, 4, 5]); - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> { - core_slice::SliceExt::split_first_mut(self) - } - - /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &[0, 1, 2]; - /// - /// if let Some((last, elements)) = x.split_last() { - /// assert_eq!(last, &2); - /// assert_eq!(elements, &[0, 1]); - /// } - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_last(&self) -> Option<(&T, &[T])> { - core_slice::SliceExt::split_last(self) - - } - - /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some((last, elements)) = x.split_last_mut() { - /// *last = 3; - /// elements[0] = 4; - /// elements[1] = 5; - /// } - /// assert_eq!(x, &[4, 5, 3]); - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> { - core_slice::SliceExt::split_last_mut(self) - } - - /// Returns the last element of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert_eq!(Some(&30), v.last()); - /// - /// let w: &[i32] = &[]; - /// assert_eq!(None, w.last()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn last(&self) -> Option<&T> { - core_slice::SliceExt::last(self) - } - - /// Returns a mutable pointer to the last item in the slice. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some(last) = x.last_mut() { - /// *last = 10; - /// } - /// assert_eq!(x, &[0, 1, 10]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn last_mut(&mut self) -> Option<&mut T> { - core_slice::SliceExt::last_mut(self) - } - - /// Returns a reference to an element or subslice depending on the type of - /// index. - /// - /// - If given a position, returns a reference to the element at that - /// position or `None` if out of bounds. - /// - If given a range, returns the subslice corresponding to that range, - /// or `None` if out of bounds. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert_eq!(Some(&40), v.get(1)); - /// assert_eq!(Some(&[10, 40][..]), v.get(0..2)); - /// assert_eq!(None, v.get(3)); - /// assert_eq!(None, v.get(0..4)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn get<I>(&self, index: I) -> Option<&I::Output> - where I: SliceIndex<Self> - { - core_slice::SliceExt::get(self, index) - } - - /// Returns a mutable reference to an element or subslice depending on the - /// type of index (see [`get`]) or `None` if the index is out of bounds. 
- /// - /// [`get`]: #method.get - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some(elem) = x.get_mut(1) { - /// *elem = 42; - /// } - /// assert_eq!(x, &[0, 42, 2]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output> - where I: SliceIndex<Self> - { - core_slice::SliceExt::get_mut(self, index) - } - - /// Returns a reference to an element or subslice, without doing bounds - /// checking. - /// - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`get`]. - /// - /// [`get`]: #method.get - /// - /// # Examples - /// - /// ``` - /// let x = &[1, 2, 4]; - /// - /// unsafe { - /// assert_eq!(x.get_unchecked(1), &2); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output - where I: SliceIndex<Self> - { - core_slice::SliceExt::get_unchecked(self, index) - } - - /// Returns a mutable reference to an element or subslice, without doing - /// bounds checking. - /// - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`get_mut`]. - /// - /// [`get_mut`]: #method.get_mut - /// - /// # Examples - /// - /// ``` - /// let x = &mut [1, 2, 4]; - /// - /// unsafe { - /// let elem = x.get_unchecked_mut(1); - /// *elem = 13; - /// } - /// assert_eq!(x, &[1, 13, 4]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output - where I: SliceIndex<Self> - { - core_slice::SliceExt::get_unchecked_mut(self, index) - } - - /// Returns a raw pointer to the slice's buffer. - /// - /// The caller must ensure that the slice outlives the pointer this - /// function returns, or else it will end up pointing to garbage. - /// - /// Modifying the container referenced by this slice may cause its buffer - /// to be reallocated, which would also make any pointers to it invalid. - /// - /// # Examples - /// - /// ``` - /// let x = &[1, 2, 4]; - /// let x_ptr = x.as_ptr(); - /// - /// unsafe { - /// for i in 0..x.len() { - /// assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize)); - /// } - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn as_ptr(&self) -> *const T { - core_slice::SliceExt::as_ptr(self) - } - - /// Returns an unsafe mutable pointer to the slice's buffer. - /// - /// The caller must ensure that the slice outlives the pointer this - /// function returns, or else it will end up pointing to garbage. - /// - /// Modifying the container referenced by this slice may cause its buffer - /// to be reallocated, which would also make any pointers to it invalid. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [1, 2, 4]; - /// let x_ptr = x.as_mut_ptr(); - /// - /// unsafe { - /// for i in 0..x.len() { - /// *x_ptr.offset(i as isize) += 2; - /// } - /// } - /// assert_eq!(x, &[3, 4, 6]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - core_slice::SliceExt::as_mut_ptr(self) - } - - /// Swaps two elements in the slice. - /// - /// # Arguments - /// - /// * a - The index of the first element - /// * b - The index of the second element - /// - /// # Panics - /// - /// Panics if `a` or `b` are out of bounds. 
- /// - /// # Examples - /// - /// ``` - /// let mut v = ["a", "b", "c", "d"]; - /// v.swap(1, 3); - /// assert!(v == ["a", "d", "c", "b"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn swap(&mut self, a: usize, b: usize) { - core_slice::SliceExt::swap(self, a, b) - } - - /// Reverses the order of elements in the slice, in place. - /// - /// # Examples - /// - /// ``` - /// let mut v = [1, 2, 3]; - /// v.reverse(); - /// assert!(v == [3, 2, 1]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn reverse(&mut self) { - core_slice::SliceExt::reverse(self) - } - - /// Returns an iterator over the slice. - /// - /// # Examples - /// - /// ``` - /// let x = &[1, 2, 4]; - /// let mut iterator = x.iter(); - /// - /// assert_eq!(iterator.next(), Some(&1)); - /// assert_eq!(iterator.next(), Some(&2)); - /// assert_eq!(iterator.next(), Some(&4)); - /// assert_eq!(iterator.next(), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn iter(&self) -> Iter<T> { - core_slice::SliceExt::iter(self) - } - - /// Returns an iterator that allows modifying each value. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [1, 2, 4]; - /// for elem in x.iter_mut() { - /// *elem += 2; - /// } - /// assert_eq!(x, &[3, 4, 6]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn iter_mut(&mut self) -> IterMut<T> { - core_slice::SliceExt::iter_mut(self) - } - - /// Returns an iterator over all contiguous windows of length - /// `size`. The windows overlap. If the slice is shorter than - /// `size`, the iterator returns no values. - /// - /// # Panics - /// - /// Panics if `size` is 0. - /// - /// # Examples - /// - /// ``` - /// let slice = ['r', 'u', 's', 't']; - /// let mut iter = slice.windows(2); - /// assert_eq!(iter.next().unwrap(), &['r', 'u']); - /// assert_eq!(iter.next().unwrap(), &['u', 's']); - /// assert_eq!(iter.next().unwrap(), &['s', 't']); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// If the slice is shorter than `size`: - /// - /// ``` - /// let slice = ['f', 'o', 'o']; - /// let mut iter = slice.windows(4); - /// assert!(iter.next().is_none()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn windows(&self, size: usize) -> Windows<T> { - core_slice::SliceExt::windows(self, size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a - /// time. The chunks are slices and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last chunk will - /// not have length `chunk_size`. - /// - /// See [`exact_chunks`] for a variant of this iterator that returns chunks - /// of always exactly `chunk_size` elements. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// let slice = ['l', 'o', 'r', 'e', 'm']; - /// let mut iter = slice.chunks(2); - /// assert_eq!(iter.next().unwrap(), &['l', 'o']); - /// assert_eq!(iter.next().unwrap(), &['r', 'e']); - /// assert_eq!(iter.next().unwrap(), &['m']); - /// assert!(iter.next().is_none()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn chunks(&self, chunk_size: usize) -> Chunks<T> { - core_slice::SliceExt::chunks(self, chunk_size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a - /// time. The chunks are slices and do not overlap. 
If `chunk_size` does - /// not divide the length of the slice, then the last up to `chunk_size-1` - /// elements will be omitted. - /// - /// Due to each chunk having exactly `chunk_size` elements, the compiler - /// can often optimize the resulting code better than in the case of - /// [`chunks`]. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// #![feature(exact_chunks)] - /// - /// let slice = ['l', 'o', 'r', 'e', 'm']; - /// let mut iter = slice.exact_chunks(2); - /// assert_eq!(iter.next().unwrap(), &['l', 'o']); - /// assert_eq!(iter.next().unwrap(), &['r', 'e']); - /// assert!(iter.next().is_none()); - /// ``` - #[unstable(feature = "exact_chunks", issue = "47115")] - #[inline] - pub fn exact_chunks(&self, chunk_size: usize) -> ExactChunks<T> { - core_slice::SliceExt::exact_chunks(self, chunk_size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable slices, and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last chunk will not - /// have length `chunk_size`. - /// - /// See [`exact_chunks_mut`] for a variant of this iterator that returns chunks - /// of always exactly `chunk_size` elements. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// let v = &mut [0, 0, 0, 0, 0]; - /// let mut count = 1; - /// - /// for chunk in v.chunks_mut(2) { - /// for elem in chunk.iter_mut() { - /// *elem += count; - /// } - /// count += 1; - /// } - /// assert_eq!(v, &[1, 1, 2, 2, 3]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> { - core_slice::SliceExt::chunks_mut(self, chunk_size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable slices, and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last up to `chunk_size-1` - /// elements will be omitted. - /// - /// - /// Due to each chunk having exactly `chunk_size` elements, the compiler - /// can often optimize the resulting code better than in the case of - /// [`chunks_mut`]. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// #![feature(exact_chunks)] - /// - /// let v = &mut [0, 0, 0, 0, 0]; - /// let mut count = 1; - /// - /// for chunk in v.exact_chunks_mut(2) { - /// for elem in chunk.iter_mut() { - /// *elem += count; - /// } - /// count += 1; - /// } - /// assert_eq!(v, &[1, 1, 2, 2, 0]); - /// ``` - #[unstable(feature = "exact_chunks", issue = "47115")] - #[inline] - pub fn exact_chunks_mut(&mut self, chunk_size: usize) -> ExactChunksMut<T> { - core_slice::SliceExt::exact_chunks_mut(self, chunk_size) - } - - /// Divides one slice into two at an index. - /// - /// The first will contain all indices from `[0, mid)` (excluding - /// the index `mid` itself) and the second will contain all - /// indices from `[mid, len)` (excluding the index `len` itself). - /// - /// # Panics - /// - /// Panics if `mid > len`. 
- /// - /// # Examples - /// - /// ``` - /// let v = [1, 2, 3, 4, 5, 6]; - /// - /// { - /// let (left, right) = v.split_at(0); - /// assert!(left == []); - /// assert!(right == [1, 2, 3, 4, 5, 6]); - /// } - /// - /// { - /// let (left, right) = v.split_at(2); - /// assert!(left == [1, 2]); - /// assert!(right == [3, 4, 5, 6]); - /// } - /// - /// { - /// let (left, right) = v.split_at(6); - /// assert!(left == [1, 2, 3, 4, 5, 6]); - /// assert!(right == []); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_at(&self, mid: usize) -> (&[T], &[T]) { - core_slice::SliceExt::split_at(self, mid) - } - - /// Divides one mutable slice into two at an index. - /// - /// The first will contain all indices from `[0, mid)` (excluding - /// the index `mid` itself) and the second will contain all - /// indices from `[mid, len)` (excluding the index `len` itself). - /// - /// # Panics - /// - /// Panics if `mid > len`. - /// - /// # Examples - /// - /// ``` - /// let mut v = [1, 0, 3, 0, 5, 6]; - /// // scoped to restrict the lifetime of the borrows - /// { - /// let (left, right) = v.split_at_mut(2); - /// assert!(left == [1, 0]); - /// assert!(right == [3, 0, 5, 6]); - /// left[1] = 2; - /// right[1] = 4; - /// } - /// assert!(v == [1, 2, 3, 4, 5, 6]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { - core_slice::SliceExt::split_at_mut(self, mid) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`. The matched element is not contained in the subslices. - /// - /// # Examples - /// - /// ``` - /// let slice = [10, 40, 33, 20]; - /// let mut iter = slice.split(|num| num % 3 == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[10, 40]); - /// assert_eq!(iter.next().unwrap(), &[20]); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// If the first element is matched, an empty slice will be the first item - /// returned by the iterator. Similarly, if the last element in the slice - /// is matched, an empty slice will be the last item returned by the - /// iterator: - /// - /// ``` - /// let slice = [10, 40, 33]; - /// let mut iter = slice.split(|num| num % 3 == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[10, 40]); - /// assert_eq!(iter.next().unwrap(), &[]); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// If two matched elements are directly adjacent, an empty slice will be - /// present between them: - /// - /// ``` - /// let slice = [10, 6, 33, 20]; - /// let mut iter = slice.split(|num| num % 3 == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[10]); - /// assert_eq!(iter.next().unwrap(), &[]); - /// assert_eq!(iter.next().unwrap(), &[20]); - /// assert!(iter.next().is_none()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split<F>(&self, pred: F) -> Split<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::split(self, pred) - } - - /// Returns an iterator over mutable subslices separated by elements that - /// match `pred`. The matched element is not contained in the subslices. 
- /// - /// # Examples - /// - /// ``` - /// let mut v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.split_mut(|num| *num % 3 == 0) { - /// group[0] = 1; - /// } - /// assert_eq!(v, [1, 40, 30, 1, 60, 1]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::split_mut(self, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`, starting at the end of the slice and working backwards. - /// The matched element is not contained in the subslices. - /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rsplit)] - /// - /// let slice = [11, 22, 33, 0, 44, 55]; - /// let mut iter = slice.rsplit(|num| *num == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[44, 55]); - /// assert_eq!(iter.next().unwrap(), &[11, 22, 33]); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// As with `split()`, if the first or last element is matched, an empty - /// slice will be the first (or last) item returned by the iterator. - /// - /// ``` - /// #![feature(slice_rsplit)] - /// - /// let v = &[0, 1, 1, 2, 3, 5, 8]; - /// let mut it = v.rsplit(|n| *n % 2 == 0); - /// assert_eq!(it.next().unwrap(), &[]); - /// assert_eq!(it.next().unwrap(), &[3, 5]); - /// assert_eq!(it.next().unwrap(), &[1, 1]); - /// assert_eq!(it.next().unwrap(), &[]); - /// assert_eq!(it.next(), None); - /// ``` - #[unstable(feature = "slice_rsplit", issue = "41020")] - #[inline] - pub fn rsplit<F>(&self, pred: F) -> RSplit<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplit(self, pred) - } - - /// Returns an iterator over mutable subslices separated by elements that - /// match `pred`, starting at the end of the slice and working - /// backwards. The matched element is not contained in the subslices. - /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rsplit)] - /// - /// let mut v = [100, 400, 300, 200, 600, 500]; - /// - /// let mut count = 0; - /// for group in v.rsplit_mut(|num| *num % 3 == 0) { - /// count += 1; - /// group[0] = count; - /// } - /// assert_eq!(v, [3, 400, 300, 2, 600, 1]); - /// ``` - /// - #[unstable(feature = "slice_rsplit", issue = "41020")] - #[inline] - pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplit_mut(self, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`, limited to returning at most `n` items. The matched element is - /// not contained in the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// Print the slice split once by numbers divisible by 3 (i.e. `[10, 40]`, - /// `[20, 60, 50]`): - /// - /// ``` - /// let v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.splitn(2, |num| *num % 3 == 0) { - /// println!("{:?}", group); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::splitn(self, n, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`, limited to returning at most `n` items. The matched element is - /// not contained in the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. 
- /// - /// # Examples - /// - /// ``` - /// let mut v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.splitn_mut(2, |num| *num % 3 == 0) { - /// group[0] = 1; - /// } - /// assert_eq!(v, [1, 40, 30, 1, 60, 50]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::splitn_mut(self, n, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred` limited to returning at most `n` items. This starts at the end of - /// the slice and works backwards. The matched element is not contained in - /// the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// Print the slice split once, starting from the end, by numbers divisible - /// by 3 (i.e. `[50]`, `[10, 40, 30, 20]`): - /// - /// ``` - /// let v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.rsplitn(2, |num| *num % 3 == 0) { - /// println!("{:?}", group); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplitn(self, n, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred` limited to returning at most `n` items. This starts at the end of - /// the slice and works backwards. The matched element is not contained in - /// the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// ``` - /// let mut s = [10, 40, 30, 20, 60, 50]; - /// - /// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) { - /// group[0] = 1; - /// } - /// assert_eq!(s, [1, 40, 30, 20, 60, 1]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<T, F> - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplitn_mut(self, n, pred) - } - - /// Returns `true` if the slice contains an element with the given value. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert!(v.contains(&30)); - /// assert!(!v.contains(&50)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn contains(&self, x: &T) -> bool - where T: PartialEq - { - core_slice::SliceExt::contains(self, x) - } - - /// Returns `true` if `needle` is a prefix of the slice. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert!(v.starts_with(&[10])); - /// assert!(v.starts_with(&[10, 40])); - /// assert!(!v.starts_with(&[50])); - /// assert!(!v.starts_with(&[10, 50])); - /// ``` - /// - /// Always returns `true` if `needle` is an empty slice: - /// - /// ``` - /// let v = &[10, 40, 30]; - /// assert!(v.starts_with(&[])); - /// let v: &[u8] = &[]; - /// assert!(v.starts_with(&[])); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn starts_with(&self, needle: &[T]) -> bool - where T: PartialEq - { - core_slice::SliceExt::starts_with(self, needle) - } - - /// Returns `true` if `needle` is a suffix of the slice. 
- /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert!(v.ends_with(&[30])); - /// assert!(v.ends_with(&[40, 30])); - /// assert!(!v.ends_with(&[50])); - /// assert!(!v.ends_with(&[50, 30])); - /// ``` - /// - /// Always returns `true` if `needle` is an empty slice: - /// - /// ``` - /// let v = &[10, 40, 30]; - /// assert!(v.ends_with(&[])); - /// let v: &[u8] = &[]; - /// assert!(v.ends_with(&[])); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn ends_with(&self, needle: &[T]) -> bool - where T: PartialEq - { - core_slice::SliceExt::ends_with(self, needle) - } - - /// Binary searches this sorted slice for a given element. - /// - /// If the value is found then `Ok` is returned, containing the - /// index of the matching element; if the value is not found then - /// `Err` is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// # Examples - /// - /// Looks up a series of four elements. The first is found, with a - /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1, 4]`. - /// - /// ``` - /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; - /// - /// assert_eq!(s.binary_search(&13), Ok(9)); - /// assert_eq!(s.binary_search(&4), Err(7)); - /// assert_eq!(s.binary_search(&100), Err(13)); - /// let r = s.binary_search(&1); - /// assert!(match r { Ok(1...4) => true, _ => false, }); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn binary_search(&self, x: &T) -> Result<usize, usize> - where T: Ord - { - core_slice::SliceExt::binary_search(self, x) - } - - /// Binary searches this sorted slice with a comparator function. - /// - /// The comparator function should implement an order consistent - /// with the sort order of the underlying slice, returning an - /// order code that indicates whether its argument is `Less`, - /// `Equal` or `Greater` the desired target. - /// - /// If a matching value is found then returns `Ok`, containing - /// the index for the matched element; if no match is found then - /// `Err` is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// # Examples - /// - /// Looks up a series of four elements. The first is found, with a - /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1, 4]`. - /// - /// ``` - /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; - /// - /// let seek = 13; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9)); - /// let seek = 4; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7)); - /// let seek = 100; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13)); - /// let seek = 1; - /// let r = s.binary_search_by(|probe| probe.cmp(&seek)); - /// assert!(match r { Ok(1...4) => true, _ => false, }); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result<usize, usize> - where F: FnMut(&'a T) -> Ordering - { - core_slice::SliceExt::binary_search_by(self, f) - } - - /// Binary searches this sorted slice with a key extraction function. - /// - /// Assumes that the slice is sorted by the key, for instance with - /// [`sort_by_key`] using the same key extraction function. 
- /// - /// If a matching value is found then returns `Ok`, containing the - /// index for the matched element; if no match is found then `Err` - /// is returned, containing the index where a matching element could - /// be inserted while maintaining sorted order. - /// - /// [`sort_by_key`]: #method.sort_by_key - /// - /// # Examples - /// - /// Looks up a series of four elements in a slice of pairs sorted by - /// their second elements. The first is found, with a uniquely - /// determined position; the second and third are not found; the - /// fourth could match any position in `[1, 4]`. - /// - /// ``` - /// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1), - /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13), - /// (1, 21), (2, 34), (4, 55)]; - /// - /// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9)); - /// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7)); - /// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13)); - /// let r = s.binary_search_by_key(&1, |&(a,b)| b); - /// assert!(match r { Ok(1...4) => true, _ => false, }); - /// ``` - #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result<usize, usize> - where F: FnMut(&'a T) -> B, - B: Ord - { - core_slice::SliceExt::binary_search_by_key(self, b, f) - } - /// Sorts the slice. /// /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. @@ -1294,7 +246,8 @@ impl<T> [T] { /// Sorts the slice with a key extraction function. /// - /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. + /// This sort is stable (i.e. does not reorder equal elements) and `O(m n log(m n))` + /// worst-case, where the key function is `O(m)`. /// /// When applicable, unstable sorting is preferred because it is generally faster than stable /// sorting and it doesn't allocate auxiliary memory. @@ -1320,89 +273,22 @@ impl<T> [T] { /// ``` #[stable(feature = "slice_sort_by_key", since = "1.7.0")] #[inline] - pub fn sort_by_key<B, F>(&mut self, mut f: F) - where F: FnMut(&T) -> B, B: Ord + pub fn sort_by_key<K, F>(&mut self, mut f: F) + where F: FnMut(&T) -> K, K: Ord { merge_sort(self, |a, b| f(a).lt(&f(b))); } - /// Sorts the slice, but may not preserve the order of equal elements. - /// - /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), - /// and `O(n log n)` worst-case. - /// - /// # Current implementation - /// - /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, - /// which combines the fast average case of randomized quicksort with the fast worst case of - /// heapsort, while achieving linear time on slices with certain patterns. It uses some - /// randomization to avoid degenerate cases, but with a fixed seed to always provide - /// deterministic behavior. - /// - /// It is typically faster than stable sorting, except in a few special cases, e.g. when the - /// slice consists of several concatenated sorted sequences. 
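The revised `sort_by_key` complexity of `O(m n log(m n))` reflects that the key function runs on every comparison, twice per comparison in fact, since the sort evaluates `f(a).lt(&f(b))`. A sketch that counts key invocations, to make the contrast with the once-per-element `sort_by_cached_key` concrete:

```rust
use std::cell::Cell;

// sort_by_key recomputes the key on every comparison; counting the
// calls shows why caching keys pays off when the key function is slow.
fn main() {
    let calls = Cell::new(0usize);
    let mut v: Vec<i32> = (0..64).rev().collect();
    v.sort_by_key(|&x| {
        calls.set(calls.get() + 1);
        x
    });
    // Far more than 64 key computations for 64 elements.
    assert!(calls.get() > 64);
    println!("key computed {} times", calls.get());
}
```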
- /// - /// # Examples - /// - /// ``` - /// let mut v = [-5, 4, 1, -3, 2]; - /// - /// v.sort_unstable(); - /// assert!(v == [-5, -3, 1, 2, 4]); - /// ``` - /// - /// [pdqsort]: https://github.com/orlp/pdqsort - #[stable(feature = "sort_unstable", since = "1.20.0")] - #[inline] - pub fn sort_unstable(&mut self) - where T: Ord - { - core_slice::SliceExt::sort_unstable(self); - } - - /// Sorts the slice with a comparator function, but may not preserve the order of equal - /// elements. - /// - /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), - /// and `O(n log n)` worst-case. - /// - /// # Current implementation - /// - /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, - /// which combines the fast average case of randomized quicksort with the fast worst case of - /// heapsort, while achieving linear time on slices with certain patterns. It uses some - /// randomization to avoid degenerate cases, but with a fixed seed to always provide - /// deterministic behavior. - /// - /// It is typically faster than stable sorting, except in a few special cases, e.g. when the - /// slice consists of several concatenated sorted sequences. - /// - /// # Examples - /// - /// ``` - /// let mut v = [5, 4, 1, 3, 2]; - /// v.sort_unstable_by(|a, b| a.cmp(b)); - /// assert!(v == [1, 2, 3, 4, 5]); + /// Sorts the slice with a key extraction function. /// - /// // reverse sorting - /// v.sort_unstable_by(|a, b| b.cmp(a)); - /// assert!(v == [5, 4, 3, 2, 1]); - /// ``` + /// During sorting, the key function is called only once per element. /// - /// [pdqsort]: https://github.com/orlp/pdqsort - #[stable(feature = "sort_unstable", since = "1.20.0")] - #[inline] - pub fn sort_unstable_by<F>(&mut self, compare: F) - where F: FnMut(&T, &T) -> Ordering - { - core_slice::SliceExt::sort_unstable_by(self, compare); - } - - /// Sorts the slice with a key extraction function, but may not preserve the order of equal - /// elements. + /// This sort is stable (i.e. does not reorder equal elements) and `O(m n + n log n)` + /// worst-case, where the key function is `O(m)`. /// - /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), - /// and `O(n log n)` worst-case. + /// For simple key functions (e.g. functions that are property accesses or + /// basic operations), [`sort_by_key`](#method.sort_by_key) is likely to be + /// faster. /// /// # Current implementation /// @@ -1412,281 +298,56 @@ impl<T> [T] { /// randomization to avoid degenerate cases, but with a fixed seed to always provide /// deterministic behavior. /// - /// It is typically faster than stable sorting, except in a few special cases, e.g. when the - /// slice consists of several concatenated sorted sequences. + /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the + /// length of the slice. 
/// /// # Examples /// /// ``` - /// let mut v = [-5i32, 4, 1, -3, 2]; + /// #![feature(slice_sort_by_cached_key)] + /// let mut v = [-5i32, 4, 32, -3, 2]; /// - /// v.sort_unstable_by_key(|k| k.abs()); - /// assert!(v == [1, 2, -3, 4, -5]); + /// v.sort_by_cached_key(|k| k.to_string()); + /// assert!(v == [-3, -5, 2, 32, 4]); /// ``` /// /// [pdqsort]: https://github.com/orlp/pdqsort - #[stable(feature = "sort_unstable", since = "1.20.0")] + #[unstable(feature = "slice_sort_by_cached_key", issue = "34447")] #[inline] - pub fn sort_unstable_by_key<B, F>(&mut self, f: F) - where F: FnMut(&T) -> B, - B: Ord + pub fn sort_by_cached_key<K, F>(&mut self, f: F) + where F: FnMut(&T) -> K, K: Ord { - core_slice::SliceExt::sort_unstable_by_key(self, f); - } + // Helper macro for indexing our vector by the smallest possible type, to reduce allocation. + macro_rules! sort_by_key { + ($t:ty, $slice:ident, $f:ident) => ({ + let mut indices: Vec<_> = + $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect(); + // The elements of `indices` are unique, as they are indexed, so any sort will be + // stable with respect to the original slice. We use `sort_unstable` here because + // it requires less memory allocation. + indices.sort_unstable(); + for i in 0..$slice.len() { + let mut index = indices[i].1; + while (index as usize) < i { + index = indices[index as usize].1; + } + indices[i].1 = index; + $slice.swap(i, index as usize); + } + }) + } - /// Rotates the slice in-place such that the first `mid` elements of the - /// slice move to the end while the last `self.len() - mid` elements move to - /// the front. After calling `rotate_left`, the element previously at index - /// `mid` will become the first element in the slice. - /// - /// # Panics - /// - /// This function will panic if `mid` is greater than the length of the - /// slice. Note that `mid == self.len()` does _not_ panic and is a no-op - /// rotation. - /// - /// # Complexity - /// - /// Takes linear (in `self.len()`) time. - /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a.rotate_left(2); - /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']); - /// ``` - /// - /// Rotating a subslice: - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a[1..5].rotate_left(1); - /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']); - /// ``` - #[unstable(feature = "slice_rotate", issue = "41891")] - pub fn rotate_left(&mut self, mid: usize) { - core_slice::SliceExt::rotate_left(self, mid); - } + let sz_u8 = mem::size_of::<(K, u8)>(); + let sz_u16 = mem::size_of::<(K, u16)>(); + let sz_u32 = mem::size_of::<(K, u32)>(); + let sz_usize = mem::size_of::<(K, usize)>(); - #[unstable(feature = "slice_rotate", issue = "41891")] - #[rustc_deprecated(since = "", reason = "renamed to `rotate_left`")] - pub fn rotate(&mut self, mid: usize) { - core_slice::SliceExt::rotate_left(self, mid); - } - - /// Rotates the slice in-place such that the first `self.len() - k` - /// elements of the slice move to the end while the last `k` elements move - /// to the front. After calling `rotate_right`, the element previously at - /// index `self.len() - k` will become the first element in the slice. - /// - /// # Panics - /// - /// This function will panic if `k` is greater than the length of the - /// slice. Note that `k == self.len()` does _not_ panic and is a no-op - /// rotation. 
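The index-chasing loop generated by `sort_by_key!` above is the subtle part: once position `i` is fixed, earlier entries of `indices` may point at elements that have already been swapped away, so the loop follows the chain of recorded indices until it lands at or past `i`. A standalone sketch of the same swap-and-follow technique (my own restatement, not the patch's code):

```
// `indices[i]` is the original position of the element that belongs in
// slot `i`. Earlier iterations may have swapped that element elsewhere,
// so follow the chain of recorded indices until one at or past `i` is found.
fn apply_permutation<T>(v: &mut [T], indices: &mut [usize]) {
    for i in 0..v.len() {
        let mut index = indices[i];
        while index < i {
            index = indices[index];
        }
        indices[i] = index;
        v.swap(i, index);
    }
}

fn main() {
    let mut v = ['d', 'b', 'a', 'c'];
    let mut indices = [2, 1, 3, 0]; // sorted order: v[2], v[1], v[3], v[0]
    apply_permutation(&mut v, &mut indices);
    assert_eq!(v, ['a', 'b', 'c', 'd']);
}
```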
- /// - /// # Complexity - /// - /// Takes linear (in `self.len()`) time. - /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a.rotate_right(2); - /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']); - /// ``` - /// - /// Rotate a subslice: - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a[1..5].rotate_right(1); - /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']); - /// ``` - #[unstable(feature = "slice_rotate", issue = "41891")] - pub fn rotate_right(&mut self, k: usize) { - core_slice::SliceExt::rotate_right(self, k); - } - - /// Copies the elements from `src` into `self`. - /// - /// The length of `src` must be the same as `self`. - /// - /// If `src` implements `Copy`, it can be more performant to use - /// [`copy_from_slice`]. - /// - /// # Panics - /// - /// This function will panic if the two slices have different lengths. - /// - /// # Examples - /// - /// Cloning two elements from a slice into another: - /// - /// ``` - /// let src = [1, 2, 3, 4]; - /// let mut dst = [0, 0]; - /// - /// dst.clone_from_slice(&src[2..]); - /// - /// assert_eq!(src, [1, 2, 3, 4]); - /// assert_eq!(dst, [3, 4]); - /// ``` - /// - /// Rust enforces that there can only be one mutable reference with no - /// immutable references to a particular piece of data in a particular - /// scope. Because of this, attempting to use `clone_from_slice` on a - /// single slice will result in a compile failure: - /// - /// ```compile_fail - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// slice[..2].clone_from_slice(&slice[3..]); // compile fail! - /// ``` - /// - /// To work around this, we can use [`split_at_mut`] to create two distinct - /// sub-slices from a slice: - /// - /// ``` - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// { - /// let (left, right) = slice.split_at_mut(2); - /// left.clone_from_slice(&right[1..]); - /// } - /// - /// assert_eq!(slice, [4, 5, 3, 4, 5]); - /// ``` - /// - /// [`copy_from_slice`]: #method.copy_from_slice - /// [`split_at_mut`]: #method.split_at_mut - #[stable(feature = "clone_from_slice", since = "1.7.0")] - pub fn clone_from_slice(&mut self, src: &[T]) where T: Clone { - core_slice::SliceExt::clone_from_slice(self, src) - } - - /// Copies all elements from `src` into `self`, using a memcpy. - /// - /// The length of `src` must be the same as `self`. - /// - /// If `src` does not implement `Copy`, use [`clone_from_slice`]. - /// - /// # Panics - /// - /// This function will panic if the two slices have different lengths. - /// - /// # Examples - /// - /// Copying two elements from a slice into another: - /// - /// ``` - /// let src = [1, 2, 3, 4]; - /// let mut dst = [0, 0]; - /// - /// dst.copy_from_slice(&src[2..]); - /// - /// assert_eq!(src, [1, 2, 3, 4]); - /// assert_eq!(dst, [3, 4]); - /// ``` - /// - /// Rust enforces that there can only be one mutable reference with no - /// immutable references to a particular piece of data in a particular - /// scope. Because of this, attempting to use `copy_from_slice` on a - /// single slice will result in a compile failure: - /// - /// ```compile_fail - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// slice[..2].copy_from_slice(&slice[3..]); // compile fail! 
- /// ``` - /// - /// To work around this, we can use [`split_at_mut`] to create two distinct - /// sub-slices from a slice: - /// - /// ``` - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// { - /// let (left, right) = slice.split_at_mut(2); - /// left.copy_from_slice(&right[1..]); - /// } - /// - /// assert_eq!(slice, [4, 5, 3, 4, 5]); - /// ``` - /// - /// [`clone_from_slice`]: #method.clone_from_slice - /// [`split_at_mut`]: #method.split_at_mut - #[stable(feature = "copy_from_slice", since = "1.9.0")] - pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy { - core_slice::SliceExt::copy_from_slice(self, src) - } - - /// Swaps all elements in `self` with those in `other`. - /// - /// The length of `other` must be the same as `self`. - /// - /// # Panics - /// - /// This function will panic if the two slices have different lengths. - /// - /// # Example - /// - /// Swapping two elements across slices: - /// - /// ``` - /// #![feature(swap_with_slice)] - /// - /// let mut slice1 = [0, 0]; - /// let mut slice2 = [1, 2, 3, 4]; - /// - /// slice1.swap_with_slice(&mut slice2[2..]); - /// - /// assert_eq!(slice1, [3, 4]); - /// assert_eq!(slice2, [1, 2, 0, 0]); - /// ``` - /// - /// Rust enforces that there can only be one mutable reference to a - /// particular piece of data in a particular scope. Because of this, - /// attempting to use `swap_with_slice` on a single slice will result in - /// a compile failure: - /// - /// ```compile_fail - /// #![feature(swap_with_slice)] - /// - /// let mut slice = [1, 2, 3, 4, 5]; - /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail! - /// ``` - /// - /// To work around this, we can use [`split_at_mut`] to create two distinct - /// mutable sub-slices from a slice: - /// - /// ``` - /// #![feature(swap_with_slice)] - /// - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// { - /// let (left, right) = slice.split_at_mut(2); - /// left.swap_with_slice(&mut right[1..]); - /// } - /// - /// assert_eq!(slice, [4, 5, 3, 1, 2]); - /// ``` - /// - /// [`split_at_mut`]: #method.split_at_mut - #[unstable(feature = "swap_with_slice", issue = "44030")] - pub fn swap_with_slice(&mut self, other: &mut [T]) { - core_slice::SliceExt::swap_with_slice(self, other) + let len = self.len(); + if len < 2 { return } + if sz_u8 < sz_u16 && len <= ( u8::MAX as usize) { return sort_by_key!( u8, self, f) } + if sz_u16 < sz_u32 && len <= (u16::MAX as usize) { return sort_by_key!(u16, self, f) } + if sz_u32 < sz_usize && len <= (u32::MAX as usize) { return sort_by_key!(u32, self, f) } + sort_by_key!(usize, self, f) } /// Copies `self` into a new `Vec`. @@ -1728,18 +389,82 @@ impl<T> [T] { // NB see hack module in this file hack::into_vec(self) } + + /// Creates a vector by repeating a slice `n` times. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(repeat_generic_slice)] + /// + /// fn main() { + /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]); + /// } + /// ``` + #[unstable(feature = "repeat_generic_slice", + reason = "it's on str, why not on slice?", + issue = "48784")] + pub fn repeat(&self, n: usize) -> Vec<T> where T: Copy { + if n == 0 { + return Vec::new(); + } + + // If `n` is larger than zero, it can be split as + // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`. + // `2^expn` is the number represented by the leftmost '1' bit of `n`, + // and `rem` is the remaining part of `n`. + + // Using `Vec` to access `set_len()`. 
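+        // As a concrete illustration (not in the original patch comments):
+        // `n = 5` decomposes as `4 + 1`. One copy of `self` is pushed, then
+        // doubled twice (1 -> 2 -> 4 copies), and the remaining `rem = 1`
+        // copy is filled in from the front of `buf` at the end.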
+ let mut buf = Vec::with_capacity(self.len() * n); + + // `2^expn` repetition is done by doubling `buf` `expn`-times. + buf.extend(self); + { + let mut m = n >> 1; + // If `m > 0`, there are remaining bits up to the leftmost '1'. + while m > 0 { + // `buf.extend(buf)`: + unsafe { + ptr::copy_nonoverlapping( + buf.as_ptr(), + (buf.as_mut_ptr() as *mut T).add(buf.len()), + buf.len(), + ); + // `buf` has capacity of `self.len() * n`. + let buf_len = buf.len(); + buf.set_len(buf_len * 2); + } + + m >>= 1; + } + } + + // `rem` (`= n - 2^expn`) repetition is done by copying + // first `rem` repetitions from `buf` itself. + let rem_len = self.len() * n - buf.len(); // `self.len() * rem` + if rem_len > 0 { + // `buf.extend(buf[0 .. rem_len])`: + unsafe { + // This is non-overlapping since `2^expn > rem`. + ptr::copy_nonoverlapping( + buf.as_ptr(), + (buf.as_mut_ptr() as *mut T).add(buf.len()), + rem_len, + ); + // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`). + let buf_cap = buf.capacity(); + buf.set_len(buf_cap); + } + } + buf + } } -#[lang = "slice_u8"] +#[lang = "slice_u8_alloc"] #[cfg(not(test))] impl [u8] { - /// Checks if all bytes in this slice are within the ASCII range. - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn is_ascii(&self) -> bool { - self.iter().all(|b| b.is_ascii()) - } - /// Returns a vector containing a copy of this slice where each byte /// is mapped to its ASCII upper case equivalent. /// @@ -1773,53 +498,6 @@ impl [u8] { me.make_ascii_lowercase(); me } - - /// Checks that two slices are an ASCII case-insensitive match. - /// - /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`, - /// but without allocating and copying temporaries. - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool { - self.len() == other.len() && - self.iter().zip(other).all(|(a, b)| { - a.eq_ignore_ascii_case(b) - }) - } - - /// Converts this slice to its ASCII upper case equivalent in-place. - /// - /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new uppercased value without modifying the existing one, use - /// [`to_ascii_uppercase`]. - /// - /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn make_ascii_uppercase(&mut self) { - for byte in self { - byte.make_ascii_uppercase(); - } - } - - /// Converts this slice to its ASCII lower case equivalent in-place. - /// - /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new lowercased value without modifying the existing one, use - /// [`to_ascii_lowercase`]. 
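As a sanity check on the doubling scheme in `repeat` above, this nightly sketch (my own, assuming the `repeat_generic_slice` feature from the patch) compares it against the naive cycle-and-take definition for several values of `n`, including the `n == 0` early return:

```
#![feature(repeat_generic_slice)]

fn main() {
    let s = [1, 2, 3];
    for n in 0..8 {
        // Naive definition: the slice cycled `n` times over.
        let naive: Vec<i32> = s.iter().cloned().cycle().take(s.len() * n).collect();
        assert_eq!(s.repeat(n), naive);
    }
}
```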
- /// - /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn make_ascii_lowercase(&mut self) { - for byte in self { - byte.make_ascii_lowercase(); - } - } } //////////////////////////////////////////////////////////////////////////////// @@ -1888,15 +566,17 @@ impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] { } fn join(&self, sep: &T) -> Vec<T> { + let mut iter = self.iter(); + let first = match iter.next() { + Some(first) => first, + None => return vec![], + }; let size = self.iter().fold(0, |acc, v| acc + v.borrow().len()); let mut result = Vec::with_capacity(size + self.len()); - let mut first = true; - for v in self { - if first { - first = false - } else { - result.push(sep.clone()) - } + result.extend_from_slice(first.borrow()); + + for v in iter { + result.push(sep.clone()); result.extend_from_slice(v.borrow()) } result diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index a00e3d17dd0..870bf971cd3 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -10,6 +10,8 @@ //! Unicode string slices. //! +//! *[See also the `str` primitive type](../../std/primitive.str.html).* +//! //! The `&str` type is one of the two main string types, the other being `String`. //! Unlike its `String` counterpart, its contents are borrowed. //! @@ -29,8 +31,6 @@ //! ``` //! let hello_world: &'static str = "Hello, world!"; //! ``` -//! -//! *[See also the `str` primitive type](../../std/primitive.str.html).* #![stable(feature = "rust1", since = "1.0.0")] @@ -43,16 +43,15 @@ use core::str as core_str; use core::str::pattern::Pattern; use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; +use core::ptr; use core::iter::FusedIterator; -use std_unicode::str::{UnicodeStr, Utf16Encoder}; +use core::unicode::conversions; -use vec_deque::VecDeque; use borrow::{Borrow, ToOwned}; +use boxed::Box; +use slice::{SliceConcatExt, SliceIndex}; use string::String; -use std_unicode; use vec::Vec; -use slice::{SliceConcatExt, SliceIndex}; -use boxed::Box; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{FromStr, Utf8Error}; @@ -74,10 +73,13 @@ pub use core::str::{from_utf8, from_utf8_mut, Chars, CharIndices, Bytes}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError}; #[stable(feature = "rust1", since = "1.0.0")] -pub use std_unicode::str::SplitWhitespace; +pub use core::str::SplitWhitespace; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::pattern; - +#[stable(feature = "encode_utf16", since = "1.8.0")] +pub use core::str::EncodeUtf16; +#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +pub use core::str::SplitAsciiWhitespace; #[unstable(feature = "slice_concat_ext", reason = "trait should not have to exist", @@ -86,47 +88,13 @@ impl<S: Borrow<str>> SliceConcatExt<str> for [S] { type Output = String; fn concat(&self) -> String { - if self.is_empty() { - return String::new(); - } - - // `len` calculation may overflow but push_str will check boundaries - let len = self.iter().map(|s| s.borrow().len()).sum(); - let mut result = String::with_capacity(len); - - for s in self { - result.push_str(s.borrow()) - } - - result + self.join("") } fn join(&self, sep: &str) -> String { - if self.is_empty() { - return String::new(); - } - - // concat is faster - if sep.is_empty() { - return self.concat(); + unsafe { + String::from_utf8_unchecked( 
join_generic_copy(self, sep.as_bytes()) ) } - - // this is wrong without the guarantee that `self` is non-empty - // `len` calculation may overflow but push_str but will check boundaries - let len = sep.len() * (self.len() - 1) + - self.iter().map(|s| s.borrow().len()).sum::<usize>(); - let mut result = String::with_capacity(len); - let mut first = true; - - for s in self { - if first { - first = false; - } else { - result.push_str(sep); - } - result.push_str(s.borrow()); - } - result } fn connect(&self, sep: &str) -> String { @@ -134,46 +102,96 @@ impl<S: Borrow<str>> SliceConcatExt<str> for [S] { } } -/// An iterator of [`u16`] over the string encoded as UTF-16. -/// -/// [`u16`]: ../../std/primitive.u16.html -/// -/// This struct is created by the [`encode_utf16`] method on [`str`]. -/// See its documentation for more. -/// -/// [`encode_utf16`]: ../../std/primitive.str.html#method.encode_utf16 -/// [`str`]: ../../std/primitive.str.html -#[derive(Clone)] -#[stable(feature = "encode_utf16", since = "1.8.0")] -pub struct EncodeUtf16<'a> { - encoder: Utf16Encoder<Chars<'a>>, +macro_rules! spezialize_for_lengths { + ($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => { + let mut target = $target; + let iter = $iter; + let sep_bytes = $separator; + match $separator.len() { + $( + // loops with hardcoded sizes run much faster + // specialize the cases with small separator lengths + $num => { + for s in iter { + copy_slice_and_advance!(target, sep_bytes); + copy_slice_and_advance!(target, s.borrow().as_ref()); + } + }, + )* + _ => { + // arbitrary non-zero size fallback + for s in iter { + copy_slice_and_advance!(target, sep_bytes); + copy_slice_and_advance!(target, s.borrow().as_ref()); + } + } + } + }; } -#[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a> fmt::Debug for EncodeUtf16<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("EncodeUtf16 { .. }") +macro_rules! copy_slice_and_advance { + ($target:expr, $bytes:expr) => { + let len = $bytes.len(); + let (head, tail) = {$target}.split_at_mut(len); + head.copy_from_slice($bytes); + $target = tail; } } -#[stable(feature = "encode_utf16", since = "1.8.0")] -impl<'a> Iterator for EncodeUtf16<'a> { - type Item = u16; - - #[inline] - fn next(&mut self) -> Option<u16> { - self.encoder.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.encoder.size_hint() +// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec +// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262) +// For this reason SliceConcatExt<T> is not specialized for T: Copy and SliceConcatExt<str> is the +// only user of this function. It is left in place for the time when that is fixed. 
+// +// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]> +// [T] and str both impl AsRef<[T]> for some T +// => s.borrow().as_ref() and we always have slices +fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T> +where + T: Copy, + B: AsRef<[T]> + ?Sized, + S: Borrow<B>, +{ + let sep_len = sep.len(); + let mut iter = slice.iter(); + + // the first slice is the only one without a separator preceding it + let first = match iter.next() { + Some(first) => first, + None => return vec![], + }; + + // compute the exact total length of the joined Vec + // if the `len` calculation overflows, we'll panic + // we would have run out of memory anyway and the rest of the function requires + // the entire Vec pre-allocated for safety + let len = sep_len.checked_mul(iter.len()).and_then(|n| { + slice.iter() + .map(|s| s.borrow().as_ref().len()) + .try_fold(n, usize::checked_add) + }).expect("attempt to join into collection with len > usize::MAX"); + + // crucial for safety + let mut result = Vec::with_capacity(len); + assert!(result.capacity() >= len); + + result.extend_from_slice(first.borrow().as_ref()); + + unsafe { + { + let pos = result.len(); + let target = result.get_unchecked_mut(pos..len); + + // copy separator and slices over without bounds checks + // generate loops with hardcoded offsets for small separators + // massive improvements possible (~ x2) + spezialize_for_lengths!(sep, target, iter; 0, 1, 2, 3, 4); + } + result.set_len(len); } + result } -#[unstable(feature = "fused", issue = "35602")] -impl<'a> FusedIterator for EncodeUtf16<'a> {} - #[stable(feature = "rust1", since = "1.0.0")] impl Borrow<str> for String { #[inline] @@ -197,1606 +215,9 @@ impl ToOwned for str { } /// Methods for string slices. -#[lang = "str"] +#[lang = "str_alloc"] #[cfg(not(test))] impl str { - /// Returns the length of `self`. - /// - /// This length is in bytes, not [`char`]s or graphemes. In other words, - /// it may not be what a human considers the length of the string. - /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let len = "foo".len(); - /// assert_eq!(3, len); - /// - /// let len = "ƒoo".len(); // fancy f! - /// assert_eq!(4, len); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn len(&self) -> usize { - core_str::StrExt::len(self) - } - - /// Returns `true` if `self` has a length of zero bytes. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = ""; - /// assert!(s.is_empty()); - /// - /// let s = "not empty"; - /// assert!(!s.is_empty()); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn is_empty(&self) -> bool { - core_str::StrExt::is_empty(self) - } - - /// Checks that `index`-th byte lies at the start and/or end of a - /// UTF-8 code point sequence. - /// - /// The start and end of the string (when `index == self.len()`) are - /// considered to be - /// boundaries. - /// - /// Returns `false` if `index` is greater than `self.len()`. 
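Stepping back from the diff for a moment: the `join_generic_copy` path above computes the exact output length up front, with checked arithmetic, so the result vector is allocated once and never grows. A small stable-Rust check of that arithmetic, and of the slice-of-slices `join` that now shares the first-element-then-separator structure (my own example, not from the patch):

```
fn main() {
    let pieces = ["alpha", "beta", "gamma"];
    let sep = ", ";
    let joined = pieces.join(sep);
    assert_eq!(joined, "alpha, beta, gamma");

    // Exact length: sum of the pieces plus one separator between each pair.
    let expected = pieces.iter().map(|s| s.len()).sum::<usize>()
        + sep.len() * (pieces.len() - 1);
    assert_eq!(joined.len(), expected);

    // The same first-element-then-separator scheme backs `join` on
    // slices of slices.
    assert_eq!([[1, 2], [3, 4]].join(&0), vec![1, 2, 0, 3, 4]);
}
```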
- /// - /// # Examples - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// assert!(s.is_char_boundary(0)); - /// // start of `老` - /// assert!(s.is_char_boundary(6)); - /// assert!(s.is_char_boundary(s.len())); - /// - /// // second byte of `ö` - /// assert!(!s.is_char_boundary(2)); - /// - /// // third byte of `老` - /// assert!(!s.is_char_boundary(8)); - /// ``` - #[stable(feature = "is_char_boundary", since = "1.9.0")] - #[inline] - pub fn is_char_boundary(&self, index: usize) -> bool { - core_str::StrExt::is_char_boundary(self, index) - } - - /// Converts a string slice to a byte slice. To convert the byte slice back - /// into a string slice, use the [`str::from_utf8`] function. - /// - /// [`str::from_utf8`]: ./str/fn.from_utf8.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bytes = "bors".as_bytes(); - /// assert_eq!(b"bors", bytes); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline(always)] - pub fn as_bytes(&self) -> &[u8] { - core_str::StrExt::as_bytes(self) - } - - /// Converts a mutable string slice to a mutable byte slice. To convert the - /// mutable byte slice back into a mutable string slice, use the - /// [`str::from_utf8_mut`] function. - /// - /// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut s = String::from("Hello"); - /// let bytes = unsafe { s.as_bytes_mut() }; - /// - /// assert_eq!(b"Hello", bytes); - /// ``` - /// - /// Mutability: - /// - /// ``` - /// let mut s = String::from("🗻∈🌏"); - /// - /// unsafe { - /// let bytes = s.as_bytes_mut(); - /// - /// bytes[0] = 0xF0; - /// bytes[1] = 0x9F; - /// bytes[2] = 0x8D; - /// bytes[3] = 0x94; - /// } - /// - /// assert_eq!("🍔∈🌏", s); - /// ``` - #[stable(feature = "str_mut_extras", since = "1.20.0")] - #[inline(always)] - pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { - core_str::StrExt::as_bytes_mut(self) - } - - /// Converts a string slice to a raw pointer. - /// - /// As string slices are a slice of bytes, the raw pointer points to a - /// [`u8`]. This pointer will be pointing to the first byte of the string - /// slice. - /// - /// [`u8`]: primitive.u8.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = "Hello"; - /// let ptr = s.as_ptr(); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn as_ptr(&self) -> *const u8 { - core_str::StrExt::as_ptr(self) - } - - /// Returns a subslice of `str`. - /// - /// This is the non-panicking alternative to indexing the `str`. Returns - /// [`None`] whenever equivalent indexing operation would panic. - /// - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// let v = String::from("🗻∈🌏"); - /// - /// assert_eq!(Some("🗻"), v.get(0..4)); - /// - /// // indices not on UTF-8 sequence boundaries - /// assert!(v.get(1..).is_none()); - /// assert!(v.get(..8).is_none()); - /// - /// // out of bounds - /// assert!(v.get(..42).is_none()); - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub fn get<I: SliceIndex<str>>(&self, i: I) -> Option<&I::Output> { - core_str::StrExt::get(self, i) - } - - /// Returns a mutable subslice of `str`. - /// - /// This is the non-panicking alternative to indexing the `str`. Returns - /// [`None`] whenever equivalent indexing operation would panic. 
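A quick addendum to the checked-slicing docs above (my own example): `get` is the non-panicking counterpart of range indexing when a byte index may fall inside a multi-byte code point.

```
fn main() {
    let s = "Löwe";
    // Byte 2 is the second byte of the two-byte 'ö', so it is not a char
    // boundary: `&s[..2]` would panic, while `s.get(..2)` returns None.
    assert!(!s.is_char_boundary(2));
    assert!(s.get(..2).is_none());
    assert_eq!(s.get(..3), Some("Lö"));
}
```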
- /// - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// let mut v = String::from("hello"); - /// // correct length - /// assert!(v.get_mut(0..5).is_some()); - /// // out of bounds - /// assert!(v.get_mut(..42).is_none()); - /// assert_eq!(Some("he"), v.get_mut(0..2).map(|v| &*v)); - /// - /// assert_eq!("hello", v); - /// { - /// let s = v.get_mut(0..2); - /// let s = s.map(|s| { - /// s.make_ascii_uppercase(); - /// &*s - /// }); - /// assert_eq!(Some("HE"), s); - /// } - /// assert_eq!("HEllo", v); - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub fn get_mut<I: SliceIndex<str>>(&mut self, i: I) -> Option<&mut I::Output> { - core_str::StrExt::get_mut(self, i) - } - - /// Returns a unchecked subslice of `str`. - /// - /// This is the unchecked alternative to indexing the `str`. - /// - /// # Safety - /// - /// Callers of this function are responsible that these preconditions are - /// satisfied: - /// - /// * The starting index must come before the ending index; - /// * Indexes must be within bounds of the original slice; - /// * Indexes must lie on UTF-8 sequence boundaries. - /// - /// Failing that, the returned string slice may reference invalid memory or - /// violate the invariants communicated by the `str` type. - /// - /// # Examples - /// - /// ``` - /// let v = "🗻∈🌏"; - /// unsafe { - /// assert_eq!("🗻", v.get_unchecked(0..4)); - /// assert_eq!("∈", v.get_unchecked(4..7)); - /// assert_eq!("🌏", v.get_unchecked(7..11)); - /// } - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub unsafe fn get_unchecked<I: SliceIndex<str>>(&self, i: I) -> &I::Output { - core_str::StrExt::get_unchecked(self, i) - } - - /// Returns a mutable, unchecked subslice of `str`. - /// - /// This is the unchecked alternative to indexing the `str`. - /// - /// # Safety - /// - /// Callers of this function are responsible that these preconditions are - /// satisfied: - /// - /// * The starting index must come before the ending index; - /// * Indexes must be within bounds of the original slice; - /// * Indexes must lie on UTF-8 sequence boundaries. - /// - /// Failing that, the returned string slice may reference invalid memory or - /// violate the invariants communicated by the `str` type. - /// - /// # Examples - /// - /// ``` - /// let mut v = String::from("🗻∈🌏"); - /// unsafe { - /// assert_eq!("🗻", v.get_unchecked_mut(0..4)); - /// assert_eq!("∈", v.get_unchecked_mut(4..7)); - /// assert_eq!("🌏", v.get_unchecked_mut(7..11)); - /// } - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub unsafe fn get_unchecked_mut<I: SliceIndex<str>>(&mut self, i: I) -> &mut I::Output { - core_str::StrExt::get_unchecked_mut(self, i) - } - - /// Creates a string slice from another string slice, bypassing safety - /// checks. - /// - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`str`] and [`Index`]. - /// - /// [`str`]: primitive.str.html - /// [`Index`]: ops/trait.Index.html - /// - /// This new slice goes from `begin` to `end`, including `begin` but - /// excluding `end`. - /// - /// To get a mutable string slice instead, see the - /// [`slice_mut_unchecked`] method. - /// - /// [`slice_mut_unchecked`]: #method.slice_mut_unchecked - /// - /// # Safety - /// - /// Callers of this function are responsible that three preconditions are - /// satisfied: - /// - /// * `begin` must come before `end`. 
- /// * `begin` and `end` must be byte positions within the string slice. - /// * `begin` and `end` must lie on UTF-8 sequence boundaries. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// unsafe { - /// assert_eq!("Löwe 老虎 Léopard", s.slice_unchecked(0, 21)); - /// } - /// - /// let s = "Hello, world!"; - /// - /// unsafe { - /// assert_eq!("world", s.slice_unchecked(7, 12)); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str { - core_str::StrExt::slice_unchecked(self, begin, end) - } - - /// Creates a string slice from another string slice, bypassing safety - /// checks. - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`str`] and [`IndexMut`]. - /// - /// [`str`]: primitive.str.html - /// [`IndexMut`]: ops/trait.IndexMut.html - /// - /// This new slice goes from `begin` to `end`, including `begin` but - /// excluding `end`. - /// - /// To get an immutable string slice instead, see the - /// [`slice_unchecked`] method. - /// - /// [`slice_unchecked`]: #method.slice_unchecked - /// - /// # Safety - /// - /// Callers of this function are responsible that three preconditions are - /// satisfied: - /// - /// * `begin` must come before `end`. - /// * `begin` and `end` must be byte positions within the string slice. - /// * `begin` and `end` must lie on UTF-8 sequence boundaries. - #[stable(feature = "str_slice_mut", since = "1.5.0")] - #[inline] - pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str { - core_str::StrExt::slice_mut_unchecked(self, begin, end) - } - - /// Divide one string slice into two at an index. - /// - /// The argument, `mid`, should be a byte offset from the start of the - /// string. It must also be on the boundary of a UTF-8 code point. - /// - /// The two slices returned go from the start of the string slice to `mid`, - /// and from `mid` to the end of the string slice. - /// - /// To get mutable string slices instead, see the [`split_at_mut`] - /// method. - /// - /// [`split_at_mut`]: #method.split_at_mut - /// - /// # Panics - /// - /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is - /// beyond the last code point of the string slice. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = "Per Martin-Löf"; - /// - /// let (first, last) = s.split_at(3); - /// - /// assert_eq!("Per", first); - /// assert_eq!(" Martin-Löf", last); - /// ``` - #[inline] - #[stable(feature = "str_split_at", since = "1.4.0")] - pub fn split_at(&self, mid: usize) -> (&str, &str) { - core_str::StrExt::split_at(self, mid) - } - - /// Divide one mutable string slice into two at an index. - /// - /// The argument, `mid`, should be a byte offset from the start of the - /// string. It must also be on the boundary of a UTF-8 code point. - /// - /// The two slices returned go from the start of the string slice to `mid`, - /// and from `mid` to the end of the string slice. - /// - /// To get immutable string slices instead, see the [`split_at`] method. - /// - /// [`split_at`]: #method.split_at - /// - /// # Panics - /// - /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is - /// beyond the last code point of the string slice. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut s = "Per Martin-Löf".to_string(); - /// { - /// let (first, last) = s.split_at_mut(3); - /// first.make_ascii_uppercase(); - /// assert_eq!("PER", first); - /// assert_eq!(" Martin-Löf", last); - /// } - /// assert_eq!("PER Martin-Löf", s); - /// ``` - #[inline] - #[stable(feature = "str_split_at", since = "1.4.0")] - pub fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) { - core_str::StrExt::split_at_mut(self, mid) - } - - /// Returns an iterator over the [`char`]s of a string slice. - /// - /// As a string slice consists of valid UTF-8, we can iterate through a - /// string slice by [`char`]. This method returns such an iterator. - /// - /// It's important to remember that [`char`] represents a Unicode Scalar - /// Value, and may not match your idea of what a 'character' is. Iteration - /// over grapheme clusters may be what you actually want. - /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let word = "goodbye"; - /// - /// let count = word.chars().count(); - /// assert_eq!(7, count); - /// - /// let mut chars = word.chars(); - /// - /// assert_eq!(Some('g'), chars.next()); - /// assert_eq!(Some('o'), chars.next()); - /// assert_eq!(Some('o'), chars.next()); - /// assert_eq!(Some('d'), chars.next()); - /// assert_eq!(Some('b'), chars.next()); - /// assert_eq!(Some('y'), chars.next()); - /// assert_eq!(Some('e'), chars.next()); - /// - /// assert_eq!(None, chars.next()); - /// ``` - /// - /// Remember, [`char`]s may not match your human intuition about characters: - /// - /// ``` - /// let y = "y̆"; - /// - /// let mut chars = y.chars(); - /// - /// assert_eq!(Some('y'), chars.next()); // not 'y̆' - /// assert_eq!(Some('\u{0306}'), chars.next()); - /// - /// assert_eq!(None, chars.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn chars(&self) -> Chars { - core_str::StrExt::chars(self) - } - /// Returns an iterator over the [`char`]s of a string slice, and their - /// positions. - /// - /// As a string slice consists of valid UTF-8, we can iterate through a - /// string slice by [`char`]. This method returns an iterator of both - /// these [`char`]s, as well as their byte positions. - /// - /// The iterator yields tuples. The position is first, the [`char`] is - /// second. 
- /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let word = "goodbye"; - /// - /// let count = word.char_indices().count(); - /// assert_eq!(7, count); - /// - /// let mut char_indices = word.char_indices(); - /// - /// assert_eq!(Some((0, 'g')), char_indices.next()); - /// assert_eq!(Some((1, 'o')), char_indices.next()); - /// assert_eq!(Some((2, 'o')), char_indices.next()); - /// assert_eq!(Some((3, 'd')), char_indices.next()); - /// assert_eq!(Some((4, 'b')), char_indices.next()); - /// assert_eq!(Some((5, 'y')), char_indices.next()); - /// assert_eq!(Some((6, 'e')), char_indices.next()); - /// - /// assert_eq!(None, char_indices.next()); - /// ``` - /// - /// Remember, [`char`]s may not match your human intuition about characters: - /// - /// ``` - /// let yes = "y̆es"; - /// - /// let mut char_indices = yes.char_indices(); - /// - /// assert_eq!(Some((0, 'y')), char_indices.next()); // not (0, 'y̆') - /// assert_eq!(Some((1, '\u{0306}')), char_indices.next()); - /// - /// // note the 3 here - the last character took up two bytes - /// assert_eq!(Some((3, 'e')), char_indices.next()); - /// assert_eq!(Some((4, 's')), char_indices.next()); - /// - /// assert_eq!(None, char_indices.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn char_indices(&self) -> CharIndices { - core_str::StrExt::char_indices(self) - } - - /// An iterator over the bytes of a string slice. - /// - /// As a string slice consists of a sequence of bytes, we can iterate - /// through a string slice by byte. This method returns such an iterator. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut bytes = "bors".bytes(); - /// - /// assert_eq!(Some(b'b'), bytes.next()); - /// assert_eq!(Some(b'o'), bytes.next()); - /// assert_eq!(Some(b'r'), bytes.next()); - /// assert_eq!(Some(b's'), bytes.next()); - /// - /// assert_eq!(None, bytes.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn bytes(&self) -> Bytes { - core_str::StrExt::bytes(self) - } - - /// Split a string slice by whitespace. - /// - /// The iterator returned will return string slices that are sub-slices of - /// the original string slice, separated by any amount of whitespace. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut iter = "A few words".split_whitespace(); - /// - /// assert_eq!(Some("A"), iter.next()); - /// assert_eq!(Some("few"), iter.next()); - /// assert_eq!(Some("words"), iter.next()); - /// - /// assert_eq!(None, iter.next()); - /// ``` - /// - /// All kinds of whitespace are considered: - /// - /// ``` - /// let mut iter = " Mary had\ta\u{2009}little \n\t lamb".split_whitespace(); - /// assert_eq!(Some("Mary"), iter.next()); - /// assert_eq!(Some("had"), iter.next()); - /// assert_eq!(Some("a"), iter.next()); - /// assert_eq!(Some("little"), iter.next()); - /// assert_eq!(Some("lamb"), iter.next()); - /// - /// assert_eq!(None, iter.next()); - /// ``` - #[stable(feature = "split_whitespace", since = "1.1.0")] - #[inline] - pub fn split_whitespace(&self) -> SplitWhitespace { - UnicodeStr::split_whitespace(self) - } - - /// An iterator over the lines of a string, as string slices. - /// - /// Lines are ended with either a newline (`\n`) or a carriage return with - /// a line feed (`\r\n`). - /// - /// The final line ending is optional. 
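The terminator handling described above differs from a plain `split`; a short comparison (my own illustration, not from the removed docs):

```
fn main() {
    let text = "foo\r\nbar\n";

    // `lines` strips both `\n` and `\r\n` and does not yield a trailing
    // empty line for a final terminator.
    let lines: Vec<&str> = text.lines().collect();
    assert_eq!(lines, ["foo", "bar"]);

    // `split('\n')` keeps the `\r` and produces a trailing empty piece.
    let split: Vec<&str> = text.split('\n').collect();
    assert_eq!(split, ["foo\r", "bar", ""]);
}
```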
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let text = "foo\r\nbar\n\nbaz\n"; - /// let mut lines = text.lines(); - /// - /// assert_eq!(Some("foo"), lines.next()); - /// assert_eq!(Some("bar"), lines.next()); - /// assert_eq!(Some(""), lines.next()); - /// assert_eq!(Some("baz"), lines.next()); - /// - /// assert_eq!(None, lines.next()); - /// ``` - /// - /// The final line ending isn't required: - /// - /// ``` - /// let text = "foo\nbar\n\r\nbaz"; - /// let mut lines = text.lines(); - /// - /// assert_eq!(Some("foo"), lines.next()); - /// assert_eq!(Some("bar"), lines.next()); - /// assert_eq!(Some(""), lines.next()); - /// assert_eq!(Some("baz"), lines.next()); - /// - /// assert_eq!(None, lines.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn lines(&self) -> Lines { - core_str::StrExt::lines(self) - } - - /// An iterator over the lines of a string. - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_deprecated(since = "1.4.0", reason = "use lines() instead now")] - #[inline] - #[allow(deprecated)] - pub fn lines_any(&self) -> LinesAny { - core_str::StrExt::lines_any(self) - } - - /// Returns an iterator of `u16` over the string encoded as UTF-16. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let text = "Zażółć gęślą jaźń"; - /// - /// let utf8_len = text.len(); - /// let utf16_len = text.encode_utf16().count(); - /// - /// assert!(utf16_len <= utf8_len); - /// ``` - #[stable(feature = "encode_utf16", since = "1.8.0")] - pub fn encode_utf16(&self) -> EncodeUtf16 { - EncodeUtf16 { encoder: Utf16Encoder::new(self[..].chars()) } - } - - /// Returns `true` if the given pattern matches a sub-slice of - /// this string slice. - /// - /// Returns `false` if it does not. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bananas = "bananas"; - /// - /// assert!(bananas.contains("nana")); - /// assert!(!bananas.contains("apples")); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { - core_str::StrExt::contains(self, pat) - } - - /// Returns `true` if the given pattern matches a prefix of this - /// string slice. - /// - /// Returns `false` if it does not. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bananas = "bananas"; - /// - /// assert!(bananas.starts_with("bana")); - /// assert!(!bananas.starts_with("nana")); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { - core_str::StrExt::starts_with(self, pat) - } - - /// Returns `true` if the given pattern matches a suffix of this - /// string slice. - /// - /// Returns `false` if it does not. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bananas = "bananas"; - /// - /// assert!(bananas.ends_with("anas")); - /// assert!(!bananas.ends_with("nana")); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn ends_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::ends_with(self, pat) - } - - /// Returns the byte index of the first character of this string slice that - /// matches the pattern. - /// - /// Returns [`None`] if the pattern doesn't match. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. 
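One more note on `encode_utf16` from a few hunks up (my own example): code points outside the Basic Multilingual Plane become surrogate pairs, which is why the UTF-16 unit count can exceed `chars().count()` even when it is below the UTF-8 byte length.

```
fn main() {
    let s = "𝄞"; // U+1D11E MUSICAL SYMBOL G CLEF
    let units: Vec<u16> = s.encode_utf16().collect();
    // One char, four UTF-8 bytes, two UTF-16 code units (a surrogate pair).
    assert_eq!(s.chars().count(), 1);
    assert_eq!(s.len(), 4);
    assert_eq!(units, [0xD834, 0xDD1E]);
}
```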
- /// - /// [`char`]: primitive.char.html - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.find('L'), Some(0)); - /// assert_eq!(s.find('é'), Some(14)); - /// assert_eq!(s.find("Léopard"), Some(13)); - /// ``` - /// - /// More complex patterns using point-free style and closures: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.find(char::is_whitespace), Some(5)); - /// assert_eq!(s.find(char::is_lowercase), Some(1)); - /// assert_eq!(s.find(|c: char| c.is_whitespace() || c.is_lowercase()), Some(1)); - /// assert_eq!(s.find(|c: char| (c < 'o') && (c > 'a')), Some(4)); - /// ``` - /// - /// Not finding the pattern: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// let x: &[_] = &['1', '2']; - /// - /// assert_eq!(s.find(x), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> { - core_str::StrExt::find(self, pat) - } - - /// Returns the byte index of the last character of this string slice that - /// matches the pattern. - /// - /// Returns [`None`] if the pattern doesn't match. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. - /// - /// [`char`]: primitive.char.html - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.rfind('L'), Some(13)); - /// assert_eq!(s.rfind('é'), Some(14)); - /// ``` - /// - /// More complex patterns with closures: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.rfind(char::is_whitespace), Some(12)); - /// assert_eq!(s.rfind(char::is_lowercase), Some(20)); - /// ``` - /// - /// Not finding the pattern: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// let x: &[_] = &['1', '2']; - /// - /// assert_eq!(s.rfind(x), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rfind<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rfind(self, pat) - } - - /// An iterator over substrings of this string slice, separated by - /// characters matched by a pattern. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rsplit`] method can be used. 
- /// - /// [`char`]: primitive.char.html - /// [`rsplit`]: #method.rsplit - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect(); - /// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]); - /// - /// let v: Vec<&str> = "".split('X').collect(); - /// assert_eq!(v, [""]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect(); - /// assert_eq!(v, ["lion", "", "tiger", "leopard"]); - /// - /// let v: Vec<&str> = "lion::tiger::leopard".split("::").collect(); - /// assert_eq!(v, ["lion", "tiger", "leopard"]); - /// - /// let v: Vec<&str> = "abc1def2ghi".split(char::is_numeric).collect(); - /// assert_eq!(v, ["abc", "def", "ghi"]); - /// - /// let v: Vec<&str> = "lionXtigerXleopard".split(char::is_uppercase).collect(); - /// assert_eq!(v, ["lion", "tiger", "leopard"]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".split(|c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["abc", "def", "ghi"]); - /// ``` - /// - /// If a string contains multiple contiguous separators, you will end up - /// with empty strings in the output: - /// - /// ``` - /// let x = "||||a||b|c".to_string(); - /// let d: Vec<_> = x.split('|').collect(); - /// - /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); - /// ``` - /// - /// Contiguous separators are separated by the empty string. - /// - /// ``` - /// let x = "(///)".to_string(); - /// let d: Vec<_> = x.split('/').collect(); - /// - /// assert_eq!(d, &["(", "", "", ")"]); - /// ``` - /// - /// Separators at the start or end of a string are neighbored - /// by empty strings. - /// - /// ``` - /// let d: Vec<_> = "010".split("0").collect(); - /// assert_eq!(d, &["", "1", ""]); - /// ``` - /// - /// When the empty string is used as a separator, it separates - /// every character in the string, along with the beginning - /// and end of the string. - /// - /// ``` - /// let f: Vec<_> = "rust".split("").collect(); - /// assert_eq!(f, &["", "r", "u", "s", "t", ""]); - /// ``` - /// - /// Contiguous separators can lead to possibly surprising behavior - /// when whitespace is used as the separator. This code is correct: - /// - /// ``` - /// let x = " a b c".to_string(); - /// let d: Vec<_> = x.split(' ').collect(); - /// - /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); - /// ``` - /// - /// It does _not_ give you: - /// - /// ```,ignore - /// assert_eq!(d, &["a", "b", "c"]); - /// ``` - /// - /// Use [`split_whitespace`] for this behavior. - /// - /// [`split_whitespace`]: #method.split_whitespace - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> { - core_str::StrExt::split(self, pat) - } - - /// An iterator over substrings of the given string slice, separated by - /// characters matched by a pattern and yielded in reverse order. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse - /// search yields the same elements. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// For iterating from the front, the [`split`] method can be used. 
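The forward/reverse equivalence for `char` patterns mentioned above can be checked directly (my own example): because a `char` search yields the same matches in both directions, `Split` is double-ended and reversing it agrees with `rsplit`.

```
fn main() {
    let s = "lion::tiger::leopard";

    let rev: Vec<&str> = s.split(':').rev().collect();
    let rsplit: Vec<&str> = s.rsplit(':').collect();
    assert_eq!(rev, rsplit);
    assert_eq!(rsplit, ["leopard", "", "tiger", "", "lion"]);
}
```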
- /// - /// [`split`]: #method.split - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lamb".rsplit(' ').collect(); - /// assert_eq!(v, ["lamb", "little", "a", "had", "Mary"]); - /// - /// let v: Vec<&str> = "".rsplit('X').collect(); - /// assert_eq!(v, [""]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".rsplit('X').collect(); - /// assert_eq!(v, ["leopard", "tiger", "", "lion"]); - /// - /// let v: Vec<&str> = "lion::tiger::leopard".rsplit("::").collect(); - /// assert_eq!(v, ["leopard", "tiger", "lion"]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".rsplit(|c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["ghi", "def", "abc"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplit<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplit<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rsplit(self, pat) - } - - /// An iterator over substrings of the given string slice, separated by - /// characters matched by a pattern. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// Equivalent to [`split`], except that the trailing substring - /// is skipped if empty. - /// - /// [`split`]: #method.split - /// - /// This method can be used for string data that is _terminated_, - /// rather than _separated_ by a pattern. - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// [`char`]: primitive.char.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rsplit_terminator`] method can be used. - /// - /// [`rsplit_terminator`]: #method.rsplit_terminator - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<&str> = "A.B.".split_terminator('.').collect(); - /// assert_eq!(v, ["A", "B"]); - /// - /// let v: Vec<&str> = "A..B..".split_terminator(".").collect(); - /// assert_eq!(v, ["A", "", "B", ""]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P> { - core_str::StrExt::split_terminator(self, pat) - } - - /// An iterator over substrings of `self`, separated by characters - /// matched by a pattern and yielded in reverse order. - /// - /// The pattern can be a simple `&str`, [`char`], or a closure that - /// determines the split. - /// Additional libraries might provide more complex patterns like - /// regular expressions. - /// - /// [`char`]: primitive.char.html - /// - /// Equivalent to [`split`], except that the trailing substring is - /// skipped if empty. - /// - /// [`split`]: #method.split - /// - /// This method can be used for string data that is _terminated_, - /// rather than _separated_ by a pattern. - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a - /// reverse search, and it will be double ended if a forward/reverse - /// search yields the same elements. - /// - /// For iterating from the front, the [`split_terminator`] method can be - /// used. 
- /// - /// [`split_terminator`]: #method.split_terminator - /// - /// # Examples - /// - /// ``` - /// let v: Vec<&str> = "A.B.".rsplit_terminator('.').collect(); - /// assert_eq!(v, ["B", "A"]); - /// - /// let v: Vec<&str> = "A..B..".rsplit_terminator(".").collect(); - /// assert_eq!(v, ["", "B", "", "A"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplit_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplitTerminator<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rsplit_terminator(self, pat) - } - - /// An iterator over substrings of the given string slice, separated by a - /// pattern, restricted to returning at most `n` items. - /// - /// If `n` substrings are returned, the last substring (the `n`th substring) - /// will contain the remainder of the string. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will not be double ended, because it is - /// not efficient to support. - /// - /// If the pattern allows a reverse search, the [`rsplitn`] method can be - /// used. - /// - /// [`rsplitn`]: #method.rsplitn - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lambda".splitn(3, ' ').collect(); - /// assert_eq!(v, ["Mary", "had", "a little lambda"]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".splitn(3, "X").collect(); - /// assert_eq!(v, ["lion", "", "tigerXleopard"]); - /// - /// let v: Vec<&str> = "abcXdef".splitn(1, 'X').collect(); - /// assert_eq!(v, ["abcXdef"]); - /// - /// let v: Vec<&str> = "".splitn(1, 'X').collect(); - /// assert_eq!(v, [""]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".splitn(2, |c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["abc", "defXghi"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> { - core_str::StrExt::splitn(self, n, pat) - } - - /// An iterator over substrings of this string slice, separated by a - /// pattern, starting from the end of the string, restricted to returning - /// at most `n` items. - /// - /// If `n` substrings are returned, the last substring (the `n`th substring) - /// will contain the remainder of the string. - /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines the split. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will not be double ended, because it is not - /// efficient to support. - /// - /// For splitting from the front, the [`splitn`] method can be used. 
- /// - /// [`splitn`]: #method.splitn - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lamb".rsplitn(3, ' ').collect(); - /// assert_eq!(v, ["lamb", "little", "Mary had a"]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(3, 'X').collect(); - /// assert_eq!(v, ["leopard", "tiger", "lionX"]); - /// - /// let v: Vec<&str> = "lion::tiger::leopard".rsplitn(2, "::").collect(); - /// assert_eq!(v, ["leopard", "lion::tiger"]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".rsplitn(2, |c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["ghi", "abc1def"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> RSplitN<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rsplitn(self, n, pat) - } - - /// An iterator over the disjoint matches of a pattern within the given string - /// slice. - /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines if a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// [`char`]: primitive.char.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rmatches`] method can be used. - /// - /// [`rmatches`]: #method.rmatches - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<&str> = "abcXXXabcYYYabc".matches("abc").collect(); - /// assert_eq!(v, ["abc", "abc", "abc"]); - /// - /// let v: Vec<&str> = "1abc2abc3".matches(char::is_numeric).collect(); - /// assert_eq!(v, ["1", "2", "3"]); - /// ``` - #[stable(feature = "str_matches", since = "1.2.0")] - #[inline] - pub fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P> { - core_str::StrExt::matches(self, pat) - } - - /// An iterator over the disjoint matches of a pattern within this string slice, - /// yielded in reverse order. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse - /// search yields the same elements. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// For iterating from the front, the [`matches`] method can be used. 
- /// - /// [`matches`]: #method.matches - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<&str> = "abcXXXabcYYYabc".rmatches("abc").collect(); - /// assert_eq!(v, ["abc", "abc", "abc"]); - /// - /// let v: Vec<&str> = "1abc2abc3".rmatches(char::is_numeric).collect(); - /// assert_eq!(v, ["3", "2", "1"]); - /// ``` - #[stable(feature = "str_matches", since = "1.2.0")] - #[inline] - pub fn rmatches<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatches<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rmatches(self, pat) - } - - /// An iterator over the disjoint matches of a pattern within this string - /// slice as well as the index that the match starts at. - /// - /// For matches of `pat` within `self` that overlap, only the indices - /// corresponding to the first match are returned. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines - /// if a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, e.g., [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rmatch_indices`] method can be used. - /// - /// [`rmatch_indices`]: #method.rmatch_indices - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<_> = "abcXXXabcYYYabc".match_indices("abc").collect(); - /// assert_eq!(v, [(0, "abc"), (6, "abc"), (12, "abc")]); - /// - /// let v: Vec<_> = "1abcabc2".match_indices("abc").collect(); - /// assert_eq!(v, [(1, "abc"), (4, "abc")]); - /// - /// let v: Vec<_> = "ababa".match_indices("aba").collect(); - /// assert_eq!(v, [(0, "aba")]); // only the first `aba` - /// ``` - #[stable(feature = "str_match_indices", since = "1.5.0")] - #[inline] - pub fn match_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> MatchIndices<'a, P> { - core_str::StrExt::match_indices(self, pat) - } - - /// An iterator over the disjoint matches of a pattern within `self`, - /// yielded in reverse order along with the index of the match. - /// - /// For matches of `pat` within `self` that overlap, only the indices - /// corresponding to the last match are returned. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if a - /// character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse - /// search yields the same elements. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// For iterating from the front, the [`match_indices`] method can be used.
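In practice `matches` and `match_indices` compose well; a small sketch reusing the input from the doc examples above:

```
fn main() {
    let text = "abcXXXabcYYYabc";

    // Counting non-overlapping occurrences needs no intermediate Vec.
    assert_eq!(text.matches("abc").count(), 3);

    // `match_indices` additionally reports the byte offset of each
    // match, which is what you want for highlighting or splicing.
    let spans: Vec<(usize, &str)> = text.match_indices("abc").collect();
    assert_eq!(spans, [(0, "abc"), (6, "abc"), (12, "abc")]);
}
```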
- /// - /// [`match_indices`]: #method.match_indices - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<_> = "abcXXXabcYYYabc".rmatch_indices("abc").collect(); - /// assert_eq!(v, [(12, "abc"), (6, "abc"), (0, "abc")]); - /// - /// let v: Vec<_> = "1abcabc2".rmatch_indices("abc").collect(); - /// assert_eq!(v, [(4, "abc"), (1, "abc")]); - /// - /// let v: Vec<_> = "ababa".rmatch_indices("aba").collect(); - /// assert_eq!(v, [(2, "aba")]); // only the last `aba` - /// ``` - #[stable(feature = "str_match_indices", since = "1.5.0")] - #[inline] - pub fn rmatch_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatchIndices<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rmatch_indices(self, pat) - } - - /// Returns a string slice with leading and trailing whitespace removed. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = " Hello\tworld\t"; - /// - /// assert_eq!("Hello\tworld", s.trim()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim(&self) -> &str { - UnicodeStr::trim(self) - } - - /// Returns a string slice with leading whitespace removed. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Left' in this context means the first - /// position of that byte string; for languages like Arabic or Hebrew, - /// which are 'right to left' rather than 'left to right', this will be - /// the _right_ side, not the left. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = " Hello\tworld\t"; - /// - /// assert_eq!("Hello\tworld\t", s.trim_left()); - /// ``` - /// - /// Directionality: - /// - /// ``` - /// let s = " English"; - /// assert!(Some('E') == s.trim_left().chars().next()); - /// - /// let s = " עברית"; - /// assert!(Some('ע') == s.trim_left().chars().next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_left(&self) -> &str { - UnicodeStr::trim_left(self) - } - - /// Returns a string slice with trailing whitespace removed. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Right' in this context means the last - /// position of that byte string; for languages like Arabic or Hebrew, - /// which are 'right to left' rather than 'left to right', this will be - /// the _left_ side, not the right. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = " Hello\tworld\t"; - /// - /// assert_eq!(" Hello\tworld", s.trim_right()); - /// ``` - /// - /// Directionality: - /// - /// ``` - /// let s = "English "; - /// assert!(Some('h') == s.trim_right().chars().rev().next()); - /// - /// let s = "עברית "; - /// assert!(Some('ת') == s.trim_right().chars().rev().next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_right(&self) -> &str { - UnicodeStr::trim_right(self) - } - - /// Returns a string slice with all prefixes and suffixes that match a - /// pattern repeatedly removed. - /// - /// The pattern can be a [`char`] or a closure that determines if a - /// character matches.
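A quick sketch tying the three whitespace trimmers above together (`trim_left`/`trim_right` are the names in this snapshot; they were later renamed `trim_start`/`trim_end`):

```
fn main() {
    let line = "\t  Hello\tworld  \n";

    // Leading and trailing `White_Space` characters go; the interior tab stays.
    assert_eq!(line.trim(), "Hello\tworld");
    assert_eq!(line.trim_left(), "Hello\tworld  \n");
    assert_eq!(line.trim_right(), "\t  Hello\tworld");
}
```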
- /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar"); - /// assert_eq!("123foo1bar123".trim_matches(char::is_numeric), "foo1bar"); - /// - /// let x: &[_] = &['1', '2']; - /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar"); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// assert_eq!("1foo1barXX".trim_matches(|c| c == '1' || c == 'X'), "foo1bar"); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str - where P::Searcher: DoubleEndedSearcher<'a> - { - core_str::StrExt::trim_matches(self, pat) - } - - /// Returns a string slice with all prefixes that match a pattern - /// repeatedly removed. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Left' in this context means the first - /// position of that byte string; for languages like Arabic or Hebrew, - /// which are 'right to left' rather than 'left to right', this will be - /// the _right_ side, not the left. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11"); - /// assert_eq!("123foo1bar123".trim_left_matches(char::is_numeric), "foo1bar123"); - /// - /// let x: &[_] = &['1', '2']; - /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12"); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str { - core_str::StrExt::trim_left_matches(self, pat) - } - - /// Returns a string slice with all suffixes that match a pattern - /// repeatedly removed. - /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines if a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Right' in this context means the last - /// position of that byte string; for languages like Arabic or Hebrew, - /// which are 'right to left' rather than 'left to right', this will be - /// the _left_ side, not the right. - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar"); - /// assert_eq!("123foo1bar123".trim_right_matches(char::is_numeric), "123foo1bar"); - /// - /// let x: &[_] = &['1', '2']; - /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar"); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// assert_eq!("1fooX".trim_right_matches(|c| c == '1' || c == 'X'), "1foo"); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::trim_right_matches(self, pat) - } - - /// Parses this string slice into another type. - /// - /// Because `parse` is so general, it can cause problems with type - /// inference. As such, `parse` is one of the few times you'll see - /// the syntax affectionately known as the 'turbofish': `::<>`. This - /// helps the inference algorithm understand specifically which type - /// you're trying to parse into. - /// - /// `parse` can parse any type that implements the [`FromStr`] trait.
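Because `parse` routes through [`FromStr`], any type implementing that trait parses with the same syntax; a minimal sketch (the `Point` type and its "x,y" format are invented for illustration):

```
use std::num::ParseIntError;
use std::str::FromStr;

// A hypothetical coordinate type, purely to show the plumbing.
struct Point {
    x: i32,
    y: i32,
}

impl FromStr for Point {
    type Err = ParseIntError;

    // Accepts strings of the form "3,4" (whitespace tolerated).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.splitn(2, ',');
        let x = parts.next().unwrap_or("").trim().parse()?;
        let y = parts.next().unwrap_or("").trim().parse()?;
        Ok(Point { x, y })
    }
}

fn main() {
    let p: Point = "3, 4".parse().unwrap();
    assert_eq!((p.x, p.y), (3, 4));
    assert!("3;4".parse::<Point>().is_err());
}
```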
- /// - /// [`FromStr`]: str/trait.FromStr.html - /// - /// # Errors - /// - /// Will return [`Err`] if it's not possible to parse this string slice into - /// the desired type. - /// - /// [`Err`]: str/trait.FromStr.html#associatedtype.Err - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let four: u32 = "4".parse().unwrap(); - /// - /// assert_eq!(4, four); - /// ``` - /// - /// Using the 'turbofish' instead of annotating `four`: - /// - /// ``` - /// let four = "4".parse::<u32>(); - /// - /// assert_eq!(Ok(4), four); - /// ``` - /// - /// Failing to parse: - /// - /// ``` - /// let nope = "j".parse::<u32>(); - /// - /// assert!(nope.is_err()); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn parse<F: FromStr>(&self) -> Result<F, F::Err> { - core_str::StrExt::parse(self) - } - /// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating. /// /// # Examples @@ -1810,6 +231,7 @@ impl str { /// assert_eq!(*boxed_bytes, *s.as_bytes()); /// ``` #[stable(feature = "str_box_extras", since = "1.20.0")] + #[inline] pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> { self.into() } @@ -1838,17 +260,19 @@ impl str { /// let s = "this is old"; /// assert_eq!(s, s.replace("cookie monster", "little lamb")); /// ``` + #[must_use = "this returns the replaced string as a new allocation, \ + without modifying the original"] #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String { let mut result = String::new(); let mut last_end = 0; for (start, part) in self.match_indices(from) { - result.push_str(unsafe { self.slice_unchecked(last_end, start) }); + result.push_str(unsafe { self.get_unchecked(last_end..start) }); result.push_str(to); last_end = start + part.len(); } - result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) }); + result.push_str(unsafe { self.get_unchecked(last_end..self.len()) }); result } @@ -1877,17 +301,19 @@ impl str { /// let s = "this is old"; /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10)); /// ``` + #[must_use = "this returns the replaced string as a new allocation, \ + without modifying the original"] #[stable(feature = "str_replacen", since = "1.16.0")] pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String { // Hope to reduce the number of reallocations let mut result = String::with_capacity(32); let mut last_end = 0; for (start, part) in self.match_indices(pat).take(count) { - result.push_str(unsafe { self.slice_unchecked(last_end, start) }); + result.push_str(unsafe { self.get_unchecked(last_end..start) }); result.push_str(to); last_end = start + part.len(); } - result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) }); + result.push_str(unsafe { self.get_unchecked(last_end..self.len()) }); result } @@ -1944,7 +370,18 @@ impl str { // See https://github.com/rust-lang/rust/issues/26035 map_uppercase_sigma(self, i, &mut s) } else { - s.extend(c.to_lowercase()); + match conversions::to_lower(c) { + [a, '\0', _] => s.push(a), + [a, b, '\0'] => { + s.push(a); + s.push(b); + } + [a, b, c] => { + s.push(a); + s.push(b); + s.push(c); + } + } } } return s; @@ -1959,7 +396,7 @@ impl str { } fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool { - use std_unicode::derived_property::{Cased, Case_Ignorable}; + use core::unicode::derived_property::{Cased, Case_Ignorable}; match iter.skip_while(|&c| Case_Ignorable(c)).next() { Some(c) => Cased(c), None =>
false, @@ -1998,18 +435,40 @@ impl str { #[stable(feature = "unicode_case_mapping", since = "1.2.0")] pub fn to_uppercase(&self) -> String { let mut s = String::with_capacity(self.len()); - s.extend(self.chars().flat_map(|c| c.to_uppercase())); + for c in self[..].chars() { + match conversions::to_upper(c) { + [a, '\0', _] => s.push(a), + [a, b, '\0'] => { + s.push(a); + s.push(b); + } + [a, b, c] => { + s.push(a); + s.push(b); + s.push(c); + } + } + } return s; } /// Escapes each char in `s` with [`char::escape_debug`]. /// + /// Note: only extended grapheme codepoints that begin the string will be + /// escaped. + /// /// [`char::escape_debug`]: primitive.char.html#method.escape_debug #[unstable(feature = "str_escape", reason = "return type may change to be an iterator", issue = "27791")] pub fn escape_debug(&self) -> String { - self.chars().flat_map(|c| c.escape_debug()).collect() + let mut string = String::with_capacity(self.len()); + let mut chars = self.chars(); + if let Some(first) = chars.next() { + string.extend(first.escape_debug_ext(true)) + } + string.extend(chars.flat_map(|c| c.escape_debug_ext(false))); + string } /// Escapes each char in `s` with [`char::escape_default`]. @@ -2048,6 +507,7 @@ impl str { /// assert_eq!(boxed_str.into_string(), string); /// ``` #[stable(feature = "box_str", since = "1.4.0")] + #[inline] pub fn into_string(self: Box<str>) -> String { let slice = Box::<[u8]>::from(self); unsafe { String::from_utf8_unchecked(slice.into_vec()) } @@ -2066,29 +526,7 @@ impl str { /// ``` #[stable(feature = "repeat_str", since = "1.16.0")] pub fn repeat(&self, n: usize) -> String { - let mut s = String::with_capacity(self.len() * n); - s.extend((0..n).map(|_| self)); - s - } - - /// Checks if all characters in this string are within the ASCII range. - /// - /// # Examples - /// - /// ``` - /// let ascii = "hello!\n"; - /// let non_ascii = "Grüße, Jürgen ❤"; - /// - /// assert!(ascii.is_ascii()); - /// assert!(!non_ascii.is_ascii()); - /// ``` - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn is_ascii(&self) -> bool { - // We can treat each byte as character here: all multibyte characters - // start with a byte that is not in the ascii range, so we will stop - // there already. - self.bytes().all(|b| b.is_ascii()) + unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) } } /// Returns a copy of this string where each character is mapped to its @@ -2150,54 +588,6 @@ impl str { // make_ascii_lowercase() preserves the UTF-8 invariant. unsafe { String::from_utf8_unchecked(bytes) } } - - /// Checks that two strings are an ASCII case-insensitive match. - /// - /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`, - /// but without allocating and copying temporaries. - /// - /// # Examples - /// - /// ``` - /// assert!("Ferris".eq_ignore_ascii_case("FERRIS")); - /// assert!("Ferrös".eq_ignore_ascii_case("FERRöS")); - /// assert!(!"Ferrös".eq_ignore_ascii_case("FERRÖS")); - /// ``` - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn eq_ignore_ascii_case(&self, other: &str) -> bool { - self.as_bytes().eq_ignore_ascii_case(other.as_bytes()) - } - - /// Converts this string to its ASCII upper case equivalent in-place. - /// - /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new uppercased value without modifying the existing one, use - /// [`to_ascii_uppercase`]. 
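The three-slot `[char; 3]` buffers matched in the case-conversion hunks above are not arbitrary: full Unicode case conversion can map one `char` to as many as three, and lowercasing is context-sensitive for Greek sigma. A short demonstration through the public API:

```
fn main() {
    // One char in, two out: U+00DF LATIN SMALL LETTER SHARP S.
    assert_eq!("ß".to_uppercase(), "SS");

    // Sigma lowercases differently at the end of a word (the
    // map_uppercase_sigma special case referenced above).
    assert_eq!("ΟΣ".to_lowercase(), "ος");
    assert_eq!("ΟΣΟ".to_lowercase(), "οσο");
}
```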
- /// - /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - pub fn make_ascii_uppercase(&mut self) { - let me = unsafe { self.as_bytes_mut() }; - me.make_ascii_uppercase() - } - - /// Converts this string to its ASCII lower case equivalent in-place. - /// - /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new lowercased value without modifying the existing one, use - /// [`to_ascii_lowercase`]. - /// - /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - pub fn make_ascii_lowercase(&mut self) { - let me = unsafe { self.as_bytes_mut() }; - me.make_ascii_lowercase() - } } /// Converts a boxed slice of bytes to a boxed string slice without checking @@ -2214,6 +604,7 @@ impl str { /// assert_eq!("☺", &*smile); /// ``` #[stable(feature = "str_box_extras", since = "1.20.0")] +#[inline] pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> { Box::from_raw(Box::into_raw(v) as *mut str) } diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs index 8d99d0bc8f4..dd559df08cc 100644 --- a/src/liballoc/string.rs +++ b/src/liballoc/string.rs @@ -56,21 +56,21 @@ #![stable(feature = "rust1", since = "1.0.0")] +use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; use core::fmt; use core::hash; use core::iter::{FromIterator, FusedIterator}; -use core::ops::{self, Add, AddAssign, Index, IndexMut}; +use core::ops::Bound::{Excluded, Included, Unbounded}; +use core::ops::{self, Add, AddAssign, Index, IndexMut, RangeBounds}; use core::ptr; use core::str::pattern::Pattern; -use std_unicode::lossy; -use std_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER}; +use core::str::lossy; +use collections::CollectionAllocErr; use borrow::{Cow, ToOwned}; -use range::RangeArgument; -use Bound::{Excluded, Included, Unbounded}; +use boxed::Box; use str::{self, from_boxed_utf8_unchecked, FromStr, Utf8Error, Chars}; use vec::Vec; -use boxed::Box; /// A UTF-8 encoded, growable string. /// @@ -364,7 +364,7 @@ impl String { /// /// Given that the `String` is empty, this will not allocate any initial /// buffer. While that means that this initial operation is very - /// inexpensive, but may cause excessive allocation later, when you add + /// inexpensive, it may cause excessive allocation later when you add /// data. If you have an idea of how much data the `String` will hold, /// consider the [`with_capacity`] method to prevent excessive /// re-allocation. @@ -380,7 +380,8 @@ impl String { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new() -> String { + #[rustc_const_unstable(feature = "const_string_new")] + pub const fn new() -> String { String { vec: Vec::new() } } @@ -518,10 +519,11 @@ impl String { /// between the two. Not all byte slices are valid strings, however: strings /// are required to be valid UTF-8. 
During this conversion, /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with - /// `U+FFFD REPLACEMENT CHARACTER`, which looks like this: � + /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: � /// /// [`u8`]: ../../std/primitive.u8.html /// [byteslice]: ../../std/primitive.slice.html + /// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html /// /// If you are sure that the byte slice is valid UTF-8, and you don't want /// to incur the overhead of the conversion, there is an unsafe version @@ -620,7 +622,7 @@ impl String { } /// Decode a UTF-16 encoded slice `v` into a `String`, replacing - /// invalid data with the replacement character (U+FFFD). + /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD]. /// /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`], /// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8 @@ -628,6 +630,7 @@ impl String { /// /// [`from_utf8_lossy`]: #method.from_utf8_lossy /// [`Cow<'a, str>`]: ../borrow/enum.Cow.html + /// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html /// /// # Examples /// @@ -920,6 +923,79 @@ impl String { self.vec.reserve_exact(additional) } + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `String`. The collection may reserve more space to avoid + /// frequent reallocations. After calling `try_reserve`, capacity will be + /// greater than or equal to `self.len() + additional`. Does nothing if + /// capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &str) -> Result<String, CollectionAllocErr> { + ///     let mut output = String::new(); + /// + ///     // Pre-reserve the memory, exiting if we can't + ///     output.try_reserve(data.len())?; + /// + ///     // Now we know this can't OOM in the middle of our complex work + ///     output.push_str(data); + /// + ///     Ok(output) + /// } + /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.vec.try_reserve(additional) + } + + /// Tries to reserve the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `String`. After calling `try_reserve_exact`, + /// capacity will be greater than or equal to `self.len() + additional`. + /// Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity cannot be relied upon to be precisely + /// minimal. Prefer `try_reserve` if future insertions are expected. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned.
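The point of these fallible variants is to let a caller degrade gracefully on absurd or attacker-supplied sizes instead of aborting; a sketch under the same `try_reserve` feature gate (the helper function is invented for illustration):

```
#![feature(try_reserve)]
use std::collections::CollectionAllocErr;

// Build a buffer for an untrusted, possibly enormous, declared length,
// reporting failure instead of aborting the process.
fn with_untrusted_capacity(declared_len: usize) -> Result<String, CollectionAllocErr> {
    let mut buf = String::new();
    buf.try_reserve(declared_len)?;
    Ok(buf)
}

fn main() {
    assert!(with_untrusted_capacity(16).is_ok());
    // usize::MAX overflows the capacity computation and comes back
    // as an error rather than a panic or abort.
    assert!(with_untrusted_capacity(usize::max_value()).is_err());
}
```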
+ /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &str) -> Result<String, CollectionAllocErr> { + /// let mut output = String::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.push_str(data); + /// + /// Ok(output) + /// } + /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.vec.try_reserve_exact(additional) + } + /// Shrinks the capacity of this `String` to match its length. /// /// # Examples @@ -941,6 +1017,34 @@ impl String { self.vec.shrink_to_fit() } + /// Shrinks the capacity of this `String` with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// let mut s = String::from("foo"); + /// + /// s.reserve(100); + /// assert!(s.capacity() >= 100); + /// + /// s.shrink_to(10); + /// assert!(s.capacity() >= 10); + /// s.shrink_to(0); + /// assert!(s.capacity() >= 3); + /// ``` + #[inline] + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.vec.shrink_to(min_capacity) + } + /// Appends the given [`char`] to the end of this `String`. /// /// [`char`]: ../../std/primitive.char.html @@ -1103,8 +1207,6 @@ impl String { /// # Examples /// /// ``` - /// #![feature(string_retain)] - /// /// let mut s = String::from("f_o_ob_ar"); /// /// s.retain(|c| c != '_'); @@ -1112,7 +1214,7 @@ impl String { /// assert_eq!(s, "foobar"); /// ``` #[inline] - #[unstable(feature = "string_retain", issue = "43874")] + #[stable(feature = "string_retain", since = "1.26.0")] pub fn retain<F>(&mut self, mut f: F) where F: FnMut(char) -> bool { @@ -1122,7 +1224,7 @@ impl String { while idx < len { let ch = unsafe { - self.slice_unchecked(idx, len).chars().next().unwrap() + self.get_unchecked(idx..len).chars().next().unwrap() }; let ch_len = ch.len_utf8(); @@ -1384,7 +1486,7 @@ impl String { /// ``` #[stable(feature = "drain", since = "1.6.0")] pub fn drain<R>(&mut self, range: R) -> Drain - where R: RangeArgument<usize> + where R: RangeBounds<usize> { // Memory safety // @@ -1393,12 +1495,12 @@ impl String { // Because the range removal happens in Drop, if the Drain iterator is leaked, // the removal will not happen. let len = self.len(); - let start = match range.start() { + let start = match range.start_bound() { Included(&n) => n, Excluded(&n) => n + 1, Unbounded => 0, }; - let end = match range.end() { + let end = match range.end_bound() { Included(&n) => n + 1, Excluded(&n) => n, Unbounded => len, @@ -1418,13 +1520,10 @@ impl String { } } - /// Creates a splicing iterator that removes the specified range in the string, + /// Removes the specified range in the string, /// and replaces it with the given string. /// The given string doesn't need to be the same length as the range. /// - /// Note: Unlike [`Vec::splice`], the replacement happens eagerly, and this - /// method does not return the removed chars. 
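The `start_bound`/`end_bound` matches in the `drain` hunk above and the `replace_range` hunk below share one idiom: normalize an arbitrary `RangeBounds<usize>` into a half-open `start..end`. A standalone sketch of that normalization (the helper name is invented):

```
use std::ops::Bound::{Excluded, Included, Unbounded};
use std::ops::RangeBounds;

// Mirror of what String::drain does internally: reduce any range
// shape to half-open [start, end) against a known length.
fn to_half_open<R: RangeBounds<usize>>(range: R, len: usize) -> (usize, usize) {
    let start = match range.start_bound() {
        Included(&n) => n,
        Excluded(&n) => n + 1,
        Unbounded => 0,
    };
    let end = match range.end_bound() {
        Included(&n) => n + 1,
        Excluded(&n) => n,
        Unbounded => len,
    };
    (start, end)
}

fn main() {
    assert_eq!(to_half_open(2..5, 10), (2, 5));
    assert_eq!(to_half_open(..=3, 10), (0, 4));
    assert_eq!(to_half_open(.., 10), (0, 10));
}
```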
- /// - /// # Panics /// /// Panics if the starting point or end point do not lie on a [`char`] @@ -1438,29 +1537,28 @@ impl String { /// Basic usage: /// /// ``` - /// #![feature(splice)] /// let mut s = String::from("α is alpha, β is beta"); /// let beta_offset = s.find('β').unwrap_or(s.len()); /// /// // Replace the range up until the β from the string - /// s.splice(..beta_offset, "Α is capital alpha; "); + /// s.replace_range(..beta_offset, "Α is capital alpha; "); /// assert_eq!(s, "Α is capital alpha; β is beta"); /// ``` - #[unstable(feature = "splice", reason = "recently added", issue = "44643")] - pub fn splice<R>(&mut self, range: R, replace_with: &str) - where R: RangeArgument<usize> + #[stable(feature = "splice", since = "1.27.0")] + pub fn replace_range<R>(&mut self, range: R, replace_with: &str) + where R: RangeBounds<usize> { // Memory safety // - // The String version of Splice does not have the memory safety issues - // of the vector version. The data is just plain bytes. + // Replace_range does not have the memory safety issues of a vector Splice. + // The data is just plain bytes. - match range.start() { + match range.start_bound() { Included(&n) => assert!(self.is_char_boundary(n)), Excluded(&n) => assert!(self.is_char_boundary(n + 1)), Unbounded => {}, }; - match range.end() { + match range.end_bound() { Included(&n) => assert!(self.is_char_boundary(n + 1)), Excluded(&n) => assert!(self.is_char_boundary(n)), Unbounded => {}, @@ -1488,6 +1586,7 @@ impl String { /// let b = s.into_boxed_str(); /// ``` #[stable(feature = "box_str", since = "1.4.0")] + #[inline] pub fn into_boxed_str(self) -> Box<str> { let slice = self.vec.into_boxed_slice(); unsafe { from_boxed_utf8_unchecked(slice) } @@ -1502,7 +1601,6 @@ impl FromUtf8Error { /// Basic usage: /// /// ``` - /// #![feature(from_utf8_error_as_bytes)] /// // some invalid bytes, in a vector /// let bytes = vec![0, 159]; /// /// let value = String::from_utf8(bytes); /// /// assert_eq!(&[0, 159], value.unwrap_err().as_bytes()); /// ``` - #[unstable(feature = "from_utf8_error_as_bytes", reason = "recently added", issue = "40895")] + #[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")] pub fn as_bytes(&self) -> &[u8] { &self.bytes[..]
} @@ -1876,7 +1974,7 @@ impl ops::Index<ops::RangeFull> for String { unsafe { str::from_utf8_unchecked(&self.vec) } } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index<ops::RangeInclusive<usize>> for String { type Output = str; @@ -1885,7 +1983,7 @@ impl ops::Index<ops::RangeInclusive<usize>> for String { Index::index(&**self, index) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index<ops::RangeToInclusive<usize>> for String { type Output = str; @@ -1923,14 +2021,14 @@ impl ops::IndexMut<ops::RangeFull> for String { unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) } } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut<ops::RangeInclusive<usize>> for String { #[inline] fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str { IndexMut::index_mut(&mut **self, index) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut<ops::RangeToInclusive<usize>> for String { #[inline] fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str { @@ -2144,6 +2242,14 @@ impl<'a> From<String> for Cow<'a, str> { } } +#[stable(feature = "cow_from_string_ref", since = "1.28.0")] +impl<'a> From<&'a String> for Cow<'a, str> { + #[inline] + fn from(s: &'a String) -> Cow<'a, str> { + Cow::Borrowed(s.as_str()) + } +} + #[stable(feature = "cow_str_from_iter", since = "1.12.0")] impl<'a> FromIterator<char> for Cow<'a, str> { fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> { @@ -2254,5 +2360,5 @@ impl<'a> DoubleEndedIterator for Drain<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for Drain<'a> {} diff --git a/src/liballoc/arc.rs b/src/liballoc/sync.rs index 185af8835d1..a00b6b4e435 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/sync.rs @@ -16,23 +16,25 @@ //! //! [arc]: struct.Arc.html +use core::any::Any; use core::sync::atomic; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use core::borrow; use core::fmt; use core::cmp::Ordering; use core::intrinsics::abort; -use core::mem::{self, align_of_val, size_of_val, uninitialized}; +use core::mem::{self, align_of_val, size_of_val}; use core::ops::Deref; use core::ops::CoerceUnsized; -use core::ptr::{self, Shared}; +use core::ptr::{self, NonNull}; use core::marker::{Unsize, PhantomData}; use core::hash::{Hash, Hasher}; use core::{isize, usize}; use core::convert::From; -use heap::{Heap, Alloc, Layout, box_free}; +use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; use boxed::Box; +use rc::is_dangling; use string::String; use vec::Vec; @@ -60,7 +62,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// ## Thread Safety /// /// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference -/// counting This means that it is thread-safe. The disadvantage is that +/// counting. This means that it is thread-safe. The disadvantage is that /// atomic operations are more expensive than ordinary memory accesses. 
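A minimal sketch of what the atomic counts buy: handles can be cloned on one thread and dropped on another with no external locking.

```
use std::sync::Arc;
use std::thread;

fn main() {
    let shared = Arc::new(vec![1, 2, 3]);

    let handles: Vec<_> = (0..4)
        .map(|_| {
            // Each thread owns its own handle; only the shared
            // atomic reference count is touched by the clone.
            let shared = Arc::clone(&shared);
            thread::spawn(move || shared.iter().sum::<i32>())
        })
        .collect();

    for h in handles {
        assert_eq!(h.join().unwrap(), 6);
    }
}
```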
If you /// are not sharing reference-counted values between threads, consider using /// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the @@ -197,7 +199,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// [rc_examples]: ../../std/rc/index.html#examples #[stable(feature = "rust1", since = "1.0.0")] pub struct Arc<T: ?Sized> { - ptr: Shared<ArcInner<T>>, + ptr: NonNull<ArcInner<T>>, phantom: PhantomData<T>, } @@ -234,7 +236,12 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {} /// [`None`]: ../../std/option/enum.Option.html#variant.None #[stable(feature = "arc_weak", since = "1.4.0")] pub struct Weak<T: ?Sized> { - ptr: Shared<ArcInner<T>>, + // This is a `NonNull` to allow optimizing the size of this type in enums, + // but it is not necessarily a valid pointer. + // `Weak::new` sets this to `usize::MAX` so that it doesn’t need + // to allocate space on the heap. That's not a value a real pointer + // will ever have because ArcInner has alignment at least 2. + ptr: NonNull<ArcInner<T>>, } #[stable(feature = "arc_weak", since = "1.4.0")] @@ -286,7 +293,7 @@ impl<T> Arc<T> { weak: atomic::AtomicUsize::new(1), data, }; - Arc { ptr: Shared::from(Box::into_unique(x)), phantom: PhantomData } + Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData } } /// Returns the contained value, if the `Arc` has exactly one strong reference. @@ -397,7 +404,7 @@ impl<T: ?Sized> Arc<T> { let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); Arc { - ptr: Shared::new_unchecked(arc_ptr), + ptr: NonNull::new_unchecked(arc_ptr), phantom: PhantomData, } } @@ -436,7 +443,11 @@ impl<T: ?Sized> Arc<T> { // synchronize with the write coming from `is_unique`, so that the // events prior to that write happen before this read. match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { - Ok(_) => return Weak { ptr: this.ptr }, + Ok(_) => { + // Make sure we do not create a dangling Weak + debug_assert!(!is_dangling(this.ptr)); + return Weak { ptr: this.ptr }; + } Err(old) => cur = old, } } @@ -512,15 +523,13 @@ impl<T: ?Sized> Arc<T> { // Non-inlined part of `drop`. #[inline(never)] unsafe fn drop_slow(&mut self) { - let ptr = self.ptr.as_ptr(); - // Destroy the data at this time, even though we may not free the box // allocation itself (there may still be weak pointers lying around).
ptr::drop_in_place(&mut self.ptr.as_mut().data); if self.inner().weak.fetch_sub(1, Release) == 1 { atomic::fence(Acquire); - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)) + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) } } @@ -554,11 +563,11 @@ impl<T: ?Sized> Arc<T> { let layout = Layout::for_value(&*fake_ptr); - let mem = Heap.alloc(layout) - .unwrap_or_else(|e| Heap.oom(e)); + let mem = Global.alloc(layout) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the real ArcInner - let inner = set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>; + let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>; ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); @@ -568,7 +577,8 @@ impl<T: ?Sized> Arc<T> { fn from_box(v: Box<T>) -> Arc<T> { unsafe { - let bptr = Box::into_raw(v); + let box_unique = Box::into_unique(v); + let bptr = box_unique.as_ptr(); let value_size = size_of_val(&*bptr); let ptr = Self::allocate_for_ptr(bptr); @@ -580,9 +590,9 @@ impl<T: ?Sized> Arc<T> { value_size); // Free the allocation without dropping its contents - box_free(bptr); + box_free(box_unique); - Arc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } } @@ -609,7 +619,7 @@ impl<T> Arc<[T]> { &mut (*ptr).data as *mut [T] as *mut T, v.len()); - Arc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } @@ -625,7 +635,7 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> { // In the event of a panic, elements that have been written // into the new ArcInner will be dropped, then the memory freed. struct Guard<T> { - mem: *mut u8, + mem: NonNull<u8>, elems: *mut T, layout: Layout, n_elems: usize, @@ -639,7 +649,7 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> { let slice = from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); - Heap.dealloc(self.mem, self.layout.clone()); + Global.dealloc(self.mem.cast(), self.layout.clone()); } } } @@ -655,7 +665,7 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> { let elems = &mut (*ptr).data as *mut [T] as *mut T; let mut guard = Guard{ - mem: mem, + mem: NonNull::new_unchecked(mem), elems: elems, layout: layout, n_elems: 0, @@ -669,7 +679,7 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> { // All clear. Forget the guard so it doesn't free the new ArcInner. mem::forget(guard); - Arc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } } @@ -879,13 +889,14 @@ impl<T: ?Sized> Arc<T> { // holder. // // The acquire label here ensures a happens-before relationship with any - // writes to `strong` prior to decrements of the `weak` count (via drop, - // which uses Release). + // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements + // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded + // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { - // Due to the previous acquire read, this will observe any writes to - // `strong` that were due to upgrading weak pointers; only strong - // clones remain, which require that the strong count is > 1 anyway. 
- let unique = self.inner().strong.load(Relaxed) == 1; + // This needs to be an `Acquire` to synchronize with the decrement of the `strong` + // counter in `drop` -- the only access that happens when any but the last reference + // is being dropped. + let unique = self.inner().strong.load(Acquire) == 1; // The release write here synchronizes with a read in `downgrade`, // effectively preventing the above read of `strong` from happening @@ -972,9 +983,46 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> { } } +impl Arc<dyn Any + Send + Sync> { + #[inline] + #[stable(feature = "rc_downcast", since = "1.29.0")] + /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// use std::sync::Arc; + /// + /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) { + /// if let Ok(string) = value.downcast::<String>() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// fn main() { + /// let my_string = "Hello World".to_string(); + /// print_if_string(Arc::new(my_string)); + /// print_if_string(Arc::new(0i8)); + /// } + /// ``` + pub fn downcast<T>(self) -> Result<Arc<T>, Self> + where + T: Any + Send + Sync + 'static, + { + if (*self).is::<T>() { + let ptr = self.ptr.cast::<ArcInner<T>>(); + mem::forget(self); + Ok(Arc { ptr, phantom: PhantomData }) + } else { + Err(self) + } + } +} + impl<T> Weak<T> { - /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`] on the return value always gives [`None`]. + /// Constructs a new `Weak<T>`, without allocating any memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`upgrade`]: struct.Weak.html#method.upgrade /// [`None`]: ../../std/option/enum.Option.html#variant.None @@ -989,14 +1037,8 @@ impl<T> Weak<T> { /// ``` #[stable(feature = "downgraded_weak", since = "1.10.0")] pub fn new() -> Weak<T> { - unsafe { - Weak { - ptr: Shared::from(Box::into_unique(box ArcInner { - strong: atomic::AtomicUsize::new(0), - weak: atomic::AtomicUsize::new(1), - data: uninitialized(), - })), - } + Weak { + ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"), } } } @@ -1032,7 +1074,7 @@ impl<T: ?Sized> Weak<T> { pub fn upgrade(&self) -> Option<Arc<T>> { // We use a CAS loop to increment the strong count instead of a // fetch_add because once the count hits 0 it must never be above 0. - let inner = self.inner(); + let inner = self.inner()?; // Relaxed load because any write of 0 that we can observe // leaves the field in a permanently zero state (so a @@ -1054,16 +1096,25 @@ impl<T: ?Sized> Weak<T> { // Relaxed is valid for the same reason it is on Arc's Clone impl match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) { - Ok(_) => return Some(Arc { ptr: self.ptr, phantom: PhantomData }), + Ok(_) => return Some(Arc { + // null checked above + ptr: self.ptr, + phantom: PhantomData, + }), Err(old) => n = old, } } } + /// Return `None` when the pointer is dangling and there is no allocated `ArcInner`, + /// i.e. 
this `Weak` was created by `Weak::new`. #[inline] - fn inner(&self) -> &ArcInner<T> { - // See comments above for why this is "safe" - unsafe { self.ptr.as_ref() } + fn inner(&self) -> Option<&ArcInner<T>> { + if is_dangling(self.ptr) { + None + } else { + Some(unsafe { self.ptr.as_ref() }) + } } } @@ -1082,11 +1133,16 @@ impl<T: ?Sized> Clone for Weak<T> { /// ``` #[inline] fn clone(&self) -> Weak<T> { + let inner = if let Some(inner) = self.inner() { + inner + } else { + return Weak { ptr: self.ptr }; + }; // See comments in Arc::clone() for why this is relaxed. This can use a // fetch_add (ignoring the lock) because the weak count is only locked // when there are *no other* weak pointers in existence. (So we can't be // running this code in that case). - let old_size = self.inner().weak.fetch_add(1, Relaxed); + let old_size = inner.weak.fetch_add(1, Relaxed); // See comments in Arc::clone() for why we do this (for mem::forget). if old_size > MAX_REFCOUNT { @@ -1101,8 +1157,8 @@ impl<T: ?Sized> Clone for Weak<T> { #[stable(feature = "downgraded_weak", since = "1.10.0")] impl<T> Default for Weak<T> { - /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`] on the return value always gives [`None`]. + /// Constructs a new `Weak<T>`, without allocating memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`upgrade`]: struct.Weak.html#method.upgrade /// [`None`]: ../../std/option/enum.Option.html#variant.None @@ -1147,8 +1203,6 @@ impl<T: ?Sized> Drop for Weak<T> { /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { - let ptr = self.ptr.as_ptr(); - // If we find out that we were the last weak pointer, then it's time to // deallocate the data entirely. See the discussion in Arc::drop() about // the memory orderings // // weak count can only be locked if there was precisely one weak ref, // meaning that drop could only subsequently run ON that remaining weak // ref, which can only happen after the lock is released.
- if self.inner().weak.fetch_sub(1, Release) == 1 { + let inner = if let Some(inner) = self.inner() { + inner + } else { + return + }; + + if inner.weak.fetch_sub(1, Release) == 1 { atomic::fence(Acquire); unsafe { - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)) + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) } } } @@ -1519,7 +1579,7 @@ mod tests { assert_eq!(unsafe { &*ptr }, "foo"); assert_eq!(arc, arc2); - let arc: Arc<Display> = Arc::new(123); + let arc: Arc<dyn Display> = Arc::new(123); let ptr = Arc::into_raw(arc.clone()); let arc2 = unsafe { Arc::from_raw(ptr) }; @@ -1824,8 +1884,8 @@ mod tests { use std::fmt::Display; use std::string::ToString; - let b: Box<Display> = box 123; - let r: Arc<Display> = Arc::from(b); + let b: Box<dyn Display> = box 123; + let r: Arc<dyn Display> = Arc::from(b); assert_eq!(r.to_string(), "123"); } @@ -1834,8 +1894,8 @@ mod tests { fn test_from_box_trait_zero_sized() { use std::fmt::Debug; - let b: Box<Debug> = box (); - let r: Arc<Debug> = Arc::from(b); + let b: Box<dyn Debug> = box (); + let r: Arc<dyn Debug> = Arc::from(b); assert_eq!(format!("{:?}", r), "()"); } @@ -1847,6 +1907,26 @@ mod tests { assert_eq!(&r[..], [1, 2, 3]); } + + #[test] + fn test_downcast() { + use std::any::Any; + + let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::max_value()); + let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc"); + + assert!(r1.clone().downcast::<u32>().is_err()); + + let r1i32 = r1.downcast::<i32>(); + assert!(r1i32.is_ok()); + assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value())); + + assert!(r2.clone().downcast::<i32>().is_err()); + + let r2str = r2.downcast::<&'static str>(); + assert!(r2str.is_ok()); + assert_eq!(r2str.unwrap(), Arc::new("abc")); + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc/task.rs b/src/liballoc/task.rs new file mode 100644 index 00000000000..7a4eda21a60 --- /dev/null +++ b/src/liballoc/task.rs @@ -0,0 +1,140 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Types and Traits for working with asynchronous tasks. + +pub use core::task::*; + +#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] +pub use self::if_arc::*; + +#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] +mod if_arc { + use super::*; + use core::marker::PhantomData; + use core::mem; + use core::ptr::{self, NonNull}; + use sync::Arc; + + /// A way of waking up a specific task. + /// + /// Any task executor must provide a way of signaling that a task it owns + /// is ready to be `poll`ed again. Executors do so by implementing this trait. + pub trait Wake: Send + Sync { + /// Indicates that the associated task is ready to make progress and should + /// be `poll`ed. + /// + /// Executors generally maintain a queue of "ready" tasks; `wake` should place + /// the associated task onto this queue. + fn wake(arc_self: &Arc<Self>); + + /// Indicates that the associated task is ready to make progress and should + /// be `poll`ed. This function is like `wake`, but can only be called from the + /// thread on which this `Wake` was created. 
+ /// + /// Executors generally maintain a queue of "ready" tasks; `wake_local` should place + /// the associated task onto this queue. + #[inline] + unsafe fn wake_local(arc_self: &Arc<Self>) { + Self::wake(arc_self); + } + } + + #[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] + struct ArcWrapped<T>(PhantomData<T>); + + unsafe impl<T: Wake + 'static> UnsafeWake for ArcWrapped<T> { + #[inline] + unsafe fn clone_raw(&self) -> Waker { + let me: *const ArcWrapped<T> = self; + let arc = (*(&me as *const *const ArcWrapped<T> as *const Arc<T>)).clone(); + Waker::from(arc) + } + + #[inline] + unsafe fn drop_raw(&self) { + let mut me: *const ArcWrapped<T> = self; + let me = &mut me as *mut *const ArcWrapped<T> as *mut Arc<T>; + ptr::drop_in_place(me); + } + + #[inline] + unsafe fn wake(&self) { + let me: *const ArcWrapped<T> = self; + T::wake(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>)) + } + + #[inline] + unsafe fn wake_local(&self) { + let me: *const ArcWrapped<T> = self; + T::wake_local(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>)) + } + } + + impl<T> From<Arc<T>> for Waker + where T: Wake + 'static, + { + fn from(rc: Arc<T>) -> Self { + unsafe { + let ptr = mem::transmute::<Arc<T>, NonNull<ArcWrapped<T>>>(rc); + Waker::new(ptr) + } + } + } + + /// Creates a `LocalWaker` from a local `wake`. + /// + /// This function requires that `wake` is "local" (created on the current thread). + /// The resulting `LocalWaker` will call `wake.wake_local()` when awoken, and + /// will call `wake.wake()` if awoken after being converted to a `Waker`. + #[inline] + pub unsafe fn local_waker<W: Wake + 'static>(wake: Arc<W>) -> LocalWaker { + let ptr = mem::transmute::<Arc<W>, NonNull<ArcWrapped<W>>>(wake); + LocalWaker::new(ptr) + } + + struct NonLocalAsLocal<T>(ArcWrapped<T>); + + unsafe impl<T: Wake + 'static> UnsafeWake for NonLocalAsLocal<T> { + #[inline] + unsafe fn clone_raw(&self) -> Waker { + self.0.clone_raw() + } + + #[inline] + unsafe fn drop_raw(&self) { + self.0.drop_raw() + } + + #[inline] + unsafe fn wake(&self) { + self.0.wake() + } + + #[inline] + unsafe fn wake_local(&self) { + // Since we're nonlocal, we can't call wake_local + self.0.wake() + } + } + + /// Creates a `LocalWaker` from a non-local `wake`. + /// + /// This function is similar to `local_waker`, but does not require that `wake` + /// is local to the current thread. The resulting `LocalWaker` will call + /// `wake.wake()` when awoken. + #[inline] + pub fn local_waker_from_nonlocal<W: Wake + 'static>(wake: Arc<W>) -> LocalWaker { + unsafe { + let ptr = mem::transmute::<Arc<W>, NonNull<NonLocalAsLocal<W>>>(wake); + LocalWaker::new(ptr) + } + } +} diff --git a/src/liballoc/tests/arc.rs b/src/liballoc/tests/arc.rs new file mode 100644 index 00000000000..d90c22a3b18 --- /dev/null +++ b/src/liballoc/tests/arc.rs @@ -0,0 +1,55 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
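For orientation, a toy implementor of the `Wake` trait defined in `task.rs` above. This is a sketch against the unstable API exactly as it appears in this snapshot (the `futures_api` gate and the `std::task` re-export are assumptions about the surrounding tree; this interface changed several times before stabilizing):

```
#![feature(futures_api)]

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Wake, Waker};

// A wake handle that just counts wake-ups; a real executor would
// push the owning task onto its ready queue here instead.
struct Counter(AtomicUsize);

impl Wake for Counter {
    fn wake(arc_self: &Arc<Self>) {
        arc_self.0.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    let counter = Arc::new(Counter(AtomicUsize::new(0)));
    // Uses the `From<Arc<T>>` impl from the module above.
    let waker = Waker::from(counter.clone());
    waker.wake();
    assert_eq!(counter.0.load(Ordering::SeqCst), 1);
}
```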
+ +use std::any::Any; +use std::sync::{Arc, Weak}; + +#[test] +fn uninhabited() { + enum Void {} + let mut a = Weak::<Void>::new(); + a = a.clone(); + assert!(a.upgrade().is_none()); + + let mut a: Weak<dyn Any> = a; // Unsizing + a = a.clone(); + assert!(a.upgrade().is_none()); +} + +#[test] +fn slice() { + let a: Arc<[u32; 3]> = Arc::new([3, 2, 1]); + let a: Arc<[u32]> = a; // Unsizing + let b: Arc<[u32]> = Arc::from(&[3, 2, 1][..]); // Conversion + assert_eq!(a, b); + + // Exercise is_dangling() with a DST + let mut a = Arc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); +} + +#[test] +fn trait_object() { + let a: Arc<u32> = Arc::new(4); + let a: Arc<dyn Any> = a; // Unsizing + + // Exercise is_dangling() with a DST + let mut a = Arc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); + + let mut b = Weak::<u32>::new(); + b = b.clone(); + assert!(b.upgrade().is_none()); + let mut b: Weak<dyn Any> = b; // Unsizing + b = b.clone(); + assert!(b.upgrade().is_none()); +} diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs index 06d585f8ea8..8494463463c 100644 --- a/src/liballoc/tests/binary_heap.rs +++ b/src/liballoc/tests/binary_heap.rs @@ -8,9 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::panic; +use std::cmp; use std::collections::BinaryHeap; use std::collections::binary_heap::{Drain, PeekMut}; +use std::panic::{self, AssertUnwindSafe}; +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + +use rand::{thread_rng, Rng}; #[test] fn test_iterator() { @@ -274,29 +278,86 @@ fn test_extend_specialization() { assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); } -#[test] -fn test_placement() { - let mut a = BinaryHeap::new(); - &mut a <- 2; - &mut a <- 4; - &mut a <- 3; - assert_eq!(a.peek(), Some(&4)); - assert_eq!(a.len(), 3); - &mut a <- 1; - assert_eq!(a.into_sorted_vec(), vec![1, 2, 3, 4]); -} - -#[test] -fn test_placement_panic() { - let mut heap = BinaryHeap::from(vec![1, 2, 3]); - fn mkpanic() -> usize { panic!() } - let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { &mut heap <- mkpanic(); })); - assert_eq!(heap.len(), 3); -} - #[allow(dead_code)] fn assert_covariance() { fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { d } } + +// old binaryheap failed this test +// +// Integrity means that all elements are present after a comparison panics, +// even if the order may not be correct. +// +// Destructors must be called exactly once per element. +#[test] +fn panic_safe() { + static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; + + #[derive(Eq, PartialEq, Ord, Clone, Debug)] + struct PanicOrd<T>(T, bool); + + impl<T> Drop for PanicOrd<T> { + fn drop(&mut self) { + // update global drop count + DROP_COUNTER.fetch_add(1, Ordering::SeqCst); + } + } + + impl<T: PartialOrd> PartialOrd for PanicOrd<T> { + fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { + if self.1 || other.1 { + panic!("Panicking comparison"); + } + self.0.partial_cmp(&other.0) + } + } + let mut rng = thread_rng(); + const DATASZ: usize = 32; + const NTEST: usize = 10; + + // don't use 0 in the data -- we want to catch the zeroed-out case. + let data = (1..DATASZ + 1).collect::<Vec<_>>(); + + // since it's a fuzzy test, run several tries. 
+ for _ in 0..NTEST { + for i in 1..DATASZ + 1 { + DROP_COUNTER.store(0, Ordering::SeqCst); + + let mut panic_ords: Vec<_> = data.iter() + .filter(|&&x| x != i) + .map(|&x| PanicOrd(x, false)) + .collect(); + let panic_item = PanicOrd(i, true); + + // heapify the sane items + rng.shuffle(&mut panic_ords); + let mut heap = BinaryHeap::from(panic_ords); + let inner_data; + + { + // push the panicking item to the heap and catch the panic + let thread_result = { + let mut heap_ref = AssertUnwindSafe(&mut heap); + panic::catch_unwind(move || { + heap_ref.push(panic_item); + }) + }; + assert!(thread_result.is_err()); + + // Assert no elements were dropped + let drops = DROP_COUNTER.load(Ordering::SeqCst); + assert!(drops == 0, "Must not drop items. drops={}", drops); + inner_data = heap.clone().into_vec(); + drop(heap); + } + let drops = DROP_COUNTER.load(Ordering::SeqCst); + assert_eq!(drops, DATASZ); + + let mut data_sorted = inner_data.into_iter().map(|p| p.0).collect::<Vec<_>>(); + data_sorted.sort(); + assert_eq!(data_sorted, data); + } + } +} diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs index 2393101040d..6ebdb86cc4a 100644 --- a/src/liballoc/tests/btree/map.rs +++ b/src/liballoc/tests/btree/map.rs @@ -9,8 +9,8 @@ // except according to those terms. use std::collections::BTreeMap; -use std::collections::Bound::{self, Excluded, Included, Unbounded}; use std::collections::btree_map::Entry::{Occupied, Vacant}; +use std::ops::Bound::{self, Excluded, Included, Unbounded}; use std::rc::Rc; use std::iter::FromIterator; diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs index 6171b8ba624..0330bda5e32 100644 --- a/src/liballoc/tests/btree/set.rs +++ b/src/liballoc/tests/btree/set.rs @@ -40,7 +40,7 @@ fn test_hash() { } fn check<F>(a: &[i32], b: &[i32], expected: &[i32], f: F) - where F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut FnMut(&i32) -> bool) -> bool + where F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut dyn FnMut(&i32) -> bool) -> bool { let mut set_a = BTreeSet::new(); let mut set_b = BTreeSet::new(); diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs index d3ce12056bb..6fa88ce969a 100644 --- a/src/liballoc/tests/heap.rs +++ b/src/liballoc/tests/heap.rs @@ -9,7 +9,7 @@ // except according to those terms. use alloc_system::System; -use std::heap::{Heap, Alloc, Layout}; +use std::alloc::{Global, Alloc, Layout}; /// https://github.com/rust-lang/rust/issues/45955 /// @@ -22,7 +22,7 @@ fn alloc_system_overaligned_request() { #[test] fn std_heap_overaligned_request() { - check_overalign_requests(Heap) + check_overalign_requests(Global) } fn check_overalign_requests<T: Alloc>(mut allocator: T) { @@ -34,7 +34,8 @@ fn check_overalign_requests<T: Alloc>(mut allocator: T) { allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap() }).collect(); for &ptr in &pointers { - assert_eq!((ptr as usize) % align, 0, "Got a pointer less aligned than requested") + assert_eq!((ptr.as_ptr() as usize) % align, 0, + "Got a pointer less aligned than requested") } // Clean up diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index eee229bc6fd..c12c7a81f79 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -8,43 +8,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
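The `AssertUnwindSafe` wrapper in the binary-heap test above is a general pattern: `catch_unwind` requires an unwind-safe closure, and one that captures `&mut` state is not, so the test vouches for it explicitly. Stripped to its essentials:

```
use std::panic::{self, AssertUnwindSafe};

fn main() {
    let mut log = vec!["before"];

    // Capturing `&mut log` alone is not UnwindSafe; the wrapper is the
    // caller's promise that observing `log` after a panic is fine --
    // exactly the promise the heap test makes about its heap.
    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        log.push("during");
        panic!("boom");
    }));

    assert!(result.is_err());
    assert_eq!(log, ["before", "during"]);
}
```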
-#![deny(warnings)] - #![feature(allocator_api)] #![feature(alloc_system)] #![feature(attr_literals)] #![feature(box_syntax)] -#![feature(inclusive_range_syntax)] -#![feature(collection_placement)] #![feature(const_fn)] #![feature(drain_filter)] #![feature(exact_size_is_empty)] -#![feature(iterator_step_by)] #![feature(pattern)] -#![feature(placement_in_syntax)] -#![feature(rand)] -#![feature(repr_align)] -#![feature(slice_rotate)] -#![feature(splice)] +#![feature(slice_sort_by_cached_key)] #![feature(str_escape)] -#![feature(string_retain)] +#![feature(try_reserve)] #![feature(unboxed_closures)] -#![feature(unicode)] #![feature(exact_chunks)] +#![feature(repeat_generic_slice)] extern crate alloc_system; -extern crate std_unicode; +extern crate core; extern crate rand; use std::hash::{Hash, Hasher}; use std::collections::hash_map::DefaultHasher; +mod arc; mod binary_heap; mod btree; mod cow_str; mod fmt; mod heap; mod linked_list; +mod rc; mod slice; mod str; mod string; @@ -68,7 +61,7 @@ fn test_boxed_hasher() { 5u32.hash(&mut hasher_1); assert_eq!(ordinary_hash, hasher_1.finish()); - let mut hasher_2 = Box::new(DefaultHasher::new()) as Box<Hasher>; + let mut hasher_2 = Box::new(DefaultHasher::new()) as Box<dyn Hasher>; 5u32.hash(&mut hasher_2); assert_eq!(ordinary_hash, hasher_2.finish()); } diff --git a/src/liballoc/tests/rc.rs b/src/liballoc/tests/rc.rs new file mode 100644 index 00000000000..9ec7c831444 --- /dev/null +++ b/src/liballoc/tests/rc.rs @@ -0,0 +1,55 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::any::Any; +use std::rc::{Rc, Weak}; + +#[test] +fn uninhabited() { + enum Void {} + let mut a = Weak::<Void>::new(); + a = a.clone(); + assert!(a.upgrade().is_none()); + + let mut a: Weak<dyn Any> = a; // Unsizing + a = a.clone(); + assert!(a.upgrade().is_none()); +} + +#[test] +fn slice() { + let a: Rc<[u32; 3]> = Rc::new([3, 2, 1]); + let a: Rc<[u32]> = a; // Unsizing + let b: Rc<[u32]> = Rc::from(&[3, 2, 1][..]); // Conversion + assert_eq!(a, b); + + // Exercise is_dangling() with a DST + let mut a = Rc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); +} + +#[test] +fn trait_object() { + let a: Rc<u32> = Rc::new(4); + let a: Rc<dyn Any> = a; // Unsizing + + // Exercise is_dangling() with a DST + let mut a = Rc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); + + let mut b = Weak::<u32>::new(); + b = b.clone(); + assert!(b.upgrade().is_none()); + let mut b: Weak<dyn Any> = b; // Unsizing + b = b.clone(); + assert!(b.upgrade().is_none()); +} diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs index 1a9d26fd1a2..df5e18a9a18 100644 --- a/src/liballoc/tests/slice.rs +++ b/src/liballoc/tests/slice.rs @@ -8,9 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
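The new rc.rs tests above revolve around two behaviors: a Weak created by Weak::new dangles (there is no backing allocation, so upgrade always returns None), and weak references keep working across unsizing coercions. A small self-contained sketch of both, assuming nothing beyond the std::rc API used in the tests:

    use std::rc::{Rc, Weak};

    fn main() {
        // A dangling Weak: no allocation behind it, so upgrade always fails.
        let w: Weak<u32> = Weak::new();
        assert!(w.upgrade().is_none());

        // Unsizing from Rc<[u32; 3]> to Rc<[u32]>; the downgraded Weak
        // still upgrades successfully while the Rc is alive.
        let rc: Rc<[u32]> = Rc::new([1, 2, 3]);
        let weak: Weak<[u32]> = Rc::downgrade(&rc);
        assert!(weak.upgrade().is_some());
    }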
+use std::cell::Cell; use std::cmp::Ordering::{Equal, Greater, Less}; +use std::cmp::Ordering; use std::mem; +use std::panic; use std::rc::Rc; +use std::sync::atomic::Ordering::Relaxed; +use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize}; +use std::thread; use rand::{Rng, thread_rng}; @@ -419,6 +425,14 @@ fn test_sort() { v.sort_by(|a, b| b.cmp(a)); assert!(v.windows(2).all(|w| w[0] >= w[1])); + // Sort in lexicographic order. + let mut v1 = orig.clone(); + let mut v2 = orig.clone(); + v1.sort_by_key(|x| x.to_string()); + v2.sort_by_cached_key(|x| x.to_string()); + assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string())); + assert!(v1 == v2); + // Sort with many pre-sorted runs. let mut v = orig.clone(); v.sort(); @@ -471,7 +485,7 @@ fn test_sort_stability() { // the second item represents which occurrence of that // number this element is, i.e. the second elements // will occur in sorted order. - let mut v: Vec<_> = (0..len) + let mut orig: Vec<_> = (0..len) .map(|_| { let n = thread_rng().gen::<usize>() % 10; counts[n] += 1; @@ -479,16 +493,21 @@ fn test_sort_stability() { }) .collect(); - // only sort on the first element, so an unstable sort + let mut v = orig.clone(); + // Only sort on the first element, so an unstable sort // may mix up the counts. v.sort_by(|&(a, _), &(b, _)| a.cmp(&b)); - // this comparison includes the count (the second item + // This comparison includes the count (the second item // of the tuple), so elements with equal first items // will need to be ordered with increasing // counts... i.e. exactly asserting that this sort is // stable. assert!(v.windows(2).all(|w| w[0] <= w[1])); + + let mut v = orig.clone(); + v.sort_by_cached_key(|&(x, _)| x); + assert!(v.windows(2).all(|w| w[0] <= w[1])); } } } @@ -591,6 +610,15 @@ fn test_join() { } #[test] +fn test_join_nocopy() { + let v: [String; 0] = []; + assert_eq!(v.join(","), ""); + assert_eq!(["a".to_string(), "ab".into()].join(","), "a,ab"); + assert_eq!(["a".to_string(), "ab".into(), "abc".into()].join(","), "a,ab,abc"); + assert_eq!(["a".to_string(), "ab".into(), "".into()].join(","), "a,ab,"); +} + +#[test] fn test_insert() { let mut a = vec![1, 2, 4]; a.insert(2, 3); @@ -1263,6 +1291,7 @@ fn test_box_slice_clone() { } #[test] +#[allow(unused_must_use)] // here, we care about the side effects of `.clone()` #[cfg_attr(target_os = "emscripten", ignore)] fn test_box_slice_clone_panics() { use std::sync::Arc; @@ -1341,3 +1370,173 @@ fn test_copy_from_slice_dst_shorter() { let mut dst = [0; 3]; dst.copy_from_slice(&src); } + +const MAX_LEN: usize = 80; + +static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [ + // FIXME(RFC 1109): AtomicUsize is not Copy. 
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), +]; + +static VERSIONS: AtomicUsize = ATOMIC_USIZE_INIT; + +#[derive(Clone, Eq)] +struct DropCounter { + x: u32, + id: usize, + version: Cell<usize>, +} + +impl PartialEq for DropCounter { + fn eq(&self, other: &Self) -> bool { + self.partial_cmp(other) == Some(Ordering::Equal) + } +} + +impl PartialOrd for DropCounter { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + self.version.set(self.version.get() + 1); + other.version.set(other.version.get() + 1); + VERSIONS.fetch_add(2, Relaxed); + self.x.partial_cmp(&other.x) + } +} + +impl Ord for DropCounter { + fn cmp(&self, other: &Self) -> Ordering { + self.partial_cmp(other).unwrap() + } +} + +impl Drop for DropCounter { + fn drop(&mut self) { + DROP_COUNTS[self.id].fetch_add(1, Relaxed); + VERSIONS.fetch_sub(self.version.get(), Relaxed); + } +} + +macro_rules! test { + ($input:ident, $func:ident) => { + let len = $input.len(); + + // Work out the total number of comparisons required to sort + // this array... + let mut count = 0usize; + $input.to_owned().$func(|a, b| { count += 1; a.cmp(b) }); + + // ... and then panic on each and every single one. + for panic_countdown in 0..count { + // Refresh the counters. + VERSIONS.store(0, Relaxed); + for i in 0..len { + DROP_COUNTS[i].store(0, Relaxed); + } + + let v = $input.to_owned(); + let _ = thread::spawn(move || { + let mut v = v; + let mut panic_countdown = panic_countdown; + v.$func(|a, b| { + if panic_countdown == 0 { + SILENCE_PANIC.with(|s| s.set(true)); + panic!(); + } + panic_countdown -= 1; + a.cmp(b) + }) + }).join(); + + // Check that the number of things dropped is exactly + // what we expect (i.e. the contents of `v`). 
+ for (i, c) in DROP_COUNTS.iter().enumerate().take(len) { + let count = c.load(Relaxed); + assert!(count == 1, + "found drop count == {} for i == {}, len == {}", + count, i, len); + } + + // Check that the most recent versions of values were dropped. + assert_eq!(VERSIONS.load(Relaxed), 0); + } + } +} + +thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false)); + +#[test] +#[cfg_attr(target_os = "emscripten", ignore)] // no threads +fn panic_safe() { + let prev = panic::take_hook(); + panic::set_hook(Box::new(move |info| { + if !SILENCE_PANIC.with(|s| s.get()) { + prev(info); + } + })); + + let mut rng = thread_rng(); + + for len in (1..20).chain(70..MAX_LEN) { + for &modulus in &[5, 20, 50] { + for &has_runs in &[false, true] { + let mut input = (0..len) + .map(|id| { + DropCounter { + x: rng.next_u32() % modulus, + id: id, + version: Cell::new(0), + } + }) + .collect::<Vec<_>>(); + + if has_runs { + for c in &mut input { + c.x = c.id as u32; + } + + for _ in 0..5 { + let a = rng.gen::<usize>() % len; + let b = rng.gen::<usize>() % len; + if a < b { + input[a..b].reverse(); + } else { + input.swap(a, b); + } + } + } + + test!(input, sort_by); + test!(input, sort_unstable_by); + } + } + } +} + +#[test] +fn repeat_generic_slice() { + assert_eq!([1, 2].repeat(2), vec![1, 2, 1, 2]); + assert_eq!([1, 2, 3, 4].repeat(0), vec![]); + assert_eq!([1, 2, 3, 4].repeat(1), vec![1, 2, 3, 4]); + assert_eq!( + [1, 2, 3, 4].repeat(3), + vec![1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4] + ); +} diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs index a14a5d32738..6275c7bb112 100644 --- a/src/liballoc/tests/str.rs +++ b/src/liballoc/tests/str.rs @@ -162,11 -162,24 @@ fn test_join_for_different_lengths() { test_join!("-a-bc", ["", "a", "bc"], "-"); } +// join has fast paths for small separators up to 4 bytes +// this tests the slow paths. +#[test] +fn test_join_for_different_lengths_with_long_separator() { + assert_eq!("～～～～～".len(), 15); + + let empty: &[&str] = &[]; + test_join!("", empty, "～～～～～"); + test_join!("a", ["a"], "～～～～～"); + test_join!("a～～～～～b", ["a", "b"], "～～～～～"); + test_join!("～～～～～a～～～～～bc", ["", "a", "bc"], "～～～～～"); +} + #[test] fn test_unsafe_slice() { - assert_eq!("ab", unsafe {"abc".slice_unchecked(0, 2)}); - assert_eq!("bc", unsafe {"abc".slice_unchecked(1, 3)}); - assert_eq!("", unsafe {"abc".slice_unchecked(1, 1)}); + assert_eq!("ab", unsafe {"abc".get_unchecked(0..2)}); + assert_eq!("bc", unsafe {"abc".get_unchecked(1..3)}); + assert_eq!("", unsafe {"abc".get_unchecked(1..1)}); fn a_million_letter_a() -> String { let mut i = 0; let mut rs = String::new(); @@ -187,7 +200,7 @@ fn test_unsafe_slice() { } let letters = a_million_letter_a(); assert_eq!(half_a_million_letter_a(), - unsafe { letters.slice_unchecked(0, 500000)}); + unsafe { letters.get_unchecked(0..500000)}); } #[test] @@ -291,114 +304,410 @@ fn test_replace_pattern() { assert_eq!(data.replace(|c| c == 'γ', "😺😺😺"), "abcdαβ😺😺😺δabcdαβ😺😺😺δ"); } -#[test] -fn test_slice() { - assert_eq!("ab", &"abc"[0..2]); - assert_eq!("bc", &"abc"[1..3]); - assert_eq!("", &"abc"[1..1]); - assert_eq!("\u{65e5}", &"\u{65e5}\u{672c}"[0..3]); +// The current implementation of SliceIndex fails to handle methods +// orthogonally from range types; therefore, it is worth testing +// all of the indexing operations on each input. +mod slice_index { + // Test a slicing operation **that should succeed,** + // testing it on all of the indexing methods. + // + // This is not suitable for testing failure on invalid inputs. + macro_rules!
assert_range_eq { + ($s:expr, $range:expr, $expected:expr) + => { + let mut s: String = $s.to_owned(); + let mut expected: String = $expected.to_owned(); + { + let s: &str = &s; + let expected: &str = &expected; + + assert_eq!(&s[$range], expected, "(in assertion for: index)"); + assert_eq!(s.get($range), Some(expected), "(in assertion for: get)"); + unsafe { + assert_eq!( + s.get_unchecked($range), expected, + "(in assertion for: get_unchecked)", + ); + } + } + { + let s: &mut str = &mut s; + let expected: &mut str = &mut expected; + + assert_eq!( + &mut s[$range], expected, + "(in assertion for: index_mut)", + ); + assert_eq!( + s.get_mut($range), Some(&mut expected[..]), + "(in assertion for: get_mut)", + ); + unsafe { + assert_eq!( + s.get_unchecked_mut($range), expected, + "(in assertion for: get_unchecked_mut)", + ); + } + } + } + } - let data = "ประเทศไทย中华"; - assert_eq!("ป", &data[0..3]); - assert_eq!("ร", &data[3..6]); - assert_eq!("", &data[3..3]); - assert_eq!("华", &data[30..33]); + // Make sure the macro can actually detect bugs, + // because if it can't, then what are we even doing here? + // + // (Be aware this only demonstrates the ability to detect bugs + // in the FIRST method that panics, as the macro is not designed + // to be used in `should_panic`) + #[test] + #[should_panic(expected = "out of bounds")] + fn assert_range_eq_can_fail_by_panic() { + assert_range_eq!("abc", 0..5, "abc"); + } - fn a_million_letter_x() -> String { - let mut i = 0; - let mut rs = String::new(); - while i < 100000 { - rs.push_str("华华华华华华华华华华"); - i += 1; + // (Be aware this only demonstrates the ability to detect bugs + // in the FIRST method it calls, as the macro is not designed + // to be used in `should_panic`) + #[test] + #[should_panic(expected = "==")] + fn assert_range_eq_can_fail_by_inequality() { + assert_range_eq!("abc", 0..2, "abc"); + } + + // Generates test cases for bad index operations. + // + // This generates `should_panic` test cases for Index/IndexMut + // and `None` test cases for get/get_mut. + macro_rules! panic_cases { + ($( + in mod $case_name:ident { + data: $data:expr; + + // optional: + // + // a similar input for which DATA[input] succeeds, and the corresponding + // output str. This helps validate "critical points" where an input range + // straddles the boundary between valid and invalid. 
+ // (such as the input `len..len`, which is just barely valid) + $( + good: data[$good:expr] == $output:expr; + )* + + bad: data[$bad:expr]; + message: $expect_msg:expr; // must be a literal + } + )*) => {$( + mod $case_name { + #[test] + fn pass() { + let mut v: String = $data.into(); + + $( assert_range_eq!(v, $good, $output); )* + + { + let v: &str = &v; + assert_eq!(v.get($bad), None, "(in None assertion for get)"); + } + + { + let v: &mut str = &mut v; + assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)"); + } + } + + #[test] + #[should_panic(expected = $expect_msg)] + fn index_fail() { + let v: String = $data.into(); + let v: &str = &v; + let _v = &v[$bad]; + } + + #[test] + #[should_panic(expected = $expect_msg)] + fn index_mut_fail() { + let mut v: String = $data.into(); + let v: &mut str = &mut v; + let _v = &mut v[$bad]; + } + } + )*}; + } + + #[test] + fn simple_ascii() { + assert_range_eq!("abc", .., "abc"); + + assert_range_eq!("abc", 0..2, "ab"); + assert_range_eq!("abc", 0..=1, "ab"); + assert_range_eq!("abc", ..2, "ab"); + assert_range_eq!("abc", ..=1, "ab"); + + assert_range_eq!("abc", 1..3, "bc"); + assert_range_eq!("abc", 1..=2, "bc"); + assert_range_eq!("abc", 1..1, ""); + assert_range_eq!("abc", 1..=0, ""); + } + + #[test] + fn simple_unicode() { + // 日本 + assert_range_eq!("\u{65e5}\u{672c}", .., "\u{65e5}\u{672c}"); + + assert_range_eq!("\u{65e5}\u{672c}", 0..3, "\u{65e5}"); + assert_range_eq!("\u{65e5}\u{672c}", 0..=2, "\u{65e5}"); + assert_range_eq!("\u{65e5}\u{672c}", ..3, "\u{65e5}"); + assert_range_eq!("\u{65e5}\u{672c}", ..=2, "\u{65e5}"); + + assert_range_eq!("\u{65e5}\u{672c}", 3..6, "\u{672c}"); + assert_range_eq!("\u{65e5}\u{672c}", 3..=5, "\u{672c}"); + assert_range_eq!("\u{65e5}\u{672c}", 3.., "\u{672c}"); + + let data = "ประเทศไทย中华"; + assert_range_eq!(data, 0..3, "ป"); + assert_range_eq!(data, 3..6, "ร"); + assert_range_eq!(data, 3..3, ""); + assert_range_eq!(data, 30..33, "华"); + + /*0: 中 + 3: 华 + 6: V + 7: i + 8: ệ + 11: t + 12: + 13: N + 14: a + 15: m */ + let ss = "中华Việt Nam"; + assert_range_eq!(ss, 3..6, "华"); + assert_range_eq!(ss, 6..16, "Việt Nam"); + assert_range_eq!(ss, 6..=15, "Việt Nam"); + assert_range_eq!(ss, 6.., "Việt Nam"); + + assert_range_eq!(ss, 0..3, "中"); + assert_range_eq!(ss, 3..7, "华V"); + assert_range_eq!(ss, 3..=6, "华V"); + assert_range_eq!(ss, 3..3, ""); + assert_range_eq!(ss, 3..=2, ""); + } + + #[test] + #[cfg(not(target_arch = "asmjs"))] // hits an OOM + fn simple_big() { + fn a_million_letter_x() -> String { + let mut i = 0; + let mut rs = String::new(); + while i < 100000 { + rs.push_str("华华华华华华华华华华"); + i += 1; + } + rs } - rs + fn half_a_million_letter_x() -> String { + let mut i = 0; + let mut rs = String::new(); + while i < 100000 { + rs.push_str("华华华华华"); + i += 1; + } + rs + } + let letters = a_million_letter_x(); + assert_range_eq!(letters, 0..3 * 500000, half_a_million_letter_x()); } - fn half_a_million_letter_x() -> String { - let mut i = 0; - let mut rs = String::new(); - while i < 100000 { - rs.push_str("华华华华华"); - i += 1; + + #[test] + #[should_panic] + fn test_slice_fail() { + &"中华Việt Nam"[0..2]; + } + + panic_cases! { + in mod rangefrom_len { + data: "abcdef"; + good: data[6..] 
== ""; + bad: data[7..]; + message: "out of bounds"; + } + + in mod rangeto_len { + data: "abcdef"; + good: data[..6] == "abcdef"; + bad: data[..7]; + message: "out of bounds"; + } + + in mod rangetoinclusive_len { + data: "abcdef"; + good: data[..=5] == "abcdef"; + bad: data[..=6]; + message: "out of bounds"; + } + + in mod range_len_len { + data: "abcdef"; + good: data[6..6] == ""; + bad: data[7..7]; + message: "out of bounds"; + } + + in mod rangeinclusive_len_len { + data: "abcdef"; + good: data[6..=5] == ""; + bad: data[7..=6]; + message: "out of bounds"; } - rs } - let letters = a_million_letter_x(); - assert_eq!(half_a_million_letter_x(), &letters[0..3 * 500000]); -} -#[test] -fn test_slice_2() { - let ss = "中华Việt Nam"; + panic_cases! { + in mod range_neg_width { + data: "abcdef"; + good: data[4..4] == ""; + bad: data[4..3]; + message: "begin <= end (4 <= 3)"; + } - assert_eq!("华", &ss[3..6]); - assert_eq!("Việt Nam", &ss[6..16]); + in mod rangeinclusive_neg_width { + data: "abcdef"; + good: data[4..=3] == ""; + bad: data[4..=2]; + message: "begin <= end (4 <= 3)"; + } + } - assert_eq!("ab", &"abc"[0..2]); - assert_eq!("bc", &"abc"[1..3]); - assert_eq!("", &"abc"[1..1]); + mod overflow { + panic_cases! { + in mod rangeinclusive { + data: "hello"; + // note: using 0 specifically ensures that the result of overflowing is 0..0, + // so that `get` doesn't simply return None for the wrong reason. + bad: data[0..=usize::max_value()]; + message: "maximum usize"; + } - assert_eq!("中", &ss[0..3]); - assert_eq!("华V", &ss[3..7]); - assert_eq!("", &ss[3..3]); - /*0: 中 - 3: 华 - 6: V - 7: i - 8: ệ - 11: t - 12: - 13: N - 14: a - 15: m */ -} + in mod rangetoinclusive { + data: "hello"; + bad: data[..=usize::max_value()]; + message: "maximum usize"; + } + } + } -#[test] -#[should_panic] -fn test_slice_fail() { - &"中华Việt Nam"[0..2]; + mod boundary { + const DATA: &'static str = "abcαβγ"; + + const BAD_START: usize = 4; + const GOOD_START: usize = 3; + const BAD_END: usize = 6; + const GOOD_END: usize = 7; + const BAD_END_INCL: usize = BAD_END - 1; + const GOOD_END_INCL: usize = GOOD_END - 1; + + // it is especially important to test all of the different range types here + // because some of the logic may be duplicated as part of micro-optimizations + // to dodge unicode boundary checks on half-ranges. + panic_cases! 
{ + in mod range_1 { + data: super::DATA; + bad: data[super::BAD_START..super::GOOD_END]; + message: + "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of"; + } + + in mod range_2 { + data: super::DATA; + bad: data[super::GOOD_START..super::BAD_END]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + + in mod rangefrom { + data: super::DATA; + bad: data[super::BAD_START..]; + message: + "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of"; + } + + in mod rangeto { + data: super::DATA; + bad: data[..super::BAD_END]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + + in mod rangeinclusive_1 { + data: super::DATA; + bad: data[super::BAD_START..=super::GOOD_END_INCL]; + message: + "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of"; + } + + in mod rangeinclusive_2 { + data: super::DATA; + bad: data[super::GOOD_START..=super::BAD_END_INCL]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + + in mod rangetoinclusive { + data: super::DATA; + bad: data[..=super::BAD_END_INCL]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + } + } + + const LOREM_PARAGRAPH: &'static str = "\ + Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem \ + sit amet dolor ultricies condimentum. Praesent iaculis purus elit, ac malesuada \ + quam malesuada in. Duis sed orci eros. Suspendisse sit amet magna mollis, mollis \ + nunc luctus, imperdiet mi. Integer fringilla non sem ut lacinia. Fusce varius \ + tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec tempus vel, \ + gravida nec quam."; + + // check the panic includes the prefix of the sliced string + #[test] + #[should_panic(expected="byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")] + fn test_slice_fail_truncated_1() { + &LOREM_PARAGRAPH[..1024]; + } + // check the truncation in the panic message + #[test] + #[should_panic(expected="luctus, im`[...]")] + fn test_slice_fail_truncated_2() { + &LOREM_PARAGRAPH[..1024]; + } } #[test] -#[should_panic] -fn test_str_slice_rangetoinclusive_max_panics() { - &"hello"[..=usize::max_value()]; +fn test_str_slice_rangetoinclusive_ok() { + let s = "abcαβγ"; + assert_eq!(&s[..=2], "abc"); + assert_eq!(&s[..=4], "abcα"); } #[test] #[should_panic] -fn test_str_slice_rangeinclusive_max_panics() { - &"hello"[1..=usize::max_value()]; +fn test_str_slice_rangetoinclusive_notok() { + let s = "abcαβγ"; + &s[..=3]; } #[test] -#[should_panic] -fn test_str_slicemut_rangetoinclusive_max_panics() { - let mut s = "hello".to_owned(); +fn test_str_slicemut_rangetoinclusive_ok() { + let mut s = "abcαβγ".to_owned(); let s: &mut str = &mut s; - &mut s[..=usize::max_value()]; + assert_eq!(&mut s[..=2], "abc"); + assert_eq!(&mut s[..=4], "abcα"); } #[test] #[should_panic] -fn test_str_slicemut_rangeinclusive_max_panics() { - let mut s = "hello".to_owned(); +fn test_str_slicemut_rangetoinclusive_notok() { + let mut s = "abcαβγ".to_owned(); let s: &mut str = &mut s; - &mut s[1..=usize::max_value()]; -} - -#[test] -fn test_str_get_maxinclusive() { - let mut s = "hello".to_owned(); - { - let s: &str = &s; - assert_eq!(s.get(..=usize::max_value()), None); - assert_eq!(s.get(1..=usize::max_value()), None); - } - { - let s: &mut str = &mut s; - assert_eq!(s.get(..=usize::max_value()), None); - assert_eq!(s.get(1..=usize::max_value()), None); - } + &mut s[..=3]; } #[test] @@ 
-416,50 +725,6 @@ fn test_is_char_boundary() { } } } -const LOREM_PARAGRAPH: &'static str = "\ -Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \ -ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \ -eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \ -sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \ -tempus vel, gravida nec quam."; - -// check the panic includes the prefix of the sliced string -#[test] -#[should_panic(expected="byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")] -fn test_slice_fail_truncated_1() { - &LOREM_PARAGRAPH[..1024]; -} -// check the truncation in the panic message -#[test] -#[should_panic(expected="luctus, im`[...]")] -fn test_slice_fail_truncated_2() { - &LOREM_PARAGRAPH[..1024]; -} - -#[test] -#[should_panic(expected="byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of")] -fn test_slice_fail_boundary_1() { - &"abcαβγ"[4..]; -} - -#[test] -#[should_panic(expected="byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of")] -fn test_slice_fail_boundary_2() { - &"abcαβγ"[2..6]; -} - -#[test] -fn test_slice_from() { - assert_eq!(&"abcd"[0..], "abcd"); - assert_eq!(&"abcd"[2..], "cd"); - assert_eq!(&"abcd"[4..], ""); -} -#[test] -fn test_slice_to() { - assert_eq!(&"abcd"[..0], ""); - assert_eq!(&"abcd"[..2], "ab"); - assert_eq!(&"abcd"[..4], "abcd"); -} #[test] fn test_trim_left_matches() { @@ -737,6 +1002,12 @@ fn test_escape_unicode() { #[test] fn test_escape_debug() { + // Note that there are subtleties with the number of backslashes + // on the left- and right-hand sides. In particular, Unicode code points + // are usually escaped with two backslashes on the right-hand side, as + // they are escaped. However, when the character is unescaped (e.g. for + // printable characters), only a single backslash appears (as the character + // itself appears in the debug string). assert_eq!("abc".escape_debug(), "abc"); assert_eq!("a c".escape_debug(), "a c"); assert_eq!("éèê".escape_debug(), "éèê"); @@ -747,6 +1018,7 @@ fn test_escape_debug() { assert_eq!("\u{10000}\u{10ffff}".escape_debug(), "\u{10000}\\u{10ffff}"); assert_eq!("ab\u{200b}".escape_debug(), "ab\\u{200b}"); assert_eq!("\u{10d4ea}\r".escape_debug(), "\\u{10d4ea}\\r"); + assert_eq!("\u{301}a\u{301}bé\u{e000}".escape_debug(), "\\u{301}a\u{301}bé\\u{e000}"); } #[test] @@ -1054,6 +1326,7 @@ fn test_str_default() { t::<&str>(); t::<String>(); + t::<&mut str>(); } #[test] @@ -1204,8 +1477,7 @@ fn test_rev_split_char_iterator_no_trailing() { #[test] fn test_utf16_code_units() { - use std_unicode::str::Utf16Encoder; - assert_eq!(Utf16Encoder::new(vec!['é', '\u{1F4A9}'].into_iter()).collect::<Vec<u16>>(), + assert_eq!("é\u{1F4A9}".encode_utf16().collect::<Vec<u16>>(), [0xE9, 0xD83D, 0xDCA9]) } diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs index ef6f5e10a72..befb36baeef 100644 --- a/src/liballoc/tests/string.rs +++ b/src/liballoc/tests/string.rs @@ -9,6 +9,9 @@ // except according to those terms. 
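The UTF-16 test rewritten at the end of the str.rs hunk above now goes through str::encode_utf16 instead of the removed Utf16Encoder. For reference, a tiny sketch of the property it checks: characters outside the Basic Multilingual Plane encode as surrogate pairs.

    fn main() {
        // 'é' is a single code unit; U+1F4A9 becomes the pair (0xD83D, 0xDCA9).
        let units: Vec<u16> = "é\u{1F4A9}".encode_utf16().collect();
        assert_eq!(units, [0xE9, 0xD83D, 0xDCA9]);
    }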
use std::borrow::Cow; +use std::collections::CollectionAllocErr::*; +use std::mem::size_of; +use std::{usize, isize}; pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { fn into_cow(self) -> Cow<'a, B>; @@ -129,7 +132,7 @@ fn test_from_utf16() { let s_as_utf16 = s.encode_utf16().collect::<Vec<u16>>(); let u_as_string = String::from_utf16(&u).unwrap(); - assert!(::std_unicode::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok())); + assert!(::core::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok())); assert_eq!(s_as_utf16, u); assert_eq!(u_as_string, s); @@ -440,53 +443,53 @@ fn test_drain() { } #[test] -fn test_splice() { +fn test_replace_range() { let mut s = "Hello, world!".to_owned(); - s.splice(7..12, "世界"); + s.replace_range(7..12, "世界"); assert_eq!(s, "Hello, 世界!"); } #[test] #[should_panic] -fn test_splice_char_boundary() { +fn test_replace_range_char_boundary() { let mut s = "Hello, 世界!".to_owned(); - s.splice(..8, ""); + s.replace_range(..8, ""); } #[test] -fn test_splice_inclusive_range() { +fn test_replace_range_inclusive_range() { let mut v = String::from("12345"); - v.splice(2..=3, "789"); + v.replace_range(2..=3, "789"); assert_eq!(v, "127895"); - v.splice(1..=2, "A"); + v.replace_range(1..=2, "A"); assert_eq!(v, "1A895"); } #[test] #[should_panic] -fn test_splice_out_of_bounds() { +fn test_replace_range_out_of_bounds() { let mut s = String::from("12345"); - s.splice(5..6, "789"); + s.replace_range(5..6, "789"); } #[test] #[should_panic] -fn test_splice_inclusive_out_of_bounds() { +fn test_replace_range_inclusive_out_of_bounds() { let mut s = String::from("12345"); - s.splice(5..=5, "789"); + s.replace_range(5..=5, "789"); } #[test] -fn test_splice_empty() { +fn test_replace_range_empty() { let mut s = String::from("12345"); - s.splice(1..2, ""); + s.replace_range(1..2, ""); assert_eq!(s, "1345"); } #[test] -fn test_splice_unbounded() { +fn test_replace_range_unbounded() { let mut s = String::from("12345"); - s.splice(.., ""); + s.replace_range(.., ""); assert_eq!(s, ""); } @@ -504,3 +507,163 @@ fn test_into_boxed_str() { let ys = xs.into_boxed_str(); assert_eq!(&*ys, "hello my name is bob"); } + +#[test] +fn test_reserve_exact() { + // This is all the same as test_reserve + + let mut s = String::new(); + assert_eq!(s.capacity(), 0); + + s.reserve_exact(2); + assert!(s.capacity() >= 2); + + for _i in 0..16 { + s.push('0'); + } + + assert!(s.capacity() >= 16); + s.reserve_exact(16); + assert!(s.capacity() >= 32); + + s.push('0'); + + s.reserve_exact(16); + assert!(s.capacity() >= 33) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. 
+ let guards_against_isize = size_of::<usize>() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_string: String = String::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX + 1 is an OOM + if let Err(AllocErr) = empty_string.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + // Check usize::MAX is an OOM + if let Err(AllocErr) = empty_string.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: String = String::from("0123456789"); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. 
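An aside on the two error variants these try_reserve tests distinguish: CapacityOverflow means the requested capacity cannot even be computed (so no allocation is attempted), while AllocErr means the allocator itself failed. A minimal standalone sketch of matching on the always-overflowing case (hypothetical usage, not part of the test file):

    #![feature(try_reserve)]
    use std::collections::CollectionAllocErr;

    fn main() {
        let mut buf: Vec<u8> = vec![0; 10];
        // A reasonable request succeeds.
        assert!(buf.try_reserve(100).is_ok());
        // len (10) + usize::MAX overflows the capacity computation, so this
        // fails with CapacityOverflow on every platform, without allocating.
        match buf.try_reserve(usize::max_value()) {
            Err(CollectionAllocErr::CapacityOverflow) => {}
            other => panic!("expected CapacityOverflow, got {:?}", other),
        }
    }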
+ + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::<usize>() < 8; + + { + let mut empty_string: String = String::new(); + + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + if let Err(AllocErr) = empty_string.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + if let Err(AllocErr) = empty_string.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: String = String::from("0123456789"); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs index 9cfde5dcc73..e329b45a617 100644 --- a/src/liballoc/tests/vec.rs +++ b/src/liballoc/tests/vec.rs @@ -10,8 +10,9 @@ use std::borrow::Cow; use std::mem::size_of; -use std::panic; +use std::{usize, isize}; use std::vec::{Drain, IntoIter}; +use std::collections::CollectionAllocErr::*; struct DropCounter<'a> { count: &'a mut u32, @@ -753,24 +754,6 @@ fn assert_covariance() { } #[test] -fn test_placement() { - let mut vec = vec![1]; - assert_eq!(vec.place_back() <- 2, &2); - assert_eq!(vec.len(), 2); - assert_eq!(vec.place_back() <- 3, &3); - assert_eq!(vec.len(), 3); - assert_eq!(&vec, &[1, 2, 3]); -} - -#[test] -fn test_placement_panic() { - let mut vec = vec![1, 2, 3]; - fn mkpanic() -> usize { panic!() } - let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { vec.place_back() <- mkpanic(); })); - assert_eq!(vec.len(), 3); -} - -#[test] fn from_into_inner() { let vec = vec![1, 2, 3]; let ptr = vec.as_ptr(); @@ -965,3 +948,209 @@ fn drain_filter_complex() { assert_eq!(vec, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]); } } + +#[test] +fn test_reserve_exact() { + // This is all the same as test_reserve + + let mut v = Vec::new(); + assert_eq!(v.capacity(), 0); + + v.reserve_exact(2); + assert!(v.capacity() >= 2); + + for i in 0..16 { + v.push(i); + } + + assert!(v.capacity() >= 16); + v.reserve_exact(16); + assert!(v.capacity() >= 32); + + v.push(16); + + v.reserve_exact(16); + assert!(v.capacity() >= 33) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + 
// * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. + let guards_against_isize = size_of::<usize>() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_bytes: Vec<u8> = Vec::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX + 1 is an OOM + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + // Check usize::MAX is an OOM + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + // Same basic idea, but with interesting type size + let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should fail in the mul-by-size + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) { + } else { + panic!("usize::MAX should trigger an 
overflow!"); + } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::<usize>() < 8; + + { + let mut empty_bytes: Vec<u8> = Vec::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + if let Err(AllocErr) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + if let Err(AllocErr) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs index f2935c05d4f..3ea6c87a651 100644 --- a/src/liballoc/tests/vec_deque.rs +++ b/src/liballoc/tests/vec_deque.rs @@ -11,6 +11,9 @@ use std::collections::VecDeque; use std::fmt::Debug; use std::collections::vec_deque::{Drain}; +use std::collections::CollectionAllocErr::*; +use std::mem::size_of; +use std::{usize, isize}; use self::Taggy::*; use self::Taggypar::*; @@ -926,6 +929,107 @@ fn test_append() { } #[test] +fn test_append_permutations() { + fn construct_vec_deque( + push_back: usize, + pop_back: usize, + push_front: usize, + pop_front: usize, + ) -> VecDeque<usize> { + let mut out = VecDeque::new(); + for a 
in 0..push_back { + out.push_back(a); + } + for b in 0..push_front { + out.push_front(push_back + b); + } + for _ in 0..pop_back { + out.pop_back(); + } + for _ in 0..pop_front { + out.pop_front(); + } + out + } + + const MAX: usize = 5; + + // Many different permutations of both the `VecDeque` getting appended to + // and the one getting appended are generated to check `append`. + // This ensures all 6 code paths of `append` are tested. + for src_push_back in 0..MAX { + for src_push_front in 0..MAX { + // doesn't pop more values than are pushed + for src_pop_back in 0..(src_push_back + src_push_front) { + for src_pop_front in 0..(src_push_back + src_push_front - src_pop_back) { + + let src = construct_vec_deque( + src_push_back, + src_pop_back, + src_push_front, + src_pop_front, + ); + + for dst_push_back in 0..MAX { + for dst_push_front in 0..MAX { + for dst_pop_back in 0..(dst_push_back + dst_push_front) { + for dst_pop_front + in 0..(dst_push_back + dst_push_front - dst_pop_back) + { + let mut dst = construct_vec_deque( + dst_push_back, + dst_pop_back, + dst_push_front, + dst_pop_front, + ); + let mut src = src.clone(); + + // Assert that appending `src` to `dst` gives the same order + // of values as iterating over both in sequence. + let correct = dst + .iter() + .chain(src.iter()) + .cloned() + .collect::<Vec<usize>>(); + dst.append(&mut src); + assert_eq!(dst, correct); + assert!(src.is_empty()); + } + } + } + } + } + } + } + } +} + +struct DropCounter<'a> { + count: &'a mut u32, +} + +impl<'a> Drop for DropCounter<'a> { + fn drop(&mut self) { + *self.count += 1; + } +} + +#[test] +fn test_append_double_drop() { + let (mut count_a, mut count_b) = (0, 0); + { + let mut a = VecDeque::new(); + let mut b = VecDeque::new(); + a.push_back(DropCounter { count: &mut count_a }); + b.push_back(DropCounter { count: &mut count_b }); + + a.append(&mut b); + } + assert_eq!(count_a, 1); + assert_eq!(count_b, 1); +} + +#[test] fn test_retain() { let mut buf = VecDeque::new(); buf.extend(1..5); @@ -1002,23 +1106,206 @@ fn test_is_empty() { } #[test] -fn test_placement_in() { - let mut buf: VecDeque<isize> = VecDeque::new(); - buf.place_back() <- 1; - buf.place_back() <- 2; - assert_eq!(buf, [1,2]); +fn test_reserve_exact_2() { + // This is all the same as test_reserve + + let mut v = VecDeque::new(); + + v.reserve_exact(2); + assert!(v.capacity() >= 2); + + for i in 0..16 { + v.push_back(i); + } + + assert!(v.capacity() >= 16); + v.reserve_exact(16); + assert!(v.capacity() >= 32); + + v.push_back(16); + + v.reserve_exact(16); + assert!(v.capacity() >= 48) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) + + const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. 
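Note why MAX_CAP here differs from the Vec tests: VecDeque's ring buffer keeps one slot unused and (per the comments further down) rounds its allocation up to a power of two, so the largest usable capacity is about (isize::MAX + 1) / 2 - 1 elements rather than isize::MAX. The rounding is an internal detail; only the lower bound is observable, as in this sketch:

    use std::collections::VecDeque;

    fn main() {
        let mut d: VecDeque<u8> = VecDeque::new();
        d.reserve(100);
        // At least 100 usable slots; the backing buffer is larger because
        // of the spare slot and the (internal) power-of-two rounding.
        assert!(d.capacity() >= 100);
    }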
+ let guards_against_isize = size_of::<usize>() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_bytes: VecDeque<u8> = VecDeque::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX is an OOM + // VecDeque starts with capacity 7, always adds 1 to the capacity + // and also rounds the number to next power of 2 so this is the + // furthest we can go without triggering CapacityOverflow + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_CAP) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + // Same basic idea, but with interesting type size + let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should fail in the mul-by-size + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. 
+ + const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::<usize>() < 8; + + { + let mut empty_bytes: VecDeque<u8> = VecDeque::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX is an OOM + // VecDeque starts with capacity 7, always adds 1 to the capacity + // and also rounds the number to next power of 2 so this is the + // furthest we can go without triggering CapacityOverflow + if let Err(AllocErr) = empty_bytes.try_reserve_exact(MAX_CAP) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + } - buf.place_front() <- 3; - buf.place_front() <- 4; - assert_eq!(buf, [4,3,1,2]); { - let ptr_head = buf.place_front() <- 5; - assert_eq!(*ptr_head, 5); + let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } } + + { - let ptr_tail = buf.place_back() <- 6; - assert_eq!(*ptr_tail, 6); + let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) { + } else { panic!("usize::MAX should trigger an overflow!") } } - assert_eq!(buf, [5,4,3,1,2,6]); + } diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index 301e44632b8..cc913dfbb4b 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -66,27 +66,25 @@ #![stable(feature = "rust1", since = "1.0.0")] -use core::cmp::Ordering; +use core::cmp::{self, Ordering}; use core::fmt; use core::hash::{self, Hash}; use core::intrinsics::{arith_offset, assume}; use core::iter::{FromIterator, FusedIterator, TrustedLen}; use core::marker::PhantomData; use 
core::mem; -#[cfg(not(test))] -use core::num::Float; -use core::ops::{InPlace, Index, IndexMut, Place, Placer}; +use core::ops::Bound::{Excluded, Included, Unbounded}; +use core::ops::{Index, IndexMut, RangeBounds}; use core::ops; use core::ptr; -use core::ptr::Shared; +use core::ptr::NonNull; use core::slice; +use collections::CollectionAllocErr; use borrow::ToOwned; use borrow::Cow; use boxed::Box; use raw_vec::RawVec; -use super::range::RangeArgument; -use Bound::{Excluded, Included, Unbounded}; /// A contiguous growable array type, written `Vec<T>` but pronounced 'vector'. /// @@ -231,9 +229,9 @@ use Bound::{Excluded, Included, Unbounded}; /// /// If a `Vec` *has* allocated memory, then the memory it points to is on the heap /// (as defined by the allocator Rust is configured to use by default), and its -/// pointer points to [`len`] initialized elements in order (what you would see -/// if you coerced it to a slice), followed by [`capacity`]` - `[`len`] -/// logically uninitialized elements. +/// pointer points to [`len`] initialized, contiguous elements in order (what +/// you would see if you coerced it to a slice), followed by [`capacity`]` - +/// `[`len`] logically uninitialized, contiguous elements. /// /// `Vec` will never perform a "small optimization" where elements are actually /// stored on the stack for two reasons: @@ -281,8 +279,8 @@ use Bound::{Excluded, Included, Unbounded}; /// not break, however: using `unsafe` code to write to the excess capacity, /// and then increasing the length to match, is always valid. /// -/// `Vec` does not currently guarantee the order in which elements are dropped -/// (the order has changed in the past, and may change again). +/// `Vec` does not currently guarantee the order in which elements are dropped. +/// The order has changed in the past and may change again. /// /// [`vec!`]: ../../std/macro.vec.html /// [`Index`]: ../../std/ops/trait.Index.html @@ -321,7 +319,8 @@ impl<T> Vec<T> { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new() -> Vec<T> { + #[rustc_const_unstable(feature = "const_vec_new")] + pub const fn new() -> Vec<T> { Vec { buf: RawVec::new(), len: 0, @@ -333,9 +332,10 @@ impl<T> Vec<T> { /// The vector will be able to hold exactly `capacity` elements without /// reallocating. If `capacity` is 0, the vector will not allocate. /// - /// It is important to note that this function does not specify the *length* - /// of the returned vector, but only the *capacity*. For an explanation of - /// the difference between length and capacity, see *[Capacity and reallocation]*. + /// It is important to note that although the returned vector has the + /// *capacity* specified, the vector will have a zero *length*. For an + /// explanation of the difference between length and capacity, see + /// *[Capacity and reallocation]*. /// /// [Capacity and reallocation]: #capacity-and-reallocation /// @@ -489,6 +489,83 @@ impl<T> Vec<T> { self.buf.reserve_exact(self.len, additional); } + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `Vec<T>`. The collection may reserve more space to avoid + /// frequent reallocations. After calling `try_reserve`, capacity will be + /// greater than or equal to `self.len() + additional` if it returns `Ok(())`. + /// Does nothing if capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned.
@@ -333,9 +332,10 @@ impl<T> Vec<T> {
     /// The vector will be able to hold exactly `capacity` elements without
     /// reallocating. If `capacity` is 0, the vector will not allocate.
     ///
-    /// It is important to note that this function does not specify the *length*
-    /// of the returned vector, but only the *capacity*. For an explanation of
-    /// the difference between length and capacity, see *[Capacity and reallocation]*.
+    /// It is important to note that although the returned vector has the
+    /// *capacity* specified, the vector will have a zero *length*. For an
+    /// explanation of the difference between length and capacity, see
+    /// *[Capacity and reallocation]*.
     ///
     /// [Capacity and reallocation]: #capacity-and-reallocation
     ///
@@ -489,6 +489,83 @@ impl<T> Vec<T> {
         self.buf.reserve_exact(self.len, additional);
     }
 
+    /// Tries to reserve capacity for at least `additional` more elements to be inserted
+    /// in the given `Vec<T>`. The collection may reserve more space to avoid
+    /// frequent reallocations. After calling `try_reserve`, capacity will be
+    /// greater than or equal to `self.len() + additional` if it returns `Ok(())`.
+    /// Does nothing if capacity is already sufficient.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::CollectionAllocErr;
+    ///
+    /// fn process_data(data: &[u32]) -> Result<Vec<u32>, CollectionAllocErr> {
+    ///     let mut output = Vec::new();
+    ///
+    ///     // Pre-reserve the memory, exiting if we can't
+    ///     output.try_reserve(data.len())?;
+    ///
+    ///     // Now we know this can't OOM in the middle of our complex work
+    ///     output.extend(data.iter().map(|&val| {
+    ///         val * 2 + 5 // very complicated
+    ///     }));
+    ///
+    ///     Ok(output)
+    /// }
+    /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.buf.try_reserve(self.len, additional)
+    }
+
+    /// Tries to reserve the minimum capacity for exactly `additional` more elements to
+    /// be inserted in the given `Vec<T>`. After calling `try_reserve_exact`,
+    /// capacity will be greater than or equal to `self.len() + additional` if
+    /// it returns `Ok(())`. Does nothing if the capacity is already sufficient.
+    ///
+    /// Note that the allocator may give the collection more space than it
+    /// requests. Therefore capacity cannot be relied upon to be precisely
+    /// minimal. Prefer `try_reserve` if future insertions are expected.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(try_reserve)]
+    /// use std::collections::CollectionAllocErr;
+    ///
+    /// fn process_data(data: &[u32]) -> Result<Vec<u32>, CollectionAllocErr> {
+    ///     let mut output = Vec::new();
+    ///
+    ///     // Pre-reserve the memory, exiting if we can't
+    ///     output.try_reserve_exact(data.len())?;
+    ///
+    ///     // Now we know this can't OOM in the middle of our complex work
+    ///     output.extend(data.iter().map(|&val| {
+    ///         val * 2 + 5 // very complicated
+    ///     }));
+    ///
+    ///     Ok(output)
+    /// }
+    /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+    /// ```
+    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.buf.try_reserve_exact(self.len, additional)
+    }
+
     /// Shrinks the capacity of the vector as much as possible.
     ///
     /// It will drop down as close as possible to the length but the allocator
@@ -505,7 +582,34 @@ impl<T> Vec<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn shrink_to_fit(&mut self) {
-        self.buf.shrink_to_fit(self.len);
+        if self.capacity() != self.len {
+            self.buf.shrink_to_fit(self.len);
+        }
     }
 
+    /// Shrinks the capacity of the vector with a lower bound.
+    ///
+    /// The capacity will remain at least as large as both the length
+    /// and the supplied value.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the current capacity is smaller than the supplied
+    /// minimum capacity.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(shrink_to)]
+    /// let mut vec = Vec::with_capacity(10);
+    /// vec.extend([1, 2, 3].iter().cloned());
+    /// assert_eq!(vec.capacity(), 10);
+    /// vec.shrink_to(4);
+    /// assert!(vec.capacity() >= 4);
+    /// vec.shrink_to(0);
+    /// assert!(vec.capacity() >= 3);
+    /// ```
+    #[unstable(feature = "shrink_to", reason = "new API", issue="0")]
+    pub fn shrink_to(&mut self, min_capacity: usize) {
+        self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
+    }
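A small usage sketch contrasting the two shrinking methods above (nightly-only, `shrink_to` gate as declared in the diff):

```rust
#![feature(shrink_to)]

fn main() {
    let mut v = Vec::with_capacity(16);
    v.extend(0..4);

    // shrink_to keeps at least max(len, min_capacity) slots, so this
    // cannot drop below the current length of 4.
    v.shrink_to(2);
    assert!(v.capacity() >= 4);

    // shrink_to_fit now returns early when capacity already equals the
    // length, skipping a pointless allocator round-trip.
    v.shrink_to_fit();
    assert!(v.capacity() >= v.len());
}
```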
 
     /// Converts the vector into [`Box<[T]>`][owned slice].
@@ -586,14 +690,20 @@ impl<T> Vec<T> {
     /// [`drain`]: #method.drain
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn truncate(&mut self, len: usize) {
+        let current_len = self.len;
         unsafe {
+            let mut ptr = self.as_mut_ptr().offset(self.len as isize);
+            // Set the final length at the end, keeping in mind that
+            // dropping an element might panic. Works around a missed
+            // optimization, as seen in the following issue:
+            // https://github.com/rust-lang/rust/issues/51802
+            let mut local_len = SetLenOnDrop::new(&mut self.len);
+
             // drop any extra elements
-            while len < self.len {
-                // decrement len before the drop_in_place(), so a panic on Drop
-                // doesn't re-drop the just-failed value.
-                self.len -= 1;
-                let len = self.len;
-                ptr::drop_in_place(self.get_unchecked_mut(len));
+            for _ in len..current_len {
+                local_len.decrement_len(1);
+                ptr = ptr.offset(-1);
+                ptr::drop_in_place(ptr);
             }
         }
     }
@@ -705,9 +815,15 @@ impl<T> Vec<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn swap_remove(&mut self, index: usize) -> T {
-        let length = self.len();
-        self.swap(index, length - 1);
-        self.pop().unwrap()
+        unsafe {
+            // We replace self[index] with the last element. Note that if the
+            // bounds check on hole succeeds there must be a last element (which
+            // can be self[index] itself).
+            let hole: *mut T = &mut self[index];
+            let last = ptr::read(self.get_unchecked(self.len - 1));
+            self.len -= 1;
+            ptr::replace(hole, last)
+        }
     }
 
     /// Inserts an element at position `index` within the vector, shifting all
@@ -733,7 +849,7 @@ impl<T> Vec<T> {
 
         // space for the new element
         if len == self.buf.cap() {
-            self.buf.double();
+            self.reserve(1);
         }
 
         unsafe {
@@ -805,22 +921,7 @@ impl<T> Vec<T> {
     pub fn retain<F>(&mut self, mut f: F)
         where F: FnMut(&T) -> bool
     {
-        let len = self.len();
-        let mut del = 0;
-        {
-            let v = &mut **self;
-
-            for i in 0..len {
-                if !f(&v[i]) {
-                    del += 1;
-                } else if del > 0 {
-                    v.swap(i - del, i);
-                }
-            }
-        }
-        if del > 0 {
-            self.truncate(len - del);
-        }
+        self.drain_filter(|x| !f(x));
     }
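Observable behavior of `retain` is unchanged by the `drain_filter` rewrite above: rejected elements are dropped and the kept elements preserve their order. A quick sketch:

```rust
fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];

    // Internally this is now self.drain_filter(|x| !f(x)).
    v.retain(|&x| x % 2 == 0);
    assert_eq!(v, [2, 4, 6]);
}
```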
 
     /// Removes all but the first of consecutive elements in the vector that resolve to the same
@@ -968,7 +1069,7 @@ impl<T> Vec<T> {
         // This will panic or abort if we would allocate > isize::MAX bytes
         // or if the length increment would overflow for zero-sized types.
         if self.len == self.buf.cap() {
-            self.buf.double();
+            self.reserve(1);
         }
         unsafe {
             let end = self.as_mut_ptr().offset(self.len as isize);
@@ -977,29 +1078,6 @@ impl<T> Vec<T> {
         }
     }
 
-    /// Returns a place for insertion at the back of the `Vec`.
-    ///
-    /// Using this method with placement syntax is equivalent to [`push`](#method.push),
-    /// but may be more efficient.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(collection_placement)]
-    /// #![feature(placement_in_syntax)]
-    ///
-    /// let mut vec = vec![1, 2];
-    /// vec.place_back() <- 3;
-    /// vec.place_back() <- 4;
-    /// assert_eq!(&vec, &[1, 2, 3, 4]);
-    /// ```
-    #[unstable(feature = "collection_placement",
-               reason = "placement protocol is subject to change",
-               issue = "30172")]
-    pub fn place_back(&mut self) -> PlaceBack<T> {
-        PlaceBack { vec: self }
-    }
-
     /// Removes the last element from a vector and returns it, or [`None`] if it
     /// is empty.
     ///
@@ -1087,7 +1165,7 @@ impl<T> Vec<T> {
     /// ```
     #[stable(feature = "drain", since = "1.6.0")]
     pub fn drain<R>(&mut self, range: R) -> Drain<T>
-        where R: RangeArgument<usize>
+        where R: RangeBounds<usize>
     {
         // Memory safety
         //
@@ -1100,12 +1178,12 @@ impl<T> Vec<T> {
         // the hole, and the vector length is restored to the new length.
         //
         let len = self.len();
-        let start = match range.start() {
+        let start = match range.start_bound() {
             Included(&n) => n,
             Excluded(&n) => n + 1,
             Unbounded => 0,
         };
-        let end = match range.end() {
+        let end = match range.end_bound() {
             Included(&n) => n + 1,
             Excluded(&n) => n,
             Unbounded => len,
@@ -1124,7 +1202,7 @@ impl<T> Vec<T> {
             tail_start: end,
             tail_len: len - end,
             iter: range_slice.iter(),
-            vec: Shared::from(self),
+            vec: NonNull::from(self),
         }
     }
 }
@@ -1218,6 +1296,49 @@ impl<T> Vec<T> {
         }
         other
     }
+
+    /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+    ///
+    /// If `new_len` is greater than `len`, the `Vec` is extended by the
+    /// difference, with each additional slot filled with the result of
+    /// calling the closure `f`. The return values from `f` will end up
+    /// in the `Vec` in the order they have been generated.
+    ///
+    /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+    ///
+    /// This method uses a closure to create new values on every push. If
+    /// you'd rather [`Clone`] a given value, use [`resize`]. If you want
+    /// to use the [`Default`] trait to generate values, you can pass
+    /// [`Default::default()`] as the second argument.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(vec_resize_with)]
+    ///
+    /// let mut vec = vec![1, 2, 3];
+    /// vec.resize_with(5, Default::default);
+    /// assert_eq!(vec, [1, 2, 3, 0, 0]);
+    ///
+    /// let mut vec = vec![];
+    /// let mut p = 1;
+    /// vec.resize_with(4, || { p *= 2; p });
+    /// assert_eq!(vec, [2, 4, 8, 16]);
+    /// ```
+    ///
+    /// [`resize`]: #method.resize
+    /// [`Clone`]: ../../std/clone/trait.Clone.html
+    /// [`Default`]: ../../std/default/trait.Default.html
+    /// [`Default::default()`]: ../../std/default/trait.Default.html#tymethod.default
+    #[unstable(feature = "vec_resize_with", issue = "41758")]
+    pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+        where F: FnMut() -> T
+    {
+        let len = self.len();
+        if new_len > len {
+            self.extend_with(new_len - len, ExtendFunc(f));
+        } else {
+            self.truncate(new_len);
+        }
+    }
 }
 
 impl<T: Clone> Vec<T> {
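A brief sketch of `resize_with` in use (nightly, `vec_resize_with` gate as above); the `FnMut` bound means each new slot can observe the closure's updated state, and the fill type need not be `Clone`:

```rust
#![feature(vec_resize_with)]

fn main() {
    let mut v = vec![1, 2, 3];

    // Each new slot gets a freshly generated value, in order.
    let mut next = 10;
    v.resize_with(6, || { next += 1; next });
    assert_eq!(v, [1, 2, 3, 11, 12, 13]);

    // Shrinking is a plain truncation; the generator is never called.
    v.resize_with(2, || unreachable!());
    assert_eq!(v, [1, 2]);
}
```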
@@ -1227,8 +1348,9 @@ impl<T: Clone> Vec<T> {
     /// difference, with each additional slot filled with `value`.
     /// If `new_len` is less than `len`, the `Vec` is simply truncated.
     ///
-    /// This method requires `Clone` to clone the passed value. If you'd
-    /// rather create a value with `Default` instead, see [`resize_default`].
+    /// This method requires [`Clone`] to be able to clone the passed value. If
+    /// you need more flexibility (or want to rely on [`Default`] instead of
+    /// [`Clone`]), use [`resize_with`].
     ///
     /// # Examples
     ///
@@ -1242,7 +1364,9 @@ impl<T: Clone> Vec<T> {
     /// assert_eq!(vec, [1, 2]);
     /// ```
     ///
-    /// [`resize_default`]: #method.resize_default
+    /// [`Clone`]: ../../std/clone/trait.Clone.html
+    /// [`Default`]: ../../std/default/trait.Default.html
+    /// [`resize_with`]: #method.resize_with
     #[stable(feature = "vec_resize", since = "1.5.0")]
     pub fn resize(&mut self, new_len: usize, value: T) {
         let len = self.len();
@@ -1259,7 +1383,7 @@ impl<T: Clone> Vec<T> {
     /// Iterates over the slice `other`, clones each element, and then appends
     /// it to this `Vec`. The `other` vector is traversed in-order.
     ///
-    /// Note that this function is same as `extend` except that it is
+    /// Note that this function is the same as [`extend`] except that it is
     /// specialized to work with slices instead. If and when Rust gets
     /// specialization this function will likely be deprecated (but still
     /// available).
@@ -1271,6 +1395,8 @@ impl<T: Clone> Vec<T> {
     /// vec.extend_from_slice(&[2, 3, 4]);
     /// assert_eq!(vec, [1, 2, 3, 4]);
     /// ```
+    ///
+    /// [`extend`]: #method.extend
     #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
     pub fn extend_from_slice(&mut self, other: &[T]) {
         self.spec_extend(other.iter())
@@ -1281,12 +1407,11 @@ impl<T: Default> Vec<T> {
     /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
     ///
     /// If `new_len` is greater than `len`, the `Vec` is extended by the
-    /// difference, with each additional slot filled with `Default::default()`.
+    /// difference, with each additional slot filled with [`Default::default()`].
     /// If `new_len` is less than `len`, the `Vec` is simply truncated.
     ///
-    /// This method uses `Default` to create new values on every push. If
-    /// you'd rather `Clone` a given value, use [`resize`].
-    ///
+    /// This method uses [`Default`] to create new values on every push. If
+    /// you'd rather [`Clone`] a given value, use [`resize`].
     ///
     /// # Examples
     ///
@@ -1303,6 +1428,9 @@ impl<T: Default> Vec<T> {
     /// ```
     ///
     /// [`resize`]: #method.resize
+    /// [`Default::default()`]: ../../std/default/trait.Default.html#tymethod.default
+    /// [`Default`]: ../../std/default/trait.Default.html
+    /// [`Clone`]: ../../std/clone/trait.Clone.html
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     pub fn resize_default(&mut self, new_len: usize) {
         let len = self.len();
@@ -1317,24 +1445,31 @@ impl<T: Default> Vec<T> {
 
 // This code generalises `extend_with_{element,default}`.
 trait ExtendWith<T> {
-    fn next(&self) -> T;
+    fn next(&mut self) -> T;
     fn last(self) -> T;
 }
 
 struct ExtendElement<T>(T);
 impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
-    fn next(&self) -> T { self.0.clone() }
+    fn next(&mut self) -> T { self.0.clone() }
     fn last(self) -> T { self.0 }
 }
 
 struct ExtendDefault;
 impl<T: Default> ExtendWith<T> for ExtendDefault {
-    fn next(&self) -> T { Default::default() }
+    fn next(&mut self) -> T { Default::default() }
     fn last(self) -> T { Default::default() }
 }
+
+struct ExtendFunc<F>(F);
+impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
+    fn next(&mut self) -> T { (self.0)() }
+    fn last(mut self) -> T { (self.0)() }
+}
+
 impl<T> Vec<T> {
     /// Extend the vector by `n` values, using the given generator.
-    fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, value: E) {
+    fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
         self.reserve(n);
 
         unsafe {
@@ -1383,6 +1518,11 @@ impl<'a> SetLenOnDrop<'a> {
     fn increment_len(&mut self, increment: usize) {
         self.local_len += increment;
     }
+
+    #[inline]
+    fn decrement_len(&mut self, decrement: usize) {
+        self.local_len -= decrement;
+    }
 }
 
 impl<'a> Drop for SetLenOnDrop<'a> {
@@ -1472,40 +1612,69 @@ impl SpecFromElem for u8 {
     }
 }
 
-macro_rules! impl_spec_from_elem {
+impl<T: Clone + IsZero> SpecFromElem for T {
+    #[inline]
+    fn from_elem(elem: T, n: usize) -> Vec<T> {
+        if elem.is_zero() {
+            return Vec {
+                buf: RawVec::with_capacity_zeroed(n),
+                len: n,
+            }
+        }
+        let mut v = Vec::with_capacity(n);
+        v.extend_with(n, ExtendElement(elem));
+        v
+    }
+}
+
+unsafe trait IsZero {
+    /// Whether this value is zero
+    fn is_zero(&self) -> bool;
+}
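The blanket impl above routes `vec![elem; n]` through `IsZero`: when the element is all zero bits, the buffer comes from `RawVec::with_capacity_zeroed` instead of a per-element write loop. A sketch of values that qualify under the impls added below:

```rust
use std::ptr;

fn main() {
    // All-zero bit patterns take the zeroed-allocation fast path.
    let a = vec![0u64; 1024];
    let b = vec![0.0f32; 1024]; // +0.0 is all zero bits
    let c: Vec<*const u8> = vec![ptr::null(); 1024];

    assert!(a.iter().all(|&x| x == 0));
    assert_eq!(b[0].to_bits(), 0);
    assert!(c.iter().all(|p| p.is_null()));

    // -0.0 sets the sign bit, so it falls back to the generic loop.
    let d = vec![-0.0f64; 8];
    assert!(d.iter().all(|x| x.is_sign_negative()));
}
```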
+
+macro_rules! impl_is_zero {
     ($t: ty, $is_zero: expr) => {
-        impl SpecFromElem for $t {
+        unsafe impl IsZero for $t {
             #[inline]
-            fn from_elem(elem: $t, n: usize) -> Vec<$t> {
-                if $is_zero(elem) {
-                    return Vec {
-                        buf: RawVec::with_capacity_zeroed(n),
-                        len: n,
-                    }
-                }
-                let mut v = Vec::with_capacity(n);
-                v.extend_with(n, ExtendElement(elem));
-                v
+            fn is_zero(&self) -> bool {
+                $is_zero(*self)
             }
         }
-    };
+    }
 }
 
-impl_spec_from_elem!(i8, |x| x == 0);
-impl_spec_from_elem!(i16, |x| x == 0);
-impl_spec_from_elem!(i32, |x| x == 0);
-impl_spec_from_elem!(i64, |x| x == 0);
-impl_spec_from_elem!(i128, |x| x == 0);
-impl_spec_from_elem!(isize, |x| x == 0);
+impl_is_zero!(i8, |x| x == 0);
+impl_is_zero!(i16, |x| x == 0);
+impl_is_zero!(i32, |x| x == 0);
+impl_is_zero!(i64, |x| x == 0);
+impl_is_zero!(i128, |x| x == 0);
+impl_is_zero!(isize, |x| x == 0);
+
+impl_is_zero!(u16, |x| x == 0);
+impl_is_zero!(u32, |x| x == 0);
+impl_is_zero!(u64, |x| x == 0);
+impl_is_zero!(u128, |x| x == 0);
+impl_is_zero!(usize, |x| x == 0);
+
+impl_is_zero!(char, |x| x == '\0');
 
-impl_spec_from_elem!(u16, |x| x == 0);
-impl_spec_from_elem!(u32, |x| x == 0);
-impl_spec_from_elem!(u64, |x| x == 0);
-impl_spec_from_elem!(u128, |x| x == 0);
-impl_spec_from_elem!(usize, |x| x == 0);
+impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
+impl_is_zero!(f64, |x: f64| x.to_bits() == 0);
+
+unsafe impl<T: ?Sized> IsZero for *const T {
+    #[inline]
+    fn is_zero(&self) -> bool {
+        (*self).is_null()
+    }
+}
+
+unsafe impl<T: ?Sized> IsZero for *mut T {
+    #[inline]
+    fn is_zero(&self) -> bool {
+        (*self).is_null()
+    }
+}
 
-impl_spec_from_elem!(f32, |x: f32| x == 0. && x.is_sign_positive());
-impl_spec_from_elem!(f64, |x: f64| x == 0. && x.is_sign_positive());
"vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::Index<ops::RangeFull> for Vec<T> { - type Output = [T]; - - #[inline] - fn index(&self, _index: ops::RangeFull) -> &[T] { - self - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::Index<ops::RangeInclusive<usize>> for Vec<T> { - type Output = [T]; - - #[inline] - fn index(&self, index: ops::RangeInclusive<usize>) -> &[T] { - Index::index(&**self, index) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::Index<ops::RangeToInclusive<usize>> for Vec<T> { - type Output = [T]; +#[rustc_on_unimplemented( + message="vector indices are of type `usize` or ranges of `usize`", + label="vector indices are of type `usize` or ranges of `usize`", +)] +impl<T, I> Index<I> for Vec<T> +where + I: ::core::slice::SliceIndex<[T]>, +{ + type Output = I::Output; #[inline] - fn index(&self, index: ops::RangeToInclusive<usize>) -> &[T] { + fn index(&self, index: I) -> &Self::Output { Index::index(&**self, index) } } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::IndexMut<ops::Range<usize>> for Vec<T> { - #[inline] - fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::IndexMut<ops::RangeTo<usize>> for Vec<T> { - #[inline] - fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::IndexMut<ops::RangeFrom<usize>> for Vec<T> { - #[inline] - fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::IndexMut<ops::RangeFull> for Vec<T> { - #[inline] - fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [T] { - self - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::IndexMut<ops::RangeInclusive<usize>> for Vec<T> { - #[inline] - fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl<T> ops::IndexMut<ops::RangeToInclusive<usize>> for Vec<T> { +#[rustc_on_unimplemented( + message="vector indices are of type `usize` or ranges of `usize`", + label="vector indices are of type `usize` or ranges of `usize`", +)] +impl<T, I> IndexMut<I> for Vec<T> +where + I: ::core::slice::SliceIndex<[T]>, +{ #[inline] - fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut [T] { + fn index_mut(&mut self, index: I) -> 
@@ -1745,7 +1804,7 @@ impl<T> IntoIterator for Vec<T> {
         let cap = self.buf.cap();
         mem::forget(self);
         IntoIter {
-            buf: Shared::new_unchecked(begin),
+            buf: NonNull::new_unchecked(begin),
             phantom: PhantomData,
             cap,
             ptr: begin,
@@ -1970,7 +2029,7 @@ impl<T> Vec<T> {
     #[inline]
     #[stable(feature = "vec_splice", since = "1.21.0")]
     pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<I::IntoIter>
-        where R: RangeArgument<usize>, I: IntoIterator<Item=T>
+        where R: RangeBounds<usize>, I: IntoIterator<Item=T>
     {
         Splice {
             drain: self.drain(range),
@@ -1981,8 +2040,8 @@ impl<T> Vec<T> {
     /// Creates an iterator which uses a closure to determine if an element should be removed.
     ///
     /// If the closure returns true, then the element is removed and yielded.
-    /// If the closure returns false, it will try again, and call the closure
-    /// on the next element, seeing if it passes the test.
+    /// If the closure returns false, the element will remain in the vector and will not be
+    /// yielded by the iterator.
     ///
     /// Using this method is equivalent to the following code:
     ///
@@ -2247,6 +2306,13 @@ impl<'a, T: Clone> From<Vec<T>> for Cow<'a, [T]> {
     }
 }
 
+#[stable(feature = "cow_from_vec_ref", since = "1.28.0")]
+impl<'a, T: Clone> From<&'a Vec<T>> for Cow<'a, [T]> {
+    fn from(v: &'a Vec<T>) -> Cow<'a, [T]> {
+        Cow::Borrowed(v.as_slice())
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a, T> FromIterator<T> for Cow<'a, [T]> where T: Clone {
     fn from_iter<I: IntoIterator<Item = T>>(it: I) -> Cow<'a, [T]> {
@@ -2267,7 +2333,7 @@ impl<'a, T> FromIterator<T> for Cow<'a, [T]> where T: Clone {
 /// [`IntoIterator`]: ../../std/iter/trait.IntoIterator.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IntoIter<T> {
-    buf: Shared<T>,
+    buf: NonNull<T>,
     phantom: PhantomData<T>,
     cap: usize,
     ptr: *const T,
@@ -2359,9 +2425,10 @@ impl<T> Iterator for IntoIter<T> {
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let exact = match self.ptr.offset_to(self.end) {
-            Some(x) => x as usize,
-            None => (self.end as usize).wrapping_sub(self.ptr as usize),
+        let exact = if mem::size_of::<T>() == 0 {
+            (self.end as usize).wrapping_sub(self.ptr as usize)
+        } else {
+            unsafe { self.end.offset_from(self.ptr) as usize }
         };
         (exact, Some(exact))
     }
@@ -2404,7 +2471,7 @@ impl<T> ExactSizeIterator for IntoIter<T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<T> FusedIterator for IntoIter<T> {}
 
 #[unstable(feature = "trusted_len", issue = "37572")]
@@ -2442,7 +2509,7 @@ pub struct Drain<'a, T: 'a> {
     tail_len: usize,
     /// Current remaining range to remove
     iter: slice::Iter<'a, T>,
-    vec: Shared<Vec<T>>,
+    vec: NonNull<Vec<T>>,
 }
 
 #[stable(feature = "collection_debug", since = "1.17.0")]
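A short sketch of the `From<&Vec<T>>` impl for `Cow<[T]>` added above; the conversion borrows instead of cloning:

```rust
use std::borrow::Cow;

fn main() {
    let v = vec![1, 2, 3];

    // No allocation, no clone: the Cow borrows the Vec's slice.
    let c: Cow<[i32]> = Cow::from(&v);
    match c {
        Cow::Borrowed(s) => assert_eq!(s, [1, 2, 3]),
        Cow::Owned(_) => unreachable!("From<&Vec<T>> yields Cow::Borrowed"),
    }
}
```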
@@ -2485,7 +2552,7 @@ impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
 impl<'a, T> Drop for Drain<'a, T> {
     fn drop(&mut self) {
         // exhaust self first
-        while let Some(_) = self.next() {}
+        self.for_each(drop);
 
         if self.tail_len > 0 {
             unsafe {
@@ -2493,9 +2560,11 @@ impl<'a, T> Drop for Drain<'a, T> {
                 // memmove back untouched tail, update to new length
                 let start = source_vec.len();
                 let tail = self.tail_start;
-                let src = source_vec.as_ptr().offset(tail as isize);
-                let dst = source_vec.as_mut_ptr().offset(start as isize);
-                ptr::copy(src, dst, self.tail_len);
+                if tail != start {
+                    let src = source_vec.as_ptr().offset(tail as isize);
+                    let dst = source_vec.as_mut_ptr().offset(start as isize);
+                    ptr::copy(src, dst, self.tail_len);
+                }
                 source_vec.set_len(start + self.tail_len);
             }
         }
@@ -2510,60 +2579,9 @@ impl<'a, T> ExactSizeIterator for Drain<'a, T> {
     }
 }
 
-#[unstable(feature = "fused", issue = "35602")]
+#[stable(feature = "fused", since = "1.26.0")]
 impl<'a, T> FusedIterator for Drain<'a, T> {}
 
-/// A place for insertion at the back of a `Vec`.
-///
-/// See [`Vec::place_back`](struct.Vec.html#method.place_back) for details.
-#[must_use = "places do nothing unless written to with `<-` syntax"]
-#[unstable(feature = "collection_placement",
-           reason = "struct name and placement protocol are subject to change",
-           issue = "30172")]
-#[derive(Debug)]
-pub struct PlaceBack<'a, T: 'a> {
-    vec: &'a mut Vec<T>,
-}
-
-#[unstable(feature = "collection_placement",
-           reason = "placement protocol is subject to change",
-           issue = "30172")]
-impl<'a, T> Placer<T> for PlaceBack<'a, T> {
-    type Place = PlaceBack<'a, T>;
-
-    fn make_place(self) -> Self {
-        // This will panic or abort if we would allocate > isize::MAX bytes
-        // or if the length increment would overflow for zero-sized types.
-        if self.vec.len == self.vec.buf.cap() {
-            self.vec.buf.double();
-        }
-        self
-    }
-}
-
-#[unstable(feature = "collection_placement",
-           reason = "placement protocol is subject to change",
-           issue = "30172")]
-impl<'a, T> Place<T> for PlaceBack<'a, T> {
-    fn pointer(&mut self) -> *mut T {
-        unsafe { self.vec.as_mut_ptr().offset(self.vec.len as isize) }
-    }
-}
-
-#[unstable(feature = "collection_placement",
-           reason = "placement protocol is subject to change",
-           issue = "30172")]
-impl<'a, T> InPlace<T> for PlaceBack<'a, T> {
-    type Owner = &'a mut T;
-
-    unsafe fn finalize(mut self) -> &'a mut T {
-        let ptr = self.pointer();
-        self.vec.len += 1;
-        &mut *ptr
-    }
-}
-
-
 /// A splicing iterator for `Vec`.
 ///
 /// This struct is created by the [`splice()`] method on [`Vec`]. See its
@@ -2605,9 +2623,7 @@ impl<'a, I: Iterator> ExactSizeIterator for Splice<'a, I> {}
 #[stable(feature = "vec_splice", since = "1.21.0")]
 impl<'a, I: Iterator> Drop for Splice<'a, I> {
     fn drop(&mut self) {
-        // exhaust drain first
-        while let Some(_) = self.drain.next() {}
-
+        self.drain.by_ref().for_each(drop);
 
         unsafe {
             if self.drain.tail_len == 0 {
@@ -2736,8 +2752,7 @@ impl<'a, T, F> Drop for DrainFilter<'a, T, F>
     where F: FnMut(&mut T) -> bool,
 {
     fn drop(&mut self) {
-        for _ in self.by_ref() { }
-
+        self.for_each(drop);
         unsafe {
             self.vec.set_len(self.old_len - self.del);
         }
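Finally, a sketch of the `drain_filter` behavior the reworded documentation above now describes (nightly, `drain_filter` gate): `true` removes and yields the element, `false` leaves it in place:

```rust
#![feature(drain_filter)]

fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];

    let evens: Vec<i32> = v.drain_filter(|x| *x % 2 == 0).collect();
    assert_eq!(evens, [2, 4, 6]);
    assert_eq!(v, [1, 3, 5]);
}
```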
