Diffstat (limited to 'src/libcore')
32 files changed, 1240 insertions, 176 deletions
diff --git a/src/libcore/alloc.rs b/src/libcore/alloc.rs index 35e4eea756d..58639808fae 100644 --- a/src/libcore/alloc.rs +++ b/src/libcore/alloc.rs @@ -164,15 +164,13 @@ impl Layout { /// alignment. In other words, if `K` has size 16, `K.align_to(32)` /// will *still* have size 16. /// - /// # Panics - /// - /// Panics if the combination of `self.size()` and the given `align` - /// violates the conditions listed in + /// Returns an error if the combination of `self.size()` and the given + /// `align` violates the conditions listed in /// [`Layout::from_size_align`](#method.from_size_align). - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn align_to(&self, align: usize) -> Self { - Layout::from_size_align(self.size(), cmp::max(self.align(), align)).unwrap() + pub fn align_to(&self, align: usize) -> Result<Self, LayoutErr> { + Layout::from_size_align(self.size(), cmp::max(self.align(), align)) } /// Returns the amount of padding we must insert after `self` @@ -191,7 +189,7 @@ impl Layout { /// to be less than or equal to the alignment of the starting /// address for the whole allocated block of memory. One way to /// satisfy this constraint is to ensure `align <= self.align()`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn padding_needed_for(&self, align: usize) -> usize { let len = self.size(); @@ -220,6 +218,23 @@ impl Layout { len_rounded_up.wrapping_sub(len) } + /// Creates a layout by rounding the size of this layout up to a multiple + /// of the layout's alignment. + /// + /// Returns `Err` if the padded size would overflow. + /// + /// This is equivalent to adding the result of `padding_needed_for` + /// to the layout's current size. + #[unstable(feature = "alloc_layout_extra", issue = "55724")] + #[inline] + pub fn pad_to_align(&self) -> Result<Layout, LayoutErr> { + let pad = self.padding_needed_for(self.align()); + let new_size = self.size().checked_add(pad) + .ok_or(LayoutErr { private: () })?; + + Layout::from_size_align(new_size, self.align()) + } + /// Creates a layout describing the record for `n` instances of /// `self`, with a suitable amount of padding between each to /// ensure that each instance is given its requested size and @@ -228,7 +243,7 @@ impl Layout { /// of each element in the array. /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> { let padded_size = self.size().checked_add(self.padding_needed_for(self.align())) @@ -248,13 +263,16 @@ impl Layout { /// will be properly aligned. Note that the result layout will /// satisfy the alignment properties of both `self` and `next`. /// + /// The resulting layout will be the same as that of a C struct containing + /// two fields with the layouts of `self` and `next`, in that order. + /// /// Returns `Some((k, offset))`, where `k` is layout of the concatenated /// record and `offset` is the relative location, in bytes, of the /// start of the `next` embedded within the concatenated record /// (assuming that the record itself starts at offset 0). /// /// On arithmetic overflow, returns `LayoutErr`. 
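To make the now-fallible `Layout` API above concrete, here is a minimal sketch (the `record_layout` helper is hypothetical, and it assumes a nightly toolchain with the renamed `alloc_layout_extra` gate):

```rust
#![feature(alloc_layout_extra)]

use std::alloc::{Layout, LayoutErr};

// Sketch: build a `repr(C)`-style record layout for a `u8` followed by a
// `u32`. `extend` appends the second field and reports its offset.
fn record_layout() -> Result<Layout, LayoutErr> {
    let (layout, offset) = Layout::new::<u8>().extend(Layout::new::<u32>())?;
    assert_eq!(offset, 4); // the `u32` is placed after 3 bytes of padding
    // `align_to` now reports an invalid alignment as `Err` instead of panicking.
    let layout = layout.align_to(8)?;
    // `pad_to_align` rounds the size up to a multiple of the alignment.
    layout.pad_to_align()
}
```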
- #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> { let new_align = cmp::max(self.align(), next.align()); @@ -281,7 +299,7 @@ impl Layout { /// aligned. /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutErr> { let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?; @@ -293,29 +311,20 @@ impl Layout { /// padding is inserted, the alignment of `next` is irrelevant, /// and is not incorporated *at all* into the resulting layout. /// - /// Returns `(k, offset)`, where `k` is layout of the concatenated - /// record and `offset` is the relative location, in bytes, of the - /// start of the `next` embedded within the concatenated record - /// (assuming that the record itself starts at offset 0). - /// - /// (The `offset` is always the same as `self.size()`; we use this - /// signature out of convenience in matching the signature of - /// `extend`.) - /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn extend_packed(&self, next: Self) -> Result<(Self, usize), LayoutErr> { + pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutErr> { let new_size = self.size().checked_add(next.size()) .ok_or(LayoutErr { private: () })?; let layout = Layout::from_size_align(new_size, self.align())?; - Ok((layout, self.size())) + Ok(layout) } /// Creates a layout describing the record for a `[T; n]`. /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn array<T>(n: usize) -> Result<Self, LayoutErr> { Layout::new::<T>() @@ -514,11 +523,11 @@ pub unsafe trait GlobalAlloc { ptr } - /// Shink or grow a block of memory to the given `new_size`. + /// Shrink or grow a block of memory to the given `new_size`. /// The block is described by the given `ptr` pointer and `layout`. /// /// If this returns a non-null pointer, then ownership of the memory block - /// referenced by `ptr` has been transferred to this alloctor. + /// referenced by `ptr` has been transferred to this allocator. /// The memory may or may not have been deallocated, /// and should be considered unusable (unless of course it was /// transferred back to the caller again via the return value of @@ -765,7 +774,7 @@ pub unsafe trait Alloc { // realloc. alloc_excess, realloc_excess /// Returns a pointer suitable for holding data described by - /// a new layout with `layout`’s alginment and a size given + /// a new layout with `layout`’s alignment and a size given /// by `new_size`. To /// accomplish this, this may extend or shrink the allocation /// referenced by `ptr` to fit the new layout. diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index ec7d366c3f5..9cf42eff219 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -207,8 +207,8 @@ use ptr; /// /// # Examples /// -/// Here you can see how using `Cell<T>` allows to use mutable field inside -/// immutable struct (which is also called 'interior mutability'). +/// In this example, you can see that `Cell<T>` enables mutation inside an +/// immutable struct. 
In other words, it enables "interior mutability". /// /// ``` /// use std::cell::Cell; @@ -225,10 +225,11 @@ use ptr; /// /// let new_value = 100; /// -/// // ERROR, because my_struct is immutable +/// // ERROR: `my_struct` is immutable /// // my_struct.regular_field = new_value; /// -/// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell +/// // WORKS: although `my_struct` is immutable, `special_field` is a `Cell`, +/// // which can always be mutated /// my_struct.special_field.set(new_value); /// assert_eq!(my_struct.special_field.get(), new_value); /// ``` @@ -473,7 +474,7 @@ impl<T: ?Sized> Cell<T> { /// ``` #[inline] #[stable(feature = "cell_as_ptr", since = "1.12.0")] - pub fn as_ptr(&self) -> *mut T { + pub const fn as_ptr(&self) -> *mut T { self.value.get() } @@ -1507,7 +1508,7 @@ impl<T: ?Sized> UnsafeCell<T> { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn get(&self) -> *mut T { + pub const fn get(&self) -> *mut T { &self.value as *const T as *mut T } } diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs index 64a17786b0a..35181afea3d 100644 --- a/src/libcore/char/methods.rs +++ b/src/libcore/char/methods.rs @@ -903,7 +903,7 @@ impl char { /// ``` #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] - pub fn is_ascii(&self) -> bool { + pub const fn is_ascii(&self) -> bool { *self as u32 <= 0x7F } diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index b900990d0a7..dbc28ef7cf6 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -104,7 +104,6 @@ /// assert_eq!(vec![1, 3], filtered); /// ``` #[unstable(feature = "convert_id", issue = "53500")] -#[rustc_const_unstable(feature = "const_convert_id")] #[inline] pub const fn identity<T>(x: T) -> T { x } diff --git a/src/libcore/default.rs b/src/libcore/default.rs index ab36e29b1e1..638acebd617 100644 --- a/src/libcore/default.rs +++ b/src/libcore/default.rs @@ -76,7 +76,7 @@ /// } /// /// impl Default for Kind { -/// fn default() -> Kind { Kind::A } +/// fn default() -> Self { Kind::A } /// } /// ``` /// @@ -118,7 +118,7 @@ pub trait Default: Sized { /// } /// /// impl Default for Kind { - /// fn default() -> Kind { Kind::A } + /// fn default() -> Self { Kind::A } /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/future/future.rs b/src/libcore/future/future.rs index 9176e0d32cb..0c870f9e404 100644 --- a/src/libcore/future/future.rs +++ b/src/libcore/future/future.rs @@ -17,7 +17,7 @@ use ops; use pin::Pin; use task::{Poll, LocalWaker}; -/// A future represents an asychronous computation. +/// A future represents an asynchronous computation. /// /// A future is a value that may not have finished computing yet. This kind of /// "asynchronous value" makes it possible for a thread to continue doing useful diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 56a24168e28..7ed6e4a8f51 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -1025,7 +1025,7 @@ extern "rust-intrinsic" { /// // to avoid problems in case something further down panics. /// src.set_len(0); /// - /// // The two regions cannot overlap becuase mutable references do + /// // The two regions cannot overlap because mutable references do /// // not alias, and two different vectors cannot own the same /// // memory. 
/// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len); @@ -1465,6 +1465,20 @@ extern "rust-intrinsic" { /// y < 0 or y >= N, where N is the width of T in bits. pub fn unchecked_shr<T>(x: T, y: T) -> T; + /// Performs rotate left. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `rotate_left` method. For example, + /// [`std::u32::rotate_left`](../../std/primitive.u32.html#method.rotate_left) + #[cfg(not(stage0))] + pub fn rotate_left<T>(x: T, y: T) -> T; + + /// Performs rotate right. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `rotate_right` method. For example, + /// [`std::u32::rotate_right`](../../std/primitive.u32.html#method.rotate_right) + #[cfg(not(stage0))] + pub fn rotate_right<T>(x: T, y: T) -> T; + /// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits. /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `wrapping_add` method. For example, diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index 5b6d9e2033c..2903c370df8 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -1857,7 +1857,7 @@ pub trait Iterator { /// ``` /// let a = ["lol", "NaN", "2", "5"]; /// - /// let mut first_number = a.iter().find_map(|s| s.parse().ok()); + /// let first_number = a.iter().find_map(|s| s.parse().ok()); /// /// assert_eq!(first_number, Some(2)); /// ``` diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs index d500cc99fa1..7fa3a4bcce7 100644 --- a/src/libcore/iter/sources.rs +++ b/src/libcore/iter/sources.rs @@ -283,7 +283,7 @@ impl<T> Default for Empty<T> { /// assert_eq!(None, nope.next()); /// ``` #[stable(feature = "iter_empty", since = "1.2.0")] -pub fn empty<T>() -> Empty<T> { +pub const fn empty<T>() -> Empty<T> { Empty(marker::PhantomData) } diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index f95f8e7dbcb..d2c5a3bed28 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -960,7 +960,7 @@ impl<T, U, E> Product<Result<U, E>> for Result<T, E> /// /// Calling next on a fused iterator that has returned `None` once is guaranteed /// to return [`None`] again. This trait should be implemented by all iterators -/// that behave this way because it allows for some significant optimizations. +/// that behave this way because it allows optimizing [`Iterator::fuse`]. /// /// Note: In general, you should not use `FusedIterator` in generic bounds if /// you need a fused iterator. Instead, you should just call [`Iterator::fuse`] diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 59cc312bee5..5872ac815c2 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -87,7 +87,6 @@ #![feature(doc_spotlight)] #![feature(extern_types)] #![feature(fundamental)] -#![feature(impl_header_lifetime_elision)] #![feature(intrinsics)] #![feature(lang_items)] #![feature(link_llvm_intrinsics)] @@ -129,6 +128,7 @@ #![feature(const_transmute)] #![feature(reverse_bits)] #![feature(non_exhaustive)] +#![feature(structural_match)] #[prelude_import] #[allow(unused)] @@ -228,7 +228,7 @@ mod nonzero; mod tuple; mod unit; -// Pull in the the `coresimd` crate directly into libcore. This is where all the +// Pull in the `coresimd` crate directly into libcore. This is where all the // architecture-specific (and vendor-specific) intrinsics are defined. AKA // things like SIMD and such. 
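Stepping back to the new `rotate_left`/`rotate_right` intrinsics above: they compute the same result as the shift-based expression they replace. A quick sanity check through the long-stable `u32::rotate_left` wrapper:

```rust
fn main() {
    let x: u32 = 0x8000_0001;
    // The intrinsic-backed method...
    let rotated = x.rotate_left(3);
    // ...agrees with the old two-shift fallback `(x << n) | (x >> (BITS - n))`.
    assert_eq!(rotated, (x << 3) | (x >> (32 - 3)));
    assert_eq!(rotated, 0x0000_000C);
}
```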
Note that the actual source for all this lies in a // different repository, rust-lang-nursery/stdsimd. That's why the setup here is diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index a0c87f13e5d..c008b78e450 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -350,9 +350,8 @@ macro_rules! try { /// assert_eq!(v, b"s = \"abc 123\""); /// ``` /// -/// Note: This macro can be used in `no_std` setups as well -/// In a `no_std` setup you are responsible for the -/// implementation details of the components. +/// Note: This macro can be used in `no_std` setups as well. +/// In a `no_std` setup you are responsible for the implementation details of the components. /// /// ```no_run /// # extern crate core; @@ -440,7 +439,7 @@ macro_rules! writeln { /// /// If the determination that the code is unreachable proves incorrect, the /// program immediately terminates with a [`panic!`]. The function [`unreachable_unchecked`], -/// which belongs to the [`std::hint`] module, informs the compilier to +/// which belongs to the [`std::hint`] module, informs the compiler to /// optimize the code out of the release version entirely. /// /// [`panic!`]: ../std/macro.panic.html diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 266c6913747..3bcdfabbb24 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -92,6 +92,7 @@ impl<T: ?Sized> !Send for *mut T { } #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] #[rustc_on_unimplemented( + on(parent_trait="std::path::Path", label="borrow the `Path` instead"), message="the size for values of type `{Self}` cannot be known at compilation time", label="doesn't have a size known at compile-time", note="to learn more, visit <https://doc.rust-lang.org/book/second-edition/\ @@ -577,6 +578,7 @@ macro_rules! impls{ /// /// [drop check]: ../../nomicon/dropck.html #[lang = "phantom_data"] +#[structural_match] #[stable(feature = "rust1", since = "1.0.0")] pub struct PhantomData<T:?Sized>; diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 27ee9556bd0..56146a9a5fd 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -202,7 +202,7 @@ pub fn forget<T>(t: T) { /// /// ## Size of Enums /// -/// Enums that carry no data other than the descriminant have the same size as C enums +/// Enums that carry no data other than the discriminant have the same size as C enums /// on the platform they are compiled for. /// /// ## Size of Unions @@ -285,7 +285,7 @@ pub fn forget<T>(t: T) { /// [alignment]: ./fn.align_of.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn size_of<T>() -> usize { intrinsics::size_of::<T>() } @@ -377,7 +377,7 @@ pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn align_of<T>() -> usize { intrinsics::min_align_of::<T>() } @@ -458,19 +458,10 @@ pub fn align_of_val<T: ?Sized>(val: &T) -> usize { #[inline] #[stable(feature = "needs_drop", since = "1.21.0")] #[rustc_const_unstable(feature = "const_needs_drop")] -#[cfg(not(stage0))] pub const fn needs_drop<T>() -> bool { intrinsics::needs_drop::<T>() } -#[inline] -#[stable(feature = "needs_drop", since = "1.21.0")] -#[cfg(stage0)] -/// Ceci n'est pas la documentation -pub fn needs_drop<T>() -> bool { - unsafe { intrinsics::needs_drop::<T>() } -} - /// Creates a value whose bytes are all zero. 
/// /// This has the same effect as allocating space with @@ -818,7 +809,7 @@ pub fn drop<T>(_x: T) { } #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn transmute_copy<T, U>(src: &T) -> U { - ptr::read(src as *const T as *const U) + ptr::read_unaligned(src as *const T as *const U) } /// Opaque type representing the discriminant of an enum. @@ -951,7 +942,6 @@ impl<T> ManuallyDrop<T> { /// ManuallyDrop::new(Box::new(())); /// ``` #[stable(feature = "manually_drop", since = "1.20.0")] - #[rustc_const_unstable(feature = "const_manually_drop_new")] #[inline] pub const fn new(value: T) -> ManuallyDrop<T> { ManuallyDrop { value } @@ -970,9 +960,29 @@ impl<T> ManuallyDrop<T> { /// ``` #[stable(feature = "manually_drop", since = "1.20.0")] #[inline] - pub fn into_inner(slot: ManuallyDrop<T>) -> T { + pub const fn into_inner(slot: ManuallyDrop<T>) -> T { slot.value } + + /// Takes the contained value out. + /// + /// This method is primarily intended for moving out values in drop. + /// Instead of using [`ManuallyDrop::drop`] to manually drop the value, + /// you can use this method to take the value and use it however desired. + /// `Drop` will be invoked on the returned value following normal end-of-scope rules. + /// + /// If you have ownership of the container, you can use [`ManuallyDrop::into_inner`] instead. + /// + /// # Safety + /// + /// This function semantically moves out the contained value without preventing further usage. + /// It is up to the user of this method to ensure that this container is not used again. + #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"] + #[unstable(feature = "manually_drop_take", issue = "55422")] + #[inline] + pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T { + ManuallyDrop::into_inner(ptr::read(slot)) + } } impl<T: ?Sized> ManuallyDrop<T> { @@ -1021,6 +1031,15 @@ pub union MaybeUninit<T> { } impl<T> MaybeUninit<T> { + /// Create a new `MaybeUninit` initialized with the given value. + /// + /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. + /// It is your responsibility to make sure `T` gets dropped if it got initialized. + #[unstable(feature = "maybe_uninit", issue = "53491")] + pub const fn new(val: T) -> MaybeUninit<T> { + MaybeUninit { value: ManuallyDrop::new(val) } + } + /// Create a new `MaybeUninit` in an uninitialized state. /// /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. @@ -1061,7 +1080,7 @@ impl<T> MaybeUninit<T> { /// /// # Unsafety /// - /// It is up to the caller to guarantee that the the `MaybeUninit` really is in an initialized + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. #[unstable(feature = "maybe_uninit", issue = "53491")] pub unsafe fn into_inner(self) -> T { @@ -1072,7 +1091,7 @@ impl<T> MaybeUninit<T> { /// /// # Unsafety /// - /// It is up to the caller to guarantee that the the `MaybeUninit` really is in an initialized + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. 
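To illustrate the move-out-in-`Drop` pattern that the new `ManuallyDrop::take` enables, here is a minimal sketch (the `PoolGuard` type is hypothetical, and the example assumes the `manually_drop_take` gate from this diff):

```rust
#![feature(manually_drop_take)]

use std::mem::ManuallyDrop;

/// Hands its payload back to a pool (here just a `Vec`) when dropped.
struct PoolGuard<'a> {
    value: ManuallyDrop<String>,
    pool: &'a mut Vec<String>,
}

impl Drop for PoolGuard<'_> {
    fn drop(&mut self) {
        // Safety: `drop` runs at most once, and `value` is never touched
        // again afterwards, so moving it out of the `ManuallyDrop` is sound.
        let value = unsafe { ManuallyDrop::take(&mut self.value) };
        self.pool.push(value);
    }
}

fn main() {
    let mut pool = Vec::new();
    {
        let _guard = PoolGuard {
            value: ManuallyDrop::new(String::from("reusable")),
            pool: &mut pool,
        };
    } // `_guard` is dropped here and returns its payload to the pool
    assert_eq!(pool, ["reusable"]);
}
```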
#[unstable(feature = "maybe_uninit", issue = "53491")] pub unsafe fn get_ref(&self) -> &T { @@ -1083,7 +1102,7 @@ impl<T> MaybeUninit<T> { /// /// # Unsafety /// - /// It is up to the caller to guarantee that the the `MaybeUninit` really is in an initialized + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. #[unstable(feature = "maybe_uninit", issue = "53491")] pub unsafe fn get_mut(&mut self) -> &mut T { diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 118e75e1ee7..436cd1fc057 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -10,7 +10,7 @@ //! Exposes the NonZero lang item which provides optimization hints. -use ops::CoerceUnsized; +use ops::{CoerceUnsized, DispatchFromDyn}; /// A wrapper type for raw pointers and integers that will never be /// NULL or 0 that might allow certain optimizations. @@ -20,3 +20,5 @@ use ops::CoerceUnsized; pub(crate) struct NonZero<T>(pub(crate) T); impl<T: CoerceUnsized<U>, U> CoerceUnsized<NonZero<U>> for NonZero<T> {} + +impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<NonZero<U>> for NonZero<T> {} diff --git a/src/libcore/num/flt2dec/estimator.rs b/src/libcore/num/flt2dec/estimator.rs index d42e05a91f1..4e33fcfd76e 100644 --- a/src/libcore/num/flt2dec/estimator.rs +++ b/src/libcore/num/flt2dec/estimator.rs @@ -22,4 +22,3 @@ pub fn estimate_scaling_factor(mant: u64, exp: i16) -> i16 { // therefore this always underestimates (or is exact), but not much. (((nbits + exp as i64) * 1292913986) >> 32) as i16 } - diff --git a/src/libcore/num/flt2dec/mod.rs b/src/libcore/num/flt2dec/mod.rs index 21a2e72dac8..d58015beecb 100644 --- a/src/libcore/num/flt2dec/mod.rs +++ b/src/libcore/num/flt2dec/mod.rs @@ -658,4 +658,3 @@ pub fn to_exact_fixed_str<'a, T, F>(mut format_exact: F, v: T, } } } - diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 772502cc800..30b7b454684 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -216,7 +216,7 @@ $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn min_value() -> Self { !0 ^ ((!0 as $UnsignedT) >> 1) as Self } @@ -235,7 +235,7 @@ $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn max_value() -> Self { !Self::min_value() } @@ -2152,6 +2152,7 @@ Basic usage: ", $Feature, "assert_eq!(", stringify!($SelfT), "::min_value(), 0);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_promotable] #[inline] pub const fn min_value() -> Self { 0 } } @@ -2168,6 +2169,7 @@ Basic usage: stringify!($MaxV), ");", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_promotable] #[inline] pub const fn max_value() -> Self { !0 } } @@ -2301,7 +2303,12 @@ assert_eq!(n.rotate_left(", $rot, "), m); #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_left(self, n: u32) -> Self { - (self << (n % $BITS)) | (self >> (($BITS - (n % $BITS)) % $BITS)) + #[cfg(not(stage0))] { + unsafe { intrinsics::rotate_left(self, n as $SelfT) } + } + #[cfg(stage0)] { + (self << (n % $BITS)) | (self >> (($BITS - (n % $BITS)) % $BITS)) + } } } @@ -2326,7 +2333,12 @@ assert_eq!(n.rotate_right(", $rot, "), m); #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_right(self, n: u32) 
-> Self { - (self >> (n % $BITS)) | (self << (($BITS - (n % $BITS)) % $BITS)) + #[cfg(not(stage0))] { + unsafe { intrinsics::rotate_right(self, n as $SelfT) } + } + #[cfg(stage0)] { + (self >> (n % $BITS)) | (self << (($BITS - (n % $BITS)) % $BITS)) + } } } diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index 1c826c2fa76..00134a58d30 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -387,7 +387,7 @@ assert_eq!(n.count_ones(), 3); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn count_ones(self) -> u32 { + pub const fn count_ones(self) -> u32 { self.0.count_ones() } } @@ -407,7 +407,7 @@ assert_eq!(Wrapping(!0", stringify!($t), ").count_zeros(), 0); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn count_zeros(self) -> u32 { + pub const fn count_zeros(self) -> u32 { self.0.count_zeros() } } @@ -430,7 +430,7 @@ assert_eq!(n.trailing_zeros(), 3); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn trailing_zeros(self) -> u32 { + pub const fn trailing_zeros(self) -> u32 { self.0.trailing_zeros() } } @@ -456,7 +456,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn rotate_left(self, n: u32) -> Self { + pub const fn rotate_left(self, n: u32) -> Self { Wrapping(self.0.rotate_left(n)) } @@ -481,7 +481,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn rotate_right(self, n: u32) -> Self { + pub const fn rotate_right(self, n: u32) -> Self { Wrapping(self.0.rotate_right(n)) } @@ -505,7 +505,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn swap_bytes(self) -> Self { + pub const fn swap_bytes(self) -> Self { Wrapping(self.0.swap_bytes()) } @@ -532,7 +532,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[unstable(feature = "reverse_bits", issue = "48763")] #[inline] - pub fn reverse_bits(self) -> Self { + pub const fn reverse_bits(self) -> Self { Wrapping(self.0.reverse_bits()) } @@ -560,7 +560,7 @@ if cfg!(target_endian = \"big\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn from_be(x: Self) -> Self { + pub const fn from_be(x: Self) -> Self { Wrapping(<$t>::from_be(x.0)) } } @@ -589,7 +589,7 @@ if cfg!(target_endian = \"little\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn from_le(x: Self) -> Self { + pub const fn from_le(x: Self) -> Self { Wrapping(<$t>::from_le(x.0)) } } @@ -618,7 +618,7 @@ if cfg!(target_endian = \"big\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn to_be(self) -> Self { + pub const fn to_be(self) -> Self { Wrapping(self.0.to_be()) } } @@ -647,7 +647,7 @@ if cfg!(target_endian = \"little\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn to_le(self) -> Self { + pub const fn to_le(self) -> Self { Wrapping(self.0.to_le()) } } @@ -707,7 +707,7 @@ assert_eq!(n.leading_zeros(), 3); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn leading_zeros(self) -> u32 { + pub const fn leading_zeros(self) -> u32 { self.0.leading_zeros() } } @@ -784,7 +784,7 @@ assert!(!Wrapping(-10", stringify!($t), ").is_positive()); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn 
is_positive(self) -> bool { + pub const fn is_positive(self) -> bool { self.0.is_positive() } } @@ -806,7 +806,7 @@ assert!(!Wrapping(10", stringify!($t), ").is_negative()); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn is_negative(self) -> bool { + pub const fn is_negative(self) -> bool { self.0.is_negative() } } @@ -836,7 +836,7 @@ assert_eq!(n.leading_zeros(), 2); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn leading_zeros(self) -> u32 { + pub const fn leading_zeros(self) -> u32 { self.0.leading_zeros() } } diff --git a/src/libcore/ops/mod.rs b/src/libcore/ops/mod.rs index ce4f45762de..edfa6df11ac 100644 --- a/src/libcore/ops/mod.rs +++ b/src/libcore/ops/mod.rs @@ -201,3 +201,6 @@ pub use self::generator::{Generator, GeneratorState}; #[unstable(feature = "coerce_unsized", issue = "27732")] pub use self::unsize::CoerceUnsized; + +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +pub use self::unsize::DispatchFromDyn; diff --git a/src/libcore/ops/range.rs b/src/libcore/ops/range.rs index 07ba285ea5c..908490e1c83 100644 --- a/src/libcore/ops/range.rs +++ b/src/libcore/ops/range.rs @@ -304,7 +304,7 @@ impl<Idx: PartialOrd<Idx>> RangeTo<Idx> { } } -/// An range bounded inclusively below and above (`start..=end`). +/// A range bounded inclusively below and above (`start..=end`). /// /// The `RangeInclusive` `start..=end` contains all values with `x >= start` /// and `x <= end`. It is empty unless `start <= end`. @@ -391,7 +391,7 @@ impl<Idx> RangeInclusive<Idx> { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn new(start: Idx, end: Idx) -> Self { Self { start, end, is_empty: None } } @@ -416,7 +416,7 @@ impl<Idx> RangeInclusive<Idx> { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn start(&self) -> &Idx { + pub const fn start(&self) -> &Idx { &self.start } @@ -440,7 +440,7 @@ impl<Idx> RangeInclusive<Idx> { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn end(&self) -> &Idx { + pub const fn end(&self) -> &Idx { &self.end } diff --git a/src/libcore/ops/unsize.rs b/src/libcore/ops/unsize.rs index da72f374842..4d9a40a1b90 100644 --- a/src/libcore/ops/unsize.rs +++ b/src/libcore/ops/unsize.rs @@ -43,7 +43,7 @@ use marker::Unsize; /// [nomicon-coerce]: ../../nomicon/coercions.html #[unstable(feature = "coerce_unsized", issue = "27732")] #[lang = "coerce_unsized"] -pub trait CoerceUnsized<T> { +pub trait CoerceUnsized<T: ?Sized> { // Empty. } @@ -77,3 +77,37 @@ impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {} // *const T -> *const U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} + + +/// This is used for object safety, to check that a method's receiver type can be dispatched on. +/// +/// example impl: +/// +/// ``` +/// # #![feature(dispatch_from_dyn, unsize)] +/// # use std::{ops::DispatchFromDyn, marker::Unsize}; +/// # struct Rc<T: ?Sized>(::std::rc::Rc<T>); +/// impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> +/// where +/// T: Unsize<U>, +/// {} +/// ``` +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +#[cfg_attr(not(stage0), lang = "dispatch_from_dyn")] +pub trait DispatchFromDyn<T> { + // Empty. 
+} + +// &T -> &U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {} +// &mut T -> &mut U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {} +// *const T -> *const U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {} +// *mut T -> *mut U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {} + diff --git a/src/libcore/pin.rs b/src/libcore/pin.rs index 0224560af4c..308dd9c79fa 100644 --- a/src/libcore/pin.rs +++ b/src/libcore/pin.rs @@ -3,7 +3,7 @@ //! It is sometimes useful to have objects that are guaranteed to not move, //! in the sense that their placement in memory does not change, and can thus be relied upon. //! -//! A prime example of such a scenario would be building self-referencial structs, +//! A prime example of such a scenario would be building self-referential structs, //! since moving an object with pointers to itself will invalidate them, //! which could cause undefined behavior. //! @@ -39,7 +39,7 @@ //! use std::marker::Pinned; //! use std::ptr::NonNull; //! -//! // This is a self referencial struct since the slice field points to the data field. +//! // This is a self-referential struct since the slice field points to the data field. //! // We cannot inform the compiler about that with a normal reference, //! // since this pattern cannot be described with the usual borrowing rules. //! // Instead we use a raw pointer, though one which is known to not be null, @@ -91,7 +91,7 @@ use fmt; use marker::Sized; -use ops::{Deref, DerefMut, CoerceUnsized}; +use ops::{Deref, DerefMut, CoerceUnsized, DispatchFromDyn}; #[doc(inline)] pub use marker::Unpin; @@ -102,7 +102,7 @@ pub use marker::Unpin; /// value in place, preventing the value referenced by that pointer from being moved /// unless it implements [`Unpin`]. /// -/// See the [`pin` module] documentation for furthur explanation on pinning. +/// See the [`pin` module] documentation for further explanation on pinning. /// /// [`Unpin`]: ../../std/marker/trait.Unpin.html /// [`pin` module]: ../../std/pin/index.html @@ -325,4 +325,10 @@ where {} #[unstable(feature = "pin", issue = "49150")] +impl<'a, P, U> DispatchFromDyn<Pin<U>> for Pin<P> +where + P: DispatchFromDyn<U>, +{} + +#[unstable(feature = "pin", issue = "49150")] impl<P> Unpin for Pin<P> {} diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 1c761ba21b3..a7bfc3f5124 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -38,7 +38,7 @@ //! underlying object is live and no reference (just raw pointers) is used to //! access the same memory. //! -//! These axioms, along with careful use of [`offset`] for pointer arithmentic, +//! These axioms, along with careful use of [`offset`] for pointer arithmetic, //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees //! will be provided eventually, as the [aliasing] rules are being determined. For more //! 
information, see the [book] as well as the section in the reference devoted @@ -75,7 +75,7 @@ use convert::From; use intrinsics; -use ops::CoerceUnsized; +use ops::{CoerceUnsized, DispatchFromDyn}; use fmt; use hash; use marker::{PhantomData, Unsize}; @@ -120,7 +120,7 @@ pub use intrinsics::write_bytes; /// /// Additionally, if `T` is not [`Copy`], using the pointed-to value after /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop = -/// foo` counts as a use because it will cause the the value to be dropped +/// foo` counts as a use because it will cause the value to be dropped /// again. [`write`] can be used to overwrite data without causing it to be /// dropped. /// @@ -209,7 +209,7 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn null<T>() -> *const T { 0 as *const T } /// Creates a null mutable raw pointer. @@ -224,7 +224,7 @@ pub const fn null<T>() -> *const T { 0 as *const T } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn null_mut<T>() -> *mut T { 0 as *mut T } /// Swaps the values at two mutable locations of the same type, without @@ -371,7 +371,7 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) { #[inline] unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { // The approach here is to utilize simd to swap x & y efficiently. Testing reveals - // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel + // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel // Haswell E processors. LLVM is more able to optimize if we give a struct a // #[repr(simd)], even if we don't actually use this struct directly. // @@ -1005,7 +1005,7 @@ impl<T: ?Sized> *const T { /// # Null-unchecked version /// /// If you are sure the pointer can never be null and are looking for some kind of - /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>, know that you can + /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can /// dereference the pointer directly. /// /// ``` @@ -1625,7 +1625,7 @@ impl<T: ?Sized> *mut T { /// # Null-unchecked version /// /// If you are sure the pointer can never be null and are looking for some kind of - /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>, know that you can + /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can /// dereference the pointer directly. /// /// ``` @@ -2796,6 +2796,9 @@ impl<T: ?Sized> Copy for Unique<T> { } impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { } #[unstable(feature = "ptr_internals", issue = "0")] +impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> { } + +#[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Pointer for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) @@ -2867,6 +2870,7 @@ impl<T: Sized> NonNull<T> { /// sentinel value. Types that lazily allocate must track initialization by /// some other means. 
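A short sketch of the lazily-allocating pattern the `NonNull::dangling` docs above describe (the `LazyBuf` type is hypothetical): the placeholder pointer is well aligned and non-null, so the field needs no `Option`, while a separate field tracks whether allocation has happened.

```rust
use std::ptr::NonNull;

// `NonNull::dangling()` is a well-aligned, non-null placeholder, so the
// pointer field itself never needs to be `Option<NonNull<u8>>`.
struct LazyBuf {
    ptr: NonNull<u8>,
    cap: usize, // 0 means "not allocated yet"; initialization is tracked here
}

impl LazyBuf {
    fn new() -> Self {
        LazyBuf { ptr: NonNull::dangling(), cap: 0 }
    }
}

fn main() {
    let buf = LazyBuf::new();
    assert_eq!(buf.cap, 0); // nothing allocated; `buf.ptr` must not be read
}
```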
#[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub fn dangling() -> Self { unsafe { let ptr = mem::align_of::<T>() as *mut T; @@ -2882,12 +2886,14 @@ impl<T: ?Sized> NonNull<T> { /// /// `ptr` must be non-null. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { NonNull { pointer: NonZero(ptr as _) } } /// Creates a new `NonNull` if `ptr` is non-null. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(NonNull { pointer: NonZero(ptr as _) }) @@ -2898,7 +2904,8 @@ impl<T: ?Sized> NonNull<T> { /// Acquires the underlying `*mut` pointer. #[stable(feature = "nonnull", since = "1.25.0")] - pub fn as_ptr(self) -> *mut T { + #[inline] + pub const fn as_ptr(self) -> *mut T { self.pointer.0 as *mut T } @@ -2908,6 +2915,7 @@ impl<T: ?Sized> NonNull<T> { /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } @@ -2918,12 +2926,14 @@ impl<T: ?Sized> NonNull<T> { /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } /// Cast to a pointer of another type #[stable(feature = "nonnull_cast", since = "1.27.0")] + #[inline] pub fn cast<U>(self) -> NonNull<U> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) @@ -2944,6 +2954,9 @@ impl<T: ?Sized> Copy for NonNull<T> { } #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { } +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> { } + #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Debug for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -2963,6 +2976,7 @@ impl<T: ?Sized> Eq for NonNull<T> {} #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialEq for NonNull<T> { + #[inline] fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } @@ -2970,6 +2984,7 @@ impl<T: ?Sized> PartialEq for NonNull<T> { #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Ord for NonNull<T> { + #[inline] fn cmp(&self, other: &Self) -> Ordering { self.as_ptr().cmp(&other.as_ptr()) } @@ -2977,6 +2992,7 @@ impl<T: ?Sized> Ord for NonNull<T> { #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialOrd for NonNull<T> { + #[inline] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_ptr().partial_cmp(&other.as_ptr()) } @@ -2984,6 +3000,7 @@ impl<T: ?Sized> PartialOrd for NonNull<T> { #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> hash::Hash for NonNull<T> { + #[inline] fn hash<H: hash::Hasher>(&self, state: &mut H) { self.as_ptr().hash(state) } @@ -2991,6 +3008,7 @@ impl<T: ?Sized> hash::Hash for NonNull<T> { #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> From<Unique<T>> for NonNull<T> { + #[inline] fn from(unique: Unique<T>) -> Self { NonNull { pointer: unique.pointer } } @@ -2998,6 +3016,7 @@ impl<T: ?Sized> From<Unique<T>> for NonNull<T> { #[stable(feature = "nonnull", since = "1.25.0")] 
impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> { + #[inline] fn from(reference: &'a mut T) -> Self { NonNull { pointer: NonZero(reference as _) } } @@ -3005,6 +3024,7 @@ impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> { #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a T> for NonNull<T> { + #[inline] fn from(reference: &'a T) -> Self { NonNull { pointer: NonZero(reference as _) } } diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index c9013f589ed..fece328f51f 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -385,7 +385,6 @@ impl<T> [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[rustc_const_unstable(feature = "const_slice_as_ptr")] pub const fn as_ptr(&self) -> *const T { self as *const [T] as *const T } @@ -620,13 +619,15 @@ impl<T> [T] { Windows { v: self, size } } - /// Returns an iterator over `chunk_size` elements of the slice at a - /// time. The chunks are slices and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last chunk will - /// not have length `chunk_size`. + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. /// - /// See [`chunks_exact`] for a variant of this iterator that returns chunks - /// of always exactly `chunk_size` elements. + /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the + /// slice, then the last chunk will not have length `chunk_size`. + /// + /// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly + /// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the + /// slice. /// /// # Panics /// @@ -644,6 +645,7 @@ impl<T> [T] { /// ``` /// /// [`chunks_exact`]: #method.chunks_exact + /// [`rchunks`]: #method.rchunks #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn chunks(&self, chunk_size: usize) -> Chunks<T> { @@ -651,13 +653,15 @@ impl<T> [T] { Chunks { v: self, chunk_size } } - /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable slices, and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last chunk will not - /// have length `chunk_size`. + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the + /// length of the slice, then the last chunk will not have length `chunk_size`. /// - /// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks - /// of always exactly `chunk_size` elements. + /// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always + /// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at + /// the end of the slice. /// /// # Panics /// @@ -679,6 +683,7 @@ impl<T> [T] { /// ``` /// /// [`chunks_exact_mut`]: #method.chunks_exact_mut + /// [`rchunks_mut`]: #method.rchunks_mut #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> { @@ -686,15 +691,19 @@ impl<T> [T] { ChunksMut { v: self, chunk_size } } - /// Returns an iterator over `chunk_size` elements of the slice at a - /// time. The chunks are slices and do not overlap. 
If `chunk_size` does - /// not divide the length of the slice, then the last up to `chunk_size-1` - /// elements will be omitted and can be retrieved from the `remainder` - /// function of the iterator. + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the + /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved + /// from the `remainder` function of the iterator. + /// + /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the + /// resulting code better than in the case of [`chunks`]. - /// Due to each chunk having exactly `chunk_size` elements, the compiler - /// can often optimize the resulting code better than in the case of - /// [`chunks`]. + /// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller + /// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of + /// the slice. /// /// # Panics /// @@ -703,17 +712,17 @@ impl<T> [T] { /// # Examples /// /// ``` - /// #![feature(chunks_exact)] - /// /// let slice = ['l', 'o', 'r', 'e', 'm']; /// let mut iter = slice.chunks_exact(2); /// assert_eq!(iter.next().unwrap(), &['l', 'o']); /// assert_eq!(iter.next().unwrap(), &['r', 'e']); /// assert!(iter.next().is_none()); + /// assert_eq!(iter.remainder(), &['m']); /// ``` /// /// [`chunks`]: #method.chunks - #[unstable(feature = "chunks_exact", issue = "47115")] + /// [`rchunks_exact`]: #method.rchunks_exact + #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<T> { assert!(chunk_size != 0); @@ -723,15 +732,19 @@ impl<T> [T] { ChunksExact { v: fst, rem: snd, chunk_size } } - /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable slices, and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last up to `chunk_size-1` - /// elements will be omitted and can be retrieved from the `into_remainder` - /// function of the iterator. + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the + /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be + /// retrieved from the `into_remainder` function of the iterator. /// - /// Due to each chunk having exactly `chunk_size` elements, the compiler - /// can often optimize the resulting code better than in the case of - /// [`chunks_mut`]. + /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the + /// resulting code better than in the case of [`chunks_mut`]. + /// + /// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a + /// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of + /// the slice. 
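The optimization claim in the `chunks_exact` docs above can be made concrete: every yielded chunk is known to have exactly `chunk_size` elements, so in-chunk indexing cannot go out of bounds. A small sketch (the `sum_in_fours` helper is hypothetical):

```rust
// Every slice yielded by `chunks_exact(4)` has length exactly 4, so the
// indexing below can never panic and the bounds checks can be elided;
// with `chunks(4)` the last chunk may be shorter and the checks remain.
fn sum_in_fours(data: &[u32]) -> u32 {
    let mut total = 0;
    for chunk in data.chunks_exact(4) {
        total += chunk[0] + chunk[1] + chunk[2] + chunk[3];
    }
    total
}

fn main() {
    // The trailing `5` does not fill a chunk and ends up in the remainder.
    assert_eq!(sum_in_fours(&[1, 2, 3, 4, 5]), 10);
}
```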
/// /// # Panics /// /// Panics if `chunk_size` is 0. /// /// # Examples /// /// ``` - /// #![feature(chunks_exact)] - /// /// let v = &mut [0, 0, 0, 0, 0]; /// let mut count = 1; /// @@ -755,7 +766,8 @@ impl<T> [T] { /// ``` /// /// [`chunks_mut`]: #method.chunks_mut - #[unstable(feature = "chunks_exact", issue = "47115")] + /// [`rchunks_exact_mut`]: #method.rchunks_exact_mut + #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<T> { assert!(chunk_size != 0); @@ -765,6 +777,162 @@ impl<T> [T] { ChunksExactMut { v: fst, rem: snd, chunk_size } } + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the + /// slice, then the last chunk will not have length `chunk_size`. + /// + /// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly + /// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning + /// of the slice. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let slice = ['l', 'o', 'r', 'e', 'm']; + /// let mut iter = slice.rchunks(2); + /// assert_eq!(iter.next().unwrap(), &['e', 'm']); + /// assert_eq!(iter.next().unwrap(), &['o', 'r']); + /// assert_eq!(iter.next().unwrap(), &['l']); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// [`rchunks_exact`]: #method.rchunks_exact + /// [`chunks`]: #method.chunks + #[stable(feature = "rchunks", since = "1.31.0")] + #[inline] + pub fn rchunks(&self, chunk_size: usize) -> RChunks<T> { + assert!(chunk_size != 0); + RChunks { v: self, chunk_size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the + /// length of the slice, then the last chunk will not have length `chunk_size`. + /// + /// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always + /// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the + /// beginning of the slice. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let v = &mut [0, 0, 0, 0, 0]; + /// let mut count = 1; + /// + /// for chunk in v.rchunks_mut(2) { + /// for elem in chunk.iter_mut() { + /// *elem += count; + /// } + /// count += 1; + /// } + /// assert_eq!(v, &[3, 2, 2, 1, 1]); + /// ``` + /// + /// [`rchunks_exact_mut`]: #method.rchunks_exact_mut + /// [`chunks_mut`]: #method.chunks_mut + #[stable(feature = "rchunks", since = "1.31.0")] + #[inline] + pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<T> { + assert!(chunk_size != 0); + RChunksMut { v: self, chunk_size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// end of the slice. + /// + /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the + /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved + /// from the `remainder` function of the iterator. + /// + /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the + /// resulting code better than in the case of [`rchunks`]. 
+ /// + /// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller + /// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the + /// slice. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let slice = ['l', 'o', 'r', 'e', 'm']; + /// let mut iter = slice.rchunks_exact(2); + /// assert_eq!(iter.next().unwrap(), &['e', 'm']); + /// assert_eq!(iter.next().unwrap(), &['o', 'r']); + /// assert!(iter.next().is_none()); + /// assert_eq!(iter.remainder(), &['l']); + /// ``` + /// + /// [`rchunks`]: #method.rchunks + /// [`chunks_exact`]: #method.chunks_exact + #[stable(feature = "rchunks", since = "1.31.0")] + #[inline] + pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<T> { + assert!(chunk_size != 0); + let rem = self.len() % chunk_size; + let (fst, snd) = self.split_at(rem); + RChunksExact { v: snd, rem: fst, chunk_size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the + /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be + /// retrieved from the `into_remainder` function of the iterator. + /// + /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the + /// resulting code better than in the case of [`rchunks_mut`]. + /// + /// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a + /// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning + /// of the slice. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let v = &mut [0, 0, 0, 0, 0]; + /// let mut count = 1; + /// + /// for chunk in v.rchunks_exact_mut(2) { + /// for elem in chunk.iter_mut() { + /// *elem += count; + /// } + /// count += 1; + /// } + /// assert_eq!(v, &[0, 2, 2, 1, 1]); + /// ``` + /// + /// [`rchunks_mut`]: #method.rchunks_mut + /// [`chunks_exact_mut`]: #method.chunks_exact_mut + #[stable(feature = "rchunks", since = "1.31.0")] + #[inline] + pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<T> { + assert!(chunk_size != 0); + let rem = self.len() % chunk_size; + let (fst, snd) = self.split_at_mut(rem); + RChunksExactMut { v: snd, rem: fst, chunk_size } + } + /// Divides one slice into two at an index. /// /// The first will contain all indices from `[0, mid)` (excluding @@ -3581,7 +3749,7 @@ unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> { } /// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a -/// time). +/// time), starting at the beginning of the slice. /// /// When the slice len is not evenly divided by the chunk size, the last slice /// of the iteration will be the remainder. @@ -3708,8 +3876,10 @@ unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> { } /// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` -/// elements at a time). When the slice len is not evenly divided by the chunk -/// size, the last slice of the iteration will be the remainder. +/// elements at a time), starting at the beginning of the slice. +/// +/// When the slice len is not evenly divided by the chunk size, the last slice +/// of the iteration will be the remainder. 
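One behavioral difference of the `rchunks_exact` family added above is worth spelling out: because iteration walks from the end, the leftover elements sit at the front of the slice rather than the back. A quick check:

```rust
fn main() {
    let data = [1, 2, 3, 4, 5];
    let mut iter = data.rchunks_exact(2);
    assert_eq!(iter.next(), Some(&[4, 5][..]));
    assert_eq!(iter.next(), Some(&[2, 3][..]));
    assert_eq!(iter.next(), None);
    // The element that did not fit is the *first* one, not the last.
    assert_eq!(iter.remainder(), &[1]);
}
```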
/// /// This struct is created by the [`chunks_mut`] method on [slices]. /// @@ -3827,7 +3997,7 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> { } /// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a -/// time). +/// time), starting at the beginning of the slice. /// /// When the slice len is not evenly divided by the chunk size, the last /// up to `chunk_size-1` elements will be omitted but can be retrieved from @@ -3839,25 +4009,25 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> { /// [`remainder`]: ../../std/slice/struct.ChunksExact.html#method.remainder /// [slices]: ../../std/primitive.slice.html #[derive(Debug)] -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] pub struct ChunksExact<'a, T:'a> { v: &'a [T], rem: &'a [T], chunk_size: usize } -#[unstable(feature = "chunks_exact", issue = "47115")] impl<'a, T> ChunksExact<'a, T> { /// Return the remainder of the original slice that is not going to be /// returned by the iterator. The returned slice has at most `chunk_size-1` /// elements. + #[stable(feature = "chunks_exact", since = "1.31.0")] pub fn remainder(&self) -> &'a [T] { self.rem } } // FIXME(#26925) Remove in favor of `#[derive(Clone)]` -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<T> Clone for ChunksExact<'_, T> { fn clone(&self) -> Self { ChunksExact { @@ -3868,7 +4038,7 @@ impl<T> Clone for ChunksExact<'_, T> { } } -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<'a, T> Iterator for ChunksExact<'a, T> { type Item = &'a [T]; @@ -3913,7 +4083,7 @@ impl<'a, T> Iterator for ChunksExact<'a, T> { } } -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> { #[inline] fn next_back(&mut self) -> Option<&'a [T]> { @@ -3927,7 +4097,7 @@ impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> { } } -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<T> ExactSizeIterator for ChunksExact<'_, T> { fn is_empty(&self) -> bool { self.v.is_empty() @@ -3937,10 +4107,11 @@ impl<T> ExactSizeIterator for ChunksExact<'_, T> { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<T> TrustedLen for ChunksExact<'_, T> {} -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<T> FusedIterator for ChunksExact<'_, T> {} #[doc(hidden)] +#[stable(feature = "chunks_exact", since = "1.31.0")] unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> { unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] { let start = i * self.chunk_size; @@ -3950,7 +4121,7 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> { } /// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` -/// elements at a time). +/// elements at a time), starting at the beginning of the slice. 
/// /// When the slice len is not evenly divided by the chunk size, the last up to /// `chunk_size-1` elements will be omitted but can be retrieved from the @@ -3962,24 +4133,24 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> { /// [`into_remainder`]: ../../std/slice/struct.ChunksExactMut.html#method.into_remainder /// [slices]: ../../std/primitive.slice.html #[derive(Debug)] -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] pub struct ChunksExactMut<'a, T:'a> { v: &'a mut [T], rem: &'a mut [T], chunk_size: usize } -#[unstable(feature = "chunks_exact", issue = "47115")] impl<'a, T> ChunksExactMut<'a, T> { /// Return the remainder of the original slice that is not going to be /// returned by the iterator. The returned slice has at most `chunk_size-1` /// elements. + #[stable(feature = "chunks_exact", since = "1.31.0")] pub fn into_remainder(self) -> &'a mut [T] { self.rem } } -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<'a, T> Iterator for ChunksExactMut<'a, T> { type Item = &'a mut [T]; @@ -4026,7 +4197,7 @@ impl<'a, T> Iterator for ChunksExactMut<'a, T> { } } -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> { #[inline] fn next_back(&mut self) -> Option<&'a mut [T]> { @@ -4042,7 +4213,7 @@ impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> { } } -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<T> ExactSizeIterator for ChunksExactMut<'_, T> { fn is_empty(&self) -> bool { self.v.is_empty() @@ -4052,10 +4223,11 @@ impl<T> ExactSizeIterator for ChunksExactMut<'_, T> { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<T> TrustedLen for ChunksExactMut<'_, T> {} -#[unstable(feature = "chunks_exact", issue = "47115")] +#[stable(feature = "chunks_exact", since = "1.31.0")] impl<T> FusedIterator for ChunksExactMut<'_, T> {} #[doc(hidden)] +#[stable(feature = "chunks_exact", since = "1.31.0")] unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> { unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] { let start = i * self.chunk_size; @@ -4064,6 +4236,505 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> { fn may_have_side_effect() -> bool { false } } +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the end of the slice. +/// +/// When the slice len is not evenly divided by the chunk size, the last slice +/// of the iteration will be the remainder. +/// +/// This struct is created by the [`rchunks`] method on [slices]. 
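Since `into_remainder` takes the iterator by value, a caller typically drains the full chunks first and only then consumes the iterator for the tail. A sketch of that pattern with the just-stabilized `chunks_exact_mut` (illustrative, not part of the diff), before the reverse iterators below:

```rust
fn main() {
    let mut v = [0, 0, 0, 0, 0];
    let mut it = v.chunks_exact_mut(2);

    // Drain the full chunks through a mutable borrow of the iterator.
    for chunk in it.by_ref() {
        for elem in chunk.iter_mut() {
            *elem = 1;
        }
    }
    // Then consume the iterator to get at the leftover tail.
    for elem in it.into_remainder() {
        *elem = 9;
    }
    assert_eq!(v, [1, 1, 1, 1, 9]);
}
```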
+/// +/// [`rchunks`]: ../../std/primitive.slice.html#method.rchunks +/// [slices]: ../../std/primitive.slice.html +#[derive(Debug)] +#[stable(feature = "rchunks", since = "1.31.0")] +pub struct RChunks<'a, T:'a> { + v: &'a [T], + chunk_size: usize +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> Clone for RChunks<'a, T> { + fn clone(&self) -> RChunks<'a, T> { + RChunks { + v: self.v, + chunk_size: self.chunk_size, + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> Iterator for RChunks<'a, T> { + type Item = &'a [T]; + + #[inline] + fn next(&mut self) -> Option<&'a [T]> { + if self.v.is_empty() { + None + } else { + let chunksz = cmp::min(self.v.len(), self.chunk_size); + let (fst, snd) = self.v.split_at(self.v.len() - chunksz); + self.v = fst; + Some(snd) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + if self.v.is_empty() { + (0, Some(0)) + } else { + let n = self.v.len() / self.chunk_size; + let rem = self.v.len() % self.chunk_size; + let n = if rem > 0 { n+1 } else { n }; + (n, Some(n)) + } + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option<Self::Item> { + let (end, overflow) = n.overflowing_mul(self.chunk_size); + if end >= self.v.len() || overflow { + self.v = &[]; + None + } else { + // Can't underflow because of the check above + let end = self.v.len() - end; + let start = match end.checked_sub(self.chunk_size) { + Some(sum) => sum, + None => 0, + }; + let nth = &self.v[start..end]; + self.v = &self.v[0..start]; + Some(nth) + } + } + + #[inline] + fn last(self) -> Option<Self::Item> { + if self.v.is_empty() { + None + } else { + let rem = self.v.len() % self.chunk_size; + let end = if rem == 0 { self.chunk_size } else { rem }; + Some(&self.v[0..end]) + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> DoubleEndedIterator for RChunks<'a, T> { + #[inline] + fn next_back(&mut self) -> Option<&'a [T]> { + if self.v.is_empty() { + None + } else { + let remainder = self.v.len() % self.chunk_size; + let chunksz = if remainder != 0 { remainder } else { self.chunk_size }; + let (fst, snd) = self.v.split_at(chunksz); + self.v = snd; + Some(fst) + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> ExactSizeIterator for RChunks<'a, T> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for RChunks<'a, T> {} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> FusedIterator for RChunks<'a, T> {} + +#[doc(hidden)] +#[stable(feature = "rchunks", since = "1.31.0")] +unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> { + unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] { + let end = self.v.len() - i * self.chunk_size; + let start = match end.checked_sub(self.chunk_size) { + None => 0, + Some(start) => start, + }; + from_raw_parts(self.v.as_ptr().add(start), end - start) + } + fn may_have_side_effect() -> bool { false } +} + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the end of the slice. +/// +/// When the slice len is not evenly divided by the chunk size, the last slice +/// of the iteration will be the remainder. +/// +/// This struct is created by the [`rchunks_mut`] method on [slices]. 
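The `next`/`next_back`/`nth` implementations above give `rchunks` its characteristic shape: chunks are counted from the back, and the short front remainder is the *last* chunk yielded. A sketch (illustrative, not part of the diff):

```rust
fn main() {
    let v = [0, 1, 2, 3, 4];
    let mut it = v.rchunks(2);

    // Chunks come from the back of the slice...
    assert_eq!(it.next(), Some(&[3, 4][..]));
    assert_eq!(it.next(), Some(&[1, 2][..]));
    // ...and the short remainder at the front comes last.
    assert_eq!(it.next(), Some(&[0][..]));
    assert_eq!(it.next(), None);

    // `next_back` therefore starts with that short front chunk.
    assert_eq!(v.rchunks(2).next_back(), Some(&[0][..]));

    // `nth` skips whole chunks, still counting from the back.
    assert_eq!(v.rchunks(2).nth(2), Some(&[0][..]));
}
```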
+/// +/// [`rchunks_mut`]: ../../std/primitive.slice.html#method.rchunks_mut +/// [slices]: ../../std/primitive.slice.html +#[derive(Debug)] +#[stable(feature = "rchunks", since = "1.31.0")] +pub struct RChunksMut<'a, T:'a> { + v: &'a mut [T], + chunk_size: usize +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> Iterator for RChunksMut<'a, T> { + type Item = &'a mut [T]; + + #[inline] + fn next(&mut self) -> Option<&'a mut [T]> { + if self.v.is_empty() { + None + } else { + let sz = cmp::min(self.v.len(), self.chunk_size); + let tmp = mem::replace(&mut self.v, &mut []); + let tmp_len = tmp.len(); + let (head, tail) = tmp.split_at_mut(tmp_len - sz); + self.v = head; + Some(tail) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + if self.v.is_empty() { + (0, Some(0)) + } else { + let n = self.v.len() / self.chunk_size; + let rem = self.v.len() % self.chunk_size; + let n = if rem > 0 { n + 1 } else { n }; + (n, Some(n)) + } + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option<&'a mut [T]> { + let (end, overflow) = n.overflowing_mul(self.chunk_size); + if end >= self.v.len() || overflow { + self.v = &mut []; + None + } else { + // Can't underflow because of the check above + let end = self.v.len() - end; + let start = match end.checked_sub(self.chunk_size) { + Some(sum) => sum, + None => 0, + }; + let tmp = mem::replace(&mut self.v, &mut []); + let (head, tail) = tmp.split_at_mut(start); + let (nth, _) = tail.split_at_mut(end - start); + self.v = head; + Some(nth) + } + } + + #[inline] + fn last(self) -> Option<Self::Item> { + if self.v.is_empty() { + None + } else { + let rem = self.v.len() % self.chunk_size; + let end = if rem == 0 { self.chunk_size } else { rem }; + Some(&mut self.v[0..end]) + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> { + #[inline] + fn next_back(&mut self) -> Option<&'a mut [T]> { + if self.v.is_empty() { + None + } else { + let remainder = self.v.len() % self.chunk_size; + let sz = if remainder != 0 { remainder } else { self.chunk_size }; + let tmp = mem::replace(&mut self.v, &mut []); + let (head, tail) = tmp.split_at_mut(sz); + self.v = tail; + Some(head) + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> ExactSizeIterator for RChunksMut<'a, T> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for RChunksMut<'a, T> {} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> FusedIterator for RChunksMut<'a, T> {} + +#[doc(hidden)] +#[stable(feature = "rchunks", since = "1.31.0")] +unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> { + unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] { + let end = self.v.len() - i * self.chunk_size; + let start = match end.checked_sub(self.chunk_size) { + None => 0, + Some(start) => start, + }; + from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) + } + fn may_have_side_effect() -> bool { false } +} + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the end of the slice. +/// +/// When the slice len is not evenly divided by the chunk size, the last +/// up to `chunk_size-1` elements will be omitted but can be retrieved from +/// the [`remainder`] function from the iterator. +/// +/// This struct is created by the [`rchunks_exact`] method on [slices]. 
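The mutable variant relies on `mem::replace(&mut self.v, &mut [])` to detach the slice before splitting it, which is what lets each call hand out a disjoint `&'a mut [T]`. From the caller's side it behaves like `rchunks` with mutable access; a sketch (illustrative, not part of the diff):

```rust
fn main() {
    let mut v = [0, 0, 0, 0, 0];

    // Number the chunks walking backwards: the last two elements get 1,
    // the middle two get 2, and the short front chunk gets 3.
    for (i, chunk) in v.rchunks_mut(2).enumerate() {
        for elem in chunk.iter_mut() {
            *elem = i as i32 + 1;
        }
    }
    assert_eq!(v, [3, 2, 2, 1, 1]);
}
```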
+/// +/// [`rchunks_exact`]: ../../std/primitive.slice.html#method.rchunks_exact +/// [`remainder`]: ../../std/slice/struct.RChunksExact.html#method.remainder +/// [slices]: ../../std/primitive.slice.html +#[derive(Debug)] +#[stable(feature = "rchunks", since = "1.31.0")] +pub struct RChunksExact<'a, T:'a> { + v: &'a [T], + rem: &'a [T], + chunk_size: usize +} + +impl<'a, T> RChunksExact<'a, T> { + /// Return the remainder of the original slice that is not going to be + /// returned by the iterator. The returned slice has at most `chunk_size-1` + /// elements. + #[stable(feature = "rchunks", since = "1.31.0")] + pub fn remainder(&self) -> &'a [T] { + self.rem + } +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> Clone for RChunksExact<'a, T> { + fn clone(&self) -> RChunksExact<'a, T> { + RChunksExact { + v: self.v, + rem: self.rem, + chunk_size: self.chunk_size, + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> Iterator for RChunksExact<'a, T> { + type Item = &'a [T]; + + #[inline] + fn next(&mut self) -> Option<&'a [T]> { + if self.v.len() < self.chunk_size { + None + } else { + let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size); + self.v = fst; + Some(snd) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let n = self.v.len() / self.chunk_size; + (n, Some(n)) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option<Self::Item> { + let (end, overflow) = n.overflowing_mul(self.chunk_size); + if end >= self.v.len() || overflow { + self.v = &[]; + None + } else { + let (fst, _) = self.v.split_at(self.v.len() - end); + self.v = fst; + self.next() + } + } + + #[inline] + fn last(mut self) -> Option<Self::Item> { + self.next_back() + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> DoubleEndedIterator for RChunksExact<'a, T> { + #[inline] + fn next_back(&mut self) -> Option<&'a [T]> { + if self.v.len() < self.chunk_size { + None + } else { + let (fst, snd) = self.v.split_at(self.chunk_size); + self.v = snd; + Some(fst) + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> { + fn is_empty(&self) -> bool { + self.v.is_empty() + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for RChunksExact<'a, T> {} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> FusedIterator for RChunksExact<'a, T> {} + +#[doc(hidden)] +#[stable(feature = "rchunks", since = "1.31.0")] +unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> { + unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] { + let end = self.v.len() - i * self.chunk_size; + let start = end - self.chunk_size; + from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) + } + fn may_have_side_effect() -> bool { false } +} + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the end of the slice. +/// +/// When the slice len is not evenly divided by the chunk size, the last up to +/// `chunk_size-1` elements will be omitted but can be retrieved from the +/// [`into_remainder`] function from the iterator. +/// +/// This struct is created by the [`rchunks_exact_mut`] method on [slices].
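Before the mutable variant, a quick check of `RChunksExact`'s double-ended behavior as implemented above: the remainder is split off at construction, so `next_back` starts at the first *full* chunk after it (sketch, not part of the diff):

```rust
fn main() {
    let v = [0, 1, 2, 3, 4, 5, 6];
    let mut it = v.rchunks_exact(3);

    // Full chunks from the back only.
    assert_eq!(it.next(), Some(&[4, 5, 6][..]));
    assert_eq!(it.next(), Some(&[1, 2, 3][..]));
    assert_eq!(it.next(), None);
    // The single leading element appears only in the remainder.
    assert_eq!(it.remainder(), &[0]);

    // From the other end, iteration begins right after the remainder.
    assert_eq!(v.rchunks_exact(3).next_back(), Some(&[1, 2, 3][..]));
}
```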
+/// +/// [`rchunks_exact_mut`]: ../../std/primitive.slice.html#method.rchunks_exact_mut +/// [`into_remainder`]: ../../std/slice/struct.RChunksExactMut.html#method.into_remainder +/// [slices]: ../../std/primitive.slice.html +#[derive(Debug)] +#[stable(feature = "rchunks", since = "1.31.0")] +pub struct RChunksExactMut<'a, T:'a> { + v: &'a mut [T], + rem: &'a mut [T], + chunk_size: usize +} + +impl<'a, T> RChunksExactMut<'a, T> { + /// Return the remainder of the original slice that is not going to be + /// returned by the iterator. The returned slice has at most `chunk_size-1` + /// elements. + #[stable(feature = "rchunks", since = "1.31.0")] + pub fn into_remainder(self) -> &'a mut [T] { + self.rem + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> Iterator for RChunksExactMut<'a, T> { + type Item = &'a mut [T]; + + #[inline] + fn next(&mut self) -> Option<&'a mut [T]> { + if self.v.len() < self.chunk_size { + None + } else { + let tmp = mem::replace(&mut self.v, &mut []); + let tmp_len = tmp.len(); + let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size); + self.v = head; + Some(tail) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let n = self.v.len() / self.chunk_size; + (n, Some(n)) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option<&'a mut [T]> { + let (end, overflow) = n.overflowing_mul(self.chunk_size); + if end >= self.v.len() || overflow { + self.v = &mut []; + None + } else { + let tmp = mem::replace(&mut self.v, &mut []); + let tmp_len = tmp.len(); + let (fst, _) = tmp.split_at_mut(tmp_len - end); + self.v = fst; + self.next() + } + } + + #[inline] + fn last(mut self) -> Option<Self::Item> { + self.next_back() + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> { + #[inline] + fn next_back(&mut self) -> Option<&'a mut [T]> { + if self.v.len() < self.chunk_size { + None + } else { + let tmp = mem::replace(&mut self.v, &mut []); + let (head, tail) = tmp.split_at_mut(self.chunk_size); + self.v = tail; + Some(head) + } + } +} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> ExactSizeIterator for RChunksExactMut<'a, T> { + fn is_empty(&self) -> bool { + self.v.is_empty() + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for RChunksExactMut<'a, T> {} + +#[stable(feature = "rchunks", since = "1.31.0")] +impl<'a, T> FusedIterator for RChunksExactMut<'a, T> {} + +#[doc(hidden)] +#[stable(feature = "rchunks", since = "1.31.0")] +unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> { + unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] { + let end = self.v.len() - i * self.chunk_size; + let start = end - self.chunk_size; + from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) + } + fn may_have_side_effect() -> bool { false } +} + // // Free functions // diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index a2782dd8e2e..a316093825a 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -1896,7 +1896,7 @@ mod traits { #[inline] fn index_mut(self, slice: &mut str) -> &mut Self::Output { // is_char_boundary checks that the index is in [0, .len()] - // canot reuse `get` as above, because of NLL trouble + // cannot reuse `get` as above, because of NLL trouble if self.start <= self.end && slice.is_char_boundary(self.start) && slice.is_char_boundary(self.end) { @@ -2277,7 +2277,6 @@ impl
str { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[rustc_const_unstable(feature = "const_str_as_ptr")] pub const fn as_ptr(&self) -> *const u8 { self as *const str as *const u8 } diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index f130dbfb0e3..56d3b429fdb 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -124,6 +124,7 @@ pub fn spin_loop_hint() { /// [`bool`]: ../../../std/primitive.bool.html #[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] +#[repr(C, align(1))] pub struct AtomicBool { v: UnsafeCell<u8>, } @@ -147,6 +148,9 @@ unsafe impl Sync for AtomicBool {} /// This type has the same in-memory representation as a `*mut T`. #[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))] +#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))] +#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))] pub struct AtomicPtr<T> { p: UnsafeCell<*mut T>, } @@ -1088,6 +1092,7 @@ macro_rules! atomic_int { $s_int_type:expr, $int_ref:expr, $extra_feature:expr, $min_fn:ident, $max_fn:ident, + $align:expr, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. /// @@ -1101,6 +1106,7 @@ macro_rules! atomic_int { /// /// [module-level documentation]: index.html #[$stable] + #[repr(C, align($align))] pub struct $atomic_type { v: UnsafeCell<$int_type>, } @@ -1831,6 +1837,7 @@ atomic_int! { "i8", "../../../std/primitive.i8.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 1, i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic = "8")] @@ -1844,6 +1851,7 @@ atomic_int! { "u8", "../../../std/primitive.u8.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 1, u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic = "16")] @@ -1857,6 +1865,7 @@ atomic_int! { "i16", "../../../std/primitive.i16.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 2, i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic = "16")] @@ -1870,6 +1879,7 @@ atomic_int! { "u16", "../../../std/primitive.u16.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 2, u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic = "32")] @@ -1883,6 +1893,7 @@ atomic_int! { "i32", "../../../std/primitive.i32.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 4, i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic = "32")] @@ -1896,6 +1907,7 @@ atomic_int! { "u32", "../../../std/primitive.u32.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 4, u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic = "64")] @@ -1909,6 +1921,7 @@ atomic_int! { "i64", "../../../std/primitive.i64.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 8, i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic = "64")] @@ -1922,8 +1935,49 @@ atomic_int! { "u64", "../../../std/primitive.u64.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 8, u64 AtomicU64 ATOMIC_U64_INIT } +#[cfg(all(not(stage0), target_has_atomic = "128"))] +atomic_int! 
{ + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + "i128", "../../../std/primitive.i128.html", + "#![feature(integer_atomics)]\n\n", + atomic_min, atomic_max, + 16, + i128 AtomicI128 ATOMIC_I128_INIT +} +#[cfg(all(not(stage0), target_has_atomic = "128"))] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + "u128", "../../../std/primitive.u128.html", + "#![feature(integer_atomics)]\n\n", + atomic_umin, atomic_umax, + 16, + u128 AtomicU128 ATOMIC_U128_INIT +} +#[cfg(target_pointer_width = "16")] +macro_rules! ptr_width { + () => { 2 } +} +#[cfg(target_pointer_width = "32")] +macro_rules! ptr_width { + () => { 4 } +} +#[cfg(target_pointer_width = "64")] +macro_rules! ptr_width { + () => { 8 } +} #[cfg(target_has_atomic = "ptr")] atomic_int!{ stable(feature = "rust1", since = "1.0.0"), @@ -1935,6 +1989,7 @@ atomic_int!{ "isize", "../../../std/primitive.isize.html", "", atomic_min, atomic_max, + ptr_width!(), isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic = "ptr")] @@ -1948,6 +2003,7 @@ atomic_int!{ "usize", "../../../std/primitive.usize.html", "", atomic_umin, atomic_umax, + ptr_width!(), usize AtomicUsize ATOMIC_USIZE_INIT } diff --git a/src/libcore/task/wake.rs b/src/libcore/task/wake.rs index ab4ae50c443..c9fb22e0080 100644 --- a/src/libcore/task/wake.rs +++ b/src/libcore/task/wake.rs @@ -188,6 +188,11 @@ impl LocalWaker { } impl From<LocalWaker> for Waker { + /// Converts a `LocalWaker` into a `Waker`. + /// + /// This conversion turns a `!Sync` `LocalWaker` into a `Sync` `Waker`, allowing a wakeup + /// object to be sent to another thread, but giving up its ability to do specialized + /// thread-local wakeup behavior. 
#[inline] fn from(local_waker: LocalWaker) -> Self { local_waker.0 diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index 0beb60a1270..5ac89912268 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -19,7 +19,6 @@ #![feature(flt2dec)] #![feature(fmt_internals)] #![feature(hashmap_internals)] -#![feature(impl_header_lifetime_elision)] #![feature(pattern)] #![feature(range_is_empty)] #![feature(raw)] @@ -34,7 +33,6 @@ #![feature(trusted_len)] #![feature(try_from)] #![feature(try_trait)] -#![feature(chunks_exact)] #![feature(align_offset)] #![feature(reverse_bits)] #![feature(inner_deref)] diff --git a/src/libcore/tests/slice.rs b/src/libcore/tests/slice.rs index de49a7bed39..dba5a43eb21 100644 --- a/src/libcore/tests/slice.rs +++ b/src/libcore/tests/slice.rs @@ -339,6 +339,228 @@ fn test_chunks_exact_mut_zip() { } #[test] +fn test_rchunks_count() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let c = v.rchunks(3); + assert_eq!(c.count(), 2); + + let v2: &[i32] = &[0, 1, 2, 3, 4]; + let c2 = v2.rchunks(2); + assert_eq!(c2.count(), 3); + + let v3: &[i32] = &[]; + let c3 = v3.rchunks(2); + assert_eq!(c3.count(), 0); +} + +#[test] +fn test_rchunks_nth() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let mut c = v.rchunks(2); + assert_eq!(c.nth(1).unwrap(), &[2, 3]); + assert_eq!(c.next().unwrap(), &[0, 1]); + + let v2: &[i32] = &[0, 1, 2, 3, 4]; + let mut c2 = v2.rchunks(3); + assert_eq!(c2.nth(1).unwrap(), &[0, 1]); + assert_eq!(c2.next(), None); +} + +#[test] +fn test_rchunks_last() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let c = v.rchunks(2); + assert_eq!(c.last().unwrap()[1], 1); + + let v2: &[i32] = &[0, 1, 2, 3, 4]; + let c2 = v2.rchunks(2); + assert_eq!(c2.last().unwrap()[0], 0); +} + +#[test] +fn test_rchunks_zip() { + let v1: &[i32] = &[0, 1, 2, 3, 4]; + let v2: &[i32] = &[6, 7, 8, 9, 10]; + + let res = v1.rchunks(2) + .zip(v2.rchunks(2)) + .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>()) + .collect::<Vec<_>>(); + assert_eq!(res, vec![26, 18, 6]); +} + +#[test] +fn test_rchunks_mut_count() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + let c = v.rchunks_mut(3); + assert_eq!(c.count(), 2); + + let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let c2 = v2.rchunks_mut(2); + assert_eq!(c2.count(), 3); + + let v3: &mut [i32] = &mut []; + let c3 = v3.rchunks_mut(2); + assert_eq!(c3.count(), 0); +} + +#[test] +fn test_rchunks_mut_nth() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + let mut c = v.rchunks_mut(2); + assert_eq!(c.nth(1).unwrap(), &[2, 3]); + assert_eq!(c.next().unwrap(), &[0, 1]); + + let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let mut c2 = v2.rchunks_mut(3); + assert_eq!(c2.nth(1).unwrap(), &[0, 1]); + assert_eq!(c2.next(), None); +} + +#[test] +fn test_rchunks_mut_last() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + let c = v.rchunks_mut(2); + assert_eq!(c.last().unwrap(), &[0, 1]); + + let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let c2 = v2.rchunks_mut(2); + assert_eq!(c2.last().unwrap(), &[0]); +} + +#[test] +fn test_rchunks_mut_zip() { + let v1: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let v2: &[i32] = &[6, 7, 8, 9, 10]; + + for (a, b) in v1.rchunks_mut(2).zip(v2.rchunks(2)) { + let sum = b.iter().sum::<i32>(); + for v in a { + *v += sum; + } + } + assert_eq!(v1, [6, 16, 17, 22, 23]); +} + +#[test] +fn test_rchunks_exact_count() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let c = v.rchunks_exact(3); + assert_eq!(c.count(), 2); + + let v2: &[i32] = &[0, 1, 2, 3, 4]; + let c2 = v2.rchunks_exact(2); + assert_eq!(c2.count(), 2); + + 
let v3: &[i32] = &[]; + let c3 = v3.rchunks_exact(2); + assert_eq!(c3.count(), 0); +} + +#[test] +fn test_rchunks_exact_nth() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let mut c = v.rchunks_exact(2); + assert_eq!(c.nth(1).unwrap(), &[2, 3]); + assert_eq!(c.next().unwrap(), &[0, 1]); + + let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6]; + let mut c2 = v2.rchunks_exact(3); + assert_eq!(c2.nth(1).unwrap(), &[1, 2, 3]); + assert_eq!(c2.next(), None); +} + +#[test] +fn test_rchunks_exact_last() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let c = v.rchunks_exact(2); + assert_eq!(c.last().unwrap(), &[0, 1]); + + let v2: &[i32] = &[0, 1, 2, 3, 4]; + let c2 = v2.rchunks_exact(2); + assert_eq!(c2.last().unwrap(), &[1, 2]); +} + +#[test] +fn test_rchunks_exact_remainder() { + let v: &[i32] = &[0, 1, 2, 3, 4]; + let c = v.rchunks_exact(2); + assert_eq!(c.remainder(), &[0]); +} + +#[test] +fn test_rchunks_exact_zip() { + let v1: &[i32] = &[0, 1, 2, 3, 4]; + let v2: &[i32] = &[6, 7, 8, 9, 10]; + + let res = v1.rchunks_exact(2) + .zip(v2.rchunks_exact(2)) + .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>()) + .collect::<Vec<_>>(); + assert_eq!(res, vec![26, 18]); +} + +#[test] +fn test_rchunks_exact_mut_count() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + let c = v.rchunks_exact_mut(3); + assert_eq!(c.count(), 2); + + let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let c2 = v2.rchunks_exact_mut(2); + assert_eq!(c2.count(), 2); + + let v3: &mut [i32] = &mut []; + let c3 = v3.rchunks_exact_mut(2); + assert_eq!(c3.count(), 0); +} + +#[test] +fn test_rchunks_exact_mut_nth() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + let mut c = v.rchunks_exact_mut(2); + assert_eq!(c.nth(1).unwrap(), &[2, 3]); + assert_eq!(c.next().unwrap(), &[0, 1]); + + let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6]; + let mut c2 = v2.rchunks_exact_mut(3); + assert_eq!(c2.nth(1).unwrap(), &[1, 2, 3]); + assert_eq!(c2.next(), None); +} + +#[test] +fn test_rchunks_exact_mut_last() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + let c = v.rchunks_exact_mut(2); + assert_eq!(c.last().unwrap(), &[0, 1]); + + let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let c2 = v2.rchunks_exact_mut(2); + assert_eq!(c2.last().unwrap(), &[1, 2]); +} + +#[test] +fn test_rchunks_exact_mut_remainder() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let c = v.rchunks_exact_mut(2); + assert_eq!(c.into_remainder(), &[0]); +} + +#[test] +fn test_rchunks_exact_mut_zip() { + let v1: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let v2: &[i32] = &[6, 7, 8, 9, 10]; + + for (a, b) in v1.rchunks_exact_mut(2).zip(v2.rchunks_exact(2)) { + let sum = b.iter().sum::<i32>(); + for v in a { + *v += sum; + } + } + assert_eq!(v1, [0, 16, 17, 22, 23]); +} + +#[test] fn test_windows_count() { let v: &[i32] = &[0, 1, 2, 3, 4, 5]; let c = v.windows(3); diff --git a/src/libcore/time.rs b/src/libcore/time.rs index 81ae8ade12d..938e97503de 100644 --- a/src/libcore/time.rs +++ b/src/libcore/time.rs @@ -109,7 +109,7 @@ impl Duration { /// ``` #[stable(feature = "duration", since = "1.3.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_secs(secs: u64) -> Duration { Duration { secs, nanos: 0 } } @@ -128,7 +128,7 @@ impl Duration { /// ``` #[stable(feature = "duration", since = "1.3.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_millis(millis: u64) -> Duration { Duration { secs: millis / MILLIS_PER_SEC, @@ -150,7 +150,7 @@ impl Duration { /// ``` #[stable(feature = "duration_from_micros", 
since = "1.27.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_micros(micros: u64) -> Duration { Duration { secs: micros / MICROS_PER_SEC, @@ -172,7 +172,7 @@ impl Duration { /// ``` #[stable(feature = "duration_extras", since = "1.27.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_nanos(nanos: u64) -> Duration { Duration { secs: nanos / (NANOS_PER_SEC as u64), @@ -209,7 +209,6 @@ impl Duration { /// /// [`subsec_nanos`]: #method.subsec_nanos #[stable(feature = "duration", since = "1.3.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn as_secs(&self) -> u64 { self.secs } @@ -229,7 +228,6 @@ impl Duration { /// assert_eq!(duration.subsec_millis(), 432); /// ``` #[stable(feature = "duration_extras", since = "1.27.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn subsec_millis(&self) -> u32 { self.nanos / NANOS_PER_MILLI } @@ -249,7 +247,6 @@ impl Duration { /// assert_eq!(duration.subsec_micros(), 234_567); /// ``` #[stable(feature = "duration_extras", since = "1.27.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn subsec_micros(&self) -> u32 { self.nanos / NANOS_PER_MICRO } @@ -269,7 +266,6 @@ impl Duration { /// assert_eq!(duration.subsec_nanos(), 10_000_000); /// ``` #[stable(feature = "duration", since = "1.3.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn subsec_nanos(&self) -> u32 { self.nanos } @@ -286,7 +282,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_as_u128", issue = "50202")] #[inline] - pub fn as_millis(&self) -> u128 { + pub const fn as_millis(&self) -> u128 { self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128 } @@ -303,7 +299,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_as_u128", issue = "50202")] #[inline] - pub fn as_micros(&self) -> u128 { + pub const fn as_micros(&self) -> u128 { self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128 } @@ -320,7 +316,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_as_u128", issue = "50202")] #[inline] - pub fn as_nanos(&self) -> u128 { + pub const fn as_nanos(&self) -> u128 { self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128 } @@ -478,7 +474,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_float", issue = "54361")] #[inline] - pub fn as_float_secs(&self) -> f64 { + pub const fn as_float_secs(&self) -> f64 { (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64) } diff --git a/src/libcore/unicode/tables.rs b/src/libcore/unicode/tables.rs index 3de855ac943..e525c057400 100644 --- a/src/libcore/unicode/tables.rs +++ b/src/libcore/unicode/tables.rs @@ -2598,4 +2598,3 @@ pub mod conversions { ]; } - |
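Closing note on the `Duration` changes above: with the `from_*` constructors now unconditionally `rustc_promotable` and the getters no longer gated behind a const-unstable feature, durations can be built and inspected in constant contexts. A sketch of what this enables (assuming a toolchain with this patch applied):

```rust
use std::time::Duration;

// `from_millis` is a promotable const fn, so this is a constant.
const TIMEOUT: Duration = Duration::from_millis(1_500);

fn main() {
    // The getters are plain const fns after this diff.
    assert_eq!(TIMEOUT.as_secs(), 1);
    assert_eq!(TIMEOUT.subsec_millis(), 500);
}
```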
