Diffstat (limited to 'library/core/src/slice')
-rw-r--r--  library/core/src/slice/cmp.rs                        6
-rw-r--r--  library/core/src/slice/memchr.rs                     5
-rw-r--r--  library/core/src/slice/mod.rs                       26
-rw-r--r--  library/core/src/slice/raw.rs                        8
-rw-r--r--  library/core/src/slice/rotate.rs                     6
-rw-r--r--  library/core/src/slice/sort/shared/smallsort.rs     16
-rw-r--r--  library/core/src/slice/sort/stable/mod.rs            6
-rw-r--r--  library/core/src/slice/sort/stable/quicksort.rs      4
-rw-r--r--  library/core/src/slice/sort/unstable/quicksort.rs    8
9 files changed, 43 insertions, 42 deletions
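Context for the hunks below: `size_of`, `size_of_val`, `align_of`, and `align_of_val` are reachable through the standard prelude in recent Rust (1.80+), so the `mem::`-prefixed paths and the now-unneeded `use crate::mem` imports can be dropped. A minimal standalone check of that assumption, outside the library:

// Compiles with no `use core::mem` / `use std::mem` import; the four
// functions resolve through the prelude on Rust 1.80 and later.
fn main() {
    assert_eq!(size_of::<u32>(), 4);
    assert_eq!(align_of::<u64>(), align_of_val(&0u64));
    assert_eq!(size_of_val(&[0u8; 3]), 3);
}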
diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs
index 9cb00644e64..804bdfcbb4f 100644
--- a/library/core/src/slice/cmp.rs
+++ b/library/core/src/slice/cmp.rs
@@ -1,10 +1,10 @@
 //! Comparison traits for `[T]`.
 
 use super::{from_raw_parts, memchr};
+use crate::ascii;
 use crate::cmp::{self, BytewiseEq, Ordering};
 use crate::intrinsics::compare_bytes;
 use crate::num::NonZero;
-use crate::{ascii, mem};
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T, U> PartialEq<[U]> for [T]
@@ -87,7 +87,7 @@ where
         // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
         // The two slices have been checked to have the same size above.
         unsafe {
-            let size = mem::size_of_val(self);
+            let size = size_of_val(self);
             compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
         }
     }
@@ -266,7 +266,7 @@ macro_rules! impl_slice_contains {
                 fn slice_contains(&self, arr: &[$t]) -> bool {
                     // Make our LANE_COUNT 4x the normal lane count (aiming for 128 bit vectors).
                     // The compiler will nicely unroll it.
-                    const LANE_COUNT: usize = 4 * (128 / (mem::size_of::<$t>() * 8));
+                    const LANE_COUNT: usize = 4 * (128 / (size_of::<$t>() * 8));
                     // SIMD
                     let mut chunks = arr.chunks_exact(LANE_COUNT);
                     for chunk in &mut chunks {
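The `impl_slice_contains` hunk above only changes the `size_of` spelling; the chunked scan itself is unchanged. As a reference point, here is a minimal standalone sketch of that pattern, specialized to `u32` (the name `contains_u32` is illustrative, not a library function):

// Chunked containment check in the style of impl_slice_contains:
// LANE_COUNT is 4x the lane count of a 128-bit vector for the element
// type, and the bitwise-or fold keeps the inner loop branch-free so
// the compiler can unroll and vectorize it.
fn contains_u32(needle: u32, haystack: &[u32]) -> bool {
    const LANE_COUNT: usize = 4 * (128 / (size_of::<u32>() * 8)); // = 16
    let mut chunks = haystack.chunks_exact(LANE_COUNT);
    for chunk in &mut chunks {
        if chunk.iter().fold(false, |acc, &x| acc | (x == needle)) {
            return true;
        }
    }
    // Plain scan for the tail that did not fill a whole chunk.
    chunks.remainder().iter().any(|&x| x == needle)
}

fn main() {
    let data: Vec<u32> = (0..100).collect();
    assert!(contains_u32(73, &data));
    assert!(!contains_u32(1_000, &data));
}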
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 98db7aaf533..1e1053583a6 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -2,11 +2,10 @@
 // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
 
 use crate::intrinsics::const_eval_select;
-use crate::mem;
 
 const LO_USIZE: usize = usize::repeat_u8(0x01);
 const HI_USIZE: usize = usize::repeat_u8(0x80);
-const USIZE_BYTES: usize = mem::size_of::<usize>();
+const USIZE_BYTES: usize = size_of::<usize>();
 
 /// Returns `true` if `x` contains any zero byte.
 ///
@@ -138,7 +137,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
     // offset is always aligned, so just testing `>` is sufficient and avoids possible
     // overflow.
     let repeated_x = usize::repeat_u8(x);
-    let chunk_bytes = mem::size_of::<Chunk>();
+    let chunk_bytes = size_of::<Chunk>();
 
     while offset > min_aligned_offset {
         // SAFETY: offset starts at len - suffix.len(), as long as it is greater than
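The memchr hunks above keep the word-at-a-time search intact and only drop the `mem::` prefix. For reference, a standalone sketch of the SWAR zero-byte test that `LO_USIZE` and `HI_USIZE` feed into, written for a fixed 64-bit word rather than `usize`:

// `x.wrapping_sub(LO) & !x & HI` is nonzero exactly when some byte of
// `x` is zero: subtracting 1 from a zero byte sets its high bit via
// the borrow, and `!x` masks out bytes whose high bit was already set.
const LO: u64 = 0x0101_0101_0101_0101; // usize::repeat_u8(0x01) on 64-bit
const HI: u64 = 0x8080_8080_8080_8080; // usize::repeat_u8(0x80) on 64-bit

fn contains_zero_byte(x: u64) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

fn main() {
    assert!(contains_zero_byte(u64::from_le_bytes(*b"ab\0defgh")));
    assert!(!contains_zero_byte(u64::from_le_bytes(*b"abcdefgh")));
}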
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 7a2764206e8..c79ae2c1915 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -3894,9 +3894,9 @@ impl<T> [T] {
 
         // Explicitly wrap the function call in a const block so it gets
         // constant-evaluated even in debug mode.
-        let gcd: usize = const { gcd(mem::size_of::<T>(), mem::size_of::<U>()) };
-        let ts: usize = mem::size_of::<U>() / gcd;
-        let us: usize = mem::size_of::<T>() / gcd;
+        let gcd: usize = const { gcd(size_of::<T>(), size_of::<U>()) };
+        let ts: usize = size_of::<U>() / gcd;
+        let us: usize = size_of::<T>() / gcd;
 
         // Armed with this knowledge, we can find how many `U`s we can fit!
         let us_len = self.len() / ts * us;
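The `align_to` hunk above computes, at compile time, how many `T`s (`ts`) correspond to how many `U`s (`us`) per repeating chunk. A small worked example of that arithmetic, outside the library, for viewing a byte slice as `u16`s:

// gcd(size_of::<u8>(), size_of::<u16>()) = 1, so each chunk is
// ts = 2 source elements and us = 1 target element; a 10-byte middle
// section therefore yields 10 / 2 * 1 = 5 u16s.
fn gcd(mut a: usize, mut b: usize) -> usize {
    while b != 0 {
        (a, b) = (b, a % b);
    }
    a
}

fn main() {
    let g = gcd(size_of::<u8>(), size_of::<u16>());
    let ts = size_of::<u16>() / g;
    let us = size_of::<u8>() / g;
    assert_eq!((ts, us), (2, 1));
    assert_eq!(10usize / ts * us, 5);
}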
@@ -3946,7 +3946,7 @@ impl<T> [T] {
         // ptr.align_offset.
         let ptr = self.as_ptr();
         // SAFETY: See the `align_to_mut` method for the detailed safety comment.
-        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+        let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
         if offset > self.len() {
             (self, &[], &[])
         } else {
@@ -3956,7 +3956,7 @@ impl<T> [T] {
             #[cfg(miri)]
             crate::intrinsics::miri_promise_symbolic_alignment(
                 rest.as_ptr().cast(),
-                mem::align_of::<U>(),
+                align_of::<U>(),
             );
             // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
             // since the caller guarantees that we can transmute `T` to `U` safely.
@@ -4017,7 +4017,7 @@ impl<T> [T] {
         // valid pointer `ptr` (it comes from a reference to `self`) and with
         // a size that is a power of two (since it comes from the alignment for U),
         // satisfying its safety constraints.
-        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+        let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
         if offset > self.len() {
             (self, &mut [], &mut [])
         } else {
@@ -4029,7 +4029,7 @@ impl<T> [T] {
             #[cfg(miri)]
             crate::intrinsics::miri_promise_symbolic_alignment(
                 mut_ptr.cast() as *const (),
-                mem::align_of::<U>(),
+                align_of::<U>(),
             );
             // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
             // SAFETY: see comments for `align_to`.
@@ -4100,7 +4100,7 @@ impl<T> [T] {
         // These are expected to always match, as vector types are laid out like
         // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
         // might as well double-check since it'll optimize away anyhow.
-        assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+        assert_eq!(size_of::<Simd<T, LANES>>(), size_of::<[T; LANES]>());
 
         // SAFETY: The simd types have the same layout as arrays, just with
         // potentially-higher alignment, so the de-facto transmutes are sound.
@@ -4136,7 +4136,7 @@ impl<T> [T] {
         // These are expected to always match, as vector types are laid out like
         // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
         // might as well double-check since it'll optimize away anyhow.
-        assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+        assert_eq!(size_of::<Simd<T, LANES>>(), size_of::<[T; LANES]>());
 
         // SAFETY: The simd types have the same layout as arrays, just with
         // potentially-higher alignment, so the de-facto transmutes are sound.
@@ -4721,11 +4721,11 @@ impl<T> [T] {
 
         let byte_offset = elem_start.wrapping_sub(self_start);
 
-        if byte_offset % mem::size_of::<T>() != 0 {
+        if byte_offset % size_of::<T>() != 0 {
             return None;
         }
 
-        let offset = byte_offset / mem::size_of::<T>();
+        let offset = byte_offset / size_of::<T>();
 
         if offset < self.len() { Some(offset) } else { None }
     }
@@ -4775,11 +4775,11 @@ impl<T> [T] {
 
         let byte_start = subslice_start.wrapping_sub(self_start);
 
-        if byte_start % core::mem::size_of::<T>() != 0 {
+        if byte_start % size_of::<T>() != 0 {
             return None;
         }
 
-        let start = byte_start / core::mem::size_of::<T>();
+        let start = byte_start / size_of::<T>();
         let end = start.wrapping_add(subslice.len());
 
         if start <= self.len() && end <= self.len() { Some(start..end) } else { None }
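The `element_offset` and `subslice_range` hunks above rely on the byte distance between two pointers being an exact multiple of the element size. A standalone sketch of that check (illustrative only; the real methods handle zero-sized types separately, which this skips):

// Recover an index from an element reference by pointer arithmetic:
// the byte offset must divide evenly by size_of::<T>() and the
// resulting index must be in bounds.
fn element_index<T>(slice: &[T], elem: &T) -> Option<usize> {
    assert!(size_of::<T>() != 0, "zero-sized T not handled in this sketch");
    let byte_offset = (elem as *const T as usize).wrapping_sub(slice.as_ptr() as usize);
    if byte_offset % size_of::<T>() != 0 {
        return None;
    }
    let offset = byte_offset / size_of::<T>();
    if offset < slice.len() { Some(offset) } else { None }
}

fn main() {
    let v = [10u32, 20, 30];
    assert_eq!(element_index(&v, &v[2]), Some(2));
}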
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
index e24b52cff82..3582c7e8b3f 100644
--- a/library/core/src/slice/raw.rs
+++ b/library/core/src/slice/raw.rs
@@ -11,7 +11,7 @@ use crate::{array, ptr, ub_checks};
 ///
 /// Behavior is undefined if any of the following conditions are violated:
 ///
-/// * `data` must be non-null, [valid] for reads for `len * mem::size_of::<T>()` many bytes,
+/// * `data` must be non-null, [valid] for reads for `len * size_of::<T>()` many bytes,
 ///   and it must be properly aligned. This means in particular:
 ///
 ///     * The entire memory range of this slice must be contained within a single allocated object!
@@ -28,7 +28,7 @@ use crate::{array, ptr, ub_checks};
 /// * The memory referenced by the returned slice must not be mutated for the duration
 ///   of lifetime `'a`, except inside an `UnsafeCell`.
 ///
-/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
+/// * The total size `len * size_of::<T>()` of the slice must be no larger than `isize::MAX`,
 ///   and adding that size to `data` must not "wrap around" the address space.
 ///   See the safety documentation of [`pointer::offset`].
 ///
@@ -146,7 +146,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
 ///
 /// Behavior is undefined if any of the following conditions are violated:
 ///
-/// * `data` must be non-null, [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
+/// * `data` must be non-null, [valid] for both reads and writes for `len * size_of::<T>()` many bytes,
 ///   and it must be properly aligned. This means in particular:
 ///
 ///     * The entire memory range of this slice must be contained within a single allocated object!
@@ -163,7 +163,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
 ///   (not derived from the return value) for the duration of lifetime `'a`.
 ///   Both read and write accesses are forbidden.
 ///
-/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
+/// * The total size `len * size_of::<T>()` of the slice must be no larger than `isize::MAX`,
 ///   and adding that size to `data` must not "wrap around" the address space.
 ///   See the safety documentation of [`pointer::offset`].
 ///
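The doc changes above only touch how the size bound is written; the contract itself is unchanged. A small usage sketch that satisfies it, where the alignment and `len * size_of::<T>()` requirements hold trivially because the data comes from a live array:

use core::slice;

fn main() {
    let v = [1u16, 2, 3, 4];
    // SAFETY: `v` is a valid, properly aligned allocation of four u16s
    // that outlives `s`, and 4 * size_of::<u16>() is far below isize::MAX.
    let s: &[u16] = unsafe { slice::from_raw_parts(v.as_ptr(), v.len()) };
    assert_eq!(s, &v[..]);
}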
diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs
index 5d5ee4c7b62..80178f297ea 100644
--- a/library/core/src/slice/rotate.rs
+++ b/library/core/src/slice/rotate.rs
@@ -1,4 +1,4 @@
-use crate::mem::{self, MaybeUninit, SizedTypeProperties};
+use crate::mem::{MaybeUninit, SizedTypeProperties};
 use crate::{cmp, ptr};
 
 type BufType = [usize; 32];
@@ -21,12 +21,12 @@ pub(super) unsafe fn ptr_rotate<T>(left: usize, mid: *mut T, right: usize) {
     }
     // `T` is not a zero-sized type, so it's okay to divide by its size.
     if !cfg!(feature = "optimize_for_size")
-        && cmp::min(left, right) <= mem::size_of::<BufType>() / mem::size_of::<T>()
+        && cmp::min(left, right) <= size_of::<BufType>() / size_of::<T>()
     {
         // SAFETY: guaranteed by the caller
         unsafe { ptr_rotate_memmove(left, mid, right) };
     } else if !cfg!(feature = "optimize_for_size")
-        && ((left + right < 24) || (mem::size_of::<T>() > mem::size_of::<[usize; 4]>()))
+        && ((left + right < 24) || (size_of::<T>() > size_of::<[usize; 4]>()))
     {
         // SAFETY: guaranteed by the caller
         unsafe { ptr_rotate_gcd(left, mid, right) }
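The rotate hunk above takes the memmove strategy when the shorter side fits a `[usize; 32]` stack buffer. A quick standalone check of that capacity computation (the helper name is illustrative):

// How many elements of T fit in the rotate scratch buffer.
type BufType = [usize; 32];

fn fits_in_buf<T>(shorter_side: usize) -> bool {
    // Mirrors the patched condition; T must not be zero-sized.
    shorter_side <= size_of::<BufType>() / size_of::<T>()
}

fn main() {
    // Independent of pointer width: the buffer always holds 32 usizes.
    assert!(fits_in_buf::<usize>(32));
    assert!(!fits_in_buf::<usize>(33));
}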
diff --git a/library/core/src/slice/sort/shared/smallsort.rs b/library/core/src/slice/sort/shared/smallsort.rs
index f6dcf42ba60..95f196a40d0 100644
--- a/library/core/src/slice/sort/shared/smallsort.rs
+++ b/library/core/src/slice/sort/shared/smallsort.rs
@@ -113,7 +113,7 @@ pub(crate) trait UnstableSmallSortFreezeTypeImpl: Sized + FreezeMarker {
 impl<T: FreezeMarker> UnstableSmallSortFreezeTypeImpl for T {
     #[inline(always)]
     default fn small_sort_threshold() -> usize {
-        if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             SMALL_SORT_GENERAL_THRESHOLD
         } else {
             SMALL_SORT_FALLBACK_THRESHOLD
@@ -125,7 +125,7 @@ impl<T: FreezeMarker> UnstableSmallSortFreezeTypeImpl for T {
     where
         F: FnMut(&T, &T) -> bool,
     {
-        if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             small_sort_general(v, is_less);
         } else {
             small_sort_fallback(v, is_less);
@@ -143,10 +143,10 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
     #[inline(always)]
     fn small_sort_threshold() -> usize {
         if has_efficient_in_place_swap::<T>()
-            && (mem::size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
+            && (size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
         {
             SMALL_SORT_NETWORK_THRESHOLD
-        } else if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        } else if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             SMALL_SORT_GENERAL_THRESHOLD
         } else {
             SMALL_SORT_FALLBACK_THRESHOLD
@@ -159,10 +159,10 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
         F: FnMut(&T, &T) -> bool,
     {
         if has_efficient_in_place_swap::<T>()
-            && (mem::size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
+            && (size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
         {
             small_sort_network(v, is_less);
-        } else if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        } else if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             small_sort_general(v, is_less);
         } else {
             small_sort_fallback(v, is_less);
@@ -238,7 +238,7 @@ fn small_sort_general_with_scratch<T: FreezeMarker, F: FnMut(&T, &T) -> bool>(
     unsafe {
         let scratch_base = scratch.as_mut_ptr() as *mut T;
 
-        let presorted_len = if const { mem::size_of::<T>() <= 16 } && len >= 16 {
+        let presorted_len = if const { size_of::<T>() <= 16 } && len >= 16 {
             // SAFETY: scratch_base is valid and has enough space.
             sort8_stable(v_base, scratch_base, scratch_base.add(len), is_less);
             sort8_stable(
@@ -863,5 +863,5 @@ fn panic_on_ord_violation() -> ! {
 #[must_use]
 pub(crate) const fn has_efficient_in_place_swap<T>() -> bool {
     // Heuristic that holds true on all tested 64-bit capable architectures.
-    mem::size_of::<T>() <= 8 // mem::size_of::<u64>()
+    size_of::<T>() <= 8 // size_of::<u64>()
 }
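The smallsort hunks above gate the larger small-sorts on whether their scratch buffer fits a fixed stack budget for the element type. A sketch of that decision with stand-in constants (the real `MAX_STACK_ARRAY_SIZE` and `SMALL_SORT_GENERAL_SCRATCH_LEN` values live in smallsort.rs and are not reproduced here):

// Assumed, illustrative values; only the shape of the check matters.
const MAX_STACK_ARRAY_SIZE: usize = 4096;
const GENERAL_SCRATCH_LEN: usize = 48;

fn small_sort_threshold<T>(general: usize, fallback: usize) -> usize {
    // Take the bigger "general" threshold only if its scratch buffer
    // for T fits the stack budget.
    if size_of::<T>() * GENERAL_SCRATCH_LEN <= MAX_STACK_ARRAY_SIZE {
        general
    } else {
        fallback
    }
}

fn main() {
    assert_eq!(small_sort_threshold::<u64>(32, 16), 32);
    assert_eq!(small_sort_threshold::<[u8; 100]>(32, 16), 16);
}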
diff --git a/library/core/src/slice/sort/stable/mod.rs b/library/core/src/slice/sort/stable/mod.rs
index 3ff2e71fd05..090367cdaba 100644
--- a/library/core/src/slice/sort/stable/mod.rs
+++ b/library/core/src/slice/sort/stable/mod.rs
@@ -3,7 +3,7 @@
 #[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
 use crate::cmp;
 use crate::intrinsics;
-use crate::mem::{self, MaybeUninit, SizedTypeProperties};
+use crate::mem::{MaybeUninit, SizedTypeProperties};
 #[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
 use crate::slice::sort::shared::smallsort::{
     SMALL_SORT_GENERAL_SCRATCH_LEN, StableSmallSortTypeImpl, insertion_sort_shift_left,
@@ -107,7 +107,7 @@ fn driftsort_main<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], i
     // If min_good_run_len is ever modified, this code must be updated to allocate
     // the correct scratch size for it.
     const MAX_FULL_ALLOC_BYTES: usize = 8_000_000; // 8MB
-    let max_full_alloc = MAX_FULL_ALLOC_BYTES / mem::size_of::<T>();
+    let max_full_alloc = MAX_FULL_ALLOC_BYTES / size_of::<T>();
     let len = v.len();
     let alloc_len = cmp::max(
         cmp::max(len - len / 2, cmp::min(len, max_full_alloc)),
@@ -155,7 +155,7 @@ impl<T, const N: usize> AlignedStorage<T, N> {
     }
 
     fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<T>] {
-        let len = N / mem::size_of::<T>();
+        let len = N / size_of::<T>();
 
         // SAFETY: `_align` ensures we are correctly aligned.
         unsafe { core::slice::from_raw_parts_mut(self.storage.as_mut_ptr().cast(), len) }
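In `driftsort_main` above, the full scratch allocation is capped at `MAX_FULL_ALLOC_BYTES` worth of elements. A quick numeric check of that division, outside the library:

const MAX_FULL_ALLOC_BYTES: usize = 8_000_000; // 8 MB, as in the hunk

fn max_full_alloc<T>() -> usize {
    MAX_FULL_ALLOC_BYTES / size_of::<T>()
}

fn main() {
    assert_eq!(max_full_alloc::<u64>(), 1_000_000);
    assert_eq!(max_full_alloc::<[u8; 16]>(), 500_000);
}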
diff --git a/library/core/src/slice/sort/stable/quicksort.rs b/library/core/src/slice/sort/stable/quicksort.rs
index 630c6ff9077..3c9688790c4 100644
--- a/library/core/src/slice/sort/stable/quicksort.rs
+++ b/library/core/src/slice/sort/stable/quicksort.rs
@@ -1,6 +1,6 @@
 //! This module contains a stable quicksort and partition implementation.
 
-use crate::mem::{self, ManuallyDrop, MaybeUninit};
+use crate::mem::{ManuallyDrop, MaybeUninit};
 use crate::slice::sort::shared::FreezeMarker;
 use crate::slice::sort::shared::pivot::choose_pivot;
 use crate::slice::sort::shared::smallsort::StableSmallSortTypeImpl;
@@ -126,7 +126,7 @@ fn stable_partition<T, F: FnMut(&T, &T) -> bool>(
             // this gave significant performance boosts in benchmarks. Unrolling
             // through for _ in 0..UNROLL_LEN { .. } instead of manually improves
             // compile times but has a ~10-20% performance penalty on opt-level=s.
-            if const { mem::size_of::<T>() <= 16 } {
+            if const { size_of::<T>() <= 16 } {
                 const UNROLL_LEN: usize = 4;
                 let unroll_end = v_base.add(loop_end_pos.saturating_sub(UNROLL_LEN - 1));
                 while state.scan < unroll_end {
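The stable-quicksort hunk above keys its manual unrolling on `const { size_of::<T>() <= 16 }`, so the comparison is computed at compile time rather than at run time. A minimal standalone illustration of that pattern (the strategy names are made up):

// The inline const block forces the size comparison to be evaluated
// during compilation, once per monomorphization.
fn copy_strategy<T>() -> &'static str {
    if const { size_of::<T>() <= 16 } { "unrolled" } else { "plain" }
}

fn main() {
    assert_eq!(copy_strategy::<u128>(), "unrolled");
    assert_eq!(copy_strategy::<[u64; 4]>(), "plain");
}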
diff --git a/library/core/src/slice/sort/unstable/quicksort.rs b/library/core/src/slice/sort/unstable/quicksort.rs
index bb9f90fc881..68a16118716 100644
--- a/library/core/src/slice/sort/unstable/quicksort.rs
+++ b/library/core/src/slice/sort/unstable/quicksort.rs
@@ -1,6 +1,8 @@
 //! This module contains an unstable quicksort and two partition implementations.
 
-use crate::mem::{self, ManuallyDrop};
+#[cfg(not(feature = "optimize_for_size"))]
+use crate::mem;
+use crate::mem::ManuallyDrop;
 #[cfg(not(feature = "optimize_for_size"))]
 use crate::slice::sort::shared::pivot::choose_pivot;
 #[cfg(not(feature = "optimize_for_size"))]
@@ -137,7 +139,7 @@ where
 
 const fn inst_partition<T, F: FnMut(&T, &T) -> bool>() -> fn(&mut [T], &T, &mut F) -> usize {
     const MAX_BRANCHLESS_PARTITION_SIZE: usize = 96;
-    if mem::size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
+    if size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
         // Specialize for types that are relatively cheap to copy, where branchless optimizations
         // have large leverage e.g. `u64` and `String`.
         cfg_if! {
@@ -304,7 +306,7 @@ where
 
         // Manual unrolling that works well on x86, Arm and with opt-level=s without murdering
         // compile-times. Leaving this to the compiler yields ok to bad results.
-        let unroll_len = const { if mem::size_of::<T>() <= 16 { 2 } else { 1 } };
+        let unroll_len = const { if size_of::<T>() <= 16 { 2 } else { 1 } };
 
         let unroll_end = v_base.add(len - (unroll_len - 1));
         while state.right < unroll_end {
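`inst_partition` above picks a partition routine at compile time from `size_of::<T>()`. A standalone sketch of that dispatch shape; the two routines here are trivial placeholders, not the library's partition implementations:

const MAX_BRANCHLESS_PARTITION_SIZE: usize = 96;

fn count_less_small<T: Ord>(v: &[T], pivot: &T) -> usize {
    v.iter().filter(|x| *x < pivot).count()
}

fn count_less_large<T: Ord>(v: &[T], pivot: &T) -> usize {
    v.iter().filter(|x| *x < pivot).count()
}

// Const fn returning a function pointer: the size check is resolved
// during compilation, so each monomorphization gets exactly one routine.
const fn inst_count<T: Ord>() -> fn(&[T], &T) -> usize {
    if size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
        count_less_small::<T>
    } else {
        count_less_large::<T>
    }
}

fn main() {
    let f = inst_count::<u64>();
    assert_eq!(f(&[3u64, 1, 4, 1, 5], &3), 2);
}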