Diffstat (limited to 'library/alloc')
-rw-r--r--  library/alloc/benches/slice.rs                       14
-rw-r--r--  library/alloc/benches/vec.rs                          8
-rw-r--r--  library/alloc/src/boxed/convert.rs                   28
-rw-r--r--  library/alloc/src/boxed/thin.rs                      22
-rw-r--r--  library/alloc/src/collections/btree/node/tests.rs     8
-rw-r--r--  library/alloc/src/raw_vec.rs                          4
-rw-r--r--  library/alloc/src/raw_vec/tests.rs                    3
-rw-r--r--  library/alloc/src/slice.rs                            4
-rw-r--r--  library/alloc/src/string.rs                           6
-rw-r--r--  library/alloc/src/sync.rs                             4
-rw-r--r--  library/alloc/src/vec/in_place_collect.rs            24
-rw-r--r--  library/alloc/src/vec/mod.rs                         10
-rw-r--r--  library/alloc/tests/arc.rs                            5
-rw-r--r--  library/alloc/tests/rc.rs                             5
-rw-r--r--  library/alloc/tests/slice.rs                          4
-rw-r--r--  library/alloc/tests/sort/known_good_stable_sort.rs    6
-rw-r--r--  library/alloc/tests/thin_box.rs                       3
-rw-r--r--  library/alloc/tests/vec.rs                            6
18 files changed, 72 insertions, 92 deletions
diff --git a/library/alloc/benches/slice.rs b/library/alloc/benches/slice.rs
index c6b46e6a2a1..27b0e6fac0a 100644
--- a/library/alloc/benches/slice.rs
+++ b/library/alloc/benches/slice.rs
@@ -1,4 +1,4 @@
-use std::{mem, ptr};
+use std::ptr;
 
 use rand::Rng;
 use rand::distr::{Alphanumeric, SampleString, StandardUniform};
@@ -234,7 +234,7 @@ macro_rules! sort {
         fn $name(b: &mut Bencher) {
             let v = $gen($len);
             b.iter(|| v.clone().$f());
-            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+            b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
         }
     };
 }
@@ -246,7 +246,7 @@ macro_rules! sort_strings {
             let v = $gen($len);
             let v = v.iter().map(|s| &**s).collect::<Vec<&str>>();
             b.iter(|| v.clone().$f());
-            b.bytes = $len * mem::size_of::<&str>() as u64;
+            b.bytes = $len * size_of::<&str>() as u64;
         }
     };
 }
@@ -268,7 +268,7 @@ macro_rules! sort_expensive {
                 });
                 black_box(count);
             });
-            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+            b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
         }
     };
 }
@@ -279,7 +279,7 @@ macro_rules! sort_lexicographic {
         fn $name(b: &mut Bencher) {
             let v = $gen($len);
             b.iter(|| v.clone().$f(|x| x.to_string()));
-            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+            b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
         }
     };
 }
@@ -322,7 +322,7 @@ macro_rules! reverse {
         fn $name(b: &mut Bencher) {
             // odd length and offset by 1 to be as unaligned as possible
             let n = 0xFFFFF;
-            let mut v: Vec<_> = (0..1 + (n / mem::size_of::<$ty>() as u64)).map($f).collect();
+            let mut v: Vec<_> = (0..1 + (n / size_of::<$ty>() as u64)).map($f).collect();
             b.iter(|| black_box(&mut v[1..]).reverse());
             b.bytes = n;
         }
@@ -346,7 +346,7 @@ macro_rules! rotate {
     ($name:ident, $gen:expr, $len:expr, $mid:expr) => {
         #[bench]
         fn $name(b: &mut Bencher) {
-            let size = mem::size_of_val(&$gen(1)[0]);
+            let size = size_of_val(&$gen(1)[0]);
             let mut v = $gen($len * 8 / size);
             b.iter(|| black_box(&mut v).rotate_left(($mid * 8 + size - 1) / size));
             b.bytes = (v.len() * size) as u64;
diff --git a/library/alloc/benches/vec.rs b/library/alloc/benches/vec.rs
index a725ad6894b..1dab71fa1f4 100644
--- a/library/alloc/benches/vec.rs
+++ b/library/alloc/benches/vec.rs
@@ -669,7 +669,7 @@ fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
 // This algorithm was used for Vecs prior to Rust 1.52.
 fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     random_sorted_fill(0x43, &mut template);
 
     let mut vec = template.clone();
@@ -691,7 +691,7 @@ fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
 // Measures performance of Vec::dedup on random data.
 fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     random_sorted_fill(0x43, &mut template);
 
     let mut vec = template.clone();
@@ -708,7 +708,7 @@ fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
 // Measures performance of Vec::dedup when there is no items removed
 fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     template.chunks_exact_mut(2).for_each(|w| {
         w[0] = black_box(0);
         w[1] = black_box(5);
@@ -729,7 +729,7 @@ fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
 // Measures performance of Vec::dedup when there is all items removed
 fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     template.iter_mut().for_each(|w| {
         *w = black_box(0);
     });
diff --git a/library/alloc/src/boxed/convert.rs b/library/alloc/src/boxed/convert.rs
index 255cefb1e78..80626580202 100644
--- a/library/alloc/src/boxed/convert.rs
+++ b/library/alloc/src/boxed/convert.rs
@@ -529,7 +529,6 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
     /// ```
     /// use std::error::Error;
     /// use std::fmt;
-    /// use std::mem;
     ///
     /// #[derive(Debug)]
     /// struct AnError;
@@ -543,9 +542,9 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
     /// impl Error for AnError {}
     ///
     /// let an_error = AnError;
-    /// assert!(0 == mem::size_of_val(&an_error));
+    /// assert!(0 == size_of_val(&an_error));
     /// let a_boxed_error = Box::<dyn Error>::from(an_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: E) -> Box<dyn Error + 'a> {
         Box::new(err)
@@ -563,7 +562,6 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync +
     /// ```
     /// use std::error::Error;
     /// use std::fmt;
-    /// use std::mem;
     ///
     /// #[derive(Debug)]
     /// struct AnError;
@@ -581,10 +579,10 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync +
     /// unsafe impl Sync for AnError {}
     ///
     /// let an_error = AnError;
-    /// assert!(0 == mem::size_of_val(&an_error));
+    /// assert!(0 == size_of_val(&an_error));
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
         Box::new(err)
@@ -600,12 +598,11 @@ impl<'a> From<String> for Box<dyn Error + Send + Sync + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_string_error = "a string error".to_string();
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     #[inline]
     fn from(err: String) -> Box<dyn Error + Send + Sync + 'a> {
@@ -644,11 +641,10 @@ impl<'a> From<String> for Box<dyn Error + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_string_error = "a string error".to_string();
     /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(str_err: String) -> Box<dyn Error + 'a> {
         let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
@@ -668,12 +664,11 @@ impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_str_error = "a str error";
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     #[inline]
     fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
@@ -692,11 +687,10 @@ impl<'a> From<&str> for Box<dyn Error + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_str_error = "a str error";
     /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: &str) -> Box<dyn Error + 'a> {
         From::from(String::from(err))
@@ -712,13 +706,12 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     /// use std::borrow::Cow;
     ///
     /// let a_cow_str_error = Cow::from("a str error");
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
         From::from(String::from(err))
@@ -734,12 +727,11 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     /// use std::borrow::Cow;
     ///
     /// let a_cow_str_error = Cow::from("a str error");
     /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: Cow<'b, str>) -> Box<dyn Error + 'a> {
         From::from(String::from(err))
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index 78e5aec09b1..21425b9846e 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -9,9 +9,8 @@ use core::intrinsics::const_allocate;
 use core::marker::PhantomData;
 #[cfg(not(no_global_oom_handling))]
 use core::marker::Unsize;
-use core::mem;
 #[cfg(not(no_global_oom_handling))]
-use core::mem::SizedTypeProperties;
+use core::mem::{self, SizedTypeProperties};
 use core::ops::{Deref, DerefMut};
 use core::ptr::{self, NonNull, Pointee};
 
@@ -30,7 +29,6 @@ use crate::alloc::{self, Layout, LayoutError};
 /// let five = ThinBox::new(5);
 /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
 ///
-/// use std::mem::{size_of, size_of_val};
 /// let size_of_ptr = size_of::<*const ()>();
 /// assert_eq!(size_of_ptr, size_of_val(&five));
 /// assert_eq!(size_of_ptr, size_of_val(&thin_slice));
@@ -114,7 +112,7 @@ impl<Dyn: ?Sized> ThinBox<Dyn> {
     where
         T: Unsize<Dyn>,
     {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             let ptr = WithOpaqueHeader::new_unsize_zst::<Dyn, T>(value);
             ThinBox { ptr, _marker: PhantomData }
         } else {
@@ -283,9 +281,7 @@ impl<H> WithHeader<H> {
             let ptr = if layout.size() == 0 {
                 // Some paranoia checking, mostly so that the ThinBox tests are
                 // more able to catch issues.
-                debug_assert!(
-                    value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
-                );
+                debug_assert!(value_offset == 0 && size_of::<T>() == 0 && size_of::<H>() == 0);
                 layout.dangling()
             } else {
                 let ptr = alloc::alloc(layout);
@@ -315,7 +311,7 @@ impl<H> WithHeader<H> {
         Dyn: Pointee<Metadata = H> + ?Sized,
         T: Unsize<Dyn>,
     {
-        assert!(mem::size_of::<T>() == 0);
+        assert!(size_of::<T>() == 0);
         const fn max(a: usize, b: usize) -> usize {
             if a > b { a } else { b }
         }
@@ -329,18 +325,16 @@ impl<H> WithHeader<H> {
 
         // FIXME: just call `WithHeader::alloc_layout` with size reset to 0.
         // Currently that's blocked on `Layout::extend` not being `const fn`.
-        let alloc_align =
-            max(mem::align_of::<T>(), mem::align_of::<<Dyn as Pointee>::Metadata>());
+        let alloc_align = max(align_of::<T>(), align_of::<<Dyn as Pointee>::Metadata>());
 
-        let alloc_size =
-            max(mem::align_of::<T>(), mem::size_of::<<Dyn as Pointee>::Metadata>());
+        let alloc_size = max(align_of::<T>(), size_of::<<Dyn as Pointee>::Metadata>());
 
         unsafe {
             // SAFETY: align is power of two because it is the maximum of two alignments.
             let alloc: *mut u8 = const_allocate(alloc_size, alloc_align);
 
             let metadata_offset =
-                alloc_size.checked_sub(mem::size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
+                alloc_size.checked_sub(size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
             // SAFETY: adding offset within the allocation.
             let metadata_ptr: *mut <Dyn as Pointee>::Metadata =
                 alloc.add(metadata_offset).cast();
@@ -421,7 +415,7 @@ impl<H> WithHeader<H> {
     }
 
     const fn header_size() -> usize {
-        mem::size_of::<H>()
+        size_of::<H>()
     }
 
     fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> {
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index ecd009f11c7..7d1a2ea4809 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -92,8 +92,8 @@ fn test_partial_eq() {
 #[cfg(target_arch = "x86_64")]
 #[cfg_attr(any(miri, randomized_layouts), ignore)] // We'd like to run Miri with layout randomization
 fn test_sizes() {
-    assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
-    assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
-    assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
-    assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
+    assert_eq!(size_of::<LeafNode<(), ()>>(), 16);
+    assert_eq!(size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
+    assert_eq!(size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
+    assert_eq!(size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
 }
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index b80d1fc7889..70f32fbaab4 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -480,7 +480,7 @@ impl<A: Allocator> RawVecInner<A> {
 
         // Allocators currently return a `NonNull<[u8]>` whose length
         // matches the size requested. If that ever changes, the capacity
-        // here should change to `ptr.len() / mem::size_of::<T>()`.
+        // here should change to `ptr.len() / size_of::<T>()`.
         Ok(Self {
             ptr: Unique::from(ptr.cast()),
             cap: unsafe { Cap::new_unchecked(capacity) },
@@ -627,7 +627,7 @@ impl<A: Allocator> RawVecInner<A> {
     unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
         // Allocators currently return a `NonNull<[u8]>` whose length matches
         // the size requested. If that ever changes, the capacity here should
-        // change to `ptr.len() / mem::size_of::<T>()`.
+        // change to `ptr.len() / size_of::<T>()`.
         self.ptr = Unique::from(ptr.cast());
         self.cap = unsafe { Cap::new_unchecked(cap) };
     }
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
index d78ded104fb..700fa922739 100644
--- a/library/alloc/src/raw_vec/tests.rs
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -1,4 +1,3 @@
-use core::mem::size_of;
 use std::cell::Cell;
 
 use super::*;
@@ -93,7 +92,7 @@ fn zst_sanity<T>(v: &RawVec<T>) {
 fn zst() {
     let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
 
-    assert_eq!(std::mem::size_of::<ZST>(), 0);
+    assert_eq!(size_of::<ZST>(), 0);
 
     // All these different ways of creating the RawVec produce the same thing.
 
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index dcd95ddf00f..8baf9685062 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -16,7 +16,7 @@ use core::borrow::{Borrow, BorrowMut};
 #[cfg(not(no_global_oom_handling))]
 use core::cmp::Ordering::{self, Less};
 #[cfg(not(no_global_oom_handling))]
-use core::mem::{self, MaybeUninit};
+use core::mem::MaybeUninit;
 #[cfg(not(no_global_oom_handling))]
 use core::ptr;
 #[unstable(feature = "array_chunks", issue = "74985")]
@@ -446,7 +446,7 @@ impl<T> [T] {
         // Avoids binary-size usage in cases where the alignment doesn't work out to make this
         // beneficial or on 32-bit platforms.
         let is_using_u32_as_idx_type_helpful =
-            const { mem::size_of::<(K, u32)>() < mem::size_of::<(K, usize)>() };
+            const { size_of::<(K, u32)>() < size_of::<(K, usize)>() };
 
         // It's possible to instantiate this for u8 and u16 but, doing so is very wasteful in terms
         // of compile-times and binary-size, the peak saved heap memory for u16 is (u8 + u16) -> 4
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index f10ef1fca13..679c8eb12b4 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -119,8 +119,6 @@ use crate::vec::{self, Vec};
 /// the same `char`s:
 ///
 /// ```
-/// use std::mem;
-///
 /// // `s` is ASCII which represents each `char` as one byte
 /// let s = "hello";
 /// assert_eq!(s.len(), 5);
@@ -128,7 +126,7 @@ use crate::vec::{self, Vec};
 /// // A `char` array with the same contents would be longer because
 /// // every `char` is four bytes
 /// let s = ['h', 'e', 'l', 'l', 'o'];
-/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum();
 /// assert_eq!(size, 20);
 ///
 /// // However, for non-ASCII strings, the difference will be smaller
@@ -137,7 +135,7 @@ use crate::vec::{self, Vec};
 /// assert_eq!(s.len(), 20);
 ///
 /// let s = ['💖', '💖', '💖', '💖', '💖'];
-/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum();
 /// assert_eq!(size, 20);
 /// ```
 ///
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index dba1449347a..1956dda5388 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -2274,7 +2274,7 @@ impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
     #[inline]
     #[stable(feature = "arc_unique", since = "1.4.0")]
     pub fn make_mut(this: &mut Self) -> &mut T {
-        let size_of_val = mem::size_of_val::<T>(&**this);
+        let size_of_val = size_of_val::<T>(&**this);
 
         // Note that we hold both a strong reference and a weak reference.
         // Thus, releasing our strong reference only will not, by itself, cause
@@ -3544,7 +3544,7 @@ impl<T> Default for Arc<[T]> {
     /// This may or may not share an allocation with other Arcs.
     #[inline]
     fn default() -> Self {
-        if mem::align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
+        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
             // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
             // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
             // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index dffd85f13aa..b98a118048f 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -171,7 +171,7 @@ const fn in_place_collectible<DEST, SRC>(
 ) -> bool {
     // Require matching alignments because an alignment-changing realloc is inefficient on many
     // system allocators and better implementations would require the unstable Allocator trait.
-    if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+    if const { SRC::IS_ZST || DEST::IS_ZST || align_of::<SRC>() != align_of::<DEST>() } {
         return false;
     }
 
@@ -181,7 +181,7 @@ const fn in_place_collectible<DEST, SRC>(
             // e.g.
             // - 1 x [u8; 4] -> 4x u8, via flatten
             // - 4 x u8 -> 1x [u8; 4], via array_chunks
-            mem::size_of::<SRC>() * step_merge.get() >= mem::size_of::<DEST>() * step_expand.get()
+            size_of::<SRC>() * step_merge.get() >= size_of::<DEST>() * step_expand.get()
         }
         // Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
         // tracking.
@@ -190,7 +190,7 @@ const fn in_place_collectible<DEST, SRC>(
 }
 
 const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
-    if const { mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+    if const { align_of::<SRC>() != align_of::<DEST>() } {
         // FIXME(const-hack): use unreachable! once that works in const
         panic!("in_place_collectible() prevents this");
     }
@@ -199,8 +199,8 @@ const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
     // the caller will have calculated a `dst_cap` that is an integer multiple of
     // `src_cap` without remainder.
     if const {
-        let src_sz = mem::size_of::<SRC>();
-        let dest_sz = mem::size_of::<DEST>();
+        let src_sz = size_of::<SRC>();
+        let dest_sz = size_of::<DEST>();
         dest_sz != 0 && src_sz % dest_sz == 0
     } {
         return false;
@@ -208,7 +208,7 @@ const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
 
     // type layouts don't guarantee a fit, so do a runtime check to see if
     // the allocations happen to match
-    src_cap > 0 && src_cap * mem::size_of::<SRC>() != dst_cap * mem::size_of::<DEST>()
+    src_cap > 0 && src_cap * size_of::<SRC>() != dst_cap * size_of::<DEST>()
 }
 
 /// This provides a shorthand for the source type since local type aliases aren't a thing.
@@ -262,7 +262,7 @@ where
             inner.buf.cast::<T>(),
             inner.end as *const T,
             // SAFETY: the multiplication can not overflow, since `inner.cap * size_of::<I::SRC>()` is the size of the allocation.
-            inner.cap.unchecked_mul(mem::size_of::<I::Src>()) / mem::size_of::<T>(),
+            inner.cap.unchecked_mul(size_of::<I::Src>()) / size_of::<T>(),
         )
     };
 
@@ -310,14 +310,14 @@ where
         debug_assert_ne!(dst_cap, 0);
         unsafe {
             // The old allocation exists, therefore it must have a valid layout.
-            let src_align = mem::align_of::<I::Src>();
-            let src_size = mem::size_of::<I::Src>().unchecked_mul(src_cap);
+            let src_align = align_of::<I::Src>();
+            let src_size = size_of::<I::Src>().unchecked_mul(src_cap);
             let old_layout = Layout::from_size_align_unchecked(src_size, src_align);
 
             // The allocation must be equal or smaller for in-place iteration to be possible
             // therefore the new layout must be ≤ the old one and therefore valid.
-            let dst_align = mem::align_of::<T>();
-            let dst_size = mem::size_of::<T>().unchecked_mul(dst_cap);
+            let dst_align = align_of::<T>();
+            let dst_size = size_of::<T>().unchecked_mul(dst_cap);
             let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align);
 
             let result = alloc.shrink(dst_buf.cast(), old_layout, new_layout);
@@ -325,7 +325,7 @@ where
             dst_buf = reallocated.cast::<T>();
         }
     } else {
-        debug_assert_eq!(src_cap * mem::size_of::<I::Src>(), dst_cap * mem::size_of::<T>());
+        debug_assert_eq!(src_cap * size_of::<I::Src>(), dst_cap * size_of::<T>());
     }
 
     mem::forget(dst_guard);
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 701144cc3af..49878f2b6fa 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -293,7 +293,7 @@ mod spec_extend;
 /// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
 /// types inside a `Vec`, it will not allocate space for them. *Note that in this case
 /// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
-/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
+/// if <code>[size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
 /// details are very subtle --- if you intend to allocate memory using a `Vec`
 /// and use it for something else (either to pass to unsafe code, or to build your
 /// own memory-backed collection), be sure to deallocate this memory by using
@@ -393,7 +393,7 @@ mod spec_extend;
 /// [capacity]: Vec::capacity
 /// [`capacity`]: Vec::capacity
 /// [`Vec::capacity`]: Vec::capacity
-/// [mem::size_of::\<T>]: core::mem::size_of
+/// [size_of::\<T>]: size_of
 /// [len]: Vec::len
 /// [`len`]: Vec::len
 /// [`push`]: Vec::push
@@ -1573,7 +1573,7 @@ impl<T, A: Allocator> Vec<T, A> {
     pub const fn as_slice(&self) -> &[T] {
         // SAFETY: `slice::from_raw_parts` requires pointee is a contiguous, aligned buffer of size
         // `len` containing properly-initialized `T`s. Data must not be mutated for the returned
-        // lifetime. Further, `len * mem::size_of::<T>` <= `ISIZE::MAX`, and allocation does not
+        // lifetime. Further, `len * size_of::<T>` <= `isize::MAX`, and allocation does not
         // "wrap" through overflowing memory addresses.
         //
         // * Vec API guarantees that self.buf:
@@ -1605,7 +1605,7 @@ impl<T, A: Allocator> Vec<T, A> {
     pub const fn as_mut_slice(&mut self) -> &mut [T] {
         // SAFETY: `slice::from_raw_parts_mut` requires pointee is a contiguous, aligned buffer of
         // size `len` containing properly-initialized `T`s. Data must not be accessed through any
-        // other pointer for the returned lifetime. Further, `len * mem::size_of::<T>` <=
+        // other pointer for the returned lifetime. Further, `len * size_of::<T>` <=
         // `ISIZE::MAX` and allocation does not "wrap" through overflowing memory addresses.
         //
         // * Vec API guarantees that self.buf:
@@ -2693,7 +2693,7 @@ impl<T, A: Allocator> Vec<T, A> {
         let len = self.len;
 
         // SAFETY: The maximum capacity of `Vec<T>` is `isize::MAX` bytes, so the maximum value can
-        // be returned is `usize::checked_div(mem::size_of::<T>()).unwrap_or(usize::MAX)`, which
+        // be returned is `usize::checked_div(size_of::<T>()).unwrap_or(usize::MAX)`, which
         // matches the definition of `T::MAX_SLICE_LEN`.
         unsafe { intrinsics::assume(len <= T::MAX_SLICE_LEN) };
 
diff --git a/library/alloc/tests/arc.rs b/library/alloc/tests/arc.rs
index a259c0131ec..0baa50f439b 100644
--- a/library/alloc/tests/arc.rs
+++ b/library/alloc/tests/arc.rs
@@ -1,7 +1,6 @@
 use std::any::Any;
 use std::cell::{Cell, RefCell};
 use std::iter::TrustedLen;
-use std::mem;
 use std::sync::{Arc, Weak};
 
 #[test]
@@ -129,7 +128,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+        assert_eq!(size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, size_of_val(&*rc));
 
         // Clone a bit and let these get dropped.
         {
@@ -145,7 +144,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(0, mem::size_of_val(&*rc));
+        assert_eq!(0, size_of_val(&*rc));
         {
             let _rc_2 = rc.clone();
             let _rc_3 = rc.clone();
diff --git a/library/alloc/tests/rc.rs b/library/alloc/tests/rc.rs
index 451765d7242..9d82a7621a2 100644
--- a/library/alloc/tests/rc.rs
+++ b/library/alloc/tests/rc.rs
@@ -1,7 +1,6 @@
 use std::any::Any;
 use std::cell::{Cell, RefCell};
 use std::iter::TrustedLen;
-use std::mem;
 use std::rc::{Rc, Weak};
 
 #[test]
@@ -125,7 +124,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+        assert_eq!(size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, size_of_val(&*rc));
 
         // Clone a bit and let these get dropped.
         {
@@ -141,7 +140,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(0, mem::size_of_val(&*rc));
+        assert_eq!(0, size_of_val(&*rc));
         {
             let _rc_2 = rc.clone();
             let _rc_3 = rc.clone();
diff --git a/library/alloc/tests/slice.rs b/library/alloc/tests/slice.rs
index f990a41b679..2516563187f 100644
--- a/library/alloc/tests/slice.rs
+++ b/library/alloc/tests/slice.rs
@@ -1,7 +1,7 @@
 use std::cmp::Ordering::{Equal, Greater, Less};
 use std::convert::identity;
 use std::rc::Rc;
-use std::{fmt, mem, panic};
+use std::{fmt, panic};
 
 fn square(n: usize) -> usize {
     n * n
@@ -73,7 +73,7 @@ fn test_len_divzero() {
     let v0: &[Z] = &[];
     let v1: &[Z] = &[[]];
     let v2: &[Z] = &[[], []];
-    assert_eq!(mem::size_of::<Z>(), 0);
+    assert_eq!(size_of::<Z>(), 0);
     assert_eq!(v0.len(), 0);
     assert_eq!(v1.len(), 1);
     assert_eq!(v2.len(), 2);
diff --git a/library/alloc/tests/sort/known_good_stable_sort.rs b/library/alloc/tests/sort/known_good_stable_sort.rs
index f8615435fc2..2df89146253 100644
--- a/library/alloc/tests/sort/known_good_stable_sort.rs
+++ b/library/alloc/tests/sort/known_good_stable_sort.rs
@@ -5,7 +5,7 @@
 // Based on https://github.com/voultapher/tiny-sort-rs.
 
 use alloc::alloc::{Layout, alloc, dealloc};
-use std::{mem, ptr};
+use std::ptr;
 
 /// Sort `v` preserving initial order of equal elements.
 ///
@@ -26,7 +26,7 @@ pub fn sort<T: Ord>(v: &mut [T]) {
 
 #[inline(always)]
 fn stable_sort<T, F: FnMut(&T, &T) -> bool>(v: &mut [T], mut is_less: F) {
-    if mem::size_of::<T>() == 0 {
+    if size_of::<T>() == 0 {
         return;
     }
 
@@ -166,7 +166,7 @@ struct BufGuard<T> {
 impl<T> BufGuard<T> {
     // SAFETY: The caller has to ensure that len is not 0 and that T is not a ZST.
     unsafe fn new(len: usize) -> Self {
-        debug_assert!(len > 0 && mem::size_of::<T>() > 0);
+        debug_assert!(len > 0 && size_of::<T>() > 0);
 
         // SAFETY: See function safety description.
         let layout = unsafe { unwrap_unchecked(Layout::array::<T>(len).ok()) };
diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
index e008b0cc357..4c46b614127 100644
--- a/library/alloc/tests/thin_box.rs
+++ b/library/alloc/tests/thin_box.rs
@@ -1,5 +1,4 @@
 use core::fmt::Debug;
-use core::mem::size_of;
 use std::boxed::ThinBox;
 
 #[test]
@@ -52,7 +51,7 @@ fn verify_aligned<T>(ptr: *const T) {
         ptr.is_aligned() && !ptr.is_null(),
         "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
         ty = core::any::type_name::<T>(),
-        align = core::mem::align_of::<T>(),
+        align = align_of::<T>(),
     );
 }
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index fe1db56414e..f430d979fa8 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -11,14 +11,14 @@ use std::borrow::Cow;
 use std::cell::Cell;
 use std::collections::TryReserveErrorKind::*;
 use std::fmt::Debug;
+use std::hint;
 use std::iter::InPlaceIterable;
-use std::mem::{size_of, swap};
+use std::mem::swap;
 use std::ops::Bound::*;
 use std::panic::{AssertUnwindSafe, catch_unwind};
 use std::rc::Rc;
 use std::sync::atomic::{AtomicU32, Ordering};
 use std::vec::{Drain, IntoIter};
-use std::{hint, mem};
 
 struct DropCounter<'a> {
     count: &'a mut u32,
@@ -1134,7 +1134,7 @@ fn test_into_iter_zst() {
     impl Drop for AlignedZstWithDrop {
         fn drop(&mut self) {
             let addr = self as *mut _ as usize;
-            assert!(hint::black_box(addr) % mem::align_of::<u64>() == 0);
+            assert!(hint::black_box(addr) % align_of::<u64>() == 0);
         }
     }
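The change above is mechanical: `size_of`, `size_of_val`, `align_of`, and `align_of_val` have been re-exported from the standard prelude since Rust 1.80, so the `mem::`-qualified paths and the corresponding imports are redundant. A minimal sketch of the pattern (illustrative only, not part of the diff; assumes a Rust 1.80+ toolchain):

fn main() {
    // Previously: std::mem::size_of::<u64>()
    assert_eq!(size_of::<u64>(), 8);

    // Previously: std::mem::size_of_val(v.as_slice())
    let v = vec![0u32; 4];
    assert_eq!(size_of_val(v.as_slice()), 16);

    // Previously: std::mem::align_of::<u8>()
    assert_eq!(align_of::<u8>(), 1);
}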
