author     Matthias Krüger <matthias.krueger@famsik.de>  2024-02-17 18:47:40 +0100
committer  GitHub <noreply@github.com>                   2024-02-17 18:47:40 +0100
commit     59972868e63a24034a5d8379c8ca4146a72b7373 (patch)
tree       1315cd321372f52e3e48053f9cd5871f1b0eb478
parent     eeeb021954e30a6b16fc12cc4b8a1bcf7910a40e (diff)
parent     7c2db703b0b56cede3d8ce264e9d1fcdb1397f94 (diff)
Rollup merge of #120952 - saethlin:vec-into-iter, r=the8472
Don't use mem::zeroed in vec::IntoIter

`mem::zeroed` is not a trivial function. Maybe it was once, but now it involves multiple locals, copies, and an intrinsic that gets monomorphized into a call to `panic_nounwind` for iterators of types like `Vec<&T>`. All of that complexity is trivially optimized out, of course, but generating a pile of IR we don't need, just so it can be optimized away later, is silly.
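
For illustration only (this sketch is not part of the patch): `mem::zeroed` goes through the `assert_zero_valid` intrinsic, which is a no-op for types where the all-zero bit pattern is valid but lowers to a `panic_nounwind` call for types like references. The old ZST branch only ever hit the harmless case, yet the validity-check IR was still generated for every instantiation before the optimizer removed it:

    use std::mem;

    fn main() {
        // Valid: `()` is a ZST, so "all zero bytes" is trivially a valid
        // value and the check compiles away.
        let _unit: () = unsafe { mem::zeroed() };

        // Invalid: references are non-null, so the all-zero bit pattern is
        // not a valid `&u8`. This line would abort at runtime via
        // `panic_nounwind` rather than return; it is commented out so the
        // sketch runs:
        // let _r: &u8 = unsafe { mem::zeroed() };
    }
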
-rw-r--r--  library/alloc/src/vec/into_iter.rs  63
-rw-r--r--  tests/codegen/vec-iter.rs           17
2 files changed, 44 insertions(+), 36 deletions(-)
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 63d8fe19ac3..dfd42ca0619 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -11,7 +11,7 @@ use core::iter::{
     TrustedRandomAccessNoCoerce,
 };
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::num::NonZero;
 #[cfg(not(no_global_oom_handling))]
 use core::ops::Deref;
@@ -200,27 +200,23 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
 
     #[inline]
     fn next(&mut self) -> Option<T> {
-        if T::IS_ZST {
-            if self.ptr.as_ptr() == self.end as *mut _ {
-                None
-            } else {
-                // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
-                // reducing the `end`.
-                self.end = self.end.wrapping_byte_sub(1);
-
-                // Make up a value of this ZST.
-                Some(unsafe { mem::zeroed() })
+        let ptr = if T::IS_ZST {
+            if self.ptr.as_ptr() == self.end as *mut T {
+                return None;
             }
+            // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
+            // reducing the `end`.
+            self.end = self.end.wrapping_byte_sub(1);
+            self.ptr
         } else {
             if self.ptr == non_null!(self.end, T) {
-                None
-            } else {
-                let old = self.ptr;
-                self.ptr = unsafe { old.add(1) };
-
-                Some(unsafe { ptr::read(old.as_ptr()) })
+                return None;
             }
-        }
+            let old = self.ptr;
+            self.ptr = unsafe { old.add(1) };
+            old
+        };
+        Some(unsafe { ptr.read() })
     }
 
     #[inline]
@@ -305,7 +301,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
         // Also note the implementation of `Self: TrustedRandomAccess` requires
         // that `T: Copy` so reading elements from the buffer doesn't invalidate
         // them for `Drop`.
-        unsafe { if T::IS_ZST { mem::zeroed() } else { self.ptr.add(i).read() } }
+        unsafe { self.ptr.add(i).read() }
     }
 }
 
@@ -314,23 +310,22 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
     #[inline]
     fn next_back(&mut self) -> Option<T> {
         if T::IS_ZST {
-            if self.end as *mut _ == self.ptr.as_ptr() {
-                None
-            } else {
-                // See above for why 'ptr.offset' isn't used
-                self.end = self.end.wrapping_byte_sub(1);
-
-                // Make up a value of this ZST.
-                Some(unsafe { mem::zeroed() })
+            if self.ptr.as_ptr() == self.end as *mut _ {
+                return None;
             }
+            // See above for why 'ptr.offset' isn't used
+            self.end = self.end.wrapping_byte_sub(1);
+            // Note that even though this is next_back() we're reading from `self.ptr`, not
+            // `self.end`. We track our length using the byte offset from `self.ptr` to `self.end`,
+            // so the end pointer may not be suitably aligned for T.
+            Some(unsafe { ptr::read(self.ptr.as_ptr()) })
         } else {
-            if non_null!(self.end, T) == self.ptr {
-                None
-            } else {
-                let new_end = unsafe { non_null!(self.end, T).sub(1) };
-                *non_null!(mut self.end, T) = new_end;
-
-                Some(unsafe { ptr::read(new_end.as_ptr()) })
+            if self.ptr == non_null!(self.end, T) {
+                return None;
+            }
+            unsafe {
+                self.end = self.end.sub(1);
+                Some(ptr::read(self.end))
             }
         }
     }
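
To make the ZST bookkeeping above concrete, here is a minimal standalone sketch (illustrative names, not the library code) of the scheme the new `next` relies on: `ptr` never moves, the remaining length is the byte distance from `ptr` to `end`, and each element is conjured by a zero-byte read from the always-aligned `ptr` instead of `mem::zeroed`:

    use std::ptr::NonNull;

    struct ZstIter<T> {
        ptr: NonNull<T>, // stays put for ZSTs, so it stays aligned
        end: *const T,   // for ZSTs: `ptr`'s address plus remaining length, in bytes
    }

    impl<T> ZstIter<T> {
        fn new(len: usize) -> Self {
            assert_eq!(std::mem::size_of::<T>(), 0, "sketch only handles ZSTs");
            let ptr = NonNull::<T>::dangling();
            ZstIter { ptr, end: ptr.as_ptr().wrapping_byte_add(len) }
        }

        fn next(&mut self) -> Option<T> {
            if self.ptr.as_ptr() == self.end as *mut T {
                return None;
            }
            // Shrink the length by pulling `end` back one *byte*; `end` may
            // therefore be unaligned for T, which is why we never read from it.
            self.end = self.end.wrapping_byte_sub(1);
            // A ZST read is zero bytes from an aligned pointer: sound here,
            // and no validity-check IR is emitted.
            Some(unsafe { self.ptr.as_ptr().read() })
        }
    }

    fn main() {
        let mut it = ZstIter::<()>::new(3);
        assert_eq!(std::iter::from_fn(|| it.next()).count(), 3);
    }

With this shape, the ZST and non-ZST arms of the real `next` both end by handing a pointer to the same `Some(unsafe { ptr.read() })` tail, which is what lets the patch collapse the two `Some(...)` expressions into one.
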
diff --git a/tests/codegen/vec-iter.rs b/tests/codegen/vec-iter.rs
index 0282791e9d1..4e206858751 100644
--- a/tests/codegen/vec-iter.rs
+++ b/tests/codegen/vec-iter.rs
@@ -32,9 +32,9 @@ pub fn vec_iter_is_empty_nonnull(it: &vec::IntoIter<u8>) -> bool {
     it.is_empty()
 }
 
-// CHECK-LABEL: @vec_iter_next
+// CHECK-LABEL: @vec_iter_next_nonnull
 #[no_mangle]
-pub fn vec_iter_next(it: &mut vec::IntoIter<u8>) -> Option<u8> {
+pub fn vec_iter_next_nonnull(it: &mut vec::IntoIter<u8>) -> Option<u8> {
     // CHECK: load ptr
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -44,3 +44,16 @@ pub fn vec_iter_next(it: &mut vec::IntoIter<u8>) -> Option<u8> {
     // CHECK: ret
     it.next()
 }
+
+// CHECK-LABEL: @vec_iter_next_back_nonnull
+#[no_mangle]
+pub fn vec_iter_next_back_nonnull(it: &mut vec::IntoIter<u8>) -> Option<u8> {
+    // CHECK: load ptr
+    // CHECK-SAME: !nonnull
+    // CHECK-SAME: !noundef
+    // CHECK: load ptr
+    // CHECK-SAME: !nonnull
+    // CHECK-SAME: !noundef
+    // CHECK: ret
+    it.next_back()
+}
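
A usage note on the test above: files under `tests/codegen` compile with `rustc` and feed the emitted LLVM IR through FileCheck, so these `CHECK` lines pin down that both pointer loads in `next_back` retain their `!nonnull` and `!noundef` metadata now that the `mem::zeroed` path is gone. In a rust-lang/rust checkout this single test can typically be run with `./x.py test tests/codegen/vec-iter.rs`.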