author     Jubilee Young <workingjubilee@gmail.com>  2023-05-30 00:34:50 -0700
committer  Jubilee Young <workingjubilee@gmail.com>  2023-05-30 00:40:39 -0700
commit     472230d192b8bace6ddbe825d68ccd27d0d5ef1d (patch)
tree       a4f10140d7833692e52bf4425f417fdff61ca256
parent     165cddafe9449e090fdef2686045385b55a87329 (diff)
Remove array_zip

`[T; N]::zip` is "eager", but most zips are immediately mapped: the
method builds a whole array of pairs before any `map` can run, which
leads to poor optimization in the generated code. This is a fundamental
design issue, and "zip" is "prime real estate" in terms of function
names, so let's free it up again.
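
For context, this is the shape of code the message refers to; a minimal sketch, assuming the nightly `array_zip` feature that this commit removes (function names are illustrative, not taken from the tree):

#![feature(array_zip)] // nightly feature removed by this commit

// Eager form: `zip` materializes a full [(u32, u32); 8] of pairs,
// and only then does `map` consume it.
pub fn zip_then_map(x: [u32; 8], y: [u32; 8]) -> [u32; 8] {
    x.zip(y).map(|(a, b)| a - b)
}

// A stable alternative with no intermediate array of pairs:
// index both inputs directly while building the result.
pub fn indexed(x: [u32; 8], y: [u32; 8]) -> [u32; 8] {
    std::array::from_fn(|i| x[i] - y[i])
}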
-rw-r--r--  library/core/src/array/mod.rs          23
-rw-r--r--  tests/codegen/array-map.rs             11
-rw-r--r--  tests/codegen/autovectorize-f32x4.rs   11
3 files changed, 0 insertions, 45 deletions
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index fec92320a4b..76b3589b9e4 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -538,29 +538,6 @@ impl<T, const N: usize> [T; N] {
         drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f)))
     }
 
-    /// 'Zips up' two arrays into a single array of pairs.
-    ///
-    /// `zip()` returns a new array where every element is a tuple where the
-    /// first element comes from the first array, and the second element comes
-    /// from the second array. In other words, it zips two arrays together,
-    /// into a single one.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(array_zip)]
-    /// let x = [1, 2, 3];
-    /// let y = [4, 5, 6];
-    /// let z = x.zip(y);
-    /// assert_eq!(z, [(1, 4), (2, 5), (3, 6)]);
-    /// ```
-    #[unstable(feature = "array_zip", issue = "80094")]
-    pub fn zip<U>(self, rhs: [U; N]) -> [(T, U); N] {
-        drain_array_with(self, |lhs| {
-            drain_array_with(rhs, |rhs| from_trusted_iterator(crate::iter::zip(lhs, rhs)))
-        })
-    }
-
     /// Returns a slice containing the entire array. Equivalent to `&s[..]`.
     #[stable(feature = "array_as_slice", since = "1.57.0")]
     #[rustc_const_stable(feature = "array_as_slice", since = "1.57.0")]
diff --git a/tests/codegen/array-map.rs b/tests/codegen/array-map.rs
index 3706ddf99fd..24f3f43d078 100644
--- a/tests/codegen/array-map.rs
+++ b/tests/codegen/array-map.rs
@@ -4,7 +4,6 @@
 // ignore-debug (the extra assertions get in the way)
 
 #![crate_type = "lib"]
-#![feature(array_zip)]
 
 // CHECK-LABEL: @short_integer_map
 #[no_mangle]
@@ -16,16 +15,6 @@ pub fn short_integer_map(x: [u32; 8]) -> [u32; 8] {
     x.map(|x| 2 * x + 1)
 }
 
-// CHECK-LABEL: @short_integer_zip_map
-#[no_mangle]
-pub fn short_integer_zip_map(x: [u32; 8], y: [u32; 8]) -> [u32; 8] {
-    // CHECK: %[[A:.+]] = load <8 x i32>
-    // CHECK: %[[B:.+]] = load <8 x i32>
-    // CHECK: sub <8 x i32> %[[B]], %[[A]]
-    // CHECK: store <8 x i32>
-    x.zip(y).map(|(x, y)| x - y)
-}
-
 // This test is checking that LLVM can SRoA away a bunch of the overhead,
 // like fully moving the iterators to registers.  Notably, previous implementations
 // of `map` ended up `alloca`ing the whole `array::IntoIterator`, meaning both a
diff --git a/tests/codegen/autovectorize-f32x4.rs b/tests/codegen/autovectorize-f32x4.rs
index 9ecea53f1c0..474ff1c4e91 100644
--- a/tests/codegen/autovectorize-f32x4.rs
+++ b/tests/codegen/autovectorize-f32x4.rs
@@ -1,7 +1,6 @@
 // compile-flags: -C opt-level=3 -Z merge-functions=disabled
 // only-x86_64
 #![crate_type = "lib"]
-#![feature(array_zip)]
 
 // CHECK-LABEL: @auto_vectorize_direct
 #[no_mangle]
@@ -31,13 +30,3 @@ pub fn auto_vectorize_loop(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
     }
     c
 }
-
-// CHECK-LABEL: @auto_vectorize_array_zip_map
-#[no_mangle]
-pub fn auto_vectorize_array_zip_map(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
-// CHECK: load <4 x float>
-// CHECK: load <4 x float>
-// CHECK: fadd <4 x float>
-// CHECK: store <4 x float>
-    a.zip(b).map(|(a, b)| a + b)
-}