about summary refs log tree commit diff
path: root/tests/codegen/array-map.rs
diff options
context:
space:
mode:
author: Scott McMurray <scottmcm@users.noreply.github.com> 2023-02-02 20:58:22 -0800
committer: Scott McMurray <scottmcm@users.noreply.github.com> 2023-02-04 16:41:35 -0800
commit: 5a7342c3dde43c96a71bc27995030896342761f6 (patch)
tree: 09b68957ad641e2b8c5c47ca03eda6f3345bbd59 /tests/codegen/array-map.rs
parent: 50d3ba5bcbf5c7e13d4ce068d3339710701dd603 (diff)
download: rust-5a7342c3dde43c96a71bc27995030896342761f6.tar.gz
rust-5a7342c3dde43c96a71bc27995030896342761f6.zip
Stop using `into_iter` in `array::map`
Diffstat (limited to 'tests/codegen/array-map.rs')
-rw-r--r-- tests/codegen/array-map.rs  48
1 file changed, 48 insertions, 0 deletions
diff --git a/tests/codegen/array-map.rs b/tests/codegen/array-map.rs
new file mode 100644
index 00000000000..37585371a32
--- /dev/null
+++ b/tests/codegen/array-map.rs
@@ -0,0 +1,48 @@
+// compile-flags: -C opt-level=3 -C target-cpu=x86-64-v3 -C llvm-args=-x86-asm-syntax=intel --emit=llvm-ir,asm
+// no-system-llvm
+// only-x86_64
+// ignore-debug (the extra assertions get in the way)
+
+#![crate_type = "lib"]
+#![feature(array_zip)]
+
+// CHECK-LABEL: @short_integer_map
+#[no_mangle]
+pub fn short_integer_map(x: [u32; 8]) -> [u32; 8] {
+    // CHECK: load <8 x i32>
+    // CHECK: shl <8 x i32>
+    // CHECK: or <8 x i32>
+    // CHECK: store <8 x i32>
+    x.map(|x| 2 * x + 1)
+}
+
+// CHECK-LABEL: @short_integer_zip_map
+#[no_mangle]
+pub fn short_integer_zip_map(x: [u32; 8], y: [u32; 8]) -> [u32; 8] {
+    // CHECK: %[[A:.+]] = load <8 x i32>
+    // CHECK: %[[B:.+]] = load <8 x i32>
+    // CHECK: sub <8 x i32> %[[A]], %[[B]]
+    // CHECK: store <8 x i32>
+    x.zip(y).map(|(x, y)| x - y)
+}
+
+// This test is checking that LLVM can SRoA away a bunch of the overhead,
+// like fully moving the iterators to registers.  Notably, previous implementations
+// of `map` ended up `alloca`ing the whole `array::IntoIterator`, meaning both a
+// hard-to-eliminate `memcpy` and that the iteration counts needed to be written
+// out to stack every iteration, even for infallible operations on `Copy` types.
+//
+// This is still imperfect, as there's more copies than would be ideal,
+// but hopefully work like #103830 will improve that in future,
+// and update this test to be stricter.
+//
+// CHECK-LABEL: @long_integer_map
+#[no_mangle]
+pub fn long_integer_map(x: [u32; 64]) -> [u32; 64] {
+    // CHECK: start:
+    // CHECK-NEXT: alloca [{{64|65}} x i32]
+    // CHECK-NEXT: alloca [{{64|65}} x i32]
+    // CHECK-NEXT: alloca %"core::mem::manually_drop::ManuallyDrop<[u32; 64]>"
+    // CHECK-NOT: alloca
+    x.map(|x| 2 * x + 1)
+}