author    bors <bors@rust-lang.org>  2022-05-14 03:12:53 +0000
committer bors <bors@rust-lang.org>  2022-05-14 03:12:53 +0000
commit    9fbbe75fd73ed3c202cec3b0ba51eadf506f03fe (patch)
tree      370bb66caa2dcab24871a888d08ecfe70fdfe8d4 /src/test/codegen
parent    f1f721e64014863f41c1a386b04af04c2de25321 (diff)
parent    e8fc7ba6a756628b3226e002e3f3e54ddf14fa04 (diff)
Auto merge of #95602 - scottmcm:faster-array-intoiter-fold, r=the8472
Fix `array::IntoIter::fold` to use the optimized `Range::fold`

The implementation was going through `Iterator::by_ref`, which pessimized it enough that, for example, the reduction didn't vectorize when we tried it in the <https://rust-lang.zulipchat.com/#narrow/stream/257879-project-portable-simd/topic/Reducing.20sum.20into.20wider.20types> conversation.
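
For context, here is a rough sketch of why the delegation matters. The types and field names are hypothetical simplifications, not the actual `core::array::IntoIter` code (which stores `MaybeUninit<T>` elements plus an `alive` index range and handles drops): pulling elements one at a time through `next()`, as the `by_ref`-based fold effectively did, hides the loop bounds from LLVM, while forwarding to the inner index range's `fold` keeps the tight counted loop that auto-vectorizes.

    use std::ops::Range;

    // Hypothetical, simplified stand-in for array::IntoIter; only a sketch.
    struct ArrayIntoIter<T, const N: usize> {
        data: [Option<T>; N],   // the real type uses MaybeUninit<T> storage
        alive: Range<usize>,    // indices of elements not yet yielded
    }

    impl<T, const N: usize> ArrayIntoIter<T, N> {
        // Shape of the old behavior: drive everything through `next()`,
        // so the optimizer sees a loop with an opaque trip count.
        fn fold_via_next<B>(mut self, init: B, mut f: impl FnMut(B, T) -> B) -> B {
            let mut acc = init;
            while let Some(i) = self.alive.next() {
                acc = f(acc, self.data[i].take().unwrap());
            }
            acc
        }

        // Shape of the fix: delegate to the inner `Range`'s `fold`, whose
        // simple counted loop LLVM optimizes (and vectorizes) well.
        fn fold_via_range<B>(mut self, init: B, mut f: impl FnMut(B, T) -> B) -> B {
            let data = &mut self.data;
            self.alive.fold(init, move |acc, i| f(acc, data[i].take().unwrap()))
        }
    }

The real implementation also has to manage the `MaybeUninit` storage and drop handling, which the sketch elides; it only illustrates the shape of the change.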

Demonstration that the codegen test doesn't pass on the current nightly: <https://rust.godbolt.org/z/Taxev5eMn>
Diffstat (limited to 'src/test/codegen')
-rw-r--r-- src/test/codegen/simd-wide-sum.rs | 54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/src/test/codegen/simd-wide-sum.rs b/src/test/codegen/simd-wide-sum.rs
new file mode 100644
index 00000000000..fde9b0fcd8a
--- /dev/null
+++ b/src/test/codegen/simd-wide-sum.rs
@@ -0,0 +1,54 @@
+// compile-flags: -C opt-level=3 --edition=2021
+// only-x86_64
+// ignore-debug: the debug assertions get in the way
+
+#![crate_type = "lib"]
+#![feature(portable_simd)]
+
+use std::simd::Simd;
+const N: usize = 8;
+
+#[no_mangle]
+// CHECK-LABEL: @wider_reduce_simd
+pub fn wider_reduce_simd(x: Simd<u8, N>) -> u16 {
+    // CHECK: zext <8 x i8>
+    // CHECK-SAME: to <8 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    let x: Simd<u16, N> = x.cast();
+    x.reduce_sum()
+}
+
+#[no_mangle]
+// CHECK-LABEL: @wider_reduce_loop
+pub fn wider_reduce_loop(x: Simd<u8, N>) -> u16 {
+    // CHECK: zext <8 x i8>
+    // CHECK-SAME: to <8 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    let mut sum = 0_u16;
+    for i in 0..N {
+        sum += u16::from(x[i]);
+    }
+    sum
+}
+
+#[no_mangle]
+// CHECK-LABEL: @wider_reduce_iter
+pub fn wider_reduce_iter(x: Simd<u8, N>) -> u16 {
+    // CHECK: zext <8 x i8>
+    // CHECK-SAME: to <8 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    x.as_array().iter().copied().map(u16::from).sum()
+}
+
+// This iterator one is the most interesting, as it's the one
+// which used to not auto-vectorize due to a suboptimality in the
+// `<array::IntoIter as Iterator>::fold` implementation.
+
+#[no_mangle]
+// CHECK-LABEL: @wider_reduce_into_iter
+pub fn wider_reduce_into_iter(x: Simd<u8, N>) -> u16 {
+    // CHECK: zext <8 x i8>
+    // CHECK-SAME: to <8 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    x.to_array().into_iter().map(u16::from).sum()
+}