about summary refs log tree commit diff
diff options
context:
space:
mode:
author	bors <bors@rust-lang.org>	2023-10-06 08:04:53 +0000
committer	bors <bors@rust-lang.org>	2023-10-06 08:04:53 +0000
commit	d4ba2b4c7c938cf90c03a265cb31356537f608ad (patch)
tree	a23f402b29354f4c6b415c0bd0f7c68b33a2cf96
parent	ff057893b8a2f20de2ff258ed8690f5a42fed78a (diff)
parent	e300847864faf93538354b418da0a1612efd36f4 (diff)
download	rust-d4ba2b4c7c938cf90c03a265cb31356537f608ad.tar.gz
download	rust-d4ba2b4c7c938cf90c03a265cb31356537f608ad.zip
Auto merge of #116018 - DianQK:simd-wide-sum-test, r=scottmcm
Increasing the SIMD size improves the vectorization possibilities

Change the `simd-wide-sum.rs` to pass tests based on the LLVM main branch.

For smaller lengths, we cannot expect to always get vectorized.

A related discussion at https://rust-lang.zulipchat.com/#narrow/stream/187780-t-compiler.2Fwg-llvm/topic/LLVM.20HEAD.3A.20codegen.2Fsimd.2Fsimd-wide-sum.2Ers.20newly.20failing.

r? scottmcm
-rw-r--r--	tests/codegen/simd/simd-wide-sum.rs	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/tests/codegen/simd/simd-wide-sum.rs b/tests/codegen/simd/simd-wide-sum.rs
index 3116f9597bc..6e7d3d9316a 100644
--- a/tests/codegen/simd/simd-wide-sum.rs
+++ b/tests/codegen/simd/simd-wide-sum.rs
@@ -11,14 +11,14 @@
 #![feature(portable_simd)]
 
 use std::simd::{Simd, SimdUint};
-const N: usize = 8;
+const N: usize = 16;
 
 #[no_mangle]
 // CHECK-LABEL: @wider_reduce_simd
 pub fn wider_reduce_simd(x: Simd<u8, N>) -> u16 {
-    // CHECK: zext <8 x i8>
-    // CHECK-SAME: to <8 x i16>
-    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    // CHECK: zext <16 x i8>
+    // CHECK-SAME: to <16 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16>
     let x: Simd<u16, N> = x.cast();
     x.reduce_sum()
 }
@@ -26,9 +26,9 @@ pub fn wider_reduce_simd(x: Simd<u8, N>) -> u16 {
 #[no_mangle]
 // CHECK-LABEL: @wider_reduce_loop
 pub fn wider_reduce_loop(x: Simd<u8, N>) -> u16 {
-    // CHECK: zext <8 x i8>
-    // CHECK-SAME: to <8 x i16>
-    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    // CHECK: zext <16 x i8>
+    // CHECK-SAME: to <16 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16>
     let mut sum = 0_u16;
     for i in 0..N {
         sum += u16::from(x[i]);
@@ -39,9 +39,9 @@ pub fn wider_reduce_loop(x: Simd<u8, N>) -> u16 {
 #[no_mangle]
 // CHECK-LABEL: @wider_reduce_iter
 pub fn wider_reduce_iter(x: Simd<u8, N>) -> u16 {
-    // CHECK: zext <8 x i8>
-    // CHECK-SAME: to <8 x i16>
-    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    // CHECK: zext <16 x i8>
+    // CHECK-SAME: to <16 x i16>
+    // CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16>
     x.as_array().iter().copied().map(u16::from).sum()
 }
 
@@ -52,8 +52,8 @@ pub fn wider_reduce_iter(x: Simd<u8, N>) -> u16 {
 #[no_mangle]
 // CHECK-LABEL: @wider_reduce_into_iter
 pub fn wider_reduce_into_iter(x: Simd<u8, N>) -> u16 {
-    // CHECK: zext <8 x i8>
-    // CHECK-SAME: to <8 x i16>
-    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
+    // FIXME: It would be nice if this was exactly the same as the above tests,
+    // but at the time of writing this comment, that didn't happen on LLVM main.
+    // CHECK: call i16 @llvm.vector.reduce.add
     x.to_array().into_iter().map(u16::from).sum()
 }