| author | bors <bors@rust-lang.org> | 2019-01-24 13:11:06 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2019-01-24 13:11:06 +0000 |
| commit | 095b44c83b540bb4dbf74be1cae604f4bae87989 (patch) | |
| tree | 660a01091b2a50604735d6d3ddaa862dadb238c9 /src/test/codegen | |
| parent | f23b63cbbf8d92deac6b3c39746d3b85d3e838af (diff) | |
| parent | 785f529d6e91b787d94b44726a5d6018e8fe181b (diff) | |
Auto merge of #57269 - gnzlbg:simd_bitmask, r=rkruppe
Add intrinsic to create an integer bitmask from a vector mask

This PR adds a new SIMD intrinsic, `simd_bitmask(vector) -> unsigned integer`, which creates an integer bitmask from a vector mask by extracting one bit from each vector lane. This is required to implement https://github.com/rust-lang-nursery/packed_simd/issues/166.

EDIT: the reason we need an intrinsic for this is that we have to truncate the vector lanes to an `<i1 x N>` vector and then bitcast that to an `iN` integer (while making sure that we only materialize `i8`, ..., `i64` integers, that is, no `i1`, `i2`, or `i4` types), and we cannot do any of that in a Rust library.

r? @rkruppe
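For intuition, here is a minimal scalar sketch of the semantics described above; it is not part of this PR. It assumes a 2-lane `i32` mask whose lanes are all-zeros or all-ones (as produced by SIMD comparisons) and that lane 0 maps to the least-significant bit of the result, matching the little-endian `bitcast` in the codegen test below. The helper name `bitmask_i32x2_ref` is purely illustrative.

```rust
// Scalar reference sketch of the `simd_bitmask` semantics described above.
// Assumptions (not taken from this PR): lanes are all-zeros/all-ones mask
// values, and lane 0 maps to the least-significant bit of the result.
fn bitmask_i32x2_ref(lanes: [i32; 2]) -> u8 {
    let mut mask = 0u8;
    for (i, lane) in lanes.iter().enumerate() {
        // Extract one bit per lane: the sign (most significant) bit
        // carries the lane's truth value.
        mask |= ((((*lane as u32) >> 31) & 1) as u8) << i;
    }
    mask
}

fn main() {
    assert_eq!(bitmask_i32x2_ref([!0, 0]), 0b01); // [true, false]
    assert_eq!(bitmask_i32x2_ref([0, !0]), 0b10); // [false, true]
}
```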
Diffstat (limited to 'src/test/codegen')
| -rw-r--r-- | src/test/codegen/simd-intrinsic-generic-bitmask.rs | 57 |
1 file changed, 57 insertions, 0 deletions
diff --git a/src/test/codegen/simd-intrinsic-generic-bitmask.rs b/src/test/codegen/simd-intrinsic-generic-bitmask.rs
new file mode 100644
index 00000000000..cd8130f9231
--- /dev/null
+++ b/src/test/codegen/simd-intrinsic-generic-bitmask.rs
@@ -0,0 +1,57 @@
+// compile-flags: -C no-prepopulate-passes
+// ignore-tidy-linelength
+
+#![crate_type = "lib"]
+
+#![feature(repr_simd, platform_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+pub struct u32x2(u32, u32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+pub struct i32x2(i32, i32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+pub struct i8x16(
+    i8, i8, i8, i8, i8, i8, i8, i8,
+    i8, i8, i8, i8, i8, i8, i8, i8,
+);
+
+
+extern "platform-intrinsic" {
+    fn simd_bitmask<T, U>(x: T) -> U;
+}
+
+// CHECK-LABEL: @bitmask_int
+#[no_mangle]
+pub unsafe fn bitmask_int(x: i32x2) -> u8 {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{[0-9]+}}, <i32 31, i32 31>
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2
+    // CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8
+    simd_bitmask(x)
+}
+
+// CHECK-LABEL: @bitmask_uint
+#[no_mangle]
+pub unsafe fn bitmask_uint(x: u32x2) -> u8 {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{[0-9]+}}, <i32 31, i32 31>
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2
+    // CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8
+    simd_bitmask(x)
+}
+
+// CHECK-LABEL: @bitmask_int16
+#[no_mangle]
+pub unsafe fn bitmask_int16(x: i8x16) -> u16 {
+    // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{[0-9]+}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+    // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
+    // CHECK: %{{[0-9]+}} = bitcast <16 x i1> [[B]] to i16
+    // CHECK-NOT: zext
+    simd_bitmask(x)
+}
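To complement the codegen test, here is a hedged usage sketch for a nightly compiler of this era, mirroring the declarations in the test file above. The expected result assumes lane 0 lands in the least-significant bit, which matches the little-endian `bitcast` checked in the test; this is a sketch, not an interface promised by the PR.

```rust
// Usage sketch (nightly-only), mirroring the test file above.
// Assumption not stated in the PR text: lane 0 maps to the
// least-significant bit, as with the little-endian bitcast above.
#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]

#[repr(simd)]
#[derive(Copy, Clone)]
struct i32x2(i32, i32);

extern "platform-intrinsic" {
    fn simd_bitmask<T, U>(x: T) -> U;
}

fn main() {
    // Lane 0 is "true" (all ones), lane 1 is "false" (all zeros).
    let m = i32x2(!0, 0);
    let bits: u8 = unsafe { simd_bitmask(m) };
    // The two low bits hold the per-lane mask; the zext fills the
    // remaining bits of the u8 with zeros.
    assert_eq!(bits, 0b01);
}
```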
