summary refs log tree commit diff
path: root/tests/codegen-llvm/simd-intrinsic
diff options
context:
space:
mode:
Diffstat (limited to 'tests/codegen-llvm/simd-intrinsic')
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-abs.rs                          | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-ceil.rs                         | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-cos.rs                          | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp.rs                          | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp2.rs                         | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-floor.rs                        | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fma.rs                          | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fsqrt.rs                        | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log.rs                          | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log10.rs                        | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log2.rs                         | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-minmax.rs                       | 25
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-sin.rs                          | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs     | 579
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-bitmask.rs                    | 48
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-gather.rs                     | 55
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-load.rs                | 49
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-store.rs               | 41
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-scatter.rs                    | 47
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-select.rs                     | 48
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-mask-reduce.rs                        | 60
-rw-r--r--  tests/codegen-llvm/simd-intrinsic/simd-intrinsic-transmute-array.rs                    | 58
22 files changed, 1730 insertions, 0 deletions
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-abs.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-abs.rs
new file mode 100644
index 00000000000..baf445d0a1b
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-abs.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fabs;
+
+// CHECK-LABEL: @fabs_32x2
+#[no_mangle]
+pub unsafe fn fabs_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.fabs.v2f32
+    simd_fabs(a)
+}
+
+// CHECK-LABEL: @fabs_32x4
+#[no_mangle]
+pub unsafe fn fabs_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.fabs.v4f32
+    simd_fabs(a)
+}
+
+// CHECK-LABEL: @fabs_32x8
+#[no_mangle]
+pub unsafe fn fabs_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.fabs.v8f32
+    simd_fabs(a)
+}
+
+// CHECK-LABEL: @fabs_32x16
+#[no_mangle]
+pub unsafe fn fabs_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.fabs.v16f32
+    simd_fabs(a)
+}
+
+// CHECK-LABEL: @fabs_64x4
+#[no_mangle]
+pub unsafe fn fabs_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.fabs.v4f64
+    simd_fabs(a)
+}
+
+// CHECK-LABEL: @fabs_64x2
+#[no_mangle]
+pub unsafe fn fabs_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.fabs.v2f64
+    simd_fabs(a)
+}
+
+// CHECK-LABEL: @fabs_64x8
+#[no_mangle]
+pub unsafe fn fabs_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.fabs.v8f64
+    simd_fabs(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-ceil.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-ceil.rs
new file mode 100644
index 00000000000..096de569274
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-ceil.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_ceil;
+
+// CHECK-LABEL: @ceil_32x2
+#[no_mangle]
+pub unsafe fn ceil_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.ceil.v2f32
+    simd_ceil(a)
+}
+
+// CHECK-LABEL: @ceil_32x4
+#[no_mangle]
+pub unsafe fn ceil_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.ceil.v4f32
+    simd_ceil(a)
+}
+
+// CHECK-LABEL: @ceil_32x8
+#[no_mangle]
+pub unsafe fn ceil_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.ceil.v8f32
+    simd_ceil(a)
+}
+
+// CHECK-LABEL: @ceil_32x16
+#[no_mangle]
+pub unsafe fn ceil_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.ceil.v16f32
+    simd_ceil(a)
+}
+
+// CHECK-LABEL: @ceil_64x4
+#[no_mangle]
+pub unsafe fn ceil_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.ceil.v4f64
+    simd_ceil(a)
+}
+
+// CHECK-LABEL: @ceil_64x2
+#[no_mangle]
+pub unsafe fn ceil_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.ceil.v2f64
+    simd_ceil(a)
+}
+
+// CHECK-LABEL: @ceil_64x8
+#[no_mangle]
+pub unsafe fn ceil_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.ceil.v8f64
+    simd_ceil(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-cos.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-cos.rs
new file mode 100644
index 00000000000..5b2197924bc
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-cos.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fcos;
+
+// CHECK-LABEL: @fcos_32x2
+#[no_mangle]
+pub unsafe fn fcos_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.cos.v2f32
+    simd_fcos(a)
+}
+
+// CHECK-LABEL: @fcos_32x4
+#[no_mangle]
+pub unsafe fn fcos_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.cos.v4f32
+    simd_fcos(a)
+}
+
+// CHECK-LABEL: @fcos_32x8
+#[no_mangle]
+pub unsafe fn fcos_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.cos.v8f32
+    simd_fcos(a)
+}
+
+// CHECK-LABEL: @fcos_32x16
+#[no_mangle]
+pub unsafe fn fcos_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.cos.v16f32
+    simd_fcos(a)
+}
+
+// CHECK-LABEL: @fcos_64x4
+#[no_mangle]
+pub unsafe fn fcos_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.cos.v4f64
+    simd_fcos(a)
+}
+
+// CHECK-LABEL: @fcos_64x2
+#[no_mangle]
+pub unsafe fn fcos_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.cos.v2f64
+    simd_fcos(a)
+}
+
+// CHECK-LABEL: @fcos_64x8
+#[no_mangle]
+pub unsafe fn fcos_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.cos.v8f64
+    simd_fcos(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp.rs
new file mode 100644
index 00000000000..d4eadb36c65
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fexp;
+
+// CHECK-LABEL: @exp_32x2
+#[no_mangle]
+pub unsafe fn exp_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.exp.v2f32
+    simd_fexp(a)
+}
+
+// CHECK-LABEL: @exp_32x4
+#[no_mangle]
+pub unsafe fn exp_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.exp.v4f32
+    simd_fexp(a)
+}
+
+// CHECK-LABEL: @exp_32x8
+#[no_mangle]
+pub unsafe fn exp_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.exp.v8f32
+    simd_fexp(a)
+}
+
+// CHECK-LABEL: @exp_32x16
+#[no_mangle]
+pub unsafe fn exp_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.exp.v16f32
+    simd_fexp(a)
+}
+
+// CHECK-LABEL: @exp_64x4
+#[no_mangle]
+pub unsafe fn exp_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.exp.v4f64
+    simd_fexp(a)
+}
+
+// CHECK-LABEL: @exp_64x2
+#[no_mangle]
+pub unsafe fn exp_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.exp.v2f64
+    simd_fexp(a)
+}
+
+// CHECK-LABEL: @exp_64x8
+#[no_mangle]
+pub unsafe fn exp_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.exp.v8f64
+    simd_fexp(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp2.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp2.rs
new file mode 100644
index 00000000000..d32015b7990
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-exp2.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fexp2;
+
+// CHECK-LABEL: @exp2_32x2
+#[no_mangle]
+pub unsafe fn exp2_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.exp2.v2f32
+    simd_fexp2(a)
+}
+
+// CHECK-LABEL: @exp2_32x4
+#[no_mangle]
+pub unsafe fn exp2_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.exp2.v4f32
+    simd_fexp2(a)
+}
+
+// CHECK-LABEL: @exp2_32x8
+#[no_mangle]
+pub unsafe fn exp2_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.exp2.v8f32
+    simd_fexp2(a)
+}
+
+// CHECK-LABEL: @exp2_32x16
+#[no_mangle]
+pub unsafe fn exp2_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.exp2.v16f32
+    simd_fexp2(a)
+}
+
+// CHECK-LABEL: @exp2_64x4
+#[no_mangle]
+pub unsafe fn exp2_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.exp2.v4f64
+    simd_fexp2(a)
+}
+
+// CHECK-LABEL: @exp2_64x2
+#[no_mangle]
+pub unsafe fn exp2_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.exp2.v2f64
+    simd_fexp2(a)
+}
+
+// CHECK-LABEL: @exp2_64x8
+#[no_mangle]
+pub unsafe fn exp2_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.exp2.v8f64
+    simd_fexp2(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-floor.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-floor.rs
new file mode 100644
index 00000000000..1e1c8ce0c35
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-floor.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_floor;
+
+// CHECK-LABEL: @floor_32x2
+#[no_mangle]
+pub unsafe fn floor_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.floor.v2f32
+    simd_floor(a)
+}
+
+// CHECK-LABEL: @floor_32x4
+#[no_mangle]
+pub unsafe fn floor_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.floor.v4f32
+    simd_floor(a)
+}
+
+// CHECK-LABEL: @floor_32x8
+#[no_mangle]
+pub unsafe fn floor_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.floor.v8f32
+    simd_floor(a)
+}
+
+// CHECK-LABEL: @floor_32x16
+#[no_mangle]
+pub unsafe fn floor_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.floor.v16f32
+    simd_floor(a)
+}
+
+// CHECK-LABEL: @floor_64x4
+#[no_mangle]
+pub unsafe fn floor_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.floor.v4f64
+    simd_floor(a)
+}
+
+// CHECK-LABEL: @floor_64x2
+#[no_mangle]
+pub unsafe fn floor_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.floor.v2f64
+    simd_floor(a)
+}
+
+// CHECK-LABEL: @floor_64x8
+#[no_mangle]
+pub unsafe fn floor_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.floor.v8f64
+    simd_floor(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fma.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fma.rs
new file mode 100644
index 00000000000..982077d81f9
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fma.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fma;
+
+// CHECK-LABEL: @fma_32x2
+#[no_mangle]
+pub unsafe fn fma_32x2(a: f32x2, b: f32x2, c: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.fma.v2f32
+    simd_fma(a, b, c)
+}
+
+// CHECK-LABEL: @fma_32x4
+#[no_mangle]
+pub unsafe fn fma_32x4(a: f32x4, b: f32x4, c: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.fma.v4f32
+    simd_fma(a, b, c)
+}
+
+// CHECK-LABEL: @fma_32x8
+#[no_mangle]
+pub unsafe fn fma_32x8(a: f32x8, b: f32x8, c: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.fma.v8f32
+    simd_fma(a, b, c)
+}
+
+// CHECK-LABEL: @fma_32x16
+#[no_mangle]
+pub unsafe fn fma_32x16(a: f32x16, b: f32x16, c: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.fma.v16f32
+    simd_fma(a, b, c)
+}
+
+// CHECK-LABEL: @fma_64x4
+#[no_mangle]
+pub unsafe fn fma_64x4(a: f64x4, b: f64x4, c: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.fma.v4f64
+    simd_fma(a, b, c)
+}
+
+// CHECK-LABEL: @fma_64x2
+#[no_mangle]
+pub unsafe fn fma_64x2(a: f64x2, b: f64x2, c: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.fma.v2f64
+    simd_fma(a, b, c)
+}
+
+// CHECK-LABEL: @fma_64x8
+#[no_mangle]
+pub unsafe fn fma_64x8(a: f64x8, b: f64x8, c: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.fma.v8f64
+    simd_fma(a, b, c)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fsqrt.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fsqrt.rs
new file mode 100644
index 00000000000..e20a591f573
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-fsqrt.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fsqrt;
+
+// CHECK-LABEL: @fsqrt_32x2
+#[no_mangle]
+pub unsafe fn fsqrt_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.sqrt.v2f32
+    simd_fsqrt(a)
+}
+
+// CHECK-LABEL: @fsqrt_32x4
+#[no_mangle]
+pub unsafe fn fsqrt_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.sqrt.v4f32
+    simd_fsqrt(a)
+}
+
+// CHECK-LABEL: @fsqrt_32x8
+#[no_mangle]
+pub unsafe fn fsqrt_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.sqrt.v8f32
+    simd_fsqrt(a)
+}
+
+// CHECK-LABEL: @fsqrt_32x16
+#[no_mangle]
+pub unsafe fn fsqrt_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.sqrt.v16f32
+    simd_fsqrt(a)
+}
+
+// CHECK-LABEL: @fsqrt_64x4
+#[no_mangle]
+pub unsafe fn fsqrt_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.sqrt.v4f64
+    simd_fsqrt(a)
+}
+
+// CHECK-LABEL: @fsqrt_64x2
+#[no_mangle]
+pub unsafe fn fsqrt_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.sqrt.v2f64
+    simd_fsqrt(a)
+}
+
+// CHECK-LABEL: @fsqrt_64x8
+#[no_mangle]
+pub unsafe fn fsqrt_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.sqrt.v8f64
+    simd_fsqrt(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log.rs
new file mode 100644
index 00000000000..bf1ffc76330
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_flog;
+
+// CHECK-LABEL: @log_32x2
+#[no_mangle]
+pub unsafe fn log_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.log.v2f32
+    simd_flog(a)
+}
+
+// CHECK-LABEL: @log_32x4
+#[no_mangle]
+pub unsafe fn log_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.log.v4f32
+    simd_flog(a)
+}
+
+// CHECK-LABEL: @log_32x8
+#[no_mangle]
+pub unsafe fn log_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.log.v8f32
+    simd_flog(a)
+}
+
+// CHECK-LABEL: @log_32x16
+#[no_mangle]
+pub unsafe fn log_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.log.v16f32
+    simd_flog(a)
+}
+
+// CHECK-LABEL: @log_64x4
+#[no_mangle]
+pub unsafe fn log_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.log.v4f64
+    simd_flog(a)
+}
+
+// CHECK-LABEL: @log_64x2
+#[no_mangle]
+pub unsafe fn log_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.log.v2f64
+    simd_flog(a)
+}
+
+// CHECK-LABEL: @log_64x8
+#[no_mangle]
+pub unsafe fn log_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.log.v8f64
+    simd_flog(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log10.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log10.rs
new file mode 100644
index 00000000000..ccf484e0e41
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log10.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_flog10;
+
+// CHECK-LABEL: @log10_32x2
+#[no_mangle]
+pub unsafe fn log10_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.log10.v2f32
+    simd_flog10(a)
+}
+
+// CHECK-LABEL: @log10_32x4
+#[no_mangle]
+pub unsafe fn log10_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.log10.v4f32
+    simd_flog10(a)
+}
+
+// CHECK-LABEL: @log10_32x8
+#[no_mangle]
+pub unsafe fn log10_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.log10.v8f32
+    simd_flog10(a)
+}
+
+// CHECK-LABEL: @log10_32x16
+#[no_mangle]
+pub unsafe fn log10_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.log10.v16f32
+    simd_flog10(a)
+}
+
+// CHECK-LABEL: @log10_64x4
+#[no_mangle]
+pub unsafe fn log10_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.log10.v4f64
+    simd_flog10(a)
+}
+
+// CHECK-LABEL: @log10_64x2
+#[no_mangle]
+pub unsafe fn log10_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.log10.v2f64
+    simd_flog10(a)
+}
+
+// CHECK-LABEL: @log10_64x8
+#[no_mangle]
+pub unsafe fn log10_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.log10.v8f64
+    simd_flog10(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log2.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log2.rs
new file mode 100644
index 00000000000..677d8b01e84
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-log2.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_flog2;
+
+// CHECK-LABEL: @log2_32x2
+#[no_mangle]
+pub unsafe fn log2_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.log2.v2f32
+    simd_flog2(a)
+}
+
+// CHECK-LABEL: @log2_32x4
+#[no_mangle]
+pub unsafe fn log2_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.log2.v4f32
+    simd_flog2(a)
+}
+
+// CHECK-LABEL: @log2_32x8
+#[no_mangle]
+pub unsafe fn log2_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.log2.v8f32
+    simd_flog2(a)
+}
+
+// CHECK-LABEL: @log2_32x16
+#[no_mangle]
+pub unsafe fn log2_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.log2.v16f32
+    simd_flog2(a)
+}
+
+// CHECK-LABEL: @log2_64x4
+#[no_mangle]
+pub unsafe fn log2_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.log2.v4f64
+    simd_flog2(a)
+}
+
+// CHECK-LABEL: @log2_64x2
+#[no_mangle]
+pub unsafe fn log2_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.log2.v2f64
+    simd_flog2(a)
+}
+
+// CHECK-LABEL: @log2_64x8
+#[no_mangle]
+pub unsafe fn log2_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.log2.v8f64
+    simd_flog2(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-minmax.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-minmax.rs
new file mode 100644
index 00000000000..8dd464a1bff
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-minmax.rs
@@ -0,0 +1,25 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::{simd_fmax, simd_fmin};
+
+// CHECK-LABEL: @fmin
+#[no_mangle]
+pub unsafe fn fmin(a: f32x4, b: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.minnum.v4f32
+    simd_fmin(a, b)
+}
+
+// CHECK-LABEL: @fmax
+#[no_mangle]
+pub unsafe fn fmax(a: f32x4, b: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.maxnum.v4f32
+    simd_fmax(a, b)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-sin.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-sin.rs
new file mode 100644
index 00000000000..48becc72c0b
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-float-sin.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_fsin;
+
+// CHECK-LABEL: @fsin_32x2
+#[no_mangle]
+pub unsafe fn fsin_32x2(a: f32x2) -> f32x2 {
+    // CHECK: call <2 x float> @llvm.sin.v2f32
+    simd_fsin(a)
+}
+
+// CHECK-LABEL: @fsin_32x4
+#[no_mangle]
+pub unsafe fn fsin_32x4(a: f32x4) -> f32x4 {
+    // CHECK: call <4 x float> @llvm.sin.v4f32
+    simd_fsin(a)
+}
+
+// CHECK-LABEL: @fsin_32x8
+#[no_mangle]
+pub unsafe fn fsin_32x8(a: f32x8) -> f32x8 {
+    // CHECK: call <8 x float> @llvm.sin.v8f32
+    simd_fsin(a)
+}
+
+// CHECK-LABEL: @fsin_32x16
+#[no_mangle]
+pub unsafe fn fsin_32x16(a: f32x16) -> f32x16 {
+    // CHECK: call <16 x float> @llvm.sin.v16f32
+    simd_fsin(a)
+}
+
+// CHECK-LABEL: @fsin_64x4
+#[no_mangle]
+pub unsafe fn fsin_64x4(a: f64x4) -> f64x4 {
+    // CHECK: call <4 x double> @llvm.sin.v4f64
+    simd_fsin(a)
+}
+
+// CHECK-LABEL: @fsin_64x2
+#[no_mangle]
+pub unsafe fn fsin_64x2(a: f64x2) -> f64x2 {
+    // CHECK: call <2 x double> @llvm.sin.v2f64
+    simd_fsin(a)
+}
+
+// CHECK-LABEL: @fsin_64x8
+#[no_mangle]
+pub unsafe fn fsin_64x8(a: f64x8) -> f64x8 {
+    // CHECK: call <8 x double> @llvm.sin.v8f64
+    simd_fsin(a)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
new file mode 100644
index 00000000000..06d46889715
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
@@ -0,0 +1,579 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+#![deny(unused)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::{simd_saturating_add, simd_saturating_sub};
+
+// NOTE(eddyb) `%{{x|0}}` is used because on some targets (e.g. WASM)
+// SIMD vectors are passed directly, resulting in `%x` being a vector,
+// while on others they're passed indirectly, resulting in `%x` being
+// a pointer to a vector, and `%0` a vector loaded from that pointer.
+// This is controlled by the target spec option `simd_types_indirect`.
+// The same applies to `%{{y|1}}` as well.
+
+// CHECK-LABEL: @sadd_i8x2
+#[no_mangle]
+pub unsafe fn sadd_i8x2(x: i8x2, y: i8x2) -> i8x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i8x4
+#[no_mangle]
+pub unsafe fn sadd_i8x4(x: i8x4, y: i8x4) -> i8x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i8x8
+#[no_mangle]
+pub unsafe fn sadd_i8x8(x: i8x8, y: i8x8) -> i8x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i8x16
+#[no_mangle]
+pub unsafe fn sadd_i8x16(x: i8x16, y: i8x16) -> i8x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i8x32
+#[no_mangle]
+pub unsafe fn sadd_i8x32(x: i8x32, y: i8x32) -> i8x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i8x64
+#[no_mangle]
+pub unsafe fn sadd_i8x64(x: i8x64, y: i8x64) -> i8x64 {
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i16x2
+#[no_mangle]
+pub unsafe fn sadd_i16x2(x: i16x2, y: i16x2) -> i16x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i16x4
+#[no_mangle]
+pub unsafe fn sadd_i16x4(x: i16x4, y: i16x4) -> i16x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i16x8
+#[no_mangle]
+pub unsafe fn sadd_i16x8(x: i16x8, y: i16x8) -> i16x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i16x16
+#[no_mangle]
+pub unsafe fn sadd_i16x16(x: i16x16, y: i16x16) -> i16x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i16x32
+#[no_mangle]
+pub unsafe fn sadd_i16x32(x: i16x32, y: i16x32) -> i16x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i32x2
+#[no_mangle]
+pub unsafe fn sadd_i32x2(x: i32x2, y: i32x2) -> i32x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i32x4
+#[no_mangle]
+pub unsafe fn sadd_i32x4(x: i32x4, y: i32x4) -> i32x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i32x8
+#[no_mangle]
+pub unsafe fn sadd_i32x8(x: i32x8, y: i32x8) -> i32x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i32x16
+#[no_mangle]
+pub unsafe fn sadd_i32x16(x: i32x16, y: i32x16) -> i32x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i64x2
+#[no_mangle]
+pub unsafe fn sadd_i64x2(x: i64x2, y: i64x2) -> i64x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i64x4
+#[no_mangle]
+pub unsafe fn sadd_i64x4(x: i64x4, y: i64x4) -> i64x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i64x8
+#[no_mangle]
+pub unsafe fn sadd_i64x8(x: i64x8, y: i64x8) -> i64x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i128x2
+#[no_mangle]
+pub unsafe fn sadd_i128x2(x: i128x2, y: i128x2) -> i128x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @sadd_i128x4
+#[no_mangle]
+pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u8x2
+#[no_mangle]
+pub unsafe fn uadd_u8x2(x: u8x2, y: u8x2) -> u8x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u8x4
+#[no_mangle]
+pub unsafe fn uadd_u8x4(x: u8x4, y: u8x4) -> u8x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u8x8
+#[no_mangle]
+pub unsafe fn uadd_u8x8(x: u8x8, y: u8x8) -> u8x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u8x16
+#[no_mangle]
+pub unsafe fn uadd_u8x16(x: u8x16, y: u8x16) -> u8x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u8x32
+#[no_mangle]
+pub unsafe fn uadd_u8x32(x: u8x32, y: u8x32) -> u8x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u8x64
+#[no_mangle]
+pub unsafe fn uadd_u8x64(x: u8x64, y: u8x64) -> u8x64 {
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u16x2
+#[no_mangle]
+pub unsafe fn uadd_u16x2(x: u16x2, y: u16x2) -> u16x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u16x4
+#[no_mangle]
+pub unsafe fn uadd_u16x4(x: u16x4, y: u16x4) -> u16x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u16x8
+#[no_mangle]
+pub unsafe fn uadd_u16x8(x: u16x8, y: u16x8) -> u16x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u16x16
+#[no_mangle]
+pub unsafe fn uadd_u16x16(x: u16x16, y: u16x16) -> u16x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u16x32
+#[no_mangle]
+pub unsafe fn uadd_u16x32(x: u16x32, y: u16x32) -> u16x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u32x2
+#[no_mangle]
+pub unsafe fn uadd_u32x2(x: u32x2, y: u32x2) -> u32x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u32x4
+#[no_mangle]
+pub unsafe fn uadd_u32x4(x: u32x4, y: u32x4) -> u32x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u32x8
+#[no_mangle]
+pub unsafe fn uadd_u32x8(x: u32x8, y: u32x8) -> u32x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u32x16
+#[no_mangle]
+pub unsafe fn uadd_u32x16(x: u32x16, y: u32x16) -> u32x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u64x2
+#[no_mangle]
+pub unsafe fn uadd_u64x2(x: u64x2, y: u64x2) -> u64x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u64x4
+#[no_mangle]
+pub unsafe fn uadd_u64x4(x: u64x4, y: u64x4) -> u64x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u64x8
+#[no_mangle]
+pub unsafe fn uadd_u64x8(x: u64x8, y: u64x8) -> u64x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u128x2
+#[no_mangle]
+pub unsafe fn uadd_u128x2(x: u128x2, y: u128x2) -> u128x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @uadd_u128x4
+#[no_mangle]
+pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
+    simd_saturating_add(x, y)
+}
+
+// CHECK-LABEL: @ssub_i8x2
+#[no_mangle]
+pub unsafe fn ssub_i8x2(x: i8x2, y: i8x2) -> i8x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i8x4
+#[no_mangle]
+pub unsafe fn ssub_i8x4(x: i8x4, y: i8x4) -> i8x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i8x8
+#[no_mangle]
+pub unsafe fn ssub_i8x8(x: i8x8, y: i8x8) -> i8x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i8x16
+#[no_mangle]
+pub unsafe fn ssub_i8x16(x: i8x16, y: i8x16) -> i8x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i8x32
+#[no_mangle]
+pub unsafe fn ssub_i8x32(x: i8x32, y: i8x32) -> i8x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i8x64
+#[no_mangle]
+pub unsafe fn ssub_i8x64(x: i8x64, y: i8x64) -> i8x64 {
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i16x2
+#[no_mangle]
+pub unsafe fn ssub_i16x2(x: i16x2, y: i16x2) -> i16x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i16x4
+#[no_mangle]
+pub unsafe fn ssub_i16x4(x: i16x4, y: i16x4) -> i16x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i16x8
+#[no_mangle]
+pub unsafe fn ssub_i16x8(x: i16x8, y: i16x8) -> i16x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i16x16
+#[no_mangle]
+pub unsafe fn ssub_i16x16(x: i16x16, y: i16x16) -> i16x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i16x32
+#[no_mangle]
+pub unsafe fn ssub_i16x32(x: i16x32, y: i16x32) -> i16x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i32x2
+#[no_mangle]
+pub unsafe fn ssub_i32x2(x: i32x2, y: i32x2) -> i32x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i32x4
+#[no_mangle]
+pub unsafe fn ssub_i32x4(x: i32x4, y: i32x4) -> i32x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i32x8
+#[no_mangle]
+pub unsafe fn ssub_i32x8(x: i32x8, y: i32x8) -> i32x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i32x16
+#[no_mangle]
+pub unsafe fn ssub_i32x16(x: i32x16, y: i32x16) -> i32x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i64x2
+#[no_mangle]
+pub unsafe fn ssub_i64x2(x: i64x2, y: i64x2) -> i64x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i64x4
+#[no_mangle]
+pub unsafe fn ssub_i64x4(x: i64x4, y: i64x4) -> i64x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i64x8
+#[no_mangle]
+pub unsafe fn ssub_i64x8(x: i64x8, y: i64x8) -> i64x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i128x2
+#[no_mangle]
+pub unsafe fn ssub_i128x2(x: i128x2, y: i128x2) -> i128x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @ssub_i128x4
+#[no_mangle]
+pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u8x2
+#[no_mangle]
+pub unsafe fn usub_u8x2(x: u8x2, y: u8x2) -> u8x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u8x4
+#[no_mangle]
+pub unsafe fn usub_u8x4(x: u8x4, y: u8x4) -> u8x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u8x8
+#[no_mangle]
+pub unsafe fn usub_u8x8(x: u8x8, y: u8x8) -> u8x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u8x16
+#[no_mangle]
+pub unsafe fn usub_u8x16(x: u8x16, y: u8x16) -> u8x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u8x32
+#[no_mangle]
+pub unsafe fn usub_u8x32(x: u8x32, y: u8x32) -> u8x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u8x64
+#[no_mangle]
+pub unsafe fn usub_u8x64(x: u8x64, y: u8x64) -> u8x64 {
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u16x2
+#[no_mangle]
+pub unsafe fn usub_u16x2(x: u16x2, y: u16x2) -> u16x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u16x4
+#[no_mangle]
+pub unsafe fn usub_u16x4(x: u16x4, y: u16x4) -> u16x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u16x8
+#[no_mangle]
+pub unsafe fn usub_u16x8(x: u16x8, y: u16x8) -> u16x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u16x16
+#[no_mangle]
+pub unsafe fn usub_u16x16(x: u16x16, y: u16x16) -> u16x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u16x32
+#[no_mangle]
+pub unsafe fn usub_u16x32(x: u16x32, y: u16x32) -> u16x32 {
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u32x2
+#[no_mangle]
+pub unsafe fn usub_u32x2(x: u32x2, y: u32x2) -> u32x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u32x4
+#[no_mangle]
+pub unsafe fn usub_u32x4(x: u32x4, y: u32x4) -> u32x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u32x8
+#[no_mangle]
+pub unsafe fn usub_u32x8(x: u32x8, y: u32x8) -> u32x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u32x16
+#[no_mangle]
+pub unsafe fn usub_u32x16(x: u32x16, y: u32x16) -> u32x16 {
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u64x2
+#[no_mangle]
+pub unsafe fn usub_u64x2(x: u64x2, y: u64x2) -> u64x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u64x4
+#[no_mangle]
+pub unsafe fn usub_u64x4(x: u64x4, y: u64x4) -> u64x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u64x8
+#[no_mangle]
+pub unsafe fn usub_u64x8(x: u64x8, y: u64x8) -> u64x8 {
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u128x2
+#[no_mangle]
+pub unsafe fn usub_u128x2(x: u128x2, y: u128x2) -> u128x2 {
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
+
+// CHECK-LABEL: @usub_u128x4
+#[no_mangle]
+pub unsafe fn usub_u128x4(x: u128x4, y: u128x4) -> u128x4 {
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
+    simd_saturating_sub(x, y)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-bitmask.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-bitmask.rs
new file mode 100644
index 00000000000..294262d8152
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-bitmask.rs
@@ -0,0 +1,48 @@
+//@ compile-flags: -C no-prepopulate-passes
+//
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_bitmask;
+
+// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
+// SIMD vectors are passed directly, resulting in `%x` being a vector,
+// while on others they're passed indirectly, resulting in `%x` being
+// a pointer to a vector, and `%1` a vector loaded from that pointer.
+// This is controlled by the target spec option `simd_types_indirect`.
+
+// CHECK-LABEL: @bitmask_int
+#[no_mangle]
+pub unsafe fn bitmask_int(x: i32x2) -> u8 {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2
+    // CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8
+    simd_bitmask(x)
+}
+
+// CHECK-LABEL: @bitmask_uint
+#[no_mangle]
+pub unsafe fn bitmask_uint(x: u32x2) -> u8 {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2
+    // CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8
+    simd_bitmask(x)
+}
+
+// CHECK-LABEL: @bitmask_int16
+#[no_mangle]
+pub unsafe fn bitmask_int16(x: i8x16) -> u16 {
+    // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1|2}}, {{<i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>|splat \(i8 7\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
+    // CHECK: %{{[0-9]+}} = bitcast <16 x i1> [[B]] to i16
+    // CHECK-NOT: zext
+    simd_bitmask(x)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-gather.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-gather.rs
new file mode 100644
index 00000000000..690bfb432f9
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-gather.rs
@@ -0,0 +1,55 @@
+//
+
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_gather;
+
+pub type Vec2<T> = Simd<T, 2>;
+pub type Vec4<T> = Simd<T, 4>;
+
+// CHECK-LABEL: @gather_f32x2
+#[no_mangle]
+pub unsafe fn gather_f32x2(
+    pointers: Vec2<*const f32>,
+    mask: Vec2<i32>,
+    values: Vec2<f32>,
+) -> Vec2<f32> {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
+    simd_gather(values, pointers, mask)
+}
+
+// CHECK-LABEL: @gather_f32x2_unsigned
+#[no_mangle]
+pub unsafe fn gather_f32x2_unsigned(
+    pointers: Vec2<*const f32>,
+    mask: Vec2<u32>,
+    values: Vec2<f32>,
+) -> Vec2<f32> {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x float> {{.*}})
+    simd_gather(values, pointers, mask)
+}
+
+// CHECK-LABEL: @gather_pf32x2
+#[no_mangle]
+pub unsafe fn gather_pf32x2(
+    pointers: Vec2<*const *const f32>,
+    mask: Vec2<i32>,
+    values: Vec2<*const f32>,
+) -> Vec2<*const f32> {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]], <2 x ptr> {{.*}})
+    simd_gather(values, pointers, mask)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-load.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-load.rs
new file mode 100644
index 00000000000..fda315dc66c
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-load.rs
@@ -0,0 +1,49 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_masked_load;
+
+pub type Vec2<T> = Simd<T, 2>;
+pub type Vec4<T> = Simd<T, 4>;
+
+// CHECK-LABEL: @load_f32x2
+#[no_mangle]
+pub unsafe fn load_f32x2(mask: Vec2<i32>, pointer: *const f32, values: Vec2<f32>) -> Vec2<f32> {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
+    simd_masked_load(mask, pointer, values)
+}
+
+// CHECK-LABEL: @load_f32x2_unsigned
+#[no_mangle]
+pub unsafe fn load_f32x2_unsigned(
+    mask: Vec2<u32>,
+    pointer: *const f32,
+    values: Vec2<f32>,
+) -> Vec2<f32> {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
+    simd_masked_load(mask, pointer, values)
+}
+
+// CHECK-LABEL: @load_pf32x4
+#[no_mangle]
+pub unsafe fn load_pf32x4(
+    mask: Vec4<i32>,
+    pointer: *const *const f32,
+    values: Vec4<*const f32>,
+) -> Vec4<*const f32> {
+    // CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, {{<i32 31, i32 31, i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
+    // CHECK: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]], <4 x ptr> {{.*}})
+    simd_masked_load(mask, pointer, values)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-store.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-store.rs
new file mode 100644
index 00000000000..6ca7388d464
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-masked-store.rs
@@ -0,0 +1,41 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_masked_store;
+
+pub type Vec2<T> = Simd<T, 2>;
+pub type Vec4<T> = Simd<T, 4>;
+
+// CHECK-LABEL: @store_f32x2
+#[no_mangle]
+pub unsafe fn store_f32x2(mask: Vec2<i32>, pointer: *mut f32, values: Vec2<f32>) {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
+    simd_masked_store(mask, pointer, values)
+}
+
+// CHECK-LABEL: @store_f32x2_unsigned
+#[no_mangle]
+pub unsafe fn store_f32x2_unsigned(mask: Vec2<u32>, pointer: *mut f32, values: Vec2<f32>) {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call void @llvm.masked.store.v2f32.p0(<2 x float> {{.*}}, ptr {{.*}}, i32 4, <2 x i1> [[B]])
+    simd_masked_store(mask, pointer, values)
+}
+
+// CHECK-LABEL: @store_pf32x4
+#[no_mangle]
+pub unsafe fn store_pf32x4(mask: Vec4<i32>, pointer: *mut *const f32, values: Vec4<*const f32>) {
+    // CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, {{<i32 31, i32 31, i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
+    // CHECK: call void @llvm.masked.store.v4p0.p0(<4 x ptr> {{.*}}, ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]])
+    simd_masked_store(mask, pointer, values)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-scatter.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-scatter.rs
new file mode 100644
index 00000000000..743652966e1
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-scatter.rs
@@ -0,0 +1,47 @@
+//
+
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::simd_scatter;
+
+pub type Vec2<T> = Simd<T, 2>;
+pub type Vec4<T> = Simd<T, 4>;
+
+// CHECK-LABEL: @scatter_f32x2
+#[no_mangle]
+pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>, values: Vec2<f32>) {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
+    simd_scatter(values, pointers, mask)
+}
+
+// CHECK-LABEL: @scatter_f32x2_unsigned
+#[no_mangle]
+pub unsafe fn scatter_f32x2_unsigned(pointers: Vec2<*mut f32>, mask: Vec2<u32>, values: Vec2<f32>) {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
+    simd_scatter(values, pointers, mask)
+}
+
+// CHECK-LABEL: @scatter_pf32x2
+#[no_mangle]
+pub unsafe fn scatter_pf32x2(
+    pointers: Vec2<*mut *const f32>,
+    mask: Vec2<i32>,
+    values: Vec2<*const f32>,
+) {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> [[B]]
+    simd_scatter(values, pointers, mask)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-select.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-select.rs
new file mode 100644
index 00000000000..2c0bad21f44
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-generic-select.rs
@@ -0,0 +1,48 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::{simd_select, simd_select_bitmask};
+
+pub type b8x4 = i8x4;
+
+// CHECK-LABEL: @select_m8
+#[no_mangle]
+pub unsafe fn select_m8(m: b8x4, a: f32x4, b: f32x4) -> f32x4 {
+    // CHECK: [[A:%[0-9]+]] = lshr <4 x i8> %{{.*}}, {{<i8 7, i8 7, i8 7, i8 7>|splat \(i8 7\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <4 x i8> [[A]] to <4 x i1>
+    // CHECK: select <4 x i1> [[B]]
+    simd_select(m, a, b)
+}
+
+// CHECK-LABEL: @select_m32
+#[no_mangle]
+pub unsafe fn select_m32(m: i32x4, a: f32x4, b: f32x4) -> f32x4 {
+    // CHECK: [[A:%[0-9]+]] = lshr <4 x i32> %{{.*}}, {{<i32 31, i32 31, i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
+    // CHECK: select <4 x i1> [[B]]
+    simd_select(m, a, b)
+}
+
+// CHECK-LABEL: @select_m32_unsigned
+#[no_mangle]
+pub unsafe fn select_m32_unsigned(m: u32x4, a: f32x4, b: f32x4) -> f32x4 {
+    // CHECK: [[A:%[0-9]+]] = lshr <4 x i32> %{{.*}}, {{<i32 31, i32 31, i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
+    // CHECK: select <4 x i1> [[B]]
+    simd_select(m, a, b)
+}
+
+// CHECK-LABEL: @select_bitmask
+#[no_mangle]
+pub unsafe fn select_bitmask(m: i8, a: f32x8, b: f32x8) -> f32x8 {
+    // CHECK: [[A:%[0-9]+]] = bitcast i8 {{.*}} to <8 x i1>
+    // CHECK: select <8 x i1> [[A]]
+    simd_select_bitmask(m, a, b)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-mask-reduce.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-mask-reduce.rs
new file mode 100644
index 00000000000..79f00a6ed60
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-mask-reduce.rs
@@ -0,0 +1,60 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd, core_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+use std::intrinsics::simd::{simd_reduce_all, simd_reduce_any};
+
+pub type mask32x2 = Simd<i32, 2>;
+pub type mask8x16 = Simd<i8, 16>;
+
+// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
+// SIMD vectors are passed directly, resulting in `%x` being a vector,
+// while on others they're passed indirectly, resulting in `%x` being
+// a pointer to a vector, and `%1` a vector loaded from that pointer.
+// This is controlled by the target spec option `simd_types_indirect`.
+
+// CHECK-LABEL: @reduce_any_32x2
+#[no_mangle]
+pub unsafe fn reduce_any_32x2(x: mask32x2) -> bool {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> [[B]])
+    // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
+    simd_reduce_any(x)
+}
+
+// CHECK-LABEL: @reduce_all_32x2
+#[no_mangle]
+pub unsafe fn reduce_all_32x2(x: mask32x2) -> bool {
+    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, {{<i32 31, i32 31>|splat \(i32 31\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
+    // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> [[B]])
+    // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
+    simd_reduce_all(x)
+}
+
+// CHECK-LABEL: @reduce_any_8x16
+#[no_mangle]
+pub unsafe fn reduce_any_8x16(x: mask8x16) -> bool {
+    // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1}}, {{<i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>|splat \(i8 7\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
+    // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[B]])
+    // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
+    simd_reduce_any(x)
+}
+
+// CHECK-LABEL: @reduce_all_8x16
+#[no_mangle]
+pub unsafe fn reduce_all_8x16(x: mask8x16) -> bool {
+    // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1}}, {{<i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>|splat \(i8 7\)}}
+    // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
+    // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> [[B]])
+    // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8
+    simd_reduce_all(x)
+}
diff --git a/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-transmute-array.rs b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-transmute-array.rs
new file mode 100644
index 00000000000..05c2f7e1bdf
--- /dev/null
+++ b/tests/codegen-llvm/simd-intrinsic/simd-intrinsic-transmute-array.rs
@@ -0,0 +1,58 @@
+//
+//@ compile-flags: -C no-prepopulate-passes
+// 32bit MSVC does not align things properly so we suppress high alignment annotations (#112480)
+//@ ignore-i686-pc-windows-msvc
+//@ ignore-i686-pc-windows-gnu
+
+#![crate_type = "lib"]
+#![allow(non_camel_case_types)]
+#![feature(repr_simd, core_intrinsics)]
+
+#[path = "../../auxiliary/minisimd.rs"]
+mod minisimd;
+use minisimd::*;
+
+pub type S<const N: usize> = Simd<f32, N>;
+pub type T = Simd<f32, 4>;
+
+// CHECK-LABEL: @array_align(
+#[no_mangle]
+pub fn array_align() -> usize {
+    // CHECK: ret [[USIZE:i[0-9]+]] [[ARRAY_ALIGN:[0-9]+]]
+    const { std::mem::align_of::<f32>() }
+}
+
+// CHECK-LABEL: @vector_align(
+#[no_mangle]
+pub fn vector_align() -> usize {
+    // CHECK: ret [[USIZE]] [[VECTOR_ALIGN:[0-9]+]]
+    const { std::mem::align_of::<T>() }
+}
+
+// CHECK-LABEL: @build_array_s
+#[no_mangle]
+pub fn build_array_s(x: [f32; 4]) -> S<4> {
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
+    Simd(x)
+}
+
+// CHECK-LABEL: @build_array_transmute_s
+#[no_mangle]
+pub fn build_array_transmute_s(x: [f32; 4]) -> S<4> {
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
+    unsafe { std::mem::transmute(x) }
+}
+
+// CHECK-LABEL: @build_array_t
+#[no_mangle]
+pub fn build_array_t(x: [f32; 4]) -> T {
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
+    Simd(x)
+}
+
+// CHECK-LABEL: @build_array_transmute_t
+#[no_mangle]
+pub fn build_array_transmute_t(x: [f32; 4]) -> T {
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
+    unsafe { std::mem::transmute(x) }
+}