author     Camille GILLOT <gillot.camille@gmail.com>  2023-05-15 21:15:28 +0200
committer  Camille GILLOT <gillot.camille@gmail.com>  2023-07-08 15:38:40 +0200
commit     d7983a2f231a279984fc70eb428b936930eaa45c (patch)
tree       7e289bf73e50ac6d40b8d03575a34d6d32bb7dee /tests
parent     d4096e0412ac5de785d739a0aa2b1c1c7b9d3b7d (diff)
Always name the return place.
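
With this change, codegen gives the MIR return place (_0) an explicit name in the emitted LLVM IR: the return slot shows up as %_0 instead of an anonymous value number such as %0, and the anonymous numbering of the remaining temporaries shifts down by one. The test changes below only adjust the FileCheck patterns to the new names. A minimal before/after sketch, mirroring the f32_to_bits test in tests/codegen/transmute-scalar.rs further down:

    // CHECK-LABEL: define{{.*}}i32 @f32_to_bits(float %x)
    // Before this commit the return value was anonymous:
    //     CHECK: %0 = bitcast float %x to i32
    //     CHECK-NEXT: ret i32 %0
    // After it the return place is named:
    //     CHECK: %_0 = bitcast float %x to i32
    //     CHECK-NEXT: ret i32 %_0
    #[no_mangle]
    pub fn f32_to_bits(x: f32) -> u32 {
        unsafe { std::mem::transmute(x) }
    }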
Diffstat (limited to 'tests')
 tests/codegen/array-codegen.rs | 6
 tests/codegen/avr/avr-func-addrspace.rs | 2
 tests/codegen/consts.rs | 5
 tests/codegen/enum-match.rs | 2
 tests/codegen/fewer-names.rs | 4
 tests/codegen/function-arguments-noopt.rs | 4
 tests/codegen/function-arguments.rs | 2
 tests/codegen/intrinsics/transmute-niched.rs | 16
 tests/codegen/intrinsics/transmute-x64.rs | 12
 tests/codegen/intrinsics/transmute.rs | 90
 tests/codegen/match-optimized.rs | 6
 tests/codegen/mem-replace-big-type.rs | 2
 tests/codegen/mem-replace-simple-type.rs | 2
 tests/codegen/repeat-trusted-len.rs | 4
 tests/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs | 2
 tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs | 166
 tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs | 4
 tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs | 7
 tests/codegen/simd_arith_offset.rs | 2
 tests/codegen/slice-iter-len-eq-zero.rs | 8
 tests/codegen/transmute-scalar.rs | 20
 tests/codegen/uninit-consts.rs | 11
 tests/codegen/union-abi.rs | 2
 tests/codegen/var-names.rs | 4
 24 files changed, 190 insertions(+), 193 deletions(-)
diff --git a/tests/codegen/array-codegen.rs b/tests/codegen/array-codegen.rs
index 98488eb92ee..71acd781549 100644
--- a/tests/codegen/array-codegen.rs
+++ b/tests/codegen/array-codegen.rs
@@ -6,10 +6,10 @@
 // CHECK-LABEL: @array_load
 #[no_mangle]
 pub fn array_load(a: &[u8; 4]) -> [u8; 4] {
-    // CHECK: %0 = alloca [4 x i8], align 1
+    // CHECK: %_0 = alloca [4 x i8], align 1
     // CHECK: %[[TEMP1:.+]] = load <4 x i8>, ptr %a, align 1
-    // CHECK: store <4 x i8> %[[TEMP1]], ptr %0, align 1
-    // CHECK: %[[TEMP2:.+]] = load i32, ptr %0, align 1
+    // CHECK: store <4 x i8> %[[TEMP1]], ptr %_0, align 1
+    // CHECK: %[[TEMP2:.+]] = load i32, ptr %_0, align 1
     // CHECK: ret i32 %[[TEMP2]]
     *a
 }
diff --git a/tests/codegen/avr/avr-func-addrspace.rs b/tests/codegen/avr/avr-func-addrspace.rs
index bc11e108124..83baae9e432 100644
--- a/tests/codegen/avr/avr-func-addrspace.rs
+++ b/tests/codegen/avr/avr-func-addrspace.rs
@@ -116,7 +116,7 @@ pub enum Either<T, U> { A(T), B(U) }
 // with the `ptr` field representing both `&i32` and `fn()` depending on the variant.
 // This is incorrect, because `fn()` should be `ptr addrspace(1)`, not `ptr`.
 
-// CHECK: define{{.+}}void @should_not_combine_addrspace({{.+\*|ptr}}{{.+}}sret{{.+}}%0, {{.+\*|ptr}}{{.+}}%x)
+// CHECK: define{{.+}}void @should_not_combine_addrspace({{.+\*|ptr}}{{.+}}sret{{.+}}%_0, {{.+\*|ptr}}{{.+}}%x)
 #[no_mangle]
 #[inline(never)]
 pub fn should_not_combine_addrspace(x: Either<&i32, fn()>) -> Either<&i32, fn()> {
diff --git a/tests/codegen/consts.rs b/tests/codegen/consts.rs
index fc2badc417c..810da581ce9 100644
--- a/tests/codegen/consts.rs
+++ b/tests/codegen/consts.rs
@@ -1,4 +1,5 @@
 // compile-flags: -C no-prepopulate-passes
+// min-llvm-version: 15.0 (for opaque pointers)
 
 #![crate_type = "lib"]
 
@@ -42,7 +43,7 @@ pub fn inline_enum_const() -> E<i8, i16> {
 #[no_mangle]
 pub fn low_align_const() -> E<i16, [i16; 3]> {
     // Check that low_align_const and high_align_const use the same constant
-    // CHECK: memcpy.{{.+}}({{i8\*|ptr}} align 2 %{{[0-9]+}}, {{i8\*|ptr}} align 2 {{.*}}[[LOW_HIGH]]{{.*}}, i{{(32|64)}} 8, i1 false)
+    // CHECK: memcpy.{{.+}}(ptr align 2 %_0, ptr align 2 {{.*}}[[LOW_HIGH]]{{.*}}, i{{(32|64)}} 8, i1 false)
     *&E::A(0)
 }
 
@@ -50,6 +51,6 @@ pub fn low_align_const() -> E<i16, [i16; 3]> {
 #[no_mangle]
 pub fn high_align_const() -> E<i16, i32> {
     // Check that low_align_const and high_align_const use the same constant
-    // CHECK: memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[LOW_HIGH]]{{.*}}, i{{(32|64)}} 8, i1 false)
+    // CHECK: memcpy.{{.+}}(ptr align 4 %_0, ptr align 4 {{.*}}[[LOW_HIGH]]{{.*}}, i{{(32|64)}} 8, i1 false)
     *&E::A(0)
 }
diff --git a/tests/codegen/enum-match.rs b/tests/codegen/enum-match.rs
index 36c6be19012..5548cd25147 100644
--- a/tests/codegen/enum-match.rs
+++ b/tests/codegen/enum-match.rs
@@ -15,7 +15,7 @@ pub enum Enum0 {
 // CHECK-NEXT: start:
 // CHECK-NEXT: %1 = icmp eq i8 %0, 2
 // CHECK-NEXT: %2 = and i8 %0, 1
-// CHECK-NEXT: %.0 = select i1 %1, i8 13, i8 %2
+// CHECK-NEXT: %_0.0 = select i1 %1, i8 13, i8 %2
 #[no_mangle]
 pub fn match0(e: Enum0) -> u8 {
     use Enum0::*;
diff --git a/tests/codegen/fewer-names.rs b/tests/codegen/fewer-names.rs
index a09c795924c..df1080bff2b 100644
--- a/tests/codegen/fewer-names.rs
+++ b/tests/codegen/fewer-names.rs
@@ -13,8 +13,8 @@ pub fn sum(x: u32, y: u32) -> u32 {
 
     // NO-LABEL: define{{.*}}i32 @sum(i32 noundef %x, i32 noundef %y)
     // NO-NEXT:  start:
-    // NO-NEXT:    %0 = add i32 %y, %x
-    // NO-NEXT:    ret i32 %0
+    // NO-NEXT:    %z = add i32 %y, %x
+    // NO-NEXT:    ret i32 %z
     let z = x + y;
     z
 }
diff --git a/tests/codegen/function-arguments-noopt.rs b/tests/codegen/function-arguments-noopt.rs
index 0c62e0d35e3..35f31eba3b1 100644
--- a/tests/codegen/function-arguments-noopt.rs
+++ b/tests/codegen/function-arguments-noopt.rs
@@ -42,7 +42,7 @@ pub fn borrow_call(x: &i32, f: fn(&i32) -> &i32) -> &i32 {
   f(x)
 }
 
-// CHECK: void @struct_({{%S\*|ptr}} sret(%S){{( %0)?}}, {{%S\*|ptr}} %x)
+// CHECK: void @struct_({{%S\*|ptr}} sret(%S){{( %_0)?}}, {{%S\*|ptr}} %x)
 #[no_mangle]
 pub fn struct_(x: S) -> S {
   x
@@ -51,7 +51,7 @@ pub fn struct_(x: S) -> S {
 // CHECK-LABEL: @struct_call
 #[no_mangle]
 pub fn struct_call(x: S, f: fn(S) -> S) -> S {
-  // CHECK: call void %f({{%S\*|ptr}} sret(%S){{( %0)?}}, {{%S\*|ptr}} %{{.+}})
+  // CHECK: call void %f({{%S\*|ptr}} sret(%S){{( %_0)?}}, {{%S\*|ptr}} %{{.+}})
   f(x)
 }
 
diff --git a/tests/codegen/function-arguments.rs b/tests/codegen/function-arguments.rs
index d6f019016a5..ccf4a5de327 100644
--- a/tests/codegen/function-arguments.rs
+++ b/tests/codegen/function-arguments.rs
@@ -188,7 +188,7 @@ pub fn notunpin_box(x: Box<NotUnpin>) -> Box<NotUnpin> {
   x
 }
 
-// CHECK: @struct_return({{%S\*|ptr}} noalias nocapture noundef sret(%S) dereferenceable(32){{( %0)?}})
+// CHECK: @struct_return({{%S\*|ptr}} noalias nocapture noundef sret(%S) dereferenceable(32){{( %_0)?}})
 #[no_mangle]
 pub fn struct_return() -> S {
   S {
diff --git a/tests/codegen/intrinsics/transmute-niched.rs b/tests/codegen/intrinsics/transmute-niched.rs
index 69e9b1d1206..fffc24a1181 100644
--- a/tests/codegen/intrinsics/transmute-niched.rs
+++ b/tests/codegen/intrinsics/transmute-niched.rs
@@ -169,16 +169,16 @@ pub unsafe fn check_bool_from_ordering(x: std::cmp::Ordering) -> bool {
 // CHECK-LABEL: @check_bool_to_ordering(
 #[no_mangle]
 pub unsafe fn check_bool_to_ordering(x: bool) -> std::cmp::Ordering {
-    // CHECK: %0 = zext i1 %x to i8
-    // OPT: %1 = icmp ule i8 %0, 1
-    // OPT: call void @llvm.assume(i1 %1)
-    // OPT: %2 = icmp uge i8 %0, -1
-    // OPT: %3 = icmp ule i8 %0, 1
-    // OPT: %4 = or i1 %2, %3
-    // OPT: call void @llvm.assume(i1 %4)
+    // CHECK: %_0 = zext i1 %x to i8
+    // OPT: %0 = icmp ule i8 %_0, 1
+    // OPT: call void @llvm.assume(i1 %0)
+    // OPT: %1 = icmp uge i8 %_0, -1
+    // OPT: %2 = icmp ule i8 %_0, 1
+    // OPT: %3 = or i1 %1, %2
+    // OPT: call void @llvm.assume(i1 %3)
     // DBG-NOT: icmp
     // DBG-NOT: assume
-    // CHECK: ret i8 %0
+    // CHECK: ret i8 %_0
 
     transmute(x)
 }
diff --git a/tests/codegen/intrinsics/transmute-x64.rs b/tests/codegen/intrinsics/transmute-x64.rs
index 99d258c6204..168838ef497 100644
--- a/tests/codegen/intrinsics/transmute-x64.rs
+++ b/tests/codegen/intrinsics/transmute-x64.rs
@@ -11,8 +11,8 @@ use std::mem::transmute;
 #[no_mangle]
 pub unsafe fn check_sse_float_to_int(x: __m128) -> __m128i {
     // CHECK-NOT: alloca
-    // CHECK: %1 = load <4 x float>, ptr %x, align 16
-    // CHECK: store <4 x float> %1, ptr %0, align 16
+    // CHECK: %0 = load <4 x float>, ptr %x, align 16
+    // CHECK: store <4 x float> %0, ptr %_0, align 16
     transmute(x)
 }
 
@@ -20,8 +20,8 @@ pub unsafe fn check_sse_float_to_int(x: __m128) -> __m128i {
 #[no_mangle]
 pub unsafe fn check_sse_pair_to_avx(x: (__m128i, __m128i)) -> __m256i {
     // CHECK-NOT: alloca
-    // CHECK: %1 = load <4 x i64>, ptr %x, align 16
-    // CHECK: store <4 x i64> %1, ptr %0, align 32
+    // CHECK: %0 = load <4 x i64>, ptr %x, align 16
+    // CHECK: store <4 x i64> %0, ptr %_0, align 32
     transmute(x)
 }
 
@@ -29,7 +29,7 @@ pub unsafe fn check_sse_pair_to_avx(x: (__m128i, __m128i)) -> __m256i {
 #[no_mangle]
 pub unsafe fn check_sse_pair_from_avx(x: __m256i) -> (__m128i, __m128i) {
     // CHECK-NOT: alloca
-    // CHECK: %1 = load <4 x i64>, ptr %x, align 32
-    // CHECK: store <4 x i64> %1, ptr %0, align 16
+    // CHECK: %0 = load <4 x i64>, ptr %x, align 32
+    // CHECK: store <4 x i64> %0, ptr %_0, align 16
     transmute(x)
 }
diff --git a/tests/codegen/intrinsics/transmute.rs b/tests/codegen/intrinsics/transmute.rs
index fe42494000e..f8c20960660 100644
--- a/tests/codegen/intrinsics/transmute.rs
+++ b/tests/codegen/intrinsics/transmute.rs
@@ -8,8 +8,8 @@
 #![feature(inline_const)]
 #![allow(unreachable_code)]
 
-use std::mem::MaybeUninit;
 use std::intrinsics::{transmute, transmute_unchecked};
+use std::mem::MaybeUninit;
 
 // Some of these need custom MIR to not get removed by MIR optimizations.
 use std::intrinsics::mir::*;
@@ -63,7 +63,7 @@ pub unsafe fn check_to_empty_array(x: [u32; 5]) -> [u32; 0] {
     // CHECK-NOT: trap
     // CHECK: call void @llvm.trap
     // CHECK-NOT: trap
-    mir!{
+    mir! {
         {
             RET = CastTransmute(x);
             Return()
@@ -78,7 +78,7 @@ pub unsafe fn check_from_empty_array(x: [u32; 0]) -> [u32; 5] {
     // CHECK-NOT: trap
     // CHECK: call void @llvm.trap
     // CHECK-NOT: trap
-    mir!{
+    mir! {
         {
             RET = CastTransmute(x);
             Return()
@@ -93,7 +93,7 @@ pub unsafe fn check_to_uninhabited(x: u16) {
     // CHECK-NOT: trap
     // CHECK: call void @llvm.trap
     // CHECK-NOT: trap
-    mir!{
+    mir! {
         let temp: BigNever;
         {
             temp = CastTransmute(x);
@@ -107,7 +107,7 @@ pub unsafe fn check_to_uninhabited(x: u16) {
 #[custom_mir(dialect = "runtime", phase = "optimized")]
 pub unsafe fn check_from_uninhabited(x: BigNever) -> u16 {
     // CHECK: ret i16 poison
-    mir!{
+    mir! {
         {
             RET = CastTransmute(x);
             Return()
@@ -122,9 +122,7 @@ pub unsafe fn check_intermediate_passthrough(x: u32) -> i32 {
     // CHECK: %[[TMP:.+]] = add i32 1, %x
     // CHECK: %[[RET:.+]] = add i32 %[[TMP]], 1
     // CHECK: ret i32 %[[RET]]
-    unsafe {
-        transmute::<u32, i32>(1 + x) + 1
-    }
+    unsafe { transmute::<u32, i32>(1 + x) + 1 }
 }
 
 // CHECK-LABEL: @check_nop_pair(
@@ -134,9 +132,7 @@ pub unsafe fn check_nop_pair(x: (u8, i8)) -> (i8, u8) {
     // CHECK: %0 = insertvalue { i8, i8 } poison, i8 %x.0, 0
     // CHECK: %1 = insertvalue { i8, i8 } %0, i8 %x.1, 1
     // CHECK: ret { i8, i8 } %1
-    unsafe {
-        transmute(x)
-    }
+    unsafe { transmute(x) }
 }
 
 // CHECK-LABEL: @check_to_newtype(
@@ -168,9 +164,9 @@ pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
 // CHECK-LABEL: @check_aggregate_from_bool(
 #[no_mangle]
 pub unsafe fn check_aggregate_from_bool(x: bool) -> Aggregate8 {
-    // CHECK: %0 = alloca %Aggregate8, align 1
+    // CHECK: %_0 = alloca %Aggregate8, align 1
     // CHECK: %[[BYTE:.+]] = zext i1 %x to i8
-    // CHECK: store i8 %[[BYTE]], ptr %0, align 1
+    // CHECK: store i8 %[[BYTE]], ptr %_0, align 1
     transmute(x)
 }
 
@@ -195,8 +191,8 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
 // CHECK-LABEL: @check_to_pair(
 #[no_mangle]
 pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
-    // CHECK: %0 = alloca { i32, i32 }, align 4
-    // CHECK: store i64 %x, ptr %0, align 4
+    // CHECK: %_0 = alloca { i32, i32 }, align 4
+    // CHECK: store i64 %x, ptr %_0, align 4
     transmute(x)
 }
 
@@ -207,11 +203,11 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
     // immediates so we can write using the destination alloca's alignment.
     const { assert!(std::mem::align_of::<Option<i32>>() == 4) };
 
-    // CHECK: %0 = alloca i64, align 8
-    // CHECK: store i32 %x.0, ptr %1, align 8
-    // CHECK: store i32 %x.1, ptr %2, align 4
-    // CHECK: %3 = load i64, ptr %0, align 8
-    // CHECK: ret i64 %3
+    // CHECK: %_0 = alloca i64, align 8
+    // CHECK: store i32 %x.0, ptr %0, align 8
+    // CHECK: store i32 %x.1, ptr %1, align 4
+    // CHECK: %2 = load i64, ptr %_0, align 8
+    // CHECK: ret i64 %2
     transmute(x)
 }
 
@@ -219,8 +215,8 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
 #[no_mangle]
 pub unsafe fn check_to_float(x: u32) -> f32 {
     // CHECK-NOT: alloca
-    // CHECK: %0 = bitcast i32 %x to float
-    // CHECK: ret float %0
+    // CHECK: %_0 = bitcast i32 %x to float
+    // CHECK: ret float %_0
     transmute(x)
 }
 
@@ -228,16 +224,16 @@ pub unsafe fn check_to_float(x: u32) -> f32 {
 #[no_mangle]
 pub unsafe fn check_from_float(x: f32) -> u32 {
     // CHECK-NOT: alloca
-    // CHECK: %0 = bitcast float %x to i32
-    // CHECK: ret i32 %0
+    // CHECK: %_0 = bitcast float %x to i32
+    // CHECK: ret i32 %_0
     transmute(x)
 }
 
 // CHECK-LABEL: @check_to_bytes(
 #[no_mangle]
 pub unsafe fn check_to_bytes(x: u32) -> [u8; 4] {
-    // CHECK: %0 = alloca [4 x i8], align 1
-    // CHECK: store i32 %x, ptr %0, align 1
+    // CHECK: %_0 = alloca [4 x i8], align 1
+    // CHECK: store i32 %x, ptr %_0, align 1
     transmute(x)
 }
 
@@ -253,10 +249,10 @@ pub unsafe fn check_from_bytes(x: [u8; 4]) -> u32 {
 // CHECK-LABEL: @check_to_aggregate(
 #[no_mangle]
 pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
-    // CHECK: %0 = alloca %Aggregate64, align 4
-    // CHECK: store i64 %x, ptr %0, align 4
-    // CHECK: %1 = load i64, ptr %0, align 4
-    // CHECK: ret i64 %1
+    // CHECK: %_0 = alloca %Aggregate64, align 4
+    // CHECK: store i64 %x, ptr %_0, align 4
+    // CHECK: %0 = load i64, ptr %_0, align 4
+    // CHECK: ret i64 %0
     transmute(x)
 }
 
@@ -273,7 +269,7 @@ pub unsafe fn check_from_aggregate(x: Aggregate64) -> u64 {
 #[no_mangle]
 pub unsafe fn check_long_array_less_aligned(x: [u64; 100]) -> [u16; 400] {
     // CHECK-NEXT: start
-    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 2 %0, ptr align 8 %x, i64 800, i1 false)
+    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 2 %_0, ptr align 8 %x, i64 800, i1 false)
     // CHECK-NEXT: ret void
     transmute(x)
 }
@@ -282,7 +278,7 @@ pub unsafe fn check_long_array_less_aligned(x: [u64; 100]) -> [u16; 400] {
 #[no_mangle]
 pub unsafe fn check_long_array_more_aligned(x: [u8; 100]) -> [u32; 25] {
     // CHECK-NEXT: start
-    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 1 %x, i64 100, i1 false)
+    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %_0, ptr align 1 %x, i64 100, i1 false)
     // CHECK-NEXT: ret void
     transmute(x)
 }
@@ -301,8 +297,8 @@ pub unsafe fn check_pair_with_bool(x: (u8, bool)) -> (bool, i8) {
 pub unsafe fn check_float_to_pointer(x: f64) -> *const () {
     // CHECK-NOT: alloca
     // CHECK: %0 = bitcast double %x to i64
-    // CHECK: %1 = inttoptr i64 %0 to ptr
-    // CHECK: ret ptr %1
+    // CHECK: %_0 = inttoptr i64 %0 to ptr
+    // CHECK: ret ptr %_0
     transmute(x)
 }
 
@@ -311,8 +307,8 @@ pub unsafe fn check_float_to_pointer(x: f64) -> *const () {
 pub unsafe fn check_float_from_pointer(x: *const ()) -> f64 {
     // CHECK-NOT: alloca
     // CHECK: %0 = ptrtoint ptr %x to i64
-    // CHECK: %1 = bitcast i64 %0 to double
-    // CHECK: ret double %1
+    // CHECK: %_0 = bitcast i64 %0 to double
+    // CHECK: ret double %_0
     transmute(x)
 }
 
@@ -376,10 +372,10 @@ pub unsafe fn check_issue_110005(x: (usize, bool)) -> Option<Box<[u8]>> {
 // CHECK-LABEL: @check_pair_to_dst_ref(
 #[no_mangle]
 pub unsafe fn check_pair_to_dst_ref<'a>(x: (usize, usize)) -> &'a [u8] {
-    // CHECK: %0 = inttoptr i64 %x.0 to ptr
-    // CHECK: %1 = insertvalue { ptr, i64 } poison, ptr %0, 0
-    // CHECK: %2 = insertvalue { ptr, i64 } %1, i64 %x.1, 1
-    // CHECK: ret { ptr, i64 } %2
+    // CHECK: %_0.0 = inttoptr i64 %x.0 to ptr
+    // CHECK: %0 = insertvalue { ptr, i64 } poison, ptr %_0.0, 0
+    // CHECK: %1 = insertvalue { ptr, i64 } %0, i64 %x.1, 1
+    // CHECK: ret { ptr, i64 } %1
     transmute(x)
 }
 
@@ -391,7 +387,7 @@ pub unsafe fn check_issue_109992(x: ()) -> [(); 1] {
 
     // CHECK: start
     // CHECK-NEXT: ret void
-    mir!{
+    mir! {
         {
             RET = CastTransmute(x);
             Return()
@@ -408,7 +404,7 @@ pub unsafe fn check_unit_to_never(x: ()) {
     // CHECK-NOT: trap
     // CHECK: call void @llvm.trap
     // CHECK-NOT: trap
-    mir!{
+    mir! {
         let temp: ZstNever;
         {
             temp = CastTransmute(x);
@@ -425,7 +421,7 @@ pub unsafe fn check_unit_from_never(x: ZstNever) -> () {
 
     // CHECK: start
     // CHECK-NEXT: ret void
-    mir!{
+    mir! {
         {
             RET = CastTransmute(x);
             Return()
@@ -457,10 +453,10 @@ pub struct HighAlignScalar(u8);
 // CHECK-LABEL: @check_to_overalign(
 #[no_mangle]
 pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
-    // CHECK: %0 = alloca %HighAlignScalar, align 8
-    // CHECK: store i64 %x, ptr %0, align 8
-    // CHECK: %1 = load i64, ptr %0, align 8
-    // CHECK: ret i64 %1
+    // CHECK: %_0 = alloca %HighAlignScalar, align 8
+    // CHECK: store i64 %x, ptr %_0, align 8
+    // CHECK: %0 = load i64, ptr %_0, align 8
+    // CHECK: ret i64 %0
     transmute(x)
 }
 
diff --git a/tests/codegen/match-optimized.rs b/tests/codegen/match-optimized.rs
index 520c46a0d57..59e6eeb7c5d 100644
--- a/tests/codegen/match-optimized.rs
+++ b/tests/codegen/match-optimized.rs
@@ -20,13 +20,13 @@ pub fn exhaustive_match(e: E) -> u8 {
 // CHECK-NEXT: unreachable
 //
 // CHECK: [[A]]:
-// CHECK-NEXT: store i8 0, {{i8\*|ptr}} %1, align 1
+// CHECK-NEXT: store i8 0, {{i8\*|ptr}} %_0, align 1
 // CHECK-NEXT: br label %[[EXIT:[a-zA-Z0-9_]+]]
 // CHECK: [[B]]:
-// CHECK-NEXT: store i8 1, {{i8\*|ptr}} %1, align 1
+// CHECK-NEXT: store i8 1, {{i8\*|ptr}} %_0, align 1
 // CHECK-NEXT: br label %[[EXIT]]
 // CHECK: [[C]]:
-// CHECK-NEXT: store i8 2, {{i8\*|ptr}} %1, align 1
+// CHECK-NEXT: store i8 2, {{i8\*|ptr}} %_0, align 1
 // CHECK-NEXT: br label %[[EXIT]]
     match e {
         E::A => 0,
diff --git a/tests/codegen/mem-replace-big-type.rs b/tests/codegen/mem-replace-big-type.rs
index c6b920cf599..55c2741faaf 100644
--- a/tests/codegen/mem-replace-big-type.rs
+++ b/tests/codegen/mem-replace-big-type.rs
@@ -25,7 +25,7 @@ pub fn replace_big(dst: &mut Big, src: Big) -> Big {
 // For a large type, we expect exactly three `memcpy`s
 // CHECK-LABEL: define internal void @{{.+}}mem{{.+}}replace{{.+}}sret(%Big)
 // CHECK-NOT: call void @llvm.memcpy
-// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 8 %0, {{i8\*|ptr}} align 8 %dest, i{{.*}} 56, i1 false)
+// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 8 %result, {{i8\*|ptr}} align 8 %dest, i{{.*}} 56, i1 false)
 // CHECK-NOT: call void @llvm.memcpy
 // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 8 %dest, {{i8\*|ptr}} align 8 %src, i{{.*}} 56, i1 false)
 // CHECK-NOT: call void @llvm.memcpy
diff --git a/tests/codegen/mem-replace-simple-type.rs b/tests/codegen/mem-replace-simple-type.rs
index 6151177de15..5c4acf813ea 100644
--- a/tests/codegen/mem-replace-simple-type.rs
+++ b/tests/codegen/mem-replace-simple-type.rs
@@ -38,7 +38,7 @@ pub fn replace_ref_str<'a>(r: &mut &'a str, v: &'a str) -> &'a str {
 pub fn replace_short_array(r: &mut [u32; 3], v: [u32; 3]) -> [u32; 3] {
     // CHECK-NOT: alloca
     // CHECK: %[[R:.+]] = load <3 x i32>, ptr %r, align 4
-    // CHECK: store <3 x i32> %[[R]], ptr %0
+    // CHECK: store <3 x i32> %[[R]], ptr %result
     // CHECK: %[[V:.+]] = load <3 x i32>, ptr %v, align 4
     // CHECK: store <3 x i32> %[[V]], ptr %r
     std::mem::replace(r, v)
diff --git a/tests/codegen/repeat-trusted-len.rs b/tests/codegen/repeat-trusted-len.rs
index 87c8fe1354d..d06978f2435 100644
--- a/tests/codegen/repeat-trusted-len.rs
+++ b/tests/codegen/repeat-trusted-len.rs
@@ -8,13 +8,13 @@ use std::iter;
 // CHECK-LABEL: @repeat_take_collect
 #[no_mangle]
 pub fn repeat_take_collect() -> Vec<u8> {
-// CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}align 1{{.*}} %{{[0-9]+}}, i8 42, i{{[0-9]+}} 100000, i1 false)
+    // CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}align 1{{.*}} %{{.*}}, i8 42, i{{[0-9]+}} 100000, i1 false)
     iter::repeat(42).take(100000).collect()
 }
 
 // CHECK-LABEL: @repeat_with_take_collect
 #[no_mangle]
 pub fn repeat_with_take_collect() -> Vec<u8> {
-// CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}align 1{{.*}} %{{[0-9]+}}, i8 13, i{{[0-9]+}} 12345, i1 false)
+    // CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}align 1{{.*}} %{{.*}}, i8 13, i{{[0-9]+}} 12345, i1 false)
     iter::repeat_with(|| 13).take(12345).collect()
 }
diff --git a/tests/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs b/tests/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs
index 045f01985a5..ec18fa9a328 100644
--- a/tests/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs
+++ b/tests/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs
@@ -152,7 +152,7 @@ pub extern "C" fn f_scalar_stack_1(
 ) {
 }
 
-// CHECK: define void @f_scalar_stack_2({{%Large\*|ptr}} {{.*}}sret{{.*}} %0, i64 noundef %a, i128 %1, i128 %2, i64 noundef %d, i8 noundef zeroext %e, i8 noundef %f, i8 noundef %g)
+// CHECK: define void @f_scalar_stack_2({{%Large\*|ptr}} {{.*}}sret{{.*}} %_0, i64 noundef %a, i128 %0, i128 %1, i64 noundef %d, i8 noundef zeroext %e, i8 noundef %f, i8 noundef %g)
 #[no_mangle]
 pub extern "C" fn f_scalar_stack_2(
     a: u64,
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
index faac7566a0c..0bcfacec6d7 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
@@ -116,150 +116,150 @@ extern "platform-intrinsic" {
     fn simd_saturating_sub<T>(x: T, y: T) -> T;
 }
 
-// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
+// NOTE(eddyb) `%{{x|0}}` is used because on some targets (e.g. WASM)
 // SIMD vectors are passed directly, resulting in `%x` being a vector,
 // while on others they're passed indirectly, resulting in `%x` being
-// a pointer to a vector, and `%1` a vector loaded from that pointer.
+// a pointer to a vector, and `%0` a vector loaded from that pointer.
 // This is controlled by the target spec option `simd_types_indirect`.
-// The same applies to `%{{y|2}}` as well.
+// The same applies to `%{{y|1}}` as well.
 
 // CHECK-LABEL: @sadd_i8x2
 #[no_mangle]
 pub unsafe fn sadd_i8x2(x: i8x2, y: i8x2) -> i8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i8x4
 #[no_mangle]
 pub unsafe fn sadd_i8x4(x: i8x4, y: i8x4) -> i8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i8x8
 #[no_mangle]
 pub unsafe fn sadd_i8x8(x: i8x8, y: i8x8) -> i8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i8x16
 #[no_mangle]
 pub unsafe fn sadd_i8x16(x: i8x16, y: i8x16) -> i8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i8x32
 #[no_mangle]
 pub unsafe fn sadd_i8x32(x: i8x32, y: i8x32) -> i8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i8x64
 #[no_mangle]
 pub unsafe fn sadd_i8x64(x: i8x64, y: i8x64) -> i8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i16x2
 #[no_mangle]
 pub unsafe fn sadd_i16x2(x: i16x2, y: i16x2) -> i16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i16x4
 #[no_mangle]
 pub unsafe fn sadd_i16x4(x: i16x4, y: i16x4) -> i16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i16x8
 #[no_mangle]
 pub unsafe fn sadd_i16x8(x: i16x8, y: i16x8) -> i16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i16x16
 #[no_mangle]
 pub unsafe fn sadd_i16x16(x: i16x16, y: i16x16) -> i16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i16x32
 #[no_mangle]
 pub unsafe fn sadd_i16x32(x: i16x32, y: i16x32) -> i16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i32x2
 #[no_mangle]
 pub unsafe fn sadd_i32x2(x: i32x2, y: i32x2) -> i32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i32x4
 #[no_mangle]
 pub unsafe fn sadd_i32x4(x: i32x4, y: i32x4) -> i32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i32x8
 #[no_mangle]
 pub unsafe fn sadd_i32x8(x: i32x8, y: i32x8) -> i32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i32x16
 #[no_mangle]
 pub unsafe fn sadd_i32x16(x: i32x16, y: i32x16) -> i32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i64x2
 #[no_mangle]
 pub unsafe fn sadd_i64x2(x: i64x2, y: i64x2) -> i64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i64x4
 #[no_mangle]
 pub unsafe fn sadd_i64x4(x: i64x4, y: i64x4) -> i64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i64x8
 #[no_mangle]
 pub unsafe fn sadd_i64x8(x: i64x8, y: i64x8) -> i64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i128x2
 #[no_mangle]
 pub unsafe fn sadd_i128x2(x: i128x2, y: i128x2) -> i128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @sadd_i128x4
 #[no_mangle]
 pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
@@ -268,140 +268,140 @@ pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
 // CHECK-LABEL: @uadd_u8x2
 #[no_mangle]
 pub unsafe fn uadd_u8x2(x: u8x2, y: u8x2) -> u8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u8x4
 #[no_mangle]
 pub unsafe fn uadd_u8x4(x: u8x4, y: u8x4) -> u8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u8x8
 #[no_mangle]
 pub unsafe fn uadd_u8x8(x: u8x8, y: u8x8) -> u8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u8x16
 #[no_mangle]
 pub unsafe fn uadd_u8x16(x: u8x16, y: u8x16) -> u8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u8x32
 #[no_mangle]
 pub unsafe fn uadd_u8x32(x: u8x32, y: u8x32) -> u8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u8x64
 #[no_mangle]
 pub unsafe fn uadd_u8x64(x: u8x64, y: u8x64) -> u8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u16x2
 #[no_mangle]
 pub unsafe fn uadd_u16x2(x: u16x2, y: u16x2) -> u16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u16x4
 #[no_mangle]
 pub unsafe fn uadd_u16x4(x: u16x4, y: u16x4) -> u16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u16x8
 #[no_mangle]
 pub unsafe fn uadd_u16x8(x: u16x8, y: u16x8) -> u16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u16x16
 #[no_mangle]
 pub unsafe fn uadd_u16x16(x: u16x16, y: u16x16) -> u16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u16x32
 #[no_mangle]
 pub unsafe fn uadd_u16x32(x: u16x32, y: u16x32) -> u16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u32x2
 #[no_mangle]
 pub unsafe fn uadd_u32x2(x: u32x2, y: u32x2) -> u32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u32x4
 #[no_mangle]
 pub unsafe fn uadd_u32x4(x: u32x4, y: u32x4) -> u32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u32x8
 #[no_mangle]
 pub unsafe fn uadd_u32x8(x: u32x8, y: u32x8) -> u32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u32x16
 #[no_mangle]
 pub unsafe fn uadd_u32x16(x: u32x16, y: u32x16) -> u32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u64x2
 #[no_mangle]
 pub unsafe fn uadd_u64x2(x: u64x2, y: u64x2) -> u64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u64x4
 #[no_mangle]
 pub unsafe fn uadd_u64x4(x: u64x4, y: u64x4) -> u64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u64x8
 #[no_mangle]
 pub unsafe fn uadd_u64x8(x: u64x8, y: u64x8) -> u64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u128x2
 #[no_mangle]
 pub unsafe fn uadd_u128x2(x: u128x2, y: u128x2) -> u128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
 // CHECK-LABEL: @uadd_u128x4
 #[no_mangle]
 pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }
 
@@ -412,140 +412,140 @@ pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
 // CHECK-LABEL: @ssub_i8x2
 #[no_mangle]
 pub unsafe fn ssub_i8x2(x: i8x2, y: i8x2) -> i8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i8x4
 #[no_mangle]
 pub unsafe fn ssub_i8x4(x: i8x4, y: i8x4) -> i8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i8x8
 #[no_mangle]
 pub unsafe fn ssub_i8x8(x: i8x8, y: i8x8) -> i8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i8x16
 #[no_mangle]
 pub unsafe fn ssub_i8x16(x: i8x16, y: i8x16) -> i8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i8x32
 #[no_mangle]
 pub unsafe fn ssub_i8x32(x: i8x32, y: i8x32) -> i8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i8x64
 #[no_mangle]
 pub unsafe fn ssub_i8x64(x: i8x64, y: i8x64) -> i8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i16x2
 #[no_mangle]
 pub unsafe fn ssub_i16x2(x: i16x2, y: i16x2) -> i16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i16x4
 #[no_mangle]
 pub unsafe fn ssub_i16x4(x: i16x4, y: i16x4) -> i16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i16x8
 #[no_mangle]
 pub unsafe fn ssub_i16x8(x: i16x8, y: i16x8) -> i16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i16x16
 #[no_mangle]
 pub unsafe fn ssub_i16x16(x: i16x16, y: i16x16) -> i16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i16x32
 #[no_mangle]
 pub unsafe fn ssub_i16x32(x: i16x32, y: i16x32) -> i16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i32x2
 #[no_mangle]
 pub unsafe fn ssub_i32x2(x: i32x2, y: i32x2) -> i32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i32x4
 #[no_mangle]
 pub unsafe fn ssub_i32x4(x: i32x4, y: i32x4) -> i32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i32x8
 #[no_mangle]
 pub unsafe fn ssub_i32x8(x: i32x8, y: i32x8) -> i32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i32x16
 #[no_mangle]
 pub unsafe fn ssub_i32x16(x: i32x16, y: i32x16) -> i32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i64x2
 #[no_mangle]
 pub unsafe fn ssub_i64x2(x: i64x2, y: i64x2) -> i64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i64x4
 #[no_mangle]
 pub unsafe fn ssub_i64x4(x: i64x4, y: i64x4) -> i64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i64x8
 #[no_mangle]
 pub unsafe fn ssub_i64x8(x: i64x8, y: i64x8) -> i64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i128x2
 #[no_mangle]
 pub unsafe fn ssub_i128x2(x: i128x2, y: i128x2) -> i128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @ssub_i128x4
 #[no_mangle]
 pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
@@ -554,139 +554,139 @@ pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
 // CHECK-LABEL: @usub_u8x2
 #[no_mangle]
 pub unsafe fn usub_u8x2(x: u8x2, y: u8x2) -> u8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u8x4
 #[no_mangle]
 pub unsafe fn usub_u8x4(x: u8x4, y: u8x4) -> u8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u8x8
 #[no_mangle]
 pub unsafe fn usub_u8x8(x: u8x8, y: u8x8) -> u8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u8x16
 #[no_mangle]
 pub unsafe fn usub_u8x16(x: u8x16, y: u8x16) -> u8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u8x32
 #[no_mangle]
 pub unsafe fn usub_u8x32(x: u8x32, y: u8x32) -> u8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u8x64
 #[no_mangle]
 pub unsafe fn usub_u8x64(x: u8x64, y: u8x64) -> u8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u16x2
 #[no_mangle]
 pub unsafe fn usub_u16x2(x: u16x2, y: u16x2) -> u16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u16x4
 #[no_mangle]
 pub unsafe fn usub_u16x4(x: u16x4, y: u16x4) -> u16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u16x8
 #[no_mangle]
 pub unsafe fn usub_u16x8(x: u16x8, y: u16x8) -> u16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u16x16
 #[no_mangle]
 pub unsafe fn usub_u16x16(x: u16x16, y: u16x16) -> u16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u16x32
 #[no_mangle]
 pub unsafe fn usub_u16x32(x: u16x32, y: u16x32) -> u16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u32x2
 #[no_mangle]
 pub unsafe fn usub_u32x2(x: u32x2, y: u32x2) -> u32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u32x4
 #[no_mangle]
 pub unsafe fn usub_u32x4(x: u32x4, y: u32x4) -> u32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u32x8
 #[no_mangle]
 pub unsafe fn usub_u32x8(x: u32x8, y: u32x8) -> u32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u32x16
 #[no_mangle]
 pub unsafe fn usub_u32x16(x: u32x16, y: u32x16) -> u32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u64x2
 #[no_mangle]
 pub unsafe fn usub_u64x2(x: u64x2, y: u64x2) -> u64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u64x4
 #[no_mangle]
 pub unsafe fn usub_u64x4(x: u64x4, y: u64x4) -> u64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u64x8
 #[no_mangle]
 pub unsafe fn usub_u64x8(x: u64x8, y: u64x8) -> u64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u128x2
 #[no_mangle]
 pub unsafe fn usub_u128x2(x: u128x2, y: u128x2) -> u128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }
 
 // CHECK-LABEL: @usub_u128x4
 #[no_mangle]
 pub unsafe fn usub_u128x4(x: u128x4, y: u128x4) -> u128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs
index 80583dec195..a5d2509d000 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs
@@ -35,13 +35,13 @@ pub unsafe fn extract_s(v: S<4>, i: u32) -> f32  {
 // CHECK-LABEL: @insert_m
 #[no_mangle]
 pub unsafe fn insert_m(v: M, i: u32, j: f32) -> M  {
-    // CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i
+    // CHECK: insertelement <4 x float> %{{v|0|1}}, float %j, i32 %i
     simd_insert(v, i, j)
 }
 
 // CHECK-LABEL: @insert_s
 #[no_mangle]
 pub unsafe fn insert_s(v: S<4>, i: u32, j: f32) -> S<4>  {
-    // CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i
+    // CHECK: insertelement <4 x float> %{{v|0|1}}, float %j, i32 %i
     simd_insert(v, i, j)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs
index fd488a14bd3..3a0e37de2f3 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs
@@ -2,7 +2,6 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
-
 #![allow(non_camel_case_types)]
 #![feature(repr_simd, platform_intrinsics)]
 #![feature(inline_const)]
@@ -44,7 +43,7 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> {
 #[no_mangle]
 pub fn build_array_transmute_s(x: [f32; 4]) -> S<4> {
     // CHECK: %[[VAL:.+]] = load <4 x float>, {{ptr %x|.+>\* %.+}}, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %_0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }
 
@@ -59,7 +58,7 @@ pub fn build_array_t(x: [f32; 4]) -> T {
 #[no_mangle]
 pub fn build_array_transmute_t(x: [f32; 4]) -> T {
     // CHECK: %[[VAL:.+]] = load <4 x float>, {{ptr %x|.+>\* %.+}}, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %_0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }
 
@@ -78,6 +77,6 @@ pub fn build_array_u(x: [f32; 4]) -> U {
 #[no_mangle]
 pub fn build_array_transmute_u(x: [f32; 4]) -> U {
     // CHECK: %[[VAL:.+]] = load <4 x float>, {{ptr %x|.+>\* %.+}}, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %_0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }
diff --git a/tests/codegen/simd_arith_offset.rs b/tests/codegen/simd_arith_offset.rs
index f2333777391..74d7edc70d7 100644
--- a/tests/codegen/simd_arith_offset.rs
+++ b/tests/codegen/simd_arith_offset.rs
@@ -21,6 +21,6 @@ pub struct Simd<T, const LANES: usize>([T; LANES]);
 // CHECK-LABEL: smoke
 #[no_mangle]
 pub fn smoke(ptrs: SimdConstPtr<u8, 8>, offsets: Simd<usize, 8>) -> SimdConstPtr<u8, 8> {
-    // CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %1, <8 x i64> %2
+    // CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %0, <8 x i64> %1
     unsafe { simd_arith_offset(ptrs, offsets) }
 }
diff --git a/tests/codegen/slice-iter-len-eq-zero.rs b/tests/codegen/slice-iter-len-eq-zero.rs
index 894b0ec3de4..69f78cea564 100644
--- a/tests/codegen/slice-iter-len-eq-zero.rs
+++ b/tests/codegen/slice-iter-len-eq-zero.rs
@@ -9,8 +9,8 @@ type Demo = [u8; 3];
 #[no_mangle]
 pub fn slice_iter_len_eq_zero(y: std::slice::Iter<'_, Demo>) -> bool {
     // CHECK-NOT: sub
-    // CHECK: %2 = icmp eq {{i8\*|ptr}} {{%1|%0}}, {{%1|%0}}
-    // CHECK: ret i1 %2
+    // CHECK: %_0 = icmp eq {{i8\*|ptr}} {{%1|%0}}, {{%1|%0}}
+    // CHECK: ret i1 %_0
     y.len() == 0
 }
 
@@ -22,7 +22,7 @@ pub fn array_into_iter_len_eq_zero(y: std::array::IntoIter<Demo, 123>) -> bool {
 
     // CHECK-NOT: icmp
     // CHECK-NOT: sub
-    // CHECK: %1 = icmp eq {{i16|i32|i64}}
-    // CHECK: ret i1 %1
+    // CHECK: %_0 = icmp eq {{i16|i32|i64}}
+    // CHECK: ret i1 %_0
     y.len() == 0
 }
diff --git a/tests/codegen/transmute-scalar.rs b/tests/codegen/transmute-scalar.rs
index a0894a505c7..293b0d664f6 100644
--- a/tests/codegen/transmute-scalar.rs
+++ b/tests/codegen/transmute-scalar.rs
@@ -11,24 +11,24 @@
 // that allows us to avoid the `alloca`s entirely; see `rvalue_creates_operand`.
 
 // CHECK-LABEL: define{{.*}}i32 @f32_to_bits(float %x)
-// CHECK: %0 = bitcast float %x to i32
-// CHECK-NEXT: ret i32 %0
+// CHECK: %_0 = bitcast float %x to i32
+// CHECK-NEXT: ret i32 %_0
 #[no_mangle]
 pub fn f32_to_bits(x: f32) -> u32 {
     unsafe { std::mem::transmute(x) }
 }
 
 // CHECK-LABEL: define{{.*}}i8 @bool_to_byte(i1 zeroext %b)
-// CHECK: %0 = zext i1 %b to i8
-// CHECK-NEXT: ret i8 %0
+// CHECK: %_0 = zext i1 %b to i8
+// CHECK-NEXT: ret i8 %_0
 #[no_mangle]
 pub fn bool_to_byte(b: bool) -> u8 {
     unsafe { std::mem::transmute(b) }
 }
 
 // CHECK-LABEL: define{{.*}}zeroext i1 @byte_to_bool(i8 %byte)
-// CHECK: %0 = trunc i8 %byte to i1
-// CHECK-NEXT: ret i1 %0
+// CHECK: %_0 = trunc i8 %byte to i1
+// CHECK-NEXT: ret i1 %_0
 #[no_mangle]
 pub unsafe fn byte_to_bool(byte: u8) -> bool {
     std::mem::transmute(byte)
@@ -42,16 +42,16 @@ pub fn ptr_to_ptr(p: *mut u16) -> *mut u8 {
 }
 
 // CHECK: define{{.*}}[[USIZE:i[0-9]+]] @ptr_to_int(ptr %p)
-// CHECK: %0 = ptrtoint ptr %p to [[USIZE]]
-// CHECK-NEXT: ret [[USIZE]] %0
+// CHECK: %_0 = ptrtoint ptr %p to [[USIZE]]
+// CHECK-NEXT: ret [[USIZE]] %_0
 #[no_mangle]
 pub fn ptr_to_int(p: *mut u16) -> usize {
     unsafe { std::mem::transmute(p) }
 }
 
 // CHECK: define{{.*}}ptr @int_to_ptr([[USIZE]] %i)
-// CHECK: %0 = inttoptr [[USIZE]] %i to ptr
-// CHECK-NEXT: ret ptr %0
+// CHECK: %_0 = inttoptr [[USIZE]] %i to ptr
+// CHECK-NEXT: ret ptr %_0
 #[no_mangle]
 pub fn int_to_ptr(i: usize) -> *mut u16 {
     unsafe { std::mem::transmute(i) }
diff --git a/tests/codegen/uninit-consts.rs b/tests/codegen/uninit-consts.rs
index 54e9a9e9bb8..f169988e1f5 100644
--- a/tests/codegen/uninit-consts.rs
+++ b/tests/codegen/uninit-consts.rs
@@ -1,4 +1,5 @@
 // compile-flags: -C no-prepopulate-passes
+// min-llvm-version: 15.0 (for opaque pointers)
 
 // Check that we use undef (and not zero) for uninitialized bytes in constants.
 
@@ -8,7 +9,7 @@ use std::mem::MaybeUninit;
 
 pub struct PartiallyUninit {
     x: u32,
-    y: MaybeUninit<[u8; 10]>
+    y: MaybeUninit<[u8; 10]>,
 }
 
 // CHECK: [[FULLY_UNINIT:@[0-9]+]] = private unnamed_addr constant <{ [10 x i8] }> undef
@@ -25,7 +26,7 @@ pub struct PartiallyUninit {
 #[no_mangle]
 pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
     const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit();
-    // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 1 %{{[0-9]+}}, {{i8\*|ptr}} align 1 {{.*}}[[FULLY_UNINIT]]{{.*}}, i{{(32|64)}} 10, i1 false)
+    // CHECK: call void @llvm.memcpy.{{.+}}(ptr align 1 %_0, ptr align 1 {{.*}}[[FULLY_UNINIT]]{{.*}}, i{{(32|64)}} 10, i1 false)
     M
 }
 
@@ -33,7 +34,7 @@ pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
 #[no_mangle]
 pub const fn partially_uninit() -> PartiallyUninit {
     const X: PartiallyUninit = PartiallyUninit { x: 0xdeadbeef, y: MaybeUninit::uninit() };
-    // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[PARTIALLY_UNINIT]]{{.*}}, i{{(32|64)}} 16, i1 false)
+    // CHECK: call void @llvm.memcpy.{{.+}}(ptr align 4 %_0, ptr align 4 {{.*}}[[PARTIALLY_UNINIT]]{{.*}}, i{{(32|64)}} 16, i1 false)
     X
 }
 
@@ -41,7 +42,7 @@ pub const fn partially_uninit() -> PartiallyUninit {
 #[no_mangle]
 pub const fn uninit_padding_huge() -> [(u32, u8); 4096] {
     const X: [(u32, u8); 4096] = [(123, 45); 4096];
-    // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[UNINIT_PADDING_HUGE]]{{.*}}, i{{(32|64)}} 32768, i1 false)
+    // CHECK: call void @llvm.memcpy.{{.+}}(ptr align 4 %_0, ptr align 4 {{.*}}[[UNINIT_PADDING_HUGE]]{{.*}}, i{{(32|64)}} 32768, i1 false)
     X
 }
 
@@ -49,6 +50,6 @@ pub const fn uninit_padding_huge() -> [(u32, u8); 4096] {
 #[no_mangle]
 pub const fn fully_uninit_huge() -> MaybeUninit<[u32; 4096]> {
     const F: MaybeUninit<[u32; 4096]> = MaybeUninit::uninit();
-    // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[FULLY_UNINIT_HUGE]]{{.*}}, i{{(32|64)}} 16384, i1 false)
+    // CHECK: call void @llvm.memcpy.{{.+}}(ptr align 4 %_0, ptr align 4 {{.*}}[[FULLY_UNINIT_HUGE]]{{.*}}, i{{(32|64)}} 16384, i1 false)
     F
 }
diff --git a/tests/codegen/union-abi.rs b/tests/codegen/union-abi.rs
index 8481ca8ccfa..653c5837daf 100644
--- a/tests/codegen/union-abi.rs
+++ b/tests/codegen/union-abi.rs
@@ -73,4 +73,4 @@ pub union UnionBool { b:bool }
 // CHECK: define {{(dso_local )?}}noundef zeroext i1 @test_UnionBool(i8 %b)
 #[no_mangle]
 pub fn test_UnionBool(b: UnionBool) -> bool { unsafe { b.b }  }
-// CHECK: %0 = trunc i8 %b to i1
+// CHECK: %_0 = trunc i8 %b to i1
diff --git a/tests/codegen/var-names.rs b/tests/codegen/var-names.rs
index 53841df32e8..d4715efad73 100644
--- a/tests/codegen/var-names.rs
+++ b/tests/codegen/var-names.rs
@@ -9,7 +9,7 @@ pub fn test(a: u32, b: u32) -> u32 {
     // CHECK: %c = add i32 %a, %b
     let d = c;
     let e = d * a;
-    // CHECK-NEXT: %0 = mul i32 %c, %a
+    // CHECK-NEXT: %e = mul i32 %c, %a
     e
-    // CHECK-NEXT: ret i32 %0
+    // CHECK-NEXT: ret i32 %e
 }