Diffstat (limited to 'src/test/codegen')
-rw-r--r--  src/test/codegen/adjustments.rs         |  8
-rw-r--r--  src/test/codegen/align-struct.rs        |  2
-rw-r--r--  src/test/codegen/fastcall-inreg.rs      | 12
-rw-r--r--  src/test/codegen/function-arguments.rs  | 32
-rw-r--r--  src/test/codegen/move-val-init.rs       |  2
-rw-r--r--  src/test/codegen/refs.rs                |  6
-rw-r--r--  src/test/codegen/stores.rs              | 12
7 files changed, 36 insertions, 38 deletions
diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs
index 40603845da2..bd85e303143 100644
--- a/src/test/codegen/adjustments.rs
+++ b/src/test/codegen/adjustments.rs
@@ -13,7 +13,7 @@
 #![crate_type = "lib"]
 
 // Hack to get the correct size for the length part in slices
-// CHECK: @helper([[USIZE:i[0-9]+]])
+// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
 #[no_mangle]
 fn helper(_: usize) {
 }
@@ -23,9 +23,9 @@ fn helper(_: usize) {
 pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
     // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
     // check that we copy directly to the return value slot
-// CHECK: %2 = insertvalue { i8*, [[USIZE]] } undef, i8* %0, 0
-// CHECK: %3 = insertvalue { i8*, [[USIZE]] } %2, [[USIZE]] %1, 1
-// CHECK: ret { i8*, [[USIZE]] } %3
+// CHECK: %0 = insertvalue { i8*, [[USIZE]] } undef, i8* %x.ptr, 0
+// CHECK: %1 = insertvalue { i8*, [[USIZE]] } %0, [[USIZE]] %x.meta, 1
+// CHECK: ret { i8*, [[USIZE]] } %1
     { x }
 }
 
diff --git a/src/test/codegen/align-struct.rs b/src/test/codegen/align-struct.rs
index d4828be037a..ba81e2d6046 100644
--- a/src/test/codegen/align-struct.rs
+++ b/src/test/codegen/align-struct.rs
@@ -42,7 +42,6 @@ pub fn align64(i : i32) -> Align64 {
 #[no_mangle]
 pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 // CHECK: %n64 = alloca %Nested64, align 64
-// CHECK: %a = alloca %Align64, align 64
     let n64 = Nested64 { a, b, c, d };
     n64
 }
@@ -51,7 +50,6 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 #[no_mangle]
 pub fn enum64(a: Align64) -> Enum64 {
 // CHECK: %e64 = alloca %Enum64, align 64
-// CHECK: %a = alloca %Align64, align 64
     let e64 = Enum64::A(a);
     e64
 }
diff --git a/src/test/codegen/fastcall-inreg.rs b/src/test/codegen/fastcall-inreg.rs
index f02e7e9f0dd..cc13d4a7b68 100644
--- a/src/test/codegen/fastcall-inreg.rs
+++ b/src/test/codegen/fastcall-inreg.rs
@@ -60,27 +60,27 @@
 #![crate_type = "lib"]
 
 mod tests {
-    // CHECK: @f1(i32 inreg, i32 inreg, i32)
+    // CHECK: @f1(i32 inreg %arg0, i32 inreg %arg1, i32 %arg2)
     #[no_mangle]
     extern "fastcall" fn f1(_: i32, _: i32, _: i32) {}
 
-    // CHECK: @f2(i32* inreg, i32* inreg, i32*)
+    // CHECK: @f2(i32* inreg %arg0, i32* inreg %arg1, i32* %arg2)
     #[no_mangle]
     extern "fastcall" fn f2(_: *const i32, _: *const i32, _: *const i32) {}
 
-    // CHECK: @f3(float, i32 inreg, i32 inreg, i32)
+    // CHECK: @f3(float %arg0, i32 inreg %arg1, i32 inreg %arg2, i32 %arg3)
     #[no_mangle]
     extern "fastcall" fn f3(_: f32, _: i32, _: i32, _: i32) {}
 
-    // CHECK: @f4(i32 inreg, float, i32 inreg, i32)
+    // CHECK: @f4(i32 inreg %arg0, float %arg1, i32 inreg %arg2, i32 %arg3)
     #[no_mangle]
     extern "fastcall" fn f4(_: i32, _: f32, _: i32, _: i32) {}
 
-    // CHECK: @f5(i64, i32)
+    // CHECK: @f5(i64 %arg0, i32 %arg1)
     #[no_mangle]
     extern "fastcall" fn f5(_: i64, _: i32) {}
 
-    // CHECK: @f6(i1 inreg zeroext, i32 inreg, i32)
+    // CHECK: @f6(i1 inreg zeroext %arg0, i32 inreg %arg1, i32 %arg2)
     #[no_mangle]
     extern "fastcall" fn f6(_: bool, _: i32, _: i32) {}
 }
diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index d8bbcd9b732..d4c7fe9e80a 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -21,62 +21,62 @@ pub struct UnsafeInner {
   _field: std::cell::UnsafeCell<i16>,
 }
 
-// CHECK: zeroext i1 @boolean(i1 zeroext)
+// CHECK: zeroext i1 @boolean(i1 zeroext %x)
 #[no_mangle]
 pub fn boolean(x: bool) -> bool {
   x
 }
 
-// CHECK: @readonly_borrow(i32* noalias readonly dereferenceable(4))
+// CHECK: @readonly_borrow(i32* noalias readonly dereferenceable(4) %arg0)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn readonly_borrow(_: &i32) {
 }
 
-// CHECK: @static_borrow(i32* noalias readonly dereferenceable(4))
+// CHECK: @static_borrow(i32* noalias readonly dereferenceable(4) %arg0)
 // static borrow may be captured
 #[no_mangle]
 pub fn static_borrow(_: &'static i32) {
 }
 
-// CHECK: @named_borrow(i32* noalias readonly dereferenceable(4))
+// CHECK: @named_borrow(i32* noalias readonly dereferenceable(4) %arg0)
 // borrow with named lifetime may be captured
 #[no_mangle]
 pub fn named_borrow<'r>(_: &'r i32) {
 }
 
-// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2))
+// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_borrow(_: &UnsafeInner) {
 }
 
-// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2))
+// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
 // ... unless this is a mutable borrow, those never alias
 // ... except that there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) {
 }
 
-// CHECK: @mutable_borrow(i32* dereferenceable(4))
+// CHECK: @mutable_borrow(i32* dereferenceable(4) %arg0)
 // FIXME #25759 This should also have `nocapture`
 // ... there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 pub fn mutable_borrow(_: &mut i32) {
 }
 
-// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32))
+// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
 #[no_mangle]
 pub fn indirect_struct(_: S) {
 }
 
-// CHECK: @borrowed_struct(%S* noalias readonly dereferenceable(32))
+// CHECK: @borrowed_struct(%S* noalias readonly dereferenceable(32) %arg0)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn borrowed_struct(_: &S) {
 }
 
-// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4))
+// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
 #[no_mangle]
 pub fn _box(x: Box<i32>) -> Box<i32> {
   x
@@ -91,31 +91,31 @@ pub fn struct_return() -> S {
 }
 
 // Hack to get the correct size for the length part in slices
-// CHECK: @helper([[USIZE:i[0-9]+]])
+// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
 #[no_mangle]
 fn helper(_: usize) {
 }
 
-// CHECK: @slice(i8* noalias nonnull readonly, [[USIZE]])
+// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 fn slice(_: &[u8]) {
 }
 
-// CHECK: @mutable_slice(i8* nonnull, [[USIZE]])
+// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 // ... there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 fn mutable_slice(_: &mut [u8]) {
 }
 
-// CHECK: @unsafe_slice(%UnsafeInner* nonnull, [[USIZE]])
+// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_slice(_: &[UnsafeInner]) {
 }
 
-// CHECK: @str(i8* noalias nonnull readonly, [[USIZE]])
+// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 fn str(_: &[u8]) {
@@ -132,7 +132,7 @@ fn trait_borrow(_: &Drop) {
 fn trait_box(_: Box<Drop>) {
 }
 
-// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly, [[USIZE]])
+// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
 #[no_mangle]
 fn return_slice(x: &[u16]) -> &[u16] {
   x
diff --git a/src/test/codegen/move-val-init.rs b/src/test/codegen/move-val-init.rs
index 98b7db60b68..e2371d61487 100644
--- a/src/test/codegen/move-val-init.rs
+++ b/src/test/codegen/move-val-init.rs
@@ -24,6 +24,6 @@ pub struct Big {
 // CHECK-LABEL: @test_mvi
 #[no_mangle]
 pub unsafe fn test_mvi(target: *mut Big, make_big: fn() -> Big) {
-    // CHECK: call void %1(%Big*{{[^%]*}} %0)
+    // CHECK: call void %make_big(%Big*{{[^%]*}} %target)
     move_val_init(target, make_big());
 }
diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs
index 49ed2229fcd..fd1a14020d8 100644
--- a/src/test/codegen/refs.rs
+++ b/src/test/codegen/refs.rs
@@ -13,7 +13,7 @@
 #![crate_type = "lib"]
 
 // Hack to get the correct size for the length part in slices
-// CHECK: @helper([[USIZE:i[0-9]+]])
+// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
 #[no_mangle]
 fn helper(_: usize) {
 }
@@ -24,9 +24,9 @@ pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
 // CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0
-// CHECK: store i8* %0, i8** [[X0]]
+// CHECK: store i8* %s.ptr, i8** [[X0]]
 // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1
-// CHECK: store [[USIZE]] %1, [[USIZE]]* [[X1]]
+// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]]
 
     let x = &*s;
     &x; // keep variable in an alloca
diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs
index 6135f49eb71..08f5038fb18 100644
--- a/src/test/codegen/stores.rs
+++ b/src/test/codegen/stores.rs
@@ -25,9 +25,9 @@ pub struct Bytes {
 #[no_mangle]
 pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
 // CHECK: [[TMP:%.+]] = alloca i32
-// CHECK: %arg1 = alloca [4 x i8]
-// CHECK: store i32 %1, i32* [[TMP]]
-// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8*
+// CHECK: %y = alloca [4 x i8]
+// CHECK: store i32 %0, i32* [[TMP]]
+// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8*
 // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
 // CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
     *x = y;
@@ -39,9 +39,9 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
 #[no_mangle]
 pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
 // CHECK: [[TMP:%.+]] = alloca i32
-// CHECK: %arg1 = alloca %Bytes
-// CHECK: store i32 %1, i32* [[TMP]]
-// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8*
+// CHECK: %y = alloca %Bytes
+// CHECK: store i32 %0, i32* [[TMP]]
+// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8*
 // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
 // CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
     *x = y;