Diffstat (limited to 'tests/codegen')
-rw-r--r--  tests/codegen/array-equality.rs                |  1
-rw-r--r--  tests/codegen/atomicptr.rs                     | 38
-rw-r--r--  tests/codegen/cffi/c-variadic-naked.rs         | 19
-rw-r--r--  tests/codegen/checked_math.rs                  | 86
-rw-r--r--  tests/codegen/enum/enum-debug-niche-2.rs       |  2
-rw-r--r--  tests/codegen/function-arguments.rs            |  1
-rw-r--r--  tests/codegen/intrinsics/ctlz.rs               | 56
-rw-r--r--  tests/codegen/intrinsics/ctpop.rs              | 31
-rw-r--r--  tests/codegen/intrinsics/rotate_left.rs        | 31
-rw-r--r--  tests/codegen/intrinsics/transmute-niched.rs   |  1
-rw-r--r--  tests/codegen/issues/issue-119422.rs           |  1
-rw-r--r--  tests/codegen/loads.rs                         |  1
-rw-r--r--  tests/codegen/mem-replace-simple-type.rs       |  6
-rw-r--r--  tests/codegen/naked-fn/naked-functions.rs      |  2
-rw-r--r--  tests/codegen/option-as-slice.rs               |  1
-rw-r--r--  tests/codegen/option-niche-eq.rs               |  1
-rw-r--r--  tests/codegen/slice-ref-equality.rs            | 33
-rw-r--r--  tests/codegen/transmute-optimized.rs           |  1
-rw-r--r--  tests/codegen/vec-len-invariant.rs             | 16
19 files changed, 299 insertions, 29 deletions
diff --git a/tests/codegen/array-equality.rs b/tests/codegen/array-equality.rs
index 5b85da1d4a0..bc5425c7a4f 100644
--- a/tests/codegen/array-equality.rs
+++ b/tests/codegen/array-equality.rs
@@ -1,7 +1,6 @@
 //@ compile-flags: -O -Z merge-functions=disabled
 //@ only-x86_64
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 // CHECK-LABEL: @array_eq_value
 #[no_mangle]
diff --git a/tests/codegen/atomicptr.rs b/tests/codegen/atomicptr.rs
new file mode 100644
index 00000000000..cbbd5615512
--- /dev/null
+++ b/tests/codegen/atomicptr.rs
@@ -0,0 +1,38 @@
+// LLVM does not support some atomic RMW operations on pointers, so inside codegen we lower those
+// to integer atomics, surrounded by casts to and from integer type.
+// This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
+// ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
+// arguments to `atomicrmw xchg`.
+
+//@ compile-flags: -O -Cno-prepopulate-passes
+#![crate_type = "lib"]
+
+#![feature(strict_provenance)]
+#![feature(strict_provenance_atomic_ptr)]
+
+use std::sync::atomic::AtomicPtr;
+use std::sync::atomic::Ordering::Relaxed;
+use std::ptr::without_provenance_mut;
+
+// Portability hack so that we can say [[USIZE]] instead of i64/i32/i16 for usize.
+// CHECK: @helper([[USIZE:i[0-9]+]] noundef %_1)
+#[no_mangle]
+pub fn helper(_: usize) {}
+
+// CHECK-LABEL: @atomicptr_fetch_byte_add
+#[no_mangle]
+pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 {
+    // CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]]
+    // CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]]
+    // CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr
+    a.fetch_byte_add(v, Relaxed)
+}
+
+// CHECK-LABEL: @atomicptr_swap
+#[no_mangle]
+pub fn atomicptr_swap(a: &AtomicPtr<u8>, ptr: *mut u8) -> *mut u8 {
+    // CHECK-NOT: ptrtoint
+    // CHECK: atomicrmw xchg ptr %{{.*}}, ptr %{{.*}} monotonic
+    // CHECK-NOT: inttoptr
+    a.swap(ptr, Relaxed)
+}
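
For context, a minimal usage sketch of the operation exercised above (a hypothetical example, not part of the patch): `fetch_byte_add` atomically offsets the stored pointer by a byte count and returns the previous pointer, which is what the ptrtoint / `atomicrmw add` / inttoptr round-trip in the CHECK lines implements, while `swap` maps directly to `atomicrmw xchg ptr` with no casts.

    #![feature(strict_provenance_atomic_ptr)]
    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    fn fetch_byte_add_demo() {
        let mut buf = [0u8; 8];
        let a = AtomicPtr::new(buf.as_mut_ptr());
        // Atomically advance the stored pointer by 4 bytes; the old value comes back.
        let old = a.fetch_byte_add(4, Relaxed);
        assert_eq!(old, buf.as_mut_ptr());
        // `swap` exchanges the stored pointer directly, no integer round-trip needed.
        let prev = a.swap(core::ptr::null_mut(), Relaxed);
        assert_eq!(prev, buf.as_mut_ptr().wrapping_add(4));
    }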
diff --git a/tests/codegen/cffi/c-variadic-naked.rs b/tests/codegen/cffi/c-variadic-naked.rs
new file mode 100644
index 00000000000..807873ea368
--- /dev/null
+++ b/tests/codegen/cffi/c-variadic-naked.rs
@@ -0,0 +1,19 @@
+//@ needs-asm-support
+//@ only-x86_64
+
+// tests that `va_start` is not injected into naked functions
+
+#![crate_type = "lib"]
+#![feature(c_variadic)]
+#![feature(naked_functions)]
+#![no_std]
+
+#[naked]
+pub unsafe extern "C" fn c_variadic(_: usize, _: ...) {
+    // CHECK-NOT: va_start
+    // CHECK-NOT: alloca
+    core::arch::asm! {
+        "ret",
+        options(noreturn),
+    }
+}
diff --git a/tests/codegen/checked_math.rs b/tests/codegen/checked_math.rs
new file mode 100644
index 00000000000..41016e3b7be
--- /dev/null
+++ b/tests/codegen/checked_math.rs
@@ -0,0 +1,86 @@
+//@ compile-flags: -O -Z merge-functions=disabled
+
+#![crate_type = "lib"]
+#![feature(unchecked_shifts)]
+
+// Because the result of something like `u32::checked_sub` can only be used if it
+// didn't overflow, make sure that LLVM actually knows that in optimized builds.
+// Thanks to poison semantics, this doesn't even need branches.
+
+// CHECK-LABEL: @checked_sub_unsigned
+// CHECK-SAME: (i16 noundef %a, i16 noundef %b)
+#[no_mangle]
+pub fn checked_sub_unsigned(a: u16, b: u16) -> Option<u16> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp uge i16 %a, %b
+    // CHECK-DAG: %[[DIFF_P:.+]] = sub nuw i16 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i16
+    // CHECK-DAG: %[[DIFF_U:.+]] = select i1 %[[IS_SOME]], i16 %[[DIFF_P]], i16 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i16, i16 } poison, i16 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i16, i16 } %[[R0]], i16 %[[DIFF_U]], 1
+    // CHECK: ret { i16, i16 } %[[R1]]
+    a.checked_sub(b)
+}
+
+// Note that `shl` and `shr` in LLVM are already unchecked. So rather than
+// looking for no-wrap flags, we just need there to not be any masking.
+
+// CHECK-LABEL: @checked_shl_unsigned
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shl_unsigned(a: u32, b: u32) -> Option<u32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = shl i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shl(b)
+}
+
+// CHECK-LABEL: @checked_shr_unsigned
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shr_unsigned(a: u32, b: u32) -> Option<u32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = lshr i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shr(b)
+}
+
+// CHECK-LABEL: @checked_shl_signed
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shl_signed(a: i32, b: u32) -> Option<i32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = shl i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shl(b)
+}
+
+// CHECK-LABEL: @checked_shr_signed
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shr_signed(a: i32, b: u32) -> Option<i32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = ashr i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shr(b)
+}
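
Put differently, the branchless IR checked above is the optimizer's view of ordinary branching source code; a hand-written equivalent (a sketch for illustration only, with hypothetical function names) looks like this, and the CHECK lines assert that LLVM keeps the payload/discriminant relationship without reintroducing branches or shift masking:

    pub fn checked_sub_by_hand(a: u16, b: u16) -> Option<u16> {
        // The payload only matters when the discriminant says Some, which is why
        // LLVM is allowed to leave it as undef in the None case above.
        if a >= b { Some(a - b) } else { None }
    }

    pub fn checked_shl_by_hand(a: u32, b: u32) -> Option<u32> {
        // No masking of the shift amount: the in-range check alone guards the shl.
        if b < u32::BITS { Some(a << b) } else { None }
    }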
diff --git a/tests/codegen/enum/enum-debug-niche-2.rs b/tests/codegen/enum/enum-debug-niche-2.rs
index 25871885e7e..58f43fe3ec6 100644
--- a/tests/codegen/enum/enum-debug-niche-2.rs
+++ b/tests/codegen/enum/enum-debug-niche-2.rs
@@ -7,7 +7,7 @@
 // CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_variant_part,{{.*}}size: 32,{{.*}}
 // CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "Placeholder",{{.*}}extraData: i128 4294967295{{[,)].*}}
 // CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "Error",{{.*}}extraData: i128 0{{[,)].*}}
-#![feature(generic_nonzero, never_type)]
+#![feature(never_type)]
 
 #[derive(Copy, Clone)]
 pub struct Entity {
diff --git a/tests/codegen/function-arguments.rs b/tests/codegen/function-arguments.rs
index 468ec0a7753..2b27dab078d 100644
--- a/tests/codegen/function-arguments.rs
+++ b/tests/codegen/function-arguments.rs
@@ -1,7 +1,6 @@
 //@ compile-flags: -O -C no-prepopulate-passes
 #![crate_type = "lib"]
 #![feature(dyn_star)]
-#![feature(generic_nonzero)]
 #![feature(allocator_api)]
 
 use std::mem::MaybeUninit;
diff --git a/tests/codegen/intrinsics/ctlz.rs b/tests/codegen/intrinsics/ctlz.rs
new file mode 100644
index 00000000000..0d54d21ce12
--- /dev/null
+++ b/tests/codegen/intrinsics/ctlz.rs
@@ -0,0 +1,56 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::{ctlz, ctlz_nonzero};
+
+// CHECK-LABEL: @ctlz_u16
+#[no_mangle]
+pub unsafe fn ctlz_u16(x: u16) -> u32 {
+    // CHECK: %[[tmp:.*]] = call i16 @llvm.ctlz.i16(i16 %x, i1 false)
+    // CHECK: zext i16 %[[tmp]] to i32
+    ctlz(x)
+}
+
+// CHECK-LABEL: @ctlz_nzu16
+#[no_mangle]
+pub unsafe fn ctlz_nzu16(x: u16) -> u32 {
+    // CHECK: %[[tmp:.*]] = call i16 @llvm.ctlz.i16(i16 %x, i1 true)
+    // CHECK: zext i16 %[[tmp]] to i32
+    ctlz_nonzero(x)
+}
+
+// CHECK-LABEL: @ctlz_u32
+#[no_mangle]
+pub unsafe fn ctlz_u32(x: u32) -> u32 {
+    // CHECK: call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+    // CHECK-NOT: zext
+    // CHECK-NOT: trunc
+    ctlz(x)
+}
+
+// CHECK-LABEL: @ctlz_nzu32
+#[no_mangle]
+pub unsafe fn ctlz_nzu32(x: u32) -> u32 {
+    // CHECK: call i32 @llvm.ctlz.i32(i32 %x, i1 true)
+    // CHECK-NOT: zext
+    // CHECK-NOT: trunc
+    ctlz_nonzero(x)
+}
+
+// CHECK-LABEL: @ctlz_u64
+#[no_mangle]
+pub unsafe fn ctlz_u64(x: u64) -> u32 {
+    // CHECK: %[[tmp:.*]] = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
+    // CHECK: trunc i64 %[[tmp]] to i32
+    ctlz(x)
+}
+
+// CHECK-LABEL: @ctlz_nzu64
+#[no_mangle]
+pub unsafe fn ctlz_nzu64(x: u64) -> u32 {
+    // CHECK: %[[tmp:.*]] = call i64 @llvm.ctlz.i64(i64 %x, i1 true)
+    // CHECK: trunc i64 %[[tmp]] to i32
+    ctlz_nonzero(x)
+}
diff --git a/tests/codegen/intrinsics/ctpop.rs b/tests/codegen/intrinsics/ctpop.rs
new file mode 100644
index 00000000000..f4043325de9
--- /dev/null
+++ b/tests/codegen/intrinsics/ctpop.rs
@@ -0,0 +1,31 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::ctpop;
+
+// CHECK-LABEL: @ctpop_u16
+#[no_mangle]
+pub unsafe fn ctpop_u16(x: u16) -> u32 {
+    // CHECK: %[[tmp:.*]] = call i16 @llvm.ctpop.i16(i16 %x)
+    // CHECK: zext i16 %[[tmp]] to i32
+    ctpop(x)
+}
+
+// CHECK-LABEL: @ctpop_u32
+#[no_mangle]
+pub unsafe fn ctpop_u32(x: u32) -> u32 {
+    // CHECK: call i32 @llvm.ctpop.i32(i32 %x)
+    // CHECK-NOT: zext
+    // CHECK-NOT: trunc
+    ctpop(x)
+}
+
+// CHECK-LABEL: @ctpop_u64
+#[no_mangle]
+pub unsafe fn ctpop_u64(x: u64) -> u32 {
+    // CHECK: %[[tmp:.*]] = call i64 @llvm.ctpop.i64(i64 %x)
+    // CHECK: trunc i64 %[[tmp]] to i32
+    ctpop(x)
+}
diff --git a/tests/codegen/intrinsics/rotate_left.rs b/tests/codegen/intrinsics/rotate_left.rs
new file mode 100644
index 00000000000..4f6c5cbaed6
--- /dev/null
+++ b/tests/codegen/intrinsics/rotate_left.rs
@@ -0,0 +1,31 @@
+//@ compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::rotate_left;
+
+// CHECK-LABEL: @rotate_left_u16
+#[no_mangle]
+pub unsafe fn rotate_left_u16(x: u16, shift: u32) -> u16 {
+    // CHECK: %[[tmp:.*]] = trunc i32 %shift to i16
+    // CHECK: call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %[[tmp]])
+    rotate_left(x, shift)
+}
+
+// CHECK-LABEL: @rotate_left_u32
+#[no_mangle]
+pub unsafe fn rotate_left_u32(x: u32, shift: u32) -> u32 {
+    // CHECK-NOT: trunc
+    // CHECK-NOT: zext
+    // CHECK: call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %shift)
+    rotate_left(x, shift)
+}
+
+// CHECK-LABEL: @rotate_left_u64
+#[no_mangle]
+pub unsafe fn rotate_left_u64(x: u64, shift: u32) -> u64 {
+    // CHECK: %[[tmp:.*]] = zext i32 %shift to i64
+    // CHECK: call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %[[tmp]])
+    rotate_left(x, shift)
+}
diff --git a/tests/codegen/intrinsics/transmute-niched.rs b/tests/codegen/intrinsics/transmute-niched.rs
index b5e0da1b2f5..f5b7bd2efea 100644
--- a/tests/codegen/intrinsics/transmute-niched.rs
+++ b/tests/codegen/intrinsics/transmute-niched.rs
@@ -3,7 +3,6 @@
 //@ [DBG] compile-flags: -C opt-level=0 -C no-prepopulate-passes
 //@ only-64bit (so I don't need to worry about usize)
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 use std::mem::transmute;
 use std::num::NonZero;
diff --git a/tests/codegen/issues/issue-119422.rs b/tests/codegen/issues/issue-119422.rs
index 19480b4dc9e..aa56bfe79ac 100644
--- a/tests/codegen/issues/issue-119422.rs
+++ b/tests/codegen/issues/issue-119422.rs
@@ -4,7 +4,6 @@
 //@ compile-flags: -O --edition=2021 -Zmerge-functions=disabled
 //@ only-64bit (because the LLVM type of i64 for usize shows up)
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 use core::ptr::NonNull;
 use core::num::NonZero;
diff --git a/tests/codegen/loads.rs b/tests/codegen/loads.rs
index ba4de77ce6f..e3e2f757770 100644
--- a/tests/codegen/loads.rs
+++ b/tests/codegen/loads.rs
@@ -1,7 +1,6 @@
 //@ compile-flags: -C no-prepopulate-passes -Zmir-opt-level=0 -O
 
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 use std::mem::MaybeUninit;
 use std::num::NonZero;
diff --git a/tests/codegen/mem-replace-simple-type.rs b/tests/codegen/mem-replace-simple-type.rs
index 50b43f5854a..7209fa21925 100644
--- a/tests/codegen/mem-replace-simple-type.rs
+++ b/tests/codegen/mem-replace-simple-type.rs
@@ -34,18 +34,20 @@ pub fn replace_ref_str<'a>(r: &mut &'a str, v: &'a str) -> &'a str {
 
 #[no_mangle]
 // CHECK-LABEL: @replace_short_array_3(
+// CHECK-SAME: ptr{{.+}}sret{{.+}}%[[RET:.+]], ptr{{.+}}%r, ptr{{.+}}%v
 pub fn replace_short_array_3(r: &mut [u32; 3], v: [u32; 3]) -> [u32; 3] {
     // CHECK-NOT: alloca
-    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %result, ptr align 4 %r, i64 12, i1 false)
+    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[RET]], ptr align 4 %r, i64 12, i1 false)
     // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %r, ptr align 4 %v, i64 12, i1 false)
     std::mem::replace(r, v)
 }
 
 #[no_mangle]
 // CHECK-LABEL: @replace_short_array_4(
+// CHECK-SAME: ptr{{.+}}sret{{.+}}%[[RET:.+]], ptr{{.+}}%r, ptr{{.+}}%v
 pub fn replace_short_array_4(r: &mut [u32; 4], v: [u32; 4]) -> [u32; 4] {
     // CHECK-NOT: alloca
-    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %result, ptr align 4 %r, i64 16, i1 false)
+    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[RET]], ptr align 4 %r, i64 16, i1 false)
     // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %r, ptr align 4 %v, i64 16, i1 false)
     std::mem::replace(r, v)
 }
diff --git a/tests/codegen/naked-fn/naked-functions.rs b/tests/codegen/naked-fn/naked-functions.rs
index 755dd155112..3c426825537 100644
--- a/tests/codegen/naked-fn/naked-functions.rs
+++ b/tests/codegen/naked-fn/naked-functions.rs
@@ -19,7 +19,7 @@ pub unsafe extern "C" fn naked_empty() {
 }
 
 // CHECK: Function Attrs: naked
-// CHECK-NEXT: define{{.*}}i{{[0-9]+}} @naked_with_args_and_return(i64 %a, i64 %b)
+// CHECK-NEXT: define{{.*}}i{{[0-9]+}} @naked_with_args_and_return(i64 %0, i64 %1)
 #[no_mangle]
 #[naked]
 pub unsafe extern "C" fn naked_with_args_and_return(a: isize, b: isize) -> isize {
diff --git a/tests/codegen/option-as-slice.rs b/tests/codegen/option-as-slice.rs
index c5b1eafaccb..65637a2495d 100644
--- a/tests/codegen/option-as-slice.rs
+++ b/tests/codegen/option-as-slice.rs
@@ -1,7 +1,6 @@
 //@ compile-flags: -O -Z randomize-layout=no
 //@ only-x86_64
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 extern crate core;
 
diff --git a/tests/codegen/option-niche-eq.rs b/tests/codegen/option-niche-eq.rs
index 8b8044e9b75..7b955332fd3 100644
--- a/tests/codegen/option-niche-eq.rs
+++ b/tests/codegen/option-niche-eq.rs
@@ -1,7 +1,6 @@
 //@ compile-flags: -O -Zmerge-functions=disabled
 //@ min-llvm-version: 18
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 extern crate core;
 use core::cmp::Ordering;
diff --git a/tests/codegen/slice-ref-equality.rs b/tests/codegen/slice-ref-equality.rs
index 85d9c34a30b..1153d7817b2 100644
--- a/tests/codegen/slice-ref-equality.rs
+++ b/tests/codegen/slice-ref-equality.rs
@@ -1,6 +1,5 @@
 //@ compile-flags: -O -Zmerge-functions=disabled
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 use std::num::NonZero;
 
@@ -43,48 +42,48 @@ pub fn is_zero_array(data: &[u8; 4]) -> bool {
 // equality for non-byte types also just emit a `bcmp`, not a loop.
 
 // CHECK-LABEL: @eq_slice_of_nested_u8(
-// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1
-// CHECK-SAME: [[USIZE]] noundef %3
+// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %x.1
+// CHECK-SAME: [[USIZE]] noundef %y.1
 #[no_mangle]
 fn eq_slice_of_nested_u8(x: &[[u8; 3]], y: &[[u8; 3]]) -> bool {
-    // CHECK: icmp eq [[USIZE]] %1, %3
-    // CHECK: %[[BYTES:.+]] = mul nsw [[USIZE]] %1, 3
+    // CHECK: icmp eq [[USIZE]] %x.1, %y.1
+    // CHECK: %[[BYTES:.+]] = mul nsw [[USIZE]] {{%x.1|%y.1}}, 3
     // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}(ptr
     // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]])
     x == y
 }
 
 // CHECK-LABEL: @eq_slice_of_i32(
-// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1
-// CHECK-SAME: [[USIZE]] noundef %3
+// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %x.1
+// CHECK-SAME: [[USIZE]] noundef %y.1
 #[no_mangle]
 fn eq_slice_of_i32(x: &[i32], y: &[i32]) -> bool {
-    // CHECK: icmp eq [[USIZE]] %1, %3
-    // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] %1, 2
+    // CHECK: icmp eq [[USIZE]] %x.1, %y.1
+    // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] {{%x.1|%y.1}}, 2
     // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}(ptr
     // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]])
     x == y
 }
 
 // CHECK-LABEL: @eq_slice_of_nonzero(
-// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1
-// CHECK-SAME: [[USIZE]] noundef %3
+// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %x.1
+// CHECK-SAME: [[USIZE]] noundef %y.1
 #[no_mangle]
 fn eq_slice_of_nonzero(x: &[NonZero<i32>], y: &[NonZero<i32>]) -> bool {
-    // CHECK: icmp eq [[USIZE]] %1, %3
-    // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] %1, 2
+    // CHECK: icmp eq [[USIZE]] %x.1, %y.1
+    // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] {{%x.1|%y.1}}, 2
     // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}(ptr
     // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]])
     x == y
 }
 
 // CHECK-LABEL: @eq_slice_of_option_of_nonzero(
-// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1
-// CHECK-SAME: [[USIZE]] noundef %3
+// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %x.1
+// CHECK-SAME: [[USIZE]] noundef %y.1
 #[no_mangle]
 fn eq_slice_of_option_of_nonzero(x: &[Option<NonZero<i16>>], y: &[Option<NonZero<i16>>]) -> bool {
-    // CHECK: icmp eq [[USIZE]] %1, %3
-    // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] %1, 1
+    // CHECK: icmp eq [[USIZE]] %x.1, %y.1
+    // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] {{%x.1|%y.1}}, 1
     // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}(ptr
     // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]])
     x == y
diff --git a/tests/codegen/transmute-optimized.rs b/tests/codegen/transmute-optimized.rs
index 1a5f53e625a..8e5bcb2340e 100644
--- a/tests/codegen/transmute-optimized.rs
+++ b/tests/codegen/transmute-optimized.rs
@@ -1,6 +1,5 @@
 //@ compile-flags: -O -Z merge-functions=disabled
 #![crate_type = "lib"]
-#![feature(generic_nonzero)]
 
 // This tests that LLVM can optimize based on the niches in the source or
 // destination types for transmutes.
diff --git a/tests/codegen/vec-len-invariant.rs b/tests/codegen/vec-len-invariant.rs
new file mode 100644
index 00000000000..780c86bab95
--- /dev/null
+++ b/tests/codegen/vec-len-invariant.rs
@@ -0,0 +1,16 @@
+//@ compile-flags: -O
+//@ only-64bit
+//
+// This test confirms that we do not reload the length of a Vec after growing it in push.
+
+#![crate_type = "lib"]
+
+// CHECK-LABEL: @should_load_once
+#[no_mangle]
+pub fn should_load_once(v: &mut Vec<u8>) {
+    // CHECK: load i64
+    // CHECK: call {{.*}}grow_one
+    // CHECK-NOT: load i64
+    // CHECK: add {{.*}}, 1
+    v.push(1);
+}