Diffstat (limited to 'tests')
-rw-r--r--  tests/assembly/simd-bitmask.rs | 149
-rw-r--r--  tests/assembly/simd-intrinsic-gather.rs | 44
-rw-r--r--  tests/assembly/simd-intrinsic-mask-load.rs | 88
-rw-r--r--  tests/assembly/simd-intrinsic-mask-reduce.rs | 60
-rw-r--r--  tests/assembly/simd-intrinsic-mask-store.rs | 86
-rw-r--r--  tests/assembly/simd-intrinsic-scatter.rs | 40
-rw-r--r--  tests/assembly/simd-intrinsic-select.rs | 130
-rw-r--r--  tests/rustdoc/line-breaks.rs | 41
-rw-r--r--  tests/ui/attributes/unix_sigpipe/unix_sigpipe-ident-list.rs (renamed from tests/ui/attributes/unix_sigpipe/unix_sigpipe-list.rs) | 0
-rw-r--r--  tests/ui/attributes/unix_sigpipe/unix_sigpipe-ident-list.stderr (renamed from tests/ui/attributes/unix_sigpipe/unix_sigpipe-list.stderr) | 2
-rw-r--r--  tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.rs | 4
-rw-r--r--  tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.stderr | 8
-rw-r--r--  tests/ui/consts/auxiliary/const_mut_refs_crate.rs | 23
-rw-r--r--  tests/ui/consts/const-mut-refs-crate.rs | 37
-rw-r--r--  tests/ui/impl-trait/associated-type-cycle.rs | 14
-rw-r--r--  tests/ui/impl-trait/associated-type-cycle.stderr | 12
-rw-r--r--  tests/ui/statics/nested_struct.rs | 24
17 files changed, 746 insertions, 16 deletions
diff --git a/tests/assembly/simd-bitmask.rs b/tests/assembly/simd-bitmask.rs
new file mode 100644
index 00000000000..8264a706852
--- /dev/null
+++ b/tests/assembly/simd-bitmask.rs
@@ -0,0 +1,149 @@
+//@ revisions: x86 x86-avx2 x86-avx512 aarch64
+//@ [x86] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86] needs-llvm-components: x86
+//@ [x86-avx2] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx2] compile-flags: -C target-feature=+avx2
+//@ [x86-avx2] needs-llvm-components: x86
+//@ [x86-avx512] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx512] compile-flags: -C target-feature=+avx512f,+avx512vl,+avx512bw,+avx512dq
+//@ [x86-avx512] needs-llvm-components: x86
+//@ [aarch64] compile-flags: --target=aarch64-unknown-linux-gnu
+//@ [aarch64] needs-llvm-components: aarch64
+//@ [aarch64] min-llvm-version: 18.0
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct m8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct m8x64([i8; 64]);
+
+#[repr(simd)]
+pub struct m32x4([i32; 4]);
+
+#[repr(simd)]
+pub struct m64x2([i64; 2]);
+
+#[repr(simd)]
+pub struct m64x4([i64; 4]);
+
+extern "rust-intrinsic" {
+    fn simd_bitmask<V, B>(mask: V) -> B;
+}
+
+// CHECK-LABEL: bitmask_m8x16
+#[no_mangle]
+pub unsafe extern "C" fn bitmask_m8x16(mask: m8x16) -> u16 {
+    // The simd_bitmask intrinsic already uses the most significant bit, so no shift is necessary.
+    // Note that x86 has no byte shift; llvm uses a word shift to move the least significant bit
+    // of each byte into the right position.
+    //
+    // x86-NOT: psllw
+    // x86: movmskb eax, xmm0
+    //
+    // x86-avx2-NOT: vpsllw
+    // x86-avx2: vpmovmskb eax, xmm0
+    //
+    // x86-avx512-NOT: vpsllw xmm0
+    // x86-avx512: vpmovmskb eax, xmm0
+    //
+    // aarch64: adrp
+    // aarch64-NEXT: cmlt
+    // aarch64-NEXT: ldr
+    // aarch64-NEXT: and
+    // aarch64-NEXT: ext
+    // aarch64-NEXT: zip1
+    // aarch64-NEXT: addv
+    // aarch64-NEXT: fmov
+    simd_bitmask(mask)
+}
+
+// CHECK-LABEL: bitmask_m8x64
+#[no_mangle]
+pub unsafe extern "C" fn bitmask_m8x64(mask: m8x64) -> u64 {
+    // The simd_bitmask intrinsic already uses the most significant bit, so no shift is necessary.
+    // Note that x86 has no byte shift; llvm uses a word shift to move the least significant bit
+    // of each byte into the right position.
+    //
+    // The parameter is a 512 bit vector which in the C abi is only valid for avx512 targets.
+    //
+    // x86-avx512-NOT: vpsllw
+    // x86-avx512: vpmovb2m k0, zmm0
+    // x86-avx512: kmovq rax, k0
+    simd_bitmask(mask)
+}
+
+// CHECK-LABEL: bitmask_m32x4
+#[no_mangle]
+pub unsafe extern "C" fn bitmask_m32x4(mask: m32x4) -> u8 {
+    // The simd_bitmask intrinsic already uses the most significant bit, so no shift is necessary.
+    //
+    // x86-NOT: psllq
+    // x86: movmskps eax, xmm0
+    //
+    // x86-avx2-NOT: vpsllq
+    // x86-avx2: vmovmskps eax, xmm0
+    //
+    // x86-avx512-NOT: vpsllq
+    // x86-avx512: vmovmskps eax, xmm0
+    //
+    // aarch64: adrp
+    // aarch64-NEXT: cmlt
+    // aarch64-NEXT: ldr
+    // aarch64-NEXT: and
+    // aarch64-NEXT: addv
+    // aarch64-NEXT: fmov
+    // aarch64-NEXT: and
+    simd_bitmask(mask)
+}
+
+// CHECK-LABEL: bitmask_m64x2
+#[no_mangle]
+pub unsafe extern "C" fn bitmask_m64x2(mask: m64x2) -> u8 {
+    // The simd_bitmask intrinsic already uses the most significant bit, so no shift is necessary.
+    //
+    // x86-NOT: psllq
+    // x86: movmskpd eax, xmm0
+    //
+    // x86-avx2-NOT: vpsllq
+    // x86-avx2: vmovmskpd eax, xmm0
+    //
+    // x86-avx512-NOT: vpsllq
+    // x86-avx512: vmovmskpd eax, xmm0
+    //
+    // aarch64: adrp
+    // aarch64-NEXT: cmlt
+    // aarch64-NEXT: ldr
+    // aarch64-NEXT: and
+    // aarch64-NEXT: addp
+    // aarch64-NEXT: fmov
+    // aarch64-NEXT: and
+    simd_bitmask(mask)
+}
+
+// CHECK-LABEL: bitmask_m64x4
+#[no_mangle]
+pub unsafe extern "C" fn bitmask_m64x4(mask: m64x4) -> u8 {
+    // The simd_bitmask intrinsic already uses the most significant bit, so no shift is necessary.
+    //
+    // The parameter is a 256 bit vector which in the C abi is only valid for avx/avx512 targets.
+    //
+    // x86-avx2-NOT: vpsllq
+    // x86-avx2: vmovmskpd eax, ymm0
+    //
+    // x86-avx512-NOT: vpsllq
+    // x86-avx512: vmovmskpd eax, ymm0
+    simd_bitmask(mask)
+}
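
Aside: the checks above hinge on `simd_bitmask` reading the sign (most significant) bit of each lane, so no extra shift is needed before movmskb/cmlt. A scalar sketch of that semantics, illustrative only and not part of the patch (hypothetical helper name; lane 0 maps to bit 0):

    // Scalar model of `simd_bitmask` for a 16-lane i8 mask: bit i of the
    // result is the sign bit of lane i.
    fn bitmask_m8x16_model(mask: [i8; 16]) -> u16 {
        let mut out = 0u16;
        for (i, lane) in mask.iter().enumerate() {
            if *lane < 0 {
                out |= 1 << i;
            }
        }
        out
    }
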
diff --git a/tests/assembly/simd-intrinsic-gather.rs b/tests/assembly/simd-intrinsic-gather.rs
new file mode 100644
index 00000000000..ef6b597c25f
--- /dev/null
+++ b/tests/assembly/simd-intrinsic-gather.rs
@@ -0,0 +1,44 @@
+//@ revisions: x86-avx512
+//@ [x86-avx512] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx512] compile-flags: -C target-feature=+avx512f,+avx512vl,+avx512bw,+avx512dq
+//@ [x86-avx512] needs-llvm-components: x86
+//@ [x86-avx512] min-llvm-version: 18.0
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct f64x4([f64; 4]);
+
+#[repr(simd)]
+pub struct m64x4([i64; 4]);
+
+#[repr(simd)]
+pub struct pf64x4([*const f64; 4]);
+
+extern "rust-intrinsic" {
+    fn simd_gather<V, P, M>(values: V, pointer: P, mask: M) -> V;
+}
+
+// CHECK-LABEL: gather_f64x4
+#[no_mangle]
+pub unsafe extern "C" fn gather_f64x4(mask: m64x4, ptrs: pf64x4) -> f64x4 {
+    // FIXME: This should also get checked to generate a gather instruction for avx2.
+    // Currently llvm scalarizes this code, see https://github.com/llvm/llvm-project/issues/59789
+    //
+    // x86-avx512: vpsllq ymm0, ymm0, 63
+    // x86-avx512-NEXT: vpmovq2m k1, ymm0
+    // x86-avx512-NEXT: vpxor xmm0, xmm0, xmm0
+    // x86-avx512-NEXT: vgatherqpd ymm0 {k1}, ymmword ptr [1*ymm1]
+    simd_gather(f64x4([0_f64, 0_f64, 0_f64, 0_f64]), ptrs, mask)
+}
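
Aside: a scalar model of the gather exercised above, illustrative only and not part of the patch (hypothetical helper; assumes each mask lane is 0 or -1, the usual SIMD mask convention, so "set" means negative):

    // Scalar model of `simd_gather(values, pointers, mask)` for 4 x f64:
    // enabled lanes load through their pointer, others keep the passthrough.
    unsafe fn gather_f64x4_model(
        values: [f64; 4],
        ptrs: [*const f64; 4],
        mask: [i64; 4],
    ) -> [f64; 4] {
        let mut out = values;
        for i in 0..4 {
            if mask[i] < 0 {
                out[i] = *ptrs[i];
            }
        }
        out
    }
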
diff --git a/tests/assembly/simd-intrinsic-mask-load.rs b/tests/assembly/simd-intrinsic-mask-load.rs
new file mode 100644
index 00000000000..49d231c45f8
--- /dev/null
+++ b/tests/assembly/simd-intrinsic-mask-load.rs
@@ -0,0 +1,88 @@
+//@ revisions: x86-avx2 x86-avx512
+//@ [x86-avx2] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx2] compile-flags: -C target-feature=+avx2
+//@ [x86-avx2] needs-llvm-components: x86
+//@ [x86-avx512] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx512] compile-flags: -C target-feature=+avx512f,+avx512vl,+avx512bw,+avx512dq
+//@ [x86-avx512] needs-llvm-components: x86
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct i8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct m8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct f32x8([f32; 8]);
+
+#[repr(simd)]
+pub struct m32x8([i32; 8]);
+
+#[repr(simd)]
+pub struct f64x4([f64; 4]);
+
+#[repr(simd)]
+pub struct m64x4([i64; 4]);
+
+extern "rust-intrinsic" {
+    fn simd_masked_load<M, P, T>(mask: M, pointer: P, values: T) -> T;
+}
+
+// CHECK-LABEL: load_i8x16
+#[no_mangle]
+pub unsafe extern "C" fn load_i8x16(mask: m8x16, pointer: *const i8) -> i8x16 {
+    // Since avx2 supports no masked loads for bytes, the code tests each individual bit
+    // and jumps to code that inserts individual bytes.
+    // x86-avx2: vpsllw xmm0, xmm0, 7
+    // x86-avx2-NEXT: vpmovmskb eax, xmm0
+    // x86-avx2-NEXT: vpxor xmm0, xmm0
+    // x86-avx2-NEXT: test al, 1
+    // x86-avx2-NEXT: jne
+    // x86-avx2-NEXT: test al, 2
+    // x86-avx2-NEXT: jne
+    // x86-avx2-DAG: movzx [[REG:[a-z]+]], byte ptr [rdi]
+    // x86-avx2-NEXT: vmovd xmm0, [[REG]]
+    // x86-avx2-DAG: vpinsrb xmm0, xmm0, byte ptr [rdi + 1], 1
+    //
+    // x86-avx512: vpsllw xmm0, xmm0, 7
+    // x86-avx512-NEXT: vpmovb2m k1, xmm0
+    // x86-avx512-NEXT: vmovdqu8 xmm0 {k1} {z}, xmmword ptr [rdi]
+    simd_masked_load(mask, pointer, i8x16([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
+}
+
+// CHECK-LABEL: load_f32x8
+#[no_mangle]
+pub unsafe extern "C" fn load_f32x8(mask: m32x8, pointer: *const f32) -> f32x8 {
+    // x86-avx2: vpslld ymm0, ymm0, 31
+    // x86-avx2-NEXT: vmaskmovps ymm0, ymm0, ymmword ptr [rdi]
+    //
+    // x86-avx512: vpslld ymm0, ymm0, 31
+    // x86-avx512-NEXT: vpmovd2m k1, ymm0
+    // x86-avx512-NEXT: vmovups ymm0 {k1} {z}, ymmword ptr [rdi]
+    simd_masked_load(mask, pointer, f32x8([0_f32, 0_f32, 0_f32, 0_f32, 0_f32, 0_f32, 0_f32, 0_f32]))
+}
+
+// CHECK-LABEL: load_f64x4
+#[no_mangle]
+pub unsafe extern "C" fn load_f64x4(mask: m64x4, pointer: *const f64) -> f64x4 {
+    // x86-avx2: vpsllq ymm0, ymm0, 63
+    // x86-avx2-NEXT: vmaskmovpd ymm0, ymm0, ymmword ptr [rdi]
+    //
+    // x86-avx512: vpsllq ymm0, ymm0, 63
+    // x86-avx512-NEXT: vpmovq2m k1, ymm0
+    // x86-avx512-NEXT: vmovupd ymm0 {k1} {z}, ymmword ptr [rdi]
+    simd_masked_load(mask, pointer, f64x4([0_f64, 0_f64, 0_f64, 0_f64]))
+}
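
Aside: a scalar model of `simd_masked_load`, illustrative only and not part of the patch (hypothetical helper): enabled lanes are read from consecutive elements starting at `pointer`, disabled lanes keep the passthrough value, and masked-off memory is never touched.

    // Scalar model of `simd_masked_load(mask, pointer, values)` for 4 x f64,
    // treating a lane as enabled when it is negative (0 / -1 mask convention).
    unsafe fn masked_load_f64x4_model(
        mask: [i64; 4],
        pointer: *const f64,
        values: [f64; 4],
    ) -> [f64; 4] {
        let mut out = values;
        for i in 0..4 {
            if mask[i] < 0 {
                out[i] = *pointer.add(i);
            }
        }
        out
    }
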
diff --git a/tests/assembly/simd-intrinsic-mask-reduce.rs b/tests/assembly/simd-intrinsic-mask-reduce.rs
new file mode 100644
index 00000000000..763401755fa
--- /dev/null
+++ b/tests/assembly/simd-intrinsic-mask-reduce.rs
@@ -0,0 +1,60 @@
+// verify that simd mask reductions do not introduce additional bit shift operations
+//@ revisions: x86 aarch64
+//@ [x86] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86] needs-llvm-components: x86
+//@ [aarch64] compile-flags: --target=aarch64-unknown-linux-gnu
+//@ [aarch64] needs-llvm-components: aarch64
+//@ [aarch64] min-llvm-version: 18.0
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct mask8x16([i8; 16]);
+
+extern "rust-intrinsic" {
+    fn simd_reduce_all<T>(x: T) -> bool;
+    fn simd_reduce_any<T>(x: T) -> bool;
+}
+
+// CHECK-LABEL: mask_reduce_all:
+#[no_mangle]
+pub unsafe extern "C" fn mask_reduce_all(m: mask8x16) -> bool {
+    // x86: psllw xmm0, 7
+    // x86-NEXT: pmovmskb eax, xmm0
+    // x86-NEXT: {{cmp ax, -1|xor eax, 65535}}
+    // x86-NEXT: sete al
+    //
+    // aarch64: shl v0.16b, v0.16b, #7
+    // aarch64-NEXT: cmlt v0.16b, v0.16b, #0
+    // aarch64-NEXT: uminv b0, v0.16b
+    // aarch64-NEXT: fmov [[REG:[a-z0-9]+]], s0
+    // aarch64-NEXT: and w0, [[REG]], #0x1
+    simd_reduce_all(m)
+}
+
+// CHECK-LABEL: mask_reduce_any:
+#[no_mangle]
+pub unsafe extern "C" fn mask_reduce_any(m: mask8x16) -> bool {
+    // x86: psllw xmm0, 7
+    // x86-NEXT: pmovmskb
+    // x86-NEXT: test eax, eax
+    // x86-NEXT: setne al
+    //
+    // aarch64: shl v0.16b, v0.16b, #7
+    // aarch64-NEXT: cmlt v0.16b, v0.16b, #0
+    // aarch64-NEXT: umaxv b0, v0.16b
+    // aarch64-NEXT: fmov [[REG:[a-z0-9]+]], s0
+    // aarch64-NEXT: and w0, [[REG]], #0x1
+    simd_reduce_any(m)
+}
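
Aside: a scalar model of the two reductions, illustrative only and not part of the patch (again assuming the 0 / -1 lane convention, so a lane is true when negative):

    // Scalar models of `simd_reduce_all` / `simd_reduce_any` on a 16-lane i8 mask.
    fn reduce_all_model(m: [i8; 16]) -> bool {
        m.iter().all(|lane| *lane < 0)
    }

    fn reduce_any_model(m: [i8; 16]) -> bool {
        m.iter().any(|lane| *lane < 0)
    }
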
diff --git a/tests/assembly/simd-intrinsic-mask-store.rs b/tests/assembly/simd-intrinsic-mask-store.rs
new file mode 100644
index 00000000000..a6611e1c23d
--- /dev/null
+++ b/tests/assembly/simd-intrinsic-mask-store.rs
@@ -0,0 +1,86 @@
+//@ revisions: x86-avx2 x86-avx512
+//@ [x86-avx2] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx2] compile-flags: -C target-feature=+avx2
+//@ [x86-avx2] needs-llvm-components: x86
+//@ [x86-avx512] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx512] compile-flags: -C target-feature=+avx512f,+avx512vl,+avx512bw,+avx512dq
+//@ [x86-avx512] needs-llvm-components: x86
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct i8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct m8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct f32x8([f32; 8]);
+
+#[repr(simd)]
+pub struct m32x8([i32; 8]);
+
+#[repr(simd)]
+pub struct f64x4([f64; 4]);
+
+#[repr(simd)]
+pub struct m64x4([i64; 4]);
+
+extern "rust-intrinsic" {
+    fn simd_masked_store<M, P, T>(mask: M, pointer: P, values: T);
+}
+
+// CHECK-LABEL: store_i8x16
+#[no_mangle]
+pub unsafe extern "C" fn store_i8x16(mask: m8x16, pointer: *mut i8, value: i8x16) {
+    // Since avx2 supports no masked stores for bytes, the code tests each individual bit
+    // and jumps to code that extracts individual bytes to memory.
+    // x86-avx2: vpsllw xmm0, xmm0, 7
+    // x86-avx2-NEXT: vpmovmskb eax, xmm0
+    // x86-avx2-NEXT: test al, 1
+    // x86-avx2-NEXT: jne
+    // x86-avx2-NEXT: test al, 2
+    // x86-avx2-NEXT: jne
+    // x86-avx2-DAG: vpextrb byte ptr [rdi + 1], xmm1, 1
+    // x86-avx2-DAG: vpextrb byte ptr [rdi], xmm1, 0
+    //
+    // x86-avx512: vpsllw xmm0, xmm0, 7
+    // x86-avx512-NEXT: vpmovb2m k1, xmm0
+    // x86-avx512-NEXT: vmovdqu8 xmmword ptr [rdi] {k1}, xmm1
+    simd_masked_store(mask, pointer, value)
+}
+
+// CHECK-LABEL: store_f32x8
+#[no_mangle]
+pub unsafe extern "C" fn store_f32x8(mask: m32x8, pointer: *mut f32, value: f32x8) {
+    // x86-avx2: vpslld ymm0, ymm0, 31
+    // x86-avx2-NEXT: vmaskmovps ymmword ptr [rdi], ymm0, ymm1
+    //
+    // x86-avx512: vpslld ymm0, ymm0, 31
+    // x86-avx512-NEXT: vpmovd2m k1, ymm0
+    // x86-avx512-NEXT: vmovups ymmword ptr [rdi] {k1}, ymm1
+    simd_masked_store(mask, pointer, value)
+}
+
+// CHECK-LABEL: store_f64x4
+#[no_mangle]
+pub unsafe extern "C" fn store_f64x4(mask: m64x4, pointer: *mut f64, value: f64x4) {
+    // x86-avx2: vpsllq ymm0, ymm0, 63
+    // x86-avx2-NEXT: vmaskmovpd ymmword ptr [rdi], ymm0, ymm1
+    //
+    // x86-avx512: vpsllq ymm0, ymm0, 63
+    // x86-avx512-NEXT: vpmovq2m k1, ymm0
+    // x86-avx512-NEXT: vmovupd ymmword ptr [rdi] {k1}, ymm1
+    simd_masked_store(mask, pointer, value)
+}
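
Aside: a scalar model of `simd_masked_store`, illustrative only and not part of the patch (hypothetical helper): only enabled lanes are written, which is why the codegen must not fall back to a full-width store.

    // Scalar model of `simd_masked_store(mask, pointer, value)` for 4 x f64,
    // with a lane enabled when it is negative (0 / -1 mask convention).
    unsafe fn masked_store_f64x4_model(mask: [i64; 4], pointer: *mut f64, value: [f64; 4]) {
        for i in 0..4 {
            if mask[i] < 0 {
                *pointer.add(i) = value[i];
            }
        }
    }
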
diff --git a/tests/assembly/simd-intrinsic-scatter.rs b/tests/assembly/simd-intrinsic-scatter.rs
new file mode 100644
index 00000000000..6ffefb0801a
--- /dev/null
+++ b/tests/assembly/simd-intrinsic-scatter.rs
@@ -0,0 +1,40 @@
+//@ revisions: x86-avx512
+//@ [x86-avx512] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx512] compile-flags: -C target-feature=+avx512f,+avx512vl,+avx512bw,+avx512dq
+//@ [x86-avx512] needs-llvm-components: x86
+//@ [x86-avx512] min-llvm-version: 18.0
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct f64x4([f64; 4]);
+
+#[repr(simd)]
+pub struct m64x4([i64; 4]);
+
+#[repr(simd)]
+pub struct pf64x4([*mut f64; 4]);
+
+extern "rust-intrinsic" {
+    fn simd_scatter<V, P, M>(values: V, pointer: P, mask: M);
+}
+
+// CHECK-LABEL: scatter_f64x4
+#[no_mangle]
+pub unsafe extern "C" fn scatter_f64x4(values: f64x4, ptrs: pf64x4, mask: m64x4) {
+    // x86-avx512: vpsllq ymm2, ymm2, 63
+    // x86-avx512-NEXT: vpmovq2m k1, ymm2
+    // x86-avx512-NEXT: vscatterqpd ymmword ptr [1*ymm1] {k1}, ymm0
+    simd_scatter(values, ptrs, mask)
+}
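
Aside: a scalar model of the scatter exercised above, illustrative only and not part of the patch (hypothetical helper):

    // Scalar model of `simd_scatter(values, pointers, mask)` for 4 x f64:
    // enabled (negative) lanes store through their pointer, others are skipped.
    unsafe fn scatter_f64x4_model(values: [f64; 4], ptrs: [*mut f64; 4], mask: [i64; 4]) {
        for i in 0..4 {
            if mask[i] < 0 {
                *ptrs[i] = values[i];
            }
        }
    }
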
diff --git a/tests/assembly/simd-intrinsic-select.rs b/tests/assembly/simd-intrinsic-select.rs
new file mode 100644
index 00000000000..3f36402e3d0
--- /dev/null
+++ b/tests/assembly/simd-intrinsic-select.rs
@@ -0,0 +1,130 @@
+//@ revisions: x86-avx2 x86-avx512 aarch64
+//@ [x86-avx2] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx2] compile-flags: -C target-feature=+avx2
+//@ [x86-avx2] needs-llvm-components: x86
+//@ [x86-avx512] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
+//@ [x86-avx512] compile-flags: -C target-feature=+avx512f,+avx512vl,+avx512bw,+avx512dq
+//@ [x86-avx512] needs-llvm-components: x86
+//@ [aarch64] compile-flags: --target=aarch64-unknown-linux-gnu
+//@ [aarch64] needs-llvm-components: aarch64
+//@ [aarch64] min-llvm-version: 18.0
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+
+#![feature(no_core, lang_items, repr_simd, intrinsics)]
+#![no_core]
+#![allow(non_camel_case_types)]
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {}
+
+#[repr(simd)]
+pub struct i8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct m8x16([i8; 16]);
+
+#[repr(simd)]
+pub struct f32x4([f32; 4]);
+
+#[repr(simd)]
+pub struct m32x4([i32; 4]);
+
+#[repr(simd)]
+pub struct f64x2([f64; 2]);
+
+#[repr(simd)]
+pub struct m64x2([i64; 2]);
+
+#[repr(simd)]
+pub struct f64x4([f64; 4]);
+
+#[repr(simd)]
+pub struct m64x4([i64; 4]);
+
+#[repr(simd)]
+pub struct f64x8([f64; 8]);
+
+#[repr(simd)]
+pub struct m64x8([i64; 8]);
+
+extern "rust-intrinsic" {
+    fn simd_select<M, V>(mask: M, a: V, b: V) -> V;
+}
+
+// CHECK-LABEL: select_i8x16
+#[no_mangle]
+pub unsafe extern "C" fn select_i8x16(mask: m8x16, a: i8x16, b: i8x16) -> i8x16 {
+    // x86-avx2: vpsllw xmm0, xmm0, 7
+    // x86-avx2-NEXT: vpblendvb xmm0, xmm2, xmm1, xmm0
+    //
+    // x86-avx512: vpsllw xmm0, xmm0, 7
+    // x86-avx512-NEXT: vpmovb2m k1, xmm0
+    // x86-avx512-NEXT: vpblendmb xmm0 {k1}, xmm2, xmm1
+    //
+    // aarch64: shl v0.16b, v0.16b, #7
+    // aarch64-NEXT: cmlt v0.16b, v0.16b, #0
+    // aarch64-NEXT: bsl v0.16b, v1.16b, v2.16b
+    simd_select(mask, a, b)
+}
+
+// CHECK-LABEL: select_f32x4
+#[no_mangle]
+pub unsafe extern "C" fn select_f32x4(mask: m32x4, a: f32x4, b: f32x4) -> f32x4 {
+    // x86-avx2: vpslld xmm0, xmm0, 31
+    // x86-avx2-NEXT: vblendvps xmm0, xmm2, xmm1, xmm0
+    //
+    // x86-avx512: vpslld xmm0, xmm0, 31
+    // x86-avx512-NEXT: vpmovd2m k1, xmm0
+    // x86-avx512-NEXT: vblendmps xmm0 {k1}, xmm2, xmm1
+    //
+    // aarch64: shl v0.4s, v0.4s, #31
+    // aarch64-NEXT: cmlt v0.4s, v0.4s, #0
+    // aarch64-NEXT: bsl v0.16b, v1.16b, v2.16b
+    simd_select(mask, a, b)
+}
+
+// CHECK-LABEL: select_f64x2
+#[no_mangle]
+pub unsafe extern "C" fn select_f64x2(mask: m64x2, a: f64x2, b: f64x2) -> f64x2 {
+    // x86-avx2: vpsllq xmm0, xmm0, 63
+    // x86-avx2-NEXT: vblendvpd xmm0, xmm2, xmm1, xmm0
+    //
+    // x86-avx512: vpsllq xmm0, xmm0, 63
+    // x86-avx512-NEXT: vpmovq2m k1, xmm0
+    // x86-avx512-NEXT: vblendmpd xmm0 {k1}, xmm2, xmm1
+    //
+    // aarch64: shl v0.2d, v0.2d, #63
+    // aarch64-NEXT: cmlt v0.2d, v0.2d, #0
+    // aarch64-NEXT: bsl v0.16b, v1.16b, v2.16b
+    simd_select(mask, a, b)
+}
+
+// CHECK-LABEL: select_f64x4
+#[no_mangle]
+pub unsafe extern "C" fn select_f64x4(mask: m64x4, a: f64x4, b: f64x4) -> f64x4 {
+    // The parameter is a 256 bit vector which in the C abi is only valid for avx targets.
+    //
+    // x86-avx2: vpsllq ymm0, ymm0, 63
+    // x86-avx2-NEXT: vblendvpd ymm0, ymm2, ymm1, ymm0
+    //
+    // x86-avx512: vpsllq ymm0, ymm0, 63
+    // x86-avx512-NEXT: vpmovq2m k1, ymm0
+    // x86-avx512-NEXT: vblendmpd ymm0 {k1}, ymm2, ymm1
+    simd_select(mask, a, b)
+}
+
+// CHECK-LABEL: select_f64x8
+#[no_mangle]
+pub unsafe extern "C" fn select_f64x8(mask: m64x8, a: f64x8, b: f64x8) -> f64x8 {
+    // The parameter is a 512 bit vector which in the C abi is only valid for avx512 targets.
+    //
+    // x86-avx512: vpsllq zmm0, zmm0, 63
+    // x86-avx512-NEXT: vpmovq2m k1, zmm0
+    // x86-avx512-NEXT: vblendmpd zmm0 {k1}, zmm2, zmm1
+    simd_select(mask, a, b)
+}
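
Aside: a scalar model of `simd_select`, illustrative only and not part of the patch (hypothetical helper): enabled lanes come from `a`, disabled lanes from `b`, which maps directly onto the blend instructions checked above.

    // Scalar model of `simd_select(mask, a, b)` for 4 x f32, with a lane
    // enabled when it is negative (0 / -1 mask convention).
    fn select_f32x4_model(mask: [i32; 4], a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
        let mut out = b;
        for i in 0..4 {
            if mask[i] < 0 {
                out[i] = a[i];
            }
        }
        out
    }
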
diff --git a/tests/rustdoc/line-breaks.rs b/tests/rustdoc/line-breaks.rs
index 29c16fcd4f8..21aa3a03ce4 100644
--- a/tests/rustdoc/line-breaks.rs
+++ b/tests/rustdoc/line-breaks.rs
@@ -1,26 +1,37 @@
 #![crate_name = "foo"]
 
-use std::ops::Add;
 use std::fmt::Display;
+use std::ops::Add;
 
-//@count foo/fn.function_with_a_really_long_name.html //pre/br 2
-pub fn function_with_a_really_long_name(parameter_one: i32,
-                                        parameter_two: i32)
-                                        -> Option<i32> {
+// @matches foo/fn.function_with_a_really_long_name.html '//*[@class="rust item-decl"]//code' "\
+//     function_with_a_really_long_name\(\n\
+//    \    parameter_one: i32,\n\
+//    \    parameter_two: i32\n\
+//    \) -> Option<i32>$"
+pub fn function_with_a_really_long_name(parameter_one: i32, parameter_two: i32) -> Option<i32> {
     Some(parameter_one + parameter_two)
 }
 
-//@count foo/fn.short_name.html //pre/br 0
-pub fn short_name(param: i32) -> i32 { param + 1 }
+// @matches foo/fn.short_name.html '//*[@class="rust item-decl"]//code' \
+//     "short_name\(param: i32\) -> i32$"
+pub fn short_name(param: i32) -> i32 {
+    param + 1
+}
 
-//@count foo/fn.where_clause.html //pre/br 4
-pub fn where_clause<T, U>(param_one: T,
-                          param_two: U)
-    where T: Add<U> + Display + Copy,
-          U: Add<T> + Display + Copy,
-          T::Output: Display + Add<U::Output> + Copy,
-          <T::Output as Add<U::Output>>::Output: Display,
-          U::Output: Display + Copy
+// @matches foo/fn.where_clause.html '//*[@class="rust item-decl"]//code' "\
+//     where_clause<T, U>\(param_one: T, param_two: U\)where\n\
+//    \    T: Add<U> \+ Display \+ Copy,\n\
+//    \    U: Add<T> \+ Display \+ Copy,\n\
+//    \    T::Output: Display \+ Add<U::Output> \+ Copy,\n\
+//    \    <T::Output as Add<U::Output>>::Output: Display,\n\
+//    \    U::Output: Display \+ Copy,$"
+pub fn where_clause<T, U>(param_one: T, param_two: U)
+where
+    T: Add<U> + Display + Copy,
+    U: Add<T> + Display + Copy,
+    T::Output: Display + Add<U::Output> + Copy,
+    <T::Output as Add<U::Output>>::Output: Display,
+    U::Output: Display + Copy,
 {
     let x = param_one + param_two;
     println!("{} + {} = {}", param_one, param_two, x);
diff --git a/tests/ui/attributes/unix_sigpipe/unix_sigpipe-list.rs b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-ident-list.rs
index 462ae24a884..462ae24a884 100644
--- a/tests/ui/attributes/unix_sigpipe/unix_sigpipe-list.rs
+++ b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-ident-list.rs
diff --git a/tests/ui/attributes/unix_sigpipe/unix_sigpipe-list.stderr b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-ident-list.stderr
index 66902f3ca9a..a020f21e6ca 100644
--- a/tests/ui/attributes/unix_sigpipe/unix_sigpipe-list.stderr
+++ b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-ident-list.stderr
@@ -1,5 +1,5 @@
 error: malformed `unix_sigpipe` attribute input
-  --> $DIR/unix_sigpipe-list.rs:3:1
+  --> $DIR/unix_sigpipe-ident-list.rs:3:1
    |
 LL | #[unix_sigpipe(sig_dfl)]
    | ^^^^^^^^^^^^^^^^^^^^^^^^ help: must be of the form: `#[unix_sigpipe = "inherit|sig_ign|sig_dfl"]`
diff --git a/tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.rs b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.rs
new file mode 100644
index 00000000000..22326835623
--- /dev/null
+++ b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.rs
@@ -0,0 +1,4 @@
+#![feature(unix_sigpipe)]
+
+#[unix_sigpipe("sig_dfl")] //~ error: malformed `unix_sigpipe` attribute input
+fn main() {}
diff --git a/tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.stderr b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.stderr
new file mode 100644
index 00000000000..b62c086e360
--- /dev/null
+++ b/tests/ui/attributes/unix_sigpipe/unix_sigpipe-str-list.stderr
@@ -0,0 +1,8 @@
+error: malformed `unix_sigpipe` attribute input
+  --> $DIR/unix_sigpipe-str-list.rs:3:1
+   |
+LL | #[unix_sigpipe("sig_dfl")]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^ help: must be of the form: `#[unix_sigpipe = "inherit|sig_ign|sig_dfl"]`
+
+error: aborting due to 1 previous error
+
diff --git a/tests/ui/consts/auxiliary/const_mut_refs_crate.rs b/tests/ui/consts/auxiliary/const_mut_refs_crate.rs
new file mode 100644
index 00000000000..8e78748e896
--- /dev/null
+++ b/tests/ui/consts/auxiliary/const_mut_refs_crate.rs
@@ -0,0 +1,23 @@
+// This is a support file for ../const-mut-refs-crate.rs
+
+// This is to test that inner statics from an external
+// crate like this one still preserve the alloc.
+// That is, the address from the standpoint of rustc+llvm
+// is the same.
+// The need for this test originated from the GH issue
+// https://github.com/rust-lang/rust/issues/57349
+
+// See also ../const-mut-refs-crate.rs for more details
+// about this test.
+
+#![feature(const_mut_refs)]
+
+// if we used immutable references here, then promotion would
+// turn the `&42` into a promoted, which gets duplicated arbitrarily.
+pub static mut FOO: &'static mut i32 = &mut 42;
+pub static mut BAR: &'static mut i32 = unsafe { FOO };
+
+pub mod inner {
+    pub static INNER_MOD_FOO: &'static i32 = &43;
+    pub static INNER_MOD_BAR: &'static i32 = INNER_MOD_FOO;
+}
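
Aside: the contrast the comment above alludes to, illustrative only and not part of the aux crate — with a shared reference the `&42` would be promoted to read-only constant data, which the compiler may duplicate or merge freely, so an address-identity check across crates would prove nothing; `&mut 42` forces a distinct static allocation instead.

    // Hypothetical promoted counterparts (address identity not guaranteed):
    pub static PROMOTED_FOO: &'static i32 = &42;
    pub static PROMOTED_BAR: &'static i32 = PROMOTED_FOO;
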
diff --git a/tests/ui/consts/const-mut-refs-crate.rs b/tests/ui/consts/const-mut-refs-crate.rs
new file mode 100644
index 00000000000..dcc8ff370e1
--- /dev/null
+++ b/tests/ui/consts/const-mut-refs-crate.rs
@@ -0,0 +1,37 @@
+//@ run-pass
+//@ aux-build:const_mut_refs_crate.rs
+
+#![feature(const_mut_refs)]
+
+//! Regression test for https://github.com/rust-lang/rust/issues/79738
+//! Show how we are not duplicating allocations anymore. Statics that
+//! copy their value from another static used to also duplicate
+//! memory behind references.
+
+extern crate const_mut_refs_crate as other;
+
+use other::{
+    inner::{INNER_MOD_BAR, INNER_MOD_FOO},
+    BAR, FOO,
+};
+
+pub static LOCAL_FOO: &'static i32 = &41;
+pub static LOCAL_BAR: &'static i32 = LOCAL_FOO;
+pub static mut COPY_OF_REMOTE_FOO: &'static mut i32 = unsafe { FOO };
+
+static DOUBLE_REF: &&i32 = &&99;
+static ONE_STEP_ABOVE: &i32 = *DOUBLE_REF;
+static mut DOUBLE_REF_MUT: &mut &mut i32 = &mut &mut 99;
+static mut ONE_STEP_ABOVE_MUT: &mut i32 = unsafe { *DOUBLE_REF_MUT };
+
+pub fn main() {
+    unsafe {
+        assert_eq!(FOO as *const i32, BAR as *const i32);
+        assert_eq!(INNER_MOD_FOO as *const i32, INNER_MOD_BAR as *const i32);
+        assert_eq!(LOCAL_FOO as *const i32, LOCAL_BAR as *const i32);
+        assert_eq!(*DOUBLE_REF as *const i32, ONE_STEP_ABOVE as *const i32);
+        assert_eq!(*DOUBLE_REF_MUT as *mut i32, ONE_STEP_ABOVE_MUT as *mut i32);
+
+        assert_eq!(FOO as *const i32, COPY_OF_REMOTE_FOO as *const i32);
+    }
+}
diff --git a/tests/ui/impl-trait/associated-type-cycle.rs b/tests/ui/impl-trait/associated-type-cycle.rs
new file mode 100644
index 00000000000..4c1fc1a0fa6
--- /dev/null
+++ b/tests/ui/impl-trait/associated-type-cycle.rs
@@ -0,0 +1,14 @@
+trait Foo {
+    type Bar;
+    fn foo(self) -> Self::Bar;
+}
+
+impl Foo for Box<dyn Foo> {
+    //~^ ERROR: the value of the associated type `Bar` in `Foo` must be specified
+    type Bar = <Self as Foo>::Bar;
+    fn foo(self) -> <Self as Foo>::Bar {
+        (*self).foo()
+    }
+}
+
+fn main() {}
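
Aside: a sketch of how the cycle can be broken, along the lines of the E0191 help in the stderr below — illustrative only and not part of the patch (renamed trait, by-reference receiver so the delegation through the box compiles):

    trait FooFixed {
        type Bar;
        fn foo(&self) -> Self::Bar;
    }

    // Specifying `Bar` on the trait object removes the self-referential
    // `<Self as Foo>::Bar` projection that the original impl relied on.
    impl FooFixed for Box<dyn FooFixed<Bar = u32>> {
        type Bar = u32;
        fn foo(&self) -> u32 {
            (**self).foo()
        }
    }
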
diff --git a/tests/ui/impl-trait/associated-type-cycle.stderr b/tests/ui/impl-trait/associated-type-cycle.stderr
new file mode 100644
index 00000000000..7eef8d1e338
--- /dev/null
+++ b/tests/ui/impl-trait/associated-type-cycle.stderr
@@ -0,0 +1,12 @@
+error[E0191]: the value of the associated type `Bar` in `Foo` must be specified
+  --> $DIR/associated-type-cycle.rs:6:22
+   |
+LL |     type Bar;
+   |     -------- `Bar` defined here
+...
+LL | impl Foo for Box<dyn Foo> {
+   |                      ^^^ help: specify the associated type: `Foo<Bar = Type>`
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0191`.
diff --git a/tests/ui/statics/nested_struct.rs b/tests/ui/statics/nested_struct.rs
new file mode 100644
index 00000000000..f5819f50789
--- /dev/null
+++ b/tests/ui/statics/nested_struct.rs
@@ -0,0 +1,24 @@
+//@ check-pass
+/// oli-obk added this test after messing up the interner logic
+/// around mutability of nested allocations. This was not caught
+/// by the test suite, but by trying to build stage2 rustc.
+/// There is no real explanation for this test, as it was just
+/// a bug during a refactoring.
+
+pub struct Lint {
+    pub name: &'static str,
+    pub desc: &'static str,
+    pub report_in_external_macro: bool,
+    pub is_loaded: bool,
+    pub crate_level_only: bool,
+}
+
+static FOO: &Lint = &Lint {
+    name: &"foo",
+    desc: "desc",
+    report_in_external_macro: false,
+    is_loaded: true,
+    crate_level_only: false,
+};
+
+fn main() {}