about summary refs log tree commit diff
diff options
context:
space:
mode:
authorJubilee Young <workingjubilee@gmail.com>2025-02-08 21:16:31 -0800
committerJubilee Young <workingjubilee@gmail.com>2025-02-09 16:50:33 -0800
commit833f07021465b7d34b13fd7d6e5aadf2c35b61a0 (patch)
tree91f99a25010ecbceef8958e3d89477b2ed6f9d33
parentee111b24e35c32b251a0879e590af3da8d5015b0 (diff)
downloadrust-833f07021465b7d34b13fd7d6e5aadf2c35b61a0.tar.gz
rust-833f07021465b7d34b13fd7d6e5aadf2c35b61a0.zip
tests/assembly: cross-compile x86-return-float
We choose to test for Linux and Windows instead of random other targets.
-rw-r--r--tests/assembly/x86-return-float.rs43
1 file changed, 26 insertions, 17 deletions
diff --git a/tests/assembly/x86-return-float.rs b/tests/assembly/x86-return-float.rs
index 423263c9673..ad760627b3a 100644
--- a/tests/assembly/x86-return-float.rs
+++ b/tests/assembly/x86-return-float.rs
@@ -1,19 +1,28 @@
 //@ assembly-output: emit-asm
-//@ only-x86
 // FIXME(#114479): LLVM miscompiles loading and storing `f32` and `f64` when SSE is disabled.
 // There's no compiletest directive to ignore a test on i586 only, so just always explicitly enable
 // SSE2.
 // Use the same target CPU as `i686` so that LLVM orders the instructions in the same order.
 //@ compile-flags: -Ctarget-feature=+sse2 -Ctarget-cpu=pentium4
 // Force frame pointers to make ASM more consistent between targets
-//@ compile-flags: -Copt-level=3 -C force-frame-pointers
+//@ compile-flags: -C force-frame-pointers
+// At opt-level=3, LLVM can merge two movss into one movsd, and we aren't testing for that.
+//@ compile-flags: -Copt-level=2
 //@ filecheck-flags: --implicit-check-not fld --implicit-check-not fst
-//@ revisions: normal win
-//@[normal] ignore-windows
-//@[win] only-windows
+//@ revisions: linux win
+//@ add-core-stubs
+//@[linux] needs-llvm-components: x86
+//@[win] needs-llvm-components: x86
+//@[linux] compile-flags: --target i686-unknown-linux-gnu
+//@[win] compile-flags: --target i686-pc-windows-msvc
 
 #![crate_type = "lib"]
 #![feature(f16, f128)]
+#![feature(no_core)]
+#![no_core]
+
+extern crate minicore;
+use minicore::*;
 
 // Tests that returning `f32` and `f64` with the "Rust" ABI on 32-bit x86 doesn't use the x87
 // floating point stack, as loading and storing `f32`s and `f64`s to and from the x87 stack quietens
@@ -190,8 +199,8 @@ pub unsafe fn call_f64_f64(x: &mut (f64, f64)) {
     }
     // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
     // CHECK: calll {{()|_}}get_f64_f64
-    // normal: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
-    // normal-NEXT: movsd [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // linux: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // linux-NEXT: movsd [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
     // win: movsd (%esp), %[[VAL1:.*]]
     // win-NEXT: movsd 8(%esp), %[[VAL2:.*]]
     // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
@@ -207,12 +216,12 @@ pub unsafe fn call_f32_f64(x: &mut (f32, f64)) {
     }
     // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
     // CHECK: calll {{()|_}}get_f32_f64
-    // normal: movss [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
-    // normal-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // linux: movss [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // linux-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
     // win: movss (%esp), %[[VAL1:.*]]
     // win-NEXT: movsd 8(%esp), %[[VAL2:.*]]
     // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
-    // normal-NEXT: movsd %[[VAL2]], 4(%[[PTR]])
+    // linux-NEXT: movsd %[[VAL2]], 4(%[[PTR]])
     // win-NEXT: movsd %[[VAL2]], 8(%[[PTR]])
     *x = get_f32_f64();
 }
@@ -225,8 +234,8 @@ pub unsafe fn call_f64_f32(x: &mut (f64, f32)) {
     }
     // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
     // CHECK: calll {{()|_}}get_f64_f32
-    // normal: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
-    // normal-NEXT: movss [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // linux: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // linux-NEXT: movss [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
     // win: movsd (%esp), %[[VAL1:.*]]
     // win-NEXT: movss 8(%esp), %[[VAL2:.*]]
     // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
@@ -257,8 +266,8 @@ pub unsafe fn call_f64_other(x: &mut (f64, usize)) {
     }
     // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
     // CHECK: calll {{()|_}}get_f64_other
-    // normal: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
-    // normal-NEXT: movl [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // linux: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // linux-NEXT: movl [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
     // win: movsd (%esp), %[[VAL1:.*]]
     // win-NEXT: movl 8(%esp), %[[VAL2:.*]]
     // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
@@ -289,12 +298,12 @@ pub unsafe fn call_other_f64(x: &mut (usize, f64)) {
     }
     // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
     // CHECK: calll {{()|_}}get_other_f64
-    // normal: movl [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
-    // normal-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // linux: movl [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // linux-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
     // win: movl (%esp), %[[VAL1:.*]]
     // win-NEXT: movsd 8(%esp), %[[VAL2:.*]]
     // CHECK-NEXT: movl %[[VAL1]], (%[[PTR]])
-    // normal-NEXT: movsd %[[VAL2]], 4(%[[PTR]])
+    // linux-NEXT: movsd %[[VAL2]], 4(%[[PTR]])
     // win-NEXT: movsd %[[VAL2]], 8(%[[PTR]])
     *x = get_other_f64();
 }