about summary refs log tree commit diff
diff options
context:
space:
mode:
authorbeetrees <b@beetr.ee>2024-04-02 05:08:56 +0100
committerbeetrees <b@beetr.ee>2024-06-01 07:39:13 +0100
commit952becc0bdf814f194a42d34ea78fed792a1f760 (patch)
tree818a7713b38d6d4c0642f246e172957d2eed02b6
parentdcc9a8f2831a9afd2896e3fe2cc020bb2bf949bd (diff)
downloadrust-952becc0bdf814f194a42d34ea78fed792a1f760.tar.gz
rust-952becc0bdf814f194a42d34ea78fed792a1f760.zip
Ensure floats are returned losslessly by the Rust ABI on 32-bit x86
-rw-r--r--compiler/rustc_ty_utils/src/abi.rs34
-rw-r--r--tests/assembly/x86-return-float.rs323
-rw-r--r--tests/ui/abi/numbers-arithmetic/return-float.rs57
3 files changed, 414 insertions, 0 deletions
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index c5ea85c90dc..ca503dd16b9 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -744,6 +744,40 @@ fn fn_abi_adjust_for_abi<'tcx>(
                 return;
             }
 
+            // Avoid returning floats in x87 registers on x86 as loading and storing from x87
+            // registers will quiet signalling NaNs.
+            if cx.tcx.sess.target.arch == "x86"
+                && arg_idx.is_none()
+                // Intrinsics themselves are not actual "real" functions, so there's no need to
+                // change their ABIs.
+                && abi != SpecAbi::RustIntrinsic
+            {
+                match arg.layout.abi {
+                    // Handle similar to the way arguments with an `Abi::Aggregate` abi are handled
+                    // below, by returning arguments up to the size of a pointer (32 bits on x86)
+                    // cast to an appropriately sized integer.
+                    Abi::Scalar(s) if s.primitive() == Float(F32) => {
+                        // Same size as a pointer, return in a register.
+                        arg.cast_to(Reg::i32());
+                        return;
+                    }
+                    Abi::Scalar(s) if s.primitive() == Float(F64) => {
+                        // Larger than a pointer, return indirectly.
+                        arg.make_indirect();
+                        return;
+                    }
+                    Abi::ScalarPair(s1, s2)
+                        if matches!(s1.primitive(), Float(F32 | F64))
+                            || matches!(s2.primitive(), Float(F32 | F64)) =>
+                    {
+                        // Larger than a pointer, return indirectly.
+                        arg.make_indirect();
+                        return;
+                    }
+                    _ => {}
+                };
+            }
+
             match arg.layout.abi {
                 Abi::Aggregate { .. } => {}
 
diff --git a/tests/assembly/x86-return-float.rs b/tests/assembly/x86-return-float.rs
new file mode 100644
index 00000000000..270aea2475f
--- /dev/null
+++ b/tests/assembly/x86-return-float.rs
@@ -0,0 +1,323 @@
+//@ assembly-output: emit-asm
+//@ only-x86
+// Force frame pointers to make ASM more consistent between targets
+//@ compile-flags: -O -C force-frame-pointers
+//@ filecheck-flags: --implicit-check-not fld --implicit-check-not fst
+//@ revisions: unix windows
+//@[unix] ignore-windows
+//@[windows] only-windows
+
+#![crate_type = "lib"]
+#![feature(f16, f128)]
+
+// Tests that returning `f32` and `f64` with the "Rust" ABI on 32-bit x86 doesn't use the x87
+// floating point stack, as loading and storing `f32`s and `f64`s to and from the x87 stack quietens
+// signalling NaNs.
+
+// Returning individual floats
+
+// CHECK-LABEL: return_f32:
+#[no_mangle]
+pub fn return_f32(x: f32) -> f32 {
+    // CHECK: movl {{.*}}(%ebp), %eax
+    // CHECK-NOT: ax
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f64:
+#[no_mangle]
+pub fn return_f64(x: f64) -> f64 {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL:.*]]
+    // CHECK-NEXT: movsd %[[VAL]], (%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// Returning scalar pairs containing floats
+
+// CHECK-LABEL: return_f32_f32:
+#[no_mangle]
+pub fn return_f32_f32(x: (f32, f32)) -> (f32, f32) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movss %[[VAL2]], 4(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f64_f64:
+#[no_mangle]
+pub fn return_f64_f64(x: (f64, f64)) -> (f64, f64) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+12]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movsd %[[VAL2]], 8(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f32_f64:
+#[no_mangle]
+pub fn return_f32_f64(x: (f32, f64)) -> (f32, f64) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movsd %[[VAL2]], {{4|8}}(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f64_f32:
+#[no_mangle]
+pub fn return_f64_f32(x: (f64, f32)) -> (f64, f32) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+12]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movss %[[VAL2]], 8(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f32_other:
+#[no_mangle]
+pub fn return_f32_other(x: (f32, usize)) -> (f32, usize) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL2]], 4(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f64_other:
+#[no_mangle]
+pub fn return_f64_other(x: (f64, usize)) -> (f64, usize) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+12]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL2]], 8(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_other_f32:
+#[no_mangle]
+pub fn return_other_f32(x: (usize, f32)) -> (usize, f32) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movl %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movss %[[VAL2]], 4(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_other_f64:
+#[no_mangle]
+pub fn return_other_f64(x: (usize, f64)) -> (usize, f64) {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movsd [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movl %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movsd %[[VAL2]], {{4|8}}(%[[PTR]])
+    // CHECK: retl
+    x
+}
+
+// Calling functions returning floats
+
+// CHECK-LABEL: call_f32:
+#[no_mangle]
+pub unsafe fn call_f32(x: &mut f32) {
+    extern "Rust" {
+        fn get_f32() -> f32;
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f32
+    // CHECK-NEXT: movl %eax, (%[[PTR]])
+    *x = get_f32();
+}
+
+// CHECK-LABEL: call_f64:
+#[no_mangle]
+pub unsafe fn call_f64(x: &mut f64) {
+    extern "Rust" {
+        fn get_f64() -> f64;
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f64
+    // CHECK: movsd {{.*}}(%{{ebp|esp}}), %[[VAL:.*]]
+    // CHECK-NEXT: movsd %[[VAL]], (%[[PTR]])
+    *x = get_f64();
+}
+
+// Calling functions returning scalar pairs containing floats
+
+// CHECK-LABEL: call_f32_f32:
+#[no_mangle]
+pub unsafe fn call_f32_f32(x: &mut (f32, f32)) {
+    extern "Rust" {
+        fn get_f32_f32() -> (f32, f32);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f32_f32
+    // CHECK: movss [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movss %[[VAL2]], 4(%[[PTR]])
+    *x = get_f32_f32();
+}
+
+// CHECK-LABEL: call_f64_f64:
+#[no_mangle]
+pub unsafe fn call_f64_f64(x: &mut (f64, f64)) {
+    extern "Rust" {
+        fn get_f64_f64() -> (f64, f64);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f64_f64
+    // unix: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // unix-NEXT: movsd [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // windows: movsd (%esp), %[[VAL1:.*]]
+    // windows-NEXT: movsd 8(%esp), %[[VAL2:.*]]
+    // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movsd %[[VAL2]], 8(%[[PTR]])
+    *x = get_f64_f64();
+}
+
+// CHECK-LABEL: call_f32_f64:
+#[no_mangle]
+pub unsafe fn call_f32_f64(x: &mut (f32, f64)) {
+    extern "Rust" {
+        fn get_f32_f64() -> (f32, f64);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f32_f64
+    // unix: movss [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // unix-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // windows: movss (%esp), %[[VAL1:.*]]
+    // windows-NEXT: movsd 8(%esp), %[[VAL2:.*]]
+    // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
+    // unix-NEXT: movsd %[[VAL2]], 4(%[[PTR]])
+    // windows-NEXT: movsd %[[VAL2]], 8(%[[PTR]])
+    *x = get_f32_f64();
+}
+
+// CHECK-LABEL: call_f64_f32:
+#[no_mangle]
+pub unsafe fn call_f64_f32(x: &mut (f64, f32)) {
+    extern "Rust" {
+        fn get_f64_f32() -> (f64, f32);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f64_f32
+    // unix: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // unix-NEXT: movss [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // windows: movsd (%esp), %[[VAL1:.*]]
+    // windows-NEXT: movss 8(%esp), %[[VAL2:.*]]
+    // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movss %[[VAL2]], 8(%[[PTR]])
+    *x = get_f64_f32();
+}
+
+// CHECK-LABEL: call_f32_other:
+#[no_mangle]
+pub unsafe fn call_f32_other(x: &mut (f32, usize)) {
+    extern "Rust" {
+        fn get_f32_other() -> (f32, usize);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f32_other
+    // CHECK: movss [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movss %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL2]], 4(%[[PTR]])
+    *x = get_f32_other();
+}
+
+// CHECK-LABEL: call_f64_other:
+#[no_mangle]
+pub unsafe fn call_f64_other(x: &mut (f64, usize)) {
+    extern "Rust" {
+        fn get_f64_other() -> (f64, usize);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_f64_other
+    // unix: movsd [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // unix-NEXT: movl [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // windows: movsd (%esp), %[[VAL1:.*]]
+    // windows-NEXT: movl 8(%esp), %[[VAL2:.*]]
+    // CHECK-NEXT: movsd %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL2]], 8(%[[PTR]])
+    *x = get_f64_other();
+}
+
+// CHECK-LABEL: call_other_f32:
+#[no_mangle]
+pub unsafe fn call_other_f32(x: &mut (usize, f32)) {
+    extern "Rust" {
+        fn get_other_f32() -> (usize, f32);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_other_f32
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movss [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movl %[[VAL1]], (%[[PTR]])
+    // CHECK-NEXT: movss %[[VAL2]], 4(%[[PTR]])
+    *x = get_other_f32();
+}
+
+// CHECK-LABEL: call_other_f64:
+#[no_mangle]
+pub unsafe fn call_other_f64(x: &mut (usize, f64)) {
+    extern "Rust" {
+        fn get_other_f64() -> (usize, f64);
+    }
+    // CHECK: movl {{.*}}(%ebp), %[[PTR:.*]]
+    // CHECK: calll {{()|_}}get_other_f64
+    // unix: movl [[#%d,OFFSET:]](%ebp), %[[VAL1:.*]]
+    // unix-NEXT: movsd [[#%d,OFFSET+4]](%ebp), %[[VAL2:.*]]
+    // windows: movl (%esp), %[[VAL1:.*]]
+    // windows-NEXT: movsd 8(%esp), %[[VAL2:.*]]
+    // CHECK-NEXT: movl %[[VAL1]], (%[[PTR]])
+    // unix-NEXT: movsd %[[VAL2]], 4(%[[PTR]])
+    // windows-NEXT: movsd %[[VAL2]], 8(%[[PTR]])
+    *x = get_other_f64();
+}
+
+// The "C" ABI for `f16` and `f128` on x86 has never used the x87 floating point stack. Do some
+// basic checks to ensure this remains the case for the "Rust" ABI.
+
+// CHECK-LABEL: return_f16:
+#[no_mangle]
+pub fn return_f16(x: f16) -> f16 {
+    // CHECK: pinsrw $0, {{.*}}(%ebp), %xmm0
+    // CHECK-NOT: xmm0
+    // CHECK: retl
+    x
+}
+
+// CHECK-LABEL: return_f128:
+#[no_mangle]
+pub fn return_f128(x: f128) -> f128 {
+    // CHECK: movl [[#%d,OFFSET:]](%ebp), %[[PTR:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+16]](%ebp), %[[VAL4:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+4]](%ebp), %[[VAL1:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+8]](%ebp), %[[VAL2:.*]]
+    // CHECK-NEXT: movl [[#%d,OFFSET+12]](%ebp), %[[VAL3:.*]]
+    // CHECK-NEXT: movl %[[VAL4]], 12(%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL3]], 8(%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL2]], 4(%[[PTR]])
+    // CHECK-NEXT: movl %[[VAL1]], (%[[PTR]])
+    // CHECK: retl
+    x
+}
diff --git a/tests/ui/abi/numbers-arithmetic/return-float.rs b/tests/ui/abi/numbers-arithmetic/return-float.rs
new file mode 100644
index 00000000000..3b025b763f1
--- /dev/null
+++ b/tests/ui/abi/numbers-arithmetic/return-float.rs
@@ -0,0 +1,57 @@
+//@ run-pass
+//@ compile-flags: -Copt-level=0
+
+// Test that floats (in particular signalling NaNs) are losslessly returned from functions.
+
+fn main() {
+    let bits_f32 = std::hint::black_box([
+        4.2_f32.to_bits(),
+        f32::INFINITY.to_bits(),
+        f32::NEG_INFINITY.to_bits(),
+        f32::NAN.to_bits(),
+        // These two masks cover all the mantissa bits. One of them is a signalling NaN, the other
+        // is quiet.
+        // Similar to the masks in `test_float_bits_conv` in library/std/src/f32/tests.rs
+        f32::NAN.to_bits() ^ 0x002A_AAAA,
+        f32::NAN.to_bits() ^ 0x0055_5555,
+        // Same as above but with the sign bit flipped.
+        f32::NAN.to_bits() ^ 0x802A_AAAA,
+        f32::NAN.to_bits() ^ 0x8055_5555,
+    ]);
+    for bits in bits_f32 {
+        assert_eq!(identity(f32::from_bits(bits)).to_bits(), bits);
+        // Test types that are returned as scalar pairs.
+        assert_eq!(identity((f32::from_bits(bits), 42)).0.to_bits(), bits);
+        assert_eq!(identity((42, f32::from_bits(bits))).1.to_bits(), bits);
+        let (a, b) = identity((f32::from_bits(bits), f32::from_bits(bits)));
+        assert_eq!((a.to_bits(), b.to_bits()), (bits, bits));
+    }
+
+    let bits_f64 = std::hint::black_box([
+        4.2_f64.to_bits(),
+        f64::INFINITY.to_bits(),
+        f64::NEG_INFINITY.to_bits(),
+        f64::NAN.to_bits(),
+        // These two masks cover all the mantissa bits. One of them is a signalling NaN, the other
+        // is quiet.
+        // Similar to the masks in `test_float_bits_conv` in library/std/src/f64/tests.rs
+        f64::NAN.to_bits() ^ 0x000A_AAAA_AAAA_AAAA,
+        f64::NAN.to_bits() ^ 0x0005_5555_5555_5555,
+        // Same as above but with the sign bit flipped.
+        f64::NAN.to_bits() ^ 0x800A_AAAA_AAAA_AAAA,
+        f64::NAN.to_bits() ^ 0x8005_5555_5555_5555,
+    ]);
+    for bits in bits_f64 {
+        assert_eq!(identity(f64::from_bits(bits)).to_bits(), bits);
+        // Test types that are returned as scalar pairs.
+        assert_eq!(identity((f64::from_bits(bits), 42)).0.to_bits(), bits);
+        assert_eq!(identity((42, f64::from_bits(bits))).1.to_bits(), bits);
+        let (a, b) = identity((f64::from_bits(bits), f64::from_bits(bits)));
+        assert_eq!((a.to_bits(), b.to_bits()), (bits, bits));
+    }
+}
+
+#[inline(never)]
+fn identity<T>(x: T) -> T {
+    x
+}