Diffstat (limited to 'compiler/rustc_target/src')
-rw-r--r--  compiler/rustc_target/src/callconv/loongarch.rs |  28
-rw-r--r--  compiler/rustc_target/src/callconv/mod.rs       | 119
-rw-r--r--  compiler/rustc_target/src/callconv/riscv.rs     |  27
-rw-r--r--  compiler/rustc_target/src/callconv/x86.rs       |  37
4 files changed, 208 insertions(+), 3 deletions(-)
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index 4a21935623b..ffec76370d0 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,6 +1,7 @@
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
 use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
+use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(Copy, Clone)]
 enum RegPassKind {
@@ -359,3 +360,30 @@ where
         );
     }
 }
+
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+    C: HasDataLayout + HasTargetSpec,
+{
+    if abi == SpecAbi::RustIntrinsic {
+        return;
+    }
+
+    let grlen = cx.data_layout().pointer_size.bits();
+
+    for arg in fn_abi.args.iter_mut() {
+        if arg.is_ignore() {
+            continue;
+        }
+
+        // LLVM integer types do not differentiate between signed and unsigned integers.
+        // Some LoongArch instructions do not have a `.w`-suffixed variant and instead use
+        // all GRLEN bits, so we explicitly set the `signext` or `zeroext` attribute
+        // according to signedness to avoid unnecessary integer-extension instructions.
+        //
+        // This is similar to the RISC-V case, see
+        // https://github.com/rust-lang/rust/issues/114508 for details.
+        extend_integer_width(arg, grlen);
+    }
+}
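
The `extend_integer_width` helper invoked here is defined earlier in this file, outside the hunk. A minimal sketch of its likely shape, assuming it simply defers to the generic `ArgAbi::extend_integer_width_to`:

// Sketch only; the real definition is above this hunk. Assuming it wraps
// the generic helper, it marks any integer argument narrower than
// `grlen` bits with `signext` (signed) or `zeroext` (unsigned):
fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, grlen: u64) {
    arg.extend_integer_width_to(grlen);
}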
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 5d120a68059..25b001b57e8 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -1,11 +1,14 @@
-use std::fmt;
 use std::str::FromStr;
+use std::{fmt, iter};
 
 pub use rustc_abi::{Reg, RegKind};
 use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;
 
-use crate::abi::{self, Abi, Align, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+    self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+};
+use crate::spec::abi::Abi as SpecAbi;
 use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
 
 mod aarch64;
@@ -720,6 +723,118 @@ impl<'a, Ty> FnAbi<'a, Ty> {
 
         Ok(())
     }
+
+    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: SpecAbi)
+    where
+        Ty: TyAbiInterface<'a, C> + Copy,
+        C: HasDataLayout + HasTargetSpec,
+    {
+        let spec = cx.target_spec();
+        match &spec.arch[..] {
+            "x86" => x86::compute_rust_abi_info(cx, self, abi),
+            "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
+            "loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
+            _ => {}
+        };
+
+        for (arg_idx, arg) in self
+            .args
+            .iter_mut()
+            .enumerate()
+            .map(|(idx, arg)| (Some(idx), arg))
+            .chain(iter::once((None, &mut self.ret)))
+        {
+            if arg.is_ignore() {
+                continue;
+            }
+
+            if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
+                // Return values larger than 2 registers using a return area
+                // pointer. LLVM and Cranelift disagree about how to return
+                // values that don't fit in the registers designated for return
+                // values. LLVM will force the entire return value to be passed
+                // by return area pointer, while Cranelift will look at each IR-level
+                // return value independently and decide to pass it in a
+                // register or not, which would result in the return value
+                // being passed partially in registers and partially through a
+                // return area pointer.
+                //
+                // While Cranelift may need to be fixed, as the LLVM behavior is
+                // generally more correct with respect to the surface language,
+                // forcing this behavior in rustc itself makes it easier for
+                // other backends to conform to the Rust ABI, and rustc already
+                // handles this behavior for the C ABI anyway.
+                //
+                // In addition LLVM's decision to pass the return value in
+                // registers or using a return area pointer depends on how
+                // exactly the return type is lowered to an LLVM IR type. For
+                // example `Option<u128>` can be lowered as `{ i128, i128 }`
+                // in which case the x86_64 backend would use a return area
+                // pointer, or it could be passed as `{ i32, i128 }` in which
+                // case the x86_64 backend would pass it in registers by taking
+                // advantage of an LLVM ABI extension that allows using 3
+                // registers for the x86_64 sysv call conv rather than the
+                // officially specified 2 registers.
+                //
+                // FIXME: Technically we should look at the number of available
+                // return registers rather than guessing that there are 2
+                // registers for return values. In practice only a couple of
+                // architectures have fewer than 2 return registers, none of
+                // which are supported by Cranelift.
+                //
+                // NOTE: This adjustment is only necessary for the Rust ABI, as
+                // for other ABIs the calling convention implementations in
+                // rustc_target already ensure that any return value which does
+                // not fit in the available return registers is passed in the
+                // right way for the current target.
+                arg.make_indirect();
+                continue;
+            }
+
+            match arg.layout.abi {
+                Abi::Aggregate { .. } => {}
+
+                // This is a fun case! The gist of what this is doing is
+                // that we want callers and callees to always agree on the
+                // ABI of how they pass SIMD arguments. If we were to *not*
+                // make these arguments indirect then they'd be immediates
+                // in LLVM, which means that they'd use whatever the
+                // appropriate ABI is for the callee and the caller. That
+                // means, for example, if the caller doesn't have AVX
+                // enabled but the callee does, then passing an AVX argument
+                // across this boundary would cause corrupt data to show up.
+                //
+                // This problem is fixed by unconditionally passing SIMD
+                // arguments through memory between callers and callees
+                // which should get them all to agree on ABI regardless of
+                // target feature sets. Some more information about this
+                // issue can be found in #44367.
+                //
+                // Note that the intrinsic ABI is exempt here as
+                // that's how we connect up to LLVM and it's unstable
+                // anyway, we control all calls to it in libstd.
+                Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+                    arg.make_indirect();
+                    continue;
+                }
+
+                _ => continue,
+            }
+            // Compute `Aggregate` ABI.
+
+            let is_indirect_not_on_stack =
+                matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
+            assert!(is_indirect_not_on_stack);
+
+            let size = arg.layout.size;
+            if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
+                // We want to pass small aggregates as immediates, but using
+                // an LLVM aggregate type for this leads to bad optimizations,
+                // so we pick an appropriately sized integer type instead.
+                arg.cast_to(Reg { kind: RegKind::Integer, size });
+            }
+        }
+    }
 }
 
 impl FromStr for Conv {
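
To make the adjustments above concrete, here is a hypothetical surface-level illustration for a 64-bit target (the type and function names are invented, and exact layouts depend on the compiler):

// Hypothetical illustration only; not part of the patch.

#[repr(C)]
struct Big {
    a: u64,
    b: u64,
    c: u64, // 24 bytes > 2 * pointer size
}

// `Big` exceeds the two registers assumed for return values, so
// `adjust_for_rust_abi` forces it through a return-area pointer on every
// backend instead of letting LLVM and Cranelift each decide differently.
fn ret_big() -> Big {
    Big { a: 1, b: 2, c: 3 }
}

struct Small {
    a: u8,
    b: u8,
    c: u8, // 3 bytes <= pointer size, `Abi::Aggregate` layout
}

// A small, sized aggregate is cast to an integer register (an `i24` in
// LLVM terms) rather than passed as an LLVM aggregate type, which tends
// to optimize better. SIMD types would instead be forced indirect (when
// `simd_types_indirect` is set) so caller and callee agree regardless of
// enabled target features; see #44367.
fn take_small(s: Small) -> u8 {
    s.a + s.b + s.c
}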
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index be6bc701b49..f96169e6a61 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -7,6 +7,7 @@
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
 use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
+use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(Copy, Clone)]
 enum RegPassKind {
@@ -365,3 +366,29 @@ where
         );
     }
 }
+
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+    C: HasDataLayout + HasTargetSpec,
+{
+    if abi == SpecAbi::RustIntrinsic {
+        return;
+    }
+
+    let xlen = cx.data_layout().pointer_size.bits();
+
+    for arg in fn_abi.args.iter_mut() {
+        if arg.is_ignore() {
+            continue;
+        }
+
+        // LLVM integer types do not differentiate between signed and unsigned integers.
+        // Some RISC-V instructions do not have a `w`-suffixed (word) variant and instead
+        // use all XLEN bits, so we explicitly set the `signext` or `zeroext` attribute
+        // according to signedness to avoid unnecessary integer-extension instructions.
+        //
+        // See https://github.com/rust-lang/rust/issues/114508 for details.
+        extend_integer_width(arg, xlen);
+    }
+}
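
For a concrete picture of what the attributes buy on RISC-V, a hypothetical example (exact codegen varies by LLVM version):

// Hypothetical illustration only; not part of the patch. RV64 has no
// word-sized variant of `sltu`, so an unsigned 32-bit comparison uses
// all XLEN bits and both operands must already be zero-extended. With
// `zeroext` on the arguments, the caller guarantees this and the callee
// does not have to re-extend them defensively.
#[no_mangle]
pub fn less_than(a: u32, b: u32) -> bool {
    a < b
}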
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index 40c3e7a891a..e907beecb38 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,6 +1,9 @@
 use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
-use crate::abi::{Abi, Align, HasDataLayout, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+    Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+};
 use crate::spec::HasTargetSpec;
+use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(PartialEq)]
 pub(crate) enum Flavor {
@@ -207,3 +210,35 @@ pub(crate) fn fill_inregs<'a, Ty, C>(
         }
     }
 }
+
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+    C: HasDataLayout + HasTargetSpec,
+{
+    // Avoid returning floats in x87 registers on x86, as loading and storing from x87
+    // registers will quiet signalling NaNs. Also avoid using SSE registers since they
+    // are not always available (depending on target features).
+    if !fn_abi.ret.is_ignore()
+        // Intrinsics themselves are not actual "real" functions, so there's no need to change their ABIs.
+        && abi != SpecAbi::RustIntrinsic
+    {
+        let has_float = match fn_abi.ret.layout.abi {
+            Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
+            Abi::ScalarPair(s1, s2) => {
+                matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
+            }
+            _ => false, // anyway not passed via registers on x86
+        };
+        if has_float {
+            if fn_abi.ret.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
+                // Same size or smaller than pointer, return in a register.
+                fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
+            } else {
+                // Larger than a pointer, return indirectly.
+                fn_abi.ret.make_indirect();
+            }
+            return;
+        }
+    }
+}
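
A hedged illustration of the effect on 32-bit x86 under the Rust ABI (function names invented; the C ABI on x86 continues to return these in st(0)):

// Hypothetical illustration only; not part of the patch.

#[no_mangle]
pub fn ret_f32() -> f32 {
    // 4 bytes <= pointer size: returned in an integer register (as an
    // `i32`), avoiding a round-trip through the x87 `st(0)` register
    // that would quiet a signalling NaN.
    1.0
}

#[no_mangle]
pub fn ret_f64() -> f64 {
    // 8 bytes > pointer size: returned indirectly via a hidden pointer.
    1.0
}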