Diffstat (limited to 'compiler/rustc_target/src')
-rw-r--r--  compiler/rustc_target/src/asm/mod.rs | 13
-rw-r--r--  compiler/rustc_target/src/asm/s390x.rs | 13
-rw-r--r--  compiler/rustc_target/src/callconv/aarch64.rs | 62
-rw-r--r--  compiler/rustc_target/src/callconv/mod.rs |  1
-rw-r--r--  compiler/rustc_target/src/callconv/s390x.rs | 26
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs |  5
-rw-r--r--  compiler/rustc_target/src/spec/targets/s390x_unknown_linux_gnu.rs |  3
-rw-r--r--  compiler/rustc_target/src/spec/targets/s390x_unknown_linux_musl.rs |  3
-rw-r--r--  compiler/rustc_target/src/spec/targets/wasm32_unknown_emscripten.rs |  3
-rw-r--r--  compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs |  2
10 files changed, 101 insertions(+), 30 deletions(-)
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index 10778e9acf1..db8d23776e5 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -604,9 +604,13 @@ impl InlineAsmRegClass {
 
     /// Returns a list of supported types for this register class, each with an
     /// optional target feature required to use this type.
+    ///
+    /// At the codegen stage, it is fine to always pass true for `allow_experimental_reg`,
+    /// since all the stability checking will have been done in prior stages.
     pub fn supported_types(
         self,
         arch: InlineAsmArch,
+        allow_experimental_reg: bool,
     ) -> &'static [(InlineAsmType, Option<Symbol>)] {
         match self {
             Self::X86(r) => r.supported_types(arch),
@@ -618,7 +622,7 @@ impl InlineAsmRegClass {
             Self::Hexagon(r) => r.supported_types(arch),
             Self::LoongArch(r) => r.supported_types(arch),
             Self::Mips(r) => r.supported_types(arch),
-            Self::S390x(r) => r.supported_types(arch),
+            Self::S390x(r) => r.supported_types(arch, allow_experimental_reg),
             Self::Sparc(r) => r.supported_types(arch),
             Self::SpirV(r) => r.supported_types(arch),
             Self::Wasm(r) => r.supported_types(arch),
@@ -696,8 +700,11 @@ impl InlineAsmRegClass {
 
     /// Returns whether registers in this class can only be used as clobbers
     /// and not as inputs/outputs.
-    pub fn is_clobber_only(self, arch: InlineAsmArch) -> bool {
-        self.supported_types(arch).is_empty()
+    ///
+    /// At the codegen stage, it is fine to always pass true for `allow_experimental_reg`,
+    /// since all the stability checking will have been done in prior stages.
+    pub fn is_clobber_only(self, arch: InlineAsmArch, allow_experimental_reg: bool) -> bool {
+        self.supported_types(arch, allow_experimental_reg).is_empty()
     }
 }
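
A minimal sketch (not part of this diff) of how a caller might thread the new `allow_experimental_reg` flag through these two methods; the function name and setup below are illustrative only, with earlier compilation stages passing the real stability decision and codegen free to pass `true` unconditionally, as the doc comments state:

    use rustc_target::asm::{InlineAsmArch, InlineAsmRegClass};

    // Illustrative only: decide whether a register class is usable for
    // inputs/outputs in the current invocation.
    fn reports_clobber_only(arch: InlineAsmArch, class: InlineAsmRegClass) -> bool {
        // The types offered depend on whether experimental registers are allowed.
        let allow_experimental_reg = false;
        let types = class.supported_types(arch, allow_experimental_reg);
        // `is_clobber_only` is now just "no supported types for this invocation".
        debug_assert_eq!(types.is_empty(), class.is_clobber_only(arch, allow_experimental_reg));
        types.is_empty()
    }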
 
diff --git a/compiler/rustc_target/src/asm/s390x.rs b/compiler/rustc_target/src/asm/s390x.rs
index 9b31190a72b..410590b722b 100644
--- a/compiler/rustc_target/src/asm/s390x.rs
+++ b/compiler/rustc_target/src/asm/s390x.rs
@@ -38,11 +38,22 @@ impl S390xInlineAsmRegClass {
     pub fn supported_types(
         self,
         _arch: InlineAsmArch,
+        allow_experimental_reg: bool,
     ) -> &'static [(InlineAsmType, Option<Symbol>)] {
         match self {
             Self::reg | Self::reg_addr => types! { _: I8, I16, I32, I64; },
             Self::freg => types! { _: F32, F64; },
-            Self::vreg => &[],
+            Self::vreg => {
+                if allow_experimental_reg {
+                    // non-clobber-only vector register support is unstable.
+                    types! {
+                        vector: I32, F32, I64, F64, I128, F128,
+                            VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
+                    }
+                } else {
+                    &[]
+                }
+            }
             Self::areg => &[],
         }
     }
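
A small sketch of the behaviour this hunk encodes: with `allow_experimental_reg` set to `false`, the `vreg` class still reports no supported types (so it remains clobber-only), while `true` exposes the vector types listed above. The wrapper function is illustrative only:

    use rustc_target::asm::{InlineAsmArch, S390xInlineAsmRegClass};

    fn vreg_gating_sketch() {
        // Stable invocations: vreg stays clobber-only.
        let stable = S390xInlineAsmRegClass::vreg
            .supported_types(InlineAsmArch::S390x, /* allow_experimental_reg */ false);
        assert!(stable.is_empty());

        // With experimental registers allowed, the vector types above are offered.
        let experimental = S390xInlineAsmRegClass::vreg
            .supported_types(InlineAsmArch::S390x, /* allow_experimental_reg */ true);
        assert!(!experimental.is_empty());
    }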
diff --git a/compiler/rustc_target/src/callconv/aarch64.rs b/compiler/rustc_target/src/callconv/aarch64.rs
index 55b65fb1caa..67345f0d47b 100644
--- a/compiler/rustc_target/src/callconv/aarch64.rs
+++ b/compiler/rustc_target/src/callconv/aarch64.rs
@@ -1,5 +1,10 @@
+use std::iter;
+
+use rustc_abi::{BackendRepr, Primitive};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
 use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::{HasTargetSpec, Target};
 
 /// Indicates the variant of the AArch64 ABI we are compiling for.
 /// Used to accommodate Apple and Microsoft's deviations from the usual AAPCS ABI.
@@ -15,7 +20,7 @@ pub(crate) enum AbiKind {
 fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
         let size = arg.layout.size;
@@ -27,7 +32,9 @@ where
 
         let valid_unit = match unit.kind {
             RegKind::Integer => false,
-            RegKind::Float => true,
+            // The softfloat ABI treats floats like integers, so they
+            // do not get homogeneous aggregate treatment.
+            RegKind::Float => cx.target_spec().abi != "softfloat",
             RegKind::Vector => size.bits() == 64 || size.bits() == 128,
         };
 
@@ -35,10 +42,42 @@ where
     })
 }
 
+fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
+    if target.abi != "softfloat" {
+        return;
+    }
+    // Do *not* use the float registers for passing arguments, as that would make LLVM pick the ABI
+    // and its choice depends on whether `neon` instructions are enabled. Instead, we follow the
+    // AAPCS "softfloat" ABI, which specifies that floats should be passed as equivalently-sized
+    // integers. Nominally this only exists for "R" profile chips, but sometimes people don't want
+    // to use hardfloats even if the hardware supports them, so we do this for all softfloat
+    // targets.
+    if let BackendRepr::Scalar(s) = arg.layout.backend_repr
+        && let Primitive::Float(f) = s.primitive()
+    {
+        arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
+    } else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
+        && (matches!(s1.primitive(), Primitive::Float(_))
+            || matches!(s2.primitive(), Primitive::Float(_)))
+    {
+        // This case can only be reached for the Rust ABI, so we can do whatever we want here as
+        // long as it does not depend on target features (i.e., as long as we do not use float
+        // registers). So we pass small things in integer registers and large things via pointer
+        // indirection. This means we lose the nice "pass it as two arguments" optimization, but we
+        // currently just have no way to combine a `PassMode::Cast` with that optimization (and we

+        // need a cast since we want to pass the float as an int).
+        if arg.layout.size.bits() <= target.pointer_width.into() {
+            arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
+        } else {
+            arg.make_indirect();
+        }
+    }
+}
+
 fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     if !ret.layout.is_sized() {
         // Not touching this...
@@ -51,6 +90,7 @@ where
             // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
             ret.extend_integer_width_to(32)
         }
+        softfloat_float_abi(cx.target_spec(), ret);
         return;
     }
     if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
@@ -69,7 +109,7 @@ where
 fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     if !arg.layout.is_sized() {
         // Not touching this...
@@ -82,6 +122,8 @@ where
             // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
             arg.extend_integer_width_to(32);
         }
+        softfloat_float_abi(cx.target_spec(), arg);
+
         return;
     }
     if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
@@ -112,7 +154,7 @@ where
 pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     if !fn_abi.ret.is_ignore() {
         classify_ret(cx, &mut fn_abi.ret, kind);
@@ -125,3 +167,13 @@ where
         classify_arg(cx, arg, kind);
     }
 }
+
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+    C: HasDataLayout + HasTargetSpec,
+{
+    for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
+        softfloat_float_abi(cx.target_spec(), arg);
+    }
+}
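
A minimal sketch (not compiler code) of the size rule in the `ScalarPair` branch of `softfloat_float_abi`, with aarch64's 64-bit pointer width plugged in: for example, a Rust-ABI `(f32, f32)` (64 bits) would be cast to a single 64-bit integer, while `(f64, f64)` (128 bits) would be made indirect. The function name is made up for illustration:

    // Mirrors the ScalarPair decision in `softfloat_float_abi` for aarch64.
    fn softfloat_pair_strategy(total_size_bits: u64) -> &'static str {
        const POINTER_WIDTH_BITS: u64 = 64; // aarch64
        if total_size_bits <= POINTER_WIDTH_BITS {
            "cast to one integer register of the pair's total size"
        } else {
            "make indirect (pass behind a pointer)"
        }
    }

    fn main() {
        // e.g. (f32, f32)
        assert_eq!(softfloat_pair_strategy(64), "cast to one integer register of the pair's total size");
        // e.g. (f64, f64)
        assert_eq!(softfloat_pair_strategy(128), "make indirect (pass behind a pointer)");
    }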
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index aa639f1624f..fb0fe402934 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -738,6 +738,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
             "x86" => x86::compute_rust_abi_info(cx, self, abi),
             "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
             "loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
+            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
             _ => {}
         };
 
diff --git a/compiler/rustc_target/src/callconv/s390x.rs b/compiler/rustc_target/src/callconv/s390x.rs
index 502e7331267..c99eb9226ef 100644
--- a/compiler/rustc_target/src/callconv/s390x.rs
+++ b/compiler/rustc_target/src/callconv/s390x.rs
@@ -1,12 +1,16 @@
-// FIXME: The assumes we're using the non-vector ABI, i.e., compiling
-// for a pre-z13 machine or using -mno-vx.
+// Reference: ELF Application Binary Interface s390x Supplement
+// https://github.com/IBM/s390x-abi
 
-use crate::abi::call::{ArgAbi, FnAbi, Reg};
-use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind};
+use crate::abi::{BackendRepr, HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
-    if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
+    let size = ret.layout.size;
+    if size.bits() <= 128 && matches!(ret.layout.backend_repr, BackendRepr::Vector { .. }) {
+        return;
+    }
+    if !ret.layout.is_aggregate() && size.bits() <= 64 {
         ret.extend_integer_width_to(64);
     } else {
         ret.make_indirect();
@@ -32,19 +36,25 @@ where
         }
         return;
     }
-    if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
+
+    let size = arg.layout.size;
+    if size.bits() <= 128 && arg.layout.is_single_vector_element(cx, size) {
+        arg.cast_to(Reg { kind: RegKind::Vector, size });
+        return;
+    }
+    if !arg.layout.is_aggregate() && size.bits() <= 64 {
         arg.extend_integer_width_to(64);
         return;
     }
 
     if arg.layout.is_single_fp_element(cx) {
-        match arg.layout.size.bytes() {
+        match size.bytes() {
             4 => arg.cast_to(Reg::f32()),
             8 => arg.cast_to(Reg::f64()),
             _ => arg.make_indirect(),
         }
     } else {
-        match arg.layout.size.bytes() {
+        match size.bytes() {
             1 => arg.cast_to(Reg::i8()),
             2 => arg.cast_to(Reg::i16()),
             4 => arg.cast_to(Reg::i32()),
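A hedged, user-level illustration of the two new rules above, assuming a target with the vector facility enabled: a SIMD value of at most 128 bits is returned directly (the early `return` keeps `PassMode::Direct`), and a non-transparent wrapper around a single vector is cast to a vector register instead of going indirect. The type names below are made up, and `repr(simd)` is unstable; this is purely for illustration:

    #![feature(repr_simd)]

    #[repr(simd)]
    #[derive(Clone, Copy)]
    pub struct I32x4([i32; 4]);

    // A wrapper holding exactly one vector element, as matched by
    // `is_single_vector_element` in `classify_arg`.
    #[repr(C)]
    #[derive(Clone, Copy)]
    pub struct Wrapped(pub I32x4);

    #[cfg(target_arch = "s390x")]
    pub extern "C" fn unwrap_vector(w: Wrapped) -> I32x4 {
        // Argument: cast to a 128-bit vector register.
        // Return:   left direct, i.e. returned in a vector register.
        w.0
    }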
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 321ab40403a..fead20ec7d1 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -2327,8 +2327,6 @@ pub struct TargetOptions {
     /// If we give emcc .o files that are actually .bc files it
     /// will 'just work'.
     pub obj_is_bitcode: bool,
-    /// Whether the target requires that emitted object code includes bitcode.
-    pub forces_embed_bitcode: bool,
     /// Content of the LLVM cmdline section associated with embedded bitcode.
     pub bitcode_llvm_cmdline: StaticCow<str>,
 
@@ -2671,7 +2669,6 @@ impl Default for TargetOptions {
             allow_asm: true,
             has_thread_local: false,
             obj_is_bitcode: false,
-            forces_embed_bitcode: false,
             bitcode_llvm_cmdline: "".into(),
             min_atomic_width: None,
             max_atomic_width: None,
@@ -3412,7 +3409,6 @@ impl Target {
         key!(main_needs_argc_argv, bool);
         key!(has_thread_local, bool);
         key!(obj_is_bitcode, bool);
-        key!(forces_embed_bitcode, bool);
         key!(bitcode_llvm_cmdline);
         key!(max_atomic_width, Option<u64>);
         key!(min_atomic_width, Option<u64>);
@@ -3687,7 +3683,6 @@ impl ToJson for Target {
         target_option_val!(main_needs_argc_argv);
         target_option_val!(has_thread_local);
         target_option_val!(obj_is_bitcode);
-        target_option_val!(forces_embed_bitcode);
         target_option_val!(bitcode_llvm_cmdline);
         target_option_val!(min_atomic_width);
         target_option_val!(max_atomic_width);
diff --git a/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_gnu.rs
index 3efbb464836..a84a18a433f 100644
--- a/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_gnu.rs
@@ -6,9 +6,6 @@ pub(crate) fn target() -> Target {
     base.endian = Endian::Big;
     // z10 is the oldest CPU supported by LLVM
     base.cpu = "z10".into();
-    // FIXME: The ABI implementation in abi/call/s390x.rs is for now hard-coded to assume the no-vector
-    // ABI. Pass the -vector feature string to LLVM to respect this assumption.
-    base.features = "-vector".into();
     base.max_atomic_width = Some(128);
     base.min_global_align = Some(16);
     base.stack_probes = StackProbeType::Inline;
diff --git a/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_musl.rs
index 65b5c1167bd..4bde0fb729c 100644
--- a/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/targets/s390x_unknown_linux_musl.rs
@@ -6,9 +6,6 @@ pub(crate) fn target() -> Target {
     base.endian = Endian::Big;
     // z10 is the oldest CPU supported by LLVM
     base.cpu = "z10".into();
-    // FIXME: The ABI implementation in abi/call/s390x.rs is for now hard-coded to assume the no-vector
-    // ABI. Pass the -vector feature string to LLVM to respect this assumption.
-    base.features = "-vector".into();
     base.max_atomic_width = Some(128);
     base.min_global_align = Some(16);
     base.static_position_independent_executables = true;
diff --git a/compiler/rustc_target/src/spec/targets/wasm32_unknown_emscripten.rs b/compiler/rustc_target/src/spec/targets/wasm32_unknown_emscripten.rs
index a213adadbea..a70cebbd9c8 100644
--- a/compiler/rustc_target/src/spec/targets/wasm32_unknown_emscripten.rs
+++ b/compiler/rustc_target/src/spec/targets/wasm32_unknown_emscripten.rs
@@ -5,7 +5,8 @@ use crate::spec::{
 pub(crate) fn target() -> Target {
     // Reset flags for non-Em flavors back to empty to satisfy sanity checking tests.
     let pre_link_args = LinkArgs::new();
-    let post_link_args = TargetOptions::link_args(LinkerFlavor::EmCc, &["-sABORTING_MALLOC=0"]);
+    let post_link_args =
+        TargetOptions::link_args(LinkerFlavor::EmCc, &["-sABORTING_MALLOC=0", "-sWASM_BIGINT"]);
 
     let opts = TargetOptions {
         os: "emscripten".into(),
diff --git a/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs b/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs
index 3a3716db350..f42188ec61a 100644
--- a/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs
@@ -8,7 +8,7 @@ pub(crate) fn target() -> Target {
     base.vendor = "win7".into();
 
     Target {
-        llvm_target: "x86_64-win7-windows-msvc".into(),
+        llvm_target: "x86_64-pc-windows-msvc".into(),
         metadata: crate::spec::TargetMetadata {
             description: Some("64-bit Windows 7 support".into()),
             tier: Some(3),