author    Scott Mabin <scott@mabez.dev>  2020-09-12 23:31:14 +0100
committer Scott Mabin <scott@mabez.dev>  2024-05-29 13:48:03 +0100
commit    e823288c352bf19bbaaf77b7ef8958d3c8426a3f (patch)
tree      f1f16d82f6825545354915219c70c1bb6bd21bf3
parent    b37a448616a370ba02eeaa7d7c8289fa85d35fc9 (diff)
Teach rustc about the Xtensa call ABI.
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs    |   2
-rw-r--r--  compiler/rustc_target/src/abi/call/xtensa.rs | 123
2 files changed, 125 insertions(+), 0 deletions(-)
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index fc79c9232d1..5713542c17d 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -29,6 +29,7 @@ mod wasm;
 mod x86;
 mod x86_64;
 mod x86_win64;
+mod xtensa;
 
 #[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
 pub enum PassMode {
@@ -903,6 +904,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 }
             }
             "hexagon" => hexagon::compute_abi_info(self),
+            "xtensa" => xtensa::compute_abi_info(cx, self),
             "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
             "wasm32" | "wasm64" => {
                 if cx.target_spec().adjust_abi(cx, abi, self.c_variadic) == spec::abi::Abi::Wasm {
diff --git a/compiler/rustc_target/src/abi/call/xtensa.rs b/compiler/rustc_target/src/abi/call/xtensa.rs
new file mode 100644
index 00000000000..addbe698925
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/xtensa.rs
@@ -0,0 +1,123 @@
+//! The Xtensa ABI implementation
+//!
+//! This ABI implementation is based on the following sources:
+//!
+//! Sections 8.1.4 and 8.1.5 of the Xtensa ISA reference manual, as well as
+//! snippets from Section 2.3 of the Xtensa programmer's guide.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
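+// Xtensa GPRs are 32 bits wide: six registers are available for passing
+// arguments and four for returning values. Anything that does not fit is
+// passed on the stack or returned through an invisible reference.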
+const NUM_ARG_GPRS: u64 = 6;
+const NUM_RET_GPRS: u64 = 4;
+const MAX_ARG_IN_REGS_SIZE: u64 = NUM_ARG_GPRS * 32;
+const MAX_RET_IN_REGS_SIZE: u64 = NUM_RET_GPRS * 32;
+
+fn classify_ret_ty<'a, Ty, C>(arg: &mut ArgAbi<'_, Ty>)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+{
+    if arg.is_ignore() {
+        return;
+    }
+
+    // The rules for return and argument types are the same,
+    // so defer to `classify_arg_ty`.
+    let mut arg_gprs_left = NUM_RET_GPRS;
+    classify_arg_ty(arg, &mut arg_gprs_left, MAX_RET_IN_REGS_SIZE);
+    // Return values cannot be passed on the stack, so we lower to indirect
+    // and let the backend handle the invisible reference.
+    if let super::PassMode::Indirect { attrs: _, meta_attrs: _, ref mut on_stack } = arg.mode {
+        *on_stack = false;
+    }
+}
+
+fn classify_arg_ty<'a, Ty, C>(arg: &mut ArgAbi<'_, Ty>, arg_gprs_left: &mut u64, max_size: u64)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+{
+    assert!(*arg_gprs_left <= NUM_ARG_GPRS, "Arg GPR tracking underflow");
+
+    // Ignore empty structs/unions.
+    if arg.layout.is_zst() {
+        return;
+    }
+
+    let size = arg.layout.size.bits();
+    let needed_align = arg.layout.align.abi.bits();
+    let mut must_use_stack = false;
+
+    // Determine the number of GPRs needed to pass the current argument
+    // according to the ABI. 64-bit-aligned values are passed in "aligned"
+    // register pairs, so they may consume up to three registers (one of
+    // which is skipped for alignment).
+    let mut needed_arg_gprs = (size + 32 - 1) / 32;
+    if needed_align == 64 {
+        needed_arg_gprs += *arg_gprs_left % 2;
+    }
+
+    if needed_arg_gprs > *arg_gprs_left
+        || needed_align > 128
+        || (*arg_gprs_left < (max_size / 32) && needed_align == 128)
+    {
+        must_use_stack = true;
+        needed_arg_gprs = *arg_gprs_left;
+    }
+    *arg_gprs_left -= needed_arg_gprs;
+
+    if must_use_stack {
+        arg.make_indirect_byval(None);
+    } else if is_xtensa_aggregate(arg) {
+        // Aggregates no larger than `max_size` are passed in registers
+        // where possible, so coerce them to integers.
+
+        // Use a single 32-bit int if the value fits in one register, 64-bit
+        // units if 64-bit alignment is required, and an array of 32-bit
+        // units (padded to whole registers) otherwise.
+        if size <= 32 {
+            arg.cast_to(Reg::i32());
+        } else {
+            let reg = if needed_align == 2 * 32 { Reg::i64() } else { Reg::i32() };
+            let total = Size::from_bits(((size + 32 - 1) / 32) * 32);
+            arg.cast_to(Uniform::new(reg, total));
+        }
+    } else {
+        // Integral types narrower than 32 bits are promoted to full register
+        // width; wider integral types are left to the LLVM backend.
+        if size < 32 {
+            arg.extend_integer_width_to(32);
+        }
+    }
+}
+
+pub fn compute_abi_info<'a, Ty, C>(_cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+    C: HasDataLayout + HasTargetSpec,
+{
+    if !fn_abi.ret.is_ignore() {
+        classify_ret_ty(&mut fn_abi.ret);
+    }
+
+    let mut arg_gprs_left = NUM_ARG_GPRS;
+
+    for arg in fn_abi.args.iter_mut() {
+        if arg.is_ignore() {
+            continue;
+        }
+        classify_arg_ty(arg, &mut arg_gprs_left, MAX_ARG_IN_REGS_SIZE);
+    }
+}
+
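+// Treat vector types as aggregates; for everything else, defer to the
+// layout's own notion of being an aggregate.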
+fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+    match arg.layout.abi {
+        Abi::Vector { .. } => true,
+        _ => arg.layout.is_aggregate(),
+    }
+}
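
As a rough sketch of the accounting above (illustrative only, not rustc code:
`Classified` and `classify` are invented for this example, and the 128-bit
alignment special case is omitted), the decision between registers and the
stack, and the coercion applied in the register case, can be modelled like
this. Sizes and alignments are in bits, as in the patch:

    const NUM_ARG_GPRS: u64 = 6; // 32-bit GPRs available for argument passing

    #[derive(Debug, PartialEq)]
    enum Classified {
        Stack,          // lowered to an indirect, byval pass
        Int32,          // a single 32-bit register
        Uniform64(u64), // array of i64 units, total size in bits
        Uniform32(u64), // array of i32 units, total size in bits
    }

    // Simplified mirror of `classify_arg_ty` for aggregates.
    fn classify(size: u64, align: u64, gprs_left: &mut u64) -> Classified {
        let mut needed = (size + 31) / 32;
        if align == 64 {
            // A 64-bit-aligned value must start in an even register, so an
            // odd number of remaining GPRs costs one extra, skipped register.
            needed += *gprs_left % 2;
        }
        if needed > *gprs_left {
            *gprs_left = 0;
            return Classified::Stack;
        }
        *gprs_left -= needed;
        let total = ((size + 31) / 32) * 32; // pad to whole 32-bit words
        if size <= 32 {
            Classified::Int32
        } else if align == 64 {
            Classified::Uniform64(total)
        } else {
            Classified::Uniform32(total)
        }
    }

    fn main() {
        let mut left = NUM_ARG_GPRS;
        // One-word struct: a single i32, five GPRs remain.
        assert_eq!(classify(32, 32, &mut left), Classified::Int32);
        // 64-bit-aligned value with five GPRs left: one register is skipped
        // and two are used, i.e. three are consumed in total.
        assert_eq!(classify(64, 64, &mut left), Classified::Uniform64(64));
        assert_eq!(left, 2);
        // Three-word struct with only two GPRs left: it spills to the stack.
        assert_eq!(classify(96, 32, &mut left), Classified::Stack);
    }

Return values run through the same classification with four GPRs instead of
six; the only difference is that a value which does not fit is returned
through an invisible reference rather than passed on the stack.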