Diffstat (limited to 'compiler/rustc_target/src')
 compiler/rustc_target/src/callconv/loongarch.rs | 18
 compiler/rustc_target/src/callconv/mips64.rs    |  8
 compiler/rustc_target/src/callconv/mod.rs       | 29
 compiler/rustc_target/src/callconv/riscv.rs     | 18
 compiler/rustc_target/src/callconv/sparc64.rs   |  8
 compiler/rustc_target/src/callconv/x86.rs       | 18
 compiler/rustc_target/src/callconv/x86_64.rs    | 14
 compiler/rustc_target/src/callconv/x86_win64.rs | 27
 compiler/rustc_target/src/callconv/xtensa.rs    |  6
 9 files changed, 81 insertions(+), 65 deletions(-)
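
The diffs below are one mechanical rename applied across the calling-convention code: the layout field `abi` becomes `backend_repr`, the `Abi` enum it carries becomes `BackendRepr`, and `Abi::Aggregate { .. }` becomes `BackendRepr::Memory { .. }` (the one variant whose name actually changes). Reconstructed from the matches in this diff, the renamed enum has roughly the following shape; `Scalar` is stubbed, and the `Vector` field names are an assumption, since the hunks only ever match `Vector { .. }`:

    // Shape sketch only, reconstructed from the matches below; not a
    // verbatim copy of rustc_abi. `Scalar` is a stand-in type.
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    pub struct Scalar;

    #[allow(dead_code)]
    pub enum BackendRepr {
        Uninhabited,
        Scalar(Scalar),
        ScalarPair(Scalar, Scalar),
        Vector { element: Scalar, count: u64 }, // field names assumed
        Memory { sized: bool },                 // formerly `Abi::Aggregate { sized }`
    }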
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index ffec76370d0..d1234c3cc91 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,5 +1,7 @@
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+    self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
 
@@ -21,8 +23,8 @@ enum FloatConv {
 struct CannotUseFpConv;
 
 fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -77,8 +79,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
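
The `extend_integer_width` hunk above (repeated verbatim for riscv.rs further down) preserves a rule LoongArch shares with RISC-V: on a 64-bit target (`xlen > 32`), 32-bit integer arguments are sign-extended into the full register even when the Rust type is unsigned. A minimal standalone illustration of what that means numerically (plain Rust, independent of rustc):

    // "Sign-extend a 32-bit value into a 64-bit register", numerically:
    // reinterpret the bits as signed, widen, then reinterpret back.
    fn sign_extend_32_to_64(v: u32) -> u64 {
        v as i32 as i64 as u64
    }

    fn main() {
        // A u32 with the top bit set arrives with the upper 32 bits filled.
        assert_eq!(sign_extend_32_to_64(0x8000_0000), 0xFFFF_FFFF_8000_0000);
        // Small values are unaffected.
        assert_eq!(sign_extend_32_to_64(0x1234_5678), 0x1234_5678);
    }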
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 2c3258c8d42..5bdf4c2ad77 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -5,7 +5,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -24,8 +24,8 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    match ret.layout.field(cx, i).abi {
-        abi::Abi::Scalar(scalar) => match scalar.primitive() {
+    match ret.layout.field(cx, i).backend_repr {
+        abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Float(abi::F32) => Some(Reg::f32()),
             abi::Float(abi::F64) => Some(Reg::f64()),
             _ => None,
@@ -109,7 +109,7 @@ where
                 let offset = arg.layout.fields.offset(i);
 
                 // We only care about aligned doubles
-                if let abi::Abi::Scalar(scalar) = field.abi {
+                if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
                     if scalar.primitive() == abi::Float(abi::F64) {
                         if offset.is_aligned(dl.f64_align.abi) {
                             // Insert enough integers to cover [last_offset, offset)
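
The surrounding mips64 hunk implements the "aligned doubles" rule referenced in the comment above: an `f64` field of a returned struct rides in an FP register only if its offset is 8-byte aligned, and the gaps are covered with integer registers. A minimal distillation of just the alignment test (hypothetical helper, not rustc's API):

    // Hypothetical distillation of the alignment check in the mips64 hunk:
    // only 8-byte-aligned f64 fields qualify for an FP register.
    fn f64_field_gets_fp_reg(offset_bytes: u64) -> bool {
        offset_bytes % 8 == 0
    }

    fn main() {
        assert!(f64_field_gets_fp_reg(8));   // e.g. struct { u64, f64 }
        assert!(!f64_field_gets_fp_reg(4));  // a packed f64 at offset 4 would not
    }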
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 25b001b57e8..8c3df9c426b 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -6,7 +6,8 @@ use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;
 
 use crate::abi::{
-    self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+    self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
+    TyAndLayout,
 };
 use crate::spec::abi::Abi as SpecAbi;
 use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
@@ -350,15 +351,17 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         layout: TyAndLayout<'a, Ty>,
         scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
     ) -> Self {
-        let mode = match layout.abi {
-            Abi::Uninhabited => PassMode::Ignore,
-            Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
-            Abi::ScalarPair(a, b) => PassMode::Pair(
+        let mode = match layout.backend_repr {
+            BackendRepr::Uninhabited => PassMode::Ignore,
+            BackendRepr::Scalar(scalar) => {
+                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
+            }
+            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                 scalar_attrs(&layout, a, Size::ZERO),
                 scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
-            Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
-            Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
+            BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
         };
         ArgAbi { layout, mode }
     }
@@ -460,7 +463,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
 
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Abi::Scalar(scalar) = self.layout.abi {
+        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
             if let abi::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
@@ -512,7 +515,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
             // That elevates any type difference to an ABI difference since we just use the
             // full Rust type as the LLVM argument/return type.
             if matches!(self.mode, PassMode::Direct(..))
-                && matches!(self.layout.abi, Abi::Aggregate { .. })
+                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
             {
                 // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                 self.layout.ty == other.layout.ty
@@ -791,8 +794,8 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 continue;
             }
 
-            match arg.layout.abi {
-                Abi::Aggregate { .. } => {}
+            match arg.layout.backend_repr {
+                BackendRepr::Memory { .. } => {}
 
                 // This is a fun case! The gist of what this is doing is
                 // that we want callers and callees to always agree on the
@@ -813,7 +816,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 // Note that the intrinsic ABI is exempt here as
                 // that's how we connect up to LLVM and it's unstable
                 // anyway, we control all calls to it in libstd.
-                Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+                BackendRepr::Vector { .. }
+                    if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect =>
+                {
                     arg.make_indirect();
                     continue;
                 }
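
All of the per-target files key off the same dispatch, shown in the `ArgAbi::new` hunk above: each `BackendRepr` variant picks a default `PassMode`, which the targets then refine. A toy, self-contained model of that dispatch (every type here is an illustrative stand-in, not rustc's):

    // Toy model of the `ArgAbi::new` dispatch above; rustc's real types carry
    // scalars, attributes, and layouts that are elided here.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum BackendRepr {
        Uninhabited,
        Scalar,
        ScalarPair,
        Vector,
        Memory { sized: bool },
    }

    #[derive(Debug)]
    enum PassMode {
        Ignore,   // the value can never exist; pass nothing
        Direct,   // in one register
        Pair,     // in two registers
        Indirect, // behind a pointer
    }

    fn default_pass_mode(repr: &BackendRepr) -> PassMode {
        match repr {
            BackendRepr::Uninhabited => PassMode::Ignore,
            BackendRepr::Scalar | BackendRepr::Vector => PassMode::Direct,
            BackendRepr::ScalarPair => PassMode::Pair,
            BackendRepr::Memory { .. } => PassMode::Indirect,
        }
    }

    fn main() {
        println!("{:?}", default_pass_mode(&BackendRepr::Scalar));
        println!("{:?}", default_pass_mode(&BackendRepr::Memory { sized: true }));
    }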
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index f96169e6a61..c0298edb5ab 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -4,8 +4,10 @@
 // Reference: Clang RISC-V ELF psABI lowering code
 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
 
+use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
 
@@ -27,8 +29,8 @@ enum FloatConv {
 struct CannotUseFpConv;
 
 fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -83,8 +85,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index 835353f76fc..313d8730399 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -109,11 +109,11 @@ where
         return data;
     }
 
-    match layout.abi {
-        abi::Abi::Scalar(scalar) => {
+    match layout.backend_repr {
+        abi::BackendRepr::Scalar(scalar) => {
             data = arg_scalar(cx, &scalar, offset, data);
         }
-        abi::Abi::Aggregate { .. } => {
+        abi::BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
@@ -122,7 +122,7 @@ where
             }
         }
         _ => {
-            if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+            if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
                 data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
             }
         }
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index e907beecb38..a5af975d4d2 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,6 +1,6 @@
 use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
 use crate::abi::{
-    Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+    AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
 };
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
@@ -105,10 +105,12 @@ where
             where
                 Ty: TyAbiInterface<'a, C> + Copy,
             {
-                match layout.abi {
-                    Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
-                    Abi::Vector { .. } => true,
-                    Abi::Aggregate { .. } => {
+                match layout.backend_repr {
+                    BackendRepr::Uninhabited
+                    | BackendRepr::Scalar(_)
+                    | BackendRepr::ScalarPair(..) => false,
+                    BackendRepr::Vector { .. } => true,
+                    BackendRepr::Memory { .. } => {
                         for i in 0..layout.fields.count() {
                             if contains_vector(cx, layout.field(cx, i)) {
                                 return true;
@@ -223,9 +225,9 @@ where
        // Intrinsics themselves are not actual "real" functions, so there's no need to change their ABIs.
         && abi != SpecAbi::RustIntrinsic
     {
-        let has_float = match fn_abi.ret.layout.abi {
-            Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
-            Abi::ScalarPair(s1, s2) => {
+        let has_float = match fn_abi.ret.layout.backend_repr {
+            BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+            BackendRepr::ScalarPair(s1, s2) => {
                 matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
             }
             _ => false, // anyway not passed via registers on x86
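
The first x86 hunk above rewrites `contains_vector`, which recurses through `Memory` reprs looking for any vector field; only the variant names change. A toy model of that recursion over a field tree (invented `Repr` type, not rustc's):

    // Toy model of the recursive vector search in the x86 hunk above.
    enum Repr {
        Scalar,
        Vector,
        Memory { fields: Vec<Repr> },
    }

    fn contains_vector(r: &Repr) -> bool {
        match r {
            Repr::Scalar => false,
            Repr::Vector => true,
            // Recurse into every field of an aggregate.
            Repr::Memory { fields } => fields.iter().any(contains_vector),
        }
    }

    fn main() {
        let nested = Repr::Memory {
            fields: vec![Repr::Scalar, Repr::Memory { fields: vec![Repr::Vector] }],
        };
        assert!(contains_vector(&nested)); // vector found two levels down
    }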
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index 9910e623ac9..bd101b23ea1 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -1,8 +1,10 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
 
+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
-use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 
 /// Classification of "eightbyte" components.
 // N.B., the order of the variants is from general to specific,
@@ -46,17 +48,17 @@ where
             return Ok(());
         }
 
-        let mut c = match layout.abi {
-            Abi::Uninhabited => return Ok(()),
+        let mut c = match layout.backend_repr {
+            BackendRepr::Uninhabited => return Ok(()),
 
-            Abi::Scalar(scalar) => match scalar.primitive() {
+            BackendRepr::Scalar(scalar) => match scalar.primitive() {
                 abi::Int(..) | abi::Pointer(_) => Class::Int,
                 abi::Float(_) => Class::Sse,
             },
 
-            Abi::Vector { .. } => Class::Sse,
+            BackendRepr::Vector { .. } => Class::Sse,
 
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
                 for i in 0..layout.fields.count() {
                     let field_off = off + layout.fields.offset(i);
                     classify(cx, layout.field(cx, i), cls, field_off)?;
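
The `classify` hunk above is the heart of the SysV x86_64 ABI: every eightbyte of a value is assigned a class (integer register vs. SSE register), and `ScalarPair`/`Memory` reprs recurse into their fields at the right offsets. A toy model of the offset arithmetic and the "integer wins" merge (simplified; the real classifier also handles `SseUp`, demotion to memory, and more):

    // Toy model of slotting fields into SysV "eightbytes" by byte offset.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Class {
        Int,
        Sse,
    }

    fn classify_field(cls: &mut [Option<Class>], byte_off: u64, c: Class) {
        let idx = (byte_off / 8) as usize; // which eightbyte the field starts in
        // Simplified SysV merge: Int beats Sse when two fields share an eightbyte.
        cls[idx] = match (cls[idx], c) {
            (None, new) => Some(new),
            (Some(Class::Int), _) | (_, Class::Int) => Some(Class::Int),
            _ => Some(Class::Sse),
        };
    }

    fn main() {
        // struct { f64, i32 }: first eightbyte is Sse, second is Int.
        let mut cls = [None; 2];
        classify_field(&mut cls, 0, Class::Sse);
        classify_field(&mut cls, 8, Class::Int);
        assert_eq!(cls, [Some(Class::Sse), Some(Class::Int)]);
    }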
diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs
index e5a20b248e4..83d94cb11ba 100644
--- a/compiler/rustc_target/src/callconv/x86_win64.rs
+++ b/compiler/rustc_target/src/callconv/x86_win64.rs
@@ -1,25 +1,28 @@
+use rustc_abi::{BackendRepr, Float, Primitive};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg};
-use crate::abi::{Abi, Float, Primitive};
 use crate::spec::HasTargetSpec;
 
 // Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
 
 pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) {
     let fixup = |a: &mut ArgAbi<'_, Ty>| {
-        match a.layout.abi {
-            Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
-                8 => a.cast_to(Reg::i8()),
-                16 => a.cast_to(Reg::i16()),
-                32 => a.cast_to(Reg::i32()),
-                64 => a.cast_to(Reg::i64()),
-                _ => a.make_indirect(),
-            },
-            Abi::Vector { .. } => {
+        match a.layout.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {}
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
+                match a.layout.size.bits() {
+                    8 => a.cast_to(Reg::i8()),
+                    16 => a.cast_to(Reg::i16()),
+                    32 => a.cast_to(Reg::i32()),
+                    64 => a.cast_to(Reg::i64()),
+                    _ => a.make_indirect(),
+                }
+            }
+            BackendRepr::Vector { .. } => {
                 // FIXME(eddyb) there should be a size cap here
                 // (probably what clang calls "illegal vectors").
             }
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 // Match what LLVM does for `f128` so that `compiler-builtins` builtins match up
                 // with what LLVM expects.
                 if a.layout.size.bytes() > 8
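
The Win64 fixup in the hunk above encodes the documented parameter-passing rule: an aggregate travels in a single integer register only when its size is exactly 8, 16, 32, or 64 bits; anything else is passed indirectly. A toy version of just that size check (invented names, not rustc's API):

    // Toy version of the Win64 size rule from the hunk above.
    #[derive(Debug, PartialEq)]
    enum Win64Pass {
        CastToInt(u64), // pass in one integer register of this width
        Indirect,       // pass a pointer instead
    }

    fn classify_aggregate(size_bits: u64) -> Win64Pass {
        match size_bits {
            8 | 16 | 32 | 64 => Win64Pass::CastToInt(size_bits),
            _ => Win64Pass::Indirect,
        }
    }

    fn main() {
        assert_eq!(classify_aggregate(32), Win64Pass::CastToInt(32)); // e.g. struct of two u16
        assert_eq!(classify_aggregate(24), Win64Pass::Indirect);      // e.g. [u8; 3]
    }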
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index e1728b08a39..9d313d16500 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -6,7 +6,7 @@
 //! Section 2.3 from the Xtensa programmers guide.
 
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
+use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 const NUM_ARG_GPRS: u64 = 6;
@@ -114,8 +114,8 @@ where
 }
 
 fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }