-rw-r--r--  compiler/rustc_target/src/asm/mod.rs            |  2
-rw-r--r--  compiler/rustc_target/src/callconv/aarch64.rs   |  3
-rw-r--r--  compiler/rustc_target/src/callconv/amdgpu.rs    |  3
-rw-r--r--  compiler/rustc_target/src/callconv/arm.rs       |  3
-rw-r--r--  compiler/rustc_target/src/callconv/loongarch.rs | 23
-rw-r--r--  compiler/rustc_target/src/callconv/mips.rs      |  3
-rw-r--r--  compiler/rustc_target/src/callconv/mips64.rs    | 33
-rw-r--r--  compiler/rustc_target/src/callconv/mod.rs       | 20
-rw-r--r--  compiler/rustc_target/src/callconv/nvptx64.rs   |  5
-rw-r--r--  compiler/rustc_target/src/callconv/powerpc64.rs |  3
-rw-r--r--  compiler/rustc_target/src/callconv/riscv.rs     | 23
-rw-r--r--  compiler/rustc_target/src/callconv/s390x.rs     |  3
-rw-r--r--  compiler/rustc_target/src/callconv/sparc.rs     |  3
-rw-r--r--  compiler/rustc_target/src/callconv/sparc64.rs   | 40
-rw-r--r--  compiler/rustc_target/src/callconv/wasm.rs      |  3
-rw-r--r--  compiler/rustc_target/src/callconv/x86.rs       | 22
-rw-r--r--  compiler/rustc_target/src/callconv/x86_64.rs    | 16
-rw-r--r--  compiler/rustc_target/src/callconv/xtensa.rs    |  3
-rw-r--r--  compiler/rustc_target/src/lib.rs                |  5
-rw-r--r--  compiler/rustc_target/src/spec/base/aix.rs      |  3
-rw-r--r--  compiler/rustc_target/src/spec/base/bpf.rs      |  3
-rw-r--r--  compiler/rustc_target/src/spec/base/xtensa.rs   |  3
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs           | 43
23 files changed, 143 insertions, 125 deletions
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index f17452b3ba0..0a4ffb15219 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -1,11 +1,11 @@
 use std::fmt;
 use std::str::FromStr;
 
+use rustc_abi::Size;
 use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
 use rustc_macros::{Decodable, Encodable, HashStable_Generic};
 use rustc_span::Symbol;
 
-use crate::abi::Size;
 use crate::spec::{RelocModel, Target};
 
 pub struct ModifierInfo {
diff --git a/compiler/rustc_target/src/callconv/aarch64.rs b/compiler/rustc_target/src/callconv/aarch64.rs
index 67345f0d47b..d712bec9b78 100644
--- a/compiler/rustc_target/src/callconv/aarch64.rs
+++ b/compiler/rustc_target/src/callconv/aarch64.rs
@@ -1,9 +1,8 @@
 use std::iter;
 
-use rustc_abi::{BackendRepr, Primitive};
+use rustc_abi::{BackendRepr, HasDataLayout, Primitive, TyAbiInterface};
 
 use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 use crate::spec::{HasTargetSpec, Target};
 
 /// Indicates the variant of the AArch64 ABI we are compiling for.
diff --git a/compiler/rustc_target/src/callconv/amdgpu.rs b/compiler/rustc_target/src/callconv/amdgpu.rs
index 3007a729a8b..91ac00e0250 100644
--- a/compiler/rustc_target/src/callconv/amdgpu.rs
+++ b/compiler/rustc_target/src/callconv/amdgpu.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, FnAbi};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 
 fn classify_ret<'a, Ty, C>(_cx: &C, ret: &mut ArgAbi<'a, Ty>)
 where
diff --git a/compiler/rustc_target/src/callconv/arm.rs b/compiler/rustc_target/src/callconv/arm.rs
index bd6f781fb81..75797daba69 100644
--- a/compiler/rustc_target/src/callconv/arm.rs
+++ b/compiler/rustc_target/src/callconv/arm.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, Conv, FnAbi, Reg, RegKind, Uniform};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index 8bf61cb1337..47566bde6b4 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,9 +1,10 @@
-use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{
-    self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+use rustc_abi::{
+    BackendRepr, ExternAbi, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size,
+    TyAbiInterface, TyAndLayout, Variants,
 };
+
+use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
 use crate::spec::HasTargetSpec;
-use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(Copy, Clone)]
 enum RegPassKind {
@@ -42,7 +43,7 @@ where
 {
     match arg_layout.backend_repr {
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer(_) => {
+            Primitive::Int(..) | Primitive::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
                 }
@@ -62,7 +63,7 @@ where
                     _ => return Err(CannotUseFpConv),
                 }
             }
-            abi::Float(_) => {
+            Primitive::Float(_) => {
                 if arg_layout.size.bits() > flen {
                     return Err(CannotUseFpConv);
                 }
@@ -115,8 +116,8 @@ where
             }
             FieldsShape::Arbitrary { .. } => {
                 match arg_layout.variants {
-                    abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
-                    abi::Variants::Single { .. } | abi::Variants::Empty => (),
+                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
+                    Variants::Single { .. } | Variants::Empty => (),
                 }
                 for i in arg_layout.fields.index_by_increasing_offset() {
                     let field = arg_layout.field(cx, i);
@@ -314,7 +315,7 @@ fn classify_arg<'a, Ty, C>(
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
-        if let abi::Int(i, _) = scalar.primitive() {
+        if let Primitive::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -363,12 +364,12 @@ where
     }
 }
 
-pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    if abi == SpecAbi::RustIntrinsic {
+    if abi == ExternAbi::RustIntrinsic {
         return;
     }
 
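Both this file and the RISC-V lowering below encode the same psABI rule in `extend_integer_width`: on a 64-bit target (xlen = 64), a 32-bit integer argument is always sign-extended, even when its Rust type is unsigned. A minimal self-contained sketch of that decision follows; `integer_extension` and its `Extension` enum are illustrative stand-ins, not rustc_abi items, and the fallback branch is simplified:

    // Illustrative sketch, not rustc_abi code: toy model of the
    // sign-extension rule from `extend_integer_width` above.
    #[derive(Debug, PartialEq)]
    enum Extension { None, Sign, Zero }

    fn integer_extension(bits: u64, signed: bool, xlen: u64) -> Extension {
        if bits == 32 && xlen > 32 {
            // psABI rule: i32 and u32 both travel sign-extended in a
            // 64-bit integer register.
            Extension::Sign
        } else if bits < xlen {
            // Simplified fallback: extend according to signedness.
            if signed { Extension::Sign } else { Extension::Zero }
        } else {
            Extension::None
        }
    }

    fn main() {
        assert_eq!(integer_extension(32, false, 64), Extension::Sign); // u32 too
        assert_eq!(integer_extension(16, false, 64), Extension::Zero);
        assert_eq!(integer_extension(64, true, 64), Extension::None);
    }
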
diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs
index 37980a91c76..f7d68822155 100644
--- a/compiler/rustc_target/src/callconv/mips.rs
+++ b/compiler/rustc_target/src/callconv/mips.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, Size};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{HasDataLayout, Size};
 
 fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
 where
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 5bdf4c2ad77..89f324bc313 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -1,12 +1,15 @@
-use crate::abi::call::{
-    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Reg, Uniform,
+use rustc_abi::{
+    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
+};
+
+use crate::callconv::{
+    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Uniform,
 };
-use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
-        if let abi::Int(i, signed) = scalar.primitive() {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
+        if let Primitive::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
                     attrs.ext(ArgExtension::Sext);
@@ -25,9 +28,9 @@ where
     C: HasDataLayout,
 {
     match ret.layout.field(cx, i).backend_repr {
-        abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Float(abi::F32) => Some(Reg::f32()),
-            abi::Float(abi::F64) => Some(Reg::f64()),
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
+            Primitive::Float(Float::F32) => Some(Reg::f32()),
+            Primitive::Float(Float::F64) => Some(Reg::f64()),
             _ => None,
         },
         _ => None,
@@ -51,7 +54,7 @@ where
         // use of float registers to structures (not unions) containing exactly one or two
         // float fields.
 
-        if let abi::FieldsShape::Arbitrary { .. } = ret.layout.fields {
+        if let FieldsShape::Arbitrary { .. } = ret.layout.fields {
             if ret.layout.fields.count() == 1 {
                 if let Some(reg) = float_reg(cx, ret, 0) {
                     ret.cast_to(reg);
@@ -90,16 +93,16 @@ where
     let mut prefix_index = 0;
 
     match arg.layout.fields {
-        abi::FieldsShape::Primitive => unreachable!(),
-        abi::FieldsShape::Array { .. } => {
+        FieldsShape::Primitive => unreachable!(),
+        FieldsShape::Array { .. } => {
             // Arrays are passed indirectly
             arg.make_indirect();
             return;
         }
-        abi::FieldsShape::Union(_) => {
+        FieldsShape::Union(_) => {
             // Unions are always treated as a series of 64-bit integer chunks
         }
-        abi::FieldsShape::Arbitrary { .. } => {
+        FieldsShape::Arbitrary { .. } => {
             // Structures are split up into a series of 64-bit integer chunks, but any aligned
             // doubles not part of another aggregate are passed as floats.
             let mut last_offset = Size::ZERO;
@@ -109,8 +112,8 @@ where
                 let offset = arg.layout.fields.offset(i);
 
                 // We only care about aligned doubles
-                if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
-                    if scalar.primitive() == abi::Float(abi::F64) {
+                if let BackendRepr::Scalar(scalar) = field.backend_repr {
+                    if scalar.primitive() == Primitive::Float(Float::F64) {
                         if offset.is_aligned(dl.f64_align.abi) {
                             // Insert enough integers to cover [last_offset, offset)
                             assert!(last_offset.is_aligned(dl.f64_align.abi));
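
The `FieldsShape::Arbitrary` arm above flattens a structure into 64-bit chunks, but lets an `f64` field claim a float register when it sits at an 8-byte-aligned offset. A toy sketch of that per-field decision (all names hypothetical, not the rustc_abi API):

    // Toy stand-ins for the decision in the hunk above.
    #[derive(Debug, PartialEq)]
    enum Chunk { Int64, Float64 }

    fn classify_field(is_f64: bool, offset_bytes: u64) -> Chunk {
        if is_f64 && offset_bytes % 8 == 0 {
            Chunk::Float64 // aligned double: gets a float register
        } else {
            Chunk::Int64 // folded into the 64-bit integer chunks
        }
    }

    fn main() {
        assert_eq!(classify_field(true, 8), Chunk::Float64);
        assert_eq!(classify_field(true, 4), Chunk::Int64); // unaligned double
        assert_eq!(classify_field(false, 8), Chunk::Int64);
    }
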
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 41b78d9121d..9e651376cd7 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -1,14 +1,14 @@
 use std::str::FromStr;
 use std::{fmt, iter};
 
-pub use rustc_abi::{ExternAbi, Reg, RegKind};
+use rustc_abi::{
+    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Scalar, Size, TyAbiInterface,
+    TyAndLayout,
+};
+pub use rustc_abi::{Primitive, Reg, RegKind};
 use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;
 
-use crate::abi::{
-    self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
-    TyAndLayout,
-};
 use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
 
 mod aarch64;
@@ -349,7 +349,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     pub fn new(
         cx: &impl HasDataLayout,
         layout: TyAndLayout<'a, Ty>,
-        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
+        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
     ) -> Self {
         let mode = match layout.backend_repr {
             BackendRepr::Uninhabited => PassMode::Ignore,
@@ -464,7 +464,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
         if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
-            if let abi::Int(i, signed) = scalar.primitive() {
+            if let Primitive::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
                         if signed {
@@ -756,7 +756,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 continue;
             }
 
-            if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
+            if arg_idx.is_none()
+                && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
+            {
                 // Return values larger than 2 registers using a return area
                 // pointer. LLVM and Cranelift disagree about how to return
                 // values that don't fit in the registers designated for return
@@ -837,7 +839,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
             assert!(is_indirect_not_on_stack);
 
             let size = arg.layout.size;
-            if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
+            if !arg.layout.is_unsized() && size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
                 // We want to pass small aggregates as immediates, but using
                 // an LLVM aggregate type for this leads to bad optimizations,
                 // so we pick an appropriately sized integer type instead.
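
Two size thresholds drive the hunks above: return values wider than two pointers go through a return area pointer (LLVM and Cranelift disagree on wider multi-register returns), and sized indirect aggregates no larger than one pointer are passed as an integer immediate instead. A minimal sketch of both predicates, with plain byte counts standing in for `Size` and the pointer-size query:

    // Illustrative helpers, not compiler functions.
    fn needs_return_area(ret_size: u64, ptr_size: u64) -> bool {
        // Wider than two registers: LLVM and Cranelift disagree on such
        // returns, so a caller-provided return area pointer is used.
        ret_size > 2 * ptr_size
    }

    fn pass_small_aggregate_as_int(size: u64, ptr_size: u64) -> bool {
        // Sized aggregates no larger than a pointer become an
        // appropriately sized integer immediate.
        size <= ptr_size
    }

    fn main() {
        let ptr = 8; // 64-bit target
        assert!(!needs_return_area(16, ptr)); // fits in two registers
        assert!(needs_return_area(24, ptr));
        assert!(pass_small_aggregate_as_int(8, ptr));
    }
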
diff --git a/compiler/rustc_target/src/callconv/nvptx64.rs b/compiler/rustc_target/src/callconv/nvptx64.rs
index c64164372a1..c5da1855658 100644
--- a/compiler/rustc_target/src/callconv/nvptx64.rs
+++ b/compiler/rustc_target/src/callconv/nvptx64.rs
@@ -1,6 +1,7 @@
+use rustc_abi::{HasDataLayout, Reg, Size, TyAbiInterface};
+
 use super::{ArgAttribute, ArgAttributes, ArgExtension, CastTarget};
-use crate::abi::call::{ArgAbi, FnAbi, Reg, Size, Uniform};
-use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::abi::call::{ArgAbi, FnAbi, Uniform};
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
     if ret.layout.is_aggregate() && ret.layout.is_sized() {
diff --git a/compiler/rustc_target/src/callconv/powerpc64.rs b/compiler/rustc_target/src/callconv/powerpc64.rs
index 92c1f6e7148..7a66ce8529a 100644
--- a/compiler/rustc_target/src/callconv/powerpc64.rs
+++ b/compiler/rustc_target/src/callconv/powerpc64.rs
@@ -2,8 +2,9 @@
 // Alignment of 128-bit types is not currently handled; this will
 // need to be fixed when PowerPC vector support is added.
 
+use rustc_abi::{Endian, HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{Align, ArgAbi, FnAbi, Reg, RegKind, Uniform};
-use crate::abi::{Endian, HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 #[derive(Debug, Clone, Copy, PartialEq)]
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index 4d858392c97..24531b0ef63 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -4,12 +4,13 @@
 // Reference: Clang RISC-V ELF psABI lowering code
 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
 
-use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use rustc_abi::{
+    BackendRepr, ExternAbi, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size,
+    TyAbiInterface, TyAndLayout, Variants,
+};
 
-use crate::abi;
-use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
 use crate::spec::HasTargetSpec;
-use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(Copy, Clone)]
 enum RegPassKind {
@@ -48,7 +49,7 @@ where
 {
     match arg_layout.backend_repr {
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer(_) => {
+            Primitive::Int(..) | Primitive::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
                 }
@@ -68,7 +69,7 @@ where
                     _ => return Err(CannotUseFpConv),
                 }
             }
-            abi::Float(_) => {
+            Primitive::Float(_) => {
                 if arg_layout.size.bits() > flen {
                     return Err(CannotUseFpConv);
                 }
@@ -121,8 +122,8 @@ where
             }
             FieldsShape::Arbitrary { .. } => {
                 match arg_layout.variants {
-                    abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
-                    abi::Variants::Single { .. } | abi::Variants::Empty => (),
+                    Variants::Multiple { .. } => return Err(CannotUseFpConv),
+                    Variants::Single { .. } | Variants::Empty => (),
                 }
                 for i in arg_layout.fields.index_by_increasing_offset() {
                     let field = arg_layout.field(cx, i);
@@ -320,7 +321,7 @@ fn classify_arg<'a, Ty, C>(
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
-        if let abi::Int(i, _) = scalar.primitive() {
+        if let Primitive::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -369,12 +370,12 @@ where
     }
 }
 
-pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    if abi == SpecAbi::RustIntrinsic {
+    if abi == ExternAbi::RustIntrinsic {
         return;
     }
 
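The `CannotUseFpConv` checks in the hunks above hinge on a scalar eligibility test: an integer may ride in the FP-register convention only if it fits in xlen bits, and a float only if it fits in flen bits. A reduced sketch under toy types (not the compiler's `Scalar`):

    // Reduced sketch with a toy `Scalar`, not the compiler's type.
    #[derive(Clone, Copy)]
    enum Scalar { Int { bits: u64 }, Float { bits: u64 } }

    fn can_use_fp_conv(scalar: Scalar, xlen: u64, flen: u64) -> bool {
        match scalar {
            // Integers wider than an integer register disqualify the value.
            Scalar::Int { bits } => bits <= xlen,
            // Floats wider than a float register do the same.
            Scalar::Float { bits } => bits <= flen,
        }
    }

    fn main() {
        // RV64 with the D extension: xlen = 64, flen = 64.
        assert!(can_use_fp_conv(Scalar::Float { bits: 64 }, 64, 64));
        assert!(!can_use_fp_conv(Scalar::Float { bits: 128 }, 64, 64)); // f128
        assert!(can_use_fp_conv(Scalar::Int { bits: 32 }, 64, 64));
    }
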
diff --git a/compiler/rustc_target/src/callconv/s390x.rs b/compiler/rustc_target/src/callconv/s390x.rs
index a73c1a0f46c..edf57098d6d 100644
--- a/compiler/rustc_target/src/callconv/s390x.rs
+++ b/compiler/rustc_target/src/callconv/s390x.rs
@@ -1,8 +1,9 @@
 // Reference: ELF Application Binary Interface s390x Supplement
 // https://github.com/IBM/s390x-abi
 
+use rustc_abi::{BackendRepr, HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind};
-use crate::abi::{BackendRepr, HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs
index 37980a91c76..f7d68822155 100644
--- a/compiler/rustc_target/src/callconv/sparc.rs
+++ b/compiler/rustc_target/src/callconv/sparc.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, Size};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{HasDataLayout, Size};
 
 fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
 where
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index 313d8730399..392fb5156fc 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -1,9 +1,13 @@
 // FIXME: This needs an audit for correctness and completeness.
 
+use rustc_abi::{
+    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Scalar, Size, TyAbiInterface,
+    TyAndLayout,
+};
+
 use crate::abi::call::{
-    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Reg, Uniform,
+    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Uniform,
 };
-use crate::abi::{self, HasDataLayout, Scalar, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
 
 #[derive(Clone, Debug)]
@@ -21,7 +25,7 @@ where
 {
     let dl = cx.data_layout();
 
-    if !matches!(scalar.primitive(), abi::Float(abi::F32 | abi::F64)) {
+    if !matches!(scalar.primitive(), Primitive::Float(Float::F32 | Float::F64)) {
         return data;
     }
 
@@ -57,7 +61,7 @@ where
         return data;
     }
 
-    if scalar.primitive() == abi::Float(abi::F32) {
+    if scalar.primitive() == Primitive::Float(Float::F32) {
         data.arg_attribute = ArgAttribute::InReg;
         data.prefix[data.prefix_index] = Some(Reg::f32());
         data.last_offset = offset + Reg::f32().size;
@@ -81,14 +85,16 @@ where
 {
     data = arg_scalar(cx, scalar1, offset, data);
     match (scalar1.primitive(), scalar2.primitive()) {
-        (abi::Float(abi::F32), _) => offset += Reg::f32().size,
-        (_, abi::Float(abi::F64)) => offset += Reg::f64().size,
-        (abi::Int(i, _signed), _) => offset += i.size(),
-        (abi::Pointer(_), _) => offset += Reg::i64().size,
+        (Primitive::Float(Float::F32), _) => offset += Reg::f32().size,
+        (_, Primitive::Float(Float::F64)) => offset += Reg::f64().size,
+        (Primitive::Int(i, _signed), _) => offset += i.size(),
+        (Primitive::Pointer(_), _) => offset += Reg::i64().size,
         _ => {}
     }
 
-    if (offset.bytes() % 4) != 0 && matches!(scalar2.primitive(), abi::Float(abi::F32 | abi::F64)) {
+    if (offset.bytes() % 4) != 0
+        && matches!(scalar2.primitive(), Primitive::Float(Float::F32 | Float::F64))
+    {
         offset += Size::from_bytes(4 - (offset.bytes() % 4));
     }
     data = arg_scalar(cx, scalar2, offset, data);
@@ -105,15 +111,15 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    if let abi::FieldsShape::Union(_) = layout.fields {
+    if let FieldsShape::Union(_) = layout.fields {
         return data;
     }
 
     match layout.backend_repr {
-        abi::BackendRepr::Scalar(scalar) => {
+        BackendRepr::Scalar(scalar) => {
             data = arg_scalar(cx, &scalar, offset, data);
         }
-        abi::BackendRepr::Memory { .. } => {
+        BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
@@ -122,7 +128,7 @@ where
             }
         }
         _ => {
-            if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
+            if let BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
                 data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
             }
         }
@@ -148,16 +154,16 @@ where
     }
 
     match arg.layout.fields {
-        abi::FieldsShape::Primitive => unreachable!(),
-        abi::FieldsShape::Array { .. } => {
+        FieldsShape::Primitive => unreachable!(),
+        FieldsShape::Array { .. } => {
             // Arrays are passed indirectly
             arg.make_indirect();
             return;
         }
-        abi::FieldsShape::Union(_) => {
+        FieldsShape::Union(_) => {
             // Unions are always treated as a series of 64-bit integer chunks
         }
-        abi::FieldsShape::Arbitrary { .. } => {
+        FieldsShape::Arbitrary { .. } => {
             // Structures with floating point numbers need special care.
 
             let mut data = parse_structure(
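
In `arg_scalar_pair` above, the running offset is bumped to the next 4-byte boundary before a float scalar in the second slot is placed. A tiny sketch of that rounding rule (hypothetical helper, not compiler code):

    // Hypothetical helper mirroring the rounding in `arg_scalar_pair`.
    fn align_for_float(offset: u64, second_is_float: bool) -> u64 {
        if offset % 4 != 0 && second_is_float {
            // Round up to the next 4-byte boundary before placing the float.
            offset + (4 - offset % 4)
        } else {
            offset
        }
    }

    fn main() {
        assert_eq!(align_for_float(5, true), 8);
        assert_eq!(align_for_float(5, false), 5); // non-floats are not padded
        assert_eq!(align_for_float(8, true), 8); // already aligned
    }
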
diff --git a/compiler/rustc_target/src/callconv/wasm.rs b/compiler/rustc_target/src/callconv/wasm.rs
index d01b59cbb03..56cd7a3f93d 100644
--- a/compiler/rustc_target/src/callconv/wasm.rs
+++ b/compiler/rustc_target/src/callconv/wasm.rs
@@ -1,7 +1,6 @@
-use rustc_abi::{BackendRepr, Float, Integer, Primitive};
+use rustc_abi::{BackendRepr, Float, HasDataLayout, Integer, Primitive, TyAbiInterface};
 
 use crate::abi::call::{ArgAbi, FnAbi};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 
 fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
 where
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index cd8465c09ca..7c88d9b55cf 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,9 +1,10 @@
-use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
-use crate::abi::{
-    AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+use rustc_abi::{
+    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
+    TyAbiInterface, TyAndLayout,
 };
+
+use crate::abi::call::{ArgAttribute, FnAbi, PassMode};
 use crate::spec::HasTargetSpec;
-use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(PartialEq)]
 pub(crate) enum Flavor {
@@ -214,7 +215,7 @@ pub(crate) fn fill_inregs<'a, Ty, C>(
     }
 }
 
-pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
@@ -223,18 +224,19 @@ where
     // registers will quiet signalling NaNs. Also avoid using SSE registers since they
     // are not always available (depending on target features).
     if !fn_abi.ret.is_ignore()
-        // Intrinsics themselves are not actual "real" functions, so theres no need to change their ABIs.
-        && abi != SpecAbi::RustIntrinsic
+        // Intrinsics themselves are not "real" functions, so there's no need to change their ABIs.
+        && abi != ExternAbi::RustIntrinsic
     {
         let has_float = match fn_abi.ret.layout.backend_repr {
-            BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+            BackendRepr::Scalar(s) => matches!(s.primitive(), Primitive::Float(_)),
             BackendRepr::ScalarPair(s1, s2) => {
-                matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
+                matches!(s1.primitive(), Primitive::Float(_))
+                    || matches!(s2.primitive(), Primitive::Float(_))
             }
             _ => false, // anyway not passed via registers on x86
         };
         if has_float {
-            if fn_abi.ret.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
+            if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
                 // Same size or smaller than pointer, return in a register.
                 fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
             } else {
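
The x86 hunk above avoids returning Rust-ABI floats in x87 registers, which quiet signalling NaNs: a float-bearing return value is cast to an integer register when it is no larger than a pointer, and returned indirectly otherwise. Sketched with plain byte sizes (names are illustrative, not the compiler's):

    // Illustrative byte-size model of the float-return rule above.
    #[derive(Debug, PartialEq)]
    enum RetStrategy { IntegerRegister, Indirect }

    fn float_ret_strategy(ret_size: u64, ptr_size: u64) -> RetStrategy {
        if ret_size <= ptr_size {
            // Same size or smaller than a pointer: return in an integer
            // register, bypassing the NaN-quieting x87 stack.
            RetStrategy::IntegerRegister
        } else {
            RetStrategy::Indirect // returned through memory
        }
    }

    fn main() {
        // 32-bit x86: f32 fits a register, f64 does not.
        assert_eq!(float_ret_strategy(4, 4), RetStrategy::IntegerRegister);
        assert_eq!(float_ret_strategy(8, 4), RetStrategy::Indirect);
    }
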
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index 37aecf323a1..6e9b4690a2c 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -1,10 +1,12 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
 
-use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use rustc_abi::{
+    BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface, TyAndLayout,
+    Variants,
+};
 
-use crate::abi;
-use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
+use crate::abi::call::{ArgAbi, CastTarget, FnAbi};
 
 /// Classification of "eightbyte" components.
 // N.B., the order of the variants is from general to specific,
@@ -52,8 +54,8 @@ where
             BackendRepr::Uninhabited => return Ok(()),
 
             BackendRepr::Scalar(scalar) => match scalar.primitive() {
-                abi::Int(..) | abi::Pointer(_) => Class::Int,
-                abi::Float(_) => Class::Sse,
+                Primitive::Int(..) | Primitive::Pointer(_) => Class::Int,
+                Primitive::Float(_) => Class::Sse,
             },
 
             BackendRepr::Vector { .. } => Class::Sse,
@@ -65,8 +67,8 @@ where
                 }
 
                 match &layout.variants {
-                    abi::Variants::Single { .. } | abi::Variants::Empty => {}
-                    abi::Variants::Multiple { variants, .. } => {
+                    Variants::Single { .. } | Variants::Empty => {}
+                    Variants::Multiple { variants, .. } => {
                         // Treat enum variants like union members.
                         for variant_idx in variants.indices() {
                             classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index 9d313d16500..2542713bc11 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -5,8 +5,9 @@
 //! Section 8.1.4 & 8.1.5 of the Xtensa ISA reference manual, as well as snippets from
 //! Section 2.3 from the Xtensa programmer's guide.
 
+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 const NUM_ARG_GPRS: u64 = 6;
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index 50679ab8cc8..11fc09d26e4 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -31,10 +31,7 @@ pub mod target_features;
 mod tests;
 
 pub mod abi {
-    pub(crate) use Float::*;
-    pub(crate) use Primitive::*;
-    // Explicitly import `Float` to avoid ambiguity with `Primitive::Float`.
-    pub use rustc_abi::{Float, *};
+    pub use rustc_abi::*;
 
     pub use crate::callconv as call;
 }
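
This is the hunk the whole cleanup revolves around: `pub mod abi` used to glob-import `Float::*` and `Primitive::*`, which forced an explicit `use rustc_abi::Float` to disambiguate the `Float` type from the `Primitive::Float` variant; dropping the globs is why every bare `abi::Int(..)`-style pattern elsewhere in this diff becomes a qualified `Primitive::Int(..)`. A standalone toy reproduction of the name clash (mock definitions, not rustc_abi):

    // Mock definitions, not rustc_abi: two meanings competing for `Float`.
    #[derive(Clone, Copy)]
    enum Float { F32, F64 }

    #[derive(Clone, Copy)]
    enum Primitive {
        Int(u16, bool),
        Float(Float), // variant name collides with the `Float` type above
        Pointer(u8),
    }

    // Under `use Primitive::*;`, a bare `Float(_)` pattern means the
    // variant while `Float::F64` means the type; qualifying every use,
    // as the diff now does, keeps the two apart at a glance.
    fn is_double(p: Primitive) -> bool {
        matches!(p, Primitive::Float(Float::F64))
    }

    fn main() {
        assert!(is_double(Primitive::Float(Float::F64)));
        assert!(!is_double(Primitive::Float(Float::F32)));
        assert!(!is_double(Primitive::Int(32, true)));
        assert!(!is_double(Primitive::Pointer(0)));
    }
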
diff --git a/compiler/rustc_target/src/spec/base/aix.rs b/compiler/rustc_target/src/spec/base/aix.rs
index fe37d313294..a92d104f910 100644
--- a/compiler/rustc_target/src/spec/base/aix.rs
+++ b/compiler/rustc_target/src/spec/base/aix.rs
@@ -1,4 +1,5 @@
-use crate::abi::Endian;
+use rustc_abi::Endian;
+
 use crate::spec::{Cc, CodeModel, LinkOutputKind, LinkerFlavor, TargetOptions, crt_objects, cvs};
 
 pub(crate) fn opts() -> TargetOptions {
diff --git a/compiler/rustc_target/src/spec/base/bpf.rs b/compiler/rustc_target/src/spec/base/bpf.rs
index 17d5e75ef6d..7c0e2e165b6 100644
--- a/compiler/rustc_target/src/spec/base/bpf.rs
+++ b/compiler/rustc_target/src/spec/base/bpf.rs
@@ -1,4 +1,5 @@
-use crate::abi::Endian;
+use rustc_abi::Endian;
+
 use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, TargetOptions};
 
 pub(crate) fn opts(endian: Endian) -> TargetOptions {
diff --git a/compiler/rustc_target/src/spec/base/xtensa.rs b/compiler/rustc_target/src/spec/base/xtensa.rs
index 280dd16e264..47a532dfdd4 100644
--- a/compiler/rustc_target/src/spec/base/xtensa.rs
+++ b/compiler/rustc_target/src/spec/base/xtensa.rs
@@ -1,4 +1,5 @@
-use crate::abi::Endian;
+use rustc_abi::Endian;
+
 use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, TargetOptions};
 
 pub(crate) fn opts() -> TargetOptions {
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 72600225e7a..3affa1cf90c 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -42,6 +42,7 @@ use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use std::{fmt, io};
 
+use rustc_abi::{Endian, ExternAbi, Integer, Size, TargetDataLayout, TargetDataLayoutErrors};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_fs_util::try_canonicalize;
 use rustc_macros::{Decodable, Encodable, HashStable_Generic};
@@ -51,9 +52,7 @@ use serde_json::Value;
 use tracing::debug;
 
 use crate::abi::call::Conv;
-use crate::abi::{Endian, Integer, Size, TargetDataLayout, TargetDataLayoutErrors};
 use crate::json::{Json, ToJson};
-use crate::spec::abi::Abi;
 use crate::spec::crt_objects::CrtObjects;
 
 pub mod crt_objects;
@@ -2845,44 +2844,40 @@ impl DerefMut for Target {
 
 impl Target {
     /// Given a function ABI, turn it into the correct ABI for this target.
-    pub fn adjust_abi(&self, abi: Abi, c_variadic: bool) -> Abi {
+    pub fn adjust_abi(&self, abi: ExternAbi, c_variadic: bool) -> ExternAbi {
+        use ExternAbi::*;
         match abi {
             // On Windows, `extern "system"` behaves like msvc's `__stdcall`.
             // `__stdcall` only applies on x86 and on non-variadic functions:
             // https://learn.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-170
-            Abi::System { unwind } if self.is_like_windows && self.arch == "x86" && !c_variadic => {
-                Abi::Stdcall { unwind }
+            System { unwind } if self.is_like_windows && self.arch == "x86" && !c_variadic => {
+                Stdcall { unwind }
             }
-            Abi::System { unwind } => Abi::C { unwind },
-            Abi::EfiApi if self.arch == "arm" => Abi::Aapcs { unwind: false },
-            Abi::EfiApi if self.arch == "x86_64" => Abi::Win64 { unwind: false },
-            Abi::EfiApi => Abi::C { unwind: false },
-
-            // See commentary in `is_abi_supported`: we map these ABIs to "C" when they do not make sense.
-            Abi::Stdcall { .. } | Abi::Thiscall { .. } | Abi::Fastcall { .. }
-                if self.arch == "x86" =>
-            {
-                abi
-            }
-            Abi::Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi,
-            Abi::Stdcall { unwind }
-            | Abi::Thiscall { unwind }
-            | Abi::Fastcall { unwind }
-            | Abi::Vectorcall { unwind } => Abi::C { unwind },
+            System { unwind } => C { unwind },
+            EfiApi if self.arch == "arm" => Aapcs { unwind: false },
+            EfiApi if self.arch == "x86_64" => Win64 { unwind: false },
+            EfiApi => C { unwind: false },
+
+            // See commentary in `is_abi_supported`.
+            Stdcall { .. } | Thiscall { .. } if self.arch == "x86" => abi,
+            Stdcall { unwind } | Thiscall { unwind } => C { unwind },
+            Fastcall { .. } if self.arch == "x86" => abi,
+            Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi,
+            Fastcall { unwind } | Vectorcall { unwind } => C { unwind },
 
             // The Windows x64 calling convention we use for `extern "Rust"`
             // <https://learn.microsoft.com/en-us/cpp/build/x64-software-conventions#register-volatility-and-preservation>
             // expects the callee to save `xmm6` through `xmm15`, but `PreserveMost`
             // (that we use by default for `extern "rust-cold"`) doesn't save any of those.
             // So to avoid bloating callers, just use the Rust convention here.
-            Abi::RustCold if self.is_like_windows && self.arch == "x86_64" => Abi::Rust,
+            RustCold if self.is_like_windows && self.arch == "x86_64" => Rust,
 
             abi => abi,
         }
     }
 
-    pub fn is_abi_supported(&self, abi: Abi) -> bool {
-        use Abi::*;
+    pub fn is_abi_supported(&self, abi: ExternAbi) -> bool {
+        use ExternAbi::*;
         match abi {
             Rust
             | C { .. }
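
The rewritten `adjust_abi` keeps its first rule intact: `extern "system"` lowers to `Stdcall` only for non-variadic functions on x86 Windows and decays to `C` everywhere else. A toy sketch of just that mapping (simplified enum and parameters, not `ExternAbi` or `Target`):

    // Simplified stand-in for `ExternAbi`; only the outcome matters here.
    #[derive(Debug, PartialEq)]
    enum Abi { Stdcall, C }

    fn adjust_system_abi(is_like_windows: bool, arch: &str, c_variadic: bool) -> Abi {
        if is_like_windows && arch == "x86" && !c_variadic {
            Abi::Stdcall // `__stdcall` applies only to non-variadic x86
        } else {
            Abi::C
        }
    }

    fn main() {
        assert_eq!(adjust_system_abi(true, "x86", false), Abi::Stdcall);
        assert_eq!(adjust_system_abi(true, "x86", true), Abi::C); // variadic
        assert_eq!(adjust_system_abi(false, "x86", false), Abi::C);
    }
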