Diffstat (limited to 'compiler')
 compiler/rustc_abi/src/callconv.rs                                |  16
 compiler/rustc_abi/src/layout.rs                                  | 104
 compiler/rustc_abi/src/layout/ty.rs                               |  12
 compiler/rustc_abi/src/lib.rs                                     | 112
 compiler/rustc_codegen_llvm/src/abi.rs                            |  10
 compiler/rustc_codegen_llvm/src/asm.rs                            |  93
 compiler/rustc_codegen_llvm/src/builder.rs                        |   4
 compiler/rustc_codegen_llvm/src/intrinsic.rs                      |  13
 compiler/rustc_codegen_llvm/src/type_of.rs                        |  37
 compiler/rustc_codegen_ssa/src/mir/block.rs                       |   2
 compiler/rustc_codegen_ssa/src/mir/constant.rs                    |   4
 compiler/rustc_codegen_ssa/src/mir/debuginfo.rs                   |   4
 compiler/rustc_codegen_ssa/src/mir/operand.rs                     |  39
 compiler/rustc_codegen_ssa/src/mir/rvalue.rs                      |  10
 compiler/rustc_codegen_ssa/src/traits/builder.rs                  |   4
 compiler/rustc_const_eval/src/const_eval/dummy_machine.rs         |   2
 compiler/rustc_const_eval/src/const_eval/eval_queries.rs          |   6
 compiler/rustc_const_eval/src/const_eval/valtrees.rs              |   6
 compiler/rustc_const_eval/src/interpret/call.rs                   |   4
 compiler/rustc_const_eval/src/interpret/cast.rs                   |   2
 compiler/rustc_const_eval/src/interpret/discriminant.rs           |   2
 compiler/rustc_const_eval/src/interpret/intrinsics.rs             |   2
 compiler/rustc_const_eval/src/interpret/operand.rs                |  53
 compiler/rustc_const_eval/src/interpret/operator.rs               |  12
 compiler/rustc_const_eval/src/interpret/place.rs                  |  12
 compiler/rustc_const_eval/src/interpret/validity.rs               |  23
 compiler/rustc_const_eval/src/util/check_validity_requirement.rs  |  14
 compiler/rustc_lint/src/builtin.rs                                |   6
 compiler/rustc_lint/src/foreign_modules.rs                        |   2
 compiler/rustc_lint/src/types.rs                                  |   6
 compiler/rustc_middle/src/ty/layout.rs                            |   7
 compiler/rustc_mir_build/src/build/expr/as_rvalue.rs              |   4
 compiler/rustc_mir_dataflow/src/value_analysis.rs                 |   2
 compiler/rustc_mir_transform/src/dataflow_const_prop.rs           |  12
 compiler/rustc_mir_transform/src/gvn.rs                           |  19
 compiler/rustc_mir_transform/src/known_panics_lint.rs             |  10
 compiler/rustc_passes/src/layout_test.rs                          |   6
 compiler/rustc_smir/src/rustc_smir/convert/abi.rs                 |  14
 compiler/rustc_target/src/callconv/loongarch.rs                   |  18
 compiler/rustc_target/src/callconv/mips64.rs                      |   8
 compiler/rustc_target/src/callconv/mod.rs                         |  29
 compiler/rustc_target/src/callconv/riscv.rs                       |  18
 compiler/rustc_target/src/callconv/sparc64.rs                     |   8
 compiler/rustc_target/src/callconv/x86.rs                         |  18
 compiler/rustc_target/src/callconv/x86_64.rs                      |  14
 compiler/rustc_target/src/callconv/x86_win64.rs                   |  27
 compiler/rustc_target/src/callconv/xtensa.rs                      |   6
 compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs    |  10
 compiler/rustc_ty_utils/src/abi.rs                                |  14
 compiler/rustc_ty_utils/src/layout.rs                             |  43
 compiler/rustc_ty_utils/src/layout/invariant.rs                   |  42
 51 files changed, 517 insertions(+), 428 deletions(-)
diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs
index 872cae59a4e..ee63e46e88c 100644
--- a/compiler/rustc_abi/src/callconv.rs
+++ b/compiler/rustc_abi/src/callconv.rs
@@ -6,9 +6,9 @@ mod abi {
 #[cfg(feature = "nightly")]
 use rustc_macros::HashStable_Generic;
 
-#[cfg(feature = "nightly")]
-use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout};
 use crate::{Align, HasDataLayout, Size};
+#[cfg(feature = "nightly")]
+use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout};
 
 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
@@ -128,11 +128,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
     where
         Ty: TyAbiInterface<'a, C> + Copy,
     {
-        match self.abi {
-            Abi::Uninhabited => Err(Heterogeneous),
+        match self.backend_repr {
+            BackendRepr::Uninhabited => Err(Heterogeneous),
 
             // The primitive for this algorithm.
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 let kind = match scalar.primitive() {
                     abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
                     abi::Float(_) => RegKind::Float,
@@ -140,7 +140,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
             }
 
-            Abi::Vector { .. } => {
+            BackendRepr::Vector { .. } => {
                 assert!(!self.is_zst());
                 Ok(HomogeneousAggregate::Homogeneous(Reg {
                     kind: RegKind::Vector,
@@ -148,7 +148,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 }))
             }
 
-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => {
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
                 // Helper for computing `homogeneous_aggregate`, allowing a custom
                 // starting offset (used below for handling variants).
                 let from_fields_at =
@@ -246,7 +246,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                     Ok(result)
                 }
             }
-            Abi::Aggregate { sized: false } => Err(Heterogeneous),
+            BackendRepr::Memory { sized: false } => Err(Heterogeneous),
         }
     }
 }
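
The hunks above map each `BackendRepr` to a register kind for homogeneous-aggregate classification. A minimal standalone sketch of that mapping, using simplified stand-in types rather than the real rustc_abi API (ScalarPair and the field-by-field recursion into sized Memory aggregates are omitted here):

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Primitive { Int, Pointer, Float }

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum BackendRepr {
        Uninhabited,
        Scalar(Primitive),
        Vector { count: u64 },
        Memory { sized: bool },
    }

    #[derive(Debug, PartialEq)]
    enum RegKind { Integer, Float, Vector }

    fn register_kind(repr: BackendRepr) -> Result<RegKind, &'static str> {
        match repr {
            BackendRepr::Scalar(Primitive::Int | Primitive::Pointer) => Ok(RegKind::Integer),
            BackendRepr::Scalar(Primitive::Float) => Ok(RegKind::Float),
            BackendRepr::Vector { .. } => Ok(RegKind::Vector),
            // The real code recurses field by field into sized Memory aggregates;
            // this sketch rejects both memory cases outright.
            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => Err("heterogeneous"),
        }
    }

    fn main() {
        assert_eq!(register_kind(BackendRepr::Scalar(Primitive::Float)), Ok(RegKind::Float));
        assert_eq!(register_kind(BackendRepr::Vector { count: 4 }), Ok(RegKind::Vector));
        assert!(register_kind(BackendRepr::Memory { sized: false }).is_err());
        assert!(register_kind(BackendRepr::Uninhabited).is_err());
    }
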
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 86de39b8f97..e6d66f608da 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -6,7 +6,7 @@ use rustc_index::Idx;
 use tracing::debug;
 
 use crate::{
-    Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
+    AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
     LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
     Variants, WrappingRange,
 };
@@ -125,7 +125,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 offsets: [Size::ZERO, b_offset].into(),
                 memory_index: [0, 1].into(),
             },
-            abi: Abi::ScalarPair(a, b),
+            backend_repr: BackendRepr::ScalarPair(a, b),
             largest_niche,
             align,
             size,
@@ -216,7 +216,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
-            abi: Abi::Uninhabited,
+            backend_repr: BackendRepr::Uninhabited,
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,
@@ -331,7 +331,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
 
             if let Ok(common) = common_non_zst_abi_and_align {
                 // Discard valid range information and allow undef
-                let field_abi = field.abi.to_union();
+                let field_abi = field.backend_repr.to_union();
 
                 if let Some((common_abi, common_align)) = common {
                     if common_abi != field_abi {
@@ -340,7 +340,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     } else {
                         // Fields with the same non-Aggregate ABI should also
                         // have the same alignment
-                        if !matches!(common_abi, Abi::Aggregate { .. }) {
+                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                             assert_eq!(
                                 common_align, field.align.abi,
                                 "non-Aggregate field with matching ABI but differing alignment"
@@ -369,11 +369,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         // If all non-ZST fields have the same ABI, we may forward that ABI
         // for the union as a whole, unless otherwise inhibited.
         let abi = match common_non_zst_abi_and_align {
-            Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
+            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
             Ok(Some((abi, _))) => {
                 if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
                     // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
-                    Abi::Aggregate { sized: true }
+                    BackendRepr::Memory { sized: true }
                 } else {
                     abi
                 }
@@ -387,7 +387,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Ok(LayoutData {
             variants: Variants::Single { index: only_variant_idx },
             fields: FieldsShape::Union(union_field_count),
-            abi,
+            backend_repr: abi,
             largest_niche: None,
             align,
             size: size.align_to(align.abi),
@@ -434,23 +434,23 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // Already doesn't have any niches
                 Scalar::Union { .. } => {}
             };
-            match &mut st.abi {
-                Abi::Uninhabited => {}
-                Abi::Scalar(scalar) => hide_niches(scalar),
-                Abi::ScalarPair(a, b) => {
+            match &mut st.backend_repr {
+                BackendRepr::Uninhabited => {}
+                BackendRepr::Scalar(scalar) => hide_niches(scalar),
+                BackendRepr::ScalarPair(a, b) => {
                     hide_niches(a);
                     hide_niches(b);
                 }
-                Abi::Vector { element, count: _ } => hide_niches(element),
-                Abi::Aggregate { sized: _ } => {}
+                BackendRepr::Vector { element, count: _ } => hide_niches(element),
+                BackendRepr::Memory { sized: _ } => {}
             }
             st.largest_niche = None;
             return Ok(st);
         }
 
         let (start, end) = scalar_valid_range;
-        match st.abi {
-            Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+        match st.backend_repr {
+            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                 // Enlarging validity ranges would result in missed
                 // optimizations, *not* wrongly assuming the inner
                 // value is valid. e.g. unions already enlarge validity ranges,
@@ -607,8 +607,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 }
 
                 // It can't be a Scalar or ScalarPair because the offset isn't 0.
-                if !layout.abi.is_uninhabited() {
-                    layout.abi = Abi::Aggregate { sized: true };
+                if !layout.is_uninhabited() {
+                    layout.backend_repr = BackendRepr::Memory { sized: true };
                 }
                 layout.size += this_offset;
 
@@ -627,26 +627,26 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             let same_size = size == variant_layouts[largest_variant_index].size;
             let same_align = align == variant_layouts[largest_variant_index].align;
 
-            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-                Abi::Uninhabited
+            let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) {
+                BackendRepr::Uninhabited
             } else if same_size && same_align && others_zst {
-                match variant_layouts[largest_variant_index].abi {
+                match variant_layouts[largest_variant_index].backend_repr {
                     // When the total alignment and size match, we can use the
                     // same ABI as the scalar variant with the reserved niche.
-                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                    Abi::ScalarPair(first, second) => {
+                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
+                    BackendRepr::ScalarPair(first, second) => {
                         // Only the niche is guaranteed to be initialised,
                         // so use union layouts for the other primitive.
                         if niche_offset == Size::ZERO {
-                            Abi::ScalarPair(niche_scalar, second.to_union())
+                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                         } else {
-                            Abi::ScalarPair(first.to_union(), niche_scalar)
+                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                         }
                     }
-                    _ => Abi::Aggregate { sized: true },
+                    _ => BackendRepr::Memory { sized: true },
                 }
             } else {
-                Abi::Aggregate { sized: true }
+                BackendRepr::Memory { sized: true }
             };
 
             let layout = LayoutData {
@@ -664,7 +664,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     offsets: [niche_offset].into(),
                     memory_index: [0].into(),
                 },
-                abi,
+                backend_repr: abi,
                 largest_niche,
                 size,
                 align,
@@ -833,14 +833,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 end: (max as u128 & tag_mask),
             },
         };
-        let mut abi = Abi::Aggregate { sized: true };
+        let mut abi = BackendRepr::Memory { sized: true };
 
-        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-            abi = Abi::Uninhabited;
+        if layout_variants.iter().all(|v| v.is_uninhabited()) {
+            abi = BackendRepr::Uninhabited;
         } else if tag.size(dl) == size {
             // Make sure we only use scalar layout when the enum is entirely its
             // own tag (i.e. it has no padding nor any non-ZST variant fields).
-            abi = Abi::Scalar(tag);
+            abi = BackendRepr::Scalar(tag);
         } else {
             // Try to use a ScalarPair for all tagged enums.
             // That's possible only if we can find a common primitive type for all variants.
@@ -864,8 +864,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                         break;
                     }
                 };
-                let prim = match field.abi {
-                    Abi::Scalar(scalar) => {
+                let prim = match field.backend_repr {
+                    BackendRepr::Scalar(scalar) => {
                         common_prim_initialized_in_all_variants &=
                             matches!(scalar, Scalar::Initialized { .. });
                         scalar.primitive()
@@ -934,7 +934,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 {
                     // We can use `ScalarPair` only when it matches our
                     // already computed layout (including `#[repr(C)]`).
-                    abi = pair.abi;
+                    abi = pair.backend_repr;
                 }
             }
         }
@@ -942,12 +942,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
         // variants to ensure they are consistent. This is because a downcast is
         // semantically a NOP, and thus should not affect layout.
-        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
             for variant in &mut layout_variants {
                 // We only do this for variants with fields; the others are not accessed anyway.
                 // Also do not overwrite any already existing "clever" ABIs.
-                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                    variant.abi = abi;
+                if variant.fields.count() > 0
+                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
+                {
+                    variant.backend_repr = abi;
                     // Also need to bump up the size and alignment, so that the entire value fits
                     // in here.
                     variant.size = cmp::max(variant.size, size);
@@ -970,7 +972,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 memory_index: [0].into(),
             },
             largest_niche,
-            abi,
+            backend_repr: abi,
             align,
             size,
             max_repr_align,
@@ -1252,7 +1254,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         }
         let mut layout_of_single_non_zst_field = None;
         let sized = unsized_field.is_none();
-        let mut abi = Abi::Aggregate { sized };
+        let mut abi = BackendRepr::Memory { sized };
 
         let optimize_abi = !repr.inhibit_newtype_abi_optimization();
 
@@ -1270,16 +1272,16 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     // Field fills the struct and it has a scalar or scalar pair ABI.
                     if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                     {
-                        match field.abi {
+                        match field.backend_repr {
                             // For plain scalars, or vectors of them, we can't unpack
                             // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                            Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
-                                abi = field.abi;
+                            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
+                                abi = field.backend_repr;
                             }
                             // But scalar pairs are Rust-specific and get
                             // treated as aggregates by C ABIs anyway.
-                            Abi::ScalarPair(..) => {
-                                abi = field.abi;
+                            BackendRepr::ScalarPair(..) => {
+                                abi = field.backend_repr;
                             }
                             _ => {}
                         }
@@ -1288,8 +1290,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
 
                 // Two non-ZST fields, and they're both scalars.
                 (Some((i, a)), Some((j, b)), None) => {
-                    match (a.abi, b.abi) {
-                        (Abi::Scalar(a), Abi::Scalar(b)) => {
+                    match (a.backend_repr, b.backend_repr) {
+                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                             // Order by the memory placement, not source order.
                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                 ((i, a), (j, b))
@@ -1315,7 +1317,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                             {
                                 // We can use `ScalarPair` only when it matches our
                                 // already computed layout (including `#[repr(C)]`).
-                                abi = pair.abi;
+                                abi = pair.backend_repr;
                             }
                         }
                         _ => {}
@@ -1325,8 +1327,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 _ => {}
             }
         }
-        if fields.iter().any(|f| f.abi.is_uninhabited()) {
-            abi = Abi::Uninhabited;
+        if fields.iter().any(|f| f.is_uninhabited()) {
+            abi = BackendRepr::Uninhabited;
         }
 
         let unadjusted_abi_align = if repr.transparent() {
@@ -1344,7 +1346,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Arbitrary { offsets, memory_index },
-            abi,
+            backend_repr: abi,
             largest_niche,
             align,
             size,
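
The union rule in the hunks above can be restated compactly: if every non-ZST field reports the same validity-stripped repr (and the alignment agrees), the union may forward that repr; any mismatch falls back to Memory. A toy sketch of that fold, with a simplified stand-in type and the alignment check left out:

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Repr { Scalar, Memory }

    // Fold the non-ZST fields' validity-stripped reprs: None means no such
    // field seen yet; any disagreement collapses to Memory, the safe fallback.
    fn union_repr(field_reprs: &[Repr]) -> Repr {
        let mut common = None;
        for &r in field_reprs {
            match common {
                None => common = Some(r),
                Some(c) if c == r => {}
                Some(_) => return Repr::Memory,
            }
        }
        common.unwrap_or(Repr::Memory)
    }

    fn main() {
        assert_eq!(union_repr(&[Repr::Scalar, Repr::Scalar]), Repr::Scalar);
        assert_eq!(union_repr(&[Repr::Scalar, Repr::Memory]), Repr::Memory);
        assert_eq!(union_repr(&[]), Repr::Memory); // all-ZST union stays Memory
    }
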
diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs
index e029e1426b2..062447ea03f 100644
--- a/compiler/rustc_abi/src/layout/ty.rs
+++ b/compiler/rustc_abi/src/layout/ty.rs
@@ -83,8 +83,8 @@ impl<'a> Layout<'a> {
         &self.0.0.variants
     }
 
-    pub fn abi(self) -> Abi {
-        self.0.0.abi
+    pub fn backend_repr(self) -> BackendRepr {
+        self.0.0.backend_repr
     }
 
     pub fn largest_niche(self) -> Option<Niche> {
@@ -114,7 +114,7 @@ impl<'a> Layout<'a> {
     pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
         self.size() == data_layout.pointer_size
             && self.align().abi == data_layout.pointer_align.abi
-            && matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. }))
+            && matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. }))
     }
 }
 
@@ -196,9 +196,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         Ty: TyAbiInterface<'a, C>,
         C: HasDataLayout,
     {
-        match self.abi {
-            Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
-            Abi::Aggregate { .. } => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
+            BackendRepr::Memory { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_fp_element(cx)
                 } else {
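
`is_pointer_like` above amounts to three cheap checks on the layout. A rough standalone sketch with toy types, assuming 8-byte, 8-byte-aligned pointers:

    struct Layout { size: u64, align: u64, scalar_initialized: bool }

    // A layout is "pointer-like" when it is exactly pointer-sized, pointer-
    // aligned, and represented as a single initialized scalar (toy version
    // of the check above).
    fn is_pointer_like(l: &Layout, ptr_size: u64, ptr_align: u64) -> bool {
        l.size == ptr_size && l.align == ptr_align && l.scalar_initialized
    }

    fn main() {
        let usize_like = Layout { size: 8, align: 8, scalar_initialized: true };
        let unaligned = Layout { size: 8, align: 4, scalar_initialized: true };
        assert!(is_pointer_like(&usize_like, 8, 8));
        assert!(!is_pointer_like(&unaligned, 8, 8));
    }
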
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 41922aee648..fac1122c4df 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1344,11 +1344,19 @@ impl AddressSpace {
     pub const DATA: Self = AddressSpace(0);
 }
 
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
+/// The way we represent values to the backend
+///
+/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
+/// In reality, this implies little about that, but is mostly used to describe the syntactic form
+/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
+/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
+/// how the value will be lowered to the calling convention, in itself.
+///
+/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
+/// and larger values will usually prefer to be represented as memory.
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Abi {
+pub enum BackendRepr {
     Uninhabited,
     Scalar(Scalar),
     ScalarPair(Scalar, Scalar),
@@ -1356,19 +1364,23 @@ pub enum Abi {
         element: Scalar,
         count: u64,
     },
-    Aggregate {
+    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
+    Memory {
         /// If true, the size is exact, otherwise it's only a lower bound.
         sized: bool,
     },
 }
 
-impl Abi {
+impl BackendRepr {
     /// Returns `true` if the layout corresponds to an unsized type.
     #[inline]
     pub fn is_unsized(&self) -> bool {
         match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Aggregate { sized } => !sized,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::Vector { .. } => false,
+            BackendRepr::Memory { sized } => !sized,
         }
     }
 
@@ -1381,7 +1393,7 @@ impl Abi {
     #[inline]
     pub fn is_signed(&self) -> bool {
         match self {
-            Abi::Scalar(scal) => match scal.primitive() {
+            BackendRepr::Scalar(scal) => match scal.primitive() {
                 Primitive::Int(_, signed) => signed,
                 _ => false,
             },
@@ -1392,61 +1404,67 @@ impl Abi {
     /// Returns `true` if this is an uninhabited type
     #[inline]
     pub fn is_uninhabited(&self) -> bool {
-        matches!(*self, Abi::Uninhabited)
+        matches!(*self, BackendRepr::Uninhabited)
     }
 
     /// Returns `true` if this is a scalar type
     #[inline]
     pub fn is_scalar(&self) -> bool {
-        matches!(*self, Abi::Scalar(_))
+        matches!(*self, BackendRepr::Scalar(_))
     }
 
     /// Returns `true` if this is a bool
     #[inline]
     pub fn is_bool(&self) -> bool {
-        matches!(*self, Abi::Scalar(s) if s.is_bool())
+        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
     }
 
     /// Returns the fixed alignment of this ABI, if any is mandated.
     pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
         Some(match *self {
-            Abi::Scalar(s) => s.align(cx),
-            Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
-            Abi::Vector { element, count } => {
+            BackendRepr::Scalar(s) => s.align(cx),
+            BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
+            BackendRepr::Vector { element, count } => {
                 cx.data_layout().vector_align(element.size(cx) * count)
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
         })
     }
 
     /// Returns the fixed size of this ABI, if any is mandated.
     pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
         Some(match *self {
-            Abi::Scalar(s) => {
+            BackendRepr::Scalar(s) => {
                 // No padding in scalars.
                 s.size(cx)
             }
-            Abi::ScalarPair(s1, s2) => {
+            BackendRepr::ScalarPair(s1, s2) => {
                 // May have some padding between the pair.
                 let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                 (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
             }
-            Abi::Vector { element, count } => {
+            BackendRepr::Vector { element, count } => {
                 // No padding in vectors, except possibly for trailing padding
                 // to make the size a multiple of align (e.g. for vectors of size 3).
                 (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
         })
     }
 
     /// Discard validity range information and allow undef.
     pub fn to_union(&self) -> Self {
         match *self {
-            Abi::Scalar(s) => Abi::Scalar(s.to_union()),
-            Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
-            Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count },
-            Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
+            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
+            BackendRepr::ScalarPair(s1, s2) => {
+                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
+            }
+            BackendRepr::Vector { element, count } => {
+                BackendRepr::Vector { element: element.to_union(), count }
+            }
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                BackendRepr::Memory { sized: true }
+            }
         }
     }
 
@@ -1454,12 +1472,12 @@ impl Abi {
         match (self, other) {
             // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
             // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
-            (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
+            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
             (
-                Abi::Vector { element: element_l, count: count_l },
-                Abi::Vector { element: element_r, count: count_r },
+                BackendRepr::Vector { element: element_l, count: count_l },
+                BackendRepr::Vector { element: element_r, count: count_r },
             ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
-            (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
+            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                 l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
             }
             // Everything else must be strictly identical.
@@ -1616,14 +1634,14 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
     /// must be taken into account.
     pub variants: Variants<FieldIdx, VariantIdx>,
 
-    /// The `abi` defines how this data is passed between functions, and it defines
-    /// value restrictions via `valid_range`.
+    /// The `backend_repr` defines how this data will be represented to the codegen backend,
+    /// and encodes value restrictions via `valid_range`.
     ///
     /// Note that this is entirely orthogonal to the recursive structure defined by
     /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
-    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+    /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
     /// have to be taken into account to find all fields of this layout.
-    pub abi: Abi,
+    pub backend_repr: BackendRepr,
 
     /// The leaf scalar with the largest number of invalid values
     /// (i.e. outside of its `valid_range`), if it exists.
@@ -1646,15 +1664,15 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
 impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if this is an aggregate type (including a ScalarPair!)
     pub fn is_aggregate(&self) -> bool {
-        match self.abi {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+        match self.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
         }
     }
 
     /// Returns `true` if this is an uninhabited type
     pub fn is_uninhabited(&self) -> bool {
-        self.abi.is_uninhabited()
+        self.backend_repr.is_uninhabited()
     }
 
     pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
@@ -1664,7 +1682,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
         LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
-            abi: Abi::Scalar(scalar),
+            backend_repr: BackendRepr::Scalar(scalar),
             largest_niche,
             size,
             align,
@@ -1686,7 +1704,7 @@ where
         let LayoutData {
             size,
             align,
-            abi,
+            backend_repr,
             fields,
             largest_niche,
             variants,
@@ -1696,7 +1714,7 @@ where
         f.debug_struct("Layout")
             .field("size", size)
             .field("align", align)
-            .field("abi", abi)
+            .field("abi", backend_repr)
             .field("fields", fields)
             .field("largest_niche", largest_niche)
             .field("variants", variants)
@@ -1732,12 +1750,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if the layout corresponds to an unsized type.
     #[inline]
     pub fn is_unsized(&self) -> bool {
-        self.abi.is_unsized()
+        self.backend_repr.is_unsized()
     }
 
     #[inline]
     pub fn is_sized(&self) -> bool {
-        self.abi.is_sized()
+        self.backend_repr.is_sized()
     }
 
     /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
@@ -1750,10 +1768,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
     /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
     pub fn is_zst(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Uninhabited => self.size.bytes() == 0,
-            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
+                false
+            }
+            BackendRepr::Uninhabited => self.size.bytes() == 0,
+            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
         }
     }
 
@@ -1768,8 +1788,8 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
         // 2nd point is quite hard to check though.
         self.size == other.size
             && self.is_sized() == other.is_sized()
-            && self.abi.eq_up_to_validity(&other.abi)
-            && self.abi.is_bool() == other.abi.is_bool()
+            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
+            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
             && self.align.abi == other.align.abi
             && self.max_repr_align == other.max_repr_align
             && self.unadjusted_abi_align == other.unadjusted_abi_align
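
To make the ScalarPair sizing rule in `inherent_size` concrete: the second scalar is placed after the first, rounded up to its own alignment, and the total is rounded up to the pair's alignment (the max of the two). A small worked sketch with standalone helpers mirroring that arithmetic:

    // Round `offset` up to the next multiple of `align` (a power of two),
    // matching the Size::align_to arithmetic used in the hunks above.
    fn align_to(offset: u64, align: u64) -> u64 {
        (offset + align - 1) & !(align - 1)
    }

    // Size of a ScalarPair: place the second scalar after the first at the
    // second's alignment, then round the total up to the pair's alignment,
    // mirroring what inherent_size does above.
    fn scalar_pair_size(size1: u64, align1: u64, size2: u64, align2: u64) -> u64 {
        let field2_offset = align_to(size1, align2);
        align_to(field2_offset + size2, align1.max(align2))
    }

    fn main() {
        // An (i32, i64)-shaped pair: second field lands at offset 8, total is 16.
        assert_eq!(align_to(4, 8), 8);
        assert_eq!(scalar_pair_size(4, 4, 8, 8), 16);
    }
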
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 8a1ee48c43c..855ca010611 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         match &self.ret.mode {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
-                if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                 }
             }
@@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 }
                 PassMode::Direct(attrs) => {
                     let i = apply(attrs);
-                    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                     }
                 }
@@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Pair(a, b) => {
                     let i = apply(a);
                     let ii = apply(b);
-                    if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+                    if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+                        arg.layout.backend_repr
+                    {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                         apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                     }
@@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         }
         if bx.cx.sess().opts.optimize != config::OptLevel::No
                 && llvm_util::get_version() < (19, 0, 0)
-                && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+                && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
                 && matches!(scalar.primitive(), Int(..))
                 // If the value is a boolean, the range is 0..2 and that ultimately
                 // become 0..0 when the type becomes i1, which would be rejected
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 3c30822a2e2..53758967552 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Type {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
         (
             X86(
@@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
@@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
@@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 _ => layout.llvm_type(cx),
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 8702532c36e..8e87869f946 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -545,13 +545,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.val.llval, place.val.align);
-                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index d04b5257619..c77e00aed9a 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(scalar) => {
+                match fn_abi.ret.layout.backend_repr {
+                    abi::BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             sym::raw_eq => {
-                use abi::Abi::*;
+                use abi::BackendRepr::*;
                 let tp_ty = fn_args.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
-                let use_integer_compare = match layout.abi() {
+                let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
                     Uninhabited | Vector { .. } => false,
-                    Aggregate { .. } => {
+                    Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
@@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
 
                 let llret_ty = if ret_ty.is_simd()
-                    && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+                    && let abi::BackendRepr::Memory { .. } =
+                        self.layout_of(ret_ty).layout.backend_repr
                 {
                     let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
                     let elem_ll_ty = match elem_ty.kind() {
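
The `raw_eq` dispatch above reduces to: scalar and scalar-pair layouts compare as integers, vectors and uninhabited layouts never take that path, and Memory layouts use a size threshold (reusing the small-aggregate threshold the comment mentions). A toy sketch of that decision, with the threshold left as a parameter since the exact cutoff lives in the ABI code:

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Repr { Scalar, ScalarPair, Vector, Uninhabited, Memory }

    // Decide whether a raw byte-equality test can be lowered to one integer
    // compare, mirroring the dispatch in the hunk above. The threshold is a
    // parameter here; the real code reuses the small-aggregate register threshold.
    fn use_integer_compare(repr: Repr, size_bytes: u64, max_integer_bytes: u64) -> bool {
        match repr {
            Repr::Scalar | Repr::ScalarPair => true,
            Repr::Vector | Repr::Uninhabited => false,
            Repr::Memory => size_bytes <= max_integer_bytes,
        }
    }

    fn main() {
        assert!(use_integer_compare(Repr::ScalarPair, 16, 16));
        assert!(use_integer_compare(Repr::Memory, 8, 16));
        assert!(!use_integer_compare(Repr::Memory, 64, 16)); // big aggregates: memcmp path
        assert!(!use_integer_compare(Repr::Vector, 16, 16));
    }
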
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 6be4c3f034f..2b05e24a7ba 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,7 +1,7 @@
 use std::fmt::Write;
 
 use rustc_abi::Primitive::{Float, Int, Pointer};
-use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
 ) -> &'a Type {
-    match layout.abi {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { element, count } => {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
     }
 
     let name = match layout.ty.kind() {
@@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }
 
     fn is_llvm_scalar_pair(&self) -> bool {
-        match self.abi {
-            Abi::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
 
@@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(scalar) = self.abi {
+        if let BackendRepr::Scalar(scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
             // can be either wide or thin (data pointers of wide pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        match self.abi {
-            Abi::Scalar(scalar) => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => {
                 if scalar.is_bool() {
                     return cx.type_i1();
                 }
             }
-            Abi::ScalarPair(..) => {
+            BackendRepr::ScalarPair(..) => {
                 // An immediate pair always contains just the two elements, without any padding
                 // filler, as it should never be stored to memory.
                 return cx.type_struct(
@@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let Abi::ScalarPair(a, b) = self.abi else {
+        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
         let scalar = [a, b][index];
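
The repeated warning in this file that the result must be derived from the layout alone has a directly observable consequence; a small check (this much is guaranteed by `repr(transparent)` itself):

    use std::mem::{align_of, size_of};

    #[repr(transparent)]
    struct Wrapper(u64);

    fn main() {
        // a transparent wrapper shares its inner type's layout, so any
        // type-based (rather than layout-based) decision would diverge here
        assert_eq!(size_of::<Wrapper>(), size_of::<u64>());
        assert_eq!(align_of::<Wrapper>(), align_of::<u64>());
    }
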
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index a17a127f014..283740fa664 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1532,7 +1532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
                 llval = bx.load(bx.backend_type(arg.layout), llval, align);
-                if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                     }
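
The `WrappingRange { start: 0, end: 1 }` attached in the hunk above encodes the guaranteed memory representation of `bool`; a trivial check of that invariant:

    fn main() {
        // `bool` occupies one byte whose only valid values are 0 and 1,
        // exactly the range metadata emitted above
        assert_eq!(false as u8, 0);
        assert_eq!(true as u8, 1);
    }
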
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 15f45b226f5..54b9c9cc89f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -1,8 +1,8 @@
+use rustc_abi::BackendRepr;
 use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::Abi;
 
 use super::FunctionCx;
 use crate::errors;
@@ -86,7 +86,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     .map(|field| {
                         if let Some(prim) = field.try_to_scalar() {
                             let layout = bx.layout_of(field_ty);
-                            let Abi::Scalar(scalar) = layout.abi else {
+                            let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                                 bug!("from_const: invalid ByVal layout: {:#?}", layout);
                             };
                             bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 146f55f95c2..21d20475408 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -2,6 +2,7 @@ use std::collections::hash_map::Entry;
 use std::marker::PhantomData;
 use std::ops::Range;
 
+use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_index::IndexVec;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -11,7 +12,6 @@ use rustc_middle::{bug, mir, ty};
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{Symbol, kw};
 use rustc_span::{BytePos, Span, hygiene};
-use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};
 
 use super::operand::{OperandRef, OperandValue};
 use super::place::{PlaceRef, PlaceValue};
@@ -510,7 +510,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // be marked as a `LocalVariable` for MSVC debuggers to visualize
                         // their data correctly. (See #81894 & #88625)
                         let var_ty_layout = self.cx.layout_of(var_ty);
-                        if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+                        if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr {
                             VariableKind::LocalVariable
                         } else {
                             VariableKind::ArgumentVariable(arg_index)
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 88ceff327d0..19101ec2d1b 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -4,7 +4,7 @@ use std::fmt;
 use arrayvec::ArrayVec;
 use either::Either;
 use rustc_abi as abi;
-use rustc_abi::{Abi, Align, Size};
+use rustc_abi::{Align, BackendRepr, Size};
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
 use rustc_middle::mir::{self, ConstValue};
@@ -163,7 +163,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let val = match val {
             ConstValue::Scalar(x) => {
-                let Abi::Scalar(scalar) = layout.abi else {
+                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                     bug!("from_const: invalid ByVal layout: {:#?}", layout);
                 };
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
@@ -171,7 +171,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
             ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
             ConstValue::Slice { data, meta } => {
-                let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                 };
                 let a = Scalar::from_pointer(
@@ -221,14 +221,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         // case where some of the bytes are initialized and others are not. So, we need an extra
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
-        match layout.abi {
-            Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
+        match layout.backend_repr {
+            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                 let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
-            Abi::ScalarPair(
+            BackendRepr::ScalarPair(
                 a @ abi::Scalar::Initialized { .. },
                 b @ abi::Scalar::Initialized { .. },
             ) => {
@@ -322,7 +322,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         llval: V,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        let val = if let Abi::ScalarPair(..) = layout.abi {
+        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
 
             // Deconstruct the immediate aggregate.
@@ -343,7 +343,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);
 
-        let mut val = match (self.val, self.layout.abi) {
+        let mut val = match (self.val, self.layout.backend_repr) {
             // If the field is ZST, it has no data.
             _ if field.is_zst() => OperandValue::ZeroSized,
 
@@ -356,7 +356,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
 
             // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+            (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                 if offset.bytes() == 0 {
                     assert_eq!(field.size, a.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
@@ -368,30 +368,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
 
             // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+            (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
                 OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
             }
 
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
         };
 
-        match (&mut val, field.abi) {
+        match (&mut val, field.backend_repr) {
             (OperandValue::ZeroSized, _) => {}
             (
                 OperandValue::Immediate(llval),
-                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
+                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
             ) => {
                 // Bools in union fields need to be truncated.
                 *llval = bx.to_immediate(*llval, field);
             }
-            (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
                 // Bools in union fields need to be truncated.
                 *a = bx.to_immediate_scalar(*a, a_abi);
                 *b = bx.to_immediate_scalar(*b, b_abi);
             }
             // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
-            (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
-                assert_matches!(self.layout.abi, Abi::Vector { .. });
+            (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
+                assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });
 
                 let llfield_ty = bx.cx().backend_type(field);
 
@@ -400,7 +400,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 bx.store(*llval, llptr, field.align.abi);
                 *llval = bx.load(llfield_ty, llptr, field.align.abi);
             }
-            (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
+            (
+                OperandValue::Immediate(_),
+                BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
+            ) => {
                 bug!()
             }
             (OperandValue::Pair(..), _) => bug!(),
@@ -494,7 +497,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                 bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
             }
             OperandValue::Pair(a, b) => {
-                let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                     bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                 };
                 let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
@@ -645,7 +648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     // However, some SIMD types do not actually use the vector ABI
                     // (in particular, packed SIMD types do not). Ensure we exclude those.
                     let layout = bx.layout_of(constant_ty);
-                    if let Abi::Vector { .. } = layout.abi {
+                    if let BackendRepr::Vector { .. } = layout.backend_repr {
                         let (llval, ty) = self.immediate_const_vector(bx, constant);
                         return OperandRef {
                             val: OperandValue::Immediate(llval),
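
On the "bools in union fields need to be truncated" comments above: in memory a `bool` fills a whole byte, but as a backend immediate only the low bit is meaningful. A hedged sketch of what the truncation amounts to (mirroring an LLVM `trunc i8 -> i1`, not an actual compiler API):

    fn to_i1(byte: u8) -> bool {
        // keep only the low bit, as an i8 -> i1 truncation would
        byte & 1 != 0
    }

    fn main() {
        assert!(to_i1(1));
        assert!(!to_i1(0));
    }
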
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 6e8c193cd75..86cf0f9614d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1136,17 +1136,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValueKind::ZeroSized
         } else if self.cx.is_backend_immediate(layout) {
             assert!(!self.cx.is_backend_scalar_pair(layout));
-            OperandValueKind::Immediate(match layout.abi {
-                abi::Abi::Scalar(s) => s,
-                abi::Abi::Vector { element, .. } => element,
+            OperandValueKind::Immediate(match layout.backend_repr {
+                abi::BackendRepr::Scalar(s) => s,
+                abi::BackendRepr::Vector { element, .. } => element,
                 x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
             })
         } else if self.cx.is_backend_scalar_pair(layout) {
-            let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
+            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
                 span_bug!(
                     self.mir.span,
                     "Couldn't translate {:?} as backend scalar pair",
-                    layout.abi,
+                    layout.backend_repr,
                 );
             };
             OperandValueKind::Pair(s1, s2)
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 50a51714146..768a0439ab5 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -1,13 +1,13 @@
 use std::assert_matches::assert_matches;
 use std::ops::Deref;
 
+use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{Instance, Ty};
 use rustc_session::config::OptLevel;
 use rustc_span::Span;
 use rustc_target::abi::call::FnAbi;
-use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
 
 use super::abi::AbiBuilderMethods;
 use super::asm::AsmBuilderMethods;
@@ -162,7 +162,7 @@ pub trait BuilderMethods<'a, 'tcx>:
 
     fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
     fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
-        if let Abi::Scalar(scalar) = layout.abi {
+        if let BackendRepr::Scalar(scalar) = layout.backend_repr {
             self.to_immediate_scalar(val, scalar)
         } else {
             val
diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
index 743924faa21..bc2661c4fc7 100644
--- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
@@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
         interp_ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
                 // Types can differ, e.g. fn ptrs with different `for`.
-                assert_eq!(left.layout.abi, right.layout.abi);
+                assert_eq!(left.layout.backend_repr, right.layout.backend_repr);
                 let size = ecx.pointer_size();
                 // Just compare the bits. ScalarPairs are compared lexicographically.
                 // We thus always compare pairs and simply fill scalars up with 0.
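
"Compare pairs and simply fill scalars up with 0" means a lone scalar x participates as the pair (x, 0), so one lexicographic comparison covers both shapes; a sketch:

    fn main() {
        // widen a scalar to (x, 0); tuples already compare lexicographically,
        // so a single code path handles scalars and scalar pairs
        let as_pair = |x: u128| (x, 0u128);
        assert!(as_pair(1) < as_pair(2));
        assert!((1u128, 0u128) < (1u128, 7u128));
    }
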
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 7319c251bbd..81b9d73b952 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -1,6 +1,7 @@
 use std::sync::atomic::Ordering::Relaxed;
 
 use either::{Left, Right};
+use rustc_abi::{self as abi, BackendRepr};
 use rustc_hir::def::DefKind;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{self, Abi};
 use tracing::{debug, instrument, trace};
 
 use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
     // type (it's used throughout the compiler and having it work just on literals is not enough)
     // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
     // from its byte-serialized form).
-    let force_as_immediate = match op.layout.abi {
-        Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+    let force_as_immediate = match op.layout.backend_repr {
+        BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
         // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
         // input `op` is a place, then turning it into a `ConstValue` and back into an `OpTy` will
         // not have to generate any duplicate allocations (we preserve the original `AllocId` in
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 9e80e666ba9..ea88b2ed22e 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -1,10 +1,10 @@
+use rustc_abi::{BackendRepr, VariantIdx};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, VariantIdx};
 use tracing::{debug, instrument, trace};
 
 use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
@@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
             let val = ecx.read_immediate(place).unwrap();
             // We could allow wide raw pointers where both sides are integers in the future,
             // but for now we reject them.
-            if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+            if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                 return Err(ValTreeCreationError::NonSupportedType(ty));
             }
             let val = val.to_scalar();
@@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
                 // Fast path to avoid some allocations.
                 return mir::ConstValue::ZeroSized;
             }
-            if layout.abi.is_scalar()
+            if layout.backend_repr.is_scalar()
                 && (matches!(ty.kind(), ty::Tuple(_))
                     || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
             {
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index 85d99900c6c..1915bf75c95 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // must be compatible. So we just accept everything with Pointer ABI as compatible,
         // even if this will accept some code that is not stably guaranteed to work.
         // This also handles function pointers.
-        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
-            abi::Abi::Scalar(s) => match s.primitive() {
+        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
+            abi::BackendRepr::Scalar(s) => match s.primitive() {
                 abi::Primitive::Pointer(addr_space) => Some(addr_space),
                 _ => None,
             },
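
The "accept everything with Pointer ABI" rule above leans on all thin pointers sharing one scalar representation regardless of pointee; an observable corollary:

    use std::mem::size_of;

    fn main() {
        // thin raw pointers, references, and Box are all one pointer-sized
        // scalar, whatever they point to
        assert_eq!(size_of::<*const u8>(), size_of::<&u64>());
        assert_eq!(size_of::<*const u8>(), size_of::<Box<i32>>());
    }
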
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 64b15611316..60d5e904bd9 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         cast_ty: Ty<'tcx>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         // Let's make sure v is sign-extended *if* it has a signed type.
-        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+        let signed = src_layout.backend_repr.is_signed(); // Also asserts that the repr is `Scalar`.
 
         let v = match src_layout.ty.kind() {
             Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,
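
The sign-extension note above is the ordinary integer-cast rule; a concrete check:

    fn main() {
        // signed sources sign-extend, unsigned sources zero-extend: the same
        // byte 0xFF converts differently depending on signedness
        assert_eq!(-1i8 as i32, -1); // 0xFF -> 0xFFFF_FFFF
        assert_eq!(0xFFu8 as i32, 255); // 0xFF -> 0x0000_00FF
    }
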
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index feed0860679..bb4ac9556ea 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // Read tag and sanity-check `tag_layout`.
         let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
         assert_eq!(tag_layout.size, tag_val.layout.size);
-        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
         trace!("tag value: {}", tag_val);
 
         // Figure out which discriminant and variant this corresponds to.
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 6148123bdfe..80e14ee887c 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
         interp_ok(if overflowed.to_bool()? {
             let size = l.layout.size;
-            if l.layout.abi.is_signed() {
+            if l.layout.backend_repr.is_signed() {
                 // For signed ints the saturated value depends on the sign of the first
                 // term since the sign of the second term can be inferred from this and
                 // the fact that the operation has overflowed (if either is 0 no
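
The saturation rule sketched in the comment above (once an overflow is known, the sign of the first term picks the bound) matches the standard library's saturating operations:

    fn main() {
        // after an overflowing add/sub, the first operand's sign tells us
        // which bound was hit
        assert_eq!(i8::MAX.saturating_add(1), i8::MAX);
        assert_eq!(i8::MIN.saturating_sub(1), i8::MIN);
        assert_eq!(250u8.saturating_add(10), u8::MAX);
    }
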
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index cd5e2aeca85..43ae98e74b0 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -5,7 +5,7 @@ use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
 use rustc_abi as abi;
-use rustc_abi::{Abi, HasDataLayout, Size};
+use rustc_abi::{BackendRepr, HasDataLayout, Size};
 use rustc_hir::def::Namespace;
 use rustc_middle::mir::interpret::ScalarSizeMismatch;
 use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
@@ -114,9 +114,9 @@ impl<Prov: Provenance> Immediate<Prov> {
     }
 
     /// Assert that this immediate is a valid value for the given ABI.
-    pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
+    pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
         match (self, abi) {
-            (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+            (Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
                 assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
                 if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
                     // This is not a pointer, it should not carry provenance.
@@ -126,7 +126,7 @@ impl<Prov: Provenance> Immediate<Prov> {
                     );
                 }
             }
-            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+            (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
                 assert_eq!(
                     a_val.size(),
                     a.size(cx),
@@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
 impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
-        debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
+        debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
         debug_assert_eq!(val.size(), layout.size);
         ImmTy { imm: val.into(), layout }
     }
@@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
         debug_assert!(
-            matches!(layout.abi, Abi::ScalarPair(..)),
+            matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
             "`ImmTy::from_scalar_pair` on non-scalar-pair layout"
         );
         let imm = Immediate::ScalarPair(a, b);
@@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
         // Without a `cx` we cannot call `assert_matches_abi`.
         debug_assert!(
-            match (imm, layout.abi) {
-                (Immediate::Scalar(..), Abi::Scalar(..)) => true,
-                (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+            match (imm, layout.backend_repr) {
+                (Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
+                (Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
                 (Immediate::Uninit, _) if layout.is_sized() => true,
                 _ => false,
             },
@@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
         // Verify that the input matches its type.
         if cfg!(debug_assertions) {
-            self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
+            self.assert_matches_abi(
+                self.layout.backend_repr,
+                "invalid input to Immediate::offset",
+                cx,
+            );
         }
         // `ImmTy` has already been checked to be in-bounds, so we can just check directly if this
         // remains in-bounds. This cannot actually be violated since projections are type-checked
@@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         );
         // This makes several assumptions about what layouts we will encounter; we match what
         // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
-        let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+        let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
             // If the entire value is uninit, then so is the field (can happen in ConstProp).
             (Immediate::Uninit, _) => Immediate::Uninit,
             // If the field is uninhabited, we can forget the data (can happen in ConstProp).
             // `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
             // has an `Uninhabited` variant, which means this case is possible.
-            _ if layout.abi.is_uninhabited() => Immediate::Uninit,
+            _ if layout.is_uninhabited() => Immediate::Uninit,
             // the field contains no information, can be left uninit
             // (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
             _ if layout.is_zst() => Immediate::Uninit,
             // some fieldless enum variants can have non-zero size but still `Memory` ABI... try
             // to detect those here and also give them no data
-            _ if matches!(layout.abi, Abi::Aggregate { .. })
+            _ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
                 && matches!(layout.variants, abi::Variants::Single { .. })
                 && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
             {
@@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
                 **self
             }
             // extract fields from types with `ScalarPair` ABI
-            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+            (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
                 Immediate::from(if offset.bytes() == 0 {
                     a_val
                 } else {
@@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
             ),
         };
         // Ensure the new layout matches the new value.
-        inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
+        inner_val.assert_matches_abi(
+            layout.backend_repr,
+            "invalid field type in Immediate::offset",
+            cx,
+        );
 
         ImmTy::from_immediate(inner_val, layout)
     }
@@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // case where some of the bytes are initialized and others are not. So, we need an extra
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
-        interp_ok(match mplace.layout.abi {
-            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
+        interp_ok(match mplace.layout.backend_repr {
+            BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
                 let size = s.size(self);
                 assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
                 let scalar = alloc.read_scalar(
@@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 )?;
                 Some(ImmTy::from_scalar(scalar, mplace.layout))
             }
-            Abi::ScalarPair(
+            BackendRepr::ScalarPair(
                 abi::Scalar::Initialized { value: a, .. },
                 abi::Scalar::Initialized { value: b, .. },
             ) => {
@@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         if !matches!(
-            op.layout().abi,
-            Abi::Scalar(abi::Scalar::Initialized { .. })
-                | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
+            op.layout().backend_repr,
+            BackendRepr::Scalar(abi::Scalar::Initialized { .. })
+                | BackendRepr::ScalarPair(
+                    abi::Scalar::Initialized { .. },
+                    abi::Scalar::Initialized { .. }
+                )
         ) {
             span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
         }
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 380db907481..cf280e0c1ae 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let l_bits = left.layout.size.bits();
             // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
             // the one MIR operator that does *not* directly map to a single LLVM operation.)
-            let (shift_amount, overflow) = if right.layout.abi.is_signed() {
+            let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
                 let shift_amount = r_signed();
                 let rem = shift_amount.rem_euclid(l_bits.into());
                 // `rem` is guaranteed positive, so the `unwrap` cannot fail
@@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             };
             let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this into the range `0..size` so this will always fit
             // Compute the shifted result.
-            let result = if left.layout.abi.is_signed() {
+            let result = if left.layout.backend_repr.is_signed() {
                 let l = l_signed();
                 let result = match bin_op {
                     Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
@@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                 throw_ub!(ShiftOverflow {
                     intrinsic,
-                    shift_amount: if right.layout.abi.is_signed() {
+                    shift_amount: if right.layout.backend_repr.is_signed() {
                         Either::Right(r_signed())
                     } else {
                         Either::Left(r_unsigned())
@@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let size = left.layout.size;
 
         // Operations that need special treatment for signed integers
-        if left.layout.abi.is_signed() {
+        if left.layout.backend_repr.is_signed() {
             let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                 Lt => Some(i128::lt),
                 Le => Some(i128::le),
@@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             BitXor => ImmTy::from_uint(l ^ r, left.layout),
 
             _ => {
-                assert!(!left.layout.abi.is_signed());
+                assert!(!left.layout.backend_repr.is_signed());
                 let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                     Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
                     Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
@@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 }
 
                 let offset_bytes = val.to_target_isize(self)?;
-                if !right.layout.abi.is_signed() && offset_bytes < 0 {
+                if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
                     // We were supposed to do an unsigned offset but the result is negative -- this
                     // can only mean that the cast wrapped around.
                     throw_ub!(PointerArithOverflow)
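
The shift handling above normalizes the amount modulo the bit width, using `rem_euclid` so that a negative signed amount still lands in `0..bits`; a sketch of that arithmetic:

    fn main() {
        let bits: i128 = 32;
        // a negative shift amount wraps to the top of the range...
        assert_eq!((-1i128).rem_euclid(bits), 31);
        // ...and anything >= bits wraps back down; both cases count as overflow
        assert_eq!(33i128.rem_euclid(bits), 1);
    }
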
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 81b926a1b65..139a1db60e0 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -5,11 +5,11 @@
 use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
+use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
 use tracing::{instrument, trace};
 
 use super::{
@@ -659,7 +659,7 @@ where
                 // Unfortunately this is too expensive to do in release builds.
                 if cfg!(debug_assertions) {
                     src.assert_matches_abi(
-                        local_layout.abi,
+                        local_layout.backend_repr,
                         "invalid immediate for given destination place",
                         self,
                     );
@@ -683,7 +683,11 @@ where
     ) -> InterpResult<'tcx> {
         // We use the sizes from `value` below.
         // Ensure that it matches the type of the place it is written to.
-        value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
+        value.assert_matches_abi(
+            layout.backend_repr,
+            "invalid immediate for given destination place",
+            self,
+        );
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
@@ -700,7 +704,7 @@ where
                 alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
-                let Abi::ScalarPair(a, b) = layout.abi else {
+                let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
                     span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 8b5bb1332e7..cd2c1ef3613 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -11,6 +11,10 @@ use std::num::NonZero;
 
 use either::{Left, Right};
 use hir::def::DefKind;
+use rustc_abi::{
+    BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
+    WrappingRange,
+};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
@@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
 use rustc_span::symbol::{Symbol, sym};
-use rustc_target::abi::{
-    Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
-};
 use tracing::trace;
 
 use super::machine::AllocMap;
@@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         // Reset provenance: ensure slice tail metadata does not preserve provenance,
         // and ensure all pointers do not preserve partial provenance.
         if self.reset_provenance_and_padding {
-            if matches!(imm.layout.abi, Abi::Scalar(..)) {
+            if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
                 // A thin pointer. If it has provenance, we don't have to do anything.
                 // If it does not, ensure we clear the provenance in memory.
                 if matches!(imm.to_scalar(), Scalar::Int(..)) {
@@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                     let elem = layout.field(cx, 0);
 
                     // Fast-path for large arrays of simple types that do not contain any padding.
-                    if elem.abi.is_scalar() {
+                    if elem.backend_repr.is_scalar() {
                         out.add_range(base_offset, elem.size * count);
                     } else {
                         for idx in 0..count {
@@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
         // FIXME: We could avoid some redundant checks here. For newtypes wrapping
         // scalars, we do the same check on every "level" (e.g., first we check
         // MyNewtype and then the scalar in there).
-        match val.layout.abi {
-            Abi::Uninhabited => {
+        match val.layout.backend_repr {
+            BackendRepr::Uninhabited => {
                 let ty = val.layout.ty;
                 throw_validation_failure!(self.path, UninhabitedVal { ty });
             }
-            Abi::Scalar(scalar_layout) => {
+            BackendRepr::Scalar(scalar_layout) => {
                 if !scalar_layout.is_uninit_valid() {
                     // There is something to check here.
                     let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
                     self.visit_scalar(scalar, scalar_layout)?;
                 }
             }
-            Abi::ScalarPair(a_layout, b_layout) => {
+            BackendRepr::ScalarPair(a_layout, b_layout) => {
                 // We can only proceed if *both* scalars need to be initialized.
                 // FIXME: find a way to also check ScalarPair when one side can be uninit but
                 // the other must be init.
@@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                     self.visit_scalar(b, b_layout)?;
                 }
             }
-            Abi::Vector { .. } => {
+            BackendRepr::Vector { .. } => {
                 // No checks here, we assume layout computation gets this right.
                 // (This is harder to check since Miri does not represent these as `Immediate`. We
                 // also cannot use field projections since this might be a newtype around a vector.)
             }
-            Abi::Aggregate { .. } => {
+            BackendRepr::Memory { .. } => {
                 // Nothing to do.
             }
         }
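
On the `Uninhabited` arm above: such a type has no valid value at all, which layout can also exploit; a sketch using `Infallible` (the second assertion relies on rustc's current uninhabited-variant folding, not a stable guarantee):

    use std::convert::Infallible;
    use std::mem::size_of;

    fn main() {
        // no values, no bytes
        assert_eq!(size_of::<Infallible>(), 0);
        // current rustc folds the uninhabited Err variant away entirely
        assert_eq!(size_of::<Result<u32, Infallible>>(), size_of::<u32>());
    }
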
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 7a8b976dfc4..f743525f359 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,9 +1,9 @@
+use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{
     HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
 };
 use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
-use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
 
 use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
 use crate::interpret::{InterpCx, MemoryKind};
@@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
     };
 
     // Check the ABI.
-    let valid = match this.abi {
-        Abi::Uninhabited => false, // definitely UB
-        Abi::Scalar(s) => scalar_allows_raw_init(s),
-        Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
-        Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
-        Abi::Aggregate { .. } => true, // Fields are checked below.
+    let valid = match this.backend_repr {
+        BackendRepr::Uninhabited => false, // definitely UB
+        BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
+        BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+        BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+        BackendRepr::Memory { .. } => true, // Fields are checked below.
     };
     if !valid {
         // This is definitely not okay.
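
`scalar_allows_raw_init` rejects, for instance, an all-zero bit pattern for a scalar whose valid range excludes 0; the `NonZero` types are the canonical case:

    use std::num::NonZeroU8;

    fn main() {
        // zero lies outside NonZeroU8's valid range 1..=255, so
        // zero-initializing one is exactly the UB this check flags
        assert!(NonZeroU8::new(0).is_none());
        assert!(NonZeroU8::new(1).is_some());
    }
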
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index dbb8c667532..6400685101b 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -16,6 +16,7 @@
 use std::fmt::Write;
 
 use ast::token::TokenKind;
+use rustc_abi::BackendRepr;
 use rustc_ast::tokenstream::{TokenStream, TokenTree};
 use rustc_ast::visit::{FnCtxt, FnKind};
 use rustc_ast::{self as ast, *};
@@ -40,7 +41,6 @@ use rustc_span::edition::Edition;
 use rustc_span::source_map::Spanned;
 use rustc_span::symbol::{Ident, Symbol, kw, sym};
 use rustc_span::{BytePos, InnerSpan, Span};
-use rustc_target::abi::Abi;
 use rustc_target::asm::InlineAsmArch;
 use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
 use rustc_trait_selection::traits::misc::type_allowed_to_implement_copy;
@@ -2466,7 +2466,9 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
 
             // Check if this ADT has a constrained layout (like `NonNull` and friends).
             if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty)) {
-                if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &layout.abi {
+                if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+                    &layout.backend_repr
+                {
                     let range = scalar.valid_range(cx);
                     let msg = if !range.contains(0) {
                         "must be non-null"
diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs
index abe4e3e78ee..394ea798d3e 100644
--- a/compiler/rustc_lint/src/foreign_modules.rs
+++ b/compiler/rustc_lint/src/foreign_modules.rs
@@ -217,7 +217,7 @@ fn structurally_same_type<'tcx>(
         // `extern` blocks cannot be generic, so we'll always get a layout here.
         let a_layout = tcx.layout_of(param_env.and(a)).unwrap();
         let b_layout = tcx.layout_of(param_env.and(b)).unwrap();
-        assert_eq!(a_layout.abi, b_layout.abi);
+        assert_eq!(a_layout.backend_repr, b_layout.backend_repr);
         assert_eq!(a_layout.size, b_layout.size);
         assert_eq!(a_layout.align, b_layout.align);
     }
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 0751d35cb9c..88878a018e7 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -1,6 +1,7 @@
 use std::iter;
 use std::ops::ControlFlow;
 
+use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_errors::DiagMessage;
 use rustc_hir::{Expr, ExprKind};
@@ -13,7 +14,6 @@ use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::symbol::sym;
 use rustc_span::{Span, Symbol, source_map};
-use rustc_target::abi::{Abi, TagEncoding, Variants, WrappingRange};
 use rustc_target::spec::abi::Abi as SpecAbi;
 use tracing::debug;
 use {rustc_ast as ast, rustc_hir as hir};
@@ -776,8 +776,8 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
             bug!("should be able to compute the layout of non-polymorphic type");
         }
 
-        let field_ty_abi = &field_ty_layout.ok()?.abi;
-        if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
+        let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
+        if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
             match field_ty_scalar.valid_range(&tcx) {
                 WrappingRange { start: 0, end }
                     if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
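
The `WrappingRange` patterns matched above are what make the classic nullable-pointer optimization FFI-safe, observable as:

    use std::mem::size_of;

    fn main() {
        // references exclude the null pattern, so None borrows it and
        // Option<&T> is ABI-compatible with a plain pointer
        assert_eq!(size_of::<Option<&u8>>(), size_of::<*const u8>());
    }
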
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 2c7a3ffd04c..0560ffe058a 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -4,8 +4,9 @@ use std::{cmp, fmt};
 
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    Abi, AddressSpace, Align, FieldsShape, HasDataLayout, Integer, LayoutCalculator, LayoutData,
-    PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, Variants,
+    AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutCalculator,
+    LayoutData, PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
+    Variants,
 };
 use rustc_error_messages::DiagMessage;
 use rustc_errors::{
@@ -757,7 +758,7 @@ where
                         Some(fields) => FieldsShape::Union(fields),
                         None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
                     },
-                    abi: Abi::Uninhabited,
+                    backend_repr: BackendRepr::Uninhabited,
                     largest_niche: None,
                     align: tcx.data_layout.i8_align,
                     size: Size::ZERO,
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index fd949a53384..2357dd73490 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::util::IntTypeExt;
 use rustc_middle::ty::{self, Ty, UpvarArgs};
 use rustc_span::source_map::Spanned;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{Abi, FieldIdx, Primitive};
+use rustc_target::abi::{BackendRepr, FieldIdx, Primitive};
 use tracing::debug;
 
 use crate::build::expr::as_place::PlaceBase;
@@ -207,7 +207,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     );
                     let (op, ty) = (Operand::Move(discr), discr_ty);
 
-                    if let Abi::Scalar(scalar) = layout.unwrap().abi
+                    if let BackendRepr::Scalar(scalar) = layout.unwrap().backend_repr
                         && !scalar.is_always_valid(&this.tcx)
                         && let Primitive::Int(int_width, _signed) = scalar.primitive()
                     {
diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs
index f7d4a082779..80151b8ba2d 100644
--- a/compiler/rustc_mir_dataflow/src/value_analysis.rs
+++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs
@@ -858,7 +858,7 @@ impl<'tcx> Map<'tcx> {
             // Allocate a value slot if it doesn't have one, and the user requested one.
             assert!(place_info.value_index.is_none());
             if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty))
-                && layout.abi.is_scalar()
+                && layout.backend_repr.is_scalar()
             {
                 place_info.value_index = Some(self.value_count.into());
                 self.value_count += 1;
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 002216f50f2..ca24d0d7e70 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -2,6 +2,7 @@
 //!
 //! Currently, this pass only propagates scalar values.
 
+use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
 use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
 use rustc_const_eval::interpret::{
     ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
@@ -20,7 +21,6 @@ use rustc_mir_dataflow::value_analysis::{
 };
 use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
 use tracing::{debug, debug_span, instrument};
 
 // These constants are somewhat random guesses and have not been optimized.
@@ -457,7 +457,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
                     // a pair and sometimes not. But as a hack we always return a pair
                     // and just make the 2nd component `Bottom` when it does not exist.
                     Some(val) => {
-                        if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                        if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                             let (val, overflow) = val.to_scalar_pair();
                             (FlatSet::Elem(val), FlatSet::Elem(overflow))
                         } else {
@@ -470,7 +470,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
             // Exactly one side is known, attempt some algebraic simplifications.
             (FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => {
                 let layout = const_arg.layout;
-                if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) {
+                if !matches!(layout.backend_repr, rustc_target::abi::BackendRepr::Scalar(..)) {
                     return (FlatSet::Top, FlatSet::Top);
                 }
 
@@ -589,13 +589,13 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
         }
 
         let place = map.find(place.as_ref())?;
-        if layout.abi.is_scalar()
+        if layout.backend_repr.is_scalar()
             && let Some(value) = propagatable_scalar(place, state, map)
         {
             return Some(Const::Val(ConstValue::Scalar(value), ty));
         }
 
-        if matches!(layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        if matches!(layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
             let alloc_id = ecx
                 .intern_with_temp_alloc(layout, |ecx, dest| {
                     try_write_constant(ecx, dest, place, ty, state, map)
@@ -641,7 +641,7 @@ fn try_write_constant<'tcx>(
     }
 
     // Fast path for scalars.
-    if layout.abi.is_scalar()
+    if layout.backend_repr.is_scalar()
         && let Some(value) = propagatable_scalar(place, state, map)
     {
         return ecx.write_immediate(Immediate::Scalar(value), dest);
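
The (value, overflow) pair threaded through the const-propagation code above is the shape MIR's checked arithmetic produces; the standard library equivalent:

    fn main() {
        // MIR's CheckedAdd yields a (result, overflowed) pair, mirrored by
        // overflowing_add: 200 + 100 wraps to 44 and sets the flag
        assert_eq!(200u8.overflowing_add(100), (44, true));
        assert_eq!(1u8.overflowing_add(2), (3, false));
    }
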
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 79c62372df0..8a646d8cbfe 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -85,6 +85,7 @@
 use std::borrow::Cow;
 
 use either::Either;
+use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
     ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar,
@@ -103,7 +104,6 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
 use rustc_span::def_id::DefId;
-use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use smallvec::SmallVec;
 use tracing::{debug, instrument, trace};
 
@@ -427,7 +427,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     };
                     let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
                     ImmTy::from_immediate(ptr_imm, ty).into()
-                } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+                } else if matches!(
+                    ty.backend_repr,
+                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
+                ) {
                     let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
                     let variant_dest = if let Some(variant) = variant {
                         self.ecx.project_downcast(&dest, variant).discard_err()?
@@ -573,12 +576,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     // limited transmutes: it only works between types with the same layout, and
                     // cannot transmute pointers to integers.
                     if value.as_mplace_or_imm().is_right() {
-                        let can_transmute = match (value.layout.abi, to.abi) {
-                            (Abi::Scalar(s1), Abi::Scalar(s2)) => {
+                        let can_transmute = match (value.layout.backend_repr, to.backend_repr) {
+                            (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => {
                                 s1.size(&self.ecx) == s2.size(&self.ecx)
                                     && !matches!(s1.primitive(), Primitive::Pointer(..))
                             }
-                            (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+                            (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
                                 a1.size(&self.ecx) == a2.size(&self.ecx) &&
                                 b1.size(&self.ecx) == b2.size(&self.ecx) &&
                                 // The alignment of the second component determines its offset, so that also needs to match.
@@ -1241,7 +1244,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
         let as_bits = |value| {
             let constant = self.evaluated[value].as_ref()?;
-            if layout.abi.is_scalar() {
+            if layout.backend_repr.is_scalar() {
                 let scalar = self.ecx.read_scalar(constant).discard_err()?;
                 scalar.to_bits(constant.layout.size).discard_err()
             } else {
@@ -1497,12 +1500,12 @@ fn op_to_prop_const<'tcx>(
 
     // Do not synthesize too large constants. Codegen will just memcpy them, which we'd like to
     // avoid.
-    if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+    if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
         return None;
     }
 
     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
-    if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
+    if let BackendRepr::Scalar(abi::Scalar::Initialized { .. }) = op.layout.backend_repr
         && let Some(scalar) = ecx.read_scalar(op).discard_err()
     {
         if !scalar.try_to_scalar_int().is_ok() {
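
The transmute gate in the gvn.rs hunk above reads as a small decision function. A hedged sketch under mock types (not the real rustc_abi::Scalar API; the real check also handles the mplace-vs-immediate distinction elided here): scalar sizes must match, a pair's second component must keep its alignment (and hence offset), and pointers are never transmuted away.

    #[derive(Clone, Copy, PartialEq)]
    enum Prim { Int, Pointer }

    #[derive(Clone, Copy)]
    struct Scalar { prim: Prim, size_bytes: u64, align: u64 }

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Repr { Scalar(Scalar), ScalarPair(Scalar, Scalar) }

    fn can_transmute(from: Repr, to: Repr) -> bool {
        match (from, to) {
            (Repr::Scalar(s1), Repr::Scalar(s2)) => {
                s1.size_bytes == s2.size_bytes && s1.prim != Prim::Pointer
            }
            (Repr::ScalarPair(a1, b1), Repr::ScalarPair(a2, b2)) => {
                a1.size_bytes == a2.size_bytes
                    && b1.size_bytes == b2.size_bytes
                    // The second component's alignment determines its offset.
                    && b1.align == b2.align
                    && a1.prim != Prim::Pointer
                    && b1.prim != Prim::Pointer
            }
            _ => false,
        }
    }

    fn main() {
        let int = Scalar { prim: Prim::Int, size_bytes: 8, align: 8 };
        let ptr = Scalar { prim: Prim::Pointer, size_bytes: 8, align: 8 };
        assert!(can_transmute(Repr::Scalar(int), Repr::Scalar(int)));
        assert!(!can_transmute(Repr::Scalar(ptr), Repr::Scalar(int)));
    }
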
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 08923748eb2..0604665642a 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -4,6 +4,7 @@
 
 use std::fmt::Debug;
 
+use rustc_abi::{BackendRepr, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
     ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
@@ -19,7 +20,6 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::Span;
-use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
 use tracing::{debug, instrument, trace};
 
 use crate::errors::{AssertLint, AssertLintKind};
@@ -557,7 +557,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;
 
                 let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
-                if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                     // FIXME `Value` should properly support pairs in `Immediate`... but currently
                     // it does not.
                     let (val, overflow) = val.to_pair(&self.ecx);
@@ -651,9 +651,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     let to = self.ecx.layout_of(to).ok()?;
                     // `offset` for immediates only supports scalar/scalar-pair ABIs,
                     // so bail out if the target is not one.
-                    match (value.layout.abi, to.abi) {
-                        (Abi::Scalar(..), Abi::Scalar(..)) => {}
-                        (Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
+                    match (value.layout.backend_repr, to.backend_repr) {
+                        (BackendRepr::Scalar(..), BackendRepr::Scalar(..)) => {}
+                        (BackendRepr::ScalarPair(..), BackendRepr::ScalarPair(..)) => {}
                         _ => return None,
                     }
 
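
The restriction in the hunk above — offsetting an immediate only when both layouts are Scalar or both are ScalarPair — amounts to a two-case table. A toy sketch with hypothetical names:

    #[derive(Clone, Copy)]
    enum Repr { Scalar, ScalarPair, Memory }

    // `offset` on an immediate is only attempted when source and target
    // share a scalar-shaped representation; everything else bails out.
    fn offset_supported(from: Repr, to: Repr) -> bool {
        matches!(
            (from, to),
            (Repr::Scalar, Repr::Scalar) | (Repr::ScalarPair, Repr::ScalarPair)
        )
    }

    fn main() {
        assert!(offset_supported(Repr::Scalar, Repr::Scalar));
        assert!(!offset_supported(Repr::ScalarPair, Repr::Memory));
    }
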
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index 0b6cf82ca8b..7260c12f278 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -81,8 +81,12 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
             let meta_items = attr.meta_item_list().unwrap_or_default();
             for meta_item in meta_items {
                 match meta_item.name_or_empty() {
+                    // FIXME: this was never about ABI, and now this dump arg is confusing
                     sym::abi => {
-                        tcx.dcx().emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) });
+                        tcx.dcx().emit_err(LayoutAbi {
+                            span,
+                            abi: format!("{:?}", ty_layout.backend_repr),
+                        });
                     }
 
                     sym::align => {
diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
index 410bf0f40f4..af24fd23f50 100644
--- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
+++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
@@ -56,7 +56,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::LayoutData<rustc_abi::FieldIdx, rustc_abi
         LayoutShape {
             fields: self.fields.stable(tables),
             variants: self.variants.stable(tables),
-            abi: self.abi.stable(tables),
+            abi: self.backend_repr.stable(tables),
             abi_align: self.align.abi.stable(tables),
             size: self.size.stable(tables),
         }
@@ -196,20 +196,20 @@ impl<'tcx> Stable<'tcx> for rustc_abi::TagEncoding<rustc_abi::VariantIdx> {
     }
 }
 
-impl<'tcx> Stable<'tcx> for rustc_abi::Abi {
+impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
     type T = ValueAbi;
 
     fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
         match *self {
-            rustc_abi::Abi::Uninhabited => ValueAbi::Uninhabited,
-            rustc_abi::Abi::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
-            rustc_abi::Abi::ScalarPair(first, second) => {
+            rustc_abi::BackendRepr::Uninhabited => ValueAbi::Uninhabited,
+            rustc_abi::BackendRepr::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
+            rustc_abi::BackendRepr::ScalarPair(first, second) => {
                 ValueAbi::ScalarPair(first.stable(tables), second.stable(tables))
             }
-            rustc_abi::Abi::Vector { element, count } => {
+            rustc_abi::BackendRepr::Vector { element, count } => {
                 ValueAbi::Vector { element: element.stable(tables), count }
             }
-            rustc_abi::Abi::Aggregate { sized } => ValueAbi::Aggregate { sized },
+            rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
         }
     }
 }
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index ffec76370d0..d1234c3cc91 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,5 +1,7 @@
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+    self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
 
@@ -21,8 +23,8 @@ enum FloatConv {
 struct CannotUseFpConv;
 
 fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -77,8 +79,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
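
For orientation, the eligibility rule that should_use_fp_conv_helper implements above can be sketched standalone. Assumptions: mock types, with xlen/flen as the integer/float register widths in bits; the real code also walks aggregate fields recursively, which is elided here.

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Prim { Int(u64), Pointer(u64), Float(u64) }

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Repr { Scalar(Prim), Vector, Uninhabited, Aggregate }

    fn can_use_fp_conv(repr: Repr, xlen: u64, flen: u64) -> bool {
        match repr {
            // Ints and pointers must fit one integer register.
            Repr::Scalar(Prim::Int(bits)) | Repr::Scalar(Prim::Pointer(bits)) => bits <= xlen,
            // Floats must fit one float register.
            Repr::Scalar(Prim::Float(bits)) => bits <= flen,
            // Vectors and uninhabited types never use the FP convention;
            // aggregates would need the recursive field walk.
            Repr::Vector | Repr::Uninhabited | Repr::Aggregate => false,
        }
    }

    fn main() {
        assert!(can_use_fp_conv(Repr::Scalar(Prim::Float(64)), 64, 64));
        assert!(!can_use_fp_conv(Repr::Scalar(Prim::Int(128)), 64, 64));
    }
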
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 2c3258c8d42..5bdf4c2ad77 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -5,7 +5,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -24,8 +24,8 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    match ret.layout.field(cx, i).abi {
-        abi::Abi::Scalar(scalar) => match scalar.primitive() {
+    match ret.layout.field(cx, i).backend_repr {
+        abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Float(abi::F32) => Some(Reg::f32()),
             abi::Float(abi::F64) => Some(Reg::f64()),
             _ => None,
@@ -109,7 +109,7 @@ where
                 let offset = arg.layout.fields.offset(i);
 
                 // We only care about aligned doubles
-                if let abi::Abi::Scalar(scalar) = field.abi {
+                if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
                     if scalar.primitive() == abi::Float(abi::F64) {
                         if offset.is_aligned(dl.f64_align.abi) {
                             // Insert enough integers to cover [last_offset, offset)
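
The aligned-doubles rule in the mips64 hunk above, as a standalone sketch (mock Reg enum, not the real CastTarget machinery): an f64 field is given a float register only when its byte offset is 8-aligned; everything else, including the gaps, is covered with i64s.

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Reg { I64, F64 }

    fn classify_field(offset_bytes: u64, is_f64: bool) -> Reg {
        if is_f64 && offset_bytes % 8 == 0 { Reg::F64 } else { Reg::I64 }
    }

    fn main() {
        assert_eq!(classify_field(8, true), Reg::F64); // aligned double
        assert_eq!(classify_field(4, true), Reg::I64); // misaligned double
    }
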
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 25b001b57e8..8c3df9c426b 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -6,7 +6,8 @@ use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;
 
 use crate::abi::{
-    self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+    self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
+    TyAndLayout,
 };
 use crate::spec::abi::Abi as SpecAbi;
 use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
@@ -350,15 +351,17 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         layout: TyAndLayout<'a, Ty>,
         scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
     ) -> Self {
-        let mode = match layout.abi {
-            Abi::Uninhabited => PassMode::Ignore,
-            Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
-            Abi::ScalarPair(a, b) => PassMode::Pair(
+        let mode = match layout.backend_repr {
+            BackendRepr::Uninhabited => PassMode::Ignore,
+            BackendRepr::Scalar(scalar) => {
+                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
+            }
+            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                 scalar_attrs(&layout, a, Size::ZERO),
                 scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
-            Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
-            Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
+            BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
         };
         ArgAbi { layout, mode }
     }
@@ -460,7 +463,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
 
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Abi::Scalar(scalar) = self.layout.abi {
+        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
             if let abi::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
@@ -512,7 +515,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
             // That elevates any type difference to an ABI difference since we just use the
             // full Rust type as the LLVM argument/return type.
             if matches!(self.mode, PassMode::Direct(..))
-                && matches!(self.layout.abi, Abi::Aggregate { .. })
+                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
             {
                 // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                 self.layout.ty == other.layout.ty
@@ -791,8 +794,8 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 continue;
             }
 
-            match arg.layout.abi {
-                Abi::Aggregate { .. } => {}
+            match arg.layout.backend_repr {
+                BackendRepr::Memory { .. } => {}
 
                 // This is a fun case! The gist of what this is doing is
                 // that we want callers and callees to always agree on the
@@ -813,7 +816,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 // Note that the intrinsic ABI is exempt here as
                 // that's how we connect up to LLVM and it's unstable
                 // anyway; we control all calls to it in libstd.
-                Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+                BackendRepr::Vector { .. }
+                    if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect =>
+                {
                     arg.make_indirect();
                     continue;
                 }
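
The ArgAbi::new hunk above is the central repr-to-pass-mode table. A condensed mock of that mapping (hypothetical names; the attribute computation is omitted):

    #[allow(dead_code)]
    enum Repr { Uninhabited, Scalar, ScalarPair, Vector, Memory }

    #[derive(Debug, PartialEq)]
    enum Mode { Ignore, Direct, Pair, Indirect }

    fn default_mode(repr: Repr) -> Mode {
        match repr {
            Repr::Uninhabited => Mode::Ignore,           // never materialized
            Repr::Scalar | Repr::Vector => Mode::Direct, // one SSA value
            Repr::ScalarPair => Mode::Pair,              // two SSA values
            Repr::Memory => Mode::Indirect,              // behind a pointer
        }
    }

    fn main() {
        assert_eq!(default_mode(Repr::ScalarPair), Mode::Pair);
        assert_eq!(default_mode(Repr::Memory), Mode::Indirect);
    }

Targets then override this default; for example, the simd_types_indirect fixup above demotes Vector arguments to indirect for non-intrinsic ABIs.
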
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index f96169e6a61..c0298edb5ab 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -4,8 +4,10 @@
 // Reference: Clang RISC-V ELF psABI lowering code
 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
 
+use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
 
@@ -27,8 +29,8 @@ enum FloatConv {
 struct CannotUseFpConv;
 
 fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -83,8 +85,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
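
extend_integer_width above encodes an RV64 psABI quirk worth spelling out: 32-bit integers are passed sign-extended to xlen even when the Rust type is unsigned. A plain-Rust illustration of the value-level effect (the real code only flips an ArgExtension attribute):

    // Sign-extend, regardless of the Rust-level signedness.
    fn rv64_extend_u32(value: u32) -> i64 {
        value as i32 as i64
    }

    fn main() {
        // u32::MAX arrives in a register as -1, not 4294967295.
        assert_eq!(rv64_extend_u32(u32::MAX), -1);
        assert_eq!(rv64_extend_u32(7), 7);
    }
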
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index 835353f76fc..313d8730399 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -109,11 +109,11 @@ where
         return data;
     }
 
-    match layout.abi {
-        abi::Abi::Scalar(scalar) => {
+    match layout.backend_repr {
+        abi::BackendRepr::Scalar(scalar) => {
             data = arg_scalar(cx, &scalar, offset, data);
         }
-        abi::Abi::Aggregate { .. } => {
+        abi::BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
@@ -122,7 +122,7 @@ where
             }
         }
         _ => {
-            if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+            if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
                 data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
             }
         }
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index e907beecb38..a5af975d4d2 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,6 +1,6 @@
 use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
 use crate::abi::{
-    Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+    AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
 };
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
@@ -105,10 +105,12 @@ where
             where
                 Ty: TyAbiInterface<'a, C> + Copy,
             {
-                match layout.abi {
-                    Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
-                    Abi::Vector { .. } => true,
-                    Abi::Aggregate { .. } => {
+                match layout.backend_repr {
+                    BackendRepr::Uninhabited
+                    | BackendRepr::Scalar(_)
+                    | BackendRepr::ScalarPair(..) => false,
+                    BackendRepr::Vector { .. } => true,
+                    BackendRepr::Memory { .. } => {
                         for i in 0..layout.fields.count() {
                             if contains_vector(cx, layout.field(cx, i)) {
                                 return true;
@@ -223,9 +225,9 @@ where
         // Intrinsics themselves are not actual "real" functions, so there's no need to change their ABIs.
         && abi != SpecAbi::RustIntrinsic
     {
-        let has_float = match fn_abi.ret.layout.abi {
-            Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
-            Abi::ScalarPair(s1, s2) => {
+        let has_float = match fn_abi.ret.layout.backend_repr {
+            BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+            BackendRepr::ScalarPair(s1, s2) => {
                 matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
             }
             _ => false, // anyway not passed via registers on x86
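
The has_float probe in the x86 hunk above only needs to look at scalar components, since anything else is not returned in registers on x86. A mock rendering:

    #[derive(Clone, Copy)]
    enum Prim { Int, Float }

    #[allow(dead_code)]
    enum Repr { Scalar(Prim), ScalarPair(Prim, Prim), Other }

    fn has_float(ret: Repr) -> bool {
        match ret {
            Repr::Scalar(p) => matches!(p, Prim::Float),
            Repr::ScalarPair(a, b) => {
                matches!(a, Prim::Float) || matches!(b, Prim::Float)
            }
            Repr::Other => false, // not passed via registers on x86 anyway
        }
    }

    fn main() {
        assert!(has_float(Repr::Scalar(Prim::Float)));
        assert!(!has_float(Repr::ScalarPair(Prim::Int, Prim::Int)));
    }
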
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index 9910e623ac9..bd101b23ea1 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -1,8 +1,10 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
 
+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
-use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 
 /// Classification of "eightbyte" components.
 // N.B., the order of the variants is from general to specific,
@@ -46,17 +48,17 @@ where
             return Ok(());
         }
 
-        let mut c = match layout.abi {
-            Abi::Uninhabited => return Ok(()),
+        let mut c = match layout.backend_repr {
+            BackendRepr::Uninhabited => return Ok(()),
 
-            Abi::Scalar(scalar) => match scalar.primitive() {
+            BackendRepr::Scalar(scalar) => match scalar.primitive() {
                 abi::Int(..) | abi::Pointer(_) => Class::Int,
                 abi::Float(_) => Class::Sse,
             },
 
-            Abi::Vector { .. } => Class::Sse,
+            BackendRepr::Vector { .. } => Class::Sse,
 
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
                 for i in 0..layout.fields.count() {
                     let field_off = off + layout.fields.offset(i);
                     classify(cx, layout.field(cx, i), cls, field_off)?;
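
To make the eightbyte classification above concrete: every 8-byte chunk of an argument gets a class, and when two scalars fall into the same chunk their classes merge toward the more general one (Int beats Sse). A toy merge under mock types; the real Class lattice has more variants:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Class { Sse, Int }

    fn merge(current: Option<Class>, new: Class) -> Class {
        match (current, new) {
            (None, c) => c,
            // Int is the more general class and wins any mix.
            (Some(Class::Int), _) | (_, Class::Int) => Class::Int,
            (Some(Class::Sse), Class::Sse) => Class::Sse,
        }
    }

    fn main() {
        // struct { x: i64, y: f64 }: eightbyte 0 classifies Int, eightbyte 1
        // classifies Sse, so it is passed in one GP and one SSE register.
        assert_eq!(merge(None, Class::Int), Class::Int);
        assert_eq!(merge(Some(Class::Sse), Class::Int), Class::Int);
    }
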
diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs
index e5a20b248e4..83d94cb11ba 100644
--- a/compiler/rustc_target/src/callconv/x86_win64.rs
+++ b/compiler/rustc_target/src/callconv/x86_win64.rs
@@ -1,25 +1,28 @@
+use rustc_abi::{BackendRepr, Float, Primitive};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg};
-use crate::abi::{Abi, Float, Primitive};
 use crate::spec::HasTargetSpec;
 
 // Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
 
 pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) {
     let fixup = |a: &mut ArgAbi<'_, Ty>| {
-        match a.layout.abi {
-            Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
-                8 => a.cast_to(Reg::i8()),
-                16 => a.cast_to(Reg::i16()),
-                32 => a.cast_to(Reg::i32()),
-                64 => a.cast_to(Reg::i64()),
-                _ => a.make_indirect(),
-            },
-            Abi::Vector { .. } => {
+        match a.layout.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {}
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
+                match a.layout.size.bits() {
+                    8 => a.cast_to(Reg::i8()),
+                    16 => a.cast_to(Reg::i16()),
+                    32 => a.cast_to(Reg::i32()),
+                    64 => a.cast_to(Reg::i64()),
+                    _ => a.make_indirect(),
+                }
+            }
+            BackendRepr::Vector { .. } => {
                 // FIXME(eddyb) there should be a size cap here
                 // (probably what clang calls "illegal vectors").
             }
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 // Match what LLVM does for `f128` so that `compiler-builtins` builtins match up
                 // with what LLVM expects.
                 if a.layout.size.bytes() > 8
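
The Win64 fixup above follows the documented parameter-passing rule: a by-value aggregate is allowed only at exactly 1, 2, 4, or 8 bytes, in which case it travels as a same-sized integer; otherwise it goes behind a pointer. A sketch of just that size dispatch (mock Pass enum):

    #[derive(Debug, PartialEq)]
    enum Pass { CastToInt(u32), Indirect }

    fn win64_aggregate_mode(size_bits: u64) -> Pass {
        match size_bits {
            8 | 16 | 32 | 64 => Pass::CastToInt(size_bits as u32),
            _ => Pass::Indirect,
        }
    }

    fn main() {
        assert_eq!(win64_aggregate_mode(32), Pass::CastToInt(32));
        assert_eq!(win64_aggregate_mode(24), Pass::Indirect); // e.g. a 3-byte struct
    }
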
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index e1728b08a39..9d313d16500 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -6,7 +6,7 @@
 //! Section 2.3 from the Xtensa programmers guide.
 
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
+use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 const NUM_ARG_GPRS: u64 = 6;
@@ -114,8 +114,8 @@ where
 }
 
 fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
index a068f25fe35..3ddf023cf97 100644
--- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
+++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
@@ -18,7 +18,7 @@ use rustc_middle::ty::{
 };
 use rustc_span::Span;
 use rustc_span::symbol::Symbol;
-use rustc_target::abi::Abi;
+use rustc_target::abi::BackendRepr;
 use smallvec::SmallVec;
 use tracing::{debug, instrument};
 
@@ -523,8 +523,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
 
     // e.g., `Rc<()>`
     let unit_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, tcx.types.unit, method_def_id);
-    match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.abi) {
-        Ok(Abi::Scalar(..)) => (),
+    match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.backend_repr) {
+        Ok(BackendRepr::Scalar(..)) => (),
         abi => {
             tcx.dcx().span_delayed_bug(
                 tcx.def_span(method_def_id),
@@ -538,8 +538,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
     // e.g., `Rc<dyn Trait>`
     let trait_object_receiver =
         receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method_def_id);
-    match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.abi) {
-        Ok(Abi::ScalarPair(..)) => (),
+    match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.backend_repr) {
+        Ok(BackendRepr::ScalarPair(..)) => (),
         abi => {
             tcx.dcx().span_delayed_bug(
                 tcx.def_span(method_def_id),
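
The two layout expectations above correspond to thin vs. fat pointers: a receiver over a sized Self is one pointer-sized Scalar, while a trait-object receiver carries a data pointer plus a vtable pointer and is therefore a ScalarPair. This is observable with stable Rust:

    use std::mem::size_of;
    use std::rc::Rc;

    trait Trait {}

    fn main() {
        // Thin pointer: one word -> Scalar layout.
        assert_eq!(size_of::<Rc<()>>(), size_of::<usize>());
        // Fat pointer: data + vtable -> ScalarPair layout.
        assert_eq!(size_of::<Rc<dyn Trait>>(), 2 * size_of::<usize>());
    }
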
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 48149a08de8..722ef5f4569 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -1,7 +1,7 @@
 use std::iter;
 
 use rustc_abi::Primitive::Pointer;
-use rustc_abi::{Abi, PointerKind, Scalar, Size};
+use rustc_abi::{BackendRepr, PointerKind, Scalar, Size};
 use rustc_hir as hir;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::bug;
@@ -469,7 +469,7 @@ fn fn_abi_sanity_check<'tcx>(
                 // careful. Scalar/ScalarPair is fine, since backends will generally use
                 // `layout.backend_repr` and ignore everything else. We should just reject `Memory`
                 // entirely here, but some targets need to be fixed first.
-                if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+                if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
                     // For an unsized type we'd only pass the sized prefix, so there is no universe
                     // in which we ever want to allow this.
                     assert!(
@@ -500,7 +500,7 @@ fn fn_abi_sanity_check<'tcx>(
                 // Similar to `Direct`, we need to make sure that backends use `layout.backend_repr` and
                 // ignore the rest of the layout.
                 assert!(
-                    matches!(arg.layout.abi, Abi::ScalarPair(..)),
+                    matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
                     "PassMode::Pair for type {}",
                     arg.layout.ty
                 );
@@ -658,9 +658,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
         fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
             // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
             // but who knows what breaks if we change this now.
-            if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+            if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
                 assert!(
-                    arg.layout.abi.is_sized(),
+                    arg.layout.backend_repr.is_sized(),
                     "'unadjusted' ABI does not support unsized arguments"
                 );
             }
@@ -731,8 +731,8 @@ fn make_thin_self_ptr<'tcx>(
         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
         Ty::new_mut_ptr(tcx, layout.ty)
     } else {
-        match layout.abi {
-            Abi::ScalarPair(..) | Abi::Scalar(..) => (),
+        match layout.backend_repr {
+            BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
             _ => bug!("receiver type has unsupported layout: {:?}", layout),
         }
 
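
The sanity rules touched above reduce to a couple of assertions. A mock condensation (hypothetical types; the real check also inspects attributes and target quirks): PassMode::Pair demands a ScalarPair representation, and a by-value Memory argument must at least be sized.

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Repr { Scalar, ScalarPair, Memory { sized: bool } }

    #[derive(Clone, Copy)]
    enum Mode { Direct, Pair }

    fn sanity_check(repr: Repr, mode: Mode) {
        match mode {
            Mode::Pair => assert!(matches!(repr, Repr::ScalarPair)),
            Mode::Direct => {
                if let Repr::Memory { sized } = repr {
                    // For an unsized type only the sized prefix would be passed.
                    assert!(sized, "by-value passing of unsized arguments");
                }
            }
        }
    }

    fn main() {
        sanity_check(Repr::ScalarPair, Mode::Pair);
        sanity_check(Repr::Memory { sized: true }, Mode::Direct);
    }
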
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 94b80e2694d..5ca7afe2453 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -5,8 +5,9 @@ use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    Abi, AbiAndPrefAlign, AddressSpace, Align, FieldsShape, HasDataLayout, LayoutCalculatorError,
-    LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange,
+    AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout,
+    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+    Variants, WrappingRange,
 };
 use rustc_index::bit_set::BitSet;
 use rustc_index::{IndexSlice, IndexVec};
@@ -173,7 +174,9 @@ fn layout_of_uncached<'tcx>(
             let mut layout = LayoutData::clone(&layout.0);
             match *pat {
                 ty::PatternKind::Range { start, end, include_end } => {
-                    if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &mut layout.abi {
+                    if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+                        &mut layout.backend_repr
+                    {
                         if let Some(start) = start {
                             scalar.valid_range_mut().start = start
                                 .try_to_bits(tcx, param_env)
@@ -275,7 +278,7 @@ fn layout_of_uncached<'tcx>(
                     return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
                 }
 
-                let Abi::Scalar(metadata) = metadata_layout.abi else {
+                let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else {
                     return Err(error(cx, LayoutError::Unknown(pointee)));
                 };
 
@@ -330,9 +333,9 @@ fn layout_of_uncached<'tcx>(
                 .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
 
             let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
-                Abi::Uninhabited
+                BackendRepr::Uninhabited
             } else {
-                Abi::Aggregate { sized: true }
+                BackendRepr::Memory { sized: true }
             };
 
             let largest_niche = if count != 0 { element.largest_niche } else { None };
@@ -340,7 +343,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields: FieldsShape::Array { stride: element.size, count },
-                abi,
+                backend_repr: abi,
                 largest_niche,
                 align: element.align,
                 size,
@@ -353,7 +356,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields: FieldsShape::Array { stride: element.size, count: 0 },
-                abi: Abi::Aggregate { sized: false },
+                backend_repr: BackendRepr::Memory { sized: false },
                 largest_niche: None,
                 align: element.align,
                 size: Size::ZERO,
@@ -364,7 +367,7 @@ fn layout_of_uncached<'tcx>(
         ty::Str => tcx.mk_layout(LayoutData {
             variants: Variants::Single { index: FIRST_VARIANT },
             fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            abi: Abi::Aggregate { sized: false },
+            backend_repr: BackendRepr::Memory { sized: false },
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,
@@ -384,8 +387,8 @@ fn layout_of_uncached<'tcx>(
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?;
-            match unit.abi {
-                Abi::Aggregate { ref mut sized } => *sized = false,
+            match unit.backend_repr {
+                BackendRepr::Memory { ref mut sized } => *sized = false,
                 _ => bug!(),
             }
             tcx.mk_layout(unit)
@@ -500,7 +503,7 @@ fn layout_of_uncached<'tcx>(
 
             // Compute the ABI of the element type:
             let e_ly = cx.layout_of(e_ty)?;
-            let Abi::Scalar(e_abi) = e_ly.abi else {
+            let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
                 // This error isn't caught in typeck, e.g., if
                 // the element type of the vector is generic.
                 tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });
@@ -516,12 +519,12 @@ fn layout_of_uncached<'tcx>(
                 // Non-power-of-two vectors have padding up to the next power-of-two.
                 // If we're a packed repr, remove the padding while keeping the alignment as close
                 // to a vector as possible.
-                (Abi::Aggregate { sized: true }, AbiAndPrefAlign {
+                (BackendRepr::Memory { sized: true }, AbiAndPrefAlign {
                     abi: Align::max_for_offset(size),
                     pref: dl.vector_align(size).pref,
                 })
             } else {
-                (Abi::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
+                (BackendRepr::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
             };
             let size = size.align_to(align.abi);
 
@@ -535,7 +538,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields,
-                abi,
+                backend_repr: abi,
                 largest_niche: e_ly.largest_niche,
                 size,
                 align,
@@ -985,10 +988,12 @@ fn coroutine_layout<'tcx>(
 
     size = size.align_to(align.abi);
 
-    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
-        Abi::Uninhabited
+    let abi = if prefix.backend_repr.is_uninhabited()
+        || variants.iter().all(|v| v.backend_repr.is_uninhabited())
+    {
+        BackendRepr::Uninhabited
     } else {
-        Abi::Aggregate { sized: true }
+        BackendRepr::Memory { sized: true }
     };
 
     let layout = tcx.mk_layout(LayoutData {
@@ -999,7 +1004,7 @@ fn coroutine_layout<'tcx>(
             variants,
         },
         fields: outer_fields,
-        abi,
+        backend_repr: abi,
         // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
         // self-referentiality), getting the discriminant can cause aliasing violations.
         // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
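
One of the subtler rules in the SIMD hunk above is the padding behavior. A plain-arithmetic sketch of the sizing (an assumed simplification; the real computation goes through the target data layout's vector_align):

    fn simd_size_bytes(elem_bytes: u64, lanes: u64, packed: bool) -> u64 {
        let size = elem_bytes * lanes;
        // Non-power-of-two vectors are padded up to the next power of two,
        // unless the repr is packed, which keeps the unpadded size.
        if packed { size } else { size.next_power_of_two() }
    }

    fn main() {
        assert_eq!(simd_size_bytes(4, 3, false), 16); // 3 x f32 padded to 16
        assert_eq!(simd_size_bytes(4, 3, true), 12);  // packed keeps 12
    }
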
diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs
index 3db5a4f1805..f43feb552b2 100644
--- a/compiler/rustc_ty_utils/src/layout/invariant.rs
+++ b/compiler/rustc_ty_utils/src/layout/invariant.rs
@@ -66,12 +66,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
 
     fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
         // Verify the ABI mandated alignment and size.
-        let align = layout.abi.inherent_align(cx).map(|align| align.abi);
-        let size = layout.abi.inherent_size(cx);
+        let align = layout.backend_repr.inherent_align(cx).map(|align| align.abi);
+        let size = layout.backend_repr.inherent_size(cx);
         let Some((align, size)) = align.zip(size) else {
             assert_matches!(
-                layout.layout.abi(),
-                Abi::Uninhabited | Abi::Aggregate { .. },
+                layout.layout.backend_repr(),
+                BackendRepr::Uninhabited | BackendRepr::Memory { .. },
                 "ABI unexpectedly missing alignment and/or size in {layout:#?}"
             );
             return;
@@ -88,12 +88,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
         );
 
         // Verify per-ABI invariants
-        match layout.layout.abi() {
-            Abi::Scalar(_) => {
+        match layout.layout.backend_repr() {
+            BackendRepr::Scalar(_) => {
                 // Check that this matches the underlying field.
                 let inner = skip_newtypes(cx, layout);
                 assert!(
-                    matches!(inner.layout.abi(), Abi::Scalar(_)),
+                    matches!(inner.layout.backend_repr(), BackendRepr::Scalar(_)),
                     "`Scalar` type {} is newtype around non-`Scalar` type {}",
                     layout.ty,
                     inner.ty
@@ -132,7 +132,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                             "`Scalar` field with bad align in {inner:#?}",
                         );
                         assert!(
-                            matches!(field.abi, Abi::Scalar(_)),
+                            matches!(field.backend_repr, BackendRepr::Scalar(_)),
                             "`Scalar` field with bad ABI in {inner:#?}",
                         );
                     }
@@ -141,11 +141,11 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                     }
                 }
             }
-            Abi::ScalarPair(scalar1, scalar2) => {
+            BackendRepr::ScalarPair(scalar1, scalar2) => {
                 // Check that the underlying pair of fields matches.
                 let inner = skip_newtypes(cx, layout);
                 assert!(
-                    matches!(inner.layout.abi(), Abi::ScalarPair(..)),
+                    matches!(inner.layout.backend_repr(), BackendRepr::ScalarPair(..)),
                     "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
                     layout.ty,
                     inner.ty
@@ -208,8 +208,8 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                     "`ScalarPair` first field with bad align in {inner:#?}",
                 );
                 assert_matches!(
-                    field1.abi,
-                    Abi::Scalar(_),
+                    field1.backend_repr,
+                    BackendRepr::Scalar(_),
                     "`ScalarPair` first field with bad ABI in {inner:#?}",
                 );
                 let field2_offset = size1.align_to(align2);
@@ -226,16 +226,16 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                     "`ScalarPair` second field with bad align in {inner:#?}",
                 );
                 assert_matches!(
-                    field2.abi,
-                    Abi::Scalar(_),
+                    field2.backend_repr,
+                    BackendRepr::Scalar(_),
                     "`ScalarPair` second field with bad ABI in {inner:#?}",
                 );
             }
-            Abi::Vector { element, .. } => {
+            BackendRepr::Vector { element, .. } => {
                 assert!(align >= element.align(cx).abi); // just sanity-checking `vector_align`.
                 // FIXME: Do some kind of check of the inner type, like for Scalar and ScalarPair.
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} // Nothing to check.
         }
     }
 
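
The field2_offset computation above is the load-bearing fact for ScalarPair layouts: the second scalar starts at the first scalar's size rounded up to the second's alignment. As bare arithmetic:

    fn align_to(size: u64, align: u64) -> u64 {
        (size + align - 1) / align * align
    }

    fn main() {
        // Pair of (u8, u32): the second field starts at offset 4.
        assert_eq!(align_to(1, 4), 4);
        // Pair of (u64, u32): the second field starts at offset 8.
        assert_eq!(align_to(8, 4), 8);
    }
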
@@ -274,13 +274,13 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
             // The top-level ABI and the ABI of the variants should be coherent.
             let scalar_coherent =
                 |s1: Scalar, s2: Scalar| s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx);
-            let abi_coherent = match (layout.abi, variant.abi) {
-                (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
-                (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+            let abi_coherent = match (layout.backend_repr, variant.backend_repr) {
+                (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => scalar_coherent(s1, s2),
+                (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
                     scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
                 }
-                (Abi::Uninhabited, _) => true,
-                (Abi::Aggregate { .. }, _) => true,
+                (BackendRepr::Uninhabited, _) => true,
+                (BackendRepr::Memory { .. }, _) => true,
                 _ => false,
             };
             if !abi_coherent {