author     Jubilee Young <workingjubilee@gmail.com>    2024-10-29 13:37:26 -0700
committer  Jubilee Young <workingjubilee@gmail.com>    2024-10-29 14:56:00 -0700
commit     7086dd83cca1cf694c7bd171efbf262fa8ffb4aa (patch)
tree       a01a9f48b7bc1e43128c0fdd0294dfb1d8bc345a /compiler/rustc_codegen_llvm/src
parent     2dece5bb62f234f5622a08289c5a3d1555cd7843 (diff)
compiler: `rustc_abi::Abi` => `BackendRepr`
The initial naming of "Abi" was an awful mistake: it conveyed wrong ideas
about how psABIs work, and even worse ideas about what the enum meant.
It was only meant to represent the way the value would be described to
a codegen backend as it was lowered to that intermediate representation.
It was never meant to mean anything about the actual psABI handling!
The conflation arose because LLVM will typically associate a certain form
with a certain ABI, but even that association does not hold once the
special cases that actually exist come into play, nor does it cover the
IR annotations that modify the ABI.
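
As an illustration of that distinction: a small aggregate is reported to
the backend as `BackendRepr::Memory`, yet the psABI may still pass it in
registers. The struct and function below are hypothetical and assume
x86_64 with the SysV C ABI:

    // Three u8 fields: classified as `BackendRepr::Memory` for the backend,
    // but the SysV psABI still passes the whole struct in a single integer
    // register. "Memory" says nothing about how the call actually happens.
    #[repr(C)]
    pub struct Rgb {
        pub r: u8,
        pub g: u8,
        pub b: u8,
    }

    pub extern "C" fn brightness(c: Rgb) -> u8 {
        ((c.r as u16 + c.g as u16 + c.b as u16) / 3) as u8
    }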

Reframe `rustc_abi::Abi` as the `BackendRepr` of the type, and rename
`BackendRepr::Aggregate` to `BackendRepr::Memory` (the renamed enum is
sketched at the end of this message). Unfortunately, due to the
persistent misunderstandings, this too is now incorrect:
- Scattered ABI-relevant code is entangled with BackendRepr
- We do not always pre-compute a correct BackendRepr that reflects how
  we "actually" want this value to be handled, so the backend interface
  is also left to inject various special cases here
- In some cases `BackendRepr::Memory` describes a "real" aggregate, in
  others a value that merely happens to live in memory, and in some
  cases it is actually a scalar!

Our rustc-to-backend lowering code handles this sort of thing right now.
That will eventually be addressed by lifting duplicated lowering code
to either rustc_codegen_ssa or rustc_target as appropriate.
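
For orientation, the renamed enum has roughly the following shape. This
is only a sketch based on the variants visible in the diff below, with
the `Scalar` payload type taken from `rustc_abi` and the `sized` field
assumed from the pre-rename `Abi::Aggregate { sized }`; the real
definition lives in `rustc_abi`:

    use rustc_abi::Scalar;

    pub enum BackendRepr {
        Scalar(Scalar),
        ScalarPair(Scalar, Scalar),
        Vector { element: Scalar, count: u64 },
        Memory { sized: bool }, // formerly `Abi::Aggregate { sized }`
        Uninhabited,
    }
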
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs        10
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs        93
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs     4
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs  13
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs    37
5 files changed, 87 insertions, 70 deletions
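
The mechanical change repeated throughout these files looks like the
following, condensed from the abi.rs hunk below (the `layout.abi()`
accessor likewise becomes `layout.backend_repr()`):

    // Before: the enum was `Abi` and the layout field was `abi`:
    //     if let abi::Abi::Scalar(scalar) = self.ret.layout.abi { ... }
    // After: the enum is `BackendRepr` and the field is `backend_repr`:
    if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
        apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
    }
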
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 8a1ee48c43c..855ca010611 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         match &self.ret.mode {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
-                if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                 }
             }
@@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 }
                 PassMode::Direct(attrs) => {
                     let i = apply(attrs);
-                    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                     }
                 }
@@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Pair(a, b) => {
                     let i = apply(a);
                     let ii = apply(b);
-                    if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+                    if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+                        arg.layout.backend_repr
+                    {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                         apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                     }
@@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         }
         if bx.cx.sess().opts.optimize != config::OptLevel::No
                 && llvm_util::get_version() < (19, 0, 0)
-                && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+                && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
                 && matches!(scalar.primitive(), Int(..))
                 // If the value is a boolean, the range is 0..2 and that ultimately
                 // become 0..0 when the type becomes i1, which would be rejected
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 3c30822a2e2..53758967552 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Type {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
         (
             X86(
@@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
@@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
@@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 _ => layout.llvm_type(cx),
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 8702532c36e..8e87869f946 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -545,13 +545,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.val.llval, place.val.align);
-                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index d04b5257619..c77e00aed9a 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(scalar) => {
+                match fn_abi.ret.layout.backend_repr {
+                    abi::BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             sym::raw_eq => {
-                use abi::Abi::*;
+                use abi::BackendRepr::*;
                 let tp_ty = fn_args.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
-                let use_integer_compare = match layout.abi() {
+                let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
                     Uninhabited | Vector { .. } => false,
-                    Aggregate { .. } => {
+                    Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
@@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
 
                 let llret_ty = if ret_ty.is_simd()
-                    && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+                    && let abi::BackendRepr::Memory { .. } =
+                        self.layout_of(ret_ty).layout.backend_repr
                 {
                     let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
                     let elem_ll_ty = match elem_ty.kind() {
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 6be4c3f034f..2b05e24a7ba 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,7 +1,7 @@
 use std::fmt::Write;
 
 use rustc_abi::Primitive::{Float, Int, Pointer};
-use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
 ) -> &'a Type {
-    match layout.abi {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { element, count } => {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
     }
 
     let name = match layout.ty.kind() {
@@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }
 
     fn is_llvm_scalar_pair(&self) -> bool {
-        match self.abi {
-            Abi::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
 
@@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(scalar) = self.abi {
+        if let BackendRepr::Scalar(scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
             // can be either wide or thin (data pointers of wide pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        match self.abi {
-            Abi::Scalar(scalar) => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => {
                 if scalar.is_bool() {
                     return cx.type_i1();
                 }
             }
-            Abi::ScalarPair(..) => {
+            BackendRepr::ScalarPair(..) => {
                 // An immediate pair always contains just the two elements, without any padding
                 // filler, as it should never be stored to memory.
                 return cx.type_struct(
@@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let Abi::ScalarPair(a, b) = self.abi else {
+        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
         let scalar = [a, b][index];