Diffstat (limited to 'compiler/rustc_codegen_llvm/src/asm.rs')
 -rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 584
 1 file changed, 270 insertions(+), 314 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 775266fec97..bb74dfe1487 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -1,21 +1,21 @@
 use std::assert_matches::assert_matches;
 
 use libc::{c_char, c_uint};
+use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar};
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_codegen_ssa::mir::operand::OperandValue;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::Instance;
+use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::{bug, span_bug};
-use rustc_span::{sym, Pos, Span, Symbol};
-use rustc_target::abi::*;
+use rustc_span::{Pos, Span, Symbol, sym};
 use rustc_target::asm::*;
 use smallvec::SmallVec;
 use tracing::debug;
 
 use crate::builder::Builder;
-use crate::common::Funclet;
+use crate::common::{AsCCharPtr, Funclet};
 use crate::context::CodegenCx;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
@@ -154,7 +154,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     // We prefer the latter because it matches the behavior of
                     // Clang.
                     if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
-                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)).to_string());
+                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)));
                     } else {
                         constraints.push(format!("{}", op_idx[&idx]));
                     }
@@ -268,6 +268,15 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 InlineAsmArch::S390x => {
                     constraints.push("~{cc}".to_string());
                 }
+                InlineAsmArch::Sparc | InlineAsmArch::Sparc64 => {
+                    // In LLVM, ~{icc} represents icc and xcc in 64-bit code.
+                    // https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/Target/Sparc/SparcRegisterInfo.td#L64
+                    constraints.push("~{icc}".to_string());
+                    constraints.push("~{fcc0}".to_string());
+                    constraints.push("~{fcc1}".to_string());
+                    constraints.push("~{fcc2}".to_string());
+                    constraints.push("~{fcc3}".to_string());
+                }
                 InlineAsmArch::SpirV => {}
                 InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                 InlineAsmArch::Bpf => {}
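
Note on the new SPARC arm: like the other arms in this block, it appends the architecture's condition-code registers as clobbers for `asm!` blocks that do not declare `options(preserves_flags)`. A minimal standalone sketch of that pattern follows; `default_flag_clobbers` is a toy helper invented for illustration, not rustc's builder code.

    // Toy sketch (assumed helper name, not rustc internals): the default
    // clobber constraints an architecture contributes to the LLVM string.
    fn default_flag_clobbers(arch: &str) -> Vec<String> {
        match arch {
            // ~{icc} covers both icc and xcc in 64-bit SPARC code (see the
            // SparcRegisterInfo.td link in the hunk above), plus the four
            // floating-point condition fields fcc0-fcc3.
            "sparc" | "sparc64" => vec!["~{icc}", "~{fcc0}", "~{fcc1}", "~{fcc2}", "~{fcc3}"]
                .into_iter()
                .map(String::from)
                .collect(),
            "s390x" => vec!["~{cc}".to_string()],
            _ => Vec::new(),
        }
    }

    fn main() {
        // Joined onto the operand constraints, this becomes the tail of the
        // final constraint string, e.g. "=r,r,~{icc},~{fcc0},~{fcc1},...".
        println!("{}", default_flag_clobbers("sparc64").join(","));
    }
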
@@ -420,7 +429,7 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
         unsafe {
             llvm::LLVMAppendModuleInlineAsm(
                 self.llmod,
-                template_str.as_ptr().cast(),
+                template_str.as_c_char_ptr(),
                 template_str.len(),
             );
         }
@@ -458,14 +467,14 @@ pub(crate) fn inline_asm_call<'ll>(
     let fty = bx.cx.type_func(&argtys, output);
     unsafe {
         // Ask LLVM to verify that the constraints are well-formed.
-        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
+        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len());
         debug!("constraint verification result: {:?}", constraints_ok);
         if constraints_ok {
             let v = llvm::LLVMRustInlineAsm(
                 fty,
-                asm.as_ptr().cast(),
+                asm.as_c_char_ptr(),
                 asm.len(),
-                cons.as_ptr().cast(),
+                cons.as_c_char_ptr(),
                 cons.len(),
                 volatile,
                 alignstack,
@@ -504,10 +513,15 @@ pub(crate) fn inline_asm_call<'ll>(
                 // due to the asm template string coming from a macro. LLVM will
                 // default to the first srcloc for lines that don't have an
                 // associated srcloc.
-                srcloc.push(bx.const_i32(0));
+                srcloc.push(llvm::LLVMValueAsMetadata(bx.const_i32(0)));
             }
-            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
-            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
+            srcloc.extend(
+                line_spans
+                    .iter()
+                    .map(|span| llvm::LLVMValueAsMetadata(bx.const_i32(span.lo().to_u32() as i32))),
+            );
+            let md = llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len());
+            let md = llvm::LLVMMetadataAsValue(&bx.llcx, md);
             llvm::LLVMSetMetadata(call, kind, md);
 
             Some(call)
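
The hunk above switches the srcloc metadata to the metadata-typed LLVM API (`LLVMValueAsMetadata`, `LLVMMDNodeInContext2`, `LLVMMetadataAsValue`), but the entries themselves are unchanged: one i32 byte offset per template line, with a leading 0 when there are more lines than spans. Below is a plain-Rust model of that list building; the exact guard condition sits outside this hunk, so "more lines than spans" is an assumption for illustration, and `srcloc_entries` is a made-up name.

    // Plain-Rust model (not rustc code) of the srcloc entries built above:
    // when the template has more lines than spans to attribute them to
    // (e.g. the asm text came from a macro), a leading 0 makes LLVM fall
    // back to the first srcloc for the unattributed lines.
    fn srcloc_entries(template_lines: usize, line_span_offsets: &[u32]) -> Vec<i32> {
        let mut srcloc = Vec::with_capacity(template_lines);
        if template_lines > line_span_offsets.len() {
            srcloc.push(0);
        }
        srcloc.extend(line_span_offsets.iter().map(|&lo| lo as i32));
        srcloc
    }

    fn main() {
        // Three template lines, spans for only two of them.
        assert_eq!(srcloc_entries(3, &[120, 147]), vec![0, 120, 147]);
    }
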
@@ -520,24 +534,16 @@ pub(crate) fn inline_asm_call<'ll>(
 
 /// If the register is an xmm/ymm/zmm register then return its index.
 fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
+    use X86InlineAsmReg::*;
     match reg {
-        InlineAsmReg::X86(reg)
-            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
-                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
-        {
-            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
+        InlineAsmReg::X86(reg) if reg as u32 >= xmm0 as u32 && reg as u32 <= xmm15 as u32 => {
+            Some(reg as u32 - xmm0 as u32)
         }
-        InlineAsmReg::X86(reg)
-            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
-                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
-        {
-            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
+        InlineAsmReg::X86(reg) if reg as u32 >= ymm0 as u32 && reg as u32 <= ymm15 as u32 => {
+            Some(reg as u32 - ymm0 as u32)
         }
-        InlineAsmReg::X86(reg)
-            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
-                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
-        {
-            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
+        InlineAsmReg::X86(reg) if reg as u32 >= zmm0 as u32 && reg as u32 <= zmm31 as u32 => {
+            Some(reg as u32 - zmm0 as u32)
         }
         _ => None,
     }
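
The compressed guards above are the usual contiguous-discriminant idiom: range-check the discriminant, then subtract the block's base. A standalone toy version, using an invented enum rather than `X86InlineAsmReg`:

    // Toy version of the range-check-then-subtract idiom; it relies on the
    // variants in the block having consecutive discriminants, just as
    // xmm0..=xmm15 are declared contiguously in rustc_target.
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Reg {
        Gpr0,
        Xmm0,
        Xmm1,
        Xmm2,
        Ymm0,
    }

    fn xmm_index(reg: Reg) -> Option<u32> {
        use Reg::*;
        if (reg as u32) >= Xmm0 as u32 && (reg as u32) <= Xmm2 as u32 {
            Some(reg as u32 - Xmm0 as u32)
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(xmm_index(Reg::Xmm2), Some(2));
        assert_eq!(xmm_index(Reg::Ymm0), None);
    }
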
@@ -546,37 +552,7 @@ fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
 /// If the register is an AArch64 integer register then return its index.
 fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
     match reg {
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x0) => Some(0),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x1) => Some(1),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x2) => Some(2),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x3) => Some(3),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x4) => Some(4),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x5) => Some(5),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x6) => Some(6),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x7) => Some(7),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x8) => Some(8),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x9) => Some(9),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x10) => Some(10),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x11) => Some(11),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x12) => Some(12),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x13) => Some(13),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x14) => Some(14),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x15) => Some(15),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x16) => Some(16),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x17) => Some(17),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x18) => Some(18),
-        // x19 is reserved
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x20) => Some(20),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x21) => Some(21),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x22) => Some(22),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x23) => Some(23),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x24) => Some(24),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x25) => Some(25),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x26) => Some(26),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x27) => Some(27),
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x28) => Some(28),
-        // x29 is reserved
-        InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) => Some(30),
+        InlineAsmReg::AArch64(r) => r.reg_index(),
         _ => None,
     }
 }
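
The point of delegating to `reg_index()` is that the holes stay documented in one place: per the deleted arms, x19 and x29 are reserved and must never yield an index. A hedged standalone sketch of that behavior (invented `a64_index` helper, not rustc_target's implementation):

    // Standalone sketch, not rustc_target's code: map "x0".."x30" to an
    // index, refusing the reserved registers the deleted match skipped.
    fn a64_index(name: &str) -> Option<u32> {
        let n: u32 = name.strip_prefix('x')?.parse().ok()?;
        match n {
            19 | 29 => None, // reserved, as in the original match arms
            0..=30 => Some(n),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(a64_index("x30"), Some(30));
        assert_eq!(a64_index("x29"), None);
        assert_eq!(a64_index("w0"), None);
    }
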
@@ -584,18 +560,14 @@ fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
 /// If the register is an AArch64 vector register then return its index.
 fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
     match reg {
-        InlineAsmReg::AArch64(reg)
-            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
-                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
-        {
-            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
-        }
+        InlineAsmReg::AArch64(reg) => reg.vreg_index(),
         _ => None,
     }
 }
 
 /// Converts a register class to an LLVM constraint code.
 fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
+    use InlineAsmRegClass::*;
     match reg {
         // For vector registers LLVM wants the register name to match the type size.
         InlineAsmRegOrRegClass::Reg(reg) => {
@@ -652,75 +624,73 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
         // The constraints can be retrieved from
         // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
         InlineAsmRegOrRegClass::RegClass(reg) => match reg {
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
-            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
-            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            AArch64(AArch64InlineAsmRegClass::reg) => "r",
+            AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+            AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+            AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
+            Arm(ArmInlineAsmRegClass::reg) => "r",
+            Arm(ArmInlineAsmRegClass::sreg)
+            | Arm(ArmInlineAsmRegClass::dreg_low16)
+            | Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
+            Arm(ArmInlineAsmRegClass::sreg_low16)
+            | Arm(ArmInlineAsmRegClass::dreg_low8)
+            | Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
+            Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w",
+            Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+            LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
+            LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
+            Mips(MipsInlineAsmRegClass::reg) => "r",
+            Mips(MipsInlineAsmRegClass::freg) => "f",
+            Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+            Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+            Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+            PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+            PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+            PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+            PowerPC(PowerPCInlineAsmRegClass::cr)
+            | PowerPC(PowerPCInlineAsmRegClass::xer)
+            | PowerPC(PowerPCInlineAsmRegClass::vreg) => {
                 unreachable!("clobber-only")
             }
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
-            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
-            InlineAsmRegClass::X86(
+            RiscV(RiscVInlineAsmRegClass::reg) => "r",
+            RiscV(RiscVInlineAsmRegClass::freg) => "f",
+            RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
+            X86(X86InlineAsmRegClass::reg) => "r",
+            X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+            X86(X86InlineAsmRegClass::reg_byte) => "q",
+            X86(X86InlineAsmRegClass::xmm_reg) | X86(X86InlineAsmRegClass::ymm_reg) => "x",
+            X86(X86InlineAsmRegClass::zmm_reg) => "v",
+            X86(X86InlineAsmRegClass::kreg) => "^Yk",
+            X86(
                 X86InlineAsmRegClass::x87_reg
                 | X86InlineAsmRegClass::mmx_reg
                 | X86InlineAsmRegClass::kreg0
                 | X86InlineAsmRegClass::tmm_reg,
             ) => unreachable!("clobber-only"),
-            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
-            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
-            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-                bug!("LLVM backend does not support SPIR-V")
+            Wasm(WasmInlineAsmRegClass::local) => "r",
+            Bpf(BpfInlineAsmRegClass::reg) => "r",
+            Bpf(BpfInlineAsmRegClass::wreg) => "w",
+            Avr(AvrInlineAsmRegClass::reg) => "r",
+            Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+            Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+            Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+            Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+            S390x(S390xInlineAsmRegClass::reg) => "r",
+            S390x(S390xInlineAsmRegClass::reg_addr) => "a",
+            S390x(S390xInlineAsmRegClass::freg) => "f",
+            S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => {
+                unreachable!("clobber-only")
             }
-            InlineAsmRegClass::Err => unreachable!(),
+            Sparc(SparcInlineAsmRegClass::reg) => "r",
+            Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+            Msp430(Msp430InlineAsmRegClass::reg) => "r",
+            M68k(M68kInlineAsmRegClass::reg) => "r",
+            M68k(M68kInlineAsmRegClass::reg_addr) => "a",
+            M68k(M68kInlineAsmRegClass::reg_data) => "d",
+            CSKY(CSKYInlineAsmRegClass::reg) => "r",
+            CSKY(CSKYInlineAsmRegClass::freg) => "f",
+            SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
+            Err => unreachable!(),
         }
         .to_string(),
     }
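
For readers coming from the `asm!` side: the register classes on the left of this table are what appear in `in(...)`/`out(...)` operands, and the strings on the right are the constraint codes LLVM sees. A runnable x86_64 illustration of the plain `reg` to `"r"` row; this is user code written for this note, not part of the patch.

    // Runnable on x86_64 only: both operands use the `reg` class, which the
    // match above lowers to LLVM's generic "r" constraint.
    #[cfg(target_arch = "x86_64")]
    fn add_via_lea(a: u64, b: u64) -> u64 {
        let out: u64;
        unsafe {
            core::arch::asm!(
                "lea {out}, [{a} + {b}]",
                a = in(reg) a,
                b = in(reg) b,
                out = out(reg) out,
                options(pure, nomem, nostack),
            );
        }
        out
    }

    fn main() {
        #[cfg(target_arch = "x86_64")]
        assert_eq!(add_via_lea(40, 2), 42);
    }
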
@@ -732,44 +702,41 @@ fn modifier_to_llvm(
     reg: InlineAsmRegClass,
     modifier: Option<char>,
 ) -> Option<char> {
+    use InlineAsmRegClass::*;
     // The modifiers can be retrieved from
     // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
     match reg {
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
-        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
-            if modifier == Some('v') { None } else { modifier }
-        }
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
-            unreachable!("clobber-only")
+        AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            if modifier == Some('v') {
+                None
+            } else {
+                modifier
+            }
         }
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+        AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
+        Arm(ArmInlineAsmRegClass::reg) => None,
+        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+        Arm(ArmInlineAsmRegClass::dreg)
+        | Arm(ArmInlineAsmRegClass::dreg_low16)
+        | Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+        Arm(ArmInlineAsmRegClass::qreg)
+        | Arm(ArmInlineAsmRegClass::qreg_low8)
+        | Arm(ArmInlineAsmRegClass::qreg_low4) => {
             if modifier.is_none() {
                 Some('q')
             } else {
                 modifier
             }
         }
-        InlineAsmRegClass::Hexagon(_) => None,
-        InlineAsmRegClass::LoongArch(_) => None,
-        InlineAsmRegClass::Mips(_) => None,
-        InlineAsmRegClass::Nvptx(_) => None,
-        InlineAsmRegClass::PowerPC(_) => None,
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
-        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+        Hexagon(_) => None,
+        LoongArch(_) => None,
+        Mips(_) => None,
+        Nvptx(_) => None,
+        PowerPC(_) => None,
+        RiscV(RiscVInlineAsmRegClass::reg) | RiscV(RiscVInlineAsmRegClass::freg) => None,
+        RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
+        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
             None if arch == InlineAsmArch::X86_64 => Some('q'),
             None => Some('k'),
             Some('l') => Some('b'),
@@ -779,10 +746,10 @@ fn modifier_to_llvm(
             Some('r') => Some('q'),
             _ => unreachable!(),
         },
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
-        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
-        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
-        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+        X86(X86InlineAsmRegClass::reg_byte) => None,
+        X86(reg @ X86InlineAsmRegClass::xmm_reg)
+        | X86(reg @ X86InlineAsmRegClass::ymm_reg)
+        | X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
             (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
             (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
             (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
@@ -791,116 +758,105 @@ fn modifier_to_llvm(
             (_, Some('z')) => Some('g'),
             _ => unreachable!(),
         },
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
-        InlineAsmRegClass::X86(
+        X86(X86InlineAsmRegClass::kreg) => None,
+        X86(
             X86InlineAsmRegClass::x87_reg
             | X86InlineAsmRegClass::mmx_reg
             | X86InlineAsmRegClass::kreg0
             | X86InlineAsmRegClass::tmm_reg,
-        ) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
-        InlineAsmRegClass::Bpf(_) => None,
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
-        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
-        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
+        ) => unreachable!("clobber-only"),
+        Wasm(WasmInlineAsmRegClass::local) => None,
+        Bpf(_) => None,
+        Avr(AvrInlineAsmRegClass::reg_pair)
+        | Avr(AvrInlineAsmRegClass::reg_iw)
+        | Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
             Some('h') => Some('B'),
             Some('l') => Some('A'),
             _ => None,
         },
-        InlineAsmRegClass::Avr(_) => None,
-        InlineAsmRegClass::S390x(_) => None,
-        InlineAsmRegClass::Msp430(_) => None,
-        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-            bug!("LLVM backend does not support SPIR-V")
-        }
-        InlineAsmRegClass::M68k(_) => None,
-        InlineAsmRegClass::CSKY(_) => None,
-        InlineAsmRegClass::Err => unreachable!(),
+        Avr(_) => None,
+        S390x(_) => None,
+        Sparc(_) => None,
+        Msp430(_) => None,
+        SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
+        M68k(_) => None,
+        CSKY(_) => None,
+        Err => unreachable!(),
     }
 }
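
The x86 `reg` rows above are the ones most users hit: with no modifier the operand prints its full-width name (LLVM modifier 'q' on x86_64, 'k' on 32-bit), and `:l` selects the low byte name (LLVM modifier 'b'). A runnable x86_64 example of that mapping; user code written for this note, not from the patch.

    // Runnable on x86_64 only: the default substitution uses the 64-bit
    // register name (e.g. rcx), while `{x:l}` substitutes the low byte
    // name (e.g. cl).
    #[cfg(target_arch = "x86_64")]
    fn low_byte(x: u64) -> u64 {
        let out: u64;
        unsafe {
            core::arch::asm!(
                "movzx {out}, {x:l}",
                x = in(reg) x,
                out = out(reg) out,
                options(pure, nomem, nostack),
            );
        }
        out
    }

    fn main() {
        #[cfg(target_arch = "x86_64")]
        assert_eq!(low_byte(0x1234), 0x34);
    }
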
 
 /// Type to use for outputs that are discarded. It doesn't really matter what
 /// the type is, as long as it is valid for the constraint code.
 fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
+    use InlineAsmRegClass::*;
     match reg {
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
-        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+        AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             cx.type_vector(cx.type_i64(), 2)
         }
-        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+        AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
+        Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
+        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+        Arm(ArmInlineAsmRegClass::dreg)
+        | Arm(ArmInlineAsmRegClass::dreg_low16)
+        | Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+        Arm(ArmInlineAsmRegClass::qreg)
+        | Arm(ArmInlineAsmRegClass::qreg_low8)
+        | Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2),
+        Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
+        LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
+        Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+        Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+        Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+        Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+        Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+        PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+        PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+        PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+        PowerPC(PowerPCInlineAsmRegClass::cr)
+        | PowerPC(PowerPCInlineAsmRegClass::xer)
+        | PowerPC(PowerPCInlineAsmRegClass::vreg) => {
             unreachable!("clobber-only")
         }
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
-        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
-            cx.type_vector(cx.type_i64(), 2)
-        }
-        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
-        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
-        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
-        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
-        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
-            unreachable!("clobber-only")
-        }
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
-        InlineAsmRegClass::X86(
+        RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+        RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+        RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
+        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+        X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+        X86(X86InlineAsmRegClass::xmm_reg)
+        | X86(X86InlineAsmRegClass::ymm_reg)
+        | X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+        X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        X86(
             X86InlineAsmRegClass::x87_reg
             | X86InlineAsmRegClass::mmx_reg
             | X86InlineAsmRegClass::kreg0
             | X86InlineAsmRegClass::tmm_reg,
-        ) => {
+        ) => unreachable!("clobber-only"),
+        Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+        Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
+        Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+        Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
+        Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
+        Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
+        Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
+        Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
+        S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(),
+        S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+        S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => {
             unreachable!("clobber-only")
         }
-        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
-        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
-        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
-        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
-        InlineAsmRegClass::S390x(
-            S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
-        ) => cx.type_i32(),
-        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
-        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
-        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
-        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
-        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
-        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
-        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-            bug!("LLVM backend does not support SPIR-V")
-        }
-        InlineAsmRegClass::Err => unreachable!(),
+        Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(),
+        Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+        Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
+        M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
+        M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
+        M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
+        SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
+        Err => unreachable!(),
     }
 }
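
A user-level counterpart of why `dummy_output_type` exists: an output discarded with `_` still occupies a slot in the constraint string, so codegen needs some type for it even though nothing reads the value; this function supplies that type. A runnable x86_64 illustration (user code written for this note, not from the patch):

    // Runnable on x86_64 only: `rdtsc` writes edx:eax; the high half is
    // discarded with `_`, and a slot like that is what dummy_output_type
    // fills in a type for.
    #[cfg(target_arch = "x86_64")]
    fn tsc_low() -> u32 {
        let lo: u32;
        unsafe {
            core::arch::asm!(
                "rdtsc",
                out("eax") lo,
                out("edx") _, // discarded output
                options(nomem, nostack),
            );
        }
        lo
    }

    fn main() {
        #[cfg(target_arch = "x86_64")]
        println!("tsc low half: {}", tsc_low());
    }
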
 
@@ -940,9 +896,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
     layout: &TyAndLayout<'tcx>,
     instance: Instance<'_>,
 ) -> &'ll Value {
+    use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -950,7 +907,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -963,43 +920,42 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
-            Abi::Vector { element, count },
-        ) if layout.size.bytes() == 8 => {
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
+            if layout.size.bytes() == 8 =>
+        {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
             bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -1009,18 +965,18 @@ fn llvm_fixup_input<'ll, 'tcx>(
             bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
         (
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
-            Abi::Scalar(s),
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
@@ -1029,12 +985,12 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -1043,7 +999,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16
@@ -1051,11 +1007,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
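
The MIPS arms at the end of this hunk and in the matching output fixups form a round trip: sub-word integers are widened to register width going into the asm and truncated back coming out. A plain-Rust model of that pairing, with invented function names; the real code performs these steps on LLVM values via `bx.zext` and `bx.trunc`.

    // Plain-Rust model of the MIPS fixups: widen sub-word integers to the
    // 32-bit register width on the way in, narrow them again on the way out.
    fn mips_fixup_input(value: u16) -> u32 {
        value as u32 // models the zext to i32
    }

    fn mips_fixup_output(value: u32) -> u16 {
        value as u16 // models the trunc back to the declared output type
    }

    fn main() {
        let widened = mips_fixup_input(0xBEEF);
        assert_eq!(mips_fixup_output(widened), 0xBEEF);
    }
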
@@ -1064,7 +1020,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1086,15 +1042,16 @@ fn llvm_fixup_output<'ll, 'tcx>(
     layout: &TyAndLayout<'tcx>,
     instance: Instance<'_>,
 ) -> &'ll Value {
-    match (reg, layout.abi) {
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    use InlineAsmRegClass::*;
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1103,60 +1060,59 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
-            Abi::Vector { element, count },
-        ) if layout.size.bytes() == 8 => {
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
+            if layout.size.bytes() == 8 =>
+        {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
             bx.bitcast(value, bx.type_f128())
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
         (
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
-            Abi::Scalar(s),
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
@@ -1165,12 +1121,12 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1179,7 +1135,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16
@@ -1187,11 +1143,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1201,7 +1157,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1220,70 +1176,70 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     layout: &TyAndLayout<'tcx>,
     instance: Instance<'_>,
 ) -> &'ll Type {
-    match (reg, layout.abi) {
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    use InlineAsmRegClass::*;
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
-            Abi::Vector { element, count },
-        ) if layout.size.bytes() == 8 => {
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
+            if layout.size.bytes() == 8 =>
+        {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
             cx.type_vector(cx.type_i32(), 4)
         }
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
         (
-            InlineAsmRegClass::X86(
+            X86(
                 X86InlineAsmRegClass::xmm_reg
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
         (
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
-            Abi::Scalar(s),
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
@@ -1292,12 +1248,12 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
@@ -1306,7 +1262,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             }
         }
         (
-            InlineAsmRegClass::Arm(
+            Arm(
                 ArmInlineAsmRegClass::dreg
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16
@@ -1314,11 +1270,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1327,7 +1283,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 _ => layout.llvm_type(cx),
             }
         }
-        (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
         {