Diffstat (limited to 'compiler/rustc_codegen_llvm/src/asm.rs')
| mode | file | lines changed |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/asm.rs | 93 |
1 file changed, 51 insertions, 42 deletions
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 3c30822a2e2..53758967552 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Type {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
         (
             X86(
@@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
@@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
@@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 _ => layout.llvm_type(cx),
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
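Reading the hunks above, the only change is the rustc_abi rename: the `Abi` enum becomes `BackendRepr` and the layout field `layout.abi` becomes `layout.backend_repr`, applied to the three inline-asm fixup helpers `llvm_fixup_input`, `llvm_fixup_output`, and `llvm_fixup_output_type`. The fixup logic in each match arm is untouched; a few arms are simply re-wrapped onto multiple lines because the longer name pushes them past the formatting line limit. As a rough illustration of the match-on-representation shape these functions share, here is a minimal, self-contained Rust sketch; `BackendRepr`, `Layout`, and `describe` below are simplified stand-ins invented for this example, not the real `rustc_abi` or `rustc_codegen_llvm` items.

// Minimal sketch with stand-in types (NOT the real rustc_abi API) to show the
// rename this diff applies: `Abi` -> `BackendRepr`, `layout.abi` -> `layout.backend_repr`.
#[derive(Clone, Copy)]
enum BackendRepr {
    // Simplified stand-ins for the variants matched in asm.rs.
    Scalar { bits: u64 },
    Vector { element_bits: u64, count: u64 },
    Memory,
}

struct Layout {
    backend_repr: BackendRepr, // formerly the `abi` field
    size_bytes: u64,
}

// The fixup helpers dispatch on the value's backend representation; after the
// rename only the names in the scrutinee and patterns change, not the logic.
fn describe(layout: &Layout) -> String {
    match layout.backend_repr {
        BackendRepr::Scalar { bits } => format!("scalar, {bits} bits"),
        BackendRepr::Vector { element_bits, count } => {
            format!("vector, {count} x {element_bits} bits ({} bytes total)", layout.size_bytes)
        }
        BackendRepr::Memory => "passed in memory".to_string(),
    }
}

fn main() {
    let examples = [
        Layout { backend_repr: BackendRepr::Scalar { bits: 64 }, size_bytes: 8 },
        Layout { backend_repr: BackendRepr::Vector { element_bits: 16, count: 8 }, size_bytes: 16 },
        Layout { backend_repr: BackendRepr::Memory, size_bytes: 32 },
    ];
    for layout in &examples {
        println!("{}", describe(layout));
    }
}

The sketch prints one description per stand-in layout; in the real helpers the corresponding arms return LLVM types or bitcast values rather than strings.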
