Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--   compiler/rustc_codegen_llvm/src/asm.rs         39
-rw-r--r--   compiler/rustc_codegen_llvm/src/intrinsic.rs    2
-rw-r--r--   compiler/rustc_codegen_llvm/src/type_of.rs     10
3 files changed, 27 insertions, 24 deletions
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index be5673eddf9..e8a69743157 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -939,9 +939,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
@@ -954,7 +955,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -989,7 +990,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
@@ -1026,7 +1027,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
@@ -1099,9 +1100,10 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
@@ -1114,7 +1116,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1145,7 +1147,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
@@ -1182,7 +1184,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
@@ -1243,9 +1245,10 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
@@ -1256,7 +1259,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1284,7 +1287,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
@@ -1321,7 +1324,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 1169b9c067d..0272667e223 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -470,7 +470,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 let layout = self.layout_of(tp_ty).layout;
                 let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
-                    Vector { .. } => false,
+                    SimdVector { .. } => false,
                     Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index ba01fbff385..4e7096da502 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -19,7 +19,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 ) -> &'a Type {
     match layout.backend_repr {
         BackendRepr::Scalar(_) => bug!("handled elsewhere"),
-        BackendRepr::Vector { element, count } => {
+        BackendRepr::SimdVector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
@@ -171,7 +171,7 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
         }
     }
@@ -179,9 +179,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_scalar_pair(&self) -> bool {
         match self.backend_repr {
             BackendRepr::ScalarPair(..) => true,
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::SimdVector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
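The change above is a mechanical rename of the `BackendRepr::Vector` variant to `BackendRepr::SimdVector`, updating every match arm in the LLVM backend that destructures it. The following is a minimal, self-contained sketch of the renamed pattern; it uses simplified stand-in types, not the real `rustc_abi` definitions, and is not part of this commit.

// Simplified stand-in for the `rustc_abi::BackendRepr` enum; the real type
// carries scalar, size, and alignment details that are elided here.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum BackendRepr {
    Scalar(u8),
    ScalarPair(u8, u8),
    // Renamed from `Vector` by this commit.
    SimdVector { element: u8, count: u64 },
    Memory { sized: bool },
}

// Mirrors the shape of `is_llvm_immediate` in type_of.rs after the rename:
// scalars and SIMD vectors are immediates, pairs and memory aggregates are not.
fn is_llvm_immediate(repr: BackendRepr) -> bool {
    match repr {
        BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
    }
}

fn main() {
    let v = BackendRepr::SimdVector { element: 0, count: 4 };
    assert!(is_llvm_immediate(v));
    assert!(!is_llvm_immediate(BackendRepr::Memory { sized: true }));
    println!("{v:?} is an LLVM immediate");
}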
