| field | value | date |
|---|---|---|
| author | Trevor Gross <t.gross35@gmail.com> | 2024-08-27 01:46:53 -0500 |
| committer | GitHub <noreply@github.com> | 2024-08-27 01:46:53 -0500 |
| commit | 8ea70e9537f0e22c2eb32eabc08175093617fb16 (patch) | |
| tree | 1e4445f7e86e81f2cb65f723245996ca8c2b6a39 /compiler/rustc_codegen_llvm/src | |
| parent | 3c131a3f54c6ebe4dd8edac0236655996dfb2c51 (diff) | |
| parent | abd44fc5f4211a0684da4b37a15c3d07b17a436e (diff) | |
| download | rust-8ea70e9537f0e22c2eb32eabc08175093617fb16.tar.gz, rust-8ea70e9537f0e22c2eb32eabc08175093617fb16.zip | |
Rollup merge of #129536 - beetrees:f16-f128-inline-asm-aarch64, r=Amanieu
Add `f16` and `f128` inline ASM support for `aarch64`

Adds `f16` and `f128` inline ASM support for `aarch64`. SIMD vector types are taken from [the ARM intrinsics list](https://developer.arm.com/architectures/instruction-sets/intrinsics/#f:@navigationhierarchiesreturnbasetype=[float]&f:@navigationhierarchieselementbitsize=[16]&f:@navigationhierarchiesarchitectures=[A64]).

Based on the work of `@lengrongfu` in #127043.

Relevant issue: #125398
Tracking issue: #116909

`@rustbot` label +F-f16_and_f128

try-job: aarch64-gnu
try-job: aarch64-apple
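For context (not part of the commit itself), a minimal sketch of what this support enables: passing an `f16` to an `asm!` operand in an AArch64 vector register. It assumes the `vreg` class accepts `f16` operands after this change, uses only baseline NEON instructions (no FEAT_FP16 arithmetic), and `copy_f16` is a hypothetical helper, not code from the PR.

```rust
#![feature(f16)]
use std::arch::asm;

/// Hypothetical example: round-trip an `f16` through an AArch64 vector
/// register. `dup Vd.8h, Vn.h[0]` broadcasts lane 0 of the source into the
/// destination, so the scalar comes back out of lane 0 of `dst`.
#[cfg(target_arch = "aarch64")]
fn copy_f16(x: f16) -> f16 {
    let y: f16;
    unsafe {
        asm!(
            "dup {dst:v}.8h, {src:v}.h[0]",
            src = in(vreg) x,
            dst = out(vreg) y,
        );
    }
    y
}
```

The `:v` template modifier substitutes the full `vN` register name, which is what allows the NEON lane syntax (`.8h`, `.h[0]`) to be written directly in the template.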
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/asm.rs | 14 |

1 file changed, 11 insertions(+), 3 deletions(-)
```diff
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index f931698c38f..1d91c3fb17d 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -913,8 +913,10 @@ fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Ty
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
         Primitive::Int(Integer::I32, _) => cx.type_i32(),
         Primitive::Int(Integer::I64, _) => cx.type_i64(),
+        Primitive::Float(Float::F16) => cx.type_f16(),
         Primitive::Float(Float::F32) => cx.type_f32(),
         Primitive::Float(Float::F64) => cx.type_f64(),
+        Primitive::Float(Float::F128) => cx.type_f128(),
         // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
         Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
         _ => unreachable!(),
@@ -948,7 +950,9 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+            if s.primitive() != Primitive::Float(Float::F128) =>
+        {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
@@ -1090,7 +1094,9 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 value
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+            if s.primitive() != Primitive::Float(Float::F128) =>
+        {
             value = bx.extract_element(value, bx.const_i32(0));
             if let Primitive::Pointer(_) = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
@@ -1222,7 +1228,9 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 layout.llvm_type(cx)
             }
         }
-        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+            if s.primitive() != Primitive::Float(Float::F128) =>
+        {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
```
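The guard added in all three `vreg_low16` hunks keeps `f128` out of the vector-wrapping fixup: for a 16-byte scalar, `16 / layout.size.bytes()` would be 1, so an `f128` presumably passes through as a plain scalar rather than being wrapped into a one-element vector. As a hedged usage sketch (again not from the PR; `copy_f128` is a hypothetical helper, and it assumes `vreg` accepts `f128` operands after this change), round-tripping an `f128` through a vector register could look like:

```rust
#![feature(f128)]
use std::arch::asm;

/// Hypothetical example: copy an `f128` between AArch64 vector registers with
/// a whole-register NEON move (`mov Vd.16b, Vn.16b`, an alias of ORR), which
/// only needs the baseline `neon` feature.
#[cfg(target_arch = "aarch64")]
fn copy_f128(x: f128) -> f128 {
    let y: f128;
    unsafe {
        asm!(
            "mov {dst:v}.16b, {src:v}.16b",
            src = in(vreg) x,
            dst = out(vreg) y,
        );
    }
    y
}
```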
