Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs  54
1 file changed, 53 insertions(+), 1 deletion(-)
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 3d2ce550b23..39bbf87bea7 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -482,8 +482,60 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             _ if name.as_str().starts_with("simd_") => {
+                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
+                // This gives them the expected layout of a regular #[repr(simd)] vector.
+                let mut loaded_args = Vec::new();
+                for (ty, arg) in arg_tys.iter().zip(args) {
+                    loaded_args.push(
+                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
+                        // with reduced alignment and no padding) rather than as immediates.
+                        // We can use a vector load to fix the layout and turn the argument
+                        // into an immediate.
+                        if ty.is_simd()
+                            && let OperandValue::Ref(place) = arg.val
+                        {
+                            let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
+                            let elem_ll_ty = match elem_ty.kind() {
+                                ty::Float(f) => self.type_float_from_ty(*f),
+                                ty::Int(i) => self.type_int_from_ty(*i),
+                                ty::Uint(u) => self.type_uint_from_ty(*u),
+                                ty::RawPtr(_, _) => self.type_ptr(),
+                                _ => unreachable!(),
+                            };
+                            let loaded =
+                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
+                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
+                        } else {
+                            *arg
+                        },
+                    );
+                }
+
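+                // Likewise compute the return type as a regular LLVM vector:
+                // if the intrinsic returns a #[repr(packed, simd)] vector (an
+                // in-memory aggregate), build the result as an immediate
+                // vector instead.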
+                let llret_ty = if ret_ty.is_simd()
+                    && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+                {
+                    let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
+                    let elem_ll_ty = match elem_ty.kind() {
+                        ty::Float(f) => self.type_float_from_ty(*f),
+                        ty::Int(i) => self.type_int_from_ty(*i),
+                        ty::Uint(u) => self.type_uint_from_ty(*u),
+                        ty::RawPtr(_, _) => self.type_ptr(),
+                        _ => unreachable!(),
+                    };
+                    self.type_vector(elem_ll_ty, size)
+                } else {
+                    llret_ty
+                };
+
                 match generic_simd_intrinsic(
-                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+                    self,
+                    name,
+                    callee_ty,
+                    fn_args,
+                    &loaded_args,
+                    ret_ty,
+                    llret_ty,
+                    span,
                 ) {
                     Ok(llval) => llval,
                     Err(()) => return Ok(()),
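
For context, here is a minimal sketch (not part of the patch) of the layout difference the argument loop above compensates for. It assumes a nightly compiler with #![feature(repr_simd)]; the exact field syntax accepted by repr(simd) has varied across nightlies, and the type names below are illustrative only.

#![feature(repr_simd)]

// A regular #[repr(simd)] vector with 3 lanes is padded and aligned as if it
// had 4 lanes, so rustc can pass it to intrinsics as an immediate LLVM vector.
#[repr(simd)]
#[derive(Copy, Clone)]
struct F32x3([f32; 3]);

// A #[repr(packed, simd)] vector drops the padding and keeps the element
// alignment, so it is laid out like [f32; 3] and passed by reference instead.
#[repr(packed, simd)]
#[derive(Copy, Clone)]
struct PackedF32x3([f32; 3]);

fn main() {
    // On a typical 64-bit target this prints size 16 / align 16 for F32x3 and
    // size 12 / align 4 for PackedF32x3 (exact values are target-dependent).
    println!("F32x3:       size={}, align={}",
             std::mem::size_of::<F32x3>(), std::mem::align_of::<F32x3>());
    println!("PackedF32x3: size={}, align={}",
             std::mem::size_of::<PackedF32x3>(), std::mem::align_of::<PackedF32x3>());
}

Because PackedF32x3 is laid out like an array, it reaches the intrinsic as an OperandValue::Ref; the vector load in the hunk above re-reads it as an immediate <3 x float>, and the llret_ty computation applies the same fix to the return value.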