Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--   compiler/rustc_codegen_llvm/src/builder.rs   |  12
-rw-r--r--   compiler/rustc_codegen_llvm/src/context.rs   |   1
-rw-r--r--   compiler/rustc_codegen_llvm/src/intrinsic.rs | 111
-rw-r--r--   compiler/rustc_codegen_llvm/src/va_arg.rs    | 185
4 files changed, 239 insertions, 70 deletions
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index fcb55a04635..167678c2ff1 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1452,9 +1452,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
     fn get_static(&mut self, def_id: DefId) -> &'ll Value {
         // Forward to the `get_static` method of `CodegenCx`
-        let s = self.cx().get_static(def_id);
-        // Cast to default address space if globals are in a different addrspace
-        self.cx().const_pointercast(s, self.type_ptr())
+        let global = self.cx().get_static(def_id);
+        if self.cx().tcx.is_thread_local_static(def_id) {
+            let pointer = self.call_intrinsic("llvm.threadlocal.address", &[global]);
+            // Cast to default address space if globals are in a different addrspace
+            self.pointercast(pointer, self.type_ptr())
+        } else {
+            // Cast to default address space if globals are in a different addrspace
+            self.cx().const_pointercast(global, self.type_ptr())
+        }
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 8cc2cb9c333..8d6e1d8941b 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -1243,6 +1243,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
 
         ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
+        ifn!("llvm.threadlocal.address", fn(ptr) -> ptr);
 
         None
     }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index e8629aeebb9..989752eb78e 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -169,19 +169,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         span: Span,
     ) -> Result<(), ty::Instance<'tcx>> {
         let tcx = self.tcx;
-        let callee_ty = instance.ty(tcx, self.typing_env());
-        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
-            bug!("expected fn item type, found {}", callee_ty);
-        };
-
-        let sig = callee_ty.fn_sig(tcx);
-        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
-        let arg_tys = sig.inputs();
-        let ret_ty = sig.output();
-        let name = tcx.item_name(def_id);
-
-        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+        let name = tcx.item_name(instance.def_id());
+        let fn_args = instance.args;
 
         let simple = get_simple_intrinsic(self, name);
 
         let llval = match name {
@@ -265,22 +255,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 BackendRepr::Scalar(scalar) => {
                     match scalar.primitive() {
                         Primitive::Int(..) => {
-                            if self.cx().size_of(ret_ty).bytes() < 4 {
+                            if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                 // `va_arg` should not be called on an integer type
                                 // less than 4 bytes in length. If it is, promote
                                 // the integer to an `i32` and truncate the result
                                 // back to the smaller type.
                                 let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
-                                self.trunc(promoted_result, llret_ty)
+                                self.trunc(promoted_result, result.layout.llvm_type(self))
                             } else {
-                                emit_va_arg(self, args[0], ret_ty)
+                                emit_va_arg(self, args[0], result.layout.ty)
                             }
                         }
                         Primitive::Float(Float::F16) => {
                             bug!("the va_arg intrinsic does not work with `f16`")
                         }
                         Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
-                            emit_va_arg(self, args[0], ret_ty)
+                            emit_va_arg(self, args[0], result.layout.ty)
                         }
                         // `va_arg` should never be used with the return type f32.
                         Primitive::Float(Float::F32) => {
@@ -384,7 +374,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             | sym::rotate_right
             | sym::saturating_add
             | sym::saturating_sub => {
-                let ty = arg_tys[0];
+                let ty = args[0].layout.ty;
                 if !ty.is_integral() {
                     tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                         span,
@@ -403,26 +393,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             &[args[0].immediate(), y],
                         );
 
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::ctlz_nonzero => {
                         let y = self.const_bool(true);
                         let llvm_name = &format!("llvm.ctlz.i{width}");
                         let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::cttz_nonzero => {
                         let y = self.const_bool(true);
                         let llvm_name = &format!("llvm.cttz.i{width}");
                         let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::ctpop => {
                         let ret = self.call_intrinsic(
                             &format!("llvm.ctpop.i{width}"),
                             &[args[0].immediate()],
                         );
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::bswap => {
                         if width == 8 {
@@ -554,16 +544,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                 // This gives them the expected layout of a regular #[repr(simd)] vector.
                 let mut loaded_args = Vec::new();
-                for (ty, arg) in arg_tys.iter().zip(args) {
+                for arg in args {
                     loaded_args.push(
                         // #[repr(packed, simd)] vectors are passed like arrays (as references,
                         // with reduced alignment and no padding) rather than as immediates.
                         // We can use a vector load to fix the layout and turn the argument
                         // into an immediate.
-                        if ty.is_simd()
+                        if arg.layout.ty.is_simd()
                             && let OperandValue::Ref(place) = arg.val
                         {
-                            let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
+                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                             let elem_ll_ty = match elem_ty.kind() {
                                 ty::Float(f) => self.type_float_from_ty(*f),
                                 ty::Int(i) => self.type_int_from_ty(*i),
@@ -580,10 +570,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     );
                 }
 
-                let llret_ty = if ret_ty.is_simd()
-                    && let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr
+                let llret_ty = if result.layout.ty.is_simd()
+                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                 {
-                    let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
+                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                     let elem_ll_ty = match elem_ty.kind() {
                         ty::Float(f) => self.type_float_from_ty(*f),
                         ty::Int(i) => self.type_int_from_ty(*i),
@@ -593,16 +583,15 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     };
                     self.type_vector(elem_ll_ty, size)
                 } else {
-                    llret_ty
+                    result.layout.llvm_type(self)
                 };
 
                 match generic_simd_intrinsic(
                     self,
                     name,
-                    callee_ty,
                     fn_args,
                     &loaded_args,
-                    ret_ty,
+                    result.layout.ty,
                     llret_ty,
                     span,
                 ) {
@@ -621,9 +610,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };
 
         if result.layout.ty.is_bool() {
-            OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
-                .val
-                .store(self, result);
+            let val = self.from_immediate(llval);
+            self.store_to_place(val, result.val);
         } else if !result.layout.ty.is_unit() {
             self.store_to_place(llval, result.val);
         }
@@ -1151,7 +1139,6 @@ fn get_rust_try_fn<'a, 'll, 'tcx>(
 fn generic_simd_intrinsic<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     name: Symbol,
-    callee_ty: Ty<'tcx>,
     fn_args: GenericArgsRef<'tcx>,
     args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
@@ -1222,26 +1209,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
     }
 
-    let tcx = bx.tcx();
-    let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
-    let arg_tys = sig.inputs();
-
     // Sanity-check: all vector arguments must be immediates.
     if cfg!(debug_assertions) {
-        for (ty, arg) in arg_tys.iter().zip(args) {
-            if ty.is_simd() {
+        for arg in args {
+            if arg.layout.ty.is_simd() {
                 assert_matches!(arg.val, OperandValue::Immediate(_));
             }
        }
     }
 
     if name == sym::simd_select_bitmask {
-        let (len, _) = require_simd!(arg_tys[1], SimdArgument);
+        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
 
         let expected_int_bits = len.max(8).next_power_of_two();
         let expected_bytes = len.div_ceil(8);
 
-        let mask_ty = arg_tys[0];
+        let mask_ty = args[0].layout.ty;
         let mask = match mask_ty.kind() {
             ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
@@ -1275,8 +1258,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     // every intrinsic below takes a SIMD vector as its first argument
-    let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
-    let in_ty = arg_tys[0];
+    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
+    let in_ty = args[0].layout.ty;
 
     let comparison = match name {
         sym::simd_eq => Some(BinOp::Eq),
@@ -1407,13 +1390,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_insert || name == sym::simd_insert_dyn {
         require!(
-            in_elem == arg_tys[2],
+            in_elem == args[2].layout.ty,
             InvalidMonomorphization::InsertedType {
                 span,
                 name,
                 in_elem,
                 in_ty,
-                out_ty: arg_tys[2]
+                out_ty: args[2].layout.ty
             }
         );
 
@@ -1464,7 +1447,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_select {
         let m_elem_ty = in_elem;
         let m_len = in_len;
-        let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
+        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
         require!(
             m_len == v_len,
             InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@@ -1665,9 +1648,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     // The second argument must be a simd vector with an element type that's a pointer
     // to the element type of the first argument
     let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
-    let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+    let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
     // The element type of the third argument must be a signed integer type of any width:
-    let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+    let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
     require_simd!(ret_ty, SimdReturn);
 
     // Of the same length:
@@ -1678,7 +1661,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             name,
             in_len,
             in_ty,
-            arg_ty: arg_tys[1],
+            arg_ty: args[1].layout.ty,
             out_len
         }
     );
@@ -1689,7 +1672,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             name,
             in_len,
             in_ty,
-            arg_ty: arg_tys[2],
+            arg_ty: args[2].layout.ty,
             out_len: out_len2
         }
     );
@@ -1709,7 +1692,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 span,
                 name,
                 expected_element: element_ty1,
-                second_arg: arg_tys[1],
+                second_arg: args[1].layout.ty,
                 in_elem,
                 in_ty,
                 mutability: ExpectedPointerMutability::Not,
@@ -1770,10 +1753,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (mask_len, mask_elem) = (in_len, in_elem);
 
         // The second argument must be a pointer matching the element type
-        let pointer_ty = arg_tys[1];
+        let pointer_ty = args[1].layout.ty;
 
         // The last argument is a passthrough vector providing values for disabled lanes
-        let values_ty = arg_tys[2];
+        let values_ty = args[2].layout.ty;
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         require_simd!(ret_ty, SimdReturn);
@@ -1861,10 +1844,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (mask_len, mask_elem) = (in_len, in_elem);
 
         // The second argument must be a pointer matching the element type
-        let pointer_ty = arg_tys[1];
+        let pointer_ty = args[1].layout.ty;
 
         // The last argument specifies the values to store to memory
-        let values_ty = arg_tys[2];
+        let values_ty = args[2].layout.ty;
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         // Of the same length:
@@ -1944,8 +1927,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     // The second argument must be a simd vector with an element type that's a pointer
     // to the element type of the first argument
     let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
-    let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
-    let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+    let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
+    let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
 
     // Of the same length:
     require!(
@@ -1955,7 +1938,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             name,
             in_len,
             in_ty,
-            arg_ty: arg_tys[1],
+            arg_ty: args[1].layout.ty,
             out_len: element_len1
         }
     );
@@ -1966,7 +1949,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             name,
             in_len,
             in_ty,
-            arg_ty: arg_tys[2],
+            arg_ty: args[2].layout.ty,
             out_len: element_len2
         }
     );
@@ -1981,7 +1964,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 span,
                 name,
                 expected_element: element_ty1,
-                second_arg: arg_tys[1],
+                second_arg: args[1].layout.ty,
                 in_elem,
                 in_ty,
                 mutability: ExpectedPointerMutability::Mut,
@@ -2503,7 +2486,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ptrs = args[0].immediate();
         // The second argument must be a ptr-sized integer.
         // (We don't care about the signedness, this is wrapping anyway.)
-        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
         if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
             span_bug!(
                 span,
@@ -2527,8 +2510,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             return_error!(InvalidMonomorphization::ExpectedVectorElementType {
                 span,
                 name,
-                expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
-                vector_type: arg_tys[0]
+                expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
+                vector_type: args[0].layout.ty
             });
         }
     };
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 8eedb5392b5..236568590be 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -40,6 +40,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     align: Align,
     slot_size: Align,
     allow_higher_align: bool,
+    force_right_adjust: bool,
 ) -> (&'ll Value, Align) {
     let va_list_ty = bx.type_ptr();
     let va_list_addr = list.immediate();
@@ -57,7 +58,10 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     let next = bx.inbounds_ptradd(addr, full_direct_size);
     bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
-    if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
+    if size.bytes() < slot_size.bytes()
+        && bx.tcx().sess.target.endian == Endian::Big
+        && force_right_adjust
+    {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
         let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
         (adjusted, addr_align)
@@ -81,6 +85,11 @@ enum AllowHigherAlign {
     Yes,
 }
 
+enum ForceRightAdjust {
+    No,
+    Yes,
+}
+
 fn emit_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
@@ -88,9 +97,11 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     pass_mode: PassMode,
     slot_size: SlotSize,
     allow_higher_align: AllowHigherAlign,
+    force_right_adjust: ForceRightAdjust,
 ) -> &'ll Value {
     let indirect = matches!(pass_mode, PassMode::Indirect);
     let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
+    let force_right_adjust = matches!(force_right_adjust, ForceRightAdjust::Yes);
     let slot_size = Align::from_bytes(slot_size as u64).unwrap();
 
     let layout = bx.cx.layout_of(target_ty);
@@ -103,8 +114,15 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     } else {
         (layout.llvm_type(bx.cx), layout.size, layout.align)
     };
-    let (addr, addr_align) =
-        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
+    let (addr, addr_align) = emit_direct_ptr_va_arg(
+        bx,
+        list,
+        size,
+        align.abi,
+        slot_size,
+        allow_higher_align,
+        force_right_adjust,
+    );
     if indirect {
         let tmp_ret = bx.load(llty, addr, addr_align);
         bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
@@ -208,6 +226,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
         PassMode::Direct,
         SlotSize::Bytes8,
         AllowHigherAlign::Yes,
+        ForceRightAdjust::No,
     );
 
     bx.br(end);
@@ -218,6 +237,150 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     val
 }
 
+fn emit_powerpc_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
+    // struct __va_list_tag {
+    //     unsigned char gpr;
+    //     unsigned char fpr;
+    //     unsigned short reserved;
+    //     void *overflow_arg_area;
+    //     void *reg_save_area;
+    // };
+    let va_list_addr = list.immediate();
+
+    // Peel off any newtype wrappers.
+    let layout = {
+        let mut layout = bx.cx.layout_of(target_ty);
+
+        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
+            layout = inner;
+        }
+
+        layout
+    };
+
+    // Rust does not currently support any powerpc softfloat targets.
+    let target = &bx.cx.tcx.sess.target;
+    let is_soft_float_abi = target.abi == "softfloat";
+    assert!(!is_soft_float_abi);
+
+    // All instances of VaArgSafe are passed directly.
+    let is_indirect = false;
+
+    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
+            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
+            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
+            rustc_abi::Primitive::Pointer(_) => (false, true, false),
+        },
+        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
+    };
+
+    let num_regs_addr = if is_int || is_soft_float_abi {
+        va_list_addr // gpr
+    } else {
+        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr
+    };
+
+    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);
+
+    // "Align" the register count when the type is passed as `i64`.
+    if is_i64 || (is_f64 && is_soft_float_abi) {
+        num_regs = bx.add(num_regs, bx.const_u8(1));
+        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
+    }
+
+    let max_regs = 8u8;
+    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
+
+    let in_reg = bx.append_sibling_block("va_arg.in_reg");
+    let in_mem = bx.append_sibling_block("va_arg.in_mem");
+    let end = bx.append_sibling_block("va_arg.end");
+
+    bx.cond_br(use_regs, in_reg, in_mem);
+
+    let reg_addr = {
+        bx.switch_to_block(in_reg);
+
+        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
+        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);
+
+        // Floating-point registers start after the general-purpose registers.
+        if !is_int && !is_soft_float_abi {
+            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
+        }
+
+        // Get the address of the saved value by scaling the number of
+        // registers we've used by the number of.
+        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
+        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
+        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);
+
+        // Increase the used-register count.
+        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
+        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
+        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);
+
+        bx.br(end);
+
+        reg_addr
+    };
+
+    let mem_addr = {
+        bx.switch_to_block(in_mem);
+
+        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);
+
+        // Everything in the overflow area is rounded up to a size of at least 4.
+        let overflow_area_align = Align::from_bytes(4).unwrap();
+
+        let size = if !is_indirect {
+            layout.layout.size.align_to(overflow_area_align)
+        } else {
+            dl.pointer_size
+        };
+
+        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
+        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);
+
+        // Round up address of argument to alignment
+        if layout.layout.align.abi > overflow_area_align {
+            overflow_area = round_pointer_up_to_alignment(
+                bx,
+                overflow_area,
+                layout.layout.align.abi,
+                bx.type_ptr(),
+            );
+        }
+
+        let mem_addr = overflow_area;
+
+        // Increase the overflow area.
+        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
+        bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);
+
+        bx.br(end);
+
+        mem_addr
+    };
+
+    // Return the appropriate result.
+    bx.switch_to_block(end);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_type = layout.llvm_type(bx);
+    let val_addr = if is_indirect {
+        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
+    } else {
+        val_addr
+    };
+    bx.load(val_type, val_addr, layout.align.abi)
+}
+
 fn emit_s390x_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
@@ -728,6 +891,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
             PassMode::Direct,
            SlotSize::Bytes4,
            if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+            ForceRightAdjust::No,
         ),
         "aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => {
             emit_ptr_va_arg(
@@ -737,10 +901,24 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
                 PassMode::Direct,
                 SlotSize::Bytes8,
                 if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+                ForceRightAdjust::No,
             )
         }
         "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
         "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
+        "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty),
+        "powerpc64" | "powerpc64le" => emit_ptr_va_arg(
+            bx,
+            addr,
+            target_ty,
+            PassMode::Direct,
+            SlotSize::Bytes8,
+            AllowHigherAlign::Yes,
+            match &*target.arch {
+                "powerpc64" => ForceRightAdjust::Yes,
+                _ => ForceRightAdjust::No,
+            },
+        ),
        // Windows x86_64
         "x86_64" if target.is_like_windows => {
             let target_ty_size = bx.cx.size_of(target_ty).bytes();
@@ -755,6 +933,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
                 },
                 SlotSize::Bytes8,
                 AllowHigherAlign::No,
+                ForceRightAdjust::No,
             )
         }
         // This includes `target.is_like_darwin`, which on x86_64 targets is like sysv64.
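
Reviewer note (illustrative sketch, not part of the patch): the hard-coded offsets in emit_powerpc_va_arg — `1 + 1 + 2` for the overflow area pointer, `1 + 1 + 2 + 4` for the register save area, and the 32-byte skip past the GPRs — follow directly from the `__va_list_tag` layout quoted in the comment at the top of the function, assuming 4-byte pointers on the 32-bit powerpc target. A hypothetical #[repr(C)] rendering of that struct makes the field offsets explicit; the struct name and field types below are illustrative, not taken from the patch:

    // Sketch only; fields mirror the C declaration quoted in emit_powerpc_va_arg.
    // Offsets assume a 32-bit powerpc target with 4-byte pointers.
    #[repr(C)]
    struct VaListTag {
        gpr: u8,                    // offset 0: next general-purpose register to use
        fpr: u8,                    // offset 1: next floating-point register to use
        reserved: u16,              // offset 2: padding
        overflow_arg_area: *mut u8, // offset 4 (1 + 1 + 2): arguments spilled to the stack
        reg_save_area: *mut u8,     // offset 8 (1 + 1 + 2 + 4): 8 saved GPRs, FPRs at +32
    }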
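
For broader context (again an illustration, not code from this commit): the new powerpc lowering is exercised by the unstable c_variadic feature whenever a variadic Rust function reads its arguments through VaListImpl::arg. A minimal sketch, with a hypothetical function name:

    // Hypothetical example; requires the unstable `c_variadic` feature.
    #![feature(c_variadic)]

    unsafe extern "C" fn sum(count: usize, mut ap: ...) -> i32 {
        let mut total = 0;
        for _ in 0..count {
            // Each call lowers to the va_arg intrinsic; on powerpc it now goes
            // through emit_powerpc_va_arg. i32 implements VaArgSafe, and integer
            // types smaller than 4 bytes are promoted to i32 before reaching
            // emit_va_arg (see the intrinsic.rs hunk above).
            total += unsafe { ap.arg::<i32>() };
        }
        total
    }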
