| author | bors <bors@rust-lang.org> | 2025-07-07 17:28:14 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2025-07-07 17:28:14 +0000 |
| commit | 2f8eeb2bba86b8f457ec602c578473c711f85628 | |
| tree | 1e984b67d6882080d8a9ead645145177143dda50 /compiler/rustc_codegen_llvm/src | |
| parent | 1b0bc594a75dfc1cdedc6c17052cf44de101e632 | |
| parent | 93f1201c0616672d71e640a0ad600d029448c40a | |
Auto merge of #143182 - xdoardo:more-addrspace, r=workingjubilee
Allow custom default address spaces and parse `p-` specifications in the datalayout string

Some targets, such as CHERI, default to an address space other than the "normal" default address space `0` (CHERI uses [address space 200](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-877.pdf)). Currently, `rustc` does not allow targets to specify a custom default address space, and it does not take [`p-` specifications in the datalayout string](https://llvm.org/docs/LangRef.html#langref-datalayout) into consideration.

This patch mitigates both problems by allowing targets to define a custom default address space (while keeping address space `0` as the default) and by adding code to parse the `p-` specifications in `rustc_abi`. The main change is that `TargetDataLayout` now uses functions to refer to pointer-related information, instead of dedicated fields for the size and alignment of pointers in the default address space: the two `pointer_size` and `pointer_align` fields in `TargetDataLayout` are replaced with an `FxHashMap` that holds info for all the possible address spaces, as parsed from the `p-` specifications. The potential performance drawbacks of not having ad-hoc fields for the default address space will be tested in this PR's CI run.

r? workingjubilee
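The refactor is mechanical at the call sites: every `data_layout.pointer_size` / `pointer_align` field access in the diff below becomes a `pointer_size()` / `pointer_align()` call. As a rough sketch of the scheme (illustrative only — the type and method names below are invented stand-ins for this example, not the actual `rustc_abi` API):

```rust
use std::collections::HashMap;

/// Invented stand-ins for illustration; the real types live in `rustc_abi`.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct AddressSpace(u32);

#[derive(Clone, Copy, Debug)]
struct PointerSpec {
    size_bits: u64,
    abi_align_bits: u64,
}

struct TargetDataLayout {
    /// Targets like CHERI could set this to e.g. `AddressSpace(200)`.
    default_address_space: AddressSpace,
    /// Populated from the `p-` entries of the datalayout string.
    pointer_specs: HashMap<AddressSpace, PointerSpec>,
}

impl TargetDataLayout {
    /// Replaces the old `pointer_size` field: looks up the spec for the
    /// *default* address space instead of reading a dedicated field.
    fn pointer_size_bits(&self) -> u64 {
        self.pointer_specs[&self.default_address_space].size_bits
    }

    /// Replaces the old `pointer_align` field, analogously.
    fn pointer_abi_align_bits(&self) -> u64 {
        self.pointer_specs[&self.default_address_space].abi_align_bits
    }
}

fn main() {
    // A CHERI-like layout: 64-bit pointers in address space 0, 128-bit
    // capabilities in address space 200, with 200 as the default.
    let pointer_specs = HashMap::from([
        (AddressSpace(0), PointerSpec { size_bits: 64, abi_align_bits: 64 }),
        (AddressSpace(200), PointerSpec { size_bits: 128, abi_align_bits: 128 }),
    ]);
    let dl = TargetDataLayout { default_address_space: AddressSpace(200), pointer_specs };
    assert_eq!(dl.pointer_size_bits(), 128);
}
```

The map-backed lookup in place of dedicated fields is exactly the trade-off the PR's CI run is meant to benchmark.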
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/common.rs | 2 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/consts.rs | 11 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/context.rs | 2 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 14 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/intrinsic.rs | 20 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/lib.rs | 2 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/type_.rs | 4 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/va_arg.rs | 54 |
8 files changed, 59 insertions, 50 deletions
```diff
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 7cfab25bc50..92f38565eef 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -175,7 +175,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_usize(&self, i: u64) -> &'ll Value {
-        let bit_size = self.data_layout().pointer_size.bits();
+        let bit_size = self.data_layout().pointer_size().bits();
         if bit_size < 64 {
             // make sure it doesn't overflow
             assert!(i < (1 << bit_size));
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index a4492d76c3c..28f5282c6b0 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -43,7 +43,8 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
     }
     let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
-    let pointer_size = dl.pointer_size.bytes() as usize;
+    let pointer_size = dl.pointer_size();
+    let pointer_size_bytes = pointer_size.bytes() as usize;
 
     // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range`
     // must be within the bounds of `alloc` and not contain or overlap a pointer provenance.
@@ -100,7 +101,9 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
             // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
             // affect interpreter execution (we inspect the result after interpreter execution),
             // and we properly interpret the provenance as a relocation pointer offset.
-            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+            alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+                offset..(offset + pointer_size_bytes),
+            ),
         )
         .expect("const_alloc_to_llvm: could not read relocation pointer")
         as u64;
@@ -111,11 +114,11 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
             InterpScalar::from_pointer(Pointer::new(prov, Size::from_bytes(ptr_offset)), &cx.tcx),
             Scalar::Initialized {
                 value: Primitive::Pointer(address_space),
-                valid_range: WrappingRange::full(dl.pointer_size),
+                valid_range: WrappingRange::full(pointer_size),
             },
             cx.type_ptr_ext(address_space),
         ));
-        next_offset = offset + pointer_size;
+        next_offset = offset + pointer_size_bytes;
     }
     if alloc.len() >= next_offset {
         let range = next_offset..alloc.len();
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 0324dff6ff2..90582e23b04 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -605,7 +605,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         GenericCx(
             FullCx {
                 tcx,
-                scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size),
+                scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size()),
                 use_dll_storage_attrs,
                 tls_model,
                 codegen_unit,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 7f3e486ca31..9b4736e50e6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -159,13 +159,15 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     return_if_di_node_created_in_meantime!(cx, unique_type_id);
 
     let data_layout = &cx.tcx.data_layout;
+    let pointer_size = data_layout.pointer_size();
+    let pointer_align = data_layout.pointer_align();
     let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
 
     match wide_pointer_kind(cx, pointee_type) {
         None => {
             // This is a thin pointer. Create a regular pointer type and give it the correct name.
             assert_eq!(
-                (data_layout.pointer_size, data_layout.pointer_align.abi),
+                (pointer_size, pointer_align.abi),
                 cx.size_and_align_of(ptr_type),
                 "ptr_type={ptr_type}, pointee_type={pointee_type}",
             );
@@ -174,8 +176,8 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                 llvm::LLVMRustDIBuilderCreatePointerType(
                     DIB(cx),
                     pointee_type_di_node,
-                    data_layout.pointer_size.bits(),
-                    data_layout.pointer_align.abi.bits() as u32,
+                    pointer_size.bits(),
+                    pointer_align.abi.bits() as u32,
                     0, // Ignore DWARF address space.
                     ptr_type_debuginfo_name.as_c_char_ptr(),
                     ptr_type_debuginfo_name.len(),
@@ -319,7 +321,9 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
     let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
     let (size, align) = match fn_ty.kind() {
         ty::FnDef(..) => (Size::ZERO, Align::ONE),
-        ty::FnPtr(..) => (cx.tcx.data_layout.pointer_size, cx.tcx.data_layout.pointer_align.abi),
+        ty::FnPtr(..) => {
+            (cx.tcx.data_layout.pointer_size(), cx.tcx.data_layout.pointer_align().abi)
+        }
         _ => unreachable!(),
     };
     let di_node = unsafe {
@@ -504,7 +508,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
         create_basic_type(
             cx,
             "<recur_type>",
-            cx.tcx.data_layout.pointer_size,
+            cx.tcx.data_layout.pointer_size(),
             dwarf_const::DW_ATE_unsigned,
         )
     })
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 35922c100cd..fcc0d378f06 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 // For rusty ABIs, small aggregates are actually passed
                 // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                 // so we re-use that same threshold here.
-                layout.size() <= self.data_layout().pointer_size * 2
+                layout.size() <= self.data_layout().pointer_size() * 2
             }
         };
 
@@ -758,8 +758,8 @@ fn codegen_msvc_try<'ll, 'tcx>(
     // }
     //
     // More information can be found in libstd's seh.rs implementation.
-    let ptr_size = bx.tcx().data_layout.pointer_size;
-    let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+    let ptr_size = bx.tcx().data_layout.pointer_size();
+    let ptr_align = bx.tcx().data_layout.pointer_align().abi;
     let slot = bx.alloca(ptr_size, ptr_align);
     let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
     bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
@@ -1031,8 +1031,8 @@ fn codegen_emcc_try<'ll, 'tcx>(
 
     // We need to pass two values to catch_func (ptr and is_rust_panic), so
    // create an alloca and pass a pointer to that.
-    let ptr_size = bx.tcx().data_layout.pointer_size;
-    let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+    let ptr_size = bx.tcx().data_layout.pointer_size();
+    let ptr_align = bx.tcx().data_layout.pointer_align().abi;
     let i8_align = bx.tcx().data_layout.i8_align.abi;
     // Required in order for there to be no padding between the fields.
     assert!(i8_align <= ptr_align);
@@ -1158,9 +1158,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     macro_rules! require_int_or_uint_ty {
         ($ty: expr, $diag: expr) => {
             match $ty {
-                ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+                ty::Int(i) => {
+                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
+                }
                 ty::Uint(i) => {
-                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
+                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                 }
                 _ => {
                     return_error!($diag);
@@ -2014,10 +2016,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         } else {
             let bitwidth = match in_elem.kind() {
                 ty::Int(i) => {
-                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
+                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                 }
                 ty::Uint(i) => {
-                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
+                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                 }
                 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                     span,
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index cdfffbe47bf..63ca51b006d 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -113,7 +113,7 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
     ) -> ModuleLlvm {
         let module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
         let cx =
-            SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size);
+            SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size());
         unsafe {
             allocator::codegen(tcx, cx, module_name, kind, alloc_error_handler_kind);
         }
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 453eca2bbe1..ee472e75ed4 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -208,7 +208,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> BaseTypeCodegenMethods for GenericCx<'ll, CX> {
     }
 
     fn type_ptr(&self) -> &'ll Type {
-        self.type_ptr_ext(AddressSpace::DATA)
+        self.type_ptr_ext(AddressSpace::ZERO)
     }
 
     fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
@@ -258,7 +258,7 @@ impl Type {
     }
 
     pub(crate) fn ptr_llcx(llcx: &llvm::Context) -> &Type {
-        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
+        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::ZERO.0) }
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 486dc894a4e..ce079f3cb0a 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -45,7 +45,8 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     let va_list_ty = bx.type_ptr();
     let va_list_addr = list.immediate();
 
-    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
+    let ptr = bx.load(va_list_ty, va_list_addr, ptr_align_abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
@@ -56,7 +57,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     let aligned_size = size.align_to(slot_size).bytes() as i32;
     let full_direct_size = bx.cx().const_i32(aligned_size);
     let next = bx.inbounds_ptradd(addr, full_direct_size);
-    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(next, va_list_addr, ptr_align_abi);
 
     if size.bytes() < slot_size.bytes()
         && bx.tcx().sess.target.endian == Endian::Big
@@ -108,8 +109,8 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     let (llty, size, align) = if indirect {
         (
             bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
-            bx.cx.data_layout().pointer_size,
-            bx.cx.data_layout().pointer_align,
+            bx.cx.data_layout().pointer_size(),
+            bx.cx.data_layout().pointer_align(),
         )
     } else {
         (layout.llvm_type(bx.cx), layout.size, layout.align)
@@ -204,7 +205,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_reg);
 
     let top_type = bx.type_ptr();
-    let top = bx.load(top_type, reg_top, dl.pointer_align.abi);
+    let top = bx.load(top_type, reg_top, dl.pointer_align().abi);
 
     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = bx.ptradd(top, reg_off_v);
@@ -297,6 +298,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
     let max_regs = 8u8;
     let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
 
+    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
     let in_reg = bx.append_sibling_block("va_arg.in_reg");
     let in_mem = bx.append_sibling_block("va_arg.in_mem");
 
@@ -308,7 +310,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_reg);
 
     let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
-    let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);
+    let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, ptr_align_abi);
 
     // Floating-point registers start after the general-purpose registers.
     if !is_int && !is_soft_float_abi {
@@ -342,11 +344,11 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
     let size = if !is_indirect {
         layout.layout.size.align_to(overflow_area_align)
     } else {
-        dl.pointer_size
+        dl.pointer_size()
     };
 
     let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
-    let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);
+    let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, ptr_align_abi);
 
     // Round up address of argument to alignment
     if layout.layout.align.abi > overflow_area_align {
@@ -362,7 +364,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
 
     // Increase the overflow area.
     overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
-    bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);
+    bx.store(overflow_area, overflow_area_ptr, ptr_align_abi);
 
     bx.br(end);
 
@@ -373,11 +375,8 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
     bx.switch_to_block(end);
     let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
-    let val_addr = if is_indirect {
-        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
-    } else {
-        val_addr
-    };
+    let val_addr =
+        if is_indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
     bx.load(val_type, val_addr, layout.align.abi)
 }
 
@@ -414,6 +413,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     let in_reg = bx.append_sibling_block("va_arg.in_reg");
     let in_mem = bx.append_sibling_block("va_arg.in_mem");
     let end = bx.append_sibling_block("va_arg.end");
+    let ptr_align_abi = dl.pointer_align().abi;
 
     // FIXME: vector ABI not yet supported.
     let target_ty_size = bx.cx.size_of(target_ty).bytes();
@@ -435,7 +435,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_reg);
 
     // Work out the address of the value in the register save area.
-    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, ptr_align_abi);
     let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
     let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
     let reg_addr = bx.ptradd(reg_ptr_v, reg_off);
@@ -449,15 +449,14 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_mem);
 
     // Work out the address of the value in the argument overflow area.
-    let arg_ptr_v =
-        bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v = bx.load(bx.type_ptr(), overflow_arg_area, ptr_align_abi);
     let arg_off = bx.const_u64(padding);
     let mem_addr = bx.ptradd(arg_ptr_v, arg_off);
 
     // Update the argument overflow area pointer.
     let arg_size = bx.cx().const_u64(padded_size);
     let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
-    bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi);
+    bx.store(new_arg_ptr_v, overflow_arg_area, ptr_align_abi);
     bx.br(end);
 
     // Return the appropriate result.
@@ -465,7 +464,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
     let val_addr =
-        if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr };
+        if indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
     bx.load(val_type, val_addr, layout.align.abi)
 }
 
@@ -607,7 +606,7 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>(
     // loads than necessary. Can we clean this up?
     let reg_save_area_ptr =
         bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * unsigned_int_offset + ptr_offset));
-    let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align.abi);
+    let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align().abi);
 
     let reg_addr = match layout.layout.backend_repr() {
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
@@ -749,10 +748,11 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
     layout: TyAndLayout<'tcx, Ty<'tcx>>,
 ) -> &'ll Value {
     let dl = bx.cx.data_layout();
+    let ptr_align_abi = dl.data_layout().pointer_align().abi;
 
     let overflow_arg_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(8));
 
-    let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, dl.pointer_align.abi);
+    let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, ptr_align_abi);
     // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
     // byte boundary if alignment needed by type exceeds 8 byte boundary.
     // It isn't stated explicitly in the standard, but in practice we use
@@ -771,7 +771,7 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
     let size_in_bytes = layout.layout.size().bytes();
     let offset = bx.const_i32(size_in_bytes.next_multiple_of(8) as i32);
     let overflow_arg_area = bx.inbounds_ptradd(overflow_arg_area_v, offset);
-    bx.store(overflow_arg_area, overflow_arg_area_ptr, dl.pointer_align.abi);
+    bx.store(overflow_arg_area, overflow_arg_area_ptr, ptr_align_abi);
 
     mem_addr
 }
@@ -803,6 +803,7 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
     let from_stack = bx.append_sibling_block("va_arg.from_stack");
     let from_regsave = bx.append_sibling_block("va_arg.from_regsave");
     let end = bx.append_sibling_block("va_arg.end");
+    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
 
     // (*va).va_ndx
     let va_reg_offset = 4;
@@ -825,12 +826,11 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
     bx.switch_to_block(from_regsave);
 
     // update va_ndx
-    bx.store(offset_next, offset_ptr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(offset_next, offset_ptr, ptr_align_abi);
 
     // (*va).va_reg
     let regsave_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_reg_offset));
-    let regsave_area =
-        bx.load(bx.type_ptr(), regsave_area_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let regsave_area = bx.load(bx.type_ptr(), regsave_area_ptr, ptr_align_abi);
     let regsave_value_ptr = bx.inbounds_ptradd(regsave_area, offset);
     bx.br(end);
 
@@ -849,11 +849,11 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
     // va_ndx = offset_next_corrected;
     let offset_next_corrected = bx.add(offset_next, bx.const_i32(slot_size));
     // update va_ndx
-    bx.store(offset_next_corrected, offset_ptr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(offset_next_corrected, offset_ptr, ptr_align_abi);
 
     // let stack_value_ptr = unsafe { (*va).va_stk.byte_add(offset_corrected) };
     let stack_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(0));
-    let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, ptr_align_abi);
     let stack_value_ptr = bx.inbounds_ptradd(stack_area, offset_corrected);
     bx.br(end);
```
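For background on the `p-` entries the commit message refers to: LLVM datalayout strings describe per-address-space pointer layout with entries of the form `p[n]:<size>:<abi>[:<pref>[:<idx>]]`. A minimal parsing sketch, assuming only the first three fields (a hypothetical helper for illustration; the real parser added to `rustc_abi` handles more fields and proper error reporting):

```rust
/// Parse a datalayout `p`-entry such as "p270:32:32" or "p:64:64".
/// Returns (address space, pointer size in bits, ABI alignment in bits).
/// Illustrative only; not the rustc_abi implementation.
fn parse_pointer_spec(entry: &str) -> Option<(u32, u64, u64)> {
    let rest = entry.strip_prefix('p')?;
    let mut parts = rest.split(':');
    // An empty address-space component, as in "p:64:64", means address space 0.
    let addr_space = match parts.next()? {
        "" => 0,
        n => n.parse().ok()?,
    };
    let size_bits: u64 = parts.next()?.parse().ok()?;
    let abi_align_bits: u64 = parts.next()?.parse().ok()?;
    Some((addr_space, size_bits, abi_align_bits))
}

fn main() {
    // Ordinary 64-bit pointers in the default address space 0.
    assert_eq!(parse_pointer_spec("p:64:64"), Some((0, 64, 64)));
    // A CHERI-like capability address space with 128-bit pointers.
    assert_eq!(parse_pointer_spec("p200:128:128"), Some((200, 128, 128)));
}
```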
