| field | value | date |
|---|---|---|
| author | Scott McMurray <scottmcm@users.noreply.github.com> | 2024-04-10 22:07:21 -0700 |
| committer | Scott McMurray <scottmcm@users.noreply.github.com> | 2024-04-11 00:10:10 -0700 |
| commit | 89502e584bea0b33525bad390079f5fb57df64f1 (patch) | |
| tree | 84dce1f9989d14667f7184055ec84a9e88b80d8c /compiler/rustc_codegen_llvm/src | |
| parent | c2239bca5b89a8d3573cc0fc0f2fa65c50edb79c (diff) | |
Make `PlaceRef` hold a `PlaceValue` for the non-layout fields (like `OperandRef` does)
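For reference, the shape of the refactor: the value-describing fields move out of `PlaceRef` into a `PlaceValue` reachable through a new `val` field, mirroring how `OperandRef` already wraps an `OperandValue`. The sketch below is illustrative, not copied from `rustc_codegen_ssa`: the field names come straight from the accesses in this diff (`place.val.llval`, `place.val.llextra`, `place.val.align`), while `Align` and `TyAndLayout` are simplified stand-ins for the real compiler types so the snippet compiles on its own.

```rust
use std::marker::PhantomData;

/// Stand-in for `rustc_abi::Align` (illustrative only).
#[derive(Copy, Clone)]
pub struct Align(pub u64);

/// Stand-in for the compiler's `TyAndLayout` (illustrative only).
#[derive(Copy, Clone)]
pub struct TyAndLayout<'tcx>(PhantomData<&'tcx ()>);

/// The non-layout half of a place: the fields that previously lived
/// directly on `PlaceRef` (`llval`, `llextra`, `align`).
pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,
    /// The extra pointer-sized data (e.g. slice length or vtable)
    /// when the place is unsized, otherwise `None`.
    pub llextra: Option<V>,
    /// The alignment known for this place.
    pub align: Align,
}

/// After this commit, `PlaceRef` pairs a `PlaceValue` with the layout,
/// the same way `OperandRef` pairs an `OperandValue` with the layout.
pub struct PlaceRef<'tcx, V> {
    pub val: PlaceValue<V>,
    pub layout: TyAndLayout<'tcx>,
}
```

Call sites then change mechanically, which is exactly the pattern in the hunks below: `place.llval` becomes `place.val.llval`, `place.llextra` becomes `place.val.llextra`, and `place.align` becomes `place.val.align`, while `place.layout` stays where it was.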
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/abi.rs | 2 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/builder.rs | 20 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/intrinsic.rs | 8 |
3 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index f918facc86d..cd9e5515958 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -233,7 +233,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 bx.store(val, llscratch, scratch_align);
                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index b7235972204..6819c22698c 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -535,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 panic!("unsized locals must not be `extern` types");
             }
         }
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());

         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -579,13 +579,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }

-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(llextra) = place.val.llextra {
+            OperandValue::Ref(place.val.llval, Some(llextra), place.val.align)
         } else if place.layout.is_llvm_immediate() {
             let mut const_llval = None;
             let llty = place.layout.llvm_type(self);
             unsafe {
-                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                     if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                         if let Some(init) = llvm::LLVMGetInitializer(global) {
                             if self.val_ty(init) == llty {
@@ -596,7 +596,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(llty, place.llval, place.align);
+                let load = self.load(llty, place.val.llval, place.val.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
@@ -608,9 +608,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -619,11 +619,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             };

             OperandValue::Pair(
-                load(0, a, place.layout, place.align, Size::ZERO),
-                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+                load(0, a, place.layout, place.val.align, Size::ZERO),
+                load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val.llval, None, place.val.align)
         };

         OperandRef { val, layout: place.layout }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index dc52dd156b7..a661a7f489d 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     llvm::LLVMSetAlignment(load, align);
                 }
                 if !result.layout.is_zst() {
-                    self.store(load, result.llval, result.align);
+                    self.store(load, result.val.llval, result.val.align);
                 }
                 return Ok(());
             }
@@ -428,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {

             sym::black_box => {
                 args[0].val.store(self, result);
-                let result_val_span = [result.llval];
+                let result_val_span = [result.val.llval];
                 // We need to "use" the argument in some way LLVM can't introspect, and on
                 // targets that support it we can typically leverage inline assembly to do
                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -482,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {

         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.llval, result.align);
+                self.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -1065,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
                 let int_ty = bx.type_ix(expected_bytes * 8);
-                bx.load(int_ty, place.llval, Align::ONE)
+                bx.load(int_ty, place.val.llval, Align::ONE)
             }
             _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                 span,
