Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
-rw-r--r--   compiler/rustc_codegen_ssa/src/base.rs                  |   2
-rw-r--r--   compiler/rustc_codegen_ssa/src/codegen_attrs.rs         |   2
-rw-r--r--   compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs  |   2
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/block.rs             |  15
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/intrinsic.rs         |  15
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/operand.rs           |  88
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/place.rs             |  87
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/rvalue.rs            | 164
-rw-r--r--   compiler/rustc_codegen_ssa/src/traits/builder.rs        |  66
9 files changed, 281 insertions, 160 deletions
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 358c24bfb82..877e5b75912 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -283,7 +283,7 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         }
 
         if src_f.layout.ty == dst_f.layout.ty {
-            bx.typed_place_copy(dst_f, src_f);
+            bx.typed_place_copy(dst_f.val, src_f.val, src_f.layout);
         } else {
             coerce_unsized_into(bx, src_f, dst_f);
         }
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index c28b0d644e6..9bf055b1739 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -276,7 +276,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
             sym::target_feature => {
                 if !tcx.is_closure_like(did.to_def_id())
                     && let Some(fn_sig) = fn_sig()
-                    && fn_sig.skip_binder().unsafety() == hir::Unsafety::Normal
+                    && fn_sig.skip_binder().safety() == hir::Safety::Safe
                 {
                     if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
                         // The `#[target_feature]` attribute is allowed on
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index e9c7606dc5a..07473ee476b 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -365,7 +365,7 @@ fn push_debuginfo_type_name<'tcx>(
             }
             output.push_str(" (*)(");
         } else {
-            output.push_str(sig.unsafety.prefix_str());
+            output.push_str(sig.safety.prefix_str());
 
             if sig.abi != rustc_target::spec::abi::Abi::Rust {
                 output.push_str("extern \"");
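The base.rs hunk above shows the shape of the new API: `typed_place_copy` now takes two untyped locations plus a single layout, rather than two typed `PlaceRef`s whose layouts had to agree. A standalone sketch of why that split removes a redundant invariant (not rustc code; all types here are simplified stand-ins, with `u64` playing the backend value `V`):

    #[derive(Copy, Clone, Debug)]
    struct Layout { size: u64, align: u64 }

    #[allow(dead_code)]
    #[derive(Copy, Clone, Debug)]
    struct PlaceValue { llval: u64, llextra: Option<u64>, align: u64 }

    // Old shape: both sides carried a layout, and the two layouts had to agree.
    fn typed_place_copy_old(dst: (PlaceValue, Layout), src: (PlaceValue, Layout)) {
        assert_eq!(dst.1.size, src.1.size); // redundant invariant to check
    }

    // New shape: one layout serves both raw locations, so the sizes agree
    // by construction.
    fn typed_place_copy(dst: PlaceValue, src: PlaceValue, layout: Layout) {
        let _ = (dst, src, layout); // a real backend would emit a memcpy of `layout.size` bytes
    }

    fn main() {
        let layout = Layout { size: 16, align: 8 };
        let dst = PlaceValue { llval: 0x1000, llextra: None, align: 8 };
        let src = PlaceValue { llval: 0x2000, llextra: None, align: 8 };
        typed_place_copy_old((dst, layout), (src, layout));
        typed_place_copy(dst, src, layout);
    }
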
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index d36972d0d86..ba6aad51316 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1454,9 +1454,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
                         None => arg.layout.align.abi,
                     };
-                    let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
-                    op.val.store(bx, scratch);
-                    (scratch.val.llval, scratch.val.align, true)
+                    let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
+                    op.val.store(bx, scratch.with_type(arg.layout));
+                    (scratch.llval, scratch.align, true)
                 }
                 PassMode::Cast { .. } => {
                     let scratch = PlaceRef::alloca(bx, arg.layout);
@@ -1475,10 +1475,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // For `foo(packed.large_field)`, and types with <4 byte alignment on x86,
                         // alignment requirements may be higher than the type's alignment, so copy
                         // to a higher-aligned alloca.
-                        let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
-                        let op_place = PlaceRef { val: op_place_val, layout: op.layout };
-                        bx.typed_place_copy(scratch, op_place);
-                        (scratch.val.llval, scratch.val.align, true)
+                        let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
+                        bx.typed_place_copy(scratch, op_place_val, op.layout);
+                        (scratch.llval, scratch.align, true)
                     } else {
                         (op_place_val.llval, op_place_val.align, true)
                     }
@@ -1567,7 +1566,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             if place_val.llextra.is_some() {
                 bug!("closure arguments must be sized");
             }
-            let tuple_ptr = PlaceRef { val: place_val, layout: tuple.layout };
+            let tuple_ptr = place_val.with_type(tuple.layout);
             for i in 0..tuple.layout.fields.count() {
                 let field_ptr = tuple_ptr.project_field(bx, i);
                 let field = bx.load_operand(field_ptr);
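Both block.rs hunks allocate an over-aligned scratch slot; the new `PlaceValue::alloca` takes only a size and an alignment, since the slot itself is untyped. A runnable model of the `required_align` computation from the first hunk (illustrative only, not the rustc definition):

    use std::cmp;

    // An ABI-mandated pointee alignment can exceed the Rust type's own
    // alignment; the scratch slot must satisfy the larger of the two.
    fn required_align(abi_pointee_align: Option<u64>, type_abi_align: u64) -> u64 {
        match abi_pointee_align {
            Some(pointee_align) => cmp::max(pointee_align, type_abi_align),
            None => type_abi_align,
        }
    }

    fn main() {
        // e.g. an align(16) ABI requirement for a type that is only align(8)
        assert_eq!(required_align(Some(16), 8), 16);
        // no extra requirement: fall back to the type's ABI alignment
        assert_eq!(required_align(None, 8), 8);
    }
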
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 2e008460798..f88deaa7abc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -1,4 +1,4 @@
-use super::operand::{OperandRef, OperandValue};
+use super::operand::OperandRef;
 use super::place::PlaceRef;
 use super::FunctionCx;
 use crate::errors;
@@ -93,9 +93,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     // into the (unoptimized) direct swapping implementation, so we disable it.
                     || bx.sess().target.arch == "spirv"
                 {
-                    let x_place = PlaceRef::new_sized(args[0].immediate(), pointee_layout);
-                    let y_place = PlaceRef::new_sized(args[1].immediate(), pointee_layout);
-                    bx.typed_place_swap(x_place, y_place);
+                    let align = pointee_layout.align.abi;
+                    let x_place = args[0].val.deref(align);
+                    let y_place = args[1].val.deref(align);
+                    bx.typed_place_swap(x_place, y_place, pointee_layout);
                     return Ok(());
                 }
             }
@@ -113,15 +114,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::va_end => bx.va_end(args[0].immediate()),
             sym::size_of_val => {
                 let tp_ty = fn_args.type_at(0);
-                let meta =
-                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
+                let (_, meta) = args[0].val.pointer_parts();
                 let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                 llsize
             }
             sym::min_align_of_val => {
                 let tp_ty = fn_args.type_at(0);
-                let meta =
-                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
+                let (_, meta) = args[0].val.pointer_parts();
                 let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                 llalign
             }
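The `size_of_val`/`min_align_of_val` arms collapse an open-coded match into the new `OperandValue::pointer_parts`. A standalone model of that helper (simplified types, not the rustc definitions):

    // A thin pointer is one immediate; a wide pointer (e.g. *const [T] or
    // *const dyn Trait) is a (data, metadata) pair.
    #[derive(Debug, PartialEq)]
    enum OperandValue<V> {
        Immediate(V),
        Pair(V, V),
    }

    impl<V> OperandValue<V> {
        fn pointer_parts(self) -> (V, Option<V>) {
            match self {
                OperandValue::Immediate(llptr) => (llptr, None),
                OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
            }
        }
    }

    fn main() {
        let thin = OperandValue::Immediate(0x1000_u64);
        let wide = OperandValue::Pair(0x2000_u64, 5_u64); // slice pointer + length
        assert_eq!(thin.pointer_parts(), (0x1000, None));
        assert_eq!(wide.pointer_parts(), (0x2000, Some(5)));
    }
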
=> bug!("Deref of by-Ref operand {:?}", self), - OperandValue::ZeroSized => bug!("Deref of ZST operand {:?}", self), - }; let layout = cx.layout_of(projected_ty); - let val = PlaceValue { llval: llptr, llextra, align: layout.align.abi }; - PlaceRef { val, layout } + self.val.deref(layout.align.abi).with_type(layout) } /// If this operand is a `Pair`, we return an aggregate with the two values. @@ -418,8 +487,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> { if val.llextra.is_some() { bug!("cannot directly store unsized values"); } - let source_place = PlaceRef { val, layout: dest.layout }; - bx.typed_place_copy_with_flags(dest, source_place, flags); + bx.typed_place_copy_with_flags(dest.val, val, dest.layout, flags); } OperandValue::Immediate(s) => { let val = bx.from_immediate(s); diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index 870a105c61d..971ac2defdc 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -10,12 +10,15 @@ use rustc_middle::mir; use rustc_middle::mir::tcx::PlaceTy; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, Ty}; -use rustc_target::abi::{Align, FieldsShape, Int, Pointer, TagEncoding}; +use rustc_target::abi::{Align, FieldsShape, Int, Pointer, Size, TagEncoding}; use rustc_target::abi::{VariantIdx, Variants}; /// The location and extra runtime properties of the place. /// /// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`]. +/// +/// As a location in memory, this has no specific type. If you want to +/// load or store it using a typed operation, use [`Self::with_type`]. #[derive(Copy, Clone, Debug)] pub struct PlaceValue<V> { /// A pointer to the contents of the place. @@ -35,6 +38,41 @@ impl<V: CodegenObject> PlaceValue<V> { pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> { PlaceValue { llval, llextra: None, align } } + + /// Allocates a stack slot in the function for a value + /// of the specified size and alignment. + /// + /// The allocation itself is untyped. + pub fn alloca<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx, Value = V>>( + bx: &mut Bx, + size: Size, + align: Align, + ) -> PlaceValue<V> { + let llval = bx.alloca(size, align); + PlaceValue::new_sized(llval, align) + } + + /// Creates a `PlaceRef` to this location with the given type. + pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> { + debug_assert!( + layout.is_unsized() || layout.abi.is_uninhabited() || self.llextra.is_none(), + "Had pointer metadata {:?} for sized type {layout:?}", + self.llextra, + ); + PlaceRef { val: self, layout } + } + + /// Gets the pointer to this place as an [`OperandValue::Immediate`] + /// or, for those needing metadata, an [`OperandValue::Pair`]. + /// + /// This is the inverse of [`OperandValue::deref`]. 
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 870a105c61d..971ac2defdc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -10,12 +10,15 @@ use rustc_middle::mir;
 use rustc_middle::mir::tcx::PlaceTy;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Align, FieldsShape, Int, Pointer, TagEncoding};
+use rustc_target::abi::{Align, FieldsShape, Int, Pointer, Size, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};
 
 /// The location and extra runtime properties of the place.
 ///
 /// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
+///
+/// As a location in memory, this has no specific type. If you want to
+/// load or store it using a typed operation, use [`Self::with_type`].
 #[derive(Copy, Clone, Debug)]
 pub struct PlaceValue<V> {
     /// A pointer to the contents of the place.
@@ -35,6 +38,41 @@ impl<V: CodegenObject> PlaceValue<V> {
     pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
         PlaceValue { llval, llextra: None, align }
     }
+
+    /// Allocates a stack slot in the function for a value
+    /// of the specified size and alignment.
+    ///
+    /// The allocation itself is untyped.
+    pub fn alloca<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        size: Size,
+        align: Align,
+    ) -> PlaceValue<V> {
+        let llval = bx.alloca(size, align);
+        PlaceValue::new_sized(llval, align)
+    }
+
+    /// Creates a `PlaceRef` to this location with the given type.
+    pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
+        debug_assert!(
+            layout.is_unsized() || layout.abi.is_uninhabited() || self.llextra.is_none(),
+            "Had pointer metadata {:?} for sized type {layout:?}",
+            self.llextra,
+        );
+        PlaceRef { val: self, layout }
+    }
+
+    /// Gets the pointer to this place as an [`OperandValue::Immediate`]
+    /// or, for those needing metadata, an [`OperandValue::Pair`].
+    ///
+    /// This is the inverse of [`OperandValue::deref`].
+    pub fn address(self) -> OperandValue<V> {
+        if let Some(llextra) = self.llextra {
+            OperandValue::Pair(self.llval, llextra)
+        } else {
+            OperandValue::Immediate(self.llval)
+        }
+    }
 }
 
 #[derive(Copy, Clone, Debug)]
@@ -52,9 +90,7 @@ pub struct PlaceRef<'tcx, V> {
 
 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
-        assert!(layout.is_sized());
-        let val = PlaceValue::new_sized(llval, layout.align.abi);
-        PlaceRef { val, layout }
+        PlaceRef::new_sized_aligned(llval, layout, layout.align.abi)
     }
 
     pub fn new_sized_aligned(
@@ -63,8 +99,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         llval: V,
         layout: TyAndLayout<'tcx>,
         align: Align,
     ) -> PlaceRef<'tcx, V> {
         assert!(layout.is_sized());
-        let val = PlaceValue::new_sized(llval, align);
-        PlaceRef { val, layout }
+        PlaceValue::new_sized(llval, align).with_type(layout)
     }
 
     // FIXME(eddyb) pass something else for the name so no work is done
@@ -73,17 +108,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         bx: &mut Bx,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        Self::alloca_aligned(bx, layout, layout.align.abi)
-    }
-
-    pub fn alloca_aligned<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &mut Bx,
-        layout: TyAndLayout<'tcx>,
-        align: Align,
-    ) -> Self {
         assert!(layout.is_sized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(layout.size, align);
-        Self::new_sized_aligned(tmp, layout, align)
+        PlaceValue::alloca(bx, layout.size, layout.align.abi).with_type(layout)
     }
 
     /// Returns a place for an indirect reference to an unsized place.
@@ -132,18 +158,12 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 } else {
                     bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
                 };
-                PlaceRef {
-                    val: PlaceValue {
+                let val = PlaceValue {
                     llval,
-                        llextra: if bx.cx().type_has_metadata(field.ty) {
-                            self.val.llextra
-                        } else {
-                            None
-                        },
+                    llextra: if bx.cx().type_has_metadata(field.ty) { self.val.llextra } else { None },
                     align: effective_field_align,
-                    },
-                    layout: field,
-                }
+                };
+                val.with_type(field)
             };
 
         // Simple cases, which don't need DST adjustment:
@@ -198,7 +218,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         let ptr = bx.inbounds_ptradd(self.val.llval, offset);
         let val =
             PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
-        PlaceRef { val, layout: field }
+        val.with_type(field)
     }
 
     /// Obtain the actual discriminant of a value.
@@ -387,18 +407,13 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             layout.size
         };
 
-        PlaceRef {
-            val: PlaceValue {
-                llval: bx.inbounds_gep(
+        let llval = bx.inbounds_gep(
             bx.cx().backend_type(self.layout),
             self.val.llval,
             &[bx.cx().const_usize(0), llindex],
-                ),
-                llextra: None,
-                align: self.val.align.restrict_for_offset(offset),
-            },
-            layout,
-        }
+        );
+        let align = self.val.align.restrict_for_offset(offset);
+        PlaceValue::new_sized(llval, align).with_type(layout)
    }
 
     pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
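place.rs gains `PlaceValue::address` as the inverse of `OperandValue::deref`: taking a place's address and dereferencing it at the same alignment gets back the original place. A standalone check of that round-trip (simplified types, not the rustc definitions):

    #[derive(Copy, Clone, Debug, PartialEq)]
    struct PlaceValue { llval: u64, llextra: Option<u64>, align: u64 }

    #[derive(Copy, Clone, Debug, PartialEq)]
    enum OperandValue { Immediate(u64), Pair(u64, u64) }

    impl PlaceValue {
        // Pointer (plus metadata, if any) to this location.
        fn address(self) -> OperandValue {
            match self.llextra {
                Some(extra) => OperandValue::Pair(self.llval, extra),
                None => OperandValue::Immediate(self.llval),
            }
        }
    }

    impl OperandValue {
        // View this pointer as a place; the alignment must be supplied,
        // since a raw pointer does not know it.
        fn deref(self, align: u64) -> PlaceValue {
            let (llval, llextra) = match self {
                OperandValue::Immediate(p) => (p, None),
                OperandValue::Pair(p, m) => (p, Some(m)),
            };
            PlaceValue { llval, llextra, align }
        }
    }

    fn main() {
        let place = PlaceValue { llval: 0x1000, llextra: Some(7), align: 8 };
        assert_eq!(place.address().deref(place.align), place);
    }
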
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 1c2049f4326..ff176a79675 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -8,14 +8,16 @@ use crate::traits::*;
 use crate::MemFlags;
 
 use rustc_hir as hir;
-use rustc_middle::mir::{self, AggregateKind, Operand};
+use rustc_middle::mir;
 use rustc_middle::ty::cast::{CastTy, IntTy};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::OptLevel;
 use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::{self, FIRST_VARIANT};
+use rustc_target::abi::{self, FieldIdx, FIRST_VARIANT};
+
+use arrayvec::ArrayVec;
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     #[instrument(level = "trace", skip(self, bx))]
@@ -72,8 +74,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     if val.llextra.is_some() {
                         bug!("unsized coercion on an unsized rvalue");
                     }
-                    let source = PlaceRef { val, layout: operand.layout };
-                    base::coerce_unsized_into(bx, source, dest);
+                    base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                 }
                 OperandValue::ZeroSized => {
                     bug!("unsized coercion on a ZST rvalue");
@@ -120,7 +121,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.write_operand_repeatedly(cg_elem, count, dest);
             }
 
-            mir::Rvalue::Aggregate(ref kind, ref operands) => {
+            // This implementation does field projection, so never use it for `RawPtr`,
+            // which will always be fine with the `codegen_rvalue_operand` path below.
+            mir::Rvalue::Aggregate(ref kind, ref operands)
+                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
+            {
                 let (variant_index, variant_dest, active_field_index) = match **kind {
                     mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                         let variant_dest = dest.project_downcast(bx, variant_index);
@@ -182,10 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                         // When we have immediate(s), the alignment of the source is irrelevant,
                         // so we can store them using the destination's alignment.
-                        src.val.store(
-                            bx,
-                            PlaceRef::new_sized_aligned(dst.val.llval, src.layout, dst.val.align),
-                        );
+                        src.val.store(bx, dst.val.with_type(src.layout));
                     }
                 }
             }
@@ -223,8 +225,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Ref(source_place_val) => {
                 debug_assert_eq!(source_place_val.llextra, None);
                 debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-                let fake_place = PlaceRef { val: source_place_val, layout: cast };
-                Some(bx.load_operand(fake_place).val)
+                Some(bx.load_operand(source_place_val.with_type(cast)).val)
             }
             OperandValue::ZeroSized => {
                 let OperandValueKind::ZeroSized = operand_kind else {
@@ -450,23 +451,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
                     mir::CastKind::PointerCoercion(PointerCoercion::Unsize) => {
                         assert!(bx.cx().is_backend_scalar_pair(cast));
-                        let (lldata, llextra) = match operand.val {
-                            OperandValue::Pair(lldata, llextra) => {
-                                // unsize from a fat pointer -- this is a
-                                // "trait-object-to-supertrait" coercion.
-                                (lldata, Some(llextra))
-                            }
-                            OperandValue::Immediate(lldata) => {
-                                // "standard" unsize
-                                (lldata, None)
-                            }
-                            OperandValue::Ref(..) => {
-                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
-                            }
-                            OperandValue::ZeroSized => {
-                                bug!("zero-sized operand {:?} in `codegen_rvalue_operand`", operand);
-                            }
-                        };
+                        let (lldata, llextra) = operand.val.pointer_parts();
                         let (lldata, llextra) =
                             base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                         OperandValue::Pair(lldata, llextra)
@@ -487,12 +472,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                     }
                     mir::CastKind::DynStar => {
-                        let (lldata, llextra) = match operand.val {
-                            OperandValue::Ref(..) => todo!(),
-                            OperandValue::Immediate(v) => (v, None),
-                            OperandValue::Pair(v, l) => (v, Some(l)),
-                            OperandValue::ZeroSized => bug!("ZST -- which is not PointerLike -- in DynStar"),
-                        };
+                        let (lldata, llextra) = operand.val.pointer_parts();
                         let (lldata, llextra) =
                             base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                         OperandValue::Pair(lldata, llextra)
@@ -579,7 +559,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.codegen_place_to_pointer(bx, place, mk_ref)
             }
 
-            mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
+            mir::Rvalue::CopyForDeref(place) => {
+                self.codegen_operand(bx, &mir::Operand::Copy(place))
+            }
             mir::Rvalue::AddressOf(mutability, place) => {
                 let mk_ptr =
                     move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| Ty::new_ptr(tcx, ty, mutability);
@@ -718,29 +700,45 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandRef { val: OperandValue::Immediate(static_), layout }
             }
             mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
-            mir::Rvalue::Aggregate(box mir::AggregateKind::RawPtr(..), ref fields) => {
+            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
+            mir::Rvalue::Aggregate(_, ref fields) => {
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
-                let layout = self.cx.layout_of(self.monomorphize(ty));
-                let [data, meta] = &*fields.raw else {
-                    bug!("RawPtr fields: {fields:?}");
-                };
-                let data = self.codegen_operand(bx, data);
-                let meta = self.codegen_operand(bx, meta);
-                match (data.val, meta.val) {
-                    (p @ OperandValue::Immediate(_), OperandValue::ZeroSized) => {
-                        OperandRef { val: p, layout }
-                    }
-                    (OperandValue::Immediate(p), OperandValue::Immediate(m)) => {
-                        OperandRef { val: OperandValue::Pair(p, m), layout }
-                    }
-                    _ => bug!("RawPtr operands {data:?} {meta:?}"),
+                let ty = self.monomorphize(ty);
+                let layout = self.cx.layout_of(ty);
+
+                // `rvalue_creates_operand` has arranged that we only get here if
+                // we can build the aggregate immediate from the field immediates.
+                let mut inputs = ArrayVec::<Bx::Value, 2>::new();
+                let mut input_scalars = ArrayVec::<abi::Scalar, 2>::new();
+                for field_idx in layout.fields.index_by_increasing_offset() {
+                    let field_idx = FieldIdx::from_usize(field_idx);
+                    let op = self.codegen_operand(bx, &fields[field_idx]);
+                    let values = op.val.immediates_or_place().left_or_else(|p| {
+                        bug!("Field {field_idx:?} is {p:?} making {layout:?}");
+                    });
+                    inputs.extend(values);
+                    let scalars = self.value_kind(op.layout).scalars().unwrap();
+                    input_scalars.extend(scalars);
                 }
-            }
-            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
-                // According to `rvalue_creates_operand`, only ZST
-                // aggregate rvalues are allowed to be operands.
-                let ty = rvalue.ty(self.mir, self.cx.tcx());
-                OperandRef::zero_sized(self.cx.layout_of(self.monomorphize(ty)))
+
+                let output_scalars = self.value_kind(layout).scalars().unwrap();
+                itertools::izip!(&mut inputs, input_scalars, output_scalars).for_each(
+                    |(v, in_s, out_s)| {
+                        if in_s != out_s {
+                            // We have to be really careful about bool here, because
+                            // `(bool,)` stays i1 but `Cell<bool>` becomes i8.
+                            *v = bx.from_immediate(*v);
+                            *v = bx.to_immediate_scalar(*v, out_s);
+                        }
+                    },
+                );
+
+                let val = OperandValue::from_immediates(inputs);
+                debug_assert!(
+                    val.is_expected_variant_for_type(self.cx, layout),
+                    "Made wrong variant {val:?} for type {layout:?}",
+                );
+                OperandRef { val, layout }
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                 let operand = self.codegen_operand(bx, operand);
@@ -778,16 +776,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
         let cg_place = self.codegen_place(bx, place.as_ref());
+        let val = cg_place.val.address();
         let ty = cg_place.layout.ty;
+        debug_assert!(
+            if bx.cx().type_has_metadata(ty) {
+                matches!(val, OperandValue::Pair(..))
+            } else {
+                matches!(val, OperandValue::Immediate(..))
+            },
+            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
+        );
 
-        // Note: places are indirect, so storing the `llval` into the
-        // destination effectively creates a reference.
-        let val = if !bx.cx().type_has_metadata(ty) {
-            OperandValue::Immediate(cg_place.val.llval)
-        } else {
-            OperandValue::Pair(cg_place.val.llval, cg_place.val.llextra.unwrap())
-        };
         OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
     }
 
@@ -1047,14 +1047,29 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::ThreadLocalRef(_) |
             mir::Rvalue::Use(..) => // (*)
                 true,
-            // This always produces a `ty::RawPtr`, so will be Immediate or Pair
-            mir::Rvalue::Aggregate(box AggregateKind::RawPtr(..), ..) => true,
-            mir::Rvalue::Repeat(..) |
-            mir::Rvalue::Aggregate(..) => {
+            // Arrays are always aggregates, so it's not worth checking anything here.
+            // (If it's really `[(); N]` or `[T; 0]` and we use the place path, fine.)
+            mir::Rvalue::Repeat(..) => false,
+            mir::Rvalue::Aggregate(ref kind, _) => {
+                let allowed_kind = match **kind {
+                    // This always produces a `ty::RawPtr`, so will be Immediate or Pair
+                    mir::AggregateKind::RawPtr(..) => true,
+                    mir::AggregateKind::Array(..) => false,
+                    mir::AggregateKind::Tuple => true,
+                    mir::AggregateKind::Adt(def_id, ..) => {
+                        let adt_def = self.cx.tcx().adt_def(def_id);
+                        adt_def.is_struct() && !adt_def.repr().simd()
+                    }
+                    mir::AggregateKind::Closure(..) => true,
+                    // FIXME: Can we do this for simple coroutines too?
+                    mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
+                };
+                allowed_kind && {
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
                 let ty = self.monomorphize(ty);
-                // For ZST this can be `OperandValueKind::ZeroSized`.
-                self.cx.spanned_layout_of(ty, span).is_zst()
+                let layout = self.cx.spanned_layout_of(ty, span);
+                !self.cx.is_backend_ref(layout)
+                }
             }
         }
@@ -1096,3 +1111,14 @@ enum OperandValueKind {
     Pair(abi::Scalar, abi::Scalar),
     ZeroSized,
 }
+
+impl OperandValueKind {
+    fn scalars(self) -> Option<ArrayVec<abi::Scalar, 2>> {
+        Some(match self {
+            OperandValueKind::ZeroSized => ArrayVec::new(),
+            OperandValueKind::Immediate(a) => ArrayVec::from_iter([a]),
+            OperandValueKind::Pair(a, b) => [a, b].into(),
+            OperandValueKind::Ref => return None,
+        })
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 51b22bfaf25..9fd6eb8edab 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -186,6 +186,15 @@ pub trait BuilderMethods<'a, 'tcx>:
         align: Align,
         flags: MemFlags,
     ) -> Self::Value;
+    fn store_to_place_with_flags(
+        &mut self,
+        val: Self::Value,
+        place: PlaceValue<Self::Value>,
+        flags: MemFlags,
+    ) -> Self::Value {
+        debug_assert_eq!(place.llextra, None);
+        self.store_with_flags(val, place.llval, place.align, flags)
+    }
     fn atomic_store(
         &mut self,
         val: Self::Value,
@@ -238,7 +247,10 @@ pub trait BuilderMethods<'a, 'tcx>:
         } else {
             (in_ty, dest_ty)
         };
-        assert!(matches!(self.cx().type_kind(float_ty), TypeKind::Float | TypeKind::Double));
+        assert!(matches!(
+            self.cx().type_kind(float_ty),
+            TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::FP128
+        ));
         assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);
 
         if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
@@ -286,35 +298,36 @@ pub trait BuilderMethods<'a, 'tcx>:
     /// (For example, typed load-stores with alias metadata.)
     fn typed_place_copy(
         &mut self,
-        dst: PlaceRef<'tcx, Self::Value>,
-        src: PlaceRef<'tcx, Self::Value>,
+        dst: PlaceValue<Self::Value>,
+        src: PlaceValue<Self::Value>,
+        layout: TyAndLayout<'tcx>,
     ) {
-        self.typed_place_copy_with_flags(dst, src, MemFlags::empty());
+        self.typed_place_copy_with_flags(dst, src, layout, MemFlags::empty());
     }
 
     fn typed_place_copy_with_flags(
         &mut self,
-        dst: PlaceRef<'tcx, Self::Value>,
-        src: PlaceRef<'tcx, Self::Value>,
+        dst: PlaceValue<Self::Value>,
+        src: PlaceValue<Self::Value>,
+        layout: TyAndLayout<'tcx>,
         flags: MemFlags,
     ) {
-        debug_assert!(src.val.llextra.is_none(), "cannot directly copy from unsized values");
-        debug_assert!(dst.val.llextra.is_none(), "cannot directly copy into unsized values");
-        debug_assert_eq!(dst.layout.size, src.layout.size);
+        debug_assert!(layout.is_sized(), "cannot typed-copy an unsized type");
+        debug_assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
+        debug_assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let ty = self.backend_type(dst.layout);
-            let val = self.load_from_place(ty, src.val);
-            self.store_with_flags(val, dst.val.llval, dst.val.align, flags);
-        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(dst.layout)
-        {
+            let ty = self.backend_type(layout);
+            let val = self.load_from_place(ty, src);
+            self.store_to_place_with_flags(val, dst, flags);
+        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(layout) {
             // If we're not optimizing, the aliasing information from `memcpy`
             // isn't useful, so just load-store the value for smaller code.
-            let temp = self.load_operand(src);
-            temp.val.store_with_flags(self, dst, flags);
-        } else if !dst.layout.is_zst() {
-            let bytes = self.const_usize(dst.layout.size.bytes());
-            self.memcpy(dst.val.llval, dst.val.align, src.val.llval, src.val.align, bytes, flags);
+            let temp = self.load_operand(src.with_type(layout));
+            temp.val.store_with_flags(self, dst.with_type(layout), flags);
+        } else if !layout.is_zst() {
+            let bytes = self.const_usize(layout.size.bytes());
+            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
         }
     }
 
@@ -327,18 +340,19 @@ pub trait BuilderMethods<'a, 'tcx>:
     /// cases (in non-debug), preferring the fallback body instead.
     fn typed_place_swap(
         &mut self,
-        left: PlaceRef<'tcx, Self::Value>,
-        right: PlaceRef<'tcx, Self::Value>,
+        left: PlaceValue<Self::Value>,
+        right: PlaceValue<Self::Value>,
+        layout: TyAndLayout<'tcx>,
     ) {
-        let mut temp = self.load_operand(left);
+        let mut temp = self.load_operand(left.with_type(layout));
         if let OperandValue::Ref(..) = temp.val {
             // The SSA value isn't stand-alone, so we need to copy it elsewhere
-            let alloca = PlaceRef::alloca(self, left.layout);
-            self.typed_place_copy(alloca, left);
+            let alloca = PlaceRef::alloca(self, layout);
+            self.typed_place_copy(alloca.val, left, layout);
             temp = self.load_operand(alloca);
         }
-        self.typed_place_copy(left, right);
-        temp.val.store(self, right);
+        self.typed_place_copy(left, right, layout);
+        temp.val.store(self, right.with_type(layout));
     }
 
     fn select(
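`typed_place_swap` now also works on raw `PlaceValue`s with a single shared layout. As a standalone model of its strategy (simplified to byte buffers, not the trait implementation): load the left value into a temporary, spilling through a scratch copy when it cannot live as a single SSA value, then copy right over left and store the temporary into right:

    // Toy memory model: places are (offset, size) views into one byte buffer.
    fn swap_places(mem: &mut [u8], left: usize, right: usize, size: usize) {
        // "Load" left into a temporary. (The alloca spill path in the real
        // code exists because some values cannot be held as one immediate.)
        let temp: Vec<u8> = mem[left..left + size].to_vec();
        // typed_place_copy(left, right, layout)
        mem.copy_within(right..right + size, left);
        // temp.val.store(right.with_type(layout))
        mem[right..right + size].copy_from_slice(&temp);
    }

    fn main() {
        let mut mem = vec![1, 2, 3, 4];
        swap_places(&mut mem, 0, 2, 2); // swap two 2-byte places
        assert_eq!(mem, vec![3, 4, 1, 2]);
    }
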
