Diffstat (limited to 'compiler/rustc_codegen_ssa')
-rw-r--r--   compiler/rustc_codegen_ssa/src/codegen_attrs.rs |   9
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/analyze.rs   | 118
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/operand.rs   |  37
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/rvalue.rs    | 150
4 files changed, 145 insertions, 169 deletions
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index 5a6a76d83cb..e187331c696 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -509,15 +509,6 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
         err.emit();
     }
 
-    // Any linkage to LLVM intrinsics for now forcibly marks them all as never
-    // unwinds since LLVM sometimes can't handle codegen which `invoke`s
-    // intrinsic functions.
-    if let Some(name) = &codegen_fn_attrs.link_name
-        && name.as_str().starts_with("llvm.")
-    {
-        codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
-    }
-
     if let Some(features) = check_tied_features(
         tcx.sess,
         &codegen_fn_attrs
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 6d6465dd798..c2c023af090 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -1,12 +1,13 @@
 //! An analysis to determine which locals require allocas and
 //! which do not.
 
+use rustc_abi as abi;
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_index::{IndexSlice, IndexVec};
 use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::{self, DefLocation, Location, TerminatorKind, traversal};
-use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::{bug, span_bug};
 use tracing::debug;
 
@@ -99,63 +100,75 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> LocalAnalyzer<'a, 'b, 'tcx, Bx>
         context: PlaceContext,
         location: Location,
     ) {
-        let cx = self.fx.cx;
-
-        if let Some((place_base, elem)) = place_ref.last_projection() {
-            let mut base_context = if context.is_mutating_use() {
-                PlaceContext::MutatingUse(MutatingUseContext::Projection)
-            } else {
-                PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
-            };
-
-            // Allow uses of projections that are ZSTs or from scalar fields.
-            let is_consume = matches!(
-                context,
-                PlaceContext::NonMutatingUse(
-                    NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
-                )
-            );
-            if is_consume {
-                let base_ty = place_base.ty(self.fx.mir, cx.tcx());
-                let base_ty = self.fx.monomorphize(base_ty);
-
-                // ZSTs don't require any actual memory access.
-                let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty;
-                let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
-                if cx.spanned_layout_of(elem_ty, span).is_zst() {
-                    return;
+        if !place_ref.projection.is_empty() {
+            const COPY_CONTEXT: PlaceContext =
+                PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+
+            // `PlaceElem::Index` is the only variant that can mention other `Local`s,
+            // so check for those up-front before any potential short-circuits.
+            for elem in place_ref.projection {
+                if let mir::PlaceElem::Index(index_local) = *elem {
+                    self.visit_local(index_local, COPY_CONTEXT, location);
                 }
+            }
 
-                if let mir::ProjectionElem::Field(..) = elem {
-                    let layout = cx.spanned_layout_of(base_ty.ty, span);
-                    if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
-                        // Recurse with the same context, instead of `Projection`,
-                        // potentially stopping at non-operand projections,
-                        // which would trigger `not_ssa` on locals.
-                        base_context = context;
-                    }
-                }
+            // If our local is already memory, nothing can make it *more* memory
+            // so we don't need to bother checking the projections further.
+            if self.locals[place_ref.local] == LocalKind::Memory {
+                return;
             }
 
-            if let mir::ProjectionElem::Deref = elem {
-                // Deref projections typically only read the pointer.
-                base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+            if place_ref.is_indirect_first_projection() {
+                // If this starts with a `Deref`, we only need to record a read of the
+                // pointer being dereferenced, as all the subsequent projections are
+                // working on a place which is always supported. (And because we're
+                // looking at codegen MIR, it can only happen as the first projection.)
+                self.visit_local(place_ref.local, COPY_CONTEXT, location);
+                return;
             }
 
-            self.process_place(&place_base, base_context, location);
-            // HACK(eddyb) this emulates the old `visit_projection_elem`, this
-            // entire `visit_place`-like `process_place` method should be rewritten,
-            // now that we have moved to the "slice of projections" representation.
-            if let mir::ProjectionElem::Index(local) = elem {
-                self.visit_local(
-                    local,
-                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
-                    location,
-                );
+            if context.is_mutating_use() {
+                // If it's a mutating use it doesn't matter what the projections are,
+                // if there are *any* then we need a place to write. (For example,
+                // `_1 = Foo()` works in SSA but `_2.0 = Foo()` does not.)
+                let mut_projection = PlaceContext::MutatingUse(MutatingUseContext::Projection);
+                self.visit_local(place_ref.local, mut_projection, location);
+                return;
             }
-        } else {
-            self.visit_local(place_ref.local, context, location);
+
+            // Scan through to ensure the only projections are those which
+            // `FunctionCx::maybe_codegen_consume_direct` can handle.
+            let base_ty = self.fx.monomorphized_place_ty(mir::PlaceRef::from(place_ref.local));
+            let mut layout = self.fx.cx.layout_of(base_ty);
+            for elem in place_ref.projection {
+                layout = match *elem {
+                    mir::PlaceElem::Field(fidx, ..) => layout.field(self.fx.cx, fidx.as_usize()),
+                    mir::PlaceElem::Downcast(_, vidx)
+                        if let abi::Variants::Single { index: single_variant } =
+                            layout.variants
+                            && vidx == single_variant =>
+                    {
+                        layout.for_variant(self.fx.cx, vidx)
+                    }
+                    mir::PlaceElem::Subtype(subtype_ty) => {
+                        let subtype_ty = self.fx.monomorphize(subtype_ty);
+                        self.fx.cx.layout_of(subtype_ty)
+                    }
+                    _ => {
+                        self.locals[place_ref.local] = LocalKind::Memory;
+                        return;
+                    }
+                }
+            }
+            debug_assert!(
+                !self.fx.cx.is_backend_ref(layout),
+                "Post-projection {place_ref:?} layout should be non-Ref, but it's {layout:?}",
+            );
         }
+
+        // Even with supported projections, we still need to have `visit_local`
+        // check for things that can't be done in SSA (like `SharedBorrow`).
+        self.visit_local(place_ref.local, context, location);
     }
 }
 
@@ -170,11 +183,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> Visitor<'tcx> for LocalAnalyzer
 
         if let Some(local) = place.as_local() {
             self.define(local, DefLocation::Assignment(location));
-            if self.locals[local] != LocalKind::Memory {
-                if !self.fx.rvalue_creates_operand(rvalue) {
-                    self.locals[local] = LocalKind::Memory;
-                }
-            }
         } else {
             self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
         }
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 8e308aac769..5459f95c186 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -335,13 +335,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let val = if field.is_zst() {
             OperandValue::ZeroSized
-        } else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
-            // codegen_transmute_operand doesn't support SIMD, but since the previous
-            // check handled ZSTs, the only possible field access into something SIMD
-            // is to the `non_1zst_field` that's the same SIMD. (Other things, even
-            // just padding, would change the wrapper's representation type.)
-            assert_eq!(field.size, self.layout.size);
-            self.val
         } else if field.size == self.layout.size {
             assert_eq!(offset.bytes(), 0);
             fx.codegen_transmute_operand(bx, *self, field)
@@ -928,9 +921,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match self.locals[place_ref.local] {
             LocalRef::Operand(mut o) => {
-                // Moves out of scalar and scalar pair fields are trivial.
-                for elem in place_ref.projection.iter() {
-                    match elem {
+                // We only need to handle the projections that
+                // `LocalAnalyzer::process_place` let make it here.
+                for elem in place_ref.projection {
+                    match *elem {
                         mir::ProjectionElem::Field(f, _) => {
                             assert!(
                                 !o.layout.ty.is_any_ptr(),
@@ -939,17 +933,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             );
                             o = o.extract_field(self, bx, f.index());
                         }
-                        mir::ProjectionElem::Index(_)
-                        | mir::ProjectionElem::ConstantIndex { .. } => {
-                            // ZSTs don't require any actual memory access.
-                            // FIXME(eddyb) deduplicate this with the identical
-                            // checks in `codegen_consume` and `extract_field`.
-                            let elem = o.layout.field(bx.cx(), 0);
-                            if elem.is_zst() {
-                                o = OperandRef::zero_sized(elem);
-                            } else {
-                                return None;
-                            }
+                        mir::PlaceElem::Downcast(_, vidx) => {
+                            debug_assert_eq!(
+                                o.layout.variants,
+                                abi::Variants::Single { index: vidx },
+                            );
+                            let layout = o.layout.for_variant(bx.cx(), vidx);
+                            o = OperandRef { val: o.val, layout }
+                        }
+                        mir::PlaceElem::Subtype(subtype_ty) => {
+                            let subtype_ty = self.monomorphize(subtype_ty);
+                            let layout = self.cx.layout_of(subtype_ty);
+                            o = OperandRef { val: o.val, layout }
                         }
                         _ => return None,
                     }
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index a5759b79be4..8a67b8d6e5f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir};
+use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
-use super::place::{PlaceRef, codegen_tag_value};
+use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
@@ -180,7 +180,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue));
                 let temp = self.codegen_rvalue_operand(bx, rvalue);
                 temp.val.store(bx, dest);
             }
@@ -218,17 +217,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     /// Transmutes an `OperandValue` to another `OperandValue`.
     ///
-    /// This is supported only for cases where [`Self::rvalue_creates_operand`]
-    /// returns `true`, and will ICE otherwise. (In particular, anything that
-    /// would need to `alloca` in order to return a `PlaceValue` will ICE,
-    /// expecting those to go via [`Self::codegen_transmute`] instead where
-    /// the destination place is already allocated.)
+    /// This is supported for all cases where the `cast` type is SSA,
+    /// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> OperandValue<Bx::Value> {
+        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
+            && !cast.is_zst()
+        {
+            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
+        }
+
+        // `Layout` is interned, so we can do a cheap check for things that are
+        // exactly the same and thus don't need any handling.
+        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
+            return operand.val;
+        }
+
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
@@ -241,11 +249,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return OperandValue::poison(bx, cast);
         }
 
+        // To or from pointers takes different methods, so we use this to restrict
+        // the SimdVector case to types which can be `bitcast` between each other.
+        #[inline]
+        fn vector_can_bitcast(x: abi::Scalar) -> bool {
+            matches!(
+                x,
+                abi::Scalar::Initialized {
+                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
+                    ..
+                }
+            )
+        }
+
+        let cx = bx.cx();
         match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (_, _, abi::BackendRepr::Memory { .. }) => {
-                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
-            }
             (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
@@ -256,16 +275,46 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             (
                 OperandValue::Immediate(imm),
                 abi::BackendRepr::Scalar(from_scalar),
                 abi::BackendRepr::Scalar(to_scalar),
-            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
+            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
+                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
+            }
+            (
+                OperandValue::Immediate(imm),
+                abi::BackendRepr::SimdVector { element: from_scalar, .. },
+                abi::BackendRepr::SimdVector { element: to_scalar, .. },
+            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
+                let to_backend_ty = bx.cx().immediate_backend_type(cast);
+                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
+            }
             (
                 OperandValue::Pair(imm_a, imm_b),
                 abi::BackendRepr::ScalarPair(in_a, in_b),
                 abi::BackendRepr::ScalarPair(out_a, out_b),
-            ) => OperandValue::Pair(
-                transmute_scalar(bx, imm_a, in_a, out_a),
-                transmute_scalar(bx, imm_b, in_b, out_b),
-            ),
-            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
+                OperandValue::Pair(
+                    transmute_scalar(bx, imm_a, in_a, out_a),
+                    transmute_scalar(bx, imm_b, in_b, out_b),
+                )
+            }
+            _ => {
+                // For any other potentially-tricky cases, make a temporary instead.
+                // If anything else wants the target local to be in memory this won't
+                // be hit, as `codegen_transmute` will get called directly. Thus this
+                // is only for places where everything else wants the operand form,
+                // and thus it's not worth making those places get it from memory.
+                //
+                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
+                // and endianness issues, as do SimdVector ones to avoid worrying
+                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
+                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
+                let size = Ord::max(operand.layout.size, cast.size);
+                let temp = PlaceValue::alloca(bx, size, align);
+                bx.lifetime_start(temp.llval, size);
+                operand.val.store(bx, temp.with_type(operand.layout));
+                let val = bx.load_operand(temp.with_type(cast)).val;
+                bx.lifetime_end(temp.llval, size);
+                val
+            }
         }
     }
@@ -326,8 +375,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
-        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
-
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                 let operand = self.codegen_operand(bx, source);
@@ -653,8 +700,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ty = self.monomorphize(ty);
                 let layout = self.cx.layout_of(ty);
 
-                // `rvalue_creates_operand` has arranged that we only get here if
-                // we can build the aggregate immediate from the field immediates.
                 let mut builder = OperandRefBuilder::new(layout);
                 for (field_idx, field) in fields.iter_enumerated() {
                     let op = self.codegen_operand(bx, field);
@@ -955,69 +1000,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         OperandValue::Pair(val, of)
     }
-
-    /// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
-    /// rather than needing a full `PlaceRef` for the assignment destination.
-    ///
-    /// This is used by the [`super::analyze`] code to decide which MIR locals
-    /// can stay as SSA values (as opposed to generating `alloca` slots for them).
-    /// As such, some paths here return `true` even where the specific rvalue
-    /// will not actually take the operand path because the result type is such
-    /// that it always gets an `alloca`, but where it's not worth re-checking the
-    /// layout in this code when the right thing will happen anyway.
-    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
-        match *rvalue {
-            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
-                let operand_ty = operand.ty(self.mir, self.cx.tcx());
-                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
-                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-                match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
-                    // Otherwise (for a non-memory output) if the input is memory
-                    // then we can just read the value from the place.
-                    (abi::BackendRepr::Memory { .. }, _) => true,
-
-                    // When we have scalar immediates, we can only convert things
-                    // where the sizes match, to avoid endianness questions.
-                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
-                        a.size(self.cx) == b.size(self.cx),
-                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
-                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
-
-                    // Mixing Scalars and ScalarPairs can get quite complicated when
-                    // padding and undef get involved, so leave that to the memory path.
-                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
-                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
-
-                    // SIMD vectors aren't worth the trouble of dealing with complex
-                    // cases like from vectors of f32 to vectors of pointers or
-                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-                }
-            }
-            mir::Rvalue::Ref(..) |
-            mir::Rvalue::CopyForDeref(..) |
-            mir::Rvalue::RawPtr(..) |
-            mir::Rvalue::Len(..) |
-            mir::Rvalue::Cast(..) | // (*)
-            mir::Rvalue::ShallowInitBox(..) | // (*)
-            mir::Rvalue::BinaryOp(..) |
-            mir::Rvalue::UnaryOp(..) |
-            mir::Rvalue::Discriminant(..) |
-            mir::Rvalue::NullaryOp(..) |
-            mir::Rvalue::ThreadLocalRef(_) |
-            mir::Rvalue::Use(..) |
-            mir::Rvalue::Repeat(..) | // (*)
-            mir::Rvalue::Aggregate(..) | // (*)
-            mir::Rvalue::WrapUnsafeBinder(..) => // (*)
-                true,
-        }
-
-        // (*) this is only true if the type is suitable
-    }
 }
 
 /// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
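
The rewritten `LocalAnalyzer::process_place` and `maybe_codegen_consume_direct` above agree on which projections may stay in operand form: `Field`, single-variant `Downcast`, and `Subtype`, with anything else marking the local as memory. As a rough illustration in ordinary Rust (hypothetical example code, not part of the patch), a scalar field read out of an aggregate local is exactly the kind of consume that stays as an SSA operand:

    // Hypothetical example (not from the patch): `pair.a` is a `Field`
    // projection on a ScalarPair-ABI local, which the analysis keeps as an
    // SSA operand and `extract_field` resolves without any stack slot.
    struct Pair {
        a: u32,
        b: f32,
    }

    fn first(pair: Pair) -> u32 {
        pair.a
    }

    fn main() {
        println!("{}", first(Pair { a: 7, b: 1.5 }));
    }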
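For the transmutes that the new fallback arm of `codegen_transmute_operand` still routes through memory (for example Scalar ⇌ ScalarPair, or SIMD element types it will not `bitcast`), the emitted code stores the value with the source layout into a stack temporary sized and aligned for both layouts, then reloads it with the destination layout. A minimal sketch of that pattern as a standalone, hypothetical helper (not compiler code), assuming equal sizes and sufficient alignment:

    use std::mem::{align_of, size_of, MaybeUninit};
    use std::ptr;

    /// Hypothetical helper mirroring the fallback arm: spill the source value
    /// to a stack temporary, then reload it as the destination type.
    unsafe fn transmute_via_temp<Src, Dst>(src: Src) -> Dst {
        // The real codegen uses max(size) and max(align) of the two layouts,
        // plus lifetime markers; this sketch just asserts the simple case.
        assert_eq!(size_of::<Src>(), size_of::<Dst>());
        assert!(align_of::<Dst>() >= align_of::<Src>());
        let mut temp = MaybeUninit::<Dst>::uninit();
        // Store with the source type, ...
        unsafe { ptr::write(temp.as_mut_ptr().cast::<Src>(), src) };
        // ... then load with the destination type.
        unsafe { temp.assume_init() }
    }

    fn main() {
        // e.g. a ScalarPair-style (u16, u16) reinterpreted as a single u32.
        let word: u32 = unsafe { transmute_via_temp((0x12u16, 0x34u16)) };
        println!("{word:#x}");
    }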
