Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir/rvalue.rs')
| -rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 178 |
1 file changed, 90 insertions, 88 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 610e2fd2311..8a67b8d6e5f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir};
+use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
-use super::place::{PlaceRef, codegen_tag_value};
+use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
@@ -180,7 +180,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue));
                 let temp = self.codegen_rvalue_operand(bx, rvalue);
                 temp.val.store(bx, dest);
             }
@@ -218,17 +217,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     /// Transmutes an `OperandValue` to another `OperandValue`.
     ///
-    /// This is supported only for cases where [`Self::rvalue_creates_operand`]
-    /// returns `true`, and will ICE otherwise. (In particular, anything that
-    /// would need to `alloca` in order to return a `PlaceValue` will ICE,
-    /// expecting those to go via [`Self::codegen_transmute`] instead where
-    /// the destination place is already allocated.)
+    /// This is supported for all cases where the `cast` type is SSA,
+    /// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> OperandValue<Bx::Value> {
+        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
+            && !cast.is_zst()
+        {
+            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
+        }
+
+        // `Layout` is interned, so we can do a cheap check for things that are
+        // exactly the same and thus don't need any handling.
+        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
+            return operand.val;
+        }
+
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
@@ -241,11 +249,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return OperandValue::poison(bx, cast);
         }
 
+        // To or from pointers takes different methods, so we use this to restrict
+        // the SimdVector case to types which can be `bitcast` between each other.
+        #[inline]
+        fn vector_can_bitcast(x: abi::Scalar) -> bool {
+            matches!(
+                x,
+                abi::Scalar::Initialized {
+                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
+                    ..
+                }
+            )
+        }
+
+        let cx = bx.cx();
         match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (_, _, abi::BackendRepr::Memory { .. }) => {
-                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
-            }
             (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
@@ -256,16 +275,46 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandValue::Immediate(imm),
                 abi::BackendRepr::Scalar(from_scalar),
                 abi::BackendRepr::Scalar(to_scalar),
-            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
+            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
+                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
+            }
+            (
+                OperandValue::Immediate(imm),
+                abi::BackendRepr::SimdVector { element: from_scalar, .. },
+                abi::BackendRepr::SimdVector { element: to_scalar, .. },
+            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
+                let to_backend_ty = bx.cx().immediate_backend_type(cast);
+                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
+            }
             (
                 OperandValue::Pair(imm_a, imm_b),
                 abi::BackendRepr::ScalarPair(in_a, in_b),
                 abi::BackendRepr::ScalarPair(out_a, out_b),
-            ) => OperandValue::Pair(
-                transmute_scalar(bx, imm_a, in_a, out_a),
-                transmute_scalar(bx, imm_b, in_b, out_b),
-            ),
-            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
+                OperandValue::Pair(
+                    transmute_scalar(bx, imm_a, in_a, out_a),
+                    transmute_scalar(bx, imm_b, in_b, out_b),
+                )
+            }
+            _ => {
+                // For any other potentially-tricky cases, make a temporary instead.
+                // If anything else wants the target local to be in memory this won't
+                // be hit, as `codegen_transmute` will get called directly. Thus this
+                // is only for places where everything else wants the operand form,
+                // and thus it's not worth making those places get it from memory.
+                //
+                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
+                // and endianness issues, as do SimdVector ones to avoid worrying
+                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
+                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
+                let size = Ord::max(operand.layout.size, cast.size);
+                let temp = PlaceValue::alloca(bx, size, align);
+                bx.lifetime_start(temp.llval, size);
+                operand.val.store(bx, temp.with_type(operand.layout));
+                let val = bx.load_operand(temp.with_type(cast)).val;
+                bx.lifetime_end(temp.llval, size);
+                val
+            }
         }
     }
 
@@ -288,7 +337,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // valid ranges. For example, `char`s are passed as just `i32`, with no
         // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
         // the range of the input value too, not just the output range.
-        assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
+        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);
 
         imm = match (from_scalar.primitive(), to_scalar.primitive()) {
             (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
@@ -326,8 +375,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
-        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
-
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                 let operand = self.codegen_operand(bx, source);
@@ -653,8 +700,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ty = self.monomorphize(ty);
                 let layout = self.cx.layout_of(ty);
 
-                // `rvalue_creates_operand` has arranged that we only get here if
-                // we can build the aggregate immediate from the field immediates.
                 let mut builder = OperandRefBuilder::new(layout);
                 for (field_idx, field) in fields.iter_enumerated() {
                     let op = self.codegen_operand(bx, field);
@@ -955,69 +1000,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         OperandValue::Pair(val, of)
     }
-
-    /// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
-    /// rather than needing a full `PlaceRef` for the assignment destination.
-    ///
-    /// This is used by the [`super::analyze`] code to decide which MIR locals
-    /// can stay as SSA values (as opposed to generating `alloca` slots for them).
-    /// As such, some paths here return `true` even where the specific rvalue
-    /// will not actually take the operand path because the result type is such
-    /// that it always gets an `alloca`, but where it's not worth re-checking the
-    /// layout in this code when the right thing will happen anyway.
-    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
-        match *rvalue {
-            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
-                let operand_ty = operand.ty(self.mir, self.cx.tcx());
-                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
-                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-                match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
-                    // Otherwise (for a non-memory output) if the input is memory
-                    // then we can just read the value from the place.
-                    (abi::BackendRepr::Memory { .. }, _) => true,
-
-                    // When we have scalar immediates, we can only convert things
-                    // where the sizes match, to avoid endianness questions.
-                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
-                        a.size(self.cx) == b.size(self.cx),
-                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
-                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
-
-                    // Mixing Scalars and ScalarPairs can get quite complicated when
-                    // padding and undef get involved, so leave that to the memory path.
-                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
-                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
-
-                    // SIMD vectors aren't worth the trouble of dealing with complex
-                    // cases like from vectors of f32 to vectors of pointers or
-                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-                }
-            }
-            mir::Rvalue::Ref(..) |
-            mir::Rvalue::CopyForDeref(..) |
-            mir::Rvalue::RawPtr(..) |
-            mir::Rvalue::Len(..) |
-            mir::Rvalue::Cast(..) | // (*)
-            mir::Rvalue::ShallowInitBox(..) | // (*)
-            mir::Rvalue::BinaryOp(..) |
-            mir::Rvalue::UnaryOp(..) |
-            mir::Rvalue::Discriminant(..) |
-            mir::Rvalue::NullaryOp(..) |
-            mir::Rvalue::ThreadLocalRef(_) |
-            mir::Rvalue::Use(..) |
-            mir::Rvalue::Repeat(..) | // (*)
-            mir::Rvalue::Aggregate(..) | // (*)
-            mir::Rvalue::WrapUnsafeBinder(..) => // (*)
-                true,
-        }
-
-        // (*) this is only true if the type is suitable
-    }
 }
 
 /// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
@@ -1064,7 +1046,7 @@ pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // That said, last time we tried removing this, it didn't actually help
     // the rustc-perf results, so might as well keep doing it
     // <https://github.com/rust-lang/rust/pull/135610#issuecomment-2599275182>
-    assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
+    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));
 
     imm = match (from_scalar.primitive(), to_scalar.primitive()) {
         (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
@@ -1092,22 +1074,42 @@ pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // since it's never passed to something with parameter metadata (especially
     // after MIR inlining) so the only way to tell the backend about the
     // constraint that the `transmute` introduced is to `assume` it.
-    assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
+    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));
 
     imm = bx.to_immediate_scalar(imm, to_scalar);
     imm
 }
 
+/// Emits an `assume` call that `imm`'s value is within the known range of `scalar`.
+///
+/// If `known` is `Some`, only emits the assume if it's more specific than
+/// whatever is already known from the range of *that* scalar.
 fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     imm: Bx::Value,
     scalar: abi::Scalar,
     backend_ty: Bx::Type,
+    known: Option<&abi::Scalar>,
 ) {
-    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
+    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
         return;
     }
 
+    match (scalar, known) {
+        (abi::Scalar::Union { .. }, _) => return,
+        (_, None) => {
+            if scalar.is_always_valid(bx.cx()) {
+                return;
+            }
+        }
+        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
+            let known_range = known.valid_range(bx.cx());
+            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
+                return;
+            }
+        }
+    }
+
     match scalar.primitive() {
         abi::Primitive::Int(..) => {
             let range = scalar.valid_range(bx.cx());
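
Note on the new catch-all arm in `codegen_transmute_operand`: transmutes whose immediates don't line up one-for-one (notably Scalar ⇌ ScalarPair) now spill to a stack temporary and reload rather than ICEing. A minimal sketch in ordinary Rust (not compiler-internal code) of a transmute of that shape: `(u32, u32)` has a ScalarPair layout, `u64` a single Scalar; the sizes match, but which half lands where depends on target endianness, which is exactly why the backend round-trips through memory instead of bit-shuffling in registers.

// A language-level example of a Scalar ⇌ ScalarPair transmute that, under
// this patch, takes the alloca round-trip in `codegen_transmute_operand`.
fn main() {
    let pair: (u32, u32) = (0xDEAD_BEEF, 0x1234_5678);
    // SAFETY: `(u32, u32)` and `u64` have the same size, and every bit
    // pattern is valid for both types.
    let word: u64 = unsafe { std::mem::transmute(pair) };
    // The first tuple field supplies the low bytes on little-endian targets
    // and the high bytes on big-endian ones.
    if cfg!(target_endian = "little") {
        assert_eq!(word, 0x1234_5678_DEAD_BEEF_u64);
    } else {
        assert_eq!(word, 0xDEAD_BEEF_1234_5678_u64);
    }
    println!("{word:#018x}");
}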
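Note on the `known` parameter added to `assume_scalar_range`: the `assume` is now skipped when the range already known for the other side of the transmute implies the one being asserted. A simplified standalone model using plain non-wrapping ranges (hypothetical types, not rustc's `rustc_abi` API; the real `WrappingRange` also supports wraparound and per-size truncation, and the `None` case here only roughly models `is_always_valid`):

// Models the redundancy check: emit an assume only when it adds information.
#[derive(Clone, Copy)]
struct Range {
    start: u128,
    end: u128, // inclusive
}

impl Range {
    // Does `self` contain every value that `known` contains?
    fn contains_range(self, known: Range) -> bool {
        self.start <= known.start && known.end <= self.end
    }
}

fn needs_assume(valid: Range, known: Option<Range>) -> bool {
    match known {
        // With no prior information, only a full-domain range is free.
        None => !(valid.start == 0 && valid.end == u128::MAX),
        // If what is already known is at least as tight, the assume is redundant.
        Some(k) => !valid.contains_range(k),
    }
}

fn main() {
    let char_range = Range { start: 0, end: 0x10FFFF };
    let u32_range = Range { start: 0, end: u32::MAX as u128 };
    // Asserting the `char` range on a value known only to be a `u32` is news.
    assert!(needs_assume(char_range, Some(u32_range)));
    // Asserting the `u32` range on a value known to come from a `char` is not.
    assert!(!needs_assume(u32_range, Some(char_range)));
}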
