Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
-rw-r--r--   compiler/rustc_codegen_ssa/src/back/symbol_export.rs |  14
-rw-r--r--   compiler/rustc_codegen_ssa/src/codegen_attrs.rs      |  14
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/debuginfo.rs      |   2
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/naked_asm.rs      |   8
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/operand.rs        | 131
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/rvalue.rs         |  59
-rw-r--r--   compiler/rustc_codegen_ssa/src/traits/type_.rs       |  20
7 files changed, 142 insertions(+), 106 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 92b9b6e132e..d0b6c7470fb 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -219,6 +219,7 @@ fn exported_symbols_provider_local<'tcx>(
             .chain([
                 mangle_internal_symbol(tcx, "__rust_alloc_error_handler"),
                 mangle_internal_symbol(tcx, OomStrategy::SYMBOL),
+                mangle_internal_symbol(tcx, NO_ALLOC_SHIM_IS_UNSTABLE),
             ])
         {
             let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
@@ -232,19 +233,6 @@ fn exported_symbols_provider_local<'tcx>(
                 },
             ));
         }
-
-        let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(
-            tcx,
-            &mangle_internal_symbol(tcx, NO_ALLOC_SHIM_IS_UNSTABLE),
-        ));
-        symbols.push((
-            exported_symbol,
-            SymbolExportInfo {
-                level: SymbolExportLevel::Rust,
-                kind: SymbolExportKind::Data,
-                used: false,
-            },
-        ))
     }
 
     if tcx.sess.instrument_coverage() || tcx.sess.opts.cg.profile_generate.enabled() {
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index 33c457ead30..cd1fa6a06bb 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -3,7 +3,6 @@ use std::str::FromStr;
 use rustc_abi::ExternAbi;
 use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, DiffActivity, DiffMode};
 use rustc_ast::{LitKind, MetaItem, MetaItemInner, attr};
-use rustc_attr_data_structures::ReprAttr::ReprAlign;
 use rustc_attr_data_structures::{
     AttributeKind, InlineAttr, InstructionSetAttr, OptimizeAttr, find_attr,
 };
@@ -110,17 +109,8 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
             }
         };
 
-        if let hir::Attribute::Parsed(p) = attr {
-            match p {
-                AttributeKind::Repr(reprs) => {
-                    codegen_fn_attrs.alignment = reprs
-                        .iter()
-                        .filter_map(|(r, _)| if let ReprAlign(x) = r { Some(*x) } else { None })
-                        .max();
-                }
-
-                _ => {}
-            }
+        if let hir::Attribute::Parsed(AttributeKind::Align { align, .. }) = attr {
+            codegen_fn_attrs.alignment = Some(*align);
         }
 
         let Some(Ident { name, .. }) = attr.ident() else {
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index f731613d67e..025f5fb54f4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -317,7 +317,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let name = if bx.sess().fewer_names() {
             None
         } else {
-            Some(match whole_local_var.or(fallback_var.clone()) {
+            Some(match whole_local_var.or_else(|| fallback_var.clone()) {
                 Some(var) if var.name != sym::empty => var.name.to_string(),
                 _ => format!("{local:?}"),
             })
diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
index b805dc094e9..9f66457a740 100644
--- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
@@ -205,7 +205,7 @@ fn prefix_and_suffix<'tcx>(
     let mut end = String::new();
     match asm_binary_format {
         BinaryFormat::Elf => {
-            let section = link_section.unwrap_or(format!(".text.{asm_name}"));
+            let section = link_section.unwrap_or_else(|| format!(".text.{asm_name}"));
 
             let progbits = match is_arm {
                 true => "%progbits",
@@ -239,7 +239,7 @@
             }
         }
         BinaryFormat::MachO => {
-            let section = link_section.unwrap_or("__TEXT,__text".to_string());
+            let section = link_section.unwrap_or_else(|| "__TEXT,__text".to_string());
             writeln!(begin, ".pushsection {},regular,pure_instructions", section).unwrap();
             writeln!(begin, ".balign {align_bytes}").unwrap();
             write_linkage(&mut begin).unwrap();
@@ -256,7 +256,7 @@
             }
         }
         BinaryFormat::Coff => {
-            let section = link_section.unwrap_or(format!(".text.{asm_name}"));
+            let section = link_section.unwrap_or_else(|| format!(".text.{asm_name}"));
             writeln!(begin, ".pushsection {},\"xr\"", section).unwrap();
             writeln!(begin, ".balign {align_bytes}").unwrap();
             write_linkage(&mut begin).unwrap();
@@ -273,7 +273,7 @@
             }
         }
         BinaryFormat::Wasm => {
-            let section = link_section.unwrap_or(format!(".text.{asm_name}"));
+            let section = link_section.unwrap_or_else(|| format!(".text.{asm_name}"));
 
             writeln!(begin, ".section {section},\"\",@").unwrap();
             // wasm functions cannot be aligned, so skip
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index e9389ddf93b..99957c67708 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -1,9 +1,9 @@
 use std::fmt;
 
-use arrayvec::ArrayVec;
-use either::Either;
 use rustc_abi as abi;
-use rustc_abi::{Align, BackendRepr, FIRST_VARIANT, Primitive, Size, TagEncoding, Variants};
+use rustc_abi::{
+    Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
+};
 use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
 use rustc_middle::mir::{self, ConstValue};
 use rustc_middle::ty::Ty;
@@ -13,6 +13,7 @@ use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::place::{PlaceRef, PlaceValue};
+use super::rvalue::transmute_immediate;
 use super::{FunctionCx, LocalRef};
 use crate::common::IntPredicate;
 use crate::traits::*;
@@ -69,31 +70,6 @@ pub enum OperandValue<V> {
 }
 
 impl<V: CodegenObject> OperandValue<V> {
-    /// If this is ZeroSized/Immediate/Pair, return an array of the 0/1/2 values.
-    /// If this is Ref, return the place.
-    #[inline]
-    pub(crate) fn immediates_or_place(self) -> Either<ArrayVec<V, 2>, PlaceValue<V>> {
-        match self {
-            OperandValue::ZeroSized => Either::Left(ArrayVec::new()),
-            OperandValue::Immediate(a) => Either::Left(ArrayVec::from_iter([a])),
-            OperandValue::Pair(a, b) => Either::Left([a, b].into()),
-            OperandValue::Ref(p) => Either::Right(p),
-        }
-    }
-
-    /// Given an array of 0/1/2 immediate values, return ZeroSized/Immediate/Pair.
-    #[inline]
-    pub(crate) fn from_immediates(immediates: ArrayVec<V, 2>) -> Self {
-        let mut it = immediates.into_iter();
-        let Some(a) = it.next() else {
-            return OperandValue::ZeroSized;
-        };
-        let Some(b) = it.next() else {
-            return OperandValue::Immediate(a);
-        };
-        OperandValue::Pair(a, b)
-    }
-
     /// Treat this value as a pointer and return the data pointer and
     /// optional metadata as backend values.
     ///
@@ -595,6 +571,105 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
         }
     }
+
+    /// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
+    /// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
+    /// later to set the necessary immediate(s).
+    ///
+    /// Returns `None` for `layout`s which cannot be built this way.
+    pub(crate) fn builder(
+        layout: TyAndLayout<'tcx>,
+    ) -> Option<OperandRef<'tcx, Result<V, abi::Scalar>>> {
+        let val = match layout.backend_repr {
+            BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
+            BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
+            BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Err(a), Err(b)),
+            BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
+        };
+        Some(OperandRef { val, layout })
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
+    pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &mut self,
+        bx: &mut Bx,
+        v: VariantIdx,
+        f: FieldIdx,
+        operand: OperandRef<'tcx, V>,
+    ) {
+        let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
+            // The other branch looking at field layouts ICEs for primitives,
+            // so we need to handle them separately.
+            // Multiple fields is possible for cases such as aggregating
+            // a thin pointer, where the second field is the unit.
+            assert!(!self.layout.is_zst());
+            assert_eq!(v, FIRST_VARIANT);
+            let first_field = f == FieldIdx::ZERO;
+            (!first_field, first_field)
+        } else {
+            let variant_layout = self.layout.for_variant(bx.cx(), v);
+            let field_layout = variant_layout.field(bx.cx(), f.as_usize());
+            let field_offset = variant_layout.fields.offset(f.as_usize());
+            (field_layout.is_zst(), field_offset == Size::ZERO)
+        };
+
+        let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
+            let from_bty = bx.cx().type_from_scalar(from_scalar);
+            let to_scalar = tgt.unwrap_err();
+            let to_bty = bx.cx().type_from_scalar(to_scalar);
+            let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
+            *tgt = Ok(imm);
+        };
+
+        match (operand.val, operand.layout.backend_repr) {
+            (OperandValue::ZeroSized, _) if expect_zst => {}
+            (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
+                OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
+                    update(val, v, from_scalar);
+                }
+                OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
+                    update(fst, v, from_scalar);
+                }
+                OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
+                    update(snd, v, from_scalar);
+                }
+                _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+            },
+            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
+                match &mut self.val {
+                    OperandValue::Pair(fst @ Err(_), snd @ Err(_)) => {
+                        update(fst, a, from_sa);
+                        update(snd, b, from_sb);
+                    }
+                    _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+                }
+            }
+            _ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
+        }
+    }
+
+    /// After having set all necessary fields, this converts the
+    /// `OperandValue<Result<V, _>>` (as obtained from [`OperandRef::builder`])
+    /// to the normal `OperandValue<V>`.
+    ///
+    /// ICEs if any required fields were not set.
+    pub fn build(&self) -> OperandRef<'tcx, V> {
+        let OperandRef { val, layout } = *self;
+
+        let unwrap = |r: Result<V, abi::Scalar>| match r {
+            Ok(v) => v,
+            Err(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
+        };
+
+        let val = match val {
+            OperandValue::ZeroSized => OperandValue::ZeroSized,
+            OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
+            OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
+            OperandValue::Ref(_) => bug!(),
+        };
+        OperandRef { val, layout }
+    }
 }
 
 impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index b62ac89661f..e1d8b7546cf 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1,7 +1,6 @@
 use std::assert_matches::assert_matches;
 
-use arrayvec::ArrayVec;
-use rustc_abi::{self as abi, FIRST_VARIANT, FieldIdx};
+use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
@@ -708,38 +707,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 // `rvalue_creates_operand` has arranged that we only get here if
                 // we can build the aggregate immediate from the field immediates.
-                let mut inputs = ArrayVec::<Bx::Value, 2>::new();
-                let mut input_scalars = ArrayVec::<abi::Scalar, 2>::new();
-                for field_idx in layout.fields.index_by_increasing_offset() {
-                    let field_idx = FieldIdx::from_usize(field_idx);
-                    let op = self.codegen_operand(bx, &fields[field_idx]);
-                    let values = op.val.immediates_or_place().left_or_else(|p| {
-                        bug!("Field {field_idx:?} is {p:?} making {layout:?}");
-                    });
-                    let scalars = self.value_kind(op.layout).scalars().unwrap();
-                    assert_eq!(values.len(), scalars.len());
-                    inputs.extend(values);
-                    input_scalars.extend(scalars);
+                let Some(mut builder) = OperandRef::builder(layout) else {
+                    bug!("Cannot use type in operand builder: {layout:?}")
+                };
+                for (field_idx, field) in fields.iter_enumerated() {
+                    let op = self.codegen_operand(bx, field);
+                    builder.insert_field(bx, FIRST_VARIANT, field_idx, op);
                 }
 
-                let output_scalars = self.value_kind(layout).scalars().unwrap();
-                itertools::izip!(&mut inputs, input_scalars, output_scalars).for_each(
-                    |(v, in_s, out_s)| {
-                        if in_s != out_s {
-                            // We have to be really careful about bool here, because
-                            // `(bool,)` stays i1 but `Cell<bool>` becomes i8.
-                            *v = bx.from_immediate(*v);
-                            *v = bx.to_immediate_scalar(*v, out_s);
-                        }
-                    },
-                );
-
-                let val = OperandValue::from_immediates(inputs);
-                assert!(
-                    val.is_expected_variant_for_type(self.cx, layout),
-                    "Made wrong variant {val:?} for type {layout:?}",
-                );
-                OperandRef { val, layout }
+                builder.build()
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                 let operand = self.codegen_operand(bx, operand);
@@ -1082,10 +1058,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
         };
         allowed_kind && {
-            let ty = rvalue.ty(self.mir, self.cx.tcx());
-            let ty = self.monomorphize(ty);
+            let ty = rvalue.ty(self.mir, self.cx.tcx());
+            let ty = self.monomorphize(ty);
             let layout = self.cx.spanned_layout_of(ty, span);
-            !self.cx.is_backend_ref(layout)
+            OperandRef::<Bx::Value>::builder(layout).is_some()
         }
     }
 }
@@ -1129,23 +1105,12 @@ enum OperandValueKind {
     ZeroSized,
 }
 
-impl OperandValueKind {
-    fn scalars(self) -> Option<ArrayVec<abi::Scalar, 2>> {
-        Some(match self {
-            OperandValueKind::ZeroSized => ArrayVec::new(),
-            OperandValueKind::Immediate(a) => ArrayVec::from_iter([a]),
-            OperandValueKind::Pair(a, b) => [a, b].into(),
-            OperandValueKind::Ref => return None,
-        })
-    }
-}
-
 /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
 /// or an [`OperandValue::Pair`] to an immediate of the target type.
 ///
 /// `to_backend_ty` must be the *non*-immediate backend type (so it will be
 /// `i8`, not `i1`, for `bool`-like types.)
-fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     mut imm: Bx::Value,
     from_scalar: abi::Scalar,
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index 70331b72353..dcd9e25b2c9 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -1,4 +1,4 @@
-use rustc_abi::{AddressSpace, Float, Integer, Reg};
+use rustc_abi::{AddressSpace, Float, Integer, Primitive, Reg, Scalar};
 use rustc_middle::bug;
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, TyAndLayout};
@@ -84,6 +84,24 @@ pub trait DerivedTypeCodegenMethods<'tcx>:
     fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
         ty.is_freeze(self.tcx(), self.typing_env())
     }
+
+    fn type_from_primitive(&self, p: Primitive) -> Self::Type {
+        use Primitive::*;
+        match p {
+            Int(i, _) => self.type_from_integer(i),
+            Float(f) => self.type_from_float(f),
+            Pointer(address_space) => self.type_ptr_ext(address_space),
+        }
+    }
+
+    fn type_from_scalar(&self, s: Scalar) -> Self::Type {
+        // `MaybeUninit` being `repr(transparent)` somewhat implies that the type
+        // of a scalar has to be the type of its primitive (which is true in LLVM,
+        // where noundef is a parameter attribute or metadata) but if we ever get
+        // a backend where that's no longer true, every use of this will need to
+        // be carefully scrutinized and re-evaluated.
+        self.type_from_primitive(s.primitive())
+    }
 }
 
 impl<'tcx, T> DerivedTypeCodegenMethods<'tcx> for T where
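
Not part of the diff above — a minimal sketch of how the new OperandRef::builder / insert_field / build API from operand.rs is consumed. It mirrors the mir::Rvalue::Aggregate hunk in rvalue.rs; the surrounding FunctionCx context (`self`, `bx`, `layout`, `fields`) is assumed from that hunk, not defined here.

    // Sketch only: `bx: &mut Bx`, `layout: TyAndLayout<'tcx>`, and `fields` come from
    // the enclosing codegen path shown in the rvalue.rs hunk above.
    let Some(mut builder) = OperandRef::builder(layout) else {
        bug!("Cannot use type in operand builder: {layout:?}")
    };
    for (field_idx, field) in fields.iter_enumerated() {
        // Codegen each field and let the builder transmute it into the scalar
        // slot reserved for that field (an `Err(Scalar)` until it is filled).
        let op = self.codegen_operand(bx, field);
        builder.insert_field(bx, FIRST_VARIANT, field_idx, op);
    }
    // `build` ICEs if any reserved scalar slot is still unfilled.
    let operand: OperandRef<'tcx, Bx::Value> = builder.build();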
