Diffstat (limited to 'compiler')
46 files changed, 195 insertions, 136 deletions
diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs
index 4529ab8058e..a21e1aee9b0 100644
--- a/compiler/rustc_abi/src/callconv.rs
+++ b/compiler/rustc_abi/src/callconv.rs
@@ -74,7 +74,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
             }
-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 assert!(!self.is_zst());
                 Ok(HomogeneousAggregate::Homogeneous(Reg {
                     kind: RegKind::Vector,
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 3f83787ea37..d3ae6a29f10 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -386,13 +386,15 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     BackendRepr::Memory { sized: true }
                 }
                 // Vectors require at least element alignment, else disable the opt
-                BackendRepr::Vector { element, count: _ } if element.align(dl).abi > align.abi => {
+                BackendRepr::SimdVector { element, count: _ }
+                    if element.align(dl).abi > align.abi =>
+                {
                     BackendRepr::Memory { sized: true }
                 }
                 // the alignment tests passed and we can use this
                 BackendRepr::Scalar(..)
                 | BackendRepr::ScalarPair(..)
-                | BackendRepr::Vector { .. }
+                | BackendRepr::SimdVector { .. }
                 | BackendRepr::Memory { .. } => repr,
             },
         };
@@ -464,7 +466,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 hide_niches(a);
                 hide_niches(b);
             }
-            BackendRepr::Vector { element, count: _ } => hide_niches(element),
+            BackendRepr::SimdVector { element, count: _ } => hide_niches(element),
             BackendRepr::Memory { sized: _ } => {}
         }
         st.largest_niche = None;
@@ -1314,7 +1316,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             match field.backend_repr {
                 // For plain scalars, or vectors of them, we can't unpack
                 // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
+                BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
+                    if optimize_abi =>
+                {
                     abi = field.backend_repr;
                 }
                 // But scalar pairs are Rust-specific and get
diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs
index 221e990ae86..03f3f043c21 100644
--- a/compiler/rustc_abi/src/layout/ty.rs
+++ b/compiler/rustc_abi/src/layout/ty.rs
@@ -219,7 +219,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         C: HasDataLayout,
     {
         match self.backend_repr {
-            BackendRepr::Vector { .. } => self.size == expected_size,
+            BackendRepr::SimdVector { .. } => self.size == expected_size,
             BackendRepr::Memory { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_vector_element(cx, expected_size)
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 81e4e255f37..2197dd68eaf 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1410,7 +1410,7 @@ impl AddressSpace {
 pub enum BackendRepr {
     Scalar(Scalar),
     ScalarPair(Scalar, Scalar),
-    Vector {
+    SimdVector {
         element: Scalar,
         count: u64,
     },
@@ -1426,9 +1426,9 @@ impl BackendRepr {
     #[inline]
     pub fn is_unsized(&self) -> bool {
         match *self {
-            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::SimdVector { .. } => false,
             BackendRepr::Memory { sized } => !sized,
         }
     }
@@ -1467,7 +1467,7 @@ impl BackendRepr {
             BackendRepr::Scalar(s) => Some(s.align(cx).abi),
             BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
             // The align of a Vector can vary in surprising ways
-            BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None,
+            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
         }
     }
 
@@ -1489,7 +1489,7 @@ impl BackendRepr {
                 Some(size)
             }
             // The size of a Vector can vary in surprising ways
-            BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None,
+            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
         }
     }
 
@@ -1500,8 +1500,8 @@ impl BackendRepr {
             BackendRepr::ScalarPair(s1, s2) => {
                 BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
             }
-            BackendRepr::Vector { element, count } => {
-                BackendRepr::Vector { element: element.to_union(), count }
+            BackendRepr::SimdVector { element, count } => {
+                BackendRepr::SimdVector { element: element.to_union(), count }
             }
             BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
         }
@@ -1513,8 +1513,8 @@ impl BackendRepr {
             // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
             (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
             (
-                BackendRepr::Vector { element: element_l, count: count_l },
-                BackendRepr::Vector { element: element_r, count: count_r },
+                BackendRepr::SimdVector { element: element_l, count: count_l },
+                BackendRepr::SimdVector { element: element_r, count: count_r },
             ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
             (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                 l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
@@ -1735,7 +1735,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if this is an aggregate type (including a ScalarPair!)
     pub fn is_aggregate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
         }
     }
@@ -1877,9 +1877,9 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
     pub fn is_zst(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::SimdVector { .. } => false,
             BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
         }
     }
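The rename is mechanical: `BackendRepr::Vector` becomes `BackendRepr::SimdVector`, spelling out that this representation is only ever produced for `#[repr(simd)]` types, not for slices or `Vec`. A minimal sketch (not part of the patch; the helper is hypothetical, the variant shapes are the real `rustc_abi` ones) of what matching on the renamed variant looks like downstream:

    use rustc_abi::BackendRepr;

    /// Hypothetical helper: is this value represented as a single SIMD vector?
    fn is_simd_repr(repr: &BackendRepr) -> bool {
        match repr {
            // Renamed from `Vector { .. }` by this patch.
            BackendRepr::SimdVector { element: _, count: _ } => true,
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::Memory { .. } => false,
        }
    }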
diff --git a/compiler/rustc_ast_lowering/messages.ftl b/compiler/rustc_ast_lowering/messages.ftl
index f29c98164d0..906fb213ee7 100644
--- a/compiler/rustc_ast_lowering/messages.ftl
+++ b/compiler/rustc_ast_lowering/messages.ftl
@@ -37,11 +37,11 @@ ast_lowering_bad_return_type_notation_inputs =
     .suggestion = remove the input types
 
 ast_lowering_bad_return_type_notation_needs_dots =
     return type notation arguments must be elided with `..`
-    .suggestion = add `..`
+    .suggestion = use the correct syntax by adding `..` to the arguments
 
 ast_lowering_bad_return_type_notation_output =
     return type not allowed with return type notation
-    .suggestion = remove the return type
+ast_lowering_bad_return_type_notation_output_suggestion = use the right argument notation and remove the return type
 
 ast_lowering_bad_return_type_notation_position = return type notation not allowed in this position yet
diff --git a/compiler/rustc_ast_lowering/src/errors.rs b/compiler/rustc_ast_lowering/src/errors.rs
index 766c0233408..ceeb5dffbea 100644
--- a/compiler/rustc_ast_lowering/src/errors.rs
+++ b/compiler/rustc_ast_lowering/src/errors.rs
@@ -373,24 +373,39 @@ pub(crate) struct InclusiveRangeWithNoEnd {
     pub span: Span,
 }
 
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(
+    ast_lowering_bad_return_type_notation_output_suggestion,
+    applicability = "machine-applicable",
+    style = "verbose"
+)]
+/// Given `T: Tr<m() -> Ret>` or `T: Tr<m(Ty) -> Ret>`, suggest `T: Tr<m(..)>`.
+pub(crate) struct RTNSuggestion {
+    #[suggestion_part(code = "")]
+    pub output: Span,
+    #[suggestion_part(code = "(..)")]
+    pub input: Span,
+}
+
 #[derive(Diagnostic)]
 pub(crate) enum BadReturnTypeNotation {
     #[diag(ast_lowering_bad_return_type_notation_inputs)]
     Inputs {
         #[primary_span]
-        #[suggestion(code = "()", applicability = "maybe-incorrect")]
+        #[suggestion(code = "(..)", applicability = "machine-applicable", style = "verbose")]
         span: Span,
     },
     #[diag(ast_lowering_bad_return_type_notation_output)]
     Output {
         #[primary_span]
-        #[suggestion(code = "", applicability = "maybe-incorrect")]
         span: Span,
+        #[subdiagnostic]
+        suggestion: RTNSuggestion,
     },
     #[diag(ast_lowering_bad_return_type_notation_needs_dots)]
     NeedsDots {
         #[primary_span]
-        #[suggestion(code = "(..)", applicability = "maybe-incorrect")]
+        #[suggestion(code = "(..)", applicability = "machine-applicable", style = "verbose")]
         span: Span,
     },
     #[diag(ast_lowering_bad_return_type_notation_position)]
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 1c69937eed0..13edcc10c9e 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -926,19 +926,27 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
                 if let Some(first_char) = constraint.ident.as_str().chars().next()
                     && first_char.is_ascii_lowercase()
                 {
-                    let mut err = if !data.inputs.is_empty() {
-                        self.dcx().create_err(errors::BadReturnTypeNotation::Inputs {
-                            span: data.inputs_span,
-                        })
-                    } else if let FnRetTy::Ty(ty) = &data.output {
-                        self.dcx().create_err(errors::BadReturnTypeNotation::Output {
-                            span: data.inputs_span.shrink_to_hi().to(ty.span),
-                        })
-                    } else {
-                        self.dcx().create_err(errors::BadReturnTypeNotation::NeedsDots {
-                            span: data.inputs_span,
-                        })
+                    tracing::info!(?data, ?data.inputs);
+                    let err = match (&data.inputs[..], &data.output) {
+                        ([_, ..], FnRetTy::Default(_)) => {
+                            errors::BadReturnTypeNotation::Inputs { span: data.inputs_span }
+                        }
+                        ([], FnRetTy::Default(_)) => {
+                            errors::BadReturnTypeNotation::NeedsDots { span: data.inputs_span }
+                        }
+                        // The case `T: Trait<method(..) -> Ret>` is handled in the parser.
+                        (_, FnRetTy::Ty(ty)) => {
+                            let span = data.inputs_span.shrink_to_hi().to(ty.span);
+                            errors::BadReturnTypeNotation::Output {
+                                span,
+                                suggestion: errors::RTNSuggestion {
+                                    output: span,
+                                    input: data.inputs_span,
+                                },
+                            }
+                        }
                     };
+                    let mut err = self.dcx().create_err(err);
                     if !self.tcx.features().return_type_notation()
                         && self.tcx.sess.is_nightly_build()
                     {
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
index 55d06477313..6b6244b05aa 100644
--- a/compiler/rustc_ast_lowering/src/path.rs
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -13,7 +13,7 @@ use tracing::{debug, instrument};
 
 use super::errors::{
     AsyncBoundNotOnTrait, AsyncBoundOnlyForFnTraits, BadReturnTypeNotation,
-    GenericTypeWithParentheses, UseAngleBrackets,
+    GenericTypeWithParentheses, RTNSuggestion, UseAngleBrackets,
 };
 use super::{
     AllowReturnTypeNotation, GenericArgsCtor, GenericArgsMode, ImplTraitContext, ImplTraitPosition,
@@ -268,19 +268,27 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
             }
             GenericArgs::Parenthesized(data) => match generic_args_mode {
                 GenericArgsMode::ReturnTypeNotation => {
-                    let mut err = if !data.inputs.is_empty() {
-                        self.dcx().create_err(BadReturnTypeNotation::Inputs {
-                            span: data.inputs_span,
-                        })
-                    } else if let FnRetTy::Ty(ty) = &data.output {
-                        self.dcx().create_err(BadReturnTypeNotation::Output {
-                            span: data.inputs_span.shrink_to_hi().to(ty.span),
-                        })
-                    } else {
-                        self.dcx().create_err(BadReturnTypeNotation::NeedsDots {
-                            span: data.inputs_span,
-                        })
+                    tracing::info!(?data, ?data.inputs);
+                    let err = match (&data.inputs[..], &data.output) {
+                        ([_, ..], FnRetTy::Default(_)) => {
+                            BadReturnTypeNotation::Inputs { span: data.inputs_span }
+                        }
+                        ([], FnRetTy::Default(_)) => {
+                            BadReturnTypeNotation::NeedsDots { span: data.inputs_span }
+                        }
+                        // The case `T: Trait<method(..) -> Ret>` is handled in the parser.
+                        (_, FnRetTy::Ty(ty)) => {
+                            let span = data.inputs_span.shrink_to_hi().to(ty.span);
+                            BadReturnTypeNotation::Output {
+                                span,
+                                suggestion: RTNSuggestion {
+                                    output: span,
+                                    input: data.inputs_span,
+                                },
+                            }
+                        }
                     };
+                    let mut err = self.dcx().create_err(err);
                     if !self.tcx.features().return_type_notation()
                         && self.tcx.sess.is_nightly_build()
                     {
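Both lowering sites now classify the malformed bound by matching on `(inputs, output)` at once instead of chained `if`s, and the `Output` case carries the new two-part `RTNSuggestion` (delete the return type, rewrite the parentheses as `(..)`), upgraded from `maybe-incorrect` to `machine-applicable`. A sketch (mine, assuming the unstable `return_type_notation` feature) of the forms these diagnostics act on:

    #![feature(return_type_notation)]

    trait Database {
        fn query(&self) -> impl Sized;
    }

    // Correct form: bound the method's return type while eliding the arguments.
    fn run<DB: Database<query(..): Send>>(_db: &DB) {}

    // Rejected forms and the variant that fires:
    //   query():           NeedsDots -> suggest `query(..)`
    //   query(i32):        Inputs    -> replace the argument types with `(..)`
    //   query() -> usize:  Output    -> RTNSuggestion: `(..)` plus removal of `-> usize`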
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index b28c4c9539c..06d89bc9ea7 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -84,7 +84,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 AbiParam::new(scalar_to_clif_type(tcx, scalar)),
                 attrs
             )],
-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                 smallvec![AbiParam::new(vector_ty)]
             }
@@ -135,7 +135,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             BackendRepr::Scalar(scalar) => {
                 (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
             }
-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                 (None, vec![AbiParam::new(vector_ty)])
             }
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 4d9bed8652c..6735ae024d1 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -53,7 +53,7 @@ fn report_atomic_type_validation_error<'tcx>(
 
 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
     let (element, count) = match layout.backend_repr {
-        BackendRepr::Vector { element, count } => (element, count),
+        BackendRepr::SimdVector { element, count } => (element, count),
         _ => unreachable!(),
     };
 
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 1b3f86c8405..cc739fefcd0 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -173,9 +173,11 @@ impl<'tcx> CValue<'tcx> {
             CValueInner::ByRef(ptr, None) => {
                 let clif_ty = match layout.backend_repr {
                     BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
-                    BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
-                        .by(u32::try_from(count).unwrap())
-                        .unwrap(),
+                    BackendRepr::SimdVector { element, count } => {
+                        scalar_to_clif_type(fx.tcx, element)
+                            .by(u32::try_from(count).unwrap())
+                            .unwrap()
+                    }
                     _ => unreachable!("{:?}", layout.ty),
                 };
                 let mut flags = MemFlags::new();
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index f8672c07299..f38622074f1 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -312,7 +312,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
                 let layout = self.layout_of(tp_ty).layout;
                 let _use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
-                    Vector { .. } => false,
+                    SimdVector { .. } => false,
                     Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index bac4fc51300..ae98b3d0b56 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -63,7 +63,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
 ) -> Type<'gcc> {
     match layout.backend_repr {
         BackendRepr::Scalar(_) => bug!("handled elsewhere"),
-        BackendRepr::Vector { ref element, count } => {
+        BackendRepr::SimdVector { ref element, count } => {
             let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
             let element =
                 // NOTE: gcc doesn't allow pointer types in vectors.
@@ -178,7 +178,7 @@ pub trait LayoutGccExt<'tcx> {
 impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     fn is_gcc_immediate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
         }
     }
@@ -186,9 +186,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     fn is_gcc_scalar_pair(&self) -> bool {
         match self.backend_repr {
             BackendRepr::ScalarPair(..) => true,
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::SimdVector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index be5673eddf9..e8a69743157 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -939,9 +939,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
@@ -954,7 +955,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -989,7 +990,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
@@ -1026,7 +1027,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
@@ -1099,9 +1100,10 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
             let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
@@ -1114,7 +1116,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1145,7 +1147,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
@@ -1182,7 +1184,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
@@ -1243,9 +1245,10 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
-            if layout.size.bytes() == 8 =>
-        {
+        (
+            AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            BackendRepr::SimdVector { element, count },
+        ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
@@ -1256,7 +1259,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
         }
         (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            BackendRepr::Vector { .. },
+            BackendRepr::SimdVector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1284,7 +1287,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            BackendRepr::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
@@ -1321,7 +1324,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            BackendRepr::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 1169b9c067d..0272667e223 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -470,7 +470,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 let layout = self.layout_of(tp_ty).layout;
                 let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
-                    Vector { .. } => false,
+                    SimdVector { .. } => false,
                     Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index ba01fbff385..4e7096da502 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -19,7 +19,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 ) -> &'a Type {
     match layout.backend_repr {
         BackendRepr::Scalar(_) => bug!("handled elsewhere"),
-        BackendRepr::Vector { element, count } => {
+        BackendRepr::SimdVector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
@@ -171,7 +171,7 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
         match self.backend_repr {
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
         }
     }
@@ -179,9 +179,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_scalar_pair(&self) -> bool {
         match self.backend_repr {
             BackendRepr::ScalarPair(..) => true,
-            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => {
-                false
-            }
+            BackendRepr::Scalar(_)
+            | BackendRepr::SimdVector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index 22e262546c3..f15d6fba506 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -119,7 +119,8 @@ codegen_ssa_invalid_monomorphization_inserted_type = invalid monomorphization of
 
 codegen_ssa_invalid_monomorphization_invalid_bitmask = invalid monomorphization of `{$name}` intrinsic: invalid bitmask `{$mask_ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]`
 
-codegen_ssa_invalid_monomorphization_mask_type = invalid monomorphization of `{$name}` intrinsic: mask element type is `{$ty}`, expected `i_`
+codegen_ssa_invalid_monomorphization_mask_type = invalid monomorphization of `{$name}` intrinsic: found mask element type is `{$ty}`, expected a signed integer type
+    .note = the mask may be widened, which only has the correct behavior for signed integers
 
 codegen_ssa_invalid_monomorphization_mismatched_lengths = invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}`
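The reworded mask diagnostic now comes with a note explaining the signedness requirement: the mask may be widened lane-by-lane, and only sign extension preserves the all-zeros/all-ones lane convention SIMD selection relies on. A sketch (mine; nightly-only, using the `core::intrinsics::simd` intrinsic names) of code on either side of the check:

    #![feature(core_intrinsics, repr_simd)]
    use std::intrinsics::simd::simd_select;

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct I32x4([i32; 4]); // signed lanes: accepted as a mask

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct F32x4([f32; 4]);

    fn select(mask: I32x4, a: F32x4, b: F32x4) -> F32x4 {
        // With a `u32`-lane mask this would now report E0511:
        // "found mask element type is `u32`, expected a signed integer type".
        unsafe { simd_select(mask, a, b) }
    }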
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 236507ac0cd..3aacbcde1a7 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -373,7 +373,11 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
         Architecture::Avr => {
             // Resolve the ISA revision and set
             // the appropriate EF_AVR_ARCH flag.
-            ef_avr_arch(&sess.target.options.cpu)
+            if let Some(ref cpu) = sess.opts.cg.target_cpu {
+                ef_avr_arch(cpu)
+            } else {
+                bug!("AVR CPU not explicitly specified")
+            }
         }
         Architecture::Csky => {
             let e_flags = match sess.target.options.abi.as_ref() {
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 7e28961599f..ccf6d12977f 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -957,6 +957,7 @@ pub enum InvalidMonomorphization<'tcx> {
     },
 
     #[diag(codegen_ssa_invalid_monomorphization_mask_type, code = E0511)]
+    #[note]
     MaskType {
         #[primary_span]
         span: Span,
diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
index 0593fb420c3..96d1ab018f6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
@@ -349,7 +349,7 @@ fn wasm_type<'tcx>(
         PassMode::Direct(_) => {
             let direct_type = match arg_abi.layout.backend_repr {
                 BackendRepr::Scalar(scalar) => wasm_primitive(scalar.primitive(), ptr_type),
-                BackendRepr::Vector { .. } => "v128",
+                BackendRepr::SimdVector { .. } => "v128",
                 BackendRepr::Memory { .. } => {
                     // FIXME: remove this branch once the wasm32-unknown-unknown ABI is fixed
                     let _ = WasmCAbi::Legacy;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 461cf1b8acd..4b33bdeadba 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -359,7 +359,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let offset = self.layout.fields.offset(i);
 
         if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
-            if let BackendRepr::Vector { count, .. } = self.layout.backend_repr
+            if let BackendRepr::SimdVector { count, .. } = self.layout.backend_repr
                 && let BackendRepr::Memory { sized: true } = field.backend_repr
                 && count.is_power_of_two()
             {
@@ -404,7 +404,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
         };
         OperandValue::Immediate(match field.backend_repr {
-            BackendRepr::Vector { .. } => imm,
+            BackendRepr::SimdVector { .. } => imm,
             BackendRepr::Scalar(out_scalar) => {
                 let Some(in_scalar) = in_scalar else {
                     span_bug!(
@@ -666,7 +666,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // However, some SIMD types do not actually use the vector ABI
         // (in particular, packed SIMD types do not). Ensure we exclude those.
         let layout = bx.layout_of(constant_ty);
        if let BackendRepr::SimdVector { .. } = layout.backend_repr {
             let (llval, ty) = self.immediate_const_vector(bx, constant);
             return OperandRef {
                 val: OperandValue::Immediate(llval),
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index d24e48b37a4..67555d40332 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1190,7 +1190,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             assert!(!self.cx.is_backend_scalar_pair(layout));
             OperandValueKind::Immediate(match layout.backend_repr {
                 abi::BackendRepr::Scalar(s) => s,
-                abi::BackendRepr::Vector { element, .. } => element,
+                abi::BackendRepr::SimdVector { element, .. } => element,
                 x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
             })
         } else if self.cx.is_backend_scalar_pair(layout) {
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 40dec0cb39e..eb3f552cd27 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -1296,7 +1296,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                 self.visit_scalar(b, b_layout)?;
             }
         }
-        BackendRepr::Vector { .. } => {
+        BackendRepr::SimdVector { .. } => {
             // No checks here, we assume layout computation gets this right.
             // (This is harder to check since Miri does not represent these as `Immediate`. We
             // also cannot use field projections since this might be a newtype around a vector.)
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 6426bca2332..d7e97f32bae 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -117,7 +117,7 @@ fn check_validity_requirement_lax<'tcx>(
         BackendRepr::ScalarPair(s1, s2) => {
             scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2)
         }
-        BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+        BackendRepr::SimdVector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
         BackendRepr::Memory { .. } => true, // Fields are checked below.
     };
     if !valid {
diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
index 49549335363..2022127a15b 100644
--- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
@@ -937,12 +937,6 @@ impl<'tcx> ItemCtxt<'tcx> {
     }
 }
 
-/// Compute the conditions that need to hold for a conditionally-const item to be const.
-/// That is, compute the set of `~const` where clauses for a given item.
-///
-/// This query also computes the `~const` where clauses for associated types, which are
-/// not "const", but which have item bounds which may be `~const`. These must hold for
-/// the `~const` item bound to hold.
 pub(super) fn const_conditions<'tcx>(
     tcx: TyCtxt<'tcx>,
     def_id: LocalDefId,
@@ -1060,7 +1054,9 @@ pub(super) fn explicit_implied_const_bounds<'tcx>(
     def_id: LocalDefId,
 ) -> ty::EarlyBinder<'tcx, &'tcx [(ty::PolyTraitRef<'tcx>, Span)]> {
     if !tcx.is_conditionally_const(def_id) {
-        bug!("const_conditions invoked for item that is not conditionally const: {def_id:?}");
+        bug!(
+            "explicit_implied_const_bounds invoked for item that is not conditionally const: {def_id:?}"
+        );
     }
 
     let bounds = match tcx.opt_rpitit_info(def_id.to_def_id()) {
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index 46a960d9945..1302027aabb 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -748,6 +748,15 @@ rustc_queries! {
         }
     }
 
+    /// Compute the conditions that need to hold for a conditionally-const item to be const.
+    /// That is, compute the set of `~const` where clauses for a given item.
+    ///
+    /// This can be thought of as the `~const` equivalent of `predicates_of`. These are the
+    /// predicates that need to be proven at usage sites, and can be assumed at definition.
+    ///
+    /// This query also computes the `~const` where clauses for associated types, which are
+    /// not "const", but which have item bounds which may be `~const`. These must hold for
+    /// the `~const` item bound to hold.
     query const_conditions(
         key: DefId
     ) -> ty::ConstConditions<'tcx> {
@@ -757,6 +766,11 @@ rustc_queries! {
         separate_provide_extern
     }
 
+    /// Compute the const bounds that are implied for a conditionally-const item.
+    ///
+    /// This can be thought of as the `~const` equivalent of `explicit_item_bounds`. These
+    /// are the predicates that need to be proven at definition sites, and can be assumed at
+    /// usage sites.
     query explicit_implied_const_bounds(
         key: DefId
     ) -> ty::EarlyBinder<'tcx, &'tcx [(ty::PolyTraitRef<'tcx>, Span)]> {
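The `const_conditions` docs move from the provider in `predicates_of.rs` onto the query definition itself, alongside new text relating the pair to `predicates_of`/`explicit_item_bounds`. A concrete conditionally-const item makes the two directions easier to read (sketch, using the unstable `const_trait_impl` syntax):

    #![feature(const_trait_impl)]

    #[const_trait]
    trait Hash {
        fn hash(&self) -> u64;
    }

    // `const_conditions` for this fn contains `T: ~const Hash`: it must be
    // proven wherever `mix` is called in const context, and the body may
    // assume it (which is what makes `x.hash()` callable here).
    const fn mix<T: ~const Hash>(x: &T) -> u64 {
        x.hash().rotate_left(16) ^ 0x9e37_79b9
    }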
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 29521d42720..981dedd5b5c 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -1567,7 +1567,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             BackendRepr::ScalarPair(a, b) => {
                 !a.is_always_valid(&self.ecx) || !b.is_always_valid(&self.ecx)
             }
-            BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => false,
+            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => false,
         }
     }
 
diff --git a/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs b/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs
index 4c8dd933317..9f26da7d5c6 100644
--- a/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs
+++ b/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs
@@ -18,7 +18,7 @@ fn uses_vector_registers(mode: &PassMode, repr: &BackendRepr) -> bool {
             cast.prefix.iter().any(|r| r.is_some_and(|x| x.kind == RegKind::Vector))
                 || cast.rest.unit.kind == RegKind::Vector
         }
-        PassMode::Direct(..) | PassMode::Pair(..) => matches!(repr, BackendRepr::Vector { .. }),
+        PassMode::Direct(..) | PassMode::Pair(..) => matches!(repr, BackendRepr::SimdVector { .. }),
     }
 }
diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs
index fc7a3323fc1..8d2fd595942 100644
--- a/compiler/rustc_parse/src/errors.rs
+++ b/compiler/rustc_parse/src/errors.rs
@@ -2922,8 +2922,9 @@ pub(crate) struct AddBoxNew {
 #[diag(parse_bad_return_type_notation_output)]
 pub(crate) struct BadReturnTypeNotationOutput {
     #[primary_span]
-    #[suggestion(code = "", applicability = "maybe-incorrect", style = "verbose")]
     pub span: Span,
+    #[suggestion(code = "", applicability = "maybe-incorrect", style = "verbose")]
+    pub suggestion: Span,
 }
 
 #[derive(Diagnostic)]
diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs
index 1280a1cd312..8091dfa2a4a 100644
--- a/compiler/rustc_parse/src/parser/path.rs
+++ b/compiler/rustc_parse/src/parser/path.rs
@@ -382,11 +382,14 @@ impl<'a> Parser<'a> {
 
                 self.psess.gated_spans.gate(sym::return_type_notation, span);
 
+                let prev_lo = self.prev_token.span.shrink_to_hi();
                 if self.eat_noexpect(&token::RArrow) {
                     let lo = self.prev_token.span;
                     let ty = self.parse_ty()?;
+                    let span = lo.to(ty.span);
+                    let suggestion = prev_lo.to(ty.span);
                     self.dcx()
-                        .emit_err(errors::BadReturnTypeNotationOutput { span: lo.to(ty.span) });
+                        .emit_err(errors::BadReturnTypeNotationOutput { span, suggestion });
                 }
 
                 P(ast::GenericArgs::ParenthesizedElided(span))
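This is the parser-side counterpart referenced by the lowering comment above ("handled in the parser"): when `(..)` is already present and a return type follows, the parser itself reports the error. The error span and the removal suggestion are now tracked separately, so the suggestion can start right after the closing parenthesis while the message still points at the return type. An input it fires on (my illustration):

    trait Trait {
        fn method(&self) -> i32;
    }

    // error: return type not allowed with return type notation
    // fn f<T: Trait<method(..) -> i32>>() {}
    //                          ^^^^^^ primary span; the verbose suggestion
    //                                 removes everything after the `)`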
diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
index a627e0e69b6..62cbab9b723 100644
--- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
+++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
@@ -206,7 +206,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
             rustc_abi::BackendRepr::ScalarPair(first, second) => {
                 ValueAbi::ScalarPair(first.stable(tables), second.stable(tables))
             }
-            rustc_abi::BackendRepr::Vector { element, count } => {
+            rustc_abi::BackendRepr::SimdVector { element, count } => {
                 ValueAbi::Vector { element: element.stable(tables), count }
             }
             rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index 3fa67c624a7..209d7483e61 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -25,7 +25,7 @@ struct CannotUseFpConv;
 
 fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
     match arg.layout.backend_repr {
-        BackendRepr::Vector { .. } => true,
+        BackendRepr::SimdVector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -80,7 +80,7 @@ where
                 }
             }
         },
-        BackendRepr::Vector { .. } => return Err(CannotUseFpConv),
+        BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
         BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 1c044fe98b3..198d08864be 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -357,7 +357,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
                 scalar_attrs(&layout, a, Size::ZERO),
                 scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
-            BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
             BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
         };
         ArgAbi { layout, mode }
@@ -759,7 +759,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
             if arg_idx.is_none()
                 && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
-                && !matches!(arg.layout.backend_repr, BackendRepr::Vector { .. })
+                && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
             {
                 // Return values larger than 2 registers using a return area
                 // pointer. LLVM and Cranelift disagree about how to return
@@ -826,7 +826,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 }
             }
 
-            BackendRepr::Vector { .. } => {
+            BackendRepr::SimdVector { .. } => {
                 // This is a fun case! The gist of what this is doing is
                 // that we want callers and callees to always agree on the
                 // ABI of how they pass SIMD arguments. If we were to *not*
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index 785175229b0..7368e225efa 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -31,7 +31,7 @@ struct CannotUseFpConv;
 
 fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
     match arg.layout.backend_repr {
-        BackendRepr::Vector { .. } => true,
+        BackendRepr::SimdVector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -86,7 +86,7 @@ where
                 }
             }
         },
-        BackendRepr::Vector { .. } => return Err(CannotUseFpConv),
+        BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
         BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
diff --git a/compiler/rustc_target/src/callconv/s390x.rs b/compiler/rustc_target/src/callconv/s390x.rs
index 4f69e32b2e3..1ba792c5acc 100644
--- a/compiler/rustc_target/src/callconv/s390x.rs
+++ b/compiler/rustc_target/src/callconv/s390x.rs
@@ -8,7 +8,7 @@ use crate::spec::HasTargetSpec;
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
     let size = ret.layout.size;
-    if size.bits() <= 128 && matches!(ret.layout.backend_repr, BackendRepr::Vector { .. }) {
+    if size.bits() <= 128 && matches!(ret.layout.backend_repr, BackendRepr::SimdVector { .. }) {
         return;
     }
     if !ret.layout.is_aggregate() && size.bits() <= 64 {
@@ -40,7 +40,7 @@ where
 
     let size = arg.layout.size;
     if size.bits() <= 128 {
-        if let BackendRepr::Vector { .. } = arg.layout.backend_repr {
+        if let BackendRepr::SimdVector { .. } = arg.layout.backend_repr {
            // pass non-wrapped vector types using `PassMode::Direct`
            return;
        }
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index 7e5aab0201b..73aff85a0ad 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -109,7 +109,7 @@ where
     {
         match layout.backend_repr {
             BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) => false,
-            BackendRepr::Vector { .. } => true,
+            BackendRepr::SimdVector { .. } => true,
             BackendRepr::Memory { .. } => {
                 for i in 0..layout.fields.count() {
                     if contains_vector(cx, layout.field(cx, i)) {
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index ab306e20239..300b19f62e7 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -56,7 +56,7 @@ where
                 Primitive::Float(_) => Class::Sse,
             },
 
-            BackendRepr::Vector { .. } => Class::Sse,
+            BackendRepr::SimdVector { .. } => Class::Sse,
 
             BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
                 for i in 0..layout.fields.count() {
diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs
index 4d99a9f9ba0..8f8597ea662 100644
--- a/compiler/rustc_target/src/callconv/x86_win64.rs
+++ b/compiler/rustc_target/src/callconv/x86_win64.rs
@@ -18,7 +18,7 @@ pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'
                 _ => a.make_indirect(),
             }
         }
-        BackendRepr::Vector { .. } => {
+        BackendRepr::SimdVector { .. } => {
             // FIXME(eddyb) there should be a size cap here
             // (probably what clang calls "illegal vectors").
         }
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index 6c030cb3bf7..b687f0e20c6 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -116,7 +116,7 @@ where
 
 fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
     match arg.layout.backend_repr {
-        BackendRepr::Vector { .. } => true,
+        BackendRepr::SimdVector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
diff --git a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs
index 03bae9b5977..3c1d18e0777 100644
--- a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs
+++ b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs
@@ -24,7 +24,7 @@ pub(crate) fn target() -> Target {
         options: TargetOptions {
             abi: "spe".into(),
             endian: Endian::Big,
-            features: "+secure-plt".into(),
+            features: "+secure-plt,+msync".into(),
             mcount: "_mcount".into(),
             ..base
         },
diff --git a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs
index df4fd75b0bd..30d0d9cb60a 100644
--- a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs
+++ b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs
@@ -26,6 +26,7 @@ pub(crate) fn target() -> Target {
         options: TargetOptions {
             abi: "spe".into(),
             endian: Endian::Big,
+            features: "+msync".into(),
             mcount: "_mcount".into(),
             ..base
         },
diff --git a/compiler/rustc_target/src/target_features.rs b/compiler/rustc_target/src/target_features.rs
index 65a85151bef..d05466bb484 100644
--- a/compiler/rustc_target/src/target_features.rs
+++ b/compiler/rustc_target/src/target_features.rs
@@ -461,6 +461,7 @@ const HEXAGON_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[
 static POWERPC_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[
     // tidy-alphabetical-start
     ("altivec", Unstable(sym::powerpc_target_feature), &[]),
+    ("msync", Unstable(sym::powerpc_target_feature), &[]),
     ("partword-atomics", Unstable(sym::powerpc_target_feature), &[]),
     ("power10-vector", Unstable(sym::powerpc_target_feature), &["power9-vector"]),
     ("power8-altivec", Unstable(sym::powerpc_target_feature), &["altivec"]),
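Book E (e500) cores, the ones the SPE ABI targets, provide `msync` in place of the plain `sync` barrier, so both `*spe` targets now enable the feature by default and `msync` joins the unstable PowerPC feature list. A sketch (mine) of opting in from nightly code; it can also be enabled per build with `-C target-feature=+msync`:

    #![feature(powerpc_target_feature)]

    // Hypothetical: compile one function with `msync` available even when
    // the target baseline lacks it (PowerPC only; the feature is unstable).
    #[target_feature(enable = "msync")]
    unsafe fn barrier() {
        // On cores that only have the `msync` form, LLVM lowers this fence
        // to that instruction.
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
    }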
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index 2ef9d5421ba..6bf76eaee5c 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -31,20 +31,19 @@ impl<'a, 'tcx> At<'a, 'tcx> {
     /// normalized. If you don't care about regions, you should prefer
     /// `normalize_erasing_regions`, which is more efficient.
     ///
-    /// If the normalization succeeds and is unambiguous, returns back
-    /// the normalized value along with various outlives relations (in
-    /// the form of obligations that must be discharged).
+    /// If the normalization succeeds, returns back the normalized
+    /// value along with various outlives relations (in the form of
+    /// obligations that must be discharged).
     ///
-    /// N.B., this will *eventually* be the main means of
-    /// normalizing, but for now should be used only when we actually
-    /// know that normalization will succeed, since error reporting
-    /// and other details are still "under development".
-    ///
-    /// This normalization should *only* be used when the projection does not
-    /// have possible ambiguity or may not be well-formed.
+    /// This normalization should *only* be used when the projection is well-formed and
+    /// does not have possible ambiguity (contains inference variables).
     ///
     /// After codegen, when lifetimes do not matter, it is preferable to instead
     /// use [`TyCtxt::normalize_erasing_regions`], which wraps this procedure.
+    ///
+    /// N.B. Once the new solver is stabilized this method of normalization will
+    /// likely be removed as trait solver operations are already cached by the query
+    /// system making this redundant.
     fn query_normalize<T>(self, value: T) -> Result<Normalized<'tcx, T>, NoSolution>
     where
         T: TypeFoldable<TyCtxt<'tcx>>,
@@ -210,8 +209,6 @@ impl<'a, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'a, 'tcx>
 
         // See note in `rustc_trait_selection::traits::project` about why we
         // wait to fold the args.
-
-        // Wrap this in a closure so we don't accidentally return from the outer function
         let res = match kind {
             ty::Opaque => {
                 // Only normalize `impl Trait` outside of type inference, usually in codegen.
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 74ab65b4f58..e317768ff60 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -460,7 +460,7 @@ fn fn_abi_sanity_check<'tcx>(
                 // `layout.backend_repr` and ignore everything else. We should just reject
                 //`Aggregate` entirely here, but some targets need to be fixed first.
                 match arg.layout.backend_repr {
-                    BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => {}
+                    BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => {}
                     BackendRepr::ScalarPair(..) => {
                         panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty)
                     }
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 2ab6b8e17c4..1a82d171307 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -553,7 +553,7 @@ fn layout_of_uncached<'tcx>(
                 )
             } else {
                 (
-                    BackendRepr::Vector { element: e_abi, count: e_len },
+                    BackendRepr::SimdVector { element: e_abi, count: e_len },
                     dl.llvmlike_vector_align(size),
                 )
             };
diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs
index c695e2887fd..7423a156a21 100644
--- a/compiler/rustc_ty_utils/src/layout/invariant.rs
+++ b/compiler/rustc_ty_utils/src/layout/invariant.rs
@@ -234,7 +234,7 @@ pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayou
                     "`ScalarPair` second field with bad ABI in {inner:#?}",
                 );
             }
-            BackendRepr::Vector { element, count } => {
+            BackendRepr::SimdVector { element, count } => {
                 let align = layout.align.abi;
                 let size = layout.size;
                 let element_align = element.align(cx).abi;
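The rewritten `query_normalize` docs lead with the actual preconditions (well-formed, no ambiguity) and note that this cached query path may disappear once the new trait solver is stabilized. For orientation, the call shape inside the compiler (a sketch; `QueryNormalizeExt` is the `rustc_trait_selection` extension trait that provides this method on `At`):

    use rustc_infer::infer::InferCtxt;
    use rustc_infer::traits::ObligationCause;
    use rustc_middle::ty::{ParamEnv, Ty};
    use rustc_trait_selection::traits::query::normalize::QueryNormalizeExt;

    fn normalize_or_bail<'tcx>(
        infcx: &InferCtxt<'tcx>,
        cause: &ObligationCause<'tcx>,
        param_env: ParamEnv<'tcx>,
        ty: Ty<'tcx>,
    ) -> Option<Ty<'tcx>> {
        match infcx.at(cause, param_env).query_normalize(ty) {
            // `value` is the normalized type; `obligations` are the outlives
            // relations the caller still has to register and discharge.
            Ok(normalized) => Some(normalized.value),
            Err(_no_solution) => None,
        }
    }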
