Diffstat (limited to 'compiler')
153 files changed, 3007 insertions, 2350 deletions
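Much of this diff turns on the difference between a plain ZST and a 1-ZST (size 0 *and* alignment 1), per the new `LayoutS::is_1zst` helper below. A standalone sketch of why alignment cannot be ignored (the type names `Unit`, `Aligned8`, and `E` are invented for illustration):

    use std::mem::{align_of, size_of};

    // A 1-ZST: size 0, alignment 1. Layout code may ignore it entirely.
    #[allow(dead_code)]
    struct Unit;

    // Also a ZST, but with alignment 8: it occupies no bytes, yet anything
    // containing it must still be 8-aligned.
    #[allow(dead_code)]
    #[repr(align(8))]
    struct Aligned8;

    #[allow(dead_code)]
    enum E {
        A(Aligned8),
        B(u8),
    }

    fn main() {
        assert_eq!((size_of::<Unit>(), align_of::<Unit>()), (0, 1));
        assert_eq!((size_of::<Aligned8>(), align_of::<Aligned8>()), (0, 8));
        assert_eq!(align_of::<E>(), 8);
    }

Discarding the `Aligned8` field as layout-irrelevant would leave `E` under-aligned; that is the failure mode the `absent` change below guards against.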
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index a8a1a90572d..0706dc18f0e 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -157,8 +157,10 @@ pub trait LayoutCalculator { // for non-ZST uninhabited data (mostly partial initialization). let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| { let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited()); - let is_zst = fields.iter().all(|f| f.0.is_zst()); - uninhabited && is_zst + // We cannot ignore alignment; that might lead us to entirely discard a variant and + // produce an enum that is less aligned than it should be! + let is_1zst = fields.iter().all(|f| f.0.is_1zst()); + uninhabited && is_1zst }; let (present_first, present_second) = { let mut present_variants = variants @@ -357,10 +359,8 @@ pub trait LayoutCalculator { // It'll fit, but we need to make some adjustments. match layout.fields { FieldsShape::Arbitrary { ref mut offsets, .. } => { - for (j, offset) in offsets.iter_enumerated_mut() { - if !variants[i][j].0.is_zst() { - *offset += this_offset; - } + for offset in offsets.iter_mut() { + *offset += this_offset; } } _ => { @@ -504,7 +504,7 @@ pub trait LayoutCalculator { // to make room for a larger discriminant. for field_idx in st.fields.index_by_increasing_offset() { let field = &field_layouts[FieldIdx::from_usize(field_idx)]; - if !field.0.is_zst() || field.align().abi.bytes() != 1 { + if !field.0.is_1zst() { start_align = start_align.min(field.align().abi); break; } @@ -603,12 +603,15 @@ pub trait LayoutCalculator { abi = Abi::Scalar(tag); } else { // Try to use a ScalarPair for all tagged enums. + // That's possible only if we can find a common primitive type for all variants. let mut common_prim = None; let mut common_prim_initialized_in_all_variants = true; for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) { let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else { panic!(); }; + // We skip *all* ZST here and later check if we are good in terms of alignment. + // This lets us handle some cases involving aligned ZST. let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst()); let (field, offset) = match (fields.next(), fields.next()) { (None, None) => { @@ -954,9 +957,6 @@ fn univariant( }; ( - // Place ZSTs first to avoid "interesting offsets", especially with only one - // or two non-ZST fields. This helps Scalar/ScalarPair layouts. - !f.0.is_zst(), // Then place largest alignments first. cmp::Reverse(alignment_group_key(f)), // Then prioritize niche placement within alignment group according to @@ -1073,9 +1073,10 @@ fn univariant( let size = min_size.align_to(align.abi); let mut layout_of_single_non_zst_field = None; let mut abi = Abi::Aggregate { sized }; - // Unpack newtype ABIs and find scalar pairs. + // Try to make this a Scalar/ScalarPair. if sized && size.bytes() > 0 { - // All other fields must be ZSTs. + // We skip *all* ZST here and later check if we are good in terms of alignment. + // This lets us handle some cases involving aligned ZST. 
let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst()); match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 7f2cab3eb11..571aaf631bd 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1660,15 +1660,25 @@ pub struct PointeeInfo { impl LayoutS { /// Returns `true` if the layout corresponds to an unsized type. + #[inline] pub fn is_unsized(&self) -> bool { self.abi.is_unsized() } + #[inline] pub fn is_sized(&self) -> bool { self.abi.is_sized() } + /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1). + pub fn is_1zst(&self) -> bool { + self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1 + } + /// Returns `true` if the type is a ZST and not unsized. + /// + /// Note that this does *not* imply that the type is irrelevant for layout! It can still have + /// non-trivial alignment constraints. You probably want to use `is_1zst` instead. pub fn is_zst(&self) -> bool { match self.abi { Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 1827e42368f..a6d1ef33f40 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -153,6 +153,7 @@ trait ResolverAstLoweringExt { fn get_label_res(&self, id: NodeId) -> Option<NodeId>; fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>; fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>; + fn remap_extra_lifetime_params(&mut self, from: NodeId, to: NodeId); fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind; } @@ -213,6 +214,11 @@ impl ResolverAstLoweringExt for ResolverAstLowering { self.extra_lifetime_params_map.remove(&id).unwrap_or_default() } + fn remap_extra_lifetime_params(&mut self, from: NodeId, to: NodeId) { + let lifetimes = self.extra_lifetime_params_map.remove(&from).unwrap_or_default(); + self.extra_lifetime_params_map.insert(to, lifetimes); + } + fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind { self.builtin_macro_kinds.get(&def_id).copied().unwrap_or(MacroKind::Bang) } @@ -236,7 +242,7 @@ enum ImplTraitContext { ReturnPositionOpaqueTy { /// Origin: Either OpaqueTyOrigin::FnReturn or OpaqueTyOrigin::AsyncFn, origin: hir::OpaqueTyOrigin, - in_trait: bool, + fn_kind: FnDeclKind, }, /// Impl trait in type aliases. TypeAliasesOpaqueTy { in_assoc_ty: bool }, @@ -312,7 +318,7 @@ impl std::fmt::Display for ImplTraitPosition { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum FnDeclKind { Fn, Inherent, @@ -1089,6 +1095,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // constructing the HIR for `impl bounds...` and then lowering that. let impl_trait_node_id = self.next_node_id(); + // Shift `impl Trait` lifetime captures from the associated type bound's + // node id to the opaque node id, so that the opaque can actually use + // these lifetime bounds. 
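The `fn_kind` plumbing in this file implements the capture split the comments below describe: return-position `impl Trait` in a trait (and its impls) captures all in-scope lifetimes, while an ordinary RPIT only captures lifetimes that appear in its bounds. A surface-level sketch under the pre-2024 capture rules this code targets (the trait name `CharsOf` is invented):

    // An ordinary RPIT must name `'a` in its bounds; without `+ 'a`, the
    // hidden type `Chars<'a>` would be rejected under 2021-edition rules.
    fn rpit<'a>(x: &'a str) -> impl Iterator<Item = char> + 'a {
        x.chars()
    }

    trait CharsOf {
        // Return-position impl Trait in a trait captures all in-scope
        // lifetimes, so `'a` need not appear in the bounds at all.
        fn chars_of<'a>(&self, x: &'a str) -> impl Iterator<Item = char>;
    }

    impl CharsOf for () {
        fn chars_of<'a>(&self, x: &'a str) -> impl Iterator<Item = char> {
            x.chars()
        }
    }

    fn main() {
        assert_eq!(rpit("hi").count(), 2);
        assert_eq!(().chars_of("hi").count(), 2);
    }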
+ self.resolver + .remap_extra_lifetime_params(constraint.id, impl_trait_node_id); self.with_dyn_type_scope(false, |this| { let node_id = this.next_node_id(); @@ -1401,13 +1412,13 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { TyKind::ImplTrait(def_node_id, bounds) => { let span = t.span; match itctx { - ImplTraitContext::ReturnPositionOpaqueTy { origin, in_trait } => self + ImplTraitContext::ReturnPositionOpaqueTy { origin, fn_kind } => self .lower_opaque_impl_trait( span, *origin, *def_node_id, bounds, - *in_trait, + Some(*fn_kind), itctx, ), &ImplTraitContext::TypeAliasesOpaqueTy { in_assoc_ty } => self @@ -1416,7 +1427,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { hir::OpaqueTyOrigin::TyAlias { in_assoc_ty }, *def_node_id, bounds, - false, + None, itctx, ), ImplTraitContext::Universal => { @@ -1523,7 +1534,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { origin: hir::OpaqueTyOrigin, opaque_ty_node_id: NodeId, bounds: &GenericBounds, - in_trait: bool, + fn_kind: Option<FnDeclKind>, itctx: &ImplTraitContext, ) -> hir::TyKind<'hir> { // Make sure we know that some funky desugaring has been going on here. @@ -1540,10 +1551,22 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { Vec::new() } hir::OpaqueTyOrigin::FnReturn(..) => { - // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` - // example, we only need to duplicate lifetimes that appear in the - // bounds, since those are the only ones that are captured by the opaque. - lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds) + if let FnDeclKind::Impl | FnDeclKind::Trait = + fn_kind.expect("expected RPITs to be lowered with a FnKind") + { + // return-position impl trait in trait was decided to capture all + // in-scope lifetimes, which we collect for all opaques during resolution. + self.resolver + .take_extra_lifetime_params(opaque_ty_node_id) + .into_iter() + .map(|(ident, id, _)| Lifetime { id, ident }) + .collect() + } else { + // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` + // example, we only need to duplicate lifetimes that appear in the + // bounds, since those are the only ones that are captured by the opaque. + lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds) + } } hir::OpaqueTyOrigin::AsyncFn(..) 
=> { unreachable!("should be using `lower_async_fn_ret_ty`") @@ -1554,7 +1577,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { self.lower_opaque_inner( opaque_ty_node_id, origin, - in_trait, + matches!(fn_kind, Some(FnDeclKind::Trait)), captured_lifetimes_to_duplicate, span, opaque_ty_span, @@ -1802,12 +1825,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } let fn_def_id = self.local_def_id(fn_node_id); - self.lower_async_fn_ret_ty( - &decl.output, - fn_def_id, - ret_id, - matches!(kind, FnDeclKind::Trait), - ) + self.lower_async_fn_ret_ty(&decl.output, fn_def_id, ret_id, kind) } else { match &decl.output { FnRetTy::Ty(ty) => { @@ -1815,7 +1833,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let fn_def_id = self.local_def_id(fn_node_id); ImplTraitContext::ReturnPositionOpaqueTy { origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id), - in_trait: matches!(kind, FnDeclKind::Trait), + fn_kind: kind, } } else { let position = match kind { @@ -1883,7 +1901,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { output: &FnRetTy, fn_def_id: LocalDefId, opaque_ty_node_id: NodeId, - in_trait: bool, + fn_kind: FnDeclKind, ) -> hir::FnRetTy<'hir> { let span = self.lower_span(output.span()); let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None); @@ -1898,7 +1916,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let opaque_ty_ref = self.lower_opaque_inner( opaque_ty_node_id, hir::OpaqueTyOrigin::AsyncFn(fn_def_id), - in_trait, + matches!(fn_kind, FnDeclKind::Trait), captured_lifetimes, span, opaque_ty_span, @@ -1906,7 +1924,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let future_bound = this.lower_async_fn_output_type_to_future_bound( output, span, - if in_trait && !this.tcx.features().return_position_impl_trait_in_trait { + if let FnDeclKind::Trait = fn_kind + && !this.tcx.features().return_position_impl_trait_in_trait + { ImplTraitContext::FeatureGated( ImplTraitPosition::TraitReturn, sym::return_position_impl_trait_in_trait, @@ -1914,7 +1934,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } else { ImplTraitContext::ReturnPositionOpaqueTy { origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id), - in_trait, + fn_kind, } }, ); diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index b8cd94e5422..1049e7a8bbe 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -2249,7 +2249,14 @@ impl<'tcx> RegionInferenceContext<'tcx> { } pub(crate) fn universe_info(&self, universe: ty::UniverseIndex) -> UniverseInfo<'tcx> { - self.universe_causes[&universe].clone() + // Query canonicalization can create local superuniverses (for example in + // `InferCtx::query_response_substitution_guess`), but they don't have an associated + // `UniverseInfo` explaining why they were created. + // This can cause ICEs if these causes are accessed in diagnostics, for example in issue + // #114907 where this happens via liveness and dropck outlives results. + // Therefore, we return a default value in case that happens, which should at worst emit a + // suboptimal error, instead of the ICE. + self.universe_causes.get(&universe).cloned().unwrap_or_else(|| UniverseInfo::other()) } /// Tries to find the terminator of the loop in which the region 'r' resides. 
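The `universe_info` change above replaces a panicking map index with a fallback lookup. The same pattern in isolation, with `u32` and `String` standing in for `UniverseIndex` and `UniverseInfo`:

    use std::collections::HashMap;

    fn universe_info(causes: &HashMap<u32, String>, universe: u32) -> String {
        // `causes[&universe]` would panic for a universe created during query
        // canonicalization that never had a cause registered; fall back instead.
        causes.get(&universe).cloned().unwrap_or_else(|| "other".to_string())
    }

    fn main() {
        let causes = HashMap::from([(0, "root".to_string())]);
        assert_eq!(universe_info(&causes, 0), "root");
        assert_eq!(universe_info(&causes, 7), "other"); // no ICE, just a default
    }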
diff --git a/compiler/rustc_borrowck/src/type_check/canonical.rs b/compiler/rustc_borrowck/src/type_check/canonical.rs index 16f5e68a06f..b7adc314f07 100644 --- a/compiler/rustc_borrowck/src/type_check/canonical.rs +++ b/compiler/rustc_borrowck/src/type_check/canonical.rs @@ -9,7 +9,7 @@ use rustc_span::Span; use rustc_trait_selection::traits::query::type_op::{self, TypeOpOutput}; use rustc_trait_selection::traits::ObligationCause; -use crate::diagnostics::{ToUniverseInfo, UniverseInfo}; +use crate::diagnostics::ToUniverseInfo; use super::{Locations, NormalizeLocation, TypeChecker}; @@ -46,13 +46,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { self.push_region_constraints(locations, category, data); } + // If the query has created new universes and errors are going to be emitted, register the + // cause of these new universes for improved diagnostics. let universe = self.infcx.universe(); - - if old_universe != universe { - let universe_info = match error_info { - Some(error_info) => error_info.to_universe_info(old_universe), - None => UniverseInfo::other(), - }; + if old_universe != universe && let Some(error_info) = error_info { + let universe_info = error_info.to_universe_info(old_universe); for u in (old_universe + 1)..=universe { self.borrowck_context.constraints.universe_causes.insert(u, universe_info.clone()); } @@ -69,15 +67,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { where T: TypeFoldable<TyCtxt<'tcx>>, { - let old_universe = self.infcx.universe(); - let (instantiated, _) = self.infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical); - - for u in (old_universe + 1)..=self.infcx.universe() { - self.borrowck_context.constraints.universe_causes.insert(u, UniverseInfo::other()); - } - instantiated } diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index 3004291c3e7..28286243e82 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -163,10 +163,6 @@ pub(crate) fn type_check<'mir, 'tcx>( debug!(?normalized_inputs_and_output); - for u in ty::UniverseIndex::ROOT..=infcx.universe() { - constraints.universe_causes.insert(u, UniverseInfo::other()); - } - let mut borrowck_context = BorrowCheckContext { universal_regions, location_table, diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs index b7f56a2986c..5d775b9b532 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs @@ -39,7 +39,7 @@ fn clif_sig_from_fn_abi<'tcx>( pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: CallConv) -> CallConv { match c { Conv::Rust | Conv::C => default_call_conv, - Conv::RustCold => CallConv::Cold, + Conv::Cold | Conv::PreserveMost | Conv::PreserveAll => CallConv::Cold, Conv::X86_64SysV => CallConv::SystemV, Conv::X86_64Win64 => CallConv::WindowsFastcall, diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs index 3081dcfa2b7..ec2da39398b 100644 --- a/compiler/rustc_codegen_cranelift/src/common.rs +++ b/compiler/rustc_codegen_cranelift/src/common.rs @@ -480,7 +480,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> { if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err { self.0.sess.span_fatal(span, err.to_string()) } else { - span_bug!(span, "failed to get layout for `{}`: {}", ty, err) + self.0.sess.span_fatal(span, format!("failed to get layout 
for `{}`: {}", ty, err)) } } } diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs index 6aeba13f639..c6133f2b35c 100644 --- a/compiler/rustc_codegen_cranelift/src/unsize.rs +++ b/compiler/rustc_codegen_cranelift/src/unsize.rs @@ -88,7 +88,8 @@ fn unsize_ptr<'tcx>( let src_f = src_layout.field(fx, i); assert_eq!(src_layout.fields.offset(i).bytes(), 0); assert_eq!(dst_layout.fields.offset(i).bytes(), 0); - if src_f.is_zst() { + if src_f.is_1zst() { + // We are looking for the one non-1-ZST field; this is not it. continue; } assert_eq!(src_layout.size, src_f.size); @@ -151,6 +152,7 @@ pub(crate) fn coerce_unsized_into<'tcx>( let dst_f = dst.place_field(fx, FieldIdx::new(i)); if dst_f.layout().is_zst() { + // No data here, nothing to copy/coerce. continue; } diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs index b309695c190..7598c6eee03 100644 --- a/compiler/rustc_codegen_cranelift/src/vtable.rs +++ b/compiler/rustc_codegen_cranelift/src/vtable.rs @@ -51,8 +51,8 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>( 'descend_newtypes: while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() { for i in 0..arg.layout().fields.count() { let field = arg.value_field(fx, FieldIdx::new(i)); - if !field.layout().is_zst() { - // we found the one non-zero-sized field that is allowed + if !field.layout().is_1zst() { + // we found the one non-1-ZST field that is allowed // now find *its* non-zero-sized field, or stop if it's a // pointer arg = field; diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs index 6fb1cbfad8c..377dc753f68 100644 --- a/compiler/rustc_codegen_gcc/src/abi.rs +++ b/compiler/rustc_codegen_gcc/src/abi.rs @@ -125,8 +125,8 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Ignore => continue, PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx), PassMode::Pair(..) => { - argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true)); - argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true)); + argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0)); + argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1)); continue; } PassMode::Indirect { extra_attrs: Some(_), .. 
} => { diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 0b1f2fe6a87..308cb04cac3 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -821,7 +821,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let mut load = |i, scalar: &abi::Scalar, align| { let llptr = self.struct_gep(pair_type, place.llval, i as u64); - let llty = place.layout.scalar_pair_element_gcc_type(self, i, false); + let llty = place.layout.scalar_pair_element_gcc_type(self, i); let load = self.load(llty, llptr, align); scalar_load_metadata(self, load, scalar); if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load } diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index 88dcafa7370..dcebd92a61c 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -7,6 +7,7 @@ use rustc_codegen_ssa::traits::{ BaseTypeMethods, MiscMethods, }; +use rustc_codegen_ssa::errors as ssa_errors; use rustc_data_structures::base_n; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_middle::span_bug; @@ -479,7 +480,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err { self.sess().emit_fatal(respan(span, err.into_diagnostic())) } else { - span_bug!(span, "failed to get layout for `{}`: {}", ty, err) + self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err }) } } } diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 84d57838512..cc467801beb 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -4,7 +4,7 @@ use gccjit::{Struct, Type}; use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; use rustc_middle::bug; use rustc_middle::ty::{self, Ty, TypeVisitableExt}; -use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; @@ -74,8 +74,8 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout Abi::ScalarPair(..) 
=> { return cx.type_struct( &[ - layout.scalar_pair_element_gcc_type(cx, 0, false), - layout.scalar_pair_element_gcc_type(cx, 1, false), + layout.scalar_pair_element_gcc_type(cx, 0), + layout.scalar_pair_element_gcc_type(cx, 1), ], false, ); @@ -150,7 +150,7 @@ pub trait LayoutGccExt<'tcx> { fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>; - fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>; + fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>; fn gcc_field_index(&self, index: usize) -> u64; fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>; } @@ -182,23 +182,16 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + // This must produce the same result for `repr(transparent)` wrappers as for the inner type! + // In other words, this should generally not look at the type at all, but only at the + // layout. if let Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) { return ty; } - let ty = - match *self.ty.kind() { - ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx)) - } - ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx)) - } - ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())), - _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), - }; + let ty = self.scalar_gcc_type_at(cx, scalar, Size::ZERO); cx.scalar_types.borrow_mut().insert(self.ty, ty); return ty; } @@ -272,23 +265,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { } } - fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> { - // TODO(antoyo): remove llvm hack: - // HACK(eddyb) special-case fat pointers until LLVM removes - // pointee types, to avoid bitcasting every `OperandRef::deref`. - match self.ty.kind() { - ty::Ref(..) | ty::RawPtr(_) => { - return self.field(cx, index).gcc_type(cx); - } - // only wide pointer boxes are handled as pointers - // thin pointer boxes with scalar allocators are handled by the general logic below - ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => { - let ptr_ty = Ty::new_mut_ptr(cx.tcx,self.ty.boxed_ty()); - return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate); - } - _ => {} - } - + fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc> { + // This must produce the same result for `repr(transparent)` wrappers as for the inner type! + // In other words, this should generally not look at the type at all, but only at the + // layout. 
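The invariant stated in these comments is observable from safe Rust: `repr(transparent)` guarantees the wrapper has exactly the inner type's layout, so lowering keyed on the *type* rather than the *layout* could hand the wrapper and its inner type different backend types. A quick check (`Wrapper` is invented for this sketch):

    use std::mem::{align_of, size_of};

    #[allow(dead_code)]
    #[repr(transparent)]
    struct Wrapper(&'static u32);

    fn main() {
        assert_eq!(size_of::<Wrapper>(), size_of::<&'static u32>());
        assert_eq!(align_of::<Wrapper>(), align_of::<&'static u32>());
    }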
let (a, b) = match self.abi { Abi::ScalarPair(ref a, ref b) => (a, b), _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), @@ -367,8 +347,8 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { layout.gcc_field_index(index) } - fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> { - layout.scalar_pair_element_gcc_type(self, index, immediate) + fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> { + layout.scalar_pair_element_gcc_type(self, index) } fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> { diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index c6a7dc95d77..863cb7068f8 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -571,7 +571,9 @@ impl From<Conv> for llvm::CallConv { Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => { llvm::CCallConv } - Conv::RustCold => llvm::ColdCallConv, + Conv::Cold => llvm::ColdCallConv, + Conv::PreserveMost => llvm::PreserveMost, + Conv::PreserveAll => llvm::PreserveAll, Conv::AmdGpuKernel => llvm::AmdGpuKernel, Conv::AvrInterrupt => llvm::AvrInterrupt, Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt, diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index a82d2c5771a..f33075a8879 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -367,7 +367,7 @@ impl<'a> LlvmArchiveBuilder<'a> { match addition { Addition::File { path, name_in_archive } => { let path = CString::new(path.to_str().unwrap())?; - let name = CString::new(name_in_archive.clone())?; + let name = CString::new(name_in_archive.as_bytes())?; members.push(llvm::LLVMRustArchiveMemberNew( path.as_ptr(), name.as_ptr(), diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index b2d28cef899..c1d3392386c 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -441,7 +441,7 @@ fn thin_lto( for (i, (name, buffer)) in modules.into_iter().enumerate() { info!("local module: {} - {}", i, name); - let cname = CString::new(name.clone()).unwrap(); + let cname = CString::new(name.as_bytes()).unwrap(); thin_modules.push(llvm::ThinLTOModule { identifier: cname.as_ptr(), data: buffer.data().as_ptr(), diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 24fd5bbf8c5..8e8290279ab 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -10,6 +10,7 @@ use crate::value::Value; use cstr::cstr; use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh}; +use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::*; use rustc_data_structures::base_n; use rustc_data_structures::fx::FxHashMap; @@ -1000,7 +1001,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err { self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() }) } else { - span_bug!(span, "failed to get layout for `{ty}`: {err:?}") + self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err }) } } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs 
b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 621fd36b2a3..c70cb670e96 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -16,7 +16,7 @@ use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_llvm::RustString; use rustc_middle::bug; -use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand}; +use rustc_middle::mir::coverage::{CounterId, CoverageKind}; use rustc_middle::mir::Coverage; use rustc_middle::ty; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; @@ -104,144 +104,67 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) { let bx = self; + let Some(coverage_context) = bx.coverage_context() else { return }; + let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); + let func_coverage = coverage_map + .entry(instance) + .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance)); + let Coverage { kind, code_region } = coverage.clone(); match kind { CoverageKind::Counter { function_source_hash, id } => { - if bx.set_function_source_hash(instance, function_source_hash) { - // If `set_function_source_hash()` returned true, the coverage map is enabled, - // so continue adding the counter. - if let Some(code_region) = code_region { - // Note: Some counters do not have code regions, but may still be referenced - // from expressions. In that case, don't add the counter to the coverage map, - // but do inject the counter intrinsic. - bx.add_coverage_counter(instance, id, code_region); - } - - let coverageinfo = bx.tcx().coverageinfo(instance.def); - - let fn_name = bx.get_pgo_func_name_var(instance); - let hash = bx.const_u64(function_source_hash); - let num_counters = bx.const_u32(coverageinfo.num_counters); - let index = bx.const_u32(id.as_u32()); + debug!( + "ensuring function source hash is set for instance={:?}; function_source_hash={}", + instance, function_source_hash, + ); + func_coverage.set_function_source_hash(function_source_hash); + + if let Some(code_region) = code_region { + // Note: Some counters do not have code regions, but may still be referenced + // from expressions. In that case, don't add the counter to the coverage map, + // but do inject the counter intrinsic. debug!( - "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})", - fn_name, hash, num_counters, index, + "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}", + instance, id, code_region, ); - bx.instrprof_increment(fn_name, hash, num_counters, index); + func_coverage.add_counter(id, code_region); } + // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`, + // as that needs an exclusive borrow. 
+ drop(coverage_map); + + let coverageinfo = bx.tcx().coverageinfo(instance.def); + + let fn_name = bx.get_pgo_func_name_var(instance); + let hash = bx.const_u64(function_source_hash); + let num_counters = bx.const_u32(coverageinfo.num_counters); + let index = bx.const_u32(id.as_u32()); + debug!( + "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})", + fn_name, hash, num_counters, index, + ); + bx.instrprof_increment(fn_name, hash, num_counters, index); } CoverageKind::Expression { id, lhs, op, rhs } => { - bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region); + debug!( + "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}", + instance, id, lhs, op, rhs, code_region, + ); + func_coverage.add_counter_expression(id, lhs, op, rhs, code_region); } CoverageKind::Unreachable => { - bx.add_coverage_unreachable( - instance, - code_region.expect("unreachable regions always have code regions"), + let code_region = + code_region.expect("unreachable regions always have code regions"); + debug!( + "adding unreachable code to coverage_map: instance={:?}, at {:?}", + instance, code_region, ); + func_coverage.add_unreachable_region(code_region); } } } } -// These methods used to be part of trait `CoverageInfoBuilderMethods`, but -// after moving most coverage code out of SSA they are now just ordinary methods. -impl<'tcx> Builder<'_, '_, 'tcx> { - /// Returns true if the function source hash was added to the coverage map (even if it had - /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is - /// not enabled (a coverage map is not being generated). - fn set_function_source_hash( - &mut self, - instance: Instance<'tcx>, - function_source_hash: u64, - ) -> bool { - if let Some(coverage_context) = self.coverage_context() { - debug!( - "ensuring function source hash is set for instance={:?}; function_source_hash={}", - instance, function_source_hash, - ); - let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); - coverage_map - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .set_function_source_hash(function_source_hash); - true - } else { - false - } - } - - /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage` - /// is not enabled (a coverage map is not being generated). - fn add_coverage_counter( - &mut self, - instance: Instance<'tcx>, - id: CounterId, - region: CodeRegion, - ) -> bool { - if let Some(coverage_context) = self.coverage_context() { - debug!( - "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}", - instance, id, region, - ); - let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); - coverage_map - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .add_counter(id, region); - true - } else { - false - } - } - - /// Returns true if the expression was added to the coverage map; false if - /// `-C instrument-coverage` is not enabled (a coverage map is not being generated). 
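The `drop(coverage_map)` line above is load-bearing: the `RefMut` from `function_coverage_map.borrow_mut()` keeps the builder shared-borrowed, while `instrprof_increment` needs the builder exclusively. The borrow-checker situation in miniature (the toy `Builder` is invented for this sketch):

    use std::cell::RefCell;

    struct Builder {
        coverage: RefCell<Vec<u32>>,
    }

    impl Builder {
        fn instrprof_increment(&mut self, _id: u32) {} // stand-in: needs `&mut self`
    }

    fn main() {
        let mut bx = Builder { coverage: RefCell::new(Vec::new()) };

        let mut map = bx.coverage.borrow_mut(); // shared-borrows `bx` while alive
        map.push(7);
        drop(map); // without this, the `&mut bx` call below fails to compile

        bx.instrprof_increment(7);
    }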
- fn add_coverage_counter_expression( - &mut self, - instance: Instance<'tcx>, - id: ExpressionId, - lhs: Operand, - op: Op, - rhs: Operand, - region: Option<CodeRegion>, - ) -> bool { - if let Some(coverage_context) = self.coverage_context() { - debug!( - "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \ - region: {:?}", - instance, id, lhs, op, rhs, region, - ); - let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); - coverage_map - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .add_counter_expression(id, lhs, op, rhs, region); - true - } else { - false - } - } - - /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage` - /// is not enabled (a coverage map is not being generated). - fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool { - if let Some(coverage_context) = self.coverage_context() { - debug!( - "adding unreachable code to coverage_map: instance={:?}, at {:?}", - instance, region, - ); - let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); - coverage_map - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .add_unreachable_region(region); - true - } else { - false - } - } -} - fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> { let tcx = cx.tcx; diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index f8cbcbd5ec8..ed938761694 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -445,9 +445,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => { build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id) } - // Box<T, A> may have a non-ZST allocator A. In that case, we + // Box<T, A> may have a non-1-ZST allocator A. In that case, we // cannot treat Box<T, A> as just an owned alias of `*mut T`. - ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => { + ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => { build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id) } ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id), diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 84157d1e25c..01cbf7d3b11 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -83,12 +83,17 @@ pub enum LLVMModFlagBehavior { // Consts for the LLVM CallConv type, pre-cast to usize. /// LLVM CallingConv::ID. Should we wrap this? 
+/// +/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h> #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] pub enum CallConv { CCallConv = 0, FastCallConv = 8, ColdCallConv = 9, + PreserveMost = 14, + PreserveAll = 15, + Tail = 18, X86StdcallCallConv = 64, X86FastcallCallConv = 65, ArmAapcsCallConv = 67, diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 831645579b9..dcc62d314ff 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -3,7 +3,7 @@ use crate::context::TypeLowering; use crate::type_::Type; use rustc_codegen_ssa::traits::*; use rustc_middle::bug; -use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, Ty, TypeVisitableExt}; use rustc_target::abi::HasDataLayout; @@ -215,20 +215,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { + // This must produce the same result for `repr(transparent)` wrappers as for the inner type! + // In other words, this should generally not look at the type at all, but only at the + // layout. if let Abi::Scalar(scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) { return llty; } - let llty = match *self.ty.kind() { - ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(), - ty::Adt(def, _) if def.is_box() => cx.type_ptr(), - ty::FnPtr(sig) => { - cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty())) - } - _ => self.scalar_llvm_type_at(cx, scalar), - }; + let llty = self.scalar_llvm_type_at(cx, scalar); cx.scalar_lltypes.borrow_mut().insert(self.ty, llty); return llty; } @@ -303,27 +299,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { index: usize, immediate: bool, ) -> &'a Type { - // HACK(eddyb) special-case fat pointers until LLVM removes - // pointee types, to avoid bitcasting every `OperandRef::deref`. - match *self.ty.kind() { - ty::Ref(..) | ty::RawPtr(_) => { - return self.field(cx, index).llvm_type(cx); - } - // only wide pointer boxes are handled as pointers - // thin pointer boxes with scalar allocators are handled by the general logic below - ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => { - let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty()); - return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate); - } - // `dyn* Trait` has the same ABI as `*mut dyn Trait` - ty::Dynamic(bounds, region, ty::DynStar) => { - let ptr_ty = - Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn)); - return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate); - } - _ => {} - } - + // This must produce the same result for `repr(transparent)` wrappers as for the inner type! + // In other words, this should generally not look at the type at all, but only at the + // layout. 
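For context on what `scalar_pair_element_*` lowers: wide pointers are the canonical `ScalarPair`, a data pointer plus metadata (a length or a vtable pointer), which is why the deleted special cases dealt in `Ref`, `RawPtr`, `Box`, and `dyn*`. The shape is visible in plain Rust:

    use std::mem::size_of;

    fn main() {
        // Thin pointer: a single Scalar.
        assert_eq!(size_of::<&u8>(), size_of::<usize>());
        // Wide pointers: ScalarPair = data pointer + metadata.
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // ptr + len
        assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // ptr + vtable
    }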
let Abi::ScalarPair(a, b) = self.abi else { bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self); }; diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl index b6c70c62249..3dc83f50b21 100644 --- a/compiler/rustc_codegen_ssa/messages.ftl +++ b/compiler/rustc_codegen_ssa/messages.ftl @@ -35,6 +35,8 @@ codegen_ssa_extract_bundled_libs_parse_archive = failed to parse archive '{$rlib codegen_ssa_extract_bundled_libs_read_entry = failed to read entry '{$rlib}': {$error} codegen_ssa_extract_bundled_libs_write_file = failed to write file '{$rlib}': {$error} +codegen_ssa_failed_to_get_layout = failed to get layout for {$ty}: {$err} + codegen_ssa_failed_to_write = failed to write {$path}: {$error} codegen_ssa_ignoring_emit_path = ignoring emit path because multiple .{$extension} files were produced diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs index 4c854740753..c6f4bd35e29 100644 --- a/compiler/rustc_codegen_ssa/src/back/metadata.rs +++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs @@ -226,9 +226,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static let mut file = write::Object::new(binary_format, architecture, endianness); if sess.target.is_like_osx { - if let Some(build_version) = macho_object_build_version_for_target(&sess.target) { - file.set_macho_build_version(build_version) - } + file.set_macho_build_version(macho_object_build_version_for_target(&sess.target)) } let e_flags = match architecture { Architecture::Mips => { @@ -334,31 +332,28 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static Some(file) } -/// Apple's LD, when linking for Mac Catalyst, requires object files to -/// contain information about what they were built for (LC_BUILD_VERSION): -/// the platform (macOS/watchOS etc), minimum OS version, and SDK version. -/// This returns a `MachOBuildVersion` if necessary for the target. -fn macho_object_build_version_for_target( - target: &Target, -) -> Option<object::write::MachOBuildVersion> { - if !target.llvm_target.ends_with("-macabi") { - return None; - } +/// Since Xcode 15 Apple's LD requires object files to contain information about what they were +/// built for (LC_BUILD_VERSION): the platform (macOS/watchOS etc), minimum OS version, and SDK +/// version. This returns a `MachOBuildVersion` for the target. +fn macho_object_build_version_for_target(target: &Target) -> object::write::MachOBuildVersion { /// The `object` crate demands "X.Y.Z encoded in nibbles as xxxx.yy.zz" /// e.g. 
minOS 14.0 = 0x000E0000, or SDK 16.2 = 0x00100200 fn pack_version((major, minor): (u32, u32)) -> u32 { (major << 16) | (minor << 8) } - let platform = object::macho::PLATFORM_MACCATALYST; - let min_os = (14, 0); - let sdk = (16, 2); + let platform = + rustc_target::spec::current_apple_platform(target).expect("unknown Apple target OS"); + let min_os = rustc_target::spec::current_apple_deployment_target(target) + .expect("unknown Apple target OS"); + let sdk = + rustc_target::spec::current_apple_sdk_version(platform).expect("unknown Apple target OS"); let mut build_version = object::write::MachOBuildVersion::default(); build_version.platform = platform; build_version.minos = pack_version(min_os); build_version.sdk = pack_version(sdk); - Some(build_version) + build_version } pub enum MetadataPosition { diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index aa003e4e898..cd19885faa0 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -202,7 +202,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( (src, unsized_info(bx, a, b, old_info)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); + assert_eq!(def_a, def_b); // implies same number of fields let src_layout = bx.cx().layout_of(src_ty); let dst_layout = bx.cx().layout_of(dst_ty); if src_ty == dst_ty { @@ -211,7 +211,8 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let mut result = None; for i in 0..src_layout.fields.count() { let src_f = src_layout.field(bx.cx(), i); - if src_f.is_zst() { + if src_f.is_1zst() { + // We are looking for the one non-1-ZST field; this is not it. continue; } @@ -272,13 +273,14 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); + assert_eq!(def_a, def_b); // implies same number of fields for i in def_a.variant(FIRST_VARIANT).fields.indices() { let src_f = src.project_field(bx, i.as_usize()); let dst_f = dst.project_field(bx, i.as_usize()); if dst_f.layout.is_zst() { + // No data here, nothing to copy/coerce. 
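The `pack_version` helper in the rustc_codegen_ssa metadata hunk above encodes versions in the nibble layout the `object` crate expects; replaying the examples from its own doc comment:

    fn pack_version((major, minor): (u32, u32)) -> u32 {
        (major << 16) | (minor << 8)
    }

    fn main() {
        assert_eq!(pack_version((14, 0)), 0x000E_0000); // minOS 14.0
        assert_eq!(pack_version((16, 2)), 0x0010_0200); // SDK 16.2
    }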
continue; } diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index b7d8b9b45bf..41602321ded 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -7,6 +7,7 @@ use rustc_errors::{ IntoDiagnosticArg, }; use rustc_macros::Diagnostic; +use rustc_middle::ty::layout::LayoutError; use rustc_middle::ty::Ty; use rustc_span::{Span, Symbol}; use rustc_type_ir::FloatTy; @@ -1031,6 +1032,15 @@ pub struct TargetFeatureSafeTrait { } #[derive(Diagnostic)] +#[diag(codegen_ssa_failed_to_get_layout)] +pub struct FailedToGetLayout<'tcx> { + #[primary_span] + pub span: Span, + pub ty: Ty<'tcx>, + pub err: LayoutError<'tcx>, +} + +#[derive(Diagnostic)] #[diag(codegen_ssa_error_creating_remark_dir)] pub struct ErrorCreatingRemarkDir { pub error: std::io::Error, diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 6aef1664394..d78a0c49107 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -933,8 +933,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { { for i in 0..op.layout.fields.count() { let field = op.extract_field(bx, i); - if !field.layout.is_zst() { - // we found the one non-zero-sized field that is allowed + if !field.layout.is_1zst() { + // we found the one non-1-ZST field that is allowed // now find *its* non-zero-sized field, or stop if it's a // pointer op = field; @@ -975,10 +975,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { { for i in 0..op.layout.fields.count() { let field = op.extract_field(bx, i); - if !field.layout.is_zst() { - // we found the one non-zero-sized field that is allowed - // now find *its* non-zero-sized field, or stop if it's a - // pointer + if !field.layout.is_1zst() { + // We found the one non-1-ZST field that is allowed. (The rules + // for `DispatchFromDyn` ensure there's exactly one such field.) + // Now find *its* non-zero-sized field, or stop if it's a + // pointer. op = field; continue 'descend_newtypes; } diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 3d0c17e9cfb..bf937c2fc7c 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -145,7 +145,7 @@ impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but - // we need something in the operand. + // we need something sufficiently aligned in the operand. LocalRef::Operand(OperandRef::zero_sized(layout)) } else { LocalRef::PendingOperand diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index f90d1a0fc9c..ef66aa6ce1c 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -50,7 +50,8 @@ pub enum OperandValue<V> { /// from [`ConstMethods::const_poison`]. /// /// An `OperandValue` *must* be this variant for any type for which - /// `is_zst` on its `Layout` returns `true`. + /// `is_zst` on its `Layout` returns `true`. Note however that + /// these values can still require alignment. 
ZeroSized, } diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index e7c3906d977..a9ecbdc5f35 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -114,7 +114,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bx.struct_gep(ty, self.llval, 1) } Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => { - // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer. + // ZST fields (even some that require alignment) are not included in Scalar, + // ScalarPair, and Vector layouts, so manually offset the pointer. bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())]) } Abi::Scalar(_) | Abi::ScalarPair(..) => { diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 07c61df2140..fc8d3389102 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -1004,6 +1004,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Aggregate(..) => { let ty = rvalue.ty(self.mir, self.cx.tcx()); let ty = self.monomorphize(ty); + // For ZST this can be `OperandValueKind::ZeroSized`. self.cx.spanned_layout_of(ty, span).is_zst() } } diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index e5dd5729d89..020402fe25e 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -384,7 +384,7 @@ const_eval_unreachable_unwind = const_eval_unsigned_offset_from_overflow = `ptr_offset_from_unsigned` called when first pointer has smaller offset than second: {$a_offset} < {$b_offset} - +const_eval_unsized_local = unsized locals are not supported const_eval_unstable_const_fn = `{$def_path}` is not yet stable as a const fn const_eval_unstable_in_stable = diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 4c7e9194401..fc0dba6b67b 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -61,6 +61,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( &ret.clone().into(), StackPopCleanup::Root { cleanup: false }, )?; + ecx.storage_live_for_always_live_locals()?; // The main interpreter loop. while ecx.step()? {} diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index 4362cae7ed7..c74fed0e47f 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -795,6 +795,7 @@ impl ReportErrorExt for UnsupportedOpInfo { use crate::fluent_generated::*; match self { UnsupportedOpInfo::Unsupported(s) => s.clone().into(), + UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local, UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite, UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy, UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int, @@ -814,7 +815,7 @@ impl ReportErrorExt for UnsupportedOpInfo { // `ReadPointerAsInt(Some(info))` is never printed anyway, it only serves as an error to // be further processed by validity checking which then turns it into something nice to // print. So it's not worth the effort of having diagnostics that can print the `info`. 
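The rustc_codegen_ssa place.rs comment above notes that a ZST field still has a real, alignment-determined offset that codegen must apply by hand. That offset is observable with `offset_of!` (stabilized after this diff; the types `Tag` and `S` are invented):

    use std::mem::{offset_of, size_of};

    #[allow(dead_code)]
    #[repr(align(8))]
    struct Tag; // a ZST that still demands 8-byte alignment

    #[allow(dead_code)]
    #[repr(C)]
    struct S {
        a: u64,
        tag: Tag,
    }

    fn main() {
        assert_eq!(size_of::<S>(), 8);
        assert_eq!(offset_of!(S, tag), 8); // zero bytes of data, but a real aligned offset
    }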
- Unsupported(_) | ReadPointerAsInt(_) => {} + UnsizedLocal | Unsupported(_) | ReadPointerAsInt(_) => {} OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => { builder.set_arg("ptr", ptr); } diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs index 98e853dc4d9..25c74b98611 100644 --- a/compiler/rustc_const_eval/src/interpret/cast.rs +++ b/compiler/rustc_const_eval/src/interpret/cast.rs @@ -410,21 +410,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.unsize_into_ptr(src, dest, *s, *c) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); + assert_eq!(def_a, def_b); // implies same number of fields - // unsizing of generic struct with pointer fields - // Example: `Arc<T>` -> `Arc<Trait>` - // here we need to increase the size of every &T thin ptr field to a fat ptr + // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`. + // There can be extra fields as long as they don't change their type or are 1-ZST. + // There might also be no field that actually needs unsizing. + let mut found_cast_field = false; for i in 0..src.layout.fields.count() { let cast_ty_field = cast_ty.field(self, i); - if cast_ty_field.is_zst() { - continue; - } let src_field = self.project_field(src, i)?; let dst_field = self.project_field(dest, i)?; - if src_field.layout.ty == cast_ty_field.ty { + if src_field.layout.is_1zst() && cast_ty_field.is_1zst() { + // Skip 1-ZST fields. + } else if src_field.layout.ty == cast_ty_field.ty { self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?; } else { + if found_cast_field { + span_bug!(self.cur_span(), "unsize_into: more than one field to cast"); + } + found_cast_field = true; self.unsize_into(&src_field, cast_ty_field, &dst_field)?; } } diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 90848dbfbc7..1c90ce29b02 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -25,8 +25,8 @@ use super::{ Scalar, StackPopJump, }; use crate::errors::{self, ErroneousConstUsed}; -use crate::fluent_generated as fluent; use crate::util; +use crate::{fluent_generated as fluent, ReportErrorExt}; pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> { /// Stores the `Machine` instance. @@ -158,7 +158,8 @@ pub enum StackPopCleanup { #[derive(Clone, Debug)] pub struct LocalState<'tcx, Prov: Provenance = AllocId> { pub value: LocalValue<Prov>, - /// Don't modify if `Some`, this is only used to prevent computing the layout twice + /// Don't modify if `Some`, this is only used to prevent computing the layout twice. + /// Avoids computing the layout of locals that are never actually initialized. pub layout: Cell<Option<TyAndLayout<'tcx>>>, } @@ -177,7 +178,7 @@ pub enum LocalValue<Prov: Provenance = AllocId> { impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> { /// Read the local's value or error if the local is not yet live or not live anymore. - #[inline] + #[inline(always)] pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> { match &self.value { LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"? @@ -190,7 +191,7 @@ impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> { /// /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from /// anywhere else. 
You may be invalidating machine invariants if you do! - #[inline] + #[inline(always)] pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> { match &mut self.value { LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"? @@ -432,6 +433,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id)) } + /// Turn the given error into a human-readable string. Expects the string to be printed, so if + /// `RUSTC_CTFE_BACKTRACE` is set this will show a backtrace of the rustc internals that + /// triggered the error. + /// + /// This is NOT the preferred way to render an error; use `report` from `const_eval` instead. + /// However, this is useful when error messages appear in ICEs. + pub fn format_error(&self, e: InterpErrorInfo<'tcx>) -> String { + let (e, backtrace) = e.into_parts(); + backtrace.print_backtrace(); + // FIXME(fee1-dead), HACK: we want to use the error as title therefore we can just extract the + // label and arguments from the InterpError. + let handler = &self.tcx.sess.parse_sess.span_diagnostic; + #[allow(rustc::untranslatable_diagnostic)] + let mut diag = self.tcx.sess.struct_allow(""); + let msg = e.diagnostic_message(); + e.add_args(handler, &mut diag); + let s = handler.eagerly_translate_to_string(msg, diag.args()); + diag.cancel(); + s + } + #[inline(always)] pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] { M::stack(self) @@ -462,7 +484,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } #[inline(always)] - pub(super) fn body(&self) -> &'mir mir::Body<'tcx> { + pub fn body(&self) -> &'mir mir::Body<'tcx> { self.frame().body } @@ -684,15 +706,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { return_to_block: StackPopCleanup, ) -> InterpResult<'tcx> { trace!("body: {:#?}", body); + let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) }; + let locals = IndexVec::from_elem(dead_local, &body.local_decls); // First push a stack frame so we have access to the local args let pre_frame = Frame { body, loc: Right(body.span), // Span used for errors caused during preamble. return_to_block, return_place: return_place.clone(), - // empty local array, we fill it in below, after we are inside the stack frame and - // all methods actually know about the frame - locals: IndexVec::new(), + locals, instance, tracing_span: SpanGuard::new(), extra: (), @@ -707,19 +729,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.eval_mir_constant(&ct, Some(span), None)?; } - // Most locals are initially dead. - let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) }; - let mut locals = IndexVec::from_elem(dummy, &body.local_decls); - - // Now mark those locals as live that have no `Storage*` annotations. - let always_live = always_storage_live_locals(self.body()); - for local in locals.indices() { - if always_live.contains(local) { - locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit)); - } - } // done - self.frame_mut().locals = locals; M::after_stack_push(self)?; self.frame_mut().loc = Left(mir::Location::START); @@ -886,12 +896,96 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } - /// Mark a storage as live, killing the previous content. 
-    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
-        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+    /// In the current stack frame, mark all locals as live that are not arguments and don't have
+    /// `Storage*` annotations (this includes the return place).
+    pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
+        self.storage_live(mir::RETURN_PLACE)?;
+
+        let body = self.body();
+        let always_live = always_storage_live_locals(body);
+        for local in body.vars_and_temps_iter() {
+            if always_live.contains(local) {
+                self.storage_live(local)?;
+            }
+        }
+        Ok(())
+    }
+
+    pub fn storage_live_dyn(
+        &mut self,
+        local: mir::Local,
+        meta: MemPlaceMeta<M::Provenance>,
+    ) -> InterpResult<'tcx> {
         trace!("{:?} is now live", local);
 
-        let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+        // We avoid `ty.is_trivially_sized` since that (a) cannot assume WF, so it recurses through
+        // all fields of a tuple, and (b) does something expensive for ADTs.
+        fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
+            match ty.kind() {
+                ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+                | ty::Uint(_)
+                | ty::Int(_)
+                | ty::Bool
+                | ty::Float(_)
+                | ty::FnDef(..)
+                | ty::FnPtr(_)
+                | ty::RawPtr(..)
+                | ty::Char
+                | ty::Ref(..)
+                | ty::Generator(..)
+                | ty::GeneratorWitness(..)
+                | ty::GeneratorWitnessMIR(..)
+                | ty::Array(..)
+                | ty::Closure(..)
+                | ty::Never
+                | ty::Error(_) => true,
+
+                ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+                ty::Tuple(tys) => tys.last().iter().all(|ty| is_very_trivially_sized(**ty)),
+
+                // We don't want to do any queries, so there is not much we can do with ADTs.
+                ty::Adt(..) => false,
+
+                ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+
+                ty::Infer(ty::TyVar(_)) => false,
+
+                ty::Bound(..)
+                | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+                    bug!("`is_very_trivially_sized` applied to unexpected type: {:?}", ty)
+                }
+            }
+        }
+
+        // This is a hot function, so we avoid computing the layout when possible.
+        // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+        let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+            None
+        } else {
+            // We need the layout.
+            let layout = self.layout_of_local(self.frame(), local, None)?;
+            if layout.is_sized() { None } else { Some(layout) }
+        };
+
+        let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+            if !meta.has_meta() {
+                throw_unsup!(UnsizedLocal);
+            }
+            // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+            let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+            Operand::Indirect(*dest_place)
+        } else {
+            assert!(!meta.has_meta()); // we're dropping the metadata
+            // Just make this an efficient immediate.
+            // Note that not calling `layout_of` here does have one real consequence:
+            // if the type is too big, we'll only notice this when the local is actually initialized,
+            // which is a bit too late -- we should ideally notice this already here, when the memory
+            // is conceptually allocated. But given how rare that error is and that this is a hot function,
+            // we accept this downside for now.
+            Operand::Immediate(Immediate::Uninit)
+        });
+
+        // StorageLive expects the local to be dead, and marks it live.
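A toy version of the fast path above, outside rustc (the type shape is a hypothetical stand-in): sizedness is answered from the head of the type alone, without recursing into ADTs or running layout queries.

enum ToyTy<'a> {
    Bool,
    Ref(&'a ToyTy<'a>),
    Slice(&'a ToyTy<'a>),
    Tuple(&'a [ToyTy<'a>]),
    Adt, // would need a layout query; assume nothing
}

fn is_very_trivially_sized(ty: &ToyTy<'_>) -> bool {
    match ty {
        ToyTy::Bool | ToyTy::Ref(_) => true, // sized regardless of what they point to
        ToyTy::Slice(_) => false,            // definitely unsized
        // Only the last field of a tuple can be unsized.
        ToyTy::Tuple(tys) => tys.last().map_or(true, is_very_trivially_sized),
        ToyTy::Adt => false, // fall back to actually computing the layout
    }
}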
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val); if !matches!(old, LocalValue::Dead) { @@ -900,6 +994,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ok(()) } + /// Mark a storage as live, killing the previous content. + #[inline(always)] + pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> { + self.storage_live_dyn(local, MemPlaceMeta::None) + } + pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> { assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); trace!("{:?} is now dead", local); diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 910c3ca5d0a..42950d1ffb0 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -378,7 +378,8 @@ pub fn intern_const_alloc_recursive< ecx.tcx.sess.delay_span_bug( ecx.tcx.span, format!( - "error during interning should later cause validation failure: {error:?}" + "error during interning should later cause validation failure: {}", + ecx.format_error(error), ), ); } diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index 6e57a56b445..8d948eb10a7 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -33,7 +33,7 @@ pub enum Immediate<Prov: Provenance = AllocId> { /// A pair of two scalar value (must have `ScalarPair` ABI where both fields are /// `Scalar::Initialized`). ScalarPair(Scalar<Prov>, Scalar<Prov>), - /// A value of fully uninitialized memory. Can have arbitrary size and layout. + /// A value of fully uninitialized memory. Can have arbitrary size and layout, but must be sized. Uninit, } @@ -190,16 +190,19 @@ impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> { impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { #[inline] pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self { + debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout"); ImmTy { imm: val.into(), layout } } - #[inline] + #[inline(always)] pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self { + debug_assert!(layout.is_sized(), "immediates must be sized"); ImmTy { imm, layout } } #[inline] pub fn uninit(layout: TyAndLayout<'tcx>) -> Self { + debug_assert!(layout.is_sized(), "immediates must be sized"); ImmTy { imm: Immediate::Uninit, layout } } @@ -239,6 +242,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { // if the entire value is uninit, then so is the field (can happen in ConstProp) (Immediate::Uninit, _) => Immediate::Uninit, // the field contains no information, can be left uninit + // (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST) _ if layout.is_zst() => Immediate::Uninit, // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... 
try // to detect those here and also give them no data @@ -290,23 +294,21 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> { self.layout } - fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( - &self, - _ecx: &InterpCx<'mir, 'tcx, M>, - ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { - assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here - Ok(MemPlaceMeta::None) + #[inline(always)] + fn meta(&self) -> MemPlaceMeta<Prov> { + debug_assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here + MemPlaceMeta::None } - fn offset_with_meta( + fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, offset: Size, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self> { assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway - Ok(self.offset_(offset, layout, cx)) + Ok(self.offset_(offset, layout, ecx)) } fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( @@ -317,49 +319,37 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> { } } -impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { - // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`. - pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> { - Ok(if self.layout.is_unsized() { - if matches!(self.op, Operand::Immediate(_)) { - // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing. - // However, ConstProp doesn't do that, so we can run into this nonsense situation. - throw_inval!(ConstPropNonsense); - } - // There are no unsized immediates. - self.assert_mem_place().meta - } else { - MemPlaceMeta::None - }) - } -} - impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> { #[inline(always)] fn layout(&self) -> TyAndLayout<'tcx> { self.layout } - fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( - &self, - _ecx: &InterpCx<'mir, 'tcx, M>, - ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { - self.meta() + #[inline] + fn meta(&self) -> MemPlaceMeta<Prov> { + match self.as_mplace_or_imm() { + Left(mplace) => mplace.meta, + Right(_) => { + debug_assert!(self.layout.is_sized(), "unsized immediates are not a thing"); + MemPlaceMeta::None + } + } } - fn offset_with_meta( + fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, offset: Size, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self> { match self.as_mplace_or_imm() { - Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()), + Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()), Right(imm) => { - assert!(!meta.has_meta()); // no place to store metadata here + debug_assert!(layout.is_sized(), "unsized immediates are not a thing"); + assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here // Every part of an uninit is uninit. 
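A sketch of why unsized immediates cannot exist, with stand-in types rather than the real interpreter definitions: only the indirect, in-memory representation has a slot for wide-pointer metadata.

enum ToyMeta {
    None,
    Len(u64), // e.g. the length of a slice
}

enum ToyOperand {
    // A memory place can describe unsized data: it carries metadata.
    Indirect { ptr: usize, meta: ToyMeta },
    // An immediate is at most a couple of scalars; there is nowhere to
    // store metadata, so it must always be sized.
    Immediate(u128),
}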
- Ok(imm.offset(offset, layout, cx)?.into()) + Ok(imm.offset_(offset, layout, ecx).into()) } } } @@ -587,6 +577,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { let layout = self.layout_of_local(frame, local, layout)?; let op = *frame.locals[local].access()?; + if matches!(op, Operand::Immediate(_)) { + if layout.is_unsized() { + // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot + // efficiently check whether they are sized. We have to catch that case here. + throw_inval!(ConstPropNonsense); + } + } Ok(OpTy { op, layout, align: Some(layout.align.abi) }) } @@ -600,16 +597,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { match place.as_mplace_or_local() { Left(mplace) => Ok(mplace.into()), Right((frame, local, offset)) => { + debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`. let base = self.local_to_op(&self.stack()[frame], local, None)?; - let mut field = if let Some(offset) = offset { - // This got offset. We can be sure that the field is sized. - base.offset(offset, place.layout, self)? - } else { - assert_eq!(place.layout, base.layout); - // Unsized cases are possible here since an unsized local will be a - // `Place::Local` until the first projection calls `place_to_op` to extract the - // underlying mplace. - base + let mut field = match offset { + Some(offset) => base.offset(offset, place.layout, self)?, + None => { + // In the common case this hasn't been projected. + debug_assert_eq!(place.layout, base.layout); + base + } }; field.align = Some(place.align); Ok(field) diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 40a7a0f4e56..65c5cd656cb 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -13,7 +13,7 @@ use rustc_middle::mir::interpret::PointerArithmetic; use rustc_middle::ty; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::Ty; -use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT}; +use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT}; use super::{ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, @@ -41,33 +41,13 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> { } } + #[inline(always)] pub fn has_meta(self) -> bool { match self { Self::Meta(_) => true, Self::None => false, } } - - pub(crate) fn len<'tcx>( - &self, - layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, - ) -> InterpResult<'tcx, u64> { - if layout.is_unsized() { - // We need to consult `meta` metadata - match layout.ty.kind() { - ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx), - _ => bug!("len not supported on unsized type {:?}", layout.ty), - } - } else { - // Go through the layout. There are lots of types that support a length, - // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!) - match layout.fields { - abi::FieldsShape::Array { count, .. } => Ok(count), - _ => bug!("len not supported on sized type {:?}", layout.ty), - } - } - } } #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] @@ -111,6 +91,8 @@ pub enum Place<Prov: Provenance = AllocId> { /// (Without that optimization, we'd just always be a `MemPlace`.) /// Note that this only stores the frame index, not the thread this frame belongs to -- that is /// implicit. 
This means a `Place` must never be moved across interpreter thread boundaries! + /// + /// This variant shall not be used for unsized types -- those must always live in memory. Local { frame: usize, local: mir::Local, offset: Option<Size> }, } @@ -157,7 +139,7 @@ impl<Prov: Provenance> MemPlace<Prov> { } /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. - #[inline(always)] + #[inline] pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> { match self.meta { MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)), @@ -220,22 +202,20 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx self.layout } - fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( - &self, - _ecx: &InterpCx<'mir, 'tcx, M>, - ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { - Ok(self.meta) + #[inline(always)] + fn meta(&self) -> MemPlaceMeta<Prov> { + self.meta } - fn offset_with_meta( + fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, offset: Size, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self> { Ok(MPlaceTy { - mplace: self.mplace.offset_with_meta_(offset, meta, cx)?, + mplace: self.mplace.offset_with_meta_(offset, meta, ecx)?, align: self.align.restrict_for_offset(offset), layout, }) @@ -255,25 +235,30 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx, self.layout } - fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( - &self, - ecx: &InterpCx<'mir, 'tcx, M>, - ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { - ecx.place_meta(self) + #[inline] + fn meta(&self) -> MemPlaceMeta<Prov> { + match self.as_mplace_or_local() { + Left(mplace) => mplace.meta, + Right(_) => { + debug_assert!(self.layout.is_sized(), "unsized locals should live in memory"); + MemPlaceMeta::None + } + } } - fn offset_with_meta( + fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, offset: Size, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self> { Ok(match self.as_mplace_or_local() { - Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(), + Left(mplace) => mplace.offset_with_meta(offset, meta, layout, ecx)?.into(), Right((frame, local, old_offset)) => { + debug_assert!(layout.is_sized(), "unsized locals should live in memory"); assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway... - let new_offset = cx + let new_offset = ecx .data_layout() .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?; PlaceTy { @@ -323,7 +308,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> { /// A place is either an mplace or some local. - #[inline] + #[inline(always)] pub fn as_mplace_or_local( &self, ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> { @@ -399,20 +384,6 @@ where Prov: Provenance + 'static, M: Machine<'mir, 'tcx, Provenance = Prov>, { - /// Get the metadata of the given place. - pub(super) fn place_meta( - &self, - place: &PlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> { - if place.layout.is_unsized() { - // For `Place::Local`, the metadata is stored with the local, not the place. So we have - // to look that up first. 
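For intuition on the `restrict_for_offset` call above: after moving by `offset` bytes, the guaranteed alignment is additionally bounded by the largest power of two dividing the offset. A sketch over plain integers, assuming power-of-two alignments (this mirrors what the rustc helper computes, not its actual implementation):

fn restrict_for_offset(align: u64, offset: u64) -> u64 {
    if offset == 0 {
        align // no movement, nothing lost
    } else {
        align.min(1 << offset.trailing_zeros())
    }
}

fn main() {
    assert_eq!(restrict_for_offset(8, 4), 4); // 8-aligned base + 4 bytes: 4-aligned
    assert_eq!(restrict_for_offset(8, 6), 2); // + 6 bytes: only 2-aligned
    assert_eq!(restrict_for_offset(4, 8), 4); // the offset keeps 4-alignment intact
}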
- self.place_to_op(place)?.meta() - } else { - Ok(MemPlaceMeta::None) - } - } - /// Take a value, which represents a (thin or wide) reference, and make it a place. /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`. /// @@ -537,8 +508,24 @@ where frame: usize, local: mir::Local, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> { - let layout = self.layout_of_local(&self.stack()[frame], local, None)?; - let place = Place::Local { frame, local, offset: None }; + // Other parts of the system rely on `Place::Local` never being unsized. + // So we eagerly check here if this local has an MPlace, and if yes we use it. + let frame_ref = &self.stack()[frame]; + let layout = self.layout_of_local(frame_ref, local, None)?; + let place = if layout.is_sized() { + // We can just always use the `Local` for sized values. + Place::Local { frame, local, offset: None } + } else { + // Unsized `Local` isn't okay (we cannot store the metadata). + match frame_ref.locals[local].access()? { + Operand::Immediate(_) => { + // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot + // efficiently check whether they are sized. We have to catch that case here. + throw_inval!(ConstPropNonsense); + } + Operand::Indirect(mplace) => Place::Ptr(*mplace), + } + }; Ok(PlaceTy { place, layout, align: layout.align.abi }) } @@ -896,9 +883,7 @@ where // that has different alignment than the outer field. let local_layout = self.layout_of_local(&self.stack()[frame], local, None)?; - if local_layout.is_unsized() { - throw_unsup_format!("unsized locals are not supported"); - } + assert!(local_layout.is_sized(), "unsized locals cannot be immediate"); let mplace = self.allocate(local_layout, MemoryKind::Stack)?; // Preserve old value. (As an optimization, we can skip this if it was uninit.) if !matches!(local_val, Immediate::Uninit) { diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index 882097ad2c3..c69321d109e 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -7,12 +7,13 @@ //! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually //! implement the logic on OpTy, and MPlaceTy calls that. +use std::marker::PhantomData; +use std::ops::Range; + use rustc_middle::mir; use rustc_middle::ty; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::Ty; -use rustc_middle::ty::TyCtxt; -use rustc_target::abi::HasDataLayout; use rustc_target::abi::Size; use rustc_target::abi::{self, VariantIdx}; @@ -24,44 +25,59 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug { fn layout(&self) -> TyAndLayout<'tcx>; /// Get the metadata of a wide value. - fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( - &self, - ecx: &InterpCx<'mir, 'tcx, M>, - ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>; + fn meta(&self) -> MemPlaceMeta<Prov>; + /// Get the length of a slice/string/array stored here. fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, u64> { - self.meta(ecx)?.len(self.layout(), ecx) + let layout = self.layout(); + if layout.is_unsized() { + // We need to consult `meta` metadata + match layout.ty.kind() { + ty::Slice(..) 
| ty::Str => self.meta().unwrap_meta().to_target_usize(ecx), + _ => bug!("len not supported on unsized type {:?}", layout.ty), + } + } else { + // Go through the layout. There are lots of types that support a length, + // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!) + match layout.fields { + abi::FieldsShape::Array { count, .. } => Ok(count), + _ => bug!("len not supported on sized type {:?}", layout.ty), + } + } } /// Offset the value by the given amount, replacing the layout and metadata. - fn offset_with_meta( + fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, offset: Size, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self>; - fn offset( + #[inline] + fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, offset: Size, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self> { assert!(layout.is_sized()); - self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx) + self.offset_with_meta(offset, MemPlaceMeta::None, layout, ecx) } - fn transmute( + #[inline] + fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( &self, layout: TyAndLayout<'tcx>, - cx: &impl HasDataLayout, + ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, Self> { + assert!(self.layout().is_sized() && layout.is_sized()); assert_eq!(self.layout().size, layout.size); - self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx) + self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, ecx) } /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for @@ -72,6 +88,28 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug { ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>; } +/// A type representing iteration over the elements of an array. +pub struct ArrayIterator<'tcx, 'a, Prov: Provenance + 'static, P: Projectable<'tcx, Prov>> { + base: &'a P, + range: Range<u64>, + stride: Size, + field_layout: TyAndLayout<'tcx>, + _phantom: PhantomData<Prov>, // otherwise it says `Prov` is never used... +} + +impl<'tcx, 'a, Prov: Provenance + 'static, P: Projectable<'tcx, Prov>> + ArrayIterator<'tcx, 'a, Prov, P> +{ + /// Should be the same `ecx` on each call, and match the one used to create the iterator. + pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + &mut self, + ecx: &InterpCx<'mir, 'tcx, M>, + ) -> InterpResult<'tcx, Option<(u64, P)>> { + let Some(idx) = self.range.next() else { return Ok(None) }; + Ok(Some((idx, self.base.offset(self.stride * idx, self.field_layout, ecx)?))) + } +} + // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> where @@ -104,7 +142,7 @@ where // But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`) throw_inval!(ConstPropNonsense); } - let base_meta = base.meta(self)?; + let base_meta = base.meta(); // Re-use parent metadata to determine dynamic field layout. // With custom DSTS, this *will* execute user-defined code, but the same // happens at run-time so that's okay. @@ -132,7 +170,7 @@ where base: &P, variant: VariantIdx, ) -> InterpResult<'tcx, P> { - assert!(!base.meta(self)?.has_meta()); + assert!(!base.meta().has_meta()); // Downcasts only change the layout. 
// (In particular, no check about whether this is even the active variant -- that's by design, // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.) @@ -206,20 +244,13 @@ where pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>( &self, base: &'a P, - ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a> - where - 'tcx: 'a, - { + ) -> InterpResult<'tcx, ArrayIterator<'tcx, 'a, M::Provenance, P>> { let abi::FieldsShape::Array { stride, .. } = base.layout().fields else { span_bug!(self.cur_span(), "operand_array_fields: expected an array layout"); }; let len = base.len(self)?; let field_layout = base.layout().field(self, 0); - let tcx: TyCtxt<'tcx> = *self.tcx; - // `Size` multiplication - Ok((0..len).map(move |i| { - base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx) - })) + Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData }) } /// Subslicing diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs index ca7c484ea31..ca66c4cfb63 100644 --- a/compiler/rustc_const_eval/src/interpret/terminator.rs +++ b/compiler/rustc_const_eval/src/interpret/terminator.rs @@ -2,19 +2,21 @@ use std::borrow::Cow; use either::Either; use rustc_ast::ast::InlineAsmOptions; -use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; -use rustc_middle::ty::Instance; use rustc_middle::{ mir, - ty::{self, Ty}, + ty::{ + self, + layout::{FnAbiOf, LayoutOf, TyAndLayout}, + Instance, Ty, + }, }; -use rustc_target::abi; use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode}; +use rustc_target::abi::{self, FieldIdx}; use rustc_target::spec::abi::Abi; use super::{ - AllocId, FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, - Operand, PlaceTy, Provenance, Scalar, StackPopCleanup, + AllocId, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, + Provenance, Scalar, StackPopCleanup, }; use crate::fluent_generated as fluent; @@ -251,47 +253,93 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .collect() } - fn check_argument_compat( - caller_abi: &ArgAbi<'tcx, Ty<'tcx>>, - callee_abi: &ArgAbi<'tcx, Ty<'tcx>>, + /// Find the wrapped inner type of a transparent wrapper. + /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field"). + fn unfold_transparent(&self, layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx> { + match layout.ty.kind() { + ty::Adt(adt_def, _) if adt_def.repr().transparent() => { + assert!(!adt_def.is_enum()); + // Find the non-1-ZST field. + let mut non_1zst_fields = (0..layout.fields.count()).filter_map(|idx| { + let field = layout.field(self, idx); + if field.is_1zst() { None } else { Some(field) } + }); + let first = non_1zst_fields.next().expect("`unfold_transparent` called on 1-ZST"); + assert!( + non_1zst_fields.next().is_none(), + "more than one non-1-ZST field in a transparent type" + ); + + // Found it! + self.unfold_transparent(first) + } + // Not a transparent type, no further unfolding. + _ => layout, + } + } + + /// Check if these two layouts look like they are fn-ABI-compatible. + /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out + /// that only checking the `PassMode` is insufficient.) 
+ fn layout_compat( + &self, + caller_layout: TyAndLayout<'tcx>, + callee_layout: TyAndLayout<'tcx>, ) -> bool { - // Heuristic for type comparison. - let layout_compat = || { - if caller_abi.layout.ty == callee_abi.layout.ty { - // No question - return true; + if caller_layout.ty == callee_layout.ty { + // Fast path: equal types are definitely compatible. + return true; + } + + match (caller_layout.abi, callee_layout.abi) { + // If both sides have Scalar/Vector/ScalarPair ABI, we can easily directly compare them. + // Different valid ranges are okay (the validity check will complain if this leads to + // invalid transmutes). Different signs are *not* okay on some targets (e.g. `extern + // "C"` on `s390x` where small integers are passed zero/sign-extended in large + // registers), so we generally reject them to increase portability. + // NOTE: this is *not* a stable guarantee! It just reflects a property of our current + // ABIs. It's also fragile; the same pair of types might be considered ABI-compatible + // when used directly by-value but not considered compatible as a struct field or array + // element. + (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => { + caller.primitive() == callee.primitive() } - if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() { - // No, no, no. We require the types to *exactly* match for unsized arguments. If - // these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`), - // then who knows what happens. - return false; + ( + abi::Abi::Vector { element: caller_element, count: caller_count }, + abi::Abi::Vector { element: callee_element, count: callee_count }, + ) => { + caller_element.primitive() == callee_element.primitive() + && caller_count == callee_count } - if caller_abi.layout.size != callee_abi.layout.size - || caller_abi.layout.align.abi != callee_abi.layout.align.abi - { - // This cannot go well... - return false; + (abi::Abi::ScalarPair(caller1, caller2), abi::Abi::ScalarPair(callee1, callee2)) => { + caller1.primitive() == callee1.primitive() + && caller2.primitive() == callee2.primitive() } - // The rest *should* be okay, but we are extra conservative. - match (caller_abi.layout.abi, callee_abi.layout.abi) { - // Different valid ranges are okay (once we enforce validity, - // that will take care to make it UB to leave the range, just - // like for transmute). - (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => { - caller.primitive() == callee.primitive() - } - ( - abi::Abi::ScalarPair(caller1, caller2), - abi::Abi::ScalarPair(callee1, callee2), - ) => { - caller1.primitive() == callee1.primitive() - && caller2.primitive() == callee2.primitive() + (abi::Abi::Aggregate { .. }, abi::Abi::Aggregate { .. }) => { + // Aggregates are compatible only if they newtype-wrap the same type, or if they are both 1-ZST. + // (The latter part is needed to ensure e.g. that `struct Zst` is compatible with `struct Wrap((), Zst)`.) + // This is conservative, but also means that our check isn't quite so heavily dependent on the `PassMode`, + // which means having ABI-compatibility on one target is much more likely to imply compatibility for other targets. + if caller_layout.is_1zst() || callee_layout.is_1zst() { + // If either is a 1-ZST, both must be. + caller_layout.is_1zst() && callee_layout.is_1zst() + } else { + // Neither is a 1-ZST, so we can check what they are wrapping. 
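For example (hypothetical types), peeling `repr(transparent)` wrappers while skipping 1-ZST fields makes both of the following ABI-compatible with a plain `i32`:

use std::marker::PhantomData;

#[repr(transparent)]
struct Meters(i32);

#[repr(transparent)]
struct Tagged<T> {
    tag: PhantomData<T>, // 1-ZST field: ignored by the unfolding
    value: Meters,       // unfolds to `Meters`, and from there to `i32`
}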
+                    self.unfold_transparent(caller_layout).ty
+                        == self.unfold_transparent(callee_layout).ty
                 }
-                // Be conservative
-                _ => false,
             }
-        };
+            // What remains is `Abi::Uninhabited` (which can never be passed anyway) and
+            // mismatching ABIs, which should all be rejected.
+            _ => false,
+        }
+    }
+
+    fn check_argument_compat(
+        &self,
+        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+    ) -> bool {
         // When comparing the PassMode, we have to be smart about comparing the attributes.
         let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| {
             // There's only one regular attribute that matters for the call ABI: InReg.
@@ -309,7 +357,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return true;
         };
         let mode_compat = || match (&caller_abi.mode, &callee_abi.mode) {
-            (PassMode::Ignore, PassMode::Ignore) => true,
+            (PassMode::Ignore, PassMode::Ignore) => true, // can still be reached for the return type
             (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
             (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
                 arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
@@ -326,15 +374,29 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             _ => false,
         };
 
-        if layout_compat() && mode_compat() {
+        // Ideally `PassMode` would capture everything there is about argument passing, but that is
+        // not the case: in `FnAbi::llvm_type`, also parts of the layout and type information are
+        // used. So we need to check that *both* sufficiently agree to ensure the arguments are
+        // compatible.
+        // For instance, `layout_compat` is needed to reject `i32` vs `f32`, which is not reflected
+        // in `PassMode`. `mode_compat` is needed to reject `u8` vs `bool`, which have the same
+        // `abi::Primitive` but different `arg_ext`.
+        if self.layout_compat(caller_abi.layout, callee_abi.layout) && mode_compat() {
+            // Something went very wrong if our checks don't even imply that the layout is the same.
+            assert!(
+                caller_abi.layout.size == callee_abi.layout.size
+                    && caller_abi.layout.align.abi == callee_abi.layout.align.abi
+                    && caller_abi.layout.is_sized() == callee_abi.layout.is_sized()
+            );
             return true;
+        } else {
+            trace!(
+                "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+                caller_abi,
+                callee_abi
+            );
+            return false;
         }
-        trace!(
-            "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
-            caller_abi,
-            callee_abi
-        );
-        return false;
     }
 
     /// Initialize a single callee argument, checking the types for compatibility.
@@ -344,23 +406,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
         >,
         callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
-        callee_arg: &PlaceTy<'tcx, M::Provenance>,
+        callee_arg: &mir::Place<'tcx>,
+        callee_ty: Ty<'tcx>,
+        already_live: bool,
     ) -> InterpResult<'tcx>
     where
         'tcx: 'x,
         'tcx: 'y,
     {
         if matches!(callee_abi.mode, PassMode::Ignore) {
-            // This one is skipped.
+            // This one is skipped. Still must be made live though!
+            if !already_live {
+                self.storage_live(callee_arg.as_local().unwrap())?;
+            }
             return Ok(());
         }
         // Find next caller arg.
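The kind of mismatch `layout_compat` is there to catch, sketched at the surface level: `i32` and `f32` agree in size and alignment but not in scalar primitive, so a call through a fn pointer transmuted like this is rejected by the interpreter (and is UB at runtime).

fn takes_f32(_: f32) {}

fn main() {
    // `fn(f32)` and `fn(i32)` are the same size, so the transmute itself compiles...
    let f: fn(i32) = unsafe { std::mem::transmute(takes_f32 as fn(f32)) };
    // ...but actually calling `f` would pair a `Scalar(F32)` callee with a
    // `Scalar(I32)` caller, which `layout_compat` rejects.
    let _ = f;
}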
let Some((caller_arg, caller_abi)) = caller_args.next() else { throw_ub_custom!(fluent::const_eval_not_enough_caller_args); }; - // Now, check - if !Self::check_argument_compat(caller_abi, callee_abi) { - let callee_ty = format!("{}", callee_arg.layout.ty); + // Check compatibility + if !self.check_argument_compat(caller_abi, callee_abi) { + let callee_ty = format!("{}", callee_ty); let caller_ty = format!("{}", caller_arg.layout().ty); throw_ub_custom!( fluent::const_eval_incompatible_types, @@ -372,35 +439,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // will later protect the source it comes from. This means the callee cannot observe if we // did in-place of by-copy argument passing, except for pointer equality tests. let caller_arg_copy = self.copy_fn_arg(&caller_arg)?; - // Special handling for unsized parameters. - if caller_arg_copy.layout.is_unsized() { - // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way. - assert_eq!(caller_arg_copy.layout.ty, callee_arg.layout.ty); - // We have to properly pre-allocate the memory for the callee. - // So let's tear down some abstractions. - // This all has to be in memory, there are no immediate unsized values. - let src = caller_arg_copy.assert_mem_place(); - // The destination cannot be one of these "spread args". - let (dest_frame, dest_local, dest_offset) = callee_arg - .as_mplace_or_local() - .right() - .expect("callee fn arguments must be locals"); - // We are just initializing things, so there can't be anything here yet. - assert!(matches!( - *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?, - Operand::Immediate(Immediate::Uninit) - )); - assert_eq!(dest_offset, None); - // Allocate enough memory to hold `src`. - let dest_place = self.allocate_dyn(src.layout, MemoryKind::Stack, src.meta)?; - // Update the local to be that new place. - *M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place); + if !already_live { + let local = callee_arg.as_local().unwrap(); + let meta = caller_arg_copy.meta(); + // `check_argument_compat` ensures that if metadata is needed, both have the same type, + // so we know they will use the metadata the same way. + assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty); + + self.storage_live_dyn(local, meta)?; } + // Now we can finally actually evaluate the callee place. + let callee_arg = self.eval_place(*callee_arg)?; // We allow some transmutes here. // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This // is true for all `copy_op`, but there are a lot of special cases for argument passing // specifically.) - self.copy_op(&caller_arg_copy, callee_arg, /*allow_transmute*/ true)?; + self.copy_op(&caller_arg_copy, &callee_arg, /*allow_transmute*/ true)?; // If this was an in-place pass, protect the place it comes from for the duration of the call. if let FnArg::InPlace(place) = caller_arg { M::protect_in_place_function_argument(self, place)?; @@ -586,18 +640,50 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // not advance `caller_iter` for ZSTs. let mut callee_args_abis = callee_fn_abi.args.iter(); for local in body.args_iter() { - let dest = self.eval_place(mir::Place::from(local))?; + // Construct the destination place for this argument. At this point all + // locals are still dead, so we cannot construct a `PlaceTy`. 
+ let dest = mir::Place::from(local); + // `layout_of_local` does more than just the substitution we need to get the + // type, but the result gets cached so this avoids calling the substitution + // query *again* the next time this local is accessed. + let ty = self.layout_of_local(self.frame(), local, None)?.ty; if Some(local) == body.spread_arg { + // Make the local live once, then fill in the value field by field. + self.storage_live(local)?; // Must be a tuple - for i in 0..dest.layout.fields.count() { - let dest = self.project_field(&dest, i)?; + let ty::Tuple(fields) = ty.kind() else { + span_bug!( + self.cur_span(), + "non-tuple type for `spread_arg`: {ty:?}" + ) + }; + for (i, field_ty) in fields.iter().enumerate() { + let dest = dest.project_deeper( + &[mir::ProjectionElem::Field( + FieldIdx::from_usize(i), + field_ty, + )], + *self.tcx, + ); let callee_abi = callee_args_abis.next().unwrap(); - self.pass_argument(&mut caller_args, callee_abi, &dest)?; + self.pass_argument( + &mut caller_args, + callee_abi, + &dest, + field_ty, + /* already_live */ true, + )?; } } else { - // Normal argument + // Normal argument. Cannot mark it as live yet, it might be unsized! let callee_abi = callee_args_abis.next().unwrap(); - self.pass_argument(&mut caller_args, callee_abi, &dest)?; + self.pass_argument( + &mut caller_args, + callee_abi, + &dest, + ty, + /* already_live */ false, + )?; } } // If the callee needs a caller location, pretend we consume one more argument from the ABI. @@ -613,7 +699,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { throw_ub_custom!(fluent::const_eval_too_many_caller_args); } // Don't forget to check the return type! - if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) { + if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) { let callee_ty = format!("{}", callee_fn_abi.ret.layout.ty); let caller_ty = format!("{}", caller_fn_abi.ret.layout.ty); throw_ub_custom!( @@ -630,6 +716,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Nothing to do for locals, they are always properly allocated and aligned. } M::protect_in_place_function_argument(self, destination)?; + + // Don't forget to mark "initially live" locals as live. + self.storage_live_for_always_live_locals()?; }; match res { Err(err) => { @@ -670,15 +759,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } _ => { // Not there yet, search for the only non-ZST field. + // (The rules for `DispatchFromDyn` ensure there's exactly one such field.) 
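The receiver shapes this search has to handle look roughly like the following hypothetical wrapper, where every field except the pointer is a 1-ZST:

use std::marker::PhantomData;

struct ToyReceiver<'a, T: ?Sized> {
    _tag: PhantomData<fn()>, // 1-ZST: skipped by the search
    ptr: &'a T,              // the single non-1-ZST field, holding the (wide) pointer
}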
let mut non_zst_field = None; for i in 0..receiver.layout.fields.count() { let field = self.project_field(&receiver, i)?; - let zst = - field.layout.is_zst() && field.layout.align.abi.bytes() == 1; + let zst = field.layout.is_1zst(); if !zst { assert!( non_zst_field.is_none(), - "multiple non-ZST fields in dyn receiver type {}", + "multiple non-1-ZST fields in dyn receiver type {}", receiver.layout.ty ); non_zst_field = Some(field); @@ -686,7 +775,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } receiver = non_zst_field.unwrap_or_else(|| { panic!( - "no non-ZST fields in dyn receiver type {}", + "no non-1-ZST fields in dyn receiver type {}", receiver.layout.ty ) }); diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index d3f05af1c72..0d08d6be919 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -911,9 +911,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Complain about any other kind of error -- those are bad because we'd like to // report them in a way that shows *where* in the value the issue lies. Err(err) => { - let (err, backtrace) = err.into_parts(); - backtrace.print_backtrace(); - bug!("Unexpected Undefined Behavior error during validation: {err:?}"); + bug!( + "Unexpected Undefined Behavior error during validation: {}", + self.format_error(err) + ); } } } diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs index 531e2bd3ee0..fc21ad1f183 100644 --- a/compiler/rustc_const_eval/src/interpret/visitor.rs +++ b/compiler/rustc_const_eval/src/interpret/visitor.rs @@ -170,8 +170,9 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized { } } FieldsShape::Array { .. } => { - for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() { - self.visit_field(v, idx, &field?)?; + let mut iter = self.ecx().project_array_fields(v)?; + while let Some((idx, field)) = iter.next(self.ecx())? { + self.visit_field(v, idx.try_into().unwrap(), &field)?; } } } diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs index b829f24ab7a..770c3f7f02c 100644 --- a/compiler/rustc_const_eval/src/transform/validate.rs +++ b/compiler/rustc_const_eval/src/transform/validate.rs @@ -20,6 +20,8 @@ use rustc_mir_dataflow::{Analysis, ResultsCursor}; use rustc_target::abi::{Size, FIRST_VARIANT}; use rustc_target::spec::abi::Abi; +use crate::util::is_within_packed; + #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum EdgeKind { Unwind, @@ -93,6 +95,7 @@ impl<'tcx> MirPass<'tcx> for Validator { cfg_checker.visit_body(body); cfg_checker.check_cleanup_control_flow(); + // Also run the TypeChecker. for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body) { cfg_checker.fail(location, msg); } @@ -427,14 +430,34 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { self.check_unwind_edge(location, *unwind); // The call destination place and Operand::Move place used as an argument might be - // passed by a reference to the callee. Consequently they must be non-overlapping. - // Currently this simply checks for duplicate places. + // passed by a reference to the callee. Consequently they must be non-overlapping + // and cannot be packed. Currently this simply checks for duplicate places. 
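A surface-level illustration of why packed places are banned in `Call` terminators (hypothetical types): the callee may assume its argument place is aligned, so a packed field has to be copied into an aligned temporary first, which is exactly what MIR building does for the code below.

#[repr(packed)]
struct Packed {
    byte: u8,
    word: u32, // only 1-aligned inside `Packed`
}

fn callee(_: u32) {}

fn main() {
    let p = Packed { byte: 0, word: 1 };
    // Reading the field copies it into an aligned temporary; handing the
    // field's own storage to `callee` directly would be a validation error.
    callee(p.word);
}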
self.place_cache.clear();
                 self.place_cache.insert(destination.as_ref());
+                if is_within_packed(self.tcx, &self.body.local_decls, *destination).is_some() {
+                    // This is bad! The callee will expect the memory to be aligned.
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered packed place in `Call` terminator destination: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
                 let mut has_duplicates = false;
                 for arg in args {
                     if let Operand::Move(place) = arg {
                         has_duplicates |= !self.place_cache.insert(place.as_ref());
+                        if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() {
+                            // This is bad! The callee will expect the memory to be aligned.
+                            self.fail(
+                                location,
+                                format!(
+                                    "encountered `Move` of a packed place in `Call` terminator: {:?}",
+                                    terminator.kind,
+                                ),
+                            );
+                        }
                     }
                 }
 
@@ -442,7 +465,7 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
                     self.fail(
                         location,
                         format!(
-                            "encountered overlapping memory in `Call` terminator: {:?}",
+                            "encountered overlapping memory in `Move` arguments to `Call` terminator: {:?}",
                             terminator.kind,
                         ),
                     );
@@ -541,6 +564,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
     }
 }
 
+/// A faster version of the validation pass that only checks those things which may break when applying
+/// generic substitutions.
 pub fn validate_types<'tcx>(
     tcx: TyCtxt<'tcx>,
     mir_phase: MirPhase,
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
index c1f0ff260d2..2e0643afb39 100644
--- a/compiler/rustc_const_eval/src/util/alignment.rs
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -34,6 +34,7 @@ where
             false
         }
         _ => {
+            // We cannot figure out the layout. Conservatively assume that this is disaligned.
             debug!("is_disaligned({:?}) - true", place);
             true
         }
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
index 9ff401c3c7a..3910c6fa46d 100644
--- a/compiler/rustc_data_structures/src/graph/implementation/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -20,7 +20,6 @@
 //! the field `next_edge`). Each of those fields is an array that should
 //! be indexed by the direction (see the type `Direction`).
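A toy version of the storage scheme that module doc describes, after the switch from `SnapshotVec` to plain `Vec` (names are stand-ins): adjacency lists are intrusive linked lists threaded through the edge array.

const INVALID_EDGE: usize = usize::MAX;

struct Node {
    first_out: usize, // head of this node's outgoing-edge list, or INVALID_EDGE
}

struct Edge {
    target: usize,
    next_out: usize, // next edge with the same source, or INVALID_EDGE
}

struct Graph {
    nodes: Vec<Node>,
    edges: Vec<Edge>,
}

impl Graph {
    fn add_edge(&mut self, source: usize, target: usize) {
        let idx = self.edges.len();
        // Prepend to the source node's intrusive list of outgoing edges.
        let head = std::mem::replace(&mut self.nodes[source].first_out, idx);
        self.edges.push(Edge { target, next_out: head });
    }
}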
-use crate::snapshot_vec::{SnapshotVec, SnapshotVecDelegate}; use rustc_index::bit_set::BitSet; use std::fmt::Debug; @@ -28,8 +27,8 @@ use std::fmt::Debug; mod tests; pub struct Graph<N, E> { - nodes: SnapshotVec<Node<N>>, - edges: SnapshotVec<Edge<E>>, + nodes: Vec<Node<N>>, + edges: Vec<Edge<E>>, } pub struct Node<N> { @@ -45,20 +44,6 @@ pub struct Edge<E> { pub data: E, } -impl<N> SnapshotVecDelegate for Node<N> { - type Value = Node<N>; - type Undo = (); - - fn reverse(_: &mut Vec<Node<N>>, _: ()) {} -} - -impl<N> SnapshotVecDelegate for Edge<N> { - type Value = Edge<N>; - type Undo = (); - - fn reverse(_: &mut Vec<Edge<N>>, _: ()) {} -} - #[derive(Copy, Clone, PartialEq, Debug)] pub struct NodeIndex(pub usize); @@ -86,11 +71,11 @@ impl NodeIndex { impl<N: Debug, E: Debug> Graph<N, E> { pub fn new() -> Graph<N, E> { - Graph { nodes: SnapshotVec::new(), edges: SnapshotVec::new() } + Graph { nodes: Vec::new(), edges: Vec::new() } } pub fn with_capacity(nodes: usize, edges: usize) -> Graph<N, E> { - Graph { nodes: SnapshotVec::with_capacity(nodes), edges: SnapshotVec::with_capacity(edges) } + Graph { nodes: Vec::with_capacity(nodes), edges: Vec::with_capacity(edges) } } // # Simple accessors diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs index f8c06f9a814..b067f9d4502 100644 --- a/compiler/rustc_data_structures/src/marker.rs +++ b/compiler/rustc_data_structures/src/marker.rs @@ -92,7 +92,6 @@ cfg_if!( [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend] [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend] [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend] - [crate::sync::Lock<T> where T: DynSend] [crate::sync::RwLock<T> where T: DynSend] [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool] [rustc_arena::TypedArena<T> where T: DynSend] @@ -171,7 +170,6 @@ cfg_if!( [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync] [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync] [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync] - [crate::sync::Lock<T> where T: DynSend] [crate::sync::RwLock<T> where T: DynSend + DynSync] [crate::sync::OneThread<T> where T] [crate::sync::WorkerLocal<T> where T: DynSend] diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs index 52ab5a7fb14..0f769c1f3bf 100644 --- a/compiler/rustc_data_structures/src/sharded.rs +++ b/compiler/rustc_data_structures/src/sharded.rs @@ -2,9 +2,12 @@ use crate::fx::{FxHashMap, FxHasher}; #[cfg(parallel_compiler)] use crate::sync::{is_dyn_thread_safe, CacheAligned}; use crate::sync::{Lock, LockGuard}; +#[cfg(parallel_compiler)] +use itertools::Either; use std::borrow::Borrow; use std::collections::hash_map::RawEntryMut; use std::hash::{Hash, Hasher}; +use std::iter; use std::mem; // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700, @@ -70,19 +73,27 @@ impl<T> Sharded<T> { } } - pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> { + #[inline] + pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> { match self { - Self::Single(single) => vec![single.lock()], + #[cfg(not(parallel_compiler))] + Self::Single(single) => iter::once(single.lock()), + #[cfg(parallel_compiler)] + Self::Single(single) => 
Either::Left(iter::once(single.lock())),
             #[cfg(parallel_compiler)]
-            Self::Shards(shards) => shards.iter().map(|shard| shard.0.lock()).collect(),
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
         }
     }
 
-    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
+    #[inline]
+    pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
         match self {
-            Self::Single(single) => Some(vec![single.try_lock()?]),
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.try_lock()),
+            #[cfg(parallel_compiler)]
+            Self::Single(single) => Either::Left(iter::once(single.try_lock())),
             #[cfg(parallel_compiler)]
-            Self::Shards(shards) => shards.iter().map(|shard| shard.0.try_lock()).collect(),
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
         }
     }
 }
@@ -101,7 +112,7 @@ pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
 
 impl<K: Eq, V> ShardedHashMap<K, V> {
     pub fn len(&self) -> usize {
-        self.lock_shards().iter().map(|shard| shard.len()).sum()
+        self.lock_shards().map(|shard| shard.len()).sum()
     }
 }
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 25a08237346..e82b0f6d496 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -26,7 +26,8 @@
 //! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`     |
 //! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`   |
 //! |                         |                     |                         |
-//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>` |
+//! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or         |
+//! |                         |                     | `parking_lot::Mutex<T>` |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>` |
 //! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`               |
 //! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`         |
@@ -40,11 +41,16 @@
 //! [^2] `MTLockRef` is a typedef.
 
 pub use crate::marker::*;
+use parking_lot::Mutex;
+use std::any::Any;
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 use std::ops::{Deref, DerefMut};
 use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
 
+mod lock;
+pub use lock::{Lock, LockGuard};
+
 mod worker_local;
 pub use worker_local::{Registry, WorkerLocal};
 
@@ -75,6 +81,13 @@ mod mode {
         }
     }
 
+    // Whether thread safety might be enabled.
+    #[inline]
+    #[cfg(parallel_compiler)]
+    pub fn might_be_dyn_thread_safe() -> bool {
+        DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
+    }
+
     // Only set by the `-Z threads` compile option
     pub fn set_dyn_thread_safe_mode(mode: bool) {
         let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
@@ -92,16 +105,48 @@ mod mode {
 
 pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
 
+/// A guard used to hold panics that occur during a parallel section to later be unwound.
+/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
+/// hiding errors by ensuring that everything in the section has completed executing before
+/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
+/// output matches the parallel compiler for testing purposes.
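The pattern `ParallelGuard` packages, as a free-standing sketch using only std: run every block, keep just the first panic payload, and re-raise it only after all blocks have run.

use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

fn run_all(blocks: Vec<Box<dyn FnOnce()>>) {
    let mut first_panic = None;
    for block in blocks {
        if let Err(payload) = catch_unwind(AssertUnwindSafe(block)) {
            // Later payloads are dropped, but every block still runs.
            first_panic.get_or_insert(payload);
        }
    }
    if let Some(payload) = first_panic {
        resume_unwind(payload);
    }
}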
+pub struct ParallelGuard { + panic: Mutex<Option<Box<dyn Any + std::marker::Send + 'static>>>, +} + +impl ParallelGuard { + pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> { + catch_unwind(AssertUnwindSafe(f)) + .map_err(|err| { + *self.panic.lock() = Some(err); + }) + .ok() + } +} + +/// This gives access to a fresh parallel guard in the closure and will unwind any panics +/// caught in it after the closure returns. +#[inline] +pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R { + let guard = ParallelGuard { panic: Mutex::new(None) }; + let ret = f(&guard); + if let Some(panic) = guard.panic.into_inner() { + resume_unwind(panic); + } + ret +} + cfg_if! { if #[cfg(not(parallel_compiler))] { + use std::ops::Add; + use std::cell::Cell; + pub unsafe auto trait Send {} pub unsafe auto trait Sync {} unsafe impl<T> Send for T {} unsafe impl<T> Sync for T {} - use std::ops::Add; - /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc. /// It has explicit ordering arguments and is only intended for use with /// the native atomic types. @@ -186,67 +231,38 @@ cfg_if! { where A: FnOnce() -> RA, B: FnOnce() -> RB { - (oper_a(), oper_b()) + let (a, b) = parallel_guard(|guard| { + let a = guard.run(oper_a); + let b = guard.run(oper_b); + (a, b) + }); + (a.unwrap(), b.unwrap()) } #[macro_export] macro_rules! parallel { - ($($blocks:block),*) => { - // We catch panics here ensuring that all the blocks execute. - // This makes behavior consistent with the parallel compiler. - let mut panic = None; - $( - if let Err(p) = ::std::panic::catch_unwind( - ::std::panic::AssertUnwindSafe(|| $blocks) - ) { - if panic.is_none() { - panic = Some(p); - } - } - )* - if let Some(panic) = panic { - ::std::panic::resume_unwind(panic); - } - } + ($($blocks:block),*) => {{ + $crate::sync::parallel_guard(|guard| { + $(guard.run(|| $blocks);)* + }); + }} } pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) { - // We catch panics here ensuring that all the loop iterations execute. - // This makes behavior consistent with the parallel compiler. - let mut panic = None; - t.into_iter().for_each(|i| { - if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) { - if panic.is_none() { - panic = Some(p); - } - } - }); - if let Some(panic) = panic { - resume_unwind(panic); - } + parallel_guard(|guard| { + t.into_iter().for_each(|i| { + guard.run(|| for_each(i)); + }); + }) } pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>( t: T, mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R, ) -> C { - // We catch panics here ensuring that all the loop iterations execute. - let mut panic = None; - let r = t.into_iter().filter_map(|i| { - match catch_unwind(AssertUnwindSafe(|| map(i))) { - Ok(r) => Some(r), - Err(p) => { - if panic.is_none() { - panic = Some(p); - } - None - } - } - }).collect(); - if let Some(panic) = panic { - resume_unwind(panic); - } - r + parallel_guard(|guard| { + t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() + }) } pub use std::rc::Rc as Lrc; @@ -255,15 +271,11 @@ cfg_if! 
{ pub use std::cell::Ref as MappedReadGuard; pub use std::cell::RefMut as WriteGuard; pub use std::cell::RefMut as MappedWriteGuard; - pub use std::cell::RefMut as LockGuard; pub use std::cell::RefMut as MappedLockGuard; - pub use std::cell::OnceCell; + pub use std::cell::OnceCell as OnceLock; use std::cell::RefCell as InnerRwLock; - use std::cell::RefCell as InnerLock; - - use std::cell::Cell; pub type MTLockRef<'a, T> = &'a mut MTLock<T>; @@ -313,10 +325,9 @@ cfg_if! { pub use parking_lot::RwLockWriteGuard as WriteGuard; pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard; - pub use parking_lot::MutexGuard as LockGuard; pub use parking_lot::MappedMutexGuard as MappedLockGuard; - pub use std::sync::OnceLock as OnceCell; + pub use std::sync::OnceLock; pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64}; @@ -355,7 +366,6 @@ cfg_if! { } } - use parking_lot::Mutex as InnerLock; use parking_lot::RwLock as InnerRwLock; use std::thread; @@ -372,7 +382,12 @@ cfg_if! { let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()())); (a.into_inner(), b.into_inner()) } else { - (oper_a(), oper_b()) + let (a, b) = parallel_guard(|guard| { + let a = guard.run(oper_a); + let b = guard.run(oper_b); + (a, b) + }); + (a.unwrap(), b.unwrap()) } } @@ -407,28 +422,10 @@ cfg_if! { // of a single threaded rustc. parallel!(impl $fblock [] [$($blocks),*]); } else { - // We catch panics here ensuring that all the blocks execute. - // This makes behavior consistent with the parallel compiler. - let mut panic = None; - if let Err(p) = ::std::panic::catch_unwind( - ::std::panic::AssertUnwindSafe(|| $fblock) - ) { - if panic.is_none() { - panic = Some(p); - } - } - $( - if let Err(p) = ::std::panic::catch_unwind( - ::std::panic::AssertUnwindSafe(|| $blocks) - ) { - if panic.is_none() { - panic = Some(p); - } - } - )* - if let Some(panic) = panic { - ::std::panic::resume_unwind(panic); - } + $crate::sync::parallel_guard(|guard| { + guard.run(|| $fblock); + $(guard.run(|| $blocks);)* + }); } }; } @@ -439,34 +436,18 @@ cfg_if! { t: T, for_each: impl Fn(I) + DynSync + DynSend ) { - if mode::is_dyn_thread_safe() { - let for_each = FromDyn::from(for_each); - let panic: Lock<Option<_>> = Lock::new(None); - t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) { - let mut l = panic.lock(); - if l.is_none() { - *l = Some(p) - } - }); - - if let Some(panic) = panic.into_inner() { - resume_unwind(panic); - } - } else { - // We catch panics here ensuring that all the loop iterations execute. - // This makes behavior consistent with the parallel compiler. - let mut panic = None; - t.into_iter().for_each(|i| { - if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) { - if panic.is_none() { - panic = Some(p); - } - } - }); - if let Some(panic) = panic { - resume_unwind(panic); + parallel_guard(|guard| { + if mode::is_dyn_thread_safe() { + let for_each = FromDyn::from(for_each); + t.into_par_iter().for_each(|i| { + guard.run(|| for_each(i)); + }); + } else { + t.into_iter().for_each(|i| { + guard.run(|| for_each(i)); + }); } - } + }); } pub fn par_map< @@ -478,46 +459,14 @@ cfg_if! { t: T, map: impl Fn(I) -> R + DynSync + DynSend ) -> C { - if mode::is_dyn_thread_safe() { - let panic: Lock<Option<_>> = Lock::new(None); - let map = FromDyn::from(map); - // We catch panics here ensuring that all the loop iterations execute. 
- let r = t.into_par_iter().filter_map(|i| { - match catch_unwind(AssertUnwindSafe(|| map(i))) { - Ok(r) => Some(r), - Err(p) => { - let mut l = panic.lock(); - if l.is_none() { - *l = Some(p); - } - None - }, - } - }).collect(); - - if let Some(panic) = panic.into_inner() { - resume_unwind(panic); - } - r - } else { - // We catch panics here ensuring that all the loop iterations execute. - let mut panic = None; - let r = t.into_iter().filter_map(|i| { - match catch_unwind(AssertUnwindSafe(|| map(i))) { - Ok(r) => Some(r), - Err(p) => { - if panic.is_none() { - panic = Some(p); - } - None - } - } - }).collect(); - if let Some(panic) = panic { - resume_unwind(panic); + parallel_guard(|guard| { + if mode::is_dyn_thread_safe() { + let map = FromDyn::from(map); + t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect() + } else { + t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() } - r - } + }) } /// This makes locks panic if they are already held. @@ -542,81 +491,6 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> } } -#[derive(Debug)] -pub struct Lock<T>(InnerLock<T>); - -impl<T> Lock<T> { - #[inline(always)] - pub fn new(inner: T) -> Self { - Lock(InnerLock::new(inner)) - } - - #[inline(always)] - pub fn into_inner(self) -> T { - self.0.into_inner() - } - - #[inline(always)] - pub fn get_mut(&mut self) -> &mut T { - self.0.get_mut() - } - - #[cfg(parallel_compiler)] - #[inline(always)] - pub fn try_lock(&self) -> Option<LockGuard<'_, T>> { - self.0.try_lock() - } - - #[cfg(not(parallel_compiler))] - #[inline(always)] - pub fn try_lock(&self) -> Option<LockGuard<'_, T>> { - self.0.try_borrow_mut().ok() - } - - #[cfg(parallel_compiler)] - #[inline(always)] - #[track_caller] - pub fn lock(&self) -> LockGuard<'_, T> { - if ERROR_CHECKING { - self.0.try_lock().expect("lock was already held") - } else { - self.0.lock() - } - } - - #[cfg(not(parallel_compiler))] - #[inline(always)] - #[track_caller] - pub fn lock(&self) -> LockGuard<'_, T> { - self.0.borrow_mut() - } - - #[inline(always)] - #[track_caller] - pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R { - f(&mut *self.lock()) - } - - #[inline(always)] - #[track_caller] - pub fn borrow(&self) -> LockGuard<'_, T> { - self.lock() - } - - #[inline(always)] - #[track_caller] - pub fn borrow_mut(&self) -> LockGuard<'_, T> { - self.lock() - } -} - -impl<T: Default> Default for Lock<T> { - #[inline] - fn default() -> Self { - Lock::new(T::default()) - } -} - #[derive(Debug, Default)] pub struct RwLock<T>(InnerRwLock<T>); diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs new file mode 100644 index 00000000000..62cd1b993de --- /dev/null +++ b/compiler/rustc_data_structures/src/sync/lock.rs @@ -0,0 +1,276 @@ +//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true. +//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits. +//! +//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`. 
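
The `ParallelGuard`/`parallel_guard` helper introduced above centralizes the catch-and-defer panic handling that was previously duplicated across `join`, `parallel!`, `par_for_each_in`, and `par_map`: every block or iteration runs to completion, and a captured panic is only resumed once the whole guarded region is done. A minimal std-only sketch of the pattern (the names below are illustrative, not part of the patch):

```rust
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

fn main() {
    // Run every block to completion, remember a single panic payload, and
    // resume unwinding only after all blocks have had a chance to run.
    let mut first_panic = None;
    let mut run = |f: &dyn Fn()| {
        if let Err(payload) = catch_unwind(AssertUnwindSafe(f)) {
            first_panic.get_or_insert(payload);
        }
    };
    run(&|| println!("block 1"));
    run(&|| panic!("boom")); // captured; the remaining blocks still execute
    run(&|| println!("block 3"));
    if let Some(payload) = first_panic {
        resume_unwind(payload); // unwind with the captured panic at the end
    }
}
```

This mirrors the behavior of the parallel compiler, where sibling tasks keep running even when one of them panics.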
+
+#[cfg(not(parallel_compiler))]
+use std::cell::RefCell;
+#[cfg(parallel_compiler)]
+use {
+    crate::cold_path,
+    crate::sync::DynSend,
+    crate::sync::DynSync,
+    parking_lot::lock_api::RawMutex,
+    std::cell::Cell,
+    std::cell::UnsafeCell,
+    std::fmt,
+    std::intrinsics::{likely, unlikely},
+    std::marker::PhantomData,
+    std::mem::ManuallyDrop,
+    std::ops::{Deref, DerefMut},
+};
+
+#[cfg(not(parallel_compiler))]
+pub use std::cell::RefMut as LockGuard;
+
+#[cfg(not(parallel_compiler))]
+#[derive(Debug)]
+pub struct Lock<T>(RefCell<T>);
+
+#[cfg(not(parallel_compiler))]
+impl<T> Lock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        Lock(RefCell::new(inner))
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.0.into_inner()
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.0.get_mut()
+    }
+
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        self.0.try_borrow_mut().ok()
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        self.0.borrow_mut()
+    }
+}
+
+/// A guard holding mutable access to a `Lock` which is in a locked state.
+#[cfg(parallel_compiler)]
+#[must_use = "if unused the Lock will immediately unlock"]
+pub struct LockGuard<'a, T> {
+    lock: &'a Lock<T>,
+    marker: PhantomData<&'a mut T>,
+}
+
+#[cfg(parallel_compiler)]
+impl<'a, T: 'a> Deref for LockGuard<'a, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        // SAFETY: We have shared access to the mutable access owned by this type,
+        // so we can give out a shared reference.
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+#[cfg(parallel_compiler)]
+impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: We have mutable access to the data so we can give out a mutable reference.
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+#[cfg(parallel_compiler)]
+impl<'a, T: 'a> Drop for LockGuard<'a, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: We know that the lock is in a locked
+        // state because it is an invariant of this type.
+        unsafe { self.lock.raw.unlock() };
+    }
+}
+
+#[cfg(parallel_compiler)]
+union LockRawUnion {
+    /// Indicates if the cell is locked. Only used if `LockRaw.sync` is false.
+    cell: ManuallyDrop<Cell<bool>>,
+
+    /// A lock implementation that's only used if `LockRaw.sync` is true.
+    lock: ManuallyDrop<parking_lot::RawMutex>,
+}
+
+/// A raw lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+/// It contains no associated data and is used in the implementation of `Lock` which does have such data.
+///
+/// A manual implementation of a tagged union is used with the `sync` field and the `LockRawUnion` instead
+/// of using enums as it results in better code generation.
+#[cfg(parallel_compiler)]
+struct LockRaw {
+    /// Indicates if synchronization is used via `opt.lock` if true,
+    /// or if a non-thread-safe cell is used via `opt.cell`. This is set on initialization and never changed.
+    sync: bool,
+    opt: LockRawUnion,
+}
+
+#[cfg(parallel_compiler)]
+impl LockRaw {
+    fn new() -> Self {
+        if unlikely(super::mode::might_be_dyn_thread_safe()) {
+            // Create the lock with synchronization enabled using the `RawMutex` type.
+            LockRaw {
+                sync: true,
+                opt: LockRawUnion { lock: ManuallyDrop::new(parking_lot::RawMutex::INIT) },
+            }
+        } else {
+            // Create the lock with synchronization disabled.
+            LockRaw { sync: false, opt: LockRawUnion { cell: ManuallyDrop::new(Cell::new(false)) } }
+        }
+    }
+
+    #[inline(always)]
+    fn try_lock(&self) -> bool {
+        // SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
+        unsafe {
+            if likely(!self.sync) {
+                if self.opt.cell.get() {
+                    false
+                } else {
+                    self.opt.cell.set(true);
+                    true
+                }
+            } else {
+                self.opt.lock.try_lock()
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn lock(&self) {
+        if super::ERROR_CHECKING {
+            // We're in debugging mode, so assert that the lock is not held so we
+            // get a panic instead of waiting for the lock.
+            assert!(self.try_lock(), "lock must not be held");
+        } else {
+            // SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
+            unsafe {
+                if likely(!self.sync) {
+                    if unlikely(self.opt.cell.replace(true)) {
+                        cold_path(|| panic!("lock was already held"))
+                    }
+                } else {
+                    self.opt.lock.lock();
+                }
+            }
+        }
+    }
+
+    /// This unlocks the lock.
+    ///
+    /// # Safety
+    /// This method may only be called if the lock is currently held.
+    #[inline(always)]
+    unsafe fn unlock(&self) {
+        // SAFETY: The union use is safe since the union fields are used in accordance with
+        // `self.sync` and the `unlock` method precondition is upheld by the caller.
+        unsafe {
+            if likely(!self.sync) {
+                debug_assert!(self.opt.cell.get());
+                self.opt.cell.set(false);
+            } else {
+                self.opt.lock.unlock();
+            }
+        }
+    }
+}
+
+/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
+#[cfg(parallel_compiler)]
+pub struct Lock<T> {
+    raw: LockRaw,
+    data: UnsafeCell<T>,
+}
+
+#[cfg(parallel_compiler)]
+impl<T> Lock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        Lock { raw: LockRaw::new(), data: UnsafeCell::new(inner) }
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.data.get_mut()
+    }
+
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        if self.raw.try_lock() { Some(LockGuard { lock: self, marker: PhantomData }) } else { None }
+    }
+
+    #[inline(always)]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        self.raw.lock();
+        LockGuard { lock: self, marker: PhantomData }
+    }
+}
+
+impl<T> Lock<T> {
+    #[inline(always)]
+    #[track_caller]
+    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+        f(&mut *self.lock())
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn borrow(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSend> DynSend for Lock<T> {}
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSend> DynSync for Lock<T> {}
+
+#[cfg(parallel_compiler)]
+impl<T: fmt::Debug> fmt::Debug for Lock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("Lock").field("data", &LockedPlaceholder).finish()
+            }
+        }
+    }
+}
+
+impl<T: Default> Default for Lock<T> {
+    #[inline]
+    fn default() -> Self {
+        Lock::new(T::default())
+    }
+}
diff --git
a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs index 27d687c3cfe..1f838cc4648 100644 --- a/compiler/rustc_data_structures/src/sync/worker_local.rs +++ b/compiler/rustc_data_structures/src/sync/worker_local.rs @@ -1,4 +1,4 @@ -use crate::sync::Lock; +use parking_lot::Mutex; use std::cell::Cell; use std::cell::OnceCell; use std::ops::Deref; @@ -35,7 +35,7 @@ impl RegistryId { struct RegistryData { thread_limit: usize, - threads: Lock<usize>, + threads: Mutex<usize>, } /// Represents a list of threads which can access worker locals. @@ -65,7 +65,7 @@ thread_local! { impl Registry { /// Creates a registry which can hold up to `thread_limit` threads. pub fn new(thread_limit: usize) -> Self { - Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) })) + Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) })) } /// Gets the registry associated with the current thread. Panics if there's no such registry. diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 841c626d0a3..1a16759d7f9 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -85,6 +85,15 @@ pub mod pretty; #[macro_use] mod print; mod session_diagnostics; +#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))] +mod signal_handler; + +#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))] +mod signal_handler { + /// On platforms which don't support our signal handler's requirements, + /// simply use the default signal handler provided by std. + pub(super) fn install() {} +} use crate::session_diagnostics::{ RLinkEmptyVersionNumber, RLinkEncodingVersionMismatch, RLinkRustcVersionMismatch, @@ -852,11 +861,9 @@ fn print_crate_info( use rustc_target::spec::current_apple_deployment_target; if sess.target.is_like_osx { - println_info!( - "deployment_target={}", - current_apple_deployment_target(&sess.target) - .expect("unknown Apple target OS") - ) + let (major, minor) = current_apple_deployment_target(&sess.target) + .expect("unknown Apple target OS"); + println_info!("deployment_target={}", format!("{major}.{minor}")) } else { handler .early_error("only Apple targets currently support deployment version info") @@ -1442,72 +1449,6 @@ pub fn init_env_logger(handler: &EarlyErrorHandler, env: &str) { } } -#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))] -mod signal_handler { - extern "C" { - fn backtrace_symbols_fd( - buffer: *const *mut libc::c_void, - size: libc::c_int, - fd: libc::c_int, - ); - } - - extern "C" fn print_stack_trace(_: libc::c_int) { - const MAX_FRAMES: usize = 256; - static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] = - [std::ptr::null_mut(); MAX_FRAMES]; - unsafe { - let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32); - if depth == 0 { - return; - } - backtrace_symbols_fd(STACK_TRACE.as_ptr(), depth, 2); - } - } - - /// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the - /// process, print a stack trace and then exit. 
- pub(super) fn install() { - use std::alloc::{alloc, Layout}; - - unsafe { - let alt_stack_size: usize = min_sigstack_size() + 64 * 1024; - let mut alt_stack: libc::stack_t = std::mem::zeroed(); - alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast(); - alt_stack.ss_size = alt_stack_size; - libc::sigaltstack(&alt_stack, std::ptr::null_mut()); - - let mut sa: libc::sigaction = std::mem::zeroed(); - sa.sa_sigaction = print_stack_trace as libc::sighandler_t; - sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK; - libc::sigemptyset(&mut sa.sa_mask); - libc::sigaction(libc::SIGSEGV, &sa, std::ptr::null_mut()); - } - } - - /// Modern kernels on modern hardware can have dynamic signal stack sizes. - #[cfg(any(target_os = "linux", target_os = "android"))] - fn min_sigstack_size() -> usize { - const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51; - let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) }; - // If getauxval couldn't find the entry, it returns 0, - // so take the higher of the "constant" and auxval. - // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ - libc::MINSIGSTKSZ.max(dynamic_sigstksz as _) - } - - /// Not all OS support hardware where this is needed. - #[cfg(not(any(target_os = "linux", target_os = "android")))] - fn min_sigstack_size() -> usize { - libc::MINSIGSTKSZ - } -} - -#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))] -mod signal_handler { - pub(super) fn install() {} -} - pub fn main() -> ! { let start_time = Instant::now(); let start_rss = get_resident_set_size(); diff --git a/compiler/rustc_driver_impl/src/signal_handler.rs b/compiler/rustc_driver_impl/src/signal_handler.rs new file mode 100644 index 00000000000..deca1082221 --- /dev/null +++ b/compiler/rustc_driver_impl/src/signal_handler.rs @@ -0,0 +1,142 @@ +//! Signal handler for rustc +//! Primarily used to extract a backtrace from stack overflow + +use std::alloc::{alloc, Layout}; +use std::{fmt, mem, ptr}; + +extern "C" { + fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int); +} + +fn backtrace_stderr(buffer: &[*mut libc::c_void]) { + let size = buffer.len().try_into().unwrap_or_default(); + unsafe { backtrace_symbols_fd(buffer.as_ptr(), size, libc::STDERR_FILENO) }; +} + +/// Unbuffered, unsynchronized writer to stderr. +/// +/// Only acceptable because everything will end soon anyways. +struct RawStderr(()); + +impl fmt::Write for RawStderr { + fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { + let ret = unsafe { libc::write(libc::STDERR_FILENO, s.as_ptr().cast(), s.len()) }; + if ret == -1 { Err(fmt::Error) } else { Ok(()) } + } +} + +/// We don't really care how many bytes we actually get out. SIGSEGV comes for our head. +/// Splash stderr with letters of our own blood to warn our friends about the monster. +macro raw_errln($tokens:tt) { + let _ = ::core::fmt::Write::write_fmt(&mut RawStderr(()), format_args!($tokens)); + let _ = ::core::fmt::Write::write_char(&mut RawStderr(()), '\n'); +} + +/// Signal handler installed for SIGSEGV +extern "C" fn print_stack_trace(_: libc::c_int) { + const MAX_FRAMES: usize = 256; + // Reserve data segment so we don't have to malloc in a signal handler, which might fail + // in incredibly undesirable and unexpected ways due to e.g. 
the allocator deadlocking + static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] = [ptr::null_mut(); MAX_FRAMES]; + let stack = unsafe { + // Collect return addresses + let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32); + if depth == 0 { + return; + } + &STACK_TRACE.as_slice()[0..(depth as _)] + }; + + // Just a stack trace is cryptic. Explain what we're doing. + raw_errln!("error: rustc interrupted by SIGSEGV, printing backtrace\n"); + let mut written = 1; + let mut consumed = 0; + // Begin elaborating return addrs into symbols and writing them directly to stderr + // Most backtraces are stack overflow, most stack overflows are from recursion + // Check for cycles before writing 250 lines of the same ~5 symbols + let cycled = |(runner, walker)| runner == walker; + let mut cyclic = false; + if let Some(period) = stack.iter().skip(1).step_by(2).zip(stack).position(cycled) { + let period = period.saturating_add(1); // avoid "what if wrapped?" branches + let Some(offset) = stack.iter().skip(period).zip(stack).position(cycled) else { + // impossible. + return; + }; + + // Count matching trace slices, else we could miscount "biphasic cycles" + // with the same period + loop entry but a different inner loop + let next_cycle = stack[offset..].chunks_exact(period).skip(1); + let cycles = 1 + next_cycle + .zip(stack[offset..].chunks_exact(period)) + .filter(|(next, prev)| next == prev) + .count(); + backtrace_stderr(&stack[..offset]); + written += offset; + consumed += offset; + if cycles > 1 { + raw_errln!("\n### cycle encountered after {offset} frames with period {period}"); + backtrace_stderr(&stack[consumed..consumed + period]); + raw_errln!("### recursed {cycles} times\n"); + written += period + 4; + consumed += period * cycles; + cyclic = true; + }; + } + let rem = &stack[consumed..]; + backtrace_stderr(rem); + raw_errln!(""); + written += rem.len() + 1; + + let random_depth = || 8 * 16; // chosen by random diceroll (2d20) + if cyclic || stack.len() > random_depth() { + // technically speculation, but assert it with confidence anyway. + // rustc only arrived in this signal handler because bad things happened + // and this message is for explaining it's not the programmer's fault + raw_errln!("note: rustc unexpectedly overflowed its stack! this is a bug"); + written += 1; + } + if stack.len() == MAX_FRAMES { + raw_errln!("note: maximum backtrace depth reached, frames may have been lost"); + written += 1; + } + raw_errln!("note: we would appreciate a report at https://github.com/rust-lang/rust"); + written += 1; + if written > 24 { + // We probably just scrolled the earlier "we got SIGSEGV" message off the terminal + raw_errln!("note: backtrace dumped due to SIGSEGV! resuming signal"); + }; +} + +/// When SIGSEGV is delivered to the process, print a stack trace and then exit. +pub(super) fn install() { + unsafe { + let alt_stack_size: usize = min_sigstack_size() + 64 * 1024; + let mut alt_stack: libc::stack_t = mem::zeroed(); + alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast(); + alt_stack.ss_size = alt_stack_size; + libc::sigaltstack(&alt_stack, ptr::null_mut()); + + let mut sa: libc::sigaction = mem::zeroed(); + sa.sa_sigaction = print_stack_trace as libc::sighandler_t; + sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK; + libc::sigemptyset(&mut sa.sa_mask); + libc::sigaction(libc::SIGSEGV, &sa, ptr::null_mut()); + } +} + +/// Modern kernels on modern hardware can have dynamic signal stack sizes. 
+#[cfg(any(target_os = "linux", target_os = "android"))]
+fn min_sigstack_size() -> usize {
+    const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
+    let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
+    // If getauxval couldn't find the entry, it returns 0,
+    // so take the higher of the "constant" and auxval.
+    // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
+    libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
+}
+
+/// Not all OSes support hardware where this is needed.
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+fn min_sigstack_size() -> usize {
+    libc::MINSIGSTKSZ
+}
diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs
index 2daf591bd65..89c44c6ec8a 100644
--- a/compiler/rustc_error_codes/src/error_codes.rs
+++ b/compiler/rustc_error_codes/src/error_codes.rs
@@ -608,6 +608,7 @@ E0794: include_str!("./error_codes/E0794.md"),
 // E0420, // merged into 532
 // E0421, // merged into 531
 // E0427, // merged into 530
+// E0445, // merged into 446 and type privacy lints
 // E0456, // plugin `..` is not available for triple `..`
 // E0465, // removed: merged with E0464
 // E0467, // removed
diff --git a/compiler/rustc_error_codes/src/error_codes/E0445.md b/compiler/rustc_error_codes/src/error_codes/E0445.md
index e6a28a9c2c4..d47393194c5 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0445.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0445.md
@@ -1,10 +1,10 @@
-A private trait was used on a public type parameter bound.
+#### Note: this error code is no longer emitted by the compiler.
 
-Erroneous code examples:
+A private trait was used on a public type parameter bound.
 
-```compile_fail,E0445
-#![deny(private_in_public)]
+Previously erroneous code example:
 
+```
 trait Foo {
     fn dummy(&self) { }
 }
diff --git a/compiler/rustc_error_codes/src/error_codes/E0446.md b/compiler/rustc_error_codes/src/error_codes/E0446.md
index 6ec47c4962c..ebbd83c683c 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0446.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0446.md
@@ -1,16 +1,16 @@
-A private type was used in a public type signature.
+A private type or trait was used in a public associated type signature.
 
 Erroneous code example:
 
 ```compile_fail,E0446
-#![deny(private_in_public)]
-struct Bar(u32);
-
-mod foo {
-    use crate::Bar;
-    pub fn bar() -> Bar { // error: private type in public interface
-        Bar(0)
-    }
+struct Bar;
+
+pub trait PubTr {
+    type Alias;
+}
+
+impl PubTr for u8 {
+    type Alias = Bar; // error: private type in public interface
 }
 
 fn main() {}
@@ -22,13 +22,14 @@
 This is done by using pub(crate) or pub(in crate::my_mod::etc)
 Example:
 
 ```
-struct Bar(u32);
+struct Bar;
+
+pub(crate) trait PubTr { // only public to crate root
+    type Alias;
+}
 
-mod foo {
-    use crate::Bar;
-    pub(crate) fn bar() -> Bar { // only public to crate root
-        Bar(0)
-    }
+impl PubTr for u8 {
+    type Alias = Bar;
 }
 
 fn main() {}
@@ -38,12 +39,15 @@ The other way to solve this error is to make the private type public.
 Example:
 
 ```
-pub struct Bar(u32); // we set the Bar type public
-mod foo {
-    use crate::Bar;
-    pub fn bar() -> Bar { // ok!
-        Bar(0)
-    }
+
+pub struct Bar; // we set the Bar type public
+
+pub trait PubTr {
+    type Alias;
+}
+
+impl PubTr for u8 {
+    type Alias = Bar;
 }
 
 fn main() {}
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index 0cae06881b1..1811738c011 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -24,7 +24,7 @@ use rustc_lint_defs::pluralize;
 
 use derive_setters::Setters;
 use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
-use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::sync::{DynSend, IntoDynSyncSend, Lrc};
 use rustc_error_messages::{FluentArgs, SpanLabel};
 use rustc_span::hygiene::{ExpnKind, MacroKind};
 use std::borrow::Cow;
@@ -188,6 +188,8 @@ impl Margin {
 
 const ANONYMIZED_LINE_NUM: &str = "LL";
 
+pub type DynEmitter = dyn Emitter + DynSend;
+
 /// Emitter trait for emitting errors.
 pub trait Emitter: Translate {
     /// Emit a structured diagnostic.
@@ -625,7 +627,7 @@ impl ColorConfig {
 #[derive(Setters)]
 pub struct EmitterWriter {
     #[setters(skip)]
-    dst: Destination,
+    dst: IntoDynSyncSend<Destination>,
     sm: Option<Lrc<SourceMap>>,
     fluent_bundle: Option<Lrc<FluentBundle>>,
     #[setters(skip)]
@@ -655,7 +657,7 @@ impl EmitterWriter {
 
     fn create(dst: Destination, fallback_bundle: LazyFallbackBundle) -> EmitterWriter {
         EmitterWriter {
-            dst,
+            dst: IntoDynSyncSend(dst),
             sm: None,
             fluent_bundle: None,
             fallback_bundle,
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
index b8f58e3057c..390bf28df09 100644
--- a/compiler/rustc_errors/src/json.rs
+++ b/compiler/rustc_errors/src/json.rs
@@ -22,7 +22,7 @@ use crate::{
 };
 use rustc_lint_defs::Applicability;
 
-use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::sync::{IntoDynSyncSend, Lrc};
 use rustc_error_messages::FluentArgs;
 use rustc_span::hygiene::ExpnData;
 use rustc_span::Span;
@@ -38,7 +38,7 @@ use serde::Serialize;
 mod tests;
 
 pub struct JsonEmitter {
-    dst: Box<dyn Write + Send>,
+    dst: IntoDynSyncSend<Box<dyn Write + Send>>,
     registry: Option<Registry>,
     sm: Lrc<SourceMap>,
     fluent_bundle: Option<Lrc<FluentBundle>>,
@@ -66,7 +66,7 @@ impl JsonEmitter {
         terminal_url: TerminalUrl,
     ) -> JsonEmitter {
         JsonEmitter {
-            dst: Box::new(io::BufWriter::new(io::stderr())),
+            dst: IntoDynSyncSend(Box::new(io::BufWriter::new(io::stderr()))),
             registry,
             sm: source_map,
             fluent_bundle,
@@ -120,7 +120,7 @@ impl JsonEmitter {
         terminal_url: TerminalUrl,
     ) -> JsonEmitter {
         JsonEmitter {
-            dst,
+            dst: IntoDynSyncSend(dst),
             registry,
             sm: source_map,
             fluent_bundle,
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index b7e1b0c8ad1..9bb1a6a2b14 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -30,11 +30,11 @@ pub use emitter::ColorConfig;
 use rustc_lint_defs::LintExpectationId;
 use Level::*;
 
-use emitter::{is_case_difference, Emitter, EmitterWriter};
+use emitter::{is_case_difference, DynEmitter, Emitter, EmitterWriter};
 use registry::Registry;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
 use rustc_data_structures::stable_hasher::{Hash128, StableHasher};
-use rustc_data_structures::sync::{self, IntoDynSyncSend, Lock, Lrc};
+use rustc_data_structures::sync::{Lock, Lrc};
 use rustc_data_structures::AtomicRef;
 pub use rustc_error_messages::{
     fallback_fluent_bundle, fluent_bundle, DelayDm, DiagnosticMessage, FluentBundle,
@@ -55,7 +55,9 @@
 use std::num::NonZeroUsize;
 use std::panic;
 use std::path::{Path, PathBuf};
 
-use
termcolor::{Color, ColorSpec}; +// Used by external projects such as `rust-gpu`. +// See https://github.com/rust-lang/rust/pull/115393. +pub use termcolor::{Color, ColorSpec, WriteColor}; pub mod annotate_snippet_emitter_writer; mod diagnostic; @@ -428,7 +430,7 @@ struct HandlerInner { err_count: usize, warn_count: usize, deduplicated_err_count: usize, - emitter: IntoDynSyncSend<Box<dyn Emitter + sync::Send>>, + emitter: Box<DynEmitter>, delayed_span_bugs: Vec<DelayedDiagnostic>, delayed_good_path_bugs: Vec<DelayedDiagnostic>, /// This flag indicates that an expected diagnostic was emitted and suppressed. @@ -594,7 +596,7 @@ impl Handler { self } - pub fn with_emitter(emitter: Box<dyn Emitter + sync::Send>) -> Self { + pub fn with_emitter(emitter: Box<DynEmitter>) -> Self { Self { inner: Lock::new(HandlerInner { flags: HandlerFlags { can_emit_warnings: true, ..Default::default() }, @@ -603,7 +605,7 @@ impl Handler { warn_count: 0, deduplicated_err_count: 0, deduplicated_warn_count: 0, - emitter: IntoDynSyncSend(emitter), + emitter, delayed_span_bugs: Vec::new(), delayed_good_path_bugs: Vec::new(), suppressed_expected_diag: false, diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs index b1f74643b69..afcf30d0b29 100644 --- a/compiler/rustc_feature/src/accepted.rs +++ b/compiler/rustc_feature/src/accepted.rs @@ -54,7 +54,7 @@ declare_features! ( /// instead of just the platforms on which it is the C ABI. (accepted, abi_sysv64, "1.24.0", Some(36167), None), /// Allows using the `thiscall` ABI. - (accepted, abi_thiscall, "1.19.0", None, None), + (accepted, abi_thiscall, "1.73.0", None, None), /// Allows using ADX intrinsics from `core::arch::{x86, x86_64}`. (accepted, adx_target_feature, "1.61.0", Some(44839), None), /// Allows explicit discriminants on non-unit enum variants. diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 168cc16bb36..e745ef1ec07 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -813,6 +813,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ rustc_attr!(TEST, rustc_insignificant_dtor, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_strict_coherence, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_variance, Normal, template!(Word), WarnFollowing), + rustc_attr!(TEST, rustc_variance_of_opaques, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ..."), WarnFollowing), rustc_attr!(TEST, rustc_abi, Normal, template!(List: "field1, field2, ..."), WarnFollowing), rustc_attr!(TEST, rustc_regions, Normal, template!(Word), WarnFollowing), diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index 0bfd62d68b2..a9b25a390b3 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -3729,6 +3729,8 @@ impl<'hir> Node<'hir> { Node::Lifetime(lt) => Some(lt.ident), Node::GenericParam(p) => Some(p.name.ident()), Node::TypeBinding(b) => Some(b.ident), + Node::PatField(f) => Some(f.ident), + Node::ExprField(f) => Some(f.ident), Node::Param(..) | Node::AnonConst(..) | Node::ConstBlock(..) @@ -3737,8 +3739,6 @@ impl<'hir> Node<'hir> { | Node::Block(..) | Node::Ctor(..) | Node::Pat(..) - | Node::PatField(..) - | Node::ExprField(..) | Node::Arm(..) | Node::Local(..) | Node::Crate(..) 
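
The `DynEmitter` alias added above names the `dyn Emitter + DynSend` trait object once, so `Handler` and the various constructors no longer repeat the bounds, while `IntoDynSyncSend` wraps the raw output destinations. A rough std-only analogue of the alias pattern, with `DynWriter` standing in for `DynEmitter` (illustrative only, not part of the patch):

```rust
use std::io::{self, Write};

// One named trait-object type instead of spelling out `dyn Write + Send`
// at every use site, mirroring `pub type DynEmitter = dyn Emitter + DynSend;`.
type DynWriter = dyn Write + Send;

fn stderr_writer() -> Box<DynWriter> {
    Box::new(io::BufWriter::new(io::stderr()))
}

fn main() {
    let mut dst = stderr_writer();
    writeln!(dst, "diagnostics would be written here").unwrap();
}
```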
diff --git a/compiler/rustc_hir_analysis/src/astconv/mod.rs b/compiler/rustc_hir_analysis/src/astconv/mod.rs
index 668763f9bf6..90f64e18632 100644
--- a/compiler/rustc_hir_analysis/src/astconv/mod.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/mod.rs
@@ -910,19 +910,24 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
     ) -> Ty<'tcx> {
         let tcx = self.tcx();
         let args = self.ast_path_args_for_ty(span, did, item_segment);
-        let ty = tcx.at(span).type_of(did);
-        if let DefKind::TyAlias { lazy } = tcx.def_kind(did)
-            && (lazy || ty.skip_binder().has_opaque_types())
-        {
-            // Type aliases referring to types that contain opaque types (but aren't just directly
-            // referencing a single opaque type) as well as those defined in crates that have the
+        if let DefKind::TyAlias { lazy: true } = tcx.def_kind(did) {
+            // Type aliases defined in crates that have the
             // feature `lazy_type_alias` enabled get encoded as a type alias that normalization will
             // then actually instantiate the where bounds of.
             let alias_ty = tcx.mk_alias_ty(did, args);
             Ty::new_alias(tcx, ty::Weak, alias_ty)
         } else {
-            ty.instantiate(tcx, args)
+            let ty = tcx.at(span).type_of(did);
+            if ty.skip_binder().has_opaque_types() {
+                // Type aliases referring to types that contain opaque types (but aren't just directly
+                // referencing a single opaque type) get encoded as a type alias that normalization will
+                // then actually instantiate the where bounds of.
+                let alias_ty = tcx.mk_alias_ty(did, args);
+                Ty::new_alias(tcx, ty::Weak, alias_ty)
+            } else {
+                ty.instantiate(tcx, args)
+            }
         }
     }
@@ -2200,27 +2205,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
                 err.span_note(span, format!("type parameter `{name}` defined here"));
             }
         });
-
-                match tcx.named_bound_var(hir_id) {
-                    Some(rbv::ResolvedArg::LateBound(debruijn, index, _)) => {
-                        let name =
-                            tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id.expect_local()));
-                        let br = ty::BoundTy {
-                            var: ty::BoundVar::from_u32(index),
-                            kind: ty::BoundTyKind::Param(def_id, name),
-                        };
-                        Ty::new_bound(tcx, debruijn, br)
-                    }
-                    Some(rbv::ResolvedArg::EarlyBound(_)) => {
-                        let def_id = def_id.expect_local();
-                        let item_def_id = tcx.hir().ty_param_owner(def_id);
-                        let generics = tcx.generics_of(item_def_id);
-                        let index = generics.param_def_id_to_index[&def_id.to_def_id()];
-                        Ty::new_param(tcx, index, tcx.hir().ty_param_name(def_id))
-                    }
-                    Some(rbv::ResolvedArg::Error(guar)) => Ty::new_error(tcx, guar),
-                    arg => bug!("unexpected bound var resolution for {hir_id:?}: {arg:?}"),
-                }
+                self.hir_id_to_bound_ty(hir_id)
             }
             Res::SelfTyParam { .. } => {
                 // `Self` in trait or type alias.
@@ -2389,6 +2374,57 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
         }
     }
 
+    // Converts a hir id corresponding to a type parameter to
+    // an early-bound `ty::Param` or late-bound `ty::Bound`.
+    pub(crate) fn hir_id_to_bound_ty(&self, hir_id: hir::HirId) -> Ty<'tcx> {
+        let tcx = self.tcx();
+        match tcx.named_bound_var(hir_id) {
+            Some(rbv::ResolvedArg::LateBound(debruijn, index, def_id)) => {
+                let name = tcx.item_name(def_id);
+                let br = ty::BoundTy {
+                    var: ty::BoundVar::from_u32(index),
+                    kind: ty::BoundTyKind::Param(def_id, name),
+                };
+                Ty::new_bound(tcx, debruijn, br)
+            }
+            Some(rbv::ResolvedArg::EarlyBound(def_id)) => {
+                let def_id = def_id.expect_local();
+                let item_def_id = tcx.hir().ty_param_owner(def_id);
+                let generics = tcx.generics_of(item_def_id);
+                let index = generics.param_def_id_to_index[&def_id.to_def_id()];
+                Ty::new_param(tcx, index, tcx.hir().ty_param_name(def_id))
+            }
+            Some(rbv::ResolvedArg::Error(guar)) => Ty::new_error(tcx, guar),
+            arg => bug!("unexpected bound var resolution for {hir_id:?}: {arg:?}"),
+        }
+    }
+
+    // Converts a hir id corresponding to a const parameter to
+    // an early-bound `ConstKind::Param` or late-bound `ConstKind::Bound`.
+    pub(crate) fn hir_id_to_bound_const(
+        &self,
+        hir_id: hir::HirId,
+        param_ty: Ty<'tcx>,
+    ) -> Const<'tcx> {
+        let tcx = self.tcx();
+        match tcx.named_bound_var(hir_id) {
+            Some(rbv::ResolvedArg::EarlyBound(def_id)) => {
+                // Find the name and index of the const parameter by indexing the generics of
+                // the parent item and construct a `ParamConst`.
+                let item_def_id = tcx.parent(def_id);
+                let generics = tcx.generics_of(item_def_id);
+                let index = generics.param_def_id_to_index[&def_id];
+                let name = tcx.item_name(def_id);
+                ty::Const::new_param(tcx, ty::ParamConst::new(index, name), param_ty)
+            }
+            Some(rbv::ResolvedArg::LateBound(debruijn, index, _)) => {
+                ty::Const::new_bound(tcx, debruijn, ty::BoundVar::from_u32(index), param_ty)
+            }
+            Some(rbv::ResolvedArg::Error(guar)) => ty::Const::new_error(tcx, guar, param_ty),
+            arg => bug!("unexpected bound var resolution for {:?}: {arg:?}", hir_id),
+        }
+    }
+
     /// Parses the programmer's textual representation of a type into our
     /// internal notion of a type.
pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> { diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index 46e8cf81bc1..c0cf1ea34cf 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -823,7 +823,7 @@ fn check_impl_items_against_trait<'tcx>( }; match ty_impl_item.kind { ty::AssocKind::Const => { - let _ = tcx.compare_impl_const(( + tcx.ensure().compare_impl_const(( impl_item.expect_local(), ty_impl_item.trait_item_def_id.unwrap(), )); diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs index c930537d4ae..67aef0a7c43 100644 --- a/compiler/rustc_hir_analysis/src/coherence/builtin.rs +++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs @@ -195,8 +195,8 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef let ty_b = field.ty(tcx, args_b); if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) { - if layout.is_zst() && layout.align.abi.bytes() == 1 { - // ignore ZST fields with alignment of 1 byte + if layout.is_1zst() { + // ignore 1-ZST fields return false; } } diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs index 7b9f61d7ab2..01e40c62a8b 100644 --- a/compiler/rustc_hir_analysis/src/collect.rs +++ b/compiler/rustc_hir_analysis/src/collect.rs @@ -38,6 +38,7 @@ use rustc_trait_selection::infer::InferCtxtExt; use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName; use rustc_trait_selection::traits::ObligationCtxt; use std::iter; +use std::ops::Bound; mod generics_of; mod item_bounds; @@ -56,6 +57,7 @@ pub fn provide(providers: &mut Providers) { resolve_bound_vars::provide(providers); *providers = Providers { type_of: type_of::type_of, + type_of_opaque: type_of::type_of_opaque, item_bounds: item_bounds::item_bounds, explicit_item_bounds: item_bounds::explicit_item_bounds, generics_of: generics_of::generics_of, @@ -1143,15 +1145,15 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<ty::PolyFnSig< } Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor().is_some() => { - let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id)).instantiate_identity(); + let adt_def_id = tcx.hir().get_parent_item(hir_id).def_id.to_def_id(); + let ty = tcx.type_of(adt_def_id).instantiate_identity(); let inputs = data.fields().iter().map(|f| tcx.type_of(f.def_id).instantiate_identity()); - ty::Binder::dummy(tcx.mk_fn_sig( - inputs, - ty, - false, - hir::Unsafety::Normal, - abi::Abi::Rust, - )) + // constructors for structs with `layout_scalar_valid_range` are unsafe to call + let safety = match tcx.layout_scalar_valid_range(adt_def_id) { + (Bound::Unbounded, Bound::Unbounded) => hir::Unsafety::Normal, + _ => hir::Unsafety::Unsafe, + }; + ty::Binder::dummy(tcx.mk_fn_sig(inputs, ty, false, safety, abi::Abi::Rust)) } Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. 
}) => { diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs index 495e663666c..1298c086087 100644 --- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs @@ -162,8 +162,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen }; let generics = tcx.generics_of(def_id); - let parent_count = generics.parent_count as u32; - let has_own_self = generics.has_self && parent_count == 0; // Below we'll consider the bounds on the type parameters (including `Self`) // and the explicit where-clauses, but to get the full set of predicates @@ -189,17 +187,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen predicates.insert((trait_ref.to_predicate(tcx), tcx.def_span(def_id))); } - // Collect the region predicates that were declared inline as - // well. In the case of parameters declared on a fn or method, we - // have to be careful to only iterate over early-bound regions. - let mut index = parent_count - + has_own_self as u32 - + super::early_bound_lifetimes_from_generics(tcx, ast_generics).count() as u32; - - trace!(?predicates); - trace!(?ast_generics); - trace!(?generics); - // Collect the predicates that were written inline by the user on each // type parameter (e.g., `<T: Foo>`). Also add `ConstArgHasType` predicates // for each const parameter. @@ -208,10 +195,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen // We already dealt with early bound lifetimes above. GenericParamKind::Lifetime { .. } => (), GenericParamKind::Type { .. } => { - let name = param.name.ident().name; - let param_ty = ty::ParamTy::new(index, name).to_ty(tcx); - index += 1; - + let param_ty = icx.astconv().hir_id_to_bound_ty(param.hir_id); let mut bounds = Bounds::default(); // Params are implicitly sized unless a `?Sized` bound is found icx.astconv().add_implicitly_sized( @@ -225,23 +209,16 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen predicates.extend(bounds.clauses()); trace!(?predicates); } - GenericParamKind::Const { .. } => { - let name = param.name.ident().name; - let param_const = ty::ParamConst::new(index, name); - + hir::GenericParamKind::Const { .. } => { let ct_ty = tcx .type_of(param.def_id.to_def_id()) .no_bound_vars() .expect("const parameters cannot be generic"); - - let ct = ty::Const::new_param(tcx, param_const, ct_ty); - + let ct = icx.astconv().hir_id_to_bound_const(param.hir_id, ct_ty); predicates.insert(( ty::ClauseKind::ConstArgHasType(ct, ct_ty).to_predicate(tcx), param.span, )); - - index += 1; } } } @@ -252,8 +229,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen match predicate { hir::WherePredicate::BoundPredicate(bound_pred) => { let ty = icx.to_ty(bound_pred.bounded_ty); - let bound_vars = icx.tcx.late_bound_vars(bound_pred.hir_id); - + let bound_vars = tcx.late_bound_vars(bound_pred.hir_id); // Keep the type around in a dummy predicate, in case of no bounds. // That way, `where Ty:` is not a complete noop (see #53696) and `Ty` // is still checked for WF. 
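
Regarding the `fn_sig` change in `collect.rs` above, where tuple-struct constructors become `unsafe` to call when the type carries `layout_scalar_valid_range`: the standard library's `NonZeroU32` is built on the same valid-range machinery, and its API illustrates why a safe constructor for such a type would be unsound. A small std-only illustration:

```rust
use std::num::NonZeroU32;

fn main() {
    // The checked constructor can reject the forbidden value, so it is safe.
    assert!(NonZeroU32::new(0).is_none());

    // The unchecked constructor must be `unsafe`: a zero here would violate the
    // valid-range guarantee that lets Option<NonZeroU32> be the same size as u32.
    // SAFETY: 5 is within the valid (non-zero) scalar range.
    let n = unsafe { NonZeroU32::new_unchecked(5) };
    assert_eq!(n.get(), 5);
}
```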
@@ -296,7 +272,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen _ => bug!(), }; let pred = ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(r1, r2)) - .to_predicate(icx.tcx); + .to_predicate(tcx); (pred, span) })) } diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs index 6dd0c840de6..2bee2727725 100644 --- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs +++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs @@ -849,6 +849,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> { fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) { let scope = Scope::TraitRefBoundary { s: self.scope }; self.with(scope, |this| { + walk_list!(this, visit_generic_param, generics.params); for param in generics.params { match param.kind { GenericParamKind::Lifetime { .. } => {} @@ -865,90 +866,86 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> { } } } - for predicate in generics.predicates { - match predicate { - &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { - hir_id, - bounded_ty, - bounds, - bound_generic_params, - origin, - .. - }) => { - let (bound_vars, binders): (FxIndexMap<LocalDefId, ResolvedArg>, Vec<_>) = - bound_generic_params - .iter() - .enumerate() - .map(|(late_bound_idx, param)| { - let pair = ResolvedArg::late(late_bound_idx as u32, param); - let r = late_arg_as_bound_arg(this.tcx, &pair.1, param); - (pair, r) - }) - .unzip(); - this.record_late_bound_vars(hir_id, binders.clone()); - // Even if there are no lifetimes defined here, we still wrap it in a binder - // scope. If there happens to be a nested poly trait ref (an error), that - // will be `Concatenating` anyways, so we don't have to worry about the depth - // being wrong. - let scope = Scope::Binder { - hir_id, - bound_vars, - s: this.scope, - scope_type: BinderScopeType::Normal, - where_bound_origin: Some(origin), - }; - this.with(scope, |this| { - this.visit_ty(&bounded_ty); - walk_list!(this, visit_param_bound, bounds); + walk_list!(this, visit_where_predicate, generics.predicates); + }) + } + + fn visit_where_predicate(&mut self, predicate: &'tcx hir::WherePredicate<'tcx>) { + match predicate { + &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { + hir_id, + bounded_ty, + bounds, + bound_generic_params, + origin, + .. + }) => { + let (bound_vars, binders): (FxIndexMap<LocalDefId, ResolvedArg>, Vec<_>) = + bound_generic_params + .iter() + .enumerate() + .map(|(late_bound_idx, param)| { + let pair = ResolvedArg::late(late_bound_idx as u32, param); + let r = late_arg_as_bound_arg(self.tcx, &pair.1, param); + (pair, r) }) - } - &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { - lifetime, - bounds, - .. 
- }) => { - this.visit_lifetime(lifetime); - walk_list!(this, visit_param_bound, bounds); - - if lifetime.res != hir::LifetimeName::Static { - for bound in bounds { - let hir::GenericBound::Outlives(lt) = bound else { - continue; - }; - if lt.res != hir::LifetimeName::Static { - continue; - } - this.insert_lifetime(lt, ResolvedArg::StaticLifetime); - this.tcx.struct_span_lint_hir( - lint::builtin::UNUSED_LIFETIMES, - lifetime.hir_id, - lifetime.ident.span, - format!( - "unnecessary lifetime parameter `{}`", - lifetime.ident - ), - |lint| { - let help = format!( - "you can use the `'static` lifetime directly, in place of `{}`", - lifetime.ident, - ); - lint.help(help) - }, - ); - } + .unzip(); + self.record_late_bound_vars(hir_id, binders.clone()); + // Even if there are no lifetimes defined here, we still wrap it in a binder + // scope. If there happens to be a nested poly trait ref (an error), that + // will be `Concatenating` anyways, so we don't have to worry about the depth + // being wrong. + let scope = Scope::Binder { + hir_id, + bound_vars, + s: self.scope, + scope_type: BinderScopeType::Normal, + where_bound_origin: Some(origin), + }; + self.with(scope, |this| { + walk_list!(this, visit_generic_param, bound_generic_params); + this.visit_ty(&bounded_ty); + walk_list!(this, visit_param_bound, bounds); + }) + } + &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { + lifetime, + bounds, + .. + }) => { + self.visit_lifetime(lifetime); + walk_list!(self, visit_param_bound, bounds); + + if lifetime.res != hir::LifetimeName::Static { + for bound in bounds { + let hir::GenericBound::Outlives(lt) = bound else { + continue; + }; + if lt.res != hir::LifetimeName::Static { + continue; } - } - &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { - lhs_ty, - rhs_ty, - .. - }) => { - this.visit_ty(lhs_ty); - this.visit_ty(rhs_ty); + self.insert_lifetime(lt, ResolvedArg::StaticLifetime); + self.tcx.struct_span_lint_hir( + lint::builtin::UNUSED_LIFETIMES, + lifetime.hir_id, + lifetime.ident.span, + format!("unnecessary lifetime parameter `{}`", lifetime.ident), + |lint| { + let help = format!( + "you can use the `'static` lifetime directly, in place of `{}`", + lifetime.ident, + ); + lint.help(help) + }, + ); } } } - }) + &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { lhs_ty, rhs_ty, .. }) => { + self.visit_ty(lhs_ty); + self.visit_ty(rhs_ty); + } + } } fn visit_param_bound(&mut self, bound: &'tcx hir::GenericBound<'tcx>) { @@ -986,6 +983,18 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> { intravisit::walk_anon_const(this, c); }); } + + fn visit_generic_param(&mut self, p: &'tcx GenericParam<'tcx>) { + match p.kind { + GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => { + self.resolve_type_ref(p.def_id, p.hir_id); + } + GenericParamKind::Lifetime { .. } => { + // No need to resolve lifetime params, we don't use them for things + // like implicit `?Sized` or const-param-has-ty predicates. 
+ } + } + } } fn object_lifetime_default(tcx: TyCtxt<'_>, param_def_id: LocalDefId) -> ObjectLifetimeDefault { diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs index 2bbdbe3a1f6..d12337687e2 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs @@ -1,7 +1,8 @@ use rustc_errors::{Applicability, StashKey}; use rustc_hir as hir; -use rustc_hir::def_id::LocalDefId; +use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::HirId; +use rustc_middle::query::plumbing::CyclePlaceholder; use rustc_middle::ty::print::with_forced_trimmed_paths; use rustc_middle::ty::util::IntTypeExt; use rustc_middle::ty::{self, ImplTraitInTraitData, IsSuggestable, Ty, TyCtxt, TypeVisitableExt}; @@ -388,86 +389,62 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty } }, - Node::Item(item) => { - match item.kind { - ItemKind::Static(ty, .., body_id) => { - if is_suggestable_infer_ty(ty) { - infer_placeholder_type( - tcx, - def_id, - body_id, - ty.span, - item.ident, - "static variable", - ) - } else { - icx.to_ty(ty) - } - } - ItemKind::Const(ty, _, body_id) => { - if is_suggestable_infer_ty(ty) { - infer_placeholder_type( - tcx, def_id, body_id, ty.span, item.ident, "constant", - ) - } else { - icx.to_ty(ty) - } - } - ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty), - ItemKind::Impl(hir::Impl { self_ty, .. }) => match self_ty.find_self_aliases() { - spans if spans.len() > 0 => { - let guar = tcx.sess.emit_err(crate::errors::SelfInImplSelf { - span: spans.into(), - note: (), - }); - Ty::new_error(tcx, guar) - } - _ => icx.to_ty(*self_ty), - }, - ItemKind::Fn(..) => { - let args = ty::GenericArgs::identity_for_item(tcx, def_id); - Ty::new_fn_def(tcx, def_id.to_def_id(), args) - } - ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => { - let def = tcx.adt_def(def_id); - let args = ty::GenericArgs::identity_for_item(tcx, def_id); - Ty::new_adt(tcx, def, args) + Node::Item(item) => match item.kind { + ItemKind::Static(ty, .., body_id) => { + if is_suggestable_infer_ty(ty) { + infer_placeholder_type( + tcx, + def_id, + body_id, + ty.span, + item.ident, + "static variable", + ) + } else { + icx.to_ty(ty) } - ItemKind::OpaqueTy(OpaqueTy { - origin: hir::OpaqueTyOrigin::TyAlias { .. }, - .. - }) => opaque::find_opaque_ty_constraints_for_tait(tcx, def_id), - // Opaque types desugared from `impl Trait`. - ItemKind::OpaqueTy(&OpaqueTy { - origin: - hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner), - in_trait, - .. - }) => { - if in_trait && !tcx.defaultness(owner).has_value() { - span_bug!( - tcx.def_span(def_id), - "tried to get type of this RPITIT with no definition" - ); - } - opaque::find_opaque_ty_constraints_for_rpit(tcx, def_id, owner) + } + ItemKind::Const(ty, _, body_id) => { + if is_suggestable_infer_ty(ty) { + infer_placeholder_type(tcx, def_id, body_id, ty.span, item.ident, "constant") + } else { + icx.to_ty(ty) } - ItemKind::Trait(..) - | ItemKind::TraitAlias(..) - | ItemKind::Macro(..) - | ItemKind::Mod(..) - | ItemKind::ForeignMod { .. } - | ItemKind::GlobalAsm(..) - | ItemKind::ExternCrate(..) - | ItemKind::Use(..) => { - span_bug!( - item.span, - "compute_type_of_item: unexpected item type: {:?}", - item.kind - ); + } + ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty), + ItemKind::Impl(hir::Impl { self_ty, .. 
}) => match self_ty.find_self_aliases() { + spans if spans.len() > 0 => { + let guar = tcx + .sess + .emit_err(crate::errors::SelfInImplSelf { span: spans.into(), note: () }); + Ty::new_error(tcx, guar) } + _ => icx.to_ty(*self_ty), + }, + ItemKind::Fn(..) => { + let args = ty::GenericArgs::identity_for_item(tcx, def_id); + Ty::new_fn_def(tcx, def_id.to_def_id(), args) } - } + ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => { + let def = tcx.adt_def(def_id); + let args = ty::GenericArgs::identity_for_item(tcx, def_id); + Ty::new_adt(tcx, def, args) + } + ItemKind::OpaqueTy(..) => tcx.type_of_opaque(def_id).map_or_else( + |CyclePlaceholder(guar)| Ty::new_error(tcx, guar), + |ty| ty.instantiate_identity(), + ), + ItemKind::Trait(..) + | ItemKind::TraitAlias(..) + | ItemKind::Macro(..) + | ItemKind::Mod(..) + | ItemKind::ForeignMod { .. } + | ItemKind::GlobalAsm(..) + | ItemKind::ExternCrate(..) + | ItemKind::Use(..) => { + span_bug!(item.span, "compute_type_of_item: unexpected item type: {:?}", item.kind); + } + }, Node::ForeignItem(foreign_item) => match foreign_item.kind { ForeignItemKind::Fn(..) => { @@ -514,6 +491,51 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty ty::EarlyBinder::bind(output) } +pub(super) fn type_of_opaque( + tcx: TyCtxt<'_>, + def_id: DefId, +) -> Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> { + if let Some(def_id) = def_id.as_local() { + use rustc_hir::*; + + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + Ok(ty::EarlyBinder::bind(match tcx.hir().get(hir_id) { + Node::Item(item) => match item.kind { + ItemKind::OpaqueTy(OpaqueTy { + origin: hir::OpaqueTyOrigin::TyAlias { .. }, + .. + }) => opaque::find_opaque_ty_constraints_for_tait(tcx, def_id), + // Opaque types desugared from `impl Trait`. + ItemKind::OpaqueTy(&OpaqueTy { + origin: + hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner), + in_trait, + .. + }) => { + if in_trait && !tcx.defaultness(owner).has_value() { + span_bug!( + tcx.def_span(def_id), + "tried to get type of this RPITIT with no definition" + ); + } + opaque::find_opaque_ty_constraints_for_rpit(tcx, def_id, owner) + } + _ => { + span_bug!(item.span, "type_of_opaque: unexpected item type: {:?}", item.kind); + } + }, + + x => { + bug!("unexpected sort of node in type_of_opaque(): {:?}", x); + } + })) + } else { + // Foreign opaque type will go through the foreign provider + // and load the type from metadata. + Ok(tcx.type_of(def_id)) + } +} + fn infer_placeholder_type<'a>( tcx: TyCtxt<'a>, def_id: LocalDefId, diff --git a/compiler/rustc_hir_analysis/src/variance/mod.rs b/compiler/rustc_hir_analysis/src/variance/mod.rs index d91d9fcbc8e..d69d7ff904a 100644 --- a/compiler/rustc_hir_analysis/src/variance/mod.rs +++ b/compiler/rustc_hir_analysis/src/variance/mod.rs @@ -129,7 +129,15 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc // By default, RPIT are invariant wrt type and const generics, but they are bivariant wrt // lifetime generics. - let mut variances: Vec<_> = std::iter::repeat(ty::Invariant).take(generics.count()).collect(); + let variances = std::iter::repeat(ty::Invariant).take(generics.count()); + + let mut variances: Vec<_> = match tcx.opaque_type_origin(item_def_id) { + rustc_hir::OpaqueTyOrigin::FnReturn(_) | rustc_hir::OpaqueTyOrigin::AsyncFn(_) => { + variances.collect() + } + // But TAIT are invariant for all generics + rustc_hir::OpaqueTyOrigin::TyAlias { .. 
} => return tcx.arena.alloc_from_iter(variances), + }; // Mark all lifetimes from parent generics as unused (Bivariant). // This will be overridden later if required. diff --git a/compiler/rustc_hir_analysis/src/variance/test.rs b/compiler/rustc_hir_analysis/src/variance/test.rs index d57d05d7605..d98dc0e6b83 100644 --- a/compiler/rustc_hir_analysis/src/variance/test.rs +++ b/compiler/rustc_hir_analysis/src/variance/test.rs @@ -1,9 +1,24 @@ +use rustc_hir::def::DefKind; +use rustc_hir::def_id::CRATE_DEF_ID; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::sym; use crate::errors; pub fn test_variance(tcx: TyCtxt<'_>) { + if tcx.has_attr(CRATE_DEF_ID, sym::rustc_variance_of_opaques) { + for id in tcx.hir().items() { + if matches!(tcx.def_kind(id.owner_id), DefKind::OpaqueTy) { + let variances_of = tcx.variances_of(id.owner_id); + + tcx.sess.emit_err(errors::VariancesOf { + span: tcx.def_span(id.owner_id), + variances_of: format!("{variances_of:?}"), + }); + } + } + } + // For unit testing: check for a special "rustc_variance" // attribute and report an error with various results if found. for id in tcx.hir().items() { diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs index 7cea40fdd64..a3b8c391e02 100644 --- a/compiler/rustc_hir_typeck/src/expr.rs +++ b/compiler/rustc_hir_typeck/src/expr.rs @@ -2310,13 +2310,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let guar = if field.name == kw::Empty { self.tcx.sess.delay_span_bug(field.span, "field name with no name") - } else if self.method_exists( - field, - base_ty, - expr.hir_id, - true, - expected.only_has_type(self), - ) { + } else if self.method_exists(field, base_ty, expr.hir_id, expected.only_has_type(self)) { self.ban_take_value_of_method(expr, base_ty, field) } else if !base_ty.is_primitive_ty() { self.ban_nonexisting_field(field, base, expr, base_ty) @@ -2501,7 +2495,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let mut err = self.private_field_err(field, base_did); // Also check if an accessible method exists, which is often what is meant. - if self.method_exists(field, expr_t, expr.hir_id, false, return_ty) + if self.method_exists(field, expr_t, expr.hir_id, return_ty) && !self.expr_in_place(expr.hir_id) { self.suggest_method_call( diff --git a/compiler/rustc_hir_typeck/src/method/mod.rs b/compiler/rustc_hir_typeck/src/method/mod.rs index 6dd131aa283..86a0e95de1d 100644 --- a/compiler/rustc_hir_typeck/src/method/mod.rs +++ b/compiler/rustc_hir_typeck/src/method/mod.rs @@ -89,14 +89,13 @@ pub enum CandidateSource { } impl<'a, 'tcx> FnCtxt<'a, 'tcx> { - /// Determines whether the type `self_ty` supports a method name `method_name` or not. + /// Determines whether the type `self_ty` supports a visible method named `method_name` or not. #[instrument(level = "debug", skip(self))] pub fn method_exists( &self, method_name: Ident, self_ty: Ty<'tcx>, call_expr_id: hir::HirId, - allow_private: bool, return_type: Option<Ty<'tcx>>, ) -> bool { match self.probe_for_name( @@ -118,7 +117,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } Err(NoMatch(..)) => false, Err(Ambiguity(..)) => true, - Err(PrivateMatch(..)) => allow_private, + Err(PrivateMatch(..)) => false, Err(IllegalSizedBound { .. 
}) => true, Err(BadReturnType) => false, } diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index 72a04a02bf4..07c48ec6392 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -2361,8 +2361,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { Some(output_ty) => self.resolve_vars_if_possible(output_ty), _ => return, }; - let method_exists = - self.method_exists(item_name, output_ty, call.hir_id, true, return_type); + let method_exists = self.method_exists(item_name, output_ty, call.hir_id, return_type); debug!("suggest_await_before_method: is_method_exist={}", method_exists); if method_exists { err.span_suggestion_verbose( diff --git a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs index 372539d73b1..445f68160d2 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs @@ -266,12 +266,10 @@ impl<T> Trait<T> for X { } } } - (ty::FnPtr(_), ty::FnDef(def, _)) - if let hir::def::DefKind::Fn = tcx.def_kind(def) => { - diag.note( - "when the arguments and return types match, functions can be coerced \ - to function pointers", - ); + (ty::FnPtr(sig), ty::FnDef(def_id, _)) | (ty::FnDef(def_id, _), ty::FnPtr(sig)) => { + if tcx.fn_sig(*def_id).skip_binder().unsafety() < sig.unsafety() { + diag.note("unsafe functions cannot be coerced into safe function pointers"); + } } _ => {} } diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index f6cbbad6338..9fcaf643179 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -8,7 +8,7 @@ use rustc_borrowck as mir_borrowck; use rustc_codegen_ssa::traits::CodegenBackend; use rustc_data_structures::parallel; use rustc_data_structures::steal::Steal; -use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal}; +use rustc_data_structures::sync::{Lrc, OnceLock, WorkerLocal}; use rustc_errors::PResult; use rustc_expand::base::{ExtCtxt, LintStoreExpand}; use rustc_feature::Features; @@ -689,7 +689,7 @@ pub fn create_global_ctxt<'tcx>( lint_store: Lrc<LintStore>, dep_graph: DepGraph, untracked: Untracked, - gcx_cell: &'tcx OnceCell<GlobalCtxt<'tcx>>, + gcx_cell: &'tcx OnceLock<GlobalCtxt<'tcx>>, arena: &'tcx WorkerLocal<Arena<'tcx>>, hir_arena: &'tcx WorkerLocal<rustc_hir::Arena<'tcx>>, ) -> &'tcx GlobalCtxt<'tcx> { @@ -743,12 +743,11 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> { rustc_passes::hir_id_validator::check_crate(tcx); let sess = tcx.sess; - let mut entry_point = None; sess.time("misc_checking_1", || { parallel!( { - entry_point = sess.time("looking_for_entry_point", || tcx.entry_fn(())); + sess.time("looking_for_entry_point", || tcx.ensure().entry_fn(())); sess.time("looking_for_derive_registrar", || { tcx.ensure().proc_macro_decls_static(()) @@ -863,7 +862,7 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> { // This check has to be run after all lints are done processing. We don't // define a lint filter, as all lint checks should have finished at this point. 
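The `note_and_explain` hunk above is easiest to read against a concrete program: `fn` items coerce to `fn` pointers, but a coercion that would drop `unsafe` is rejected, and that rejection is what the new, narrower note targets. A minimal probe (plain Rust, not part of the patch):

    unsafe fn dangerous() {}
    fn safe() {}

    fn main() {
        let _ok: fn() = safe;              // fn item -> fn pointer: allowed
        let _ok2: unsafe fn() = dangerous; // unsafety matches: allowed
        // let _bad: fn() = dangerous;     // rejected; with this patch the error
        //                                 // notes that unsafe functions cannot
        //                                 // be coerced into safe fn pointers
    }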
- sess.time("check_lint_expectations", || tcx.check_expectations(None)); + sess.time("check_lint_expectations", || tcx.ensure().check_expectations(None)); }); if sess.opts.unstable_opts.print_vtable_sizes { diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs index fc71c6c7e9a..2db7aa0e367 100644 --- a/compiler/rustc_interface/src/queries.rs +++ b/compiler/rustc_interface/src/queries.rs @@ -7,7 +7,7 @@ use rustc_codegen_ssa::traits::CodegenBackend; use rustc_codegen_ssa::CodegenResults; use rustc_data_structures::steal::Steal; use rustc_data_structures::svh::Svh; -use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceCell, RwLock, WorkerLocal}; +use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceLock, RwLock, WorkerLocal}; use rustc_hir::def_id::{StableCrateId, CRATE_DEF_ID, LOCAL_CRATE}; use rustc_hir::definitions::Definitions; use rustc_incremental::DepGraphFuture; @@ -78,7 +78,7 @@ impl<T> Default for Query<T> { pub struct Queries<'tcx> { compiler: &'tcx Compiler, - gcx_cell: OnceCell<GlobalCtxt<'tcx>>, + gcx_cell: OnceLock<GlobalCtxt<'tcx>>, arena: WorkerLocal<Arena<'tcx>>, hir_arena: WorkerLocal<rustc_hir::Arena<'tcx>>, @@ -93,7 +93,7 @@ impl<'tcx> Queries<'tcx> { pub fn new(compiler: &'tcx Compiler) -> Queries<'tcx> { Queries { compiler, - gcx_cell: OnceCell::new(), + gcx_cell: OnceLock::new(), arena: WorkerLocal::new(|_| Arena::default()), hir_arena: WorkerLocal::new(|_| rustc_hir::Arena::default()), parse: Default::default(), diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs index ad35dbbc8f9..5c9b39cdbe3 100644 --- a/compiler/rustc_interface/src/util.rs +++ b/compiler/rustc_interface/src/util.rs @@ -137,10 +137,8 @@ fn get_stack_size() -> Option<usize> { env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE) } -#[cfg(not(parallel_compiler))] -pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( +pub(crate) fn run_in_thread_with_globals<F: FnOnce() -> R + Send, R: Send>( edition: Edition, - _threads: usize, f: F, ) -> R { // The "thread pool" is a single spawned thread in the non-parallel @@ -171,18 +169,37 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( }) } +#[cfg(not(parallel_compiler))] +pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( + edition: Edition, + _threads: usize, + f: F, +) -> R { + run_in_thread_with_globals(edition, f) +} + #[cfg(parallel_compiler)] pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( edition: Edition, threads: usize, f: F, ) -> R { - use rustc_data_structures::jobserver; + use rustc_data_structures::{jobserver, sync::FromDyn}; use rustc_middle::ty::tls; use rustc_query_impl::QueryCtxt; use rustc_query_system::query::{deadlock, QueryContext}; let registry = sync::Registry::new(threads); + + if !sync::is_dyn_thread_safe() { + return run_in_thread_with_globals(edition, || { + // Register the thread for use with the `WorkerLocal` type. + registry.register(); + + f() + }); + } + let mut builder = rayon::ThreadPoolBuilder::new() .thread_name(|_| "rustc".to_string()) .acquire_thread_handler(jobserver::acquire_thread) @@ -191,13 +208,13 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( .deadlock_handler(|| { // On deadlock, creates a new thread and forwards information in thread // locals to it. The new thread runs the deadlock handler. 
- let query_map = tls::with(|tcx| { + let query_map = FromDyn::from(tls::with(|tcx| { QueryCtxt::new(tcx) .try_collect_active_jobs() .expect("active jobs shouldn't be locked in deadlock handler") - }); + })); let registry = rayon_core::Registry::current(); - thread::spawn(move || deadlock(query_map, ®istry)); + thread::spawn(move || deadlock(query_map.into_inner(), ®istry)); }); if let Some(size) = get_stack_size() { builder = builder.stack_size(size); @@ -209,6 +226,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( // `Send` in the parallel compiler. rustc_span::create_session_globals_then(edition, || { rustc_span::with_session_globals(|session_globals| { + let session_globals = FromDyn::from(session_globals); builder .build_scoped( // Initialize each new worker thread when created. @@ -216,7 +234,9 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>( // Register the thread for use with the `WorkerLocal` type. registry.register(); - rustc_span::set_session_globals_then(session_globals, || thread.run()) + rustc_span::set_session_globals_then(session_globals.into_inner(), || { + thread.run() + }) }, // Run `f` on the first thread in the thread pool. move |pool: &rayon::ThreadPool| pool.install(f), diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index 4b6917fdfdd..de228bdb850 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -1001,8 +1001,22 @@ impl EarlyLintPass for UnusedDocComment { warn_if_doc(cx, arm_span, "match arms", &arm.attrs); } + fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &ast::Pat) { + if let ast::PatKind::Struct(_, _, fields, _) = &pat.kind { + for field in fields { + warn_if_doc(cx, field.span, "pattern fields", &field.attrs); + } + } + } + fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) { warn_if_doc(cx, expr.span, "expressions", &expr.attrs); + + if let ExprKind::Struct(s) = &expr.kind { + for field in &s.fields { + warn_if_doc(cx, field.span, "expression fields", &field.attrs); + } + } } fn check_generic_param(&mut self, cx: &EarlyContext<'_>, param: &ast::GenericParam) { diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs index 211ea8f4347..d102e3a6c15 100644 --- a/compiler/rustc_lint/src/early.rs +++ b/compiler/rustc_lint/src/early.rs @@ -228,6 +228,7 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T> }) => self.check_id(closure_id), _ => {} } + lint_callback!(self, check_expr_post, e); } fn visit_generic_arg(&mut self, arg: &'a ast::GenericArg) { diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index e578f09b58f..84434d585d3 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -500,6 +500,11 @@ fn register_builtins(store: &mut LintStore) { "converted into hard error, see issue #82523 \ <https://github.com/rust-lang/rust/issues/82523> for more information", ); + store.register_removed( + "private_in_public", + "replaced with another group of lints, see RFC \ + <https://rust-lang.github.io/rfcs/2145-type-privacy.html> for more information", + ); } fn register_internals(store: &mut LintStore) { diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs index 16964565b01..fca0eeeecc4 100644 --- a/compiler/rustc_lint/src/passes.rs +++ b/compiler/rustc_lint/src/passes.rs @@ -153,6 +153,7 @@ macro_rules! 
early_lint_methods {
     fn check_pat(a: &ast::Pat);
     fn check_pat_post(a: &ast::Pat);
     fn check_expr(a: &ast::Expr);
+    fn check_expr_post(a: &ast::Expr);
     fn check_ty(a: &ast::Ty);
     fn check_generic_arg(a: &ast::GenericArg);
     fn check_generic_param(a: &ast::GenericParam);
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 1ba746eddeb..fc4c29eb36d 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -804,7 +804,7 @@ pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
     tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
 }
 
-/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
+/// `repr(transparent)` structs can have a single non-1-ZST field, this function returns that
 /// field.
 pub fn transparent_newtype_field<'a, 'tcx>(
     tcx: TyCtxt<'tcx>,
@@ -813,8 +813,8 @@ pub fn transparent_newtype_field<'a, 'tcx>(
     let param_env = tcx.param_env(variant.def_id);
     variant.fields.iter().find(|field| {
         let field_ty = tcx.type_of(field.did).instantiate_identity();
-        let is_zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_zst());
-        !is_zst
+        let is_1zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_1zst());
+        !is_1zst
     })
 }
 
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index 6041f80753b..39e6fb805ae 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -955,11 +955,14 @@ declare_lint! {
 
 pub struct UnusedParens {
     with_self_ty_parens: bool,
+    /// `1 as (i32) < 2` parses to ExprKind::Lt, while in `1 as i32 < 2` the `<` is
+    /// interpreted as the start of generic arguments for `i32`, missing its closing angle bracket
+    parens_in_cast_in_lt: Vec<ast::NodeId>,
 }
 
 impl UnusedParens {
     pub fn new() -> Self {
-        Self { with_self_ty_parens: false }
+        Self { with_self_ty_parens: false, parens_in_cast_in_lt: Vec::new() }
     }
 }
@@ -1055,6 +1058,14 @@ impl UnusedParens {
 impl EarlyLintPass for UnusedParens {
     #[inline]
     fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+        if let ExprKind::Binary(op, lhs, _rhs) = &e.kind &&
+            (op.node == ast::BinOpKind::Lt || op.node == ast::BinOpKind::Shl) &&
+            let ExprKind::Cast(_expr, ty) = &lhs.kind &&
+            let ast::TyKind::Paren(_) = &ty.kind
+        {
+            self.parens_in_cast_in_lt.push(ty.id);
+        }
+
         match e.kind {
             ExprKind::Let(ref pat, _, _) | ExprKind::ForLoop(ref pat, ..)
=> { self.check_unused_parens_pat(cx, pat, false, false, (true, true)); @@ -1101,6 +1112,17 @@ impl EarlyLintPass for UnusedParens { <Self as UnusedDelimLint>::check_expr(self, cx, e) } + fn check_expr_post(&mut self, _cx: &EarlyContext<'_>, e: &ast::Expr) { + if let ExprKind::Binary(op, lhs, _rhs) = &e.kind && + (op.node == ast::BinOpKind::Lt || op.node == ast::BinOpKind::Shl) && + let ExprKind::Cast(_expr, ty) = &lhs.kind && + let ast::TyKind::Paren(_) = &ty.kind + { + let id = self.parens_in_cast_in_lt.pop().expect("check_expr and check_expr_post must balance"); + assert_eq!(id, ty.id, "check_expr, check_ty, and check_expr_post are called, in that order, by the visitor"); + } + } + fn check_pat(&mut self, cx: &EarlyContext<'_>, p: &ast::Pat) { use ast::{Mutability, PatKind::*}; let keep_space = (false, false); @@ -1141,6 +1163,11 @@ impl EarlyLintPass for UnusedParens { } fn check_ty(&mut self, cx: &EarlyContext<'_>, ty: &ast::Ty) { + if let ast::TyKind::Paren(_) = ty.kind && + Some(&ty.id) == self.parens_in_cast_in_lt.last() + { + return; + } match &ty.kind { ast::TyKind::Array(_, len) => { self.check_unused_delims_expr( diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 0aa37642b74..7e745de4f1a 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -983,44 +983,6 @@ declare_lint! { } declare_lint! { - /// The `private_in_public` lint detects private items in public - /// interfaces not caught by the old implementation. - /// - /// ### Example - /// - /// ```rust - /// # #![allow(unused)] - /// struct SemiPriv; - /// - /// mod m1 { - /// struct Priv; - /// impl super::SemiPriv { - /// pub fn f(_: Priv) {} - /// } - /// } - /// # fn main() {} - /// ``` - /// - /// {{produces}} - /// - /// ### Explanation - /// - /// The visibility rules are intended to prevent exposing private items in - /// public interfaces. This is a [future-incompatible] lint to transition - /// this to a hard error in the future. See [issue #34537] for more - /// details. - /// - /// [issue #34537]: https://github.com/rust-lang/rust/issues/34537 - /// [future-incompatible]: ../index.md#future-incompatible-lints - pub PRIVATE_IN_PUBLIC, - Warn, - "detect private items in public interfaces not caught by the old implementation", - @future_incompatible = FutureIncompatibleInfo { - reference: "issue #34537 <https://github.com/rust-lang/rust/issues/34537>", - }; -} - -declare_lint! { /// The `invalid_alignment` lint detects dereferences of misaligned pointers during /// constant evaluation. /// @@ -3415,7 +3377,6 @@ declare_lint_pass! { PATTERNS_IN_FNS_WITHOUT_BODY, POINTER_STRUCTURAL_MATCH, PRIVATE_BOUNDS, - PRIVATE_IN_PUBLIC, PRIVATE_INTERFACES, PROC_MACRO_BACK_COMPAT, PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, @@ -4334,9 +4295,7 @@ declare_lint! { /// ### Example /// /// ```rust,compile_fail - /// # #![feature(type_privacy_lints)] /// # #![allow(unused)] - /// # #![allow(private_in_public)] /// #![deny(private_interfaces)] /// struct SemiPriv; /// @@ -4357,9 +4316,8 @@ declare_lint! { /// Having something private in primary interface guarantees that /// the item will be unusable from outer modules due to type privacy. pub PRIVATE_INTERFACES, - Allow, + Warn, "private type in primary interface of an item", - @feature_gate = sym::type_privacy_lints; } declare_lint! { @@ -4370,8 +4328,6 @@ declare_lint! 
{ /// ### Example /// /// ```rust,compile_fail - /// # #![feature(type_privacy_lints)] - /// # #![allow(private_in_public)] /// # #![allow(unused)] /// #![deny(private_bounds)] /// @@ -4389,9 +4345,8 @@ declare_lint! { /// Having private types or traits in item bounds makes it less clear what interface /// the item actually provides. pub PRIVATE_BOUNDS, - Allow, + Warn, "private type in secondary interface of an item", - @feature_gate = sym::type_privacy_lints; } declare_lint! { diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs index 4302b161833..f606fa483ca 100644 --- a/compiler/rustc_llvm/build.rs +++ b/compiler/rustc_llvm/build.rs @@ -252,7 +252,10 @@ fn main() { } else if target.contains("windows-gnu") { println!("cargo:rustc-link-lib=shell32"); println!("cargo:rustc-link-lib=uuid"); - } else if target.contains("haiku") || target.contains("darwin") { + } else if target.contains("haiku") + || target.contains("darwin") + || (is_crossed && (target.contains("dragonfly") || target.contains("solaris"))) + { println!("cargo:rustc-link-lib=z"); } else if target.contains("netbsd") { println!("cargo:rustc-link-lib=z"); diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs index 87373d99743..99fef84931e 100644 --- a/compiler/rustc_metadata/src/lib.rs +++ b/compiler/rustc_metadata/src/lib.rs @@ -42,6 +42,6 @@ pub mod locator; pub use fs::{emit_wrapper_file, METADATA_FILENAME}; pub use native_libs::find_native_static_library; -pub use rmeta::{encode_metadata, EncodedMetadata, METADATA_HEADER}; +pub use rmeta::{encode_metadata, rendered_const, EncodedMetadata, METADATA_HEADER}; fluent_messages! { "../messages.ftl" } diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index e8f66c36a86..43c1d321f81 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -9,7 +9,7 @@ use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::owned_slice::OwnedSlice; use rustc_data_structures::svh::Svh; -use rustc_data_structures::sync::{AppendOnlyVec, AtomicBool, Lock, Lrc, OnceCell}; +use rustc_data_structures::sync::{AppendOnlyVec, AtomicBool, Lock, Lrc, OnceLock}; use rustc_data_structures::unhash::UnhashMap; use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind}; use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, DeriveProcMacro}; @@ -93,7 +93,7 @@ pub(crate) struct CrateMetadata { /// For every definition in this crate, maps its `DefPathHash` to its `DefIndex`. def_path_hash_map: DefPathHashMapRef<'static>, /// Likewise for ExpnHash. - expn_hash_map: OnceCell<UnhashMap<ExpnHash, ExpnIndex>>, + expn_hash_map: OnceLock<UnhashMap<ExpnHash, ExpnIndex>>, /// Used for decoding interpret::AllocIds in a cached & thread-safe manner. alloc_decoding_state: AllocDecodingState, /// Caches decoded `DefKey`s. 
@@ -250,6 +250,7 @@ impl<'a, 'tcx> Metadata<'a, 'tcx> for (CrateMetadataRef<'a>, TyCtxt<'tcx>) { } impl<T: ParameterizedOverTcx> LazyValue<T> { + #[inline] fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(self, metadata: M) -> T::Value<'tcx> where T::Value<'tcx>: Decodable<DecodeContext<'a, 'tcx>>, @@ -294,6 +295,7 @@ unsafe impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> TrustedLen } impl<T: ParameterizedOverTcx> LazyArray<T> { + #[inline] fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>( self, metadata: M, @@ -360,8 +362,8 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> { self.read_lazy_offset_then(|pos| LazyArray::from_position_and_num_elems(pos, len)) } - fn read_lazy_table<I, T>(&mut self, len: usize) -> LazyTable<I, T> { - self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, len)) + fn read_lazy_table<I, T>(&mut self, width: usize, len: usize) -> LazyTable<I, T> { + self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, width, len)) } #[inline] @@ -420,6 +422,7 @@ impl<'a, 'tcx> TyDecoder for DecodeContext<'a, 'tcx> { } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum { + #[inline] fn decode(d: &mut DecodeContext<'a, 'tcx>) -> CrateNum { let cnum = CrateNum::from_u32(d.read_u32()); d.map_encoded_cnum_to_current(cnum) @@ -427,18 +430,21 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum { } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex { + #[inline] fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefIndex { DefIndex::from_u32(d.read_u32()) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex { + #[inline] fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ExpnIndex { ExpnIndex::from_u32(d.read_u32()) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ast::AttrId { + #[inline] fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ast::AttrId { let sess = d.sess.expect("can't decode AttrId without Session"); sess.parse_sess.attr_id_generator.mk_attr_id() @@ -672,6 +678,7 @@ impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyValue<T> { } impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> { + #[inline] fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { let len = decoder.read_usize(); if len == 0 { LazyArray::default() } else { decoder.read_lazy_array(len) } @@ -680,8 +687,9 @@ impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> { impl<'a, 'tcx, I: Idx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyTable<I, T> { fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { + let width = decoder.read_usize(); let len = decoder.read_usize(); - decoder.read_lazy_table(len) + decoder.read_lazy_table(width, len) } } diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index 5ade67c62ff..4f4351633a2 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -17,8 +17,8 @@ use rustc_hir::def_id::{ CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_ID, CRATE_DEF_INDEX, LOCAL_CRATE, }; use rustc_hir::definitions::DefPathData; -use rustc_hir::intravisit; use rustc_hir::lang_items::LangItem; +use rustc_hir_pretty::id_to_string; use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile; use rustc_middle::middle::dependency_format::Linkage; use rustc_middle::middle::exported_symbols::{ @@ -131,7 +131,8 @@ impl<'a, 'tcx, T> Encodable<EncodeContext<'a, 'tcx>> for LazyArray<T> { impl<'a, 'tcx, I, T> Encodable<EncodeContext<'a, 'tcx>> for LazyTable<I, 
T> { fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) { - e.emit_usize(self.encoded_size); + e.emit_usize(self.width); + e.emit_usize(self.len); e.emit_lazy_distance(self.position); } } @@ -1614,7 +1615,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { record!(self.tables.mir_const_qualif[def_id.to_def_id()] <- qualifs); let body_id = tcx.hir().maybe_body_owned_by(def_id); if let Some(body_id) = body_id { - let const_data = self.encode_rendered_const_for_body(body_id); + let const_data = rendered_const(self.tcx, body_id); record!(self.tables.rendered_const[def_id.to_def_id()] <- const_data); } } @@ -1682,14 +1683,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { } } - fn encode_rendered_const_for_body(&mut self, body_id: hir::BodyId) -> String { - let hir = self.tcx.hir(); - let body = hir.body(body_id); - rustc_hir_pretty::to_string(&(&hir as &dyn intravisit::Map<'_>), |s| { - s.print_expr(&body.value) - }) - } - #[instrument(level = "debug", skip(self))] fn encode_info_for_macro(&mut self, def_id: LocalDefId) { let tcx = self.tcx; @@ -2291,3 +2284,97 @@ pub fn provide(providers: &mut Providers) { ..*providers } } + +/// Build a textual representation of an unevaluated constant expression. +/// +/// If the const expression is too complex, an underscore `_` is returned. +/// For const arguments, it's `{ _ }` to be precise. +/// This means that the output is not necessarily valid Rust code. +/// +/// Currently, only +/// +/// * literals (optionally with a leading `-`) +/// * unit `()` +/// * blocks (`{ … }`) around simple expressions and +/// * paths without arguments +/// +/// are considered simple enough. Simple blocks are included since they are +/// necessary to disambiguate unit from the unit type. +/// This list might get extended in the future. +/// +/// Without this censoring, in a lot of cases the output would get too large +/// and verbose. Consider `match` expressions, blocks and deeply nested ADTs. +/// Further, private and `doc(hidden)` fields of structs would get leaked +/// since HIR datatypes like the `body` parameter do not contain enough +/// semantic information for this function to be able to hide them – +/// at least not without significant performance overhead. +/// +/// Whenever possible, prefer to evaluate the constant first and try to +/// use a different method for pretty-printing. Ideally this function +/// should only ever be used as a fallback. +pub fn rendered_const<'tcx>(tcx: TyCtxt<'tcx>, body: hir::BodyId) -> String { + let hir = tcx.hir(); + let value = &hir.body(body).value; + + #[derive(PartialEq, Eq)] + enum Classification { + Literal, + Simple, + Complex, + } + + use Classification::*; + + fn classify(expr: &hir::Expr<'_>) -> Classification { + match &expr.kind { + hir::ExprKind::Unary(hir::UnOp::Neg, expr) => { + if matches!(expr.kind, hir::ExprKind::Lit(_)) { Literal } else { Complex } + } + hir::ExprKind::Lit(_) => Literal, + hir::ExprKind::Tup([]) => Simple, + hir::ExprKind::Block(hir::Block { stmts: [], expr: Some(expr), .. }, _) => { + if classify(expr) == Complex { Complex } else { Simple } + } + // Paths with a self-type or arguments are too “complex” following our measure since + // they may leak private fields of structs (with feature `adt_const_params`). + // Consider: `<Self as Trait<{ Struct { private: () } }>>::CONSTANT`. + // Paths without arguments are definitely harmless though. + hir::ExprKind::Path(hir::QPath::Resolved(_, hir::Path { segments, .. 
})) => {
+                if segments.iter().all(|segment| segment.args.is_none()) { Simple } else { Complex }
+            }
+            // FIXME: Claiming that those kinds of QPaths are simple is probably not true if the Ty
+            //        contains const arguments. Is there a *concise* way to check for this?
+            hir::ExprKind::Path(hir::QPath::TypeRelative(..)) => Simple,
+            // FIXME: Can they contain const arguments and thus leak private struct fields?
+            hir::ExprKind::Path(hir::QPath::LangItem(..)) => Simple,
+            _ => Complex,
+        }
+    }
+
+    let classification = classify(value);
+
+    if classification == Literal
+        && !value.span.from_expansion()
+        && let Ok(snippet) = tcx.sess.source_map().span_to_snippet(value.span) {
+        // For literals, we avoid invoking the pretty-printer and use the source snippet instead to
+        // preserve certain stylistic choices the user likely made for the sake of legibility, like
+        //
+        // * hexadecimal notation
+        // * underscores
+        // * character escapes
+        //
+        // FIXME: This passes through `-/*spacer*/0` verbatim.
+        snippet
+    } else if classification == Simple {
+        // Otherwise we prefer pretty-printing to get rid of extraneous whitespace, comments and
+        // other formatting artifacts.
+        id_to_string(&hir, body.hir_id)
+    } else if tcx.def_kind(hir.body_owner_def_id(body).to_def_id()) == DefKind::AnonConst {
+        // FIXME: Omit the curly braces if the enclosing expression is an array literal
+        //        with a repeated element (an `ExprKind::Repeat`) as in such case it
+        //        would not actually need any disambiguation.
+        "{ _ }".to_owned()
+    } else {
+        "_".to_owned()
+    }
+}
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index a89e235ff28..71269779d42 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -42,7 +42,7 @@ pub use decoder::provide_extern;
 use decoder::DecodeContext;
 pub(crate) use decoder::{CrateMetadata, CrateNumMap, MetadataBlob};
 use encoder::EncodeContext;
-pub use encoder::{encode_metadata, EncodedMetadata};
+pub use encoder::{encode_metadata, rendered_const, EncodedMetadata};
 use rustc_span::hygiene::SyntaxContextData;
 
 mod decoder;
@@ -142,7 +142,11 @@ impl<T> LazyArray<T> {
 /// eagerly and in-order.
 struct LazyTable<I, T> {
     position: NonZeroUsize,
-    encoded_size: usize,
+    /// The encoded size of the elements of a table is selected at runtime to drop
+    /// trailing zeroes. This is the number of bytes used for each table element.
+    width: usize,
+    /// How many elements are in the table.
+ len: usize, _marker: PhantomData<fn(I) -> T>, } @@ -153,9 +157,10 @@ impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for LazyTable<I, impl<I, T> LazyTable<I, T> { fn from_position_and_encoded_size( position: NonZeroUsize, - encoded_size: usize, + width: usize, + len: usize, ) -> LazyTable<I, T> { - LazyTable { position, encoded_size, _marker: PhantomData } + LazyTable { position, width, len, _marker: PhantomData } } } diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs index ea66c770b77..e906c906347 100644 --- a/compiler/rustc_metadata/src/rmeta/table.rs +++ b/compiler/rustc_metadata/src/rmeta/table.rs @@ -38,6 +38,12 @@ impl IsDefault for u32 { } } +impl IsDefault for u64 { + fn is_default(&self) -> bool { + *self == 0 + } +} + impl<T> IsDefault for LazyArray<T> { fn is_default(&self) -> bool { self.num_elems == 0 @@ -89,6 +95,20 @@ impl FixedSizeEncoding for u32 { } } +impl FixedSizeEncoding for u64 { + type ByteArray = [u8; 8]; + + #[inline] + fn from_bytes(b: &[u8; 8]) -> Self { + Self::from_le_bytes(*b) + } + + #[inline] + fn write_to_bytes(self, b: &mut [u8; 8]) { + *b = self.to_le_bytes(); + } +} + macro_rules! fixed_size_enum { ($ty:ty { $(($($pat:tt)*))* }) => { impl FixedSizeEncoding for Option<$ty> { @@ -300,21 +320,21 @@ impl FixedSizeEncoding for UnusedGenericParams { // generic `LazyValue<T>` impl, but in the general case we might not need / want // to fit every `usize` in `u32`. impl<T> FixedSizeEncoding for Option<LazyValue<T>> { - type ByteArray = [u8; 4]; + type ByteArray = [u8; 8]; #[inline] - fn from_bytes(b: &[u8; 4]) -> Self { - let position = NonZeroUsize::new(u32::from_bytes(b) as usize)?; + fn from_bytes(b: &[u8; 8]) -> Self { + let position = NonZeroUsize::new(u64::from_bytes(b) as usize)?; Some(LazyValue::from_position(position)) } #[inline] - fn write_to_bytes(self, b: &mut [u8; 4]) { + fn write_to_bytes(self, b: &mut [u8; 8]) { match self { None => unreachable!(), Some(lazy) => { let position = lazy.position.get(); - let position: u32 = position.try_into().unwrap(); + let position: u64 = position.try_into().unwrap(); position.write_to_bytes(b) } } @@ -323,55 +343,75 @@ impl<T> FixedSizeEncoding for Option<LazyValue<T>> { impl<T> LazyArray<T> { #[inline] - fn write_to_bytes_impl(self, b: &mut [u8; 8]) { - let ([position_bytes, meta_bytes], []) = b.as_chunks_mut::<4>() else { panic!() }; - - let position = self.position.get(); - let position: u32 = position.try_into().unwrap(); - position.write_to_bytes(position_bytes); - - let len = self.num_elems; - let len: u32 = len.try_into().unwrap(); - len.write_to_bytes(meta_bytes); + fn write_to_bytes_impl(self, b: &mut [u8; 16]) { + let position = (self.position.get() as u64).to_le_bytes(); + let len = (self.num_elems as u64).to_le_bytes(); + + // Element width is selected at runtime on a per-table basis by omitting trailing + // zero bytes in table elements. This works very naturally when table elements are + // simple numbers but `LazyArray` is a pair of integers. If naively encoded, the second + // element would shield the trailing zeroes in the first. Interleaving the bytes + // of the position and length exposes trailing zeroes in both to the optimization. + // We encode length second because we generally expect it to be smaller. 
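        // To make the interleaving concrete (illustrative values, not part of the
        // patch): position = 0x0102 and len = 3 give
        //     position (LE): [0x02, 0x01, 0, 0, 0, 0, 0, 0]
        //     len      (LE): [0x03, 0, 0, 0, 0, 0, 0, 0]
        //     interleaved:   [0x02, 0x03, 0x01, 0, 0, ...]
        // so the zeroes of both integers coalesce at the tail, and the pair fits
        // in a 3-byte table element.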
+ for i in 0..8 { + b[2 * i] = position[i]; + b[2 * i + 1] = len[i]; + } } - fn from_bytes_impl(position_bytes: &[u8; 4], meta_bytes: &[u8; 4]) -> Option<LazyArray<T>> { - let position = NonZeroUsize::new(u32::from_bytes(position_bytes) as usize)?; - let len = u32::from_bytes(meta_bytes) as usize; + fn from_bytes_impl(position: &[u8; 8], meta: &[u8; 8]) -> Option<LazyArray<T>> { + let position = NonZeroUsize::new(u64::from_bytes(&position) as usize)?; + let len = u64::from_bytes(&meta) as usize; Some(LazyArray::from_position_and_num_elems(position, len)) } } +// Decoding helper for the encoding scheme used by `LazyArray`. +// Interleaving the bytes of the two integers exposes trailing bytes in the first integer +// to the varint scheme that we use for tables. +#[inline] +fn decode_interleaved(encoded: &[u8; 16]) -> ([u8; 8], [u8; 8]) { + let mut first = [0u8; 8]; + let mut second = [0u8; 8]; + for i in 0..8 { + first[i] = encoded[2 * i]; + second[i] = encoded[2 * i + 1]; + } + (first, second) +} + impl<T> FixedSizeEncoding for LazyArray<T> { - type ByteArray = [u8; 8]; + type ByteArray = [u8; 16]; #[inline] - fn from_bytes(b: &[u8; 8]) -> Self { - let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() }; - if *meta_bytes == [0; 4] { + fn from_bytes(b: &[u8; 16]) -> Self { + let (position, meta) = decode_interleaved(b); + + if meta == [0; 8] { return Default::default(); } - LazyArray::from_bytes_impl(position_bytes, meta_bytes).unwrap() + LazyArray::from_bytes_impl(&position, &meta).unwrap() } #[inline] - fn write_to_bytes(self, b: &mut [u8; 8]) { + fn write_to_bytes(self, b: &mut [u8; 16]) { assert!(!self.is_default()); self.write_to_bytes_impl(b) } } impl<T> FixedSizeEncoding for Option<LazyArray<T>> { - type ByteArray = [u8; 8]; + type ByteArray = [u8; 16]; #[inline] - fn from_bytes(b: &[u8; 8]) -> Self { - let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() }; - LazyArray::from_bytes_impl(position_bytes, meta_bytes) + fn from_bytes(b: &[u8; 16]) -> Self { + let (position, meta) = decode_interleaved(b); + + LazyArray::from_bytes_impl(&position, &meta) } #[inline] - fn write_to_bytes(self, b: &mut [u8; 8]) { + fn write_to_bytes(self, b: &mut [u8; 16]) { match self { None => unreachable!(), Some(lazy) => lazy.write_to_bytes_impl(b), @@ -381,13 +421,14 @@ impl<T> FixedSizeEncoding for Option<LazyArray<T>> { /// Helper for constructing a table's serialization (also see `Table`). pub(super) struct TableBuilder<I: Idx, T: FixedSizeEncoding> { + width: usize, blocks: IndexVec<I, T::ByteArray>, _marker: PhantomData<T>, } impl<I: Idx, T: FixedSizeEncoding> Default for TableBuilder<I, T> { fn default() -> Self { - TableBuilder { blocks: Default::default(), _marker: PhantomData } + TableBuilder { width: 0, blocks: Default::default(), _marker: PhantomData } } } @@ -415,40 +456,63 @@ impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]>> TableBui // > store bit-masks of which item in each bucket is actually serialized). 
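            // As an illustration (values invented for the example): with N = 4,
            // setting entries whose byte arrays are [0x12, 0, 0, 0] and
            // [0x34, 0x56, 0, 0] leaves `self.width == 2` after the updates below,
            // so `encode` will emit two bytes per element instead of four.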
let block = self.blocks.ensure_contains_elem(i, || [0; N]); value.write_to_bytes(block); + if self.width != N { + let width = N - trailing_zeros(block); + self.width = self.width.max(width); + } } } pub(crate) fn encode(&self, buf: &mut FileEncoder) -> LazyTable<I, T> { let pos = buf.position(); + + let width = self.width; for block in &self.blocks { - buf.emit_raw_bytes(block); + buf.emit_raw_bytes(&block[..width]); } - let num_bytes = self.blocks.len() * N; + LazyTable::from_position_and_encoded_size( NonZeroUsize::new(pos as usize).unwrap(), - num_bytes, + width, + self.blocks.len(), ) } } +fn trailing_zeros(x: &[u8]) -> usize { + x.iter().rev().take_while(|b| **b == 0).count() +} + impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]> + ParameterizedOverTcx> LazyTable<I, T> where for<'tcx> T::Value<'tcx>: FixedSizeEncoding<ByteArray = [u8; N]>, { /// Given the metadata, extract out the value at a particular index (if any). - #[inline(never)] pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> T::Value<'tcx> { - trace!("LazyTable::lookup: index={:?} len={:?}", i, self.encoded_size); + trace!("LazyTable::lookup: index={:?} len={:?}", i, self.len); + + // Access past the end of the table returns a Default + if i.index() >= self.len { + return Default::default(); + } - let start = self.position.get(); - let bytes = &metadata.blob()[start..start + self.encoded_size]; - let (bytes, []) = bytes.as_chunks::<N>() else { panic!() }; - bytes.get(i.index()).map_or_else(Default::default, FixedSizeEncoding::from_bytes) + let width = self.width; + let start = self.position.get() + (width * i.index()); + let end = start + width; + let bytes = &metadata.blob()[start..end]; + + if let Ok(fixed) = bytes.try_into() { + FixedSizeEncoding::from_bytes(fixed) + } else { + let mut fixed = [0u8; N]; + fixed[..width].copy_from_slice(bytes); + FixedSizeEncoding::from_bytes(&fixed) + } } /// Size of the table in entries, including possible gaps. pub(super) fn size(&self) -> usize { - self.encoded_size / N + self.len } } diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs index 467962b39bb..3e8f07ed233 100644 --- a/compiler/rustc_middle/src/hir/map/mod.rs +++ b/compiler/rustc_middle/src/hir/map/mod.rs @@ -701,6 +701,8 @@ impl<'hir> Map<'hir> { // expressions. ignore_tail = true; } + + let mut prev_hir_id = None; while let Some((hir_id, node)) = iter.next() { if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) { match next_node { @@ -715,7 +717,14 @@ impl<'hir> Map<'hir> { | Node::ForeignItem(_) | Node::TraitItem(_) | Node::Expr(Expr { kind: ExprKind::Closure { .. }, .. }) - | Node::ImplItem(_) => return Some(hir_id), + | Node::ImplItem(_) + // The input node `id` must be enclosed in the method's body as opposed + // to some other place such as its return type (fixes #114918). + // We verify that indirectly by checking that the previous node is the + // current node's body + if node.body_id().map(|b| b.hir_id) == prev_hir_id => { + return Some(hir_id) + } // Ignore `return`s on the first iteration Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. }) | Node::Local(_) => { @@ -723,6 +732,8 @@ impl<'hir> Map<'hir> { } _ => {} } + + prev_hir_id = Some(hir_id); } None } @@ -1215,7 +1226,6 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, _: LocalCrate) -> Svh { tcx.stable_crate_id(LOCAL_CRATE).hash_stable(&mut hcx, &mut stable_hasher); // Hash visibility information since it does not appear in HIR. 
resolutions.visibilities.hash_stable(&mut hcx, &mut stable_hasher); - resolutions.has_pub_restricted.hash_stable(&mut hcx, &mut stable_hasher); stable_hasher.finish() }); diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs index 0ad17e819c7..70d8f3bd54b 100644 --- a/compiler/rustc_middle/src/mir/basic_blocks.rs +++ b/compiler/rustc_middle/src/mir/basic_blocks.rs @@ -5,7 +5,7 @@ use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::graph; use rustc_data_structures::graph::dominators::{dominators, Dominators}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_data_structures::sync::OnceCell; +use rustc_data_structures::sync::OnceLock; use rustc_index::{IndexSlice, IndexVec}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use smallvec::SmallVec; @@ -23,11 +23,11 @@ pub type SwitchSources = FxHashMap<(BasicBlock, BasicBlock), SmallVec<[Option<u1 #[derive(Clone, Default, Debug)] struct Cache { - predecessors: OnceCell<Predecessors>, - switch_sources: OnceCell<SwitchSources>, - is_cyclic: OnceCell<bool>, - reverse_postorder: OnceCell<Vec<BasicBlock>>, - dominators: OnceCell<Dominators<BasicBlock>>, + predecessors: OnceLock<Predecessors>, + switch_sources: OnceLock<SwitchSources>, + is_cyclic: OnceLock<bool>, + reverse_postorder: OnceLock<Vec<BasicBlock>>, + dominators: OnceLock<Dominators<BasicBlock>>, } impl<'tcx> BasicBlocks<'tcx> { diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index e6ef5a41ee0..577cd20b74c 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -415,6 +415,8 @@ pub enum UnsupportedOpInfo { /// Free-form case. Only for errors that are never caught! // FIXME still use translatable diagnostics Unsupported(String), + /// Unsized local variables. + UnsizedLocal, // // The variants below are only reachable from CTFE/const prop, miri will never emit them. 
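The `OnceCell` to `OnceLock` switch repeated across these files (including the block cache just above) follows the std naming: `OnceLock` is the thread-safe once-initialized cell, which is what caches shared across parallel-compiler threads need. A minimal sketch of the API these call sites rely on, shown with the std type of the same name:

    use std::sync::OnceLock;

    static CACHE: OnceLock<Vec<u32>> = OnceLock::new();

    fn main() {
        // get_or_init is safe to race: exactly one closure runs, and every
        // thread observes the same value afterwards.
        let v = CACHE.get_or_init(|| vec![1, 2, 3]);
        assert_eq!(v.len(), 3);
    }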
//
diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs
index 348f79ed6a8..9b666222ad0 100644
--- a/compiler/rustc_middle/src/query/erase.rs
+++ b/compiler/rustc_middle/src/query/erase.rs
@@ -1,4 +1,5 @@
 use crate::mir;
+use crate::query::CyclePlaceholder;
 use crate::traits;
 use crate::ty::{self, Ty};
 use std::mem::{size_of, transmute_copy, MaybeUninit};
@@ -142,6 +143,10 @@ impl EraseType for Result<&'_ ty::List<Ty<'_>>, ty::util::AlwaysRequiresDrop> {
     [u8; size_of::<Result<&'static ty::List<Ty<'static>>, ty::util::AlwaysRequiresDrop>>()];
 }
 
+impl EraseType for Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> {
+    type Result = [u8; size_of::<Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder>>()];
+}
+
 impl<T> EraseType for Option<&'_ T> {
     type Result = [u8; size_of::<Option<&'static ()>>()];
 }
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index 94ae0dcb517..bf340846f10 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -26,7 +26,7 @@ use crate::mir::interpret::{
 use crate::mir::interpret::{LitToConstError, LitToConstInput};
 use crate::mir::mono::CodegenUnit;
 use crate::query::erase::{erase, restore, Erase};
-use crate::query::plumbing::{query_ensure, query_get_at, DynamicQuery};
+use crate::query::plumbing::{query_ensure, query_get_at, CyclePlaceholder, DynamicQuery};
 use crate::thir;
 use crate::traits::query::{
     CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
@@ -243,6 +243,16 @@ rustc_queries! {
         feedable
     }
 
+    /// Specialized instance of `type_of` that detects cycles that are due to
+    /// revealing the opaque type because of an auto trait bound. Unless `CyclePlaceholder` needs
+    /// to be handled separately, call `type_of` instead.
+ query type_of_opaque(key: DefId) -> Result<ty::EarlyBinder<Ty<'tcx>>, CyclePlaceholder> { + desc { |tcx| + "computing type of opaque `{path}`", + path = tcx.def_path_str(key), + } + } + query collect_return_position_impl_trait_in_trait_tys(key: DefId) -> Result<&'tcx FxHashMap<DefId, ty::EarlyBinder<Ty<'tcx>>>, ErrorGuaranteed> { diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index a1aac284621..a342b5231e9 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -19,7 +19,7 @@ use rustc_query_system::dep_graph::SerializedDepNodeIndex; pub(crate) use rustc_query_system::query::QueryJobId; use rustc_query_system::query::*; use rustc_query_system::HandleCycleError; -use rustc_span::{Span, DUMMY_SP}; +use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP}; use std::ops::Deref; pub struct QueryKeyStringCache { @@ -52,7 +52,8 @@ pub struct DynamicQuery<'tcx, C: QueryCache> { pub loadable_from_disk: fn(tcx: TyCtxt<'tcx>, key: &C::Key, index: SerializedDepNodeIndex) -> bool, pub hash_result: HashResult<C::Value>, - pub value_from_cycle_error: fn(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> C::Value, + pub value_from_cycle_error: + fn(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>], guar: ErrorGuaranteed) -> C::Value, pub format_value: fn(&C::Value) -> String, } @@ -629,3 +630,6 @@ impl<'tcx> TyCtxtAt<'tcx> { .unwrap_or_else(|| bug!("def_kind: unsupported node: {:?}", def_id)) } } + +#[derive(Copy, Clone, Debug, HashStable)] +pub struct CyclePlaceholder(pub ErrorGuaranteed); diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index be839e03cff..e2f9e299566 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -1296,25 +1296,26 @@ macro_rules! sty_debug_print { }; $(let mut $variant = total;)* - let shards = tcx.interners.type_.lock_shards(); - let types = shards.iter().flat_map(|shard| shard.keys()); - for &InternedInSet(t) in types { - let variant = match t.internee { - ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | - ty::Float(..) | ty::Str | ty::Never => continue, - ty::Error(_) => /* unimportant */ continue, - $(ty::$variant(..) => &mut $variant,)* - }; - let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER); - let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER); - let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER); - - variant.total += 1; - total.total += 1; - if lt { total.lt_infer += 1; variant.lt_infer += 1 } - if ty { total.ty_infer += 1; variant.ty_infer += 1 } - if ct { total.ct_infer += 1; variant.ct_infer += 1 } - if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 } + for shard in tcx.interners.type_.lock_shards() { + let types = shard.keys(); + for &InternedInSet(t) in types { + let variant = match t.internee { + ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | + ty::Float(..) | ty::Str | ty::Never => continue, + ty::Error(_) => /* unimportant */ continue, + $(ty::$variant(..) 
=> &mut $variant,)* + }; + let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER); + let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER); + let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER); + + variant.total += 1; + total.total += 1; + if lt { total.lt_infer += 1; variant.lt_infer += 1 } + if ty { total.ty_infer += 1; variant.ty_infer += 1 } + if ct { total.ct_infer += 1; variant.ct_infer += 1 } + if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 } + } } writeln!(fmt, "Ty interner total ty lt ct all")?; $(writeln!(fmt, " {:18}: {uses:6} {usespc:4.1}%, \ diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index e362b3477c9..9a0e72d7b64 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -4,7 +4,9 @@ use crate::query::TyCtxtAt; use crate::ty::normalize_erasing_regions::NormalizationError; use crate::ty::{self, ConstKind, ReprOptions, Ty, TyCtxt, TypeVisitableExt}; use rustc_error_messages::DiagnosticMessage; -use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic}; +use rustc_errors::{ + DiagnosticArgValue, DiagnosticBuilder, Handler, IntoDiagnostic, IntoDiagnosticArg, +}; use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_index::IndexVec; @@ -265,6 +267,12 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } +impl<'tcx> IntoDiagnosticArg for LayoutError<'tcx> { + fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> { + self.to_string().into_diagnostic_arg() + } +} + #[derive(Clone, Copy)] pub struct LayoutCx<'tcx, C> { pub tcx: C, diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 8fedf4dca95..1fdc4f7500f 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -162,8 +162,6 @@ pub struct ResolverOutputs { #[derive(Debug)] pub struct ResolverGlobalCtxt { pub visibilities: FxHashMap<LocalDefId, Visibility>, - /// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error. - pub has_pub_restricted: bool, /// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`. 
pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>, pub effective_visibilities: EffectiveVisibilities, @@ -238,7 +236,7 @@ pub struct ImplHeader<'tcx> { pub impl_def_id: DefId, pub self_ty: Ty<'tcx>, pub trait_ref: Option<TraitRef<'tcx>>, - pub predicates: Vec<(Predicate<'tcx>, Span)>, + pub predicates: Vec<Predicate<'tcx>>, } #[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable)] diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs index 384a368434a..962faf56cb8 100644 --- a/compiler/rustc_middle/src/values.rs +++ b/compiler/rustc_middle/src/values.rs @@ -1,4 +1,5 @@ use crate::dep_graph::DepKind; +use crate::query::plumbing::CyclePlaceholder; use rustc_data_structures::fx::FxHashSet; use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan}; use rustc_hir as hir; @@ -8,20 +9,38 @@ use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_query_system::query::QueryInfo; use rustc_query_system::Value; use rustc_span::def_id::LocalDefId; -use rustc_span::Span; +use rustc_span::{ErrorGuaranteed, Span}; use std::fmt::Write; impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Ty<'_> { - fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo<DepKind>]) -> Self { + fn from_cycle_error( + tcx: TyCtxt<'tcx>, + _: &[QueryInfo<DepKind>], + guar: ErrorGuaranteed, + ) -> Self { // SAFETY: This is never called when `Self` is not `Ty<'tcx>`. // FIXME: Represent the above fact in the trait system somehow. - unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(Ty::new_misc_error(tcx)) } + unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(Ty::new_error(tcx, guar)) } + } +} + +impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> { + fn from_cycle_error( + _tcx: TyCtxt<'tcx>, + _: &[QueryInfo<DepKind>], + guar: ErrorGuaranteed, + ) -> Self { + Err(CyclePlaceholder(guar)) } } impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::SymbolName<'_> { - fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo<DepKind>]) -> Self { + fn from_cycle_error( + tcx: TyCtxt<'tcx>, + _: &[QueryInfo<DepKind>], + _guar: ErrorGuaranteed, + ) -> Self { // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`. // FIXME: Represent the above fact in the trait system somehow. 
unsafe { @@ -33,8 +52,12 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::SymbolName<'_> { } impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::Binder<'_, ty::FnSig<'_>> { - fn from_cycle_error(tcx: TyCtxt<'tcx>, stack: &[QueryInfo<DepKind>]) -> Self { - let err = Ty::new_misc_error(tcx); + fn from_cycle_error( + tcx: TyCtxt<'tcx>, + stack: &[QueryInfo<DepKind>], + guar: ErrorGuaranteed, + ) -> Self { + let err = Ty::new_error(tcx, guar); let arity = if let Some(frame) = stack.get(0) && frame.query.dep_kind == DepKind::fn_sig @@ -63,7 +86,11 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::Binder<'_, ty::FnSig<'_>> { } impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability { - fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self { + fn from_cycle_error( + tcx: TyCtxt<'tcx>, + cycle: &[QueryInfo<DepKind>], + _guar: ErrorGuaranteed, + ) -> Self { let mut item_and_field_ids = Vec::new(); let mut representable_ids = FxHashSet::default(); for info in cycle { @@ -95,22 +122,35 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability { } impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<Ty<'_>> { - fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self { - ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle)) + fn from_cycle_error( + tcx: TyCtxt<'tcx>, + cycle: &[QueryInfo<DepKind>], + guar: ErrorGuaranteed, + ) -> Self { + ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle, guar)) } } impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<ty::Binder<'_, ty::FnSig<'_>>> { - fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self { - ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle)) + fn from_cycle_error( + tcx: TyCtxt<'tcx>, + cycle: &[QueryInfo<DepKind>], + guar: ErrorGuaranteed, + ) -> Self { + ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle, guar)) } } impl<'tcx, T> Value<TyCtxt<'tcx>, DepKind> for Result<T, &'_ ty::layout::LayoutError<'_>> { - fn from_cycle_error(_tcx: TyCtxt<'tcx>, _cycle: &[QueryInfo<DepKind>]) -> Self { + fn from_cycle_error( + _tcx: TyCtxt<'tcx>, + _cycle: &[QueryInfo<DepKind>], + _guar: ErrorGuaranteed, + ) -> Self { // tcx.arena.alloc cannot be used because we are not allowed to use &'tcx LayoutError under // min_specialization. Since this is an error path anyways, leaking doesn't matter (and really, // tcx.arena.alloc is pretty much equal to leaking). 
+        // FIXME: `Cycle` should carry the ErrorGuaranteed
         Err(Box::leak(Box::new(ty::layout::LayoutError::Cycle)))
     }
 }
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index a5c86e31a29..1e0a47ead8c 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -159,52 +159,44 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 }
             }
             ExprKind::LogicalOp { op, lhs, rhs } => {
-                // And:
-                //
-                // [block: If(lhs)] -true-> [else_block: dest = (rhs)]
-                //        | (false)
-                //  [shortcircuit_block: dest = false]
-                //
-                // Or:
-                //
-                // [block: If(lhs)] -false-> [else_block: dest = (rhs)]
-                //        | (true)
-                //  [shortcircuit_block: dest = true]
-
-                let (shortcircuit_block, mut else_block, join_block) = (
-                    this.cfg.start_new_block(),
-                    this.cfg.start_new_block(),
-                    this.cfg.start_new_block(),
-                );
-
-                let lhs = unpack!(block = this.as_local_operand(block, &this.thir[lhs]));
-                let blocks = match op {
-                    LogicalOp::And => (else_block, shortcircuit_block),
-                    LogicalOp::Or => (shortcircuit_block, else_block),
+                let condition_scope = this.local_scope();
+                let source_info = this.source_info(expr.span);
+                // We first evaluate the left-hand side of the predicate ...
+                let (then_block, else_block) =
+                    this.in_if_then_scope(condition_scope, expr.span, |this| {
+                        this.then_else_break(
+                            block,
+                            &this.thir[lhs],
+                            Some(condition_scope),
+                            condition_scope,
+                            source_info,
+                        )
+                    });
+                let (short_circuit, continuation, constant) = match op {
+                    LogicalOp::And => (else_block, then_block, false),
+                    LogicalOp::Or => (then_block, else_block, true),
                 };
-                let term = TerminatorKind::if_(lhs, blocks.0, blocks.1);
-                this.cfg.terminate(block, source_info, term);
-
+                // At this point, the control flow splits into a short-circuiting path
+                // and a continuation path.
+                // - If the operator is `&&`, passing `lhs` leads to continuation of evaluation on `rhs`;
+                //   failing it leads to the short-circuiting path which assigns `false` to the place.
+                // - If the operator is `||`, failing `lhs` leads to continuation of evaluation on `rhs`;
+                //   passing it leads to the short-circuiting path which assigns `true` to the place.
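                // In the notation of the diagrams this code replaces (an
                // illustration, not part of the patch), the `&&` case becomes:
                //
                //   [block] --lhs true--> [continuation: dest = rhs] --> [target]
                //      | (lhs false)                                        ^
                //   [short_circuit: dest = false] --------------------------+
                //
                // and `||` is the same with the roles of true/false swapped.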
this.cfg.push_assign_constant( - shortcircuit_block, + short_circuit, source_info, destination, Constant { - span: expr_span, + span: expr.span, user_ty: None, - literal: match op { - LogicalOp::And => ConstantKind::from_bool(this.tcx, false), - LogicalOp::Or => ConstantKind::from_bool(this.tcx, true), - }, + literal: ConstantKind::from_bool(this.tcx, constant), }, ); - this.cfg.goto(shortcircuit_block, source_info, join_block); - - let rhs = unpack!(else_block = this.as_local_operand(else_block, &this.thir[rhs])); - this.cfg.push_assign(else_block, source_info, destination, Rvalue::Use(rhs)); - this.cfg.goto(else_block, source_info, join_block); - - join_block.unit() + let rhs = unpack!(this.expr_into_dest(destination, continuation, &this.thir[rhs])); + let target = this.cfg.start_new_block(); + this.cfg.goto(rhs, source_info, target); + this.cfg.goto(short_circuit, source_info, target); + target.unit() } ExprKind::Loop { body } => { // [block] diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs index 3c450740712..5ec216cea61 100644 --- a/compiler/rustc_mir_build/src/build/matches/mod.rs +++ b/compiler/rustc_mir_build/src/build/matches/mod.rs @@ -64,6 +64,43 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { rhs_then_block.unit() } + ExprKind::LogicalOp { op: LogicalOp::Or, lhs, rhs } => { + let local_scope = this.local_scope(); + let (lhs_success_block, failure_block) = + this.in_if_then_scope(local_scope, expr_span, |this| { + this.then_else_break( + block, + &this.thir[lhs], + temp_scope_override, + local_scope, + variable_source_info, + ) + }); + let rhs_success_block = unpack!(this.then_else_break( + failure_block, + &this.thir[rhs], + temp_scope_override, + break_scope, + variable_source_info, + )); + this.cfg.goto(lhs_success_block, variable_source_info, rhs_success_block); + rhs_success_block.unit() + } + ExprKind::Unary { op: UnOp::Not, arg } => { + let local_scope = this.local_scope(); + let (success_block, failure_block) = + this.in_if_then_scope(local_scope, expr_span, |this| { + this.then_else_break( + block, + &this.thir[arg], + temp_scope_override, + local_scope, + variable_source_info, + ) + }); + this.break_for_else(success_block, break_scope, variable_source_info); + failure_block.unit() + } ExprKind::Scope { region_scope, lint_level, value } => { let region_scope = (region_scope, this.source_info(expr_span)); this.in_scope(region_scope, lint_level, |this| { @@ -76,6 +113,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ) }) } + ExprKind::Use { source } => this.then_else_break( + block, + &this.thir[source], + temp_scope_override, + break_scope, + variable_source_info, + ), ExprKind::Let { expr, ref pat } => this.lower_let_expr( block, &this.thir[expr], diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs index f46cf0dc0ff..95dced644e1 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs @@ -720,32 +720,31 @@ fn non_exhaustive_match<'p, 'tcx>( }; }; - let is_variant_list_non_exhaustive = matches!(scrut_ty.kind(), - ty::Adt(def, _) if def.is_variant_list_non_exhaustive() && !def.did().is_local()); - adt_defined_here(cx, &mut err, scrut_ty, &witnesses); - err.note(format!( - "the matched value is of type `{}`{}", - scrut_ty, - if is_variant_list_non_exhaustive { ", which is marked as non-exhaustive" } else { "" } - )); + err.note(format!("the matched value is of type `{}`", 
scrut_ty));
 
     if !is_empty_match && witnesses.len() == 1 {
         let mut non_exhaustive_tys = FxHashSet::default();
         collect_non_exhaustive_tys(&witnesses[0], &mut non_exhaustive_tys);
 
         for ty in non_exhaustive_tys {
-            if ty == cx.tcx.types.usize || ty == cx.tcx.types.isize {
+            if ty.is_ptr_sized_integral() {
                 err.note(format!(
                     "`{ty}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
-                     exhaustively",
-                ));
+                         exhaustively",
+                    ));
                 if cx.tcx.sess.is_nightly_build() {
                     err.help(format!(
                         "add `#![feature(precise_pointer_size_matching)]` to the crate attributes to \
-                         enable precise `{ty}` matching",
-                    ));
+                             enable precise `{ty}` matching",
+                        ));
                 }
+            } else if ty == cx.tcx.types.str_ {
+                err.note(format!(
+                    "`&str` cannot be matched exhaustively, so a wildcard `_` is necessary",
+                ));
+            } else if cx.is_foreign_non_exhaustive_enum(ty) {
+                err.note(format!("`{ty}` is marked as non-exhaustive, so a wildcard `_` is necessary to match exhaustively"));
             }
         }
     }
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index c08fe54c39c..7736183027f 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -555,8 +555,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
                 subpattern: pattern,
                 ascription: Ascription {
                     annotation,
-                    /// Note that use `Contravariant` here. See the
-                    /// `variance` field documentation for details.
+                    // Note that we use `Contravariant` here. See the
+                    // `variance` field documentation for details.
                     variance: ty::Variance::Contravariant,
                 },
             },
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
index 08cfe98bb68..21031e8ba9d 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -618,10 +618,15 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
         let new_witnesses = if let Constructor::Missing { .. } = ctor {
             // We got the special `Missing` constructor, so each of the missing constructors
             // gives a new pattern that is not caught by the match. We list those patterns.
-            let new_patterns = if pcx.is_non_exhaustive {
-                // Here we don't want the user to try to list all variants, we want them to add
-                // a wildcard, so we only suggest that.
-                vec![DeconstructedPat::wildcard(pcx.ty, pcx.span)]
+            if pcx.is_non_exhaustive {
+                witnesses
+                    .into_iter()
+                    // Here we don't want the user to try to list all variants, we want them to add
+                    // a wildcard, so we only suggest that.
+                    .map(|witness| {
+                        witness.apply_constructor(pcx, &Constructor::NonExhaustive)
+                    })
+                    .collect()
             } else {
                 let mut split_wildcard = SplitWildcard::new(pcx);
                 split_wildcard.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
@@ -633,7 +638,7 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
                 // constructor, that matches everything that can be built with
                 // it. For example, if `ctor` is a `Constructor::Variant` for
                 // `Option::Some`, we get the pattern `Some(_)`.
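To make the new diagnostics concrete, here is a hypothetical program exercising the non-exhaustive-enum path (the crate `dep` and enum `E` are invented for illustration):

    // In an external crate `dep`:
    #[non_exhaustive]
    pub enum E { A, B }

    // In the crate being compiled:
    fn f(e: dep::E) {
        // error[E0004]: non-exhaustive patterns: `_` not covered
        // note: `dep::E` is marked as non-exhaustive, so a wildcard `_` is
        // necessary to match exhaustively
        match e {
            dep::E::A => {}
            dep::E::B => {}
        }
    }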
- let mut new: Vec<DeconstructedPat<'_, '_>> = split_wildcard + let mut new_patterns: Vec<DeconstructedPat<'_, '_>> = split_wildcard .iter_missing(pcx) .filter_map(|missing_ctor| { // Check if this variant is marked `doc(hidden)` @@ -648,27 +653,25 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> { .collect(); if hide_variant_show_wild { - new.push(DeconstructedPat::wildcard(pcx.ty, pcx.span)); + new_patterns.push(DeconstructedPat::wildcard(pcx.ty, pcx.span)); } - new - }; - - witnesses - .into_iter() - .flat_map(|witness| { - new_patterns.iter().map(move |pat| { - Witness( - witness - .0 - .iter() - .chain(once(pat)) - .map(DeconstructedPat::clone_and_forget_reachability) - .collect(), - ) + witnesses + .into_iter() + .flat_map(|witness| { + new_patterns.iter().map(move |pat| { + Witness( + witness + .0 + .iter() + .chain(once(pat)) + .map(DeconstructedPat::clone_and_forget_reachability) + .collect(), + ) + }) }) - }) - .collect() + .collect() + } } else { witnesses .into_iter() diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs index a793b384d81..0aaa3f75c82 100644 --- a/compiler/rustc_mir_transform/src/const_prop.rs +++ b/compiler/rustc_mir_transform/src/const_prop.rs @@ -376,6 +376,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { ) .expect("failed to push initial stack frame"); + for local in body.local_decls.indices() { + // Mark everything initially live. + // This is somewhat dicey since some of them might be unsized and it is incoherent to + // mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter + // stopping us before those unsized immediates can cause issues deeper in the + // interpreter. + ecx.frame_mut().locals[local].value = + LocalValue::Live(interpret::Operand::Immediate(Immediate::Uninit)); + } + ConstPropagator { ecx, tcx, param_env, local_decls: &dummy_body.local_decls } } diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs index da8913d604b..dec79ddf58c 100644 --- a/compiler/rustc_mir_transform/src/const_prop_lint.rs +++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs @@ -206,6 +206,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { ) .expect("failed to push initial stack frame"); + for local in body.local_decls.indices() { + // Mark everything initially live. + // This is somewhat dicey since some of them might be unsized and it is incoherent to + // mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter + // stopping us before those unsized immediates can cause issues deeper in the + // interpreter. + ecx.frame_mut().locals[local].value = + LocalValue::Live(interpret::Operand::Immediate(Immediate::Uninit)); + } + ConstPropagator { ecx, tcx, @@ -273,7 +283,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { // dedicated error variants should be introduced instead. assert!( !error.kind().formatted_string(), - "const-prop encountered formatting error: {error:?}", + "const-prop encountered formatting error: {}", + self.ecx.format_error(error), ); None } diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 9551c7a2a8d..7d4c4a823a8 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -388,14 +388,11 @@ impl<'tcx> Inliner<'tcx> { return Err("never inline hint"); } - // Only inline local functions if they would be eligible for cross-crate - // inlining. 
This is to ensure that the final crate doesn't have MIR that - // reference unexported symbols - if callsite.callee.def_id().is_local() { - let is_generic = callsite.callee.args.non_erasable_generics().next().is_some(); - if !is_generic && !callee_attrs.requests_inline() { - return Err("not exported"); - } + // Reachability pass defines which functions are eligible for inlining. Generally inlining + // other functions is incorrect because they could reference symbols that aren't exported. + let is_generic = callsite.callee.args.non_erasable_generics().next().is_some(); + if !is_generic && !callee_attrs.requests_inline() { + return Err("not exported"); } if callsite.fn_sig.c_variadic() { diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index f917e52109a..bf88360a8c1 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -590,6 +590,8 @@ struct MirUsedCollector<'a, 'tcx> { body: &'a mir::Body<'tcx>, output: &'a mut MonoItems<'tcx>, instance: Instance<'tcx>, + /// Spans for move size lints already emitted. Helps avoid duplicate lints. + move_size_spans: Vec<Span>, } impl<'a, 'tcx> MirUsedCollector<'a, 'tcx> { @@ -604,6 +606,45 @@ impl<'a, 'tcx> MirUsedCollector<'a, 'tcx> { ty::EarlyBinder::bind(value), ) } + + fn check_move_size(&mut self, limit: usize, operand: &mir::Operand<'tcx>, location: Location) { + let limit = Size::from_bytes(limit); + let ty = operand.ty(self.body, self.tcx); + let ty = self.monomorphize(ty); + let Ok(layout) = self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)) else { return }; + if layout.size <= limit { + return; + } + debug!(?layout); + let source_info = self.body.source_info(location); + debug!(?source_info); + for span in &self.move_size_spans { + if span.overlaps(source_info.span) { + return; + } + } + let lint_root = source_info.scope.lint_root(&self.body.source_scopes); + debug!(?lint_root); + let Some(lint_root) = lint_root else { + // This happens when the issue is in a function from a foreign crate that + // we monomorphized in the current crate. We can't get a `HirId` for things + // in other crates. + // FIXME: Find out where to report the lint on. Maybe simply crate-level lint root + // but correct span? This would make the lint at least accept crate-level lint attributes. 
+            return;
+        };
+        self.tcx.emit_spanned_lint(
+            LARGE_ASSIGNMENTS,
+            lint_root,
+            source_info.span,
+            LargeAssignmentsLint {
+                span: source_info.span,
+                size: layout.size.bytes(),
+                limit: limit.bytes(),
+            },
+        );
+        self.move_size_spans.push(source_info.span);
+    }
 }
 
 impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
@@ -803,40 +844,9 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
     fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
         self.super_operand(operand, location);
 
-        let limit = self.tcx.move_size_limit().0;
-        if limit == 0 {
-            return;
-        }
-        let limit = Size::from_bytes(limit);
-        let ty = operand.ty(self.body, self.tcx);
-        let ty = self.monomorphize(ty);
-        let layout = self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty));
-        if let Ok(layout) = layout {
-            if layout.size > limit {
-                debug!(?layout);
-                let source_info = self.body.source_info(location);
-                debug!(?source_info);
-                let lint_root = source_info.scope.lint_root(&self.body.source_scopes);
-                debug!(?lint_root);
-                let Some(lint_root) = lint_root else {
-                    // This happens when the issue is in a function from a foreign crate that
-                    // we monomorphized in the current crate. We can't get a `HirId` for things
-                    // in other crates.
-                    // FIXME: Find out where to report the lint on. Maybe simply crate-level lint root
-                    // but correct span? This would make the lint at least accept crate-level lint attributes.
-                    return;
-                };
-                self.tcx.emit_spanned_lint(
-                    LARGE_ASSIGNMENTS,
-                    lint_root,
-                    source_info.span,
-                    LargeAssignmentsLint {
-                        span: source_info.span,
-                        size: layout.size.bytes(),
-                        limit: limit.bytes(),
-                    },
-                )
-            }
+        let move_size_limit = self.tcx.move_size_limit().0;
+        if move_size_limit > 0 {
+            self.check_move_size(move_size_limit, operand, location);
         }
     }
 
@@ -1363,7 +1373,8 @@ fn collect_used_items<'tcx>(
     output: &mut MonoItems<'tcx>,
 ) {
     let body = tcx.instance_mir(instance.def);
-    MirUsedCollector { tcx, body: &body, output, instance }.visit_body(&body);
+    MirUsedCollector { tcx, body: &body, output, instance, move_size_spans: vec![] }
+        .visit_body(&body);
 }
 
 #[instrument(skip(tcx, output), level = "debug")]
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 5db31c23478..233c7016417 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -1851,21 +1851,11 @@ impl<'a> Parser<'a> {
         attrs: AttrVec,
     ) -> PResult<'a, FieldDef> {
         let name = self.parse_field_ident(adt_ty, lo)?;
-        // Parse the macro invocation and recover
         if self.token.kind == token::Not {
             if let Err(mut err) = self.unexpected::<FieldDef>() {
-                err.subdiagnostic(MacroExpandsToAdtField { adt_ty }).emit();
-                self.bump();
-                self.parse_delim_args()?;
-                return Ok(FieldDef {
-                    span: DUMMY_SP,
-                    ident: None,
-                    vis,
-                    id: DUMMY_NODE_ID,
-                    ty: self.mk_ty(DUMMY_SP, TyKind::Err),
-                    attrs,
-                    is_placeholder: false,
-                });
+                // We encountered a macro invocation where a field was expected,
+                // so emit the error instead of recovering with a dummy field.
+                err.subdiagnostic(MacroExpandsToAdtField { adt_ty });
+                return Err(err);
             }
         }
         self.expect_field_ty_separator()?;
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
index 661113666cd..a25b0f1f893 100644
--- a/compiler/rustc_parse/src/parser/ty.rs
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -891,18 +891,32 @@ impl<'a> Parser<'a> {
         // that we do not use the try operator when parsing the type because
         // if it fails then we get a parser error which we don't want (we're trying
         // to recover from errors, not make more).
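A sketch of the `rustc_parse` field change above: a macro invocation in named-field position used to be consumed and replaced by a placeholder field, and is now surfaced as a parse error directly. Hypothetical input (macro name invented; exact diagnostic wording may differ):

    struct S {
        // Previously: recovered into a dummy field after emitting the
        // subdiagnostic. Now: hard parse error, with the note that macros
        // cannot expand to struct fields.
        make_field!(),
    }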
- let path = if self.may_recover() - && matches!(ty.kind, TyKind::Ptr(..) | TyKind::Ref(..)) - && let TyKind::Path(_, path) = &ty.peel_refs().kind { - // Just get the indirection part of the type. - let span = ty.span.until(path.span); - - err.span_suggestion_verbose( - span, - "consider removing the indirection", - "", - Applicability::MaybeIncorrect, - ); + let path = if self.may_recover() { + let (span, message, sugg, path, applicability) = match &ty.kind { + TyKind::Ptr(..) | TyKind::Ref(..) if let TyKind::Path(_, path) = &ty.peel_refs().kind => { + ( + ty.span.until(path.span), + "consider removing the indirection", + "", + path, + Applicability::MaybeIncorrect + ) + } + TyKind::ImplTrait(_, bounds) + if let [GenericBound::Trait(tr, ..), ..] = bounds.as_slice() => + { + ( + ty.span.until(tr.span), + "use the trait bounds directly", + "", + &tr.trait_ref.path, + Applicability::MachineApplicable + ) + } + _ => return Err(err) + }; + + err.span_suggestion_verbose(span, message, sugg, applicability); path.clone() } else { diff --git a/compiler/rustc_privacy/messages.ftl b/compiler/rustc_privacy/messages.ftl index b91e0d18a80..7785f1a7f81 100644 --- a/compiler/rustc_privacy/messages.ftl +++ b/compiler/rustc_privacy/messages.ftl @@ -11,11 +11,6 @@ privacy_in_public_interface = {$vis_descr} {$kind} `{$descr}` in public interfac privacy_item_is_private = {$kind} `{$descr}` is private .label = private {$kind} -privacy_private_in_public_lint = - {$vis_descr} {$kind} `{$descr}` in public interface (error {$kind -> - [trait] E0445 - *[other] E0446 - }) privacy_private_interface_or_bounds_lint = {$ty_kind} `{$ty_descr}` is more private than the item `{$item_descr}` .item_label = {$item_kind} `{$item_descr}` is reachable at visibility `{$item_vis_descr}` diff --git a/compiler/rustc_privacy/src/errors.rs b/compiler/rustc_privacy/src/errors.rs index da18f0c8268..b1242f82f4f 100644 --- a/compiler/rustc_privacy/src/errors.rs +++ b/compiler/rustc_privacy/src/errors.rs @@ -47,21 +47,6 @@ pub struct UnnamedItemIsPrivate { pub kind: &'static str, } -// Duplicate of `InPublicInterface` but with a different error code, shares the same slug. -#[derive(Diagnostic)] -#[diag(privacy_in_public_interface, code = "E0445")] -pub struct InPublicInterfaceTraits<'a> { - #[primary_span] - #[label] - pub span: Span, - pub vis_descr: &'static str, - pub kind: &'a str, - pub descr: DiagnosticArgFromDisplay<'a>, - #[label(privacy_visibility_label)] - pub vis_span: Span, -} - -// Duplicate of `InPublicInterfaceTraits` but with a different error code, shares the same slug. 
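With the dedicated E0445 diagnostic removed above, a private trait leaking through a public signature is reported through the remaining lint machinery, whose message ("... is more private than the item ...") is defined in the .ftl hunk above. A hypothetical example (the lint level depends on the defaults at this point in the development):

    trait PrivTrait {}                 // private trait
    pub fn take(_: &dyn PrivTrait) {}
    // lint: trait `PrivTrait` is more private than the item `take`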
#[derive(Diagnostic)] #[diag(privacy_in_public_interface, code = "E0446")] pub struct InPublicInterface<'a> { @@ -92,14 +77,6 @@ pub struct FromPrivateDependencyInPublicInterface<'a> { } #[derive(LintDiagnostic)] -#[diag(privacy_private_in_public_lint)] -pub struct PrivateInPublicLint<'a> { - pub vis_descr: &'static str, - pub kind: &'a str, - pub descr: DiagnosticArgFromDisplay<'a>, -} - -#[derive(LintDiagnostic)] #[diag(privacy_unnameable_types_lint)] pub struct UnnameableTypesLint<'a> { #[label] diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index 0eb344ba690..906a36cdb25 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -22,7 +22,7 @@ use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, CRATE_DEF_ID}; use rustc_hir::intravisit::{self, Visitor}; -use rustc_hir::{AssocItemKind, ForeignItemKind, HirIdSet, ItemId, Node, PatKind}; +use rustc_hir::{AssocItemKind, ForeignItemKind, ItemId, Node, PatKind}; use rustc_middle::bug; use rustc_middle::hir::nested_filter; use rustc_middle::middle::privacy::{EffectiveVisibilities, EffectiveVisibility, Level}; @@ -42,8 +42,8 @@ use std::{fmt, mem}; use errors::{ FieldIsPrivate, FieldIsPrivateLabel, FromPrivateDependencyInPublicInterface, InPublicInterface, - InPublicInterfaceTraits, ItemIsPrivate, PrivateInPublicLint, PrivateInterfacesOrBoundsLint, - ReportEffectiveVisibility, UnnameableTypesLint, UnnamedItemIsPrivate, + ItemIsPrivate, PrivateInterfacesOrBoundsLint, ReportEffectiveVisibility, UnnameableTypesLint, + UnnamedItemIsPrivate, }; fluent_messages! { "../messages.ftl" } @@ -364,6 +364,7 @@ trait VisibilityLike: Sized { find.min } } + impl VisibilityLike for ty::Visibility { const MAX: Self = ty::Visibility::Public; fn new_min<const SHALLOW: bool>( @@ -1383,345 +1384,6 @@ impl<'tcx> DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> { } /////////////////////////////////////////////////////////////////////////////// -/// Obsolete visitors for checking for private items in public interfaces. -/// These visitors are supposed to be kept in frozen state and produce an -/// "old error node set". For backward compatibility the new visitor reports -/// warnings instead of hard errors when the erroneous node is not in this old set. -/////////////////////////////////////////////////////////////////////////////// - -struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { - tcx: TyCtxt<'tcx>, - effective_visibilities: &'a EffectiveVisibilities, - in_variant: bool, - // Set of errors produced by this obsolete visitor. - old_error_set: HirIdSet, -} - -struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> { - inner: &'a ObsoleteVisiblePrivateTypesVisitor<'b, 'tcx>, - /// Whether the type refers to private types. - contains_private: bool, - /// Whether we've recurred at all (i.e., if we're pointing at the - /// first type on which `visit_ty` was called). - at_outer_type: bool, - /// Whether that first type is a public path. - outer_type_is_public_path: bool, -} - -impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { - fn path_is_private_type(&self, path: &hir::Path<'_>) -> bool { - let did = match path.res { - Res::PrimTy(..) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } | Res::Err => { - return false; - } - res => res.def_id(), - }; - - // A path can only be private if: - // it's in this crate... - if let Some(did) = did.as_local() { - // .. 
and it corresponds to a private type in the AST (this returns - // `None` for type parameters). - match self.tcx.hir().find(self.tcx.hir().local_def_id_to_hir_id(did)) { - Some(Node::Item(_)) => !self.tcx.visibility(did).is_public(), - Some(_) | None => false, - } - } else { - false - } - } - - fn trait_is_public(&self, trait_id: LocalDefId) -> bool { - // FIXME: this would preferably be using `exported_items`, but all - // traits are exported currently (see `EmbargoVisitor.exported_trait`). - self.effective_visibilities.is_directly_public(trait_id) - } - - fn check_generic_bound(&mut self, bound: &hir::GenericBound<'_>) { - if let hir::GenericBound::Trait(ref trait_ref, _) = *bound { - if self.path_is_private_type(trait_ref.trait_ref.path) { - self.old_error_set.insert(trait_ref.trait_ref.hir_ref_id); - } - } - } - - fn item_is_public(&self, def_id: LocalDefId) -> bool { - self.effective_visibilities.is_reachable(def_id) || self.tcx.visibility(def_id).is_public() - } -} - -impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> { - fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) { - match generic_arg { - hir::GenericArg::Type(t) => self.visit_ty(t), - hir::GenericArg::Infer(inf) => self.visit_ty(&inf.to_ty()), - hir::GenericArg::Lifetime(_) | hir::GenericArg::Const(_) => {} - } - } - - fn visit_ty(&mut self, ty: &hir::Ty<'_>) { - if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = ty.kind { - if self.inner.path_is_private_type(path) { - self.contains_private = true; - // Found what we're looking for, so let's stop working. - return; - } - } - if let hir::TyKind::Path(_) = ty.kind { - if self.at_outer_type { - self.outer_type_is_public_path = true; - } - } - self.at_outer_type = false; - intravisit::walk_ty(self, ty) - } - - // Don't want to recurse into `[, .. expr]`. - fn visit_expr(&mut self, _: &hir::Expr<'_>) {} -} - -impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { - type NestedFilter = nested_filter::All; - - /// We want to visit items in the context of their containing - /// module and so forth, so supply a crate for doing a deep walk. - fn nested_visit_map(&mut self) -> Self::Map { - self.tcx.hir() - } - - fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) { - match item.kind { - // Contents of a private mod can be re-exported, so we need - // to check internals. - hir::ItemKind::Mod(_) => {} - - // An `extern {}` doesn't introduce a new privacy - // namespace (the contents have their own privacies). - hir::ItemKind::ForeignMod { .. } => {} - - hir::ItemKind::Trait(.., bounds, _) => { - if !self.trait_is_public(item.owner_id.def_id) { - return; - } - - for bound in bounds.iter() { - self.check_generic_bound(bound) - } - } - - // Impls need some special handling to try to offer useful - // error messages without (too many) false positives - // (i.e., we could just return here to not check them at - // all, or some worse estimation of whether an impl is - // publicly visible). - hir::ItemKind::Impl(ref impl_) => { - // `impl [... for] Private` is never visible. - let self_contains_private; - // `impl [... for] Public<...>`, but not `impl [... for] - // Vec<Public>` or `(Public,)`, etc. 
- let self_is_public_path; - - // Check the properties of the `Self` type: - { - let mut visitor = ObsoleteCheckTypeForPrivatenessVisitor { - inner: self, - contains_private: false, - at_outer_type: true, - outer_type_is_public_path: false, - }; - visitor.visit_ty(impl_.self_ty); - self_contains_private = visitor.contains_private; - self_is_public_path = visitor.outer_type_is_public_path; - } - - // Miscellaneous info about the impl: - - // `true` iff this is `impl Private for ...`. - let not_private_trait = impl_.of_trait.as_ref().map_or( - true, // no trait counts as public trait - |tr| { - if let Some(def_id) = tr.path.res.def_id().as_local() { - self.trait_is_public(def_id) - } else { - true // external traits must be public - } - }, - ); - - // `true` iff this is a trait impl or at least one method is public. - // - // `impl Public { $( fn ...() {} )* }` is not visible. - // - // This is required over just using the methods' privacy - // directly because we might have `impl<T: Foo<Private>> ...`, - // and we shouldn't warn about the generics if all the methods - // are private (because `T` won't be visible externally). - let trait_or_some_public_method = impl_.of_trait.is_some() - || impl_.items.iter().any(|impl_item_ref| { - let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); - match impl_item.kind { - hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) => self - .effective_visibilities - .is_reachable(impl_item_ref.id.owner_id.def_id), - hir::ImplItemKind::Type(_) => false, - } - }); - - if !self_contains_private && not_private_trait && trait_or_some_public_method { - intravisit::walk_generics(self, &impl_.generics); - - match impl_.of_trait { - None => { - for impl_item_ref in impl_.items { - // This is where we choose whether to walk down - // further into the impl to check its items. We - // should only walk into public items so that we - // don't erroneously report errors for private - // types in private items. - let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); - match impl_item.kind { - hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) - if self.item_is_public(impl_item.owner_id.def_id) => - { - intravisit::walk_impl_item(self, impl_item) - } - hir::ImplItemKind::Type(..) => { - intravisit::walk_impl_item(self, impl_item) - } - _ => {} - } - } - } - Some(ref tr) => { - // Any private types in a trait impl fall into three - // categories. - // 1. mentioned in the trait definition - // 2. mentioned in the type params/generics - // 3. mentioned in the associated types of the impl - // - // Those in 1. can only occur if the trait is in - // this crate and will have been warned about on the - // trait definition (there's no need to warn twice - // so we don't check the methods). - // - // Those in 2. are warned via walk_generics and this - // call here. - intravisit::walk_path(self, tr.path); - - // Those in 3. are warned with this call. - for impl_item_ref in impl_.items { - let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); - if let hir::ImplItemKind::Type(ty) = impl_item.kind { - self.visit_ty(ty); - } - } - } - } - } else if impl_.of_trait.is_none() && self_is_public_path { - // `impl Public<Private> { ... }`. Any public static - // methods will be visible as `Public::foo`. 
- let mut found_pub_static = false; - for impl_item_ref in impl_.items { - if self - .effective_visibilities - .is_reachable(impl_item_ref.id.owner_id.def_id) - || self.tcx.visibility(impl_item_ref.id.owner_id).is_public() - { - let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); - match impl_item_ref.kind { - AssocItemKind::Const => { - found_pub_static = true; - intravisit::walk_impl_item(self, impl_item); - } - AssocItemKind::Fn { has_self: false } => { - found_pub_static = true; - intravisit::walk_impl_item(self, impl_item); - } - _ => {} - } - } - } - if found_pub_static { - intravisit::walk_generics(self, &impl_.generics) - } - } - return; - } - - // `type ... = ...;` can contain private types, because - // we're introducing a new name. - hir::ItemKind::TyAlias(..) => return, - - // Not at all public, so we don't care. - _ if !self.item_is_public(item.owner_id.def_id) => { - return; - } - - _ => {} - } - - // We've carefully constructed it so that if we're here, then - // any `visit_ty`'s will be called on things that are in - // public signatures, i.e., things that we're interested in for - // this visitor. - intravisit::walk_item(self, item); - } - - fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) { - for predicate in generics.predicates { - match predicate { - hir::WherePredicate::BoundPredicate(bound_pred) => { - for bound in bound_pred.bounds.iter() { - self.check_generic_bound(bound) - } - } - hir::WherePredicate::RegionPredicate(_) => {} - hir::WherePredicate::EqPredicate(eq_pred) => { - self.visit_ty(eq_pred.rhs_ty); - } - } - } - } - - fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) { - if self.effective_visibilities.is_reachable(item.owner_id.def_id) { - intravisit::walk_foreign_item(self, item) - } - } - - fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) { - if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = t.kind { - if self.path_is_private_type(path) { - self.old_error_set.insert(t.hir_id); - } - } - intravisit::walk_ty(self, t) - } - - fn visit_variant(&mut self, v: &'tcx hir::Variant<'tcx>) { - if self.effective_visibilities.is_reachable(v.def_id) { - self.in_variant = true; - intravisit::walk_variant(self, v); - self.in_variant = false; - } - } - - fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) { - let vis = self.tcx.visibility(s.def_id); - if vis.is_public() || self.in_variant { - intravisit::walk_field_def(self, s); - } - } - - // We don't need to introspect into these at all: an - // expression/block context can't possibly contain exported things. - // (Making them no-ops stops us from traversing the whole AST without - // having to be super careful about our `walk_...` calls above.) - fn visit_block(&mut self, _: &'tcx hir::Block<'tcx>) {} - fn visit_expr(&mut self, _: &'tcx hir::Expr<'tcx>) {} -} - -/////////////////////////////////////////////////////////////////////////////// /// SearchInterfaceForPrivateItemsVisitor traverses an item's interface and /// finds any private components in it. /// PrivateItemsInPublicInterfacesVisitor ensures there are no private types @@ -1734,7 +1396,6 @@ struct SearchInterfaceForPrivateItemsVisitor<'tcx> { /// The visitor checks that each component type is at least this visible. 
required_visibility: ty::Visibility, required_effective_vis: Option<EffectiveVisibility>, - has_old_errors: bool, in_assoc_ty: bool, in_primary_interface: bool, } @@ -1805,7 +1466,7 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> { let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id); let span = self.tcx.def_span(self.item_def_id.to_def_id()); let vis_span = self.tcx.def_span(def_id); - if !vis.is_at_least(self.required_visibility, self.tcx) { + if self.in_assoc_ty && !vis.is_at_least(self.required_visibility, self.tcx) { let vis_descr = match vis { ty::Visibility::Public => "public", ty::Visibility::Restricted(vis_def_id) => { @@ -1819,35 +1480,14 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> { } }; - if self.has_old_errors - || self.in_assoc_ty - || self.tcx.resolutions(()).has_pub_restricted - { - if kind == "trait" { - self.tcx.sess.emit_err(InPublicInterfaceTraits { - span, - vis_descr, - kind, - descr: descr.into(), - vis_span, - }); - } else { - self.tcx.sess.emit_err(InPublicInterface { - span, - vis_descr, - kind, - descr: descr.into(), - vis_span, - }); - } - } else { - self.tcx.emit_spanned_lint( - lint::builtin::PRIVATE_IN_PUBLIC, - hir_id, - span, - PrivateInPublicLint { vis_descr, kind, descr: descr.into() }, - ); - } + self.tcx.sess.emit_err(InPublicInterface { + span, + vis_descr, + kind, + descr: descr.into(), + vis_span, + }); + return false; } let Some(effective_vis) = self.required_effective_vis else { @@ -1918,7 +1558,6 @@ impl<'tcx> DefIdVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'tcx> { struct PrivateItemsInPublicInterfacesChecker<'tcx, 'a> { tcx: TyCtxt<'tcx>, - old_error_set_ancestry: HirIdSet, effective_visibilities: &'a EffectiveVisibilities, } @@ -1934,9 +1573,6 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx, '_> { item_def_id: def_id, required_visibility, required_effective_vis, - has_old_errors: self - .old_error_set_ancestry - .contains(&self.tcx.hir().local_def_id_to_hir_id(def_id)), in_assoc_ty: false, in_primary_interface: true, } @@ -2298,35 +1934,8 @@ fn effective_visibilities(tcx: TyCtxt<'_>, (): ()) -> &EffectiveVisibilities { fn check_private_in_public(tcx: TyCtxt<'_>, (): ()) { let effective_visibilities = tcx.effective_visibilities(()); - - let mut visitor = ObsoleteVisiblePrivateTypesVisitor { - tcx, - effective_visibilities, - in_variant: false, - old_error_set: Default::default(), - }; - tcx.hir().walk_toplevel_module(&mut visitor); - - let mut old_error_set_ancestry = HirIdSet::default(); - for mut id in visitor.old_error_set.iter().copied() { - loop { - if !old_error_set_ancestry.insert(id) { - break; - } - let parent = tcx.hir().parent_id(id); - if parent == id { - break; - } - id = parent; - } - } - - // Check for private types and traits in public interfaces. - let mut checker = PrivateItemsInPublicInterfacesChecker { - tcx, - old_error_set_ancestry, - effective_visibilities, - }; + // Check for private types in public interfaces. 
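After this simplification the hard-error path (E0446) fires only for associated type definitions; everything else goes through the lints. A sketch of the remaining hard-error case (names invented):

    struct Priv;
    pub trait Public { type A; }
    pub struct S;
    impl Public for S {
        type A = Priv; // error[E0446]: private type `Priv` in public interface
    }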
+ let mut checker = PrivateItemsInPublicInterfacesChecker { tcx, effective_visibilities }; for id in tcx.hir().items() { checker.check_item(id); diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml index ac697a3ae3e..a44dd5ede2f 100644 --- a/compiler/rustc_query_impl/Cargo.toml +++ b/compiler/rustc_query_impl/Cargo.toml @@ -9,7 +9,6 @@ edition = "2021" [dependencies] field-offset = "0.3.5" measureme = "10.0.0" -rustc_ast = { path = "../rustc_ast" } rustc_data_structures = { path = "../rustc_data_structures" } rustc_errors = { path = "../rustc_errors" } rustc_hir = { path = "../rustc_hir" } diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 775870106b1..9a0fcbb37a7 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -41,7 +41,7 @@ use rustc_query_system::query::{ }; use rustc_query_system::HandleCycleError; use rustc_query_system::Value; -use rustc_span::Span; +use rustc_span::{ErrorGuaranteed, Span}; #[macro_use] mod plumbing; @@ -146,8 +146,9 @@ where self, tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>], + guar: ErrorGuaranteed, ) -> Self::Value { - (self.dynamic.value_from_cycle_error)(tcx, cycle) + (self.dynamic.value_from_cycle_error)(tcx, cycle, guar) } #[inline(always)] diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index def6ac280b8..a30ea7c1ddc 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -605,8 +605,8 @@ macro_rules! define_queries { } { |_tcx, _key, _prev_index, _index| None }), - value_from_cycle_error: |tcx, cycle| { - let result: queries::$name::Value<'tcx> = Value::from_cycle_error(tcx, cycle); + value_from_cycle_error: |tcx, cycle, guar| { + let result: queries::$name::Value<'tcx> = Value::from_cycle_error(tcx, cycle, guar); erase(result) }, loadable_from_disk: |_tcx, _key, _index| { diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs index 4ba9d53a92f..d8aa377af42 100644 --- a/compiler/rustc_query_system/src/query/caches.rs +++ b/compiler/rustc_query_system/src/query/caches.rs @@ -2,7 +2,7 @@ use crate::dep_graph::DepNodeIndex; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sharded::{self, Sharded}; -use rustc_data_structures::sync::Lock; +use rustc_data_structures::sync::OnceLock; use rustc_index::{Idx, IndexVec}; use std::fmt::Debug; use std::hash::Hash; @@ -70,8 +70,7 @@ where } fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { - let shards = self.cache.lock_shards(); - for shard in shards.iter() { + for shard in self.cache.lock_shards() { for (k, v) in shard.iter() { f(k, &v.0, v.1); } @@ -88,12 +87,12 @@ impl<'tcx, V: 'tcx> CacheSelector<'tcx, V> for SingleCacheSelector { } pub struct SingleCache<V> { - cache: Lock<Option<(V, DepNodeIndex)>>, + cache: OnceLock<(V, DepNodeIndex)>, } impl<V> Default for SingleCache<V> { fn default() -> Self { - SingleCache { cache: Lock::new(None) } + SingleCache { cache: OnceLock::new() } } } @@ -106,16 +105,16 @@ where #[inline(always)] fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> { - *self.cache.lock() + self.cache.get().copied() } #[inline] fn complete(&self, _key: (), value: V, index: DepNodeIndex) { - *self.cache.lock() = Some((value, index)); + self.cache.set((value, index)).ok(); } fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { - if let Some(value) = 
self.cache.lock().as_ref() { + if let Some(value) = self.cache.get() { f(&(), &value.0, value.1) } } @@ -160,8 +159,7 @@ where } fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { - let shards = self.cache.lock_shards(); - for shard in shards.iter() { + for shard in self.cache.lock_shards() { for (k, v) in shard.iter_enumerated() { if let Some(v) = v { f(&k, &v.0, v.1); diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs index 7e47d701205..d14c6315dc1 100644 --- a/compiler/rustc_query_system/src/query/config.rs +++ b/compiler/rustc_query_system/src/query/config.rs @@ -8,6 +8,7 @@ use crate::query::DepNodeIndex; use crate::query::{QueryContext, QueryInfo, QueryState}; use rustc_data_structures::fingerprint::Fingerprint; +use rustc_span::ErrorGuaranteed; use std::fmt::Debug; use std::hash::Hash; @@ -57,6 +58,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy { self, tcx: Qcx::DepContext, cycle: &[QueryInfo<Qcx::DepKind>], + guar: ErrorGuaranteed, ) -> Self::Value; fn anon(self) -> bool; diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index 1b124892441..9cba549a3b5 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -124,7 +124,6 @@ impl<D: DepKind> QueryJob<D> { } impl QueryJobId { - #[cfg(not(parallel_compiler))] pub(super) fn find_cycle_in_stack<D: DepKind>( &self, query_map: QueryMap<D>, diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 575921b3337..4fa168965a7 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -14,10 +14,11 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}; use crate::HandleCycleError; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sharded::Sharded; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::sync::Lock; #[cfg(parallel_compiler)] -use rustc_data_structures::{cold_path, sharded::Sharded}; +use rustc_data_structures::{cold_path, sync}; use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError}; use rustc_span::{Span, DUMMY_SP}; use std::cell::Cell; @@ -30,10 +31,7 @@ use thin_vec::ThinVec; use super::QueryConfig; pub struct QueryState<K, D: DepKind> { - #[cfg(parallel_compiler)] active: Sharded<FxHashMap<K, QueryResult<D>>>, - #[cfg(not(parallel_compiler))] - active: Lock<FxHashMap<K, QueryResult<D>>>, } /// Indicates the state of a query for a given key in a query map. @@ -52,15 +50,7 @@ where D: DepKind, { pub fn all_inactive(&self) -> bool { - #[cfg(parallel_compiler)] - { - let shards = self.active.lock_shards(); - shards.iter().all(|shard| shard.is_empty()) - } - #[cfg(not(parallel_compiler))] - { - self.active.lock().is_empty() - } + self.active.lock_shards().all(|shard| shard.is_empty()) } pub fn try_collect_active_jobs<Qcx: Copy>( @@ -71,26 +61,10 @@ where ) -> Option<()> { let mut active = Vec::new(); - #[cfg(parallel_compiler)] - { - // We use try_lock_shards here since we are called from the - // deadlock handler, and this shouldn't be locked. 
- let shards = self.active.try_lock_shards()?; - for shard in shards.iter() { - for (k, v) in shard.iter() { - if let QueryResult::Started(ref job) = *v { - active.push((*k, job.clone())); - } - } - } - } - #[cfg(not(parallel_compiler))] - { - // We use try_lock here since we are called from the - // deadlock handler, and this shouldn't be locked. - // (FIXME: Is this relevant for non-parallel compilers? It doesn't - // really hurt much.) - for (k, v) in self.active.try_lock()?.iter() { + // We use try_lock_shards here since we are called from the + // deadlock handler, and this shouldn't be locked. + for shard in self.active.try_lock_shards() { + for (k, v) in shard?.iter() { if let QueryResult::Started(ref job) = *v { active.push((*k, job.clone())); } @@ -148,8 +122,8 @@ where use HandleCycleError::*; match query.handle_cycle_error() { Error => { - error.emit(); - query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle) + let guar = error.emit(); + query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar) } Fatal => { error.emit(); @@ -157,8 +131,8 @@ where unreachable!() } DelayBug => { - error.delay_as_bug(); - query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle) + let guar = error.delay_as_bug(); + query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar) } } } @@ -184,10 +158,7 @@ where cache.complete(key, result, dep_node_index); let job = { - #[cfg(parallel_compiler)] let mut lock = state.active.get_shard_by_value(&key).lock(); - #[cfg(not(parallel_compiler))] - let mut lock = state.active.lock(); match lock.remove(&key).unwrap() { QueryResult::Started(job) => job, QueryResult::Poisoned => panic!(), @@ -209,10 +180,7 @@ where // Poison the query so jobs waiting on it panic. let state = self.state; let job = { - #[cfg(parallel_compiler)] let mut shard = state.active.get_shard_by_value(&self.key).lock(); - #[cfg(not(parallel_compiler))] - let mut shard = state.active.lock(); let job = match shard.remove(&self.key).unwrap() { QueryResult::Started(job) => job, QueryResult::Poisoned => panic!(), @@ -255,7 +223,6 @@ where #[cold] #[inline(never)] -#[cfg(not(parallel_compiler))] fn cycle_error<Q, Qcx>( query: Q, qcx: Qcx, @@ -336,10 +303,7 @@ where Qcx: QueryContext, { let state = query.query_state(qcx); - #[cfg(parallel_compiler)] let mut state_lock = state.active.get_shard_by_value(&key).lock(); - #[cfg(not(parallel_compiler))] - let mut state_lock = state.active.lock(); // For the parallel compiler we need to check both the query cache and query state structures // while holding the state lock to ensure that 1) the query has not yet completed and 2) the @@ -371,8 +335,18 @@ where } Entry::Occupied(mut entry) => { match entry.get_mut() { - #[cfg(not(parallel_compiler))] QueryResult::Started(job) => { + #[cfg(parallel_compiler)] + if sync::is_dyn_thread_safe() { + // Get the latch out + let latch = job.latch(); + drop(state_lock); + + // Only call `wait_for_query` if we're using a Rayon thread pool + // as it will attempt to mark the worker thread as blocked. + return wait_for_query(query, qcx, span, key, latch, current_job_id); + } + let id = job.id; drop(state_lock); @@ -380,14 +354,6 @@ where // so we just return the error. 
cycle_error(query, qcx, id, span) } - #[cfg(parallel_compiler)] - QueryResult::Started(job) => { - // Get the latch out - let latch = job.latch(); - drop(state_lock); - - wait_for_query(query, qcx, span, key, latch, current_job_id) - } QueryResult::Poisoned => FatalError.raise(), } } diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs index ce551078cc0..07c28fdb73b 100644 --- a/compiler/rustc_query_system/src/values.rs +++ b/compiler/rustc_query_system/src/values.rs @@ -1,12 +1,14 @@ +use rustc_span::ErrorGuaranteed; + use crate::dep_graph::{DepContext, DepKind}; use crate::query::QueryInfo; pub trait Value<Tcx: DepContext, D: DepKind>: Sized { - fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> Self; + fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>], guar: ErrorGuaranteed) -> Self; } impl<Tcx: DepContext, T, D: DepKind> Value<Tcx, D> for T { - default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> T { + default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>], _guar: ErrorGuaranteed) -> T { tcx.sess().abort_if_errors(); // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's // non-trivial to define it earlier. diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs index 476b5f733a6..30db450870b 100644 --- a/compiler/rustc_resolve/src/build_reduced_graph.rs +++ b/compiler/rustc_resolve/src/build_reduced_graph.rs @@ -247,8 +247,6 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> { }) } ast::VisibilityKind::Restricted { ref path, id, .. } => { - // Make `PRIVATE_IN_PUBLIC` lint a hard error. - self.r.has_pub_restricted = true; // For visibilities we are not ready to provide correct implementation of "uniform // paths" right now, so on 2018 edition we only allow module-relative paths for now. // On 2015 edition visibilities are resolved as crate-relative by default, diff --git a/compiler/rustc_resolve/src/effective_visibilities.rs b/compiler/rustc_resolve/src/effective_visibilities.rs index eb210532f51..46f5df5ca6f 100644 --- a/compiler/rustc_resolve/src/effective_visibilities.rs +++ b/compiler/rustc_resolve/src/effective_visibilities.rs @@ -128,11 +128,14 @@ impl<'r, 'a, 'tcx> EffectiveVisibilitiesVisitor<'r, 'a, 'tcx> { // If the binding is ambiguous, put the root ambiguity binding and all reexports // leading to it into the table. They are used by the `ambiguous_glob_reexports` // lint. For all bindings added to the table this way `is_ambiguity` returns true. + let is_ambiguity = + |binding: NameBinding<'a>, warn: bool| binding.ambiguity.is_some() && !warn; let mut parent_id = ParentId::Def(module_id); + let mut warn_ambiguity = binding.warn_ambiguity; while let NameBindingKind::Import { binding: nested_binding, .. } = binding.kind { self.update_import(binding, parent_id); - if binding.ambiguity.is_some() { + if is_ambiguity(binding, warn_ambiguity) { // Stop at the root ambiguity, further bindings in the chain should not // be reexported because the root ambiguity blocks any access to them. // (Those further bindings are most likely not ambiguities themselves.) 
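For context, a sketch of the reexport chains this `warn_ambiguity` tracking is about (module and item names invented):

    mod a { pub fn item() {} }
    mod b { pub fn item() {} }
    pub use a::*;
    pub use b::*;
    // `item` is ambiguous at the root; the import chain leading to that root
    // ambiguity is what gets recorded for the `ambiguous_glob_reexports` lint.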
@@ -141,9 +144,9 @@ impl<'r, 'a, 'tcx> EffectiveVisibilitiesVisitor<'r, 'a, 'tcx> { parent_id = ParentId::Import(binding); binding = nested_binding; + warn_ambiguity |= nested_binding.warn_ambiguity; } - - if binding.ambiguity.is_none() + if !is_ambiguity(binding, warn_ambiguity) && let Some(def_id) = binding.res().opt_def_id().and_then(|id| id.as_local()) { self.update_def(def_id, binding.vis.expect_local(), parent_id); } diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index 22d084c8e0b..486d60eab21 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -313,7 +313,7 @@ enum LifetimeRibKind { /// Resolves elided lifetimes to `'static`, but gives a warning that this behavior /// is a bug and will be reverted soon. - AnonymousWarnToStatic(NodeId), + AnonymousWarn(NodeId), /// Signal we cannot find which should be the anonymous lifetime. ElisionFailure, @@ -772,9 +772,10 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, self.r.record_partial_res(ty.id, PartialRes::new(res)); visit::walk_ty(self, ty) } - TyKind::ImplTrait(..) => { + TyKind::ImplTrait(node_id, _) => { let candidates = self.lifetime_elision_candidates.take(); visit::walk_ty(self, ty); + self.record_lifetime_params_for_impl_trait(*node_id); self.lifetime_elision_candidates = candidates; } TyKind::TraitObject(bounds, ..) => { @@ -909,8 +910,8 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, &sig.decl.output, ); - if let Some((async_node_id, span)) = sig.header.asyncness.opt_return_id() { - this.record_lifetime_params_for_impl_trait(async_node_id, span); + if let Some((async_node_id, _)) = sig.header.asyncness.opt_return_id() { + this.record_lifetime_params_for_impl_trait(async_node_id); } }, ); @@ -951,8 +952,8 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, &declaration.output, ); - if let Some((async_node_id, span)) = async_node_id { - this.record_lifetime_params_for_impl_trait(async_node_id, span); + if let Some((async_node_id, _)) = async_node_id { + this.record_lifetime_params_for_impl_trait(async_node_id); } }, ); @@ -1108,6 +1109,7 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, } }, AssocConstraintKind::Bound { ref bounds } => { + self.record_lifetime_params_for_impl_trait(constraint.id); walk_list!(self, visit_param_bound, bounds, BoundKind::Bound); } } @@ -1152,7 +1154,7 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, } LifetimeRibKind::AnonymousCreateParameter { .. } | LifetimeRibKind::AnonymousReportError - | LifetimeRibKind::AnonymousWarnToStatic(_) + | LifetimeRibKind::AnonymousWarn(_) | LifetimeRibKind::Elided(_) | LifetimeRibKind::ElisionFailure | LifetimeRibKind::ConcreteAnonConst(_) @@ -1520,7 +1522,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { // lifetime would be illegal. LifetimeRibKind::Item | LifetimeRibKind::AnonymousReportError - | LifetimeRibKind::AnonymousWarnToStatic(_) + | LifetimeRibKind::AnonymousWarn(_) | LifetimeRibKind::ElisionFailure => Some(LifetimeUseSet::Many), // An anonymous lifetime is legal here, and bound to the right // place, go ahead. @@ -1583,7 +1585,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { | LifetimeRibKind::Generics { .. 
} | LifetimeRibKind::ElisionFailure | LifetimeRibKind::AnonymousReportError - | LifetimeRibKind::AnonymousWarnToStatic(_) => {} + | LifetimeRibKind::AnonymousWarn(_) => {} } } @@ -1623,8 +1625,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { self.record_lifetime_res(lifetime.id, res, elision_candidate); return; } - LifetimeRibKind::AnonymousWarnToStatic(node_id) => { - self.record_lifetime_res(lifetime.id, LifetimeRes::Static, elision_candidate); + LifetimeRibKind::AnonymousWarn(node_id) => { let msg = if elided { "`&` without an explicit lifetime name cannot be used here" } else { @@ -1640,7 +1641,6 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { span: lifetime.ident.span, }, ); - return; } LifetimeRibKind::AnonymousReportError => { let (msg, note) = if elided { @@ -1838,7 +1838,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { // impl Foo for std::cell::Ref<u32> // note lack of '_ // async fn foo(_: std::cell::Ref<u32>) { ... } LifetimeRibKind::AnonymousCreateParameter { report_in_path: true, .. } - | LifetimeRibKind::AnonymousWarnToStatic(_) => { + | LifetimeRibKind::AnonymousWarn(_) => { let sess = self.r.tcx.sess; let mut err = rustc_errors::struct_span_err!( sess, @@ -2934,33 +2934,30 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { kind: LifetimeBinderKind::ConstItem, }, |this| { - this.with_lifetime_rib( - LifetimeRibKind::AnonymousWarnToStatic(item.id), - |this| { - // If this is a trait impl, ensure the const - // exists in trait - this.check_trait_item( - item.id, - item.ident, - &item.kind, - ValueNS, - item.span, - seen_trait_items, - |i, s, c| ConstNotMemberOfTrait(i, s, c), - ); + this.with_lifetime_rib(LifetimeRibKind::AnonymousWarn(item.id), |this| { + // If this is a trait impl, ensure the const + // exists in trait + this.check_trait_item( + item.id, + item.ident, + &item.kind, + ValueNS, + item.span, + seen_trait_items, + |i, s, c| ConstNotMemberOfTrait(i, s, c), + ); - this.visit_generics(generics); - this.visit_ty(ty); - if let Some(expr) = expr { - // We allow arbitrary const expressions inside of associated consts, - // even if they are potentially not const evaluatable. - // - // Type parameters can already be used and as associated consts are - // not used as part of the type system, this is far less surprising. - this.resolve_const_body(expr, None); - } - }, - ); + this.visit_generics(generics); + this.visit_ty(ty); + if let Some(expr) = expr { + // We allow arbitrary const expressions inside of associated consts, + // even if they are potentially not const evaluatable. + // + // Type parameters can already be used and as associated consts are + // not used as part of the type system, this is far less surprising. + this.resolve_const_body(expr, None); + } + }); }, ); } @@ -4367,7 +4364,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { /// We include all lifetime parameters, either named or "Fresh". /// The order of those parameters does not matter, as long as it is /// deterministic. 
- fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId, span: Span) { + fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId) { let mut extra_lifetime_params = vec![]; for rib in self.lifetime_ribs.iter().rev() { @@ -4380,14 +4377,10 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { extra_lifetime_params.extend(earlier_fresh); } } - LifetimeRibKind::Generics { .. } => {} - _ => { - // We are in a function definition. We should only find `Generics` - // and `AnonymousCreateParameter` inside the innermost `Item`. - span_bug!(span, "unexpected rib kind: {:?}", rib.kind) - } + _ => {} } } + self.r.extra_lifetime_params_map.insert(impl_trait_node_id, extra_lifetime_params); } diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index 6d3a1d69ef0..b757f42eaa0 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -989,7 +989,6 @@ pub struct Resolver<'a, 'tcx> { glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>, /// Visibilities in "lowered" form, for all entities that have them. visibilities: FxHashMap<LocalDefId, ty::Visibility>, - has_pub_restricted: bool, used_imports: FxHashSet<NodeId>, maybe_unused_trait_imports: FxIndexSet<LocalDefId>, @@ -1342,7 +1341,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { glob_map: Default::default(), visibilities, - has_pub_restricted: false, used_imports: FxHashSet::default(), maybe_unused_trait_imports: Default::default(), @@ -1486,7 +1484,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { let proc_macros = self.proc_macros.iter().map(|id| self.local_def_id(*id)).collect(); let expn_that_defined = self.expn_that_defined; let visibilities = self.visibilities; - let has_pub_restricted = self.has_pub_restricted; let extern_crate_map = self.extern_crate_map; let maybe_unused_trait_imports = self.maybe_unused_trait_imports; let glob_map = self.glob_map; @@ -1504,7 +1501,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { let global_ctxt = ResolverGlobalCtxt { expn_that_defined, visibilities, - has_pub_restricted, effective_visibilities, extern_crate_map, module_children: self.module_children, diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 086ce4e6964..86db2edab7b 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -17,10 +17,10 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; use rustc_data_structures::jobserver::{self, Client}; use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef}; use rustc_data_structures::sync::{ - self, AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst, + AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst, }; use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter; -use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType}; +use rustc_errors::emitter::{DynEmitter, EmitterWriter, HumanReadableErrorType}; use rustc_errors::json::JsonEmitter; use rustc_errors::registry::Registry; use rustc_errors::{ @@ -1251,7 +1251,7 @@ fn default_emitter( source_map: Lrc<SourceMap>, bundle: Option<Lrc<FluentBundle>>, fallback_bundle: LazyFallbackBundle, -) -> Box<dyn Emitter + sync::Send> { +) -> Box<DynEmitter> { let macro_backtrace = sopts.unstable_opts.macro_backtrace; let track_diagnostics = sopts.unstable_opts.track_diagnostics; let terminal_url = match sopts.unstable_opts.terminal_urls { @@ -1717,12 +1717,12 @@ 
impl EarlyErrorHandler {
     }
 }
 
-fn mk_emitter(output: ErrorOutputType) -> Box<dyn Emitter + sync::Send + 'static> {
+fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> {
     // FIXME(#100717): early errors aren't translated at the moment, so this is fine, but it will
     // need to reference every crate that might emit an early error for translation to work.
     let fallback_bundle =
         fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false);
-    let emitter: Box<dyn Emitter + sync::Send> = match output {
+    let emitter: Box<DynEmitter> = match output {
         config::ErrorOutputType::HumanReadable(kind) => {
             let (short, color_config) = kind.unzip();
             Box::new(EmitterWriter::stderr(color_config, fallback_bundle).short_message(short))
diff --git a/compiler/rustc_smir/Cargo.toml b/compiler/rustc_smir/Cargo.toml
index 80d4e7ed02f..21ec904e43c 100644
--- a/compiler/rustc_smir/Cargo.toml
+++ b/compiler/rustc_smir/Cargo.toml
@@ -9,6 +9,9 @@ rustc_hir = { path = "../rustc_hir", optional = true }
 rustc_middle = { path = "../rustc_middle", optional = true }
 rustc_span = { path = "../rustc_span", optional = true }
 rustc_target = { path = "../rustc_target", optional = true }
+rustc_driver = { path = "../rustc_driver", optional = true }
+rustc_interface = { path = "../rustc_interface", optional = true }
+rustc_session = { path = "../rustc_session", optional = true }
 tracing = "0.1"
 scoped-tls = "1.0"
 
@@ -18,4 +21,7 @@ default = [
     "rustc_middle",
     "rustc_span",
     "rustc_target",
+    "rustc_driver",
+    "rustc_interface",
+    "rustc_session",
 ]
diff --git a/compiler/rustc_smir/src/rustc_internal/mod.rs b/compiler/rustc_smir/src/rustc_internal/mod.rs
index ebacb7cce83..2e219e17a5f 100644
--- a/compiler/rustc_smir/src/rustc_internal/mod.rs
+++ b/compiler/rustc_smir/src/rustc_internal/mod.rs
@@ -4,13 +4,18 @@
 //! until stable MIR is complete.
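The `Box<dyn Emitter + sync::Send>` spellings above collapse into the new `DynEmitter` alias. Presumably the alias lives in `rustc_errors::emitter` with roughly this shape (an assumption inferred from the usage here, not a verbatim quote of the definition):

    // Assumed shape of the alias (`DynSend` being the parallel-compiler
    // Send marker from rustc_data_structures):
    pub type DynEmitter = dyn Emitter + DynSend;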
 use std::fmt::Debug;
-use std::string::ToString;
+use std::ops::Index;
 
+use crate::rustc_internal;
 use crate::{
     rustc_smir::Tables,
     stable_mir::{self, with},
 };
+use rustc_driver::{Callbacks, Compilation, RunCompiler};
+use rustc_interface::{interface, Queries};
+use rustc_middle::mir::interpret::AllocId;
 use rustc_middle::ty::TyCtxt;
+use rustc_session::EarlyErrorHandler;
 pub use rustc_span::def_id::{CrateNum, DefId};
 
 fn with_tables<R>(mut f: impl FnMut(&mut Tables<'_>) -> R) -> R {
@@ -20,7 +25,7 @@ fn with_tables<R>(mut f: impl FnMut(&mut Tables<'_>) -> R) -> R {
 }
 
 pub fn item_def_id(item: &stable_mir::CrateItem) -> DefId {
-    with_tables(|t| t.item_def_id(item))
+    with_tables(|t| t[item.0])
 }
 
 pub fn crate_item(did: DefId) -> stable_mir::CrateItem {
@@ -67,23 +72,16 @@ pub fn impl_def(did: DefId) -> stable_mir::ty::ImplDef {
     with_tables(|t| t.impl_def(did))
 }
 
-impl<'tcx> Tables<'tcx> {
-    pub fn item_def_id(&self, item: &stable_mir::CrateItem) -> DefId {
-        self.def_ids[item.0]
-    }
-
-    pub fn trait_def_id(&self, trait_def: &stable_mir::ty::TraitDef) -> DefId {
-        self.def_ids[trait_def.0]
-    }
+impl<'tcx> Index<stable_mir::DefId> for Tables<'tcx> {
+    type Output = DefId;
 
-    pub fn impl_trait_def_id(&self, impl_def: &stable_mir::ty::ImplDef) -> DefId {
-        self.def_ids[impl_def.0]
-    }
-
-    pub fn generic_def_id(&self, generic_def: &stable_mir::ty::GenericDef) -> DefId {
-        self.def_ids[generic_def.0]
+    #[inline(always)]
+    fn index(&self, index: stable_mir::DefId) -> &Self::Output {
+        &self.def_ids[index.0]
     }
+}
 
+impl<'tcx> Tables<'tcx> {
     pub fn crate_item(&mut self, did: DefId) -> stable_mir::CrateItem {
         stable_mir::CrateItem(self.create_def_id(did))
     }
@@ -136,16 +134,30 @@ impl<'tcx> Tables<'tcx> {
         stable_mir::ty::ImplDef(self.create_def_id(did))
     }
 
+    pub fn prov(&mut self, aid: AllocId) -> stable_mir::ty::Prov {
+        stable_mir::ty::Prov(self.create_alloc_id(aid))
+    }
+
     fn create_def_id(&mut self, did: DefId) -> stable_mir::DefId {
         // FIXME: this becomes inefficient when we have too many ids
         for (i, &d) in self.def_ids.iter().enumerate() {
             if d == did {
-                return i;
+                return stable_mir::DefId(i);
             }
         }
         let id = self.def_ids.len();
         self.def_ids.push(did);
-        id
+        stable_mir::DefId(id)
+    }
+
+    fn create_alloc_id(&mut self, aid: AllocId) -> stable_mir::AllocId {
+        // FIXME: this becomes inefficient when we have too many ids
+        if let Some(i) = self.alloc_ids.iter().position(|a| *a == aid) {
+            return stable_mir::AllocId(i);
+        };
+        let id = self.alloc_ids.len();
+        self.alloc_ids.push(aid);
+        stable_mir::AllocId(id)
+    }
 }
 
@@ -154,12 +166,62 @@ pub fn crate_num(item: &stable_mir::Crate) -> CrateNum {
 }
 
 pub fn run(tcx: TyCtxt<'_>, f: impl FnOnce()) {
-    crate::stable_mir::run(Tables { tcx, def_ids: vec![], types: vec![] }, f);
+    crate::stable_mir::run(Tables { tcx, def_ids: vec![], alloc_ids: vec![], types: vec![] }, f);
 }
 
 /// A type that provides internal information but that can still be used for debug purpose.

 /// A type that provides internal information but that can still be used for debugging purposes.
-pub type Opaque = impl Debug + ToString + Clone;
+#[derive(Clone)]
+pub struct Opaque(String);
+
+impl std::fmt::Display for Opaque {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl std::fmt::Debug for Opaque {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:?}", self.0)
+    }
+}

 pub(crate) fn opaque<T: Debug>(value: &T) -> Opaque {
-    format!("{value:?}")
+    Opaque(format!("{value:?}"))
+}
+
+pub struct StableMir {
+    args: Vec<String>,
+    callback: fn(TyCtxt<'_>),
+}
+
+impl StableMir {
+    /// Creates a new `StableMir` instance from the given arguments and callback.
+    pub fn new(args: Vec<String>, callback: fn(TyCtxt<'_>)) -> Self {
+        StableMir { args, callback }
+    }
+
+    /// Runs the compiler on the given target and invokes the callback once analysis is done.
+    pub fn run(&mut self) {
+        rustc_driver::catch_fatal_errors(|| {
+            RunCompiler::new(&self.args.clone(), self).run().unwrap();
+        })
+        .unwrap();
+    }
+}
+
+impl Callbacks for StableMir {
+    /// Called after analysis. Return value instructs the compiler whether to
+    /// continue the compilation afterwards (defaults to `Compilation::Continue`).
+    fn after_analysis<'tcx>(
+        &mut self,
+        _handler: &EarlyErrorHandler,
+        _compiler: &interface::Compiler,
+        queries: &'tcx Queries<'tcx>,
+    ) -> Compilation {
+        queries.global_ctxt().unwrap().enter(|tcx| {
+            rustc_internal::run(tcx, || (self.callback)(tcx));
+        });
+        // No need to keep going.
+        Compilation::Stop
+    }
 }
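A hedged sketch of how a tool could drive the compiler through the new `StableMir` entry point; the argument list, file name, and callback body below are invented for illustration:

    // Hypothetical driver binary; only `StableMir::new` and `StableMir::run`
    // come from the patch, everything else here is assumed.
    fn main() {
        let args = vec![
            "rustc".to_string(),
            "--crate-type=lib".to_string(),
            "example.rs".to_string(), // assumed input file
        ];
        // The callback is a plain `fn(TyCtxt<'_>)`, so it must not capture state.
        StableMir::new(args, |tcx| {
            // `rustc_internal::run` has installed the `Tables` context by the
            // time we get here, so the `stable_mir` helpers are usable.
            let _ = tcx.sess;
        })
        .run();
    }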
diff --git a/compiler/rustc_smir/src/rustc_smir/alloc.rs b/compiler/rustc_smir/src/rustc_smir/alloc.rs
new file mode 100644
index 00000000000..166c8bda9e1
--- /dev/null
+++ b/compiler/rustc_smir/src/rustc_smir/alloc.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::interpret::{alloc_range, AllocRange, ConstValue, Pointer};
+
+use crate::{
+    rustc_smir::{Stable, Tables},
+    stable_mir::mir::Mutability,
+    stable_mir::ty::{Allocation, ProvenanceMap},
+};
+
+/// Creates a new empty `Allocation` from the given `Align`.
+fn new_empty_allocation(align: rustc_target::abi::Align) -> Allocation {
+    Allocation {
+        bytes: Vec::new(),
+        provenance: ProvenanceMap { ptrs: Vec::new() },
+        align: align.bytes(),
+        mutability: Mutability::Not,
+    }
+}
+
+// We need this function instead of a `Stable` implementation because a
+// `ConstValue` alone does not know the `Ty` of the constant; callers extract
+// it from the surrounding `ConstantKind` and pass it in explicitly.
+#[allow(rustc::usage_of_qualified_ty)]
+pub fn new_allocation<'tcx>(
+    ty: rustc_middle::ty::Ty<'tcx>,
+    const_value: ConstValue<'tcx>,
+    tables: &mut Tables<'tcx>,
+) -> Allocation {
+    match const_value {
+        ConstValue::Scalar(scalar) => {
+            let size = scalar.size();
+            let align = tables
+                .tcx
+                .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty))
+                .unwrap()
+                .align;
+            let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(size, align.abi);
+            allocation
+                .write_scalar(&tables.tcx, alloc_range(rustc_target::abi::Size::ZERO, size), scalar)
+                .unwrap();
+            allocation.stable(tables)
+        }
+        ConstValue::ZeroSized => {
+            let align =
+                tables.tcx.layout_of(rustc_middle::ty::ParamEnv::empty().and(ty)).unwrap().align;
+            new_empty_allocation(align.abi)
+        }
+        ConstValue::Slice { data, start, end } => {
+            let alloc_id = tables.tcx.create_memory_alloc(data);
+            let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::from_bytes(start));
+            let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx);
+            let scalar_len = rustc_middle::mir::interpret::Scalar::from_target_usize(
+                (end - start) as u64,
+                &tables.tcx,
+            );
+            let layout =
+                tables.tcx.layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)).unwrap();
+            let mut allocation =
+                rustc_middle::mir::interpret::Allocation::uninit(layout.size, layout.align.abi);
+            allocation
+                .write_scalar(
+                    &tables.tcx,
+                    alloc_range(rustc_target::abi::Size::ZERO, tables.tcx.data_layout.pointer_size),
+                    scalar_ptr,
+                )
+                .unwrap();
+            allocation
+                .write_scalar(
+                    &tables.tcx,
+                    alloc_range(tables.tcx.data_layout.pointer_size, scalar_len.size()),
+                    scalar_len,
+                )
+                .unwrap();
+            allocation.stable(tables)
+        }
+        ConstValue::ByRef { alloc, offset } => {
+            let ty_size = tables
+                .tcx
+                .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty))
+                .unwrap()
+                .size;
+            allocation_filter(&alloc.0, alloc_range(offset, ty_size), tables)
+        }
+    }
+}
+
+/// Creates an `Allocation` only from information within the `AllocRange`.
+pub(super) fn allocation_filter<'tcx>(
+    alloc: &rustc_middle::mir::interpret::Allocation,
+    alloc_range: AllocRange,
+    tables: &mut Tables<'tcx>,
+) -> Allocation {
+    let mut bytes: Vec<Option<u8>> = alloc
+        .inspect_with_uninit_and_ptr_outside_interpreter(
+            alloc_range.start.bytes_usize()..alloc_range.end().bytes_usize(),
+        )
+        .iter()
+        .copied()
+        .map(Some)
+        .collect();
+    for (i, b) in bytes.iter_mut().enumerate() {
+        if !alloc
+            .init_mask()
+            .get(rustc_target::abi::Size::from_bytes(i + alloc_range.start.bytes_usize()))
+        {
+            *b = None;
+        }
+    }
+    let mut ptrs = Vec::new();
+    for (offset, prov) in alloc
+        .provenance()
+        .ptrs()
+        .iter()
+        .filter(|a| a.0 >= alloc_range.start && a.0 <= alloc_range.end())
+    {
+        ptrs.push((offset.bytes_usize() - alloc_range.start.bytes_usize(), tables.prov(*prov)));
+    }
+    Allocation {
+        bytes,
+        provenance: ProvenanceMap { ptrs },
+        align: alloc.align.bytes(),
+        mutability: alloc.mutability.stable(tables),
+    }
+}
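For intuition, the stable `Allocation` one would expect `new_allocation` to produce for a small scalar constant; the concrete values below are illustrative and assume a little-endian target:

    // Illustrative expected result for a `u16` constant with value 0x0102,
    // i.e. new_allocation(u16_ty, ConstValue::Scalar(...), tables):
    let expected = Allocation {
        bytes: vec![Some(0x02), Some(0x01)], // fully initialized, LSB first
        provenance: ProvenanceMap { ptrs: vec![] }, // scalars carry no pointers
        align: 2, // align_of::<u16>() on common targets
        mutability: Mutability::Not,
    };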
diff --git a/compiler/rustc_smir/src/rustc_smir/mod.rs b/compiler/rustc_smir/src/rustc_smir/mod.rs
index 925d4a3bdd8..822a6e48658 100644
--- a/compiler/rustc_smir/src/rustc_smir/mod.rs
+++ b/compiler/rustc_smir/src/rustc_smir/mod.rs
@@ -1,5 +1,5 @@
 //! Module that implements what will become the rustc side of Stable MIR.
-//!
+
 //! This module is responsible for building Stable MIR components from internal components.
 //!
 //! This module is not intended to be invoked directly by users. It will eventually
@@ -9,19 +9,18 @@
 use crate::rustc_internal::{self, opaque};
 use crate::stable_mir::mir::{CopyNonOverlapping, UserTypeProjection, VariantIdx};
-use crate::stable_mir::ty::{
-    allocation_filter, new_allocation, Const, FloatTy, GenericDef, GenericParamDef, IntTy,
-    Movability, RigidTy, TyKind, UintTy,
-};
+use crate::stable_mir::ty::{FloatTy, GenericParamDef, IntTy, Movability, RigidTy, TyKind, UintTy};
 use crate::stable_mir::{self, Context};
 use rustc_hir as hir;
-use rustc_middle::mir::interpret::alloc_range;
+use rustc_middle::mir::interpret::{alloc_range, AllocId};
 use rustc_middle::mir::{self, ConstantKind};
 use rustc_middle::ty::{self, Ty, TyCtxt, Variance};
 use rustc_span::def_id::{CrateNum, DefId, LOCAL_CRATE};
 use rustc_target::abi::FieldIdx;
 use tracing::debug;

+mod alloc;
+
 impl<'tcx> Context for Tables<'tcx> {
     fn local_crate(&self) -> stable_mir::Crate {
         smir_crate(self.tcx, LOCAL_CRATE)
@@ -54,7 +53,7 @@ impl<'tcx> Context for Tables<'tcx> {
     }

     fn trait_decl(&mut self, trait_def: &stable_mir::ty::TraitDef) -> stable_mir::ty::TraitDecl {
-        let def_id = self.trait_def_id(trait_def);
+        let def_id = self[trait_def.0];
         let trait_def = self.tcx.trait_def(def_id);
         trait_def.stable(self)
     }
@@ -68,13 +67,13 @@ impl<'tcx> Context for Tables<'tcx> {
     }

     fn trait_impl(&mut self, impl_def: &stable_mir::ty::ImplDef) -> stable_mir::ty::ImplTrait {
-        let def_id = self.impl_trait_def_id(impl_def);
+        let def_id = self[impl_def.0];
         let impl_trait = self.tcx.impl_trait_ref(def_id).unwrap();
         impl_trait.stable(self)
     }

     fn mir_body(&mut self, item: &stable_mir::CrateItem) -> stable_mir::mir::Body {
-        let def_id = self.item_def_id(item);
+        let def_id = self[item.0];
         let mir = self.tcx.optimized_mir(def_id);
         stable_mir::mir::Body {
             blocks: mir
@@ -102,19 +101,16 @@ impl<'tcx> Context for Tables<'tcx> {
         ty.stable(self)
     }

-    fn generics_of(&mut self, generic_def: &GenericDef) -> stable_mir::ty::Generics {
-        let def_id = self.generic_def_id(generic_def);
-        let generic_def = self.tcx.generics_of(def_id);
-        generic_def.stable(self)
+    fn generics_of(&mut self, def_id: stable_mir::DefId) -> stable_mir::ty::Generics {
+        let def_id = self[def_id];
+        let generics = self.tcx.generics_of(def_id);
+        generics.stable(self)
     }

-    fn predicates_of(
-        &mut self,
-        trait_def: &stable_mir::ty::TraitDef,
-    ) -> stable_mir::GenericPredicates {
-        let trait_def_id = self.trait_def_id(trait_def);
-        let ty::GenericPredicates { parent, predicates } = self.tcx.predicates_of(trait_def_id);
-        stable_mir::GenericPredicates {
+    fn predicates_of(&mut self, def_id: stable_mir::DefId) -> stable_mir::ty::GenericPredicates {
+        let def_id = self[def_id];
+        let ty::GenericPredicates { parent, predicates } = self.tcx.predicates_of(def_id);
+        stable_mir::ty::GenericPredicates {
             parent: parent.map(|did| self.trait_def(did)),
             predicates: predicates
                 .iter()
@@ -129,6 +125,7 @@ impl<'tcx> Context for Tables<'tcx> {
 pub struct Tables<'tcx> {
     pub tcx: TyCtxt<'tcx>,
     pub def_ids: Vec<DefId>,
+    pub alloc_ids: Vec<AllocId>,
     pub types: Vec<Ty<'tcx>>,
 }

@@ -208,8 +205,7 @@ impl<'tcx> Stable<'tcx> for mir::Rvalue<'tcx> {
         match self {
             Use(op) => stable_mir::mir::Rvalue::Use(op.stable(tables)),
             Repeat(op, len) => {
-                let cnst = ConstantKind::from_const(*len, tables.tcx);
-                let len = Const { literal: cnst.stable(tables) };
+                let len = len.stable(tables);
                 stable_mir::mir::Rvalue::Repeat(op.stable(tables), len)
             }
             Ref(region, kind, place) => stable_mir::mir::Rvalue::Ref(
@@ -397,8 +393,7 @@ impl<'tcx> Stable<'tcx> for ty::TermKind<'tcx> {
         match self {
            ty::TermKind::Ty(ty) => TermKind::Type(tables.intern_ty(*ty)),
            ty::TermKind::Const(cnst) => {
-                let cnst = ConstantKind::from_const(*cnst, tables.tcx);
-                let cnst = Const { literal: cnst.stable(tables) };
+                let cnst = cnst.stable(tables);
                 TermKind::Const(cnst)
             }
         }
     }
 }
@@ -1086,8 +1081,32 @@ impl<'tcx> Stable<'tcx> for ty::Const<'tcx> {
     type T = stable_mir::ty::Const;

     fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
-        let cnst = ConstantKind::from_const(*self, tables.tcx);
-        stable_mir::ty::Const { literal: cnst.stable(tables) }
+        stable_mir::ty::Const {
+            literal: match self.kind() {
+                ty::Value(val) => {
+                    let const_val = tables.tcx.valtree_to_const_val((self.ty(), val));
+                    stable_mir::ty::ConstantKind::Allocated(alloc::new_allocation(
+                        self.ty(),
+                        const_val,
+                        tables,
+                    ))
+                }
+                ty::ParamCt(param) => stable_mir::ty::ConstantKind::ParamCt(opaque(&param)),
+                ty::ErrorCt(_) => unreachable!(),
+                ty::InferCt(_) => unreachable!(),
+                ty::BoundCt(_, _) => unimplemented!(),
+                ty::PlaceholderCt(_) => unimplemented!(),
+                ty::Unevaluated(uv) => {
+                    stable_mir::ty::ConstantKind::Unevaluated(stable_mir::ty::UnevaluatedConst {
+                        ty: tables.intern_ty(self.ty()),
+                        def: tables.const_def(uv.def),
+                        args: uv.args.stable(tables),
+                        promoted: None,
+                    })
+                }
+                ty::ExprCt(_) => unimplemented!(),
+            },
+        }
     }
 }

@@ -1111,7 +1130,11 @@ impl<'tcx> Stable<'tcx> for mir::interpret::Allocation {
     type T = stable_mir::ty::Allocation;

     fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
-        allocation_filter(self, alloc_range(rustc_target::abi::Size::ZERO, self.size()), tables)
+        alloc::allocation_filter(
+            self,
+            alloc_range(rustc_target::abi::Size::ZERO, self.size()),
+            tables,
+        )
     }
 }

@@ -1158,26 +1181,18 @@ impl<'tcx> Stable<'tcx> for rustc_middle::mir::ConstantKind<'tcx> {
     type T = stable_mir::ty::ConstantKind;

     fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
-        match self {
-            ConstantKind::Ty(c) => match c.kind() {
-                ty::Value(val) => {
-                    let const_val = tables.tcx.valtree_to_const_val((c.ty(), val));
-                    stable_mir::ty::ConstantKind::Allocated(new_allocation(self, const_val, tables))
-                }
-                ty::ParamCt(param) => stable_mir::ty::ConstantKind::ParamCt(opaque(&param)),
-                ty::ErrorCt(_) => unreachable!(),
-                _ => unimplemented!(),
-            },
+        match *self {
+            ConstantKind::Ty(c) => c.stable(tables).literal,
             ConstantKind::Unevaluated(unev_const, ty) => {
                 stable_mir::ty::ConstantKind::Unevaluated(stable_mir::ty::UnevaluatedConst {
-                    ty: tables.intern_ty(*ty),
+                    ty: tables.intern_ty(ty),
                     def: tables.const_def(unev_const.def),
                     args: unev_const.args.stable(tables),
                     promoted: unev_const.promoted.map(|u| u.as_u32()),
                 })
             }
-            ConstantKind::Val(val, _) => {
-                stable_mir::ty::ConstantKind::Allocated(new_allocation(self, *val, tables))
+            ConstantKind::Val(val, ty) => {
+                stable_mir::ty::ConstantKind::Allocated(alloc::new_allocation(ty, val, tables))
             }
         }
     }
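Every impl above follows the same lowering pattern; reconstructed from those impls (shown here for reference, not new code in the patch), the trait has this shape:

    // Reconstructed shape of the lowering trait implemented throughout this
    // module: each internal type names its stable counterpart `T` and converts
    // through the interning `Tables`.
    pub(crate) trait Stable<'tcx> {
        type T;
        fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T;
    }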
diff --git a/compiler/rustc_smir/src/stable_mir/mod.rs b/compiler/rustc_smir/src/stable_mir/mod.rs
index 44938eaa035..dad7c75c340 100644
--- a/compiler/rustc_smir/src/stable_mir/mod.rs
+++ b/compiler/rustc_smir/src/stable_mir/mod.rs
@@ -13,14 +13,14 @@
 use std::cell::Cell;

-use crate::rustc_smir::Tables;
-
 use self::ty::{
-    GenericDef, Generics, ImplDef, ImplTrait, PredicateKind, Span, TraitDecl, TraitDef, Ty, TyKind,
+    GenericPredicates, Generics, ImplDef, ImplTrait, Span, TraitDecl, TraitDef, Ty, TyKind,
 };
+use crate::rustc_smir::Tables;

 pub mod mir;
 pub mod ty;
+pub mod visitor;

 /// Use `String` for now, but we should replace it.
 pub type Symbol = String;
@@ -29,7 +29,12 @@ pub type Symbol = String;
 pub type CrateNum = usize;

 /// A unique identification number for each item accessible for the current compilation unit.
-pub type DefId = usize;
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct DefId(pub(crate) usize);
+
+/// A unique identification number for each provenance entry.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct AllocId(pub(crate) usize);

 /// A list of crate items.
 pub type CrateItems = Vec<CrateItem>;
@@ -40,12 +45,6 @@ pub type TraitDecls = Vec<TraitDef>;

 /// A list of impl trait decls.
 pub type ImplTraitDecls = Vec<ImplDef>;

-/// A list of predicates.
-pub struct GenericPredicates {
-    pub parent: Option<TraitDef>,
-    pub predicates: Vec<(PredicateKind, Span)>,
-}
-
 /// Holds information about a crate.
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub struct Crate {
@@ -109,14 +108,6 @@ pub fn trait_impl(trait_impl: &ImplDef) -> ImplTrait {
     with(|cx| cx.trait_impl(trait_impl))
 }

-pub fn generics_of(generic_def: &GenericDef) -> Generics {
-    with(|cx| cx.generics_of(generic_def))
-}
-
-pub fn predicates_of(trait_def: &TraitDef) -> GenericPredicates {
-    with(|cx| cx.predicates_of(trait_def))
-}
-
 pub trait Context {
     fn entry_fn(&mut self) -> Option<CrateItem>;
     /// Retrieve all items of the local crate that have a MIR associated with them.
@@ -126,8 +117,8 @@ pub trait Context {
     fn trait_decl(&mut self, trait_def: &TraitDef) -> TraitDecl;
     fn all_trait_impls(&mut self) -> ImplTraitDecls;
     fn trait_impl(&mut self, trait_impl: &ImplDef) -> ImplTrait;
-    fn generics_of(&mut self, generic_def: &GenericDef) -> Generics;
-    fn predicates_of(&mut self, trait_def: &TraitDef) -> GenericPredicates;
+    fn generics_of(&mut self, def_id: DefId) -> Generics;
+    fn predicates_of(&mut self, def_id: DefId) -> GenericPredicates;
     /// Get information about the local crate.
     fn local_crate(&self) -> Crate;
     /// Retrieve a list of all external crates.
diff --git a/compiler/rustc_smir/src/stable_mir/ty.rs b/compiler/rustc_smir/src/stable_mir/ty.rs index 5929823b1bb..1db6b1e3d28 100644 --- a/compiler/rustc_smir/src/stable_mir/ty.rs +++ b/compiler/rustc_smir/src/stable_mir/ty.rs @@ -1,10 +1,5 @@ -use rustc_middle::mir::interpret::{alloc_range, AllocRange, ConstValue, Pointer}; - -use super::{mir::Mutability, mir::Safety, with, DefId}; -use crate::{ - rustc_internal::{opaque, Opaque}, - rustc_smir::{Stable, Tables}, -}; +use super::{mir::Mutability, mir::Safety, with, AllocId, DefId}; +use crate::rustc_internal::Opaque; #[derive(Copy, Clone, Debug)] pub struct Ty(pub usize); @@ -120,27 +115,9 @@ pub struct GenericDef(pub(crate) DefId); #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct ConstDef(pub(crate) DefId); -impl TraitDef { - pub fn trait_decl(&self) -> TraitDecl { - with(|cx| cx.trait_decl(self)) - } -} - #[derive(Clone, PartialEq, Eq, Debug)] pub struct ImplDef(pub(crate) DefId); -impl ImplDef { - pub fn trait_impl(&self) -> ImplTrait { - with(|cx| cx.trait_impl(self)) - } -} - -impl GenericDef { - pub fn generics_of(&self) -> Generics { - with(|tcx| tcx.generics_of(self)) - } -} - #[derive(Clone, Debug)] pub struct GenericArgs(pub Vec<GenericArgKind>); @@ -283,7 +260,9 @@ pub struct BoundTy { pub type Bytes = Vec<Option<u8>>; pub type Size = usize; -pub type Prov = Opaque; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Prov(pub(crate) AllocId); pub type Align = u64; pub type Promoted = u32; pub type InitMaskMaterialized = Vec<u64>; @@ -304,128 +283,6 @@ pub struct Allocation { pub mutability: Mutability, } -impl Allocation { - /// Creates new empty `Allocation` from given `Align`. - fn new_empty_allocation(align: rustc_target::abi::Align) -> Allocation { - Allocation { - bytes: Vec::new(), - provenance: ProvenanceMap { ptrs: Vec::new() }, - align: align.bytes(), - mutability: Mutability::Not, - } - } -} - -// We need this method instead of a Stable implementation -// because we need to get `Ty` of the const we are trying to create, to do that -// we need to have access to `ConstantKind` but we can't access that inside Stable impl. 
-pub fn new_allocation<'tcx>( - const_kind: &rustc_middle::mir::ConstantKind<'tcx>, - const_value: ConstValue<'tcx>, - tables: &mut Tables<'tcx>, -) -> Allocation { - match const_value { - ConstValue::Scalar(scalar) => { - let size = scalar.size(); - let align = tables - .tcx - .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty())) - .unwrap() - .align; - let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(size, align.abi); - allocation - .write_scalar(&tables.tcx, alloc_range(rustc_target::abi::Size::ZERO, size), scalar) - .unwrap(); - allocation.stable(tables) - } - ConstValue::ZeroSized => { - let align = tables - .tcx - .layout_of(rustc_middle::ty::ParamEnv::empty().and(const_kind.ty())) - .unwrap() - .align; - Allocation::new_empty_allocation(align.abi) - } - ConstValue::Slice { data, start, end } => { - let alloc_id = tables.tcx.create_memory_alloc(data); - let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::from_bytes(start)); - let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx); - let scalar_len = rustc_middle::mir::interpret::Scalar::from_target_usize( - (end - start) as u64, - &tables.tcx, - ); - let layout = tables - .tcx - .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty())) - .unwrap(); - let mut allocation = - rustc_middle::mir::interpret::Allocation::uninit(layout.size, layout.align.abi); - allocation - .write_scalar( - &tables.tcx, - alloc_range(rustc_target::abi::Size::ZERO, tables.tcx.data_layout.pointer_size), - scalar_ptr, - ) - .unwrap(); - allocation - .write_scalar( - &tables.tcx, - alloc_range(tables.tcx.data_layout.pointer_size, scalar_len.size()), - scalar_len, - ) - .unwrap(); - allocation.stable(tables) - } - ConstValue::ByRef { alloc, offset } => { - let ty_size = tables - .tcx - .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty())) - .unwrap() - .size; - allocation_filter(&alloc.0, alloc_range(offset, ty_size), tables) - } - } -} - -/// Creates an `Allocation` only from information within the `AllocRange`. 
-pub fn allocation_filter<'tcx>( - alloc: &rustc_middle::mir::interpret::Allocation, - alloc_range: AllocRange, - tables: &mut Tables<'tcx>, -) -> Allocation { - let mut bytes: Vec<Option<u8>> = alloc - .inspect_with_uninit_and_ptr_outside_interpreter( - alloc_range.start.bytes_usize()..alloc_range.end().bytes_usize(), - ) - .iter() - .copied() - .map(Some) - .collect(); - for (i, b) in bytes.iter_mut().enumerate() { - if !alloc - .init_mask() - .get(rustc_target::abi::Size::from_bytes(i + alloc_range.start.bytes_usize())) - { - *b = None; - } - } - let mut ptrs = Vec::new(); - for (offset, prov) in alloc - .provenance() - .ptrs() - .iter() - .filter(|a| a.0 >= alloc_range.start && a.0 <= alloc_range.end()) - { - ptrs.push((offset.bytes_usize() - alloc_range.start.bytes_usize(), opaque(prov))); - } - Allocation { - bytes: bytes, - provenance: ProvenanceMap { ptrs }, - align: alloc.align.bytes(), - mutability: alloc.mutability.stable(tables), - } -} - #[derive(Clone, Debug)] pub enum ConstantKind { Allocated(Allocation), @@ -463,6 +320,16 @@ pub struct TraitDecl { pub deny_explicit_impl: bool, } +impl TraitDecl { + pub fn generics_of(&self) -> Generics { + with(|cx| cx.generics_of(self.def_id.0)) + } + + pub fn predicates_of(&self) -> GenericPredicates { + with(|cx| cx.predicates_of(self.def_id.0)) + } +} + pub type ImplTrait = EarlyBinder<TraitRef>; #[derive(Clone, Debug)] @@ -499,8 +366,8 @@ pub struct GenericParamDef { } pub struct GenericPredicates { - pub parent: Option<DefId>, - pub predicates: Vec<PredicateKind>, + pub parent: Option<TraitDef>, + pub predicates: Vec<(PredicateKind, Span)>, } #[derive(Clone, Debug)] diff --git a/compiler/rustc_smir/src/stable_mir/visitor.rs b/compiler/rustc_smir/src/stable_mir/visitor.rs new file mode 100644 index 00000000000..c928eb1381f --- /dev/null +++ b/compiler/rustc_smir/src/stable_mir/visitor.rs @@ -0,0 +1,186 @@ +use std::ops::ControlFlow; + +use crate::rustc_internal::Opaque; + +use super::ty::{ + Allocation, Binder, Const, ConstDef, ExistentialPredicate, FnSig, GenericArgKind, GenericArgs, + Promoted, RigidTy, TermKind, Ty, UnevaluatedConst, +}; + +pub trait Visitor: Sized { + type Break; + fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break> { + ty.super_visit(self) + } + fn visit_const(&mut self, c: &Const) -> ControlFlow<Self::Break> { + c.super_visit(self) + } +} + +pub trait Visitable { + fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> { + self.super_visit(visitor) + } + fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break>; +} + +impl Visitable for Ty { + fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> { + visitor.visit_ty(self) + } + fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> { + match self.kind() { + super::ty::TyKind::RigidTy(ty) => ty.visit(visitor), + super::ty::TyKind::Alias(_, alias) => alias.args.visit(visitor), + super::ty::TyKind::Param(_) => todo!(), + super::ty::TyKind::Bound(_, _) => todo!(), + } + } +} + +impl Visitable for Const { + fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> { + visitor.visit_const(self) + } + fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> { + match &self.literal { + super::ty::ConstantKind::Allocated(alloc) => alloc.visit(visitor), + super::ty::ConstantKind::Unevaluated(uv) => uv.visit(visitor), + super::ty::ConstantKind::ParamCt(param) => param.visit(visitor), + } + } +} + +impl Visitable for Opaque { + fn super_visit<V: Visitor>(&self, 
_visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for Allocation {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for UnevaluatedConst {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        let UnevaluatedConst { ty, def, args, promoted } = self;
+        ty.visit(visitor)?;
+        def.visit(visitor)?;
+        args.visit(visitor)?;
+        promoted.visit(visitor)
+    }
+}
+
+impl Visitable for ConstDef {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl<T: Visitable> Visitable for Option<T> {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            Some(val) => val.visit(visitor),
+            None => ControlFlow::Continue(()),
+        }
+    }
+}
+
+impl Visitable for Promoted {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for GenericArgs {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.0.visit(visitor)
+    }
+}
+
+impl Visitable for GenericArgKind {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            GenericArgKind::Lifetime(lt) => lt.visit(visitor),
+            GenericArgKind::Type(t) => t.visit(visitor),
+            GenericArgKind::Const(c) => c.visit(visitor),
+        }
+    }
+}
+
+impl Visitable for RigidTy {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            RigidTy::Bool
+            | RigidTy::Char
+            | RigidTy::Int(_)
+            | RigidTy::Uint(_)
+            | RigidTy::Float(_)
+            | RigidTy::Never
+            | RigidTy::Foreign(_)
+            | RigidTy::Str => ControlFlow::Continue(()),
+            RigidTy::Array(t, c) => {
+                t.visit(visitor)?;
+                c.visit(visitor)
+            }
+            RigidTy::Slice(inner) => inner.visit(visitor),
+            RigidTy::RawPtr(ty, _) => ty.visit(visitor),
+            RigidTy::Ref(_, ty, _) => ty.visit(visitor),
+            RigidTy::FnDef(_, args) => args.visit(visitor),
+            RigidTy::FnPtr(sig) => sig.visit(visitor),
+            RigidTy::Closure(_, args) => args.visit(visitor),
+            RigidTy::Generator(_, args, _) => args.visit(visitor),
+            RigidTy::Dynamic(pred, r, _) => {
+                pred.visit(visitor)?;
+                r.visit(visitor)
+            }
+            RigidTy::Tuple(fields) => fields.visit(visitor),
+            RigidTy::Adt(_, args) => args.visit(visitor),
+        }
+    }
+}
+
+impl<T: Visitable> Visitable for Vec<T> {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        for arg in self {
+            arg.visit(visitor)?;
+        }
+        ControlFlow::Continue(())
+    }
+}
+
+impl<T: Visitable> Visitable for Binder<T> {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.value.visit(visitor)
+    }
+}
+
+impl Visitable for ExistentialPredicate {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            ExistentialPredicate::Trait(tr) => tr.generic_args.visit(visitor),
+            ExistentialPredicate::Projection(p) => {
+                p.term.visit(visitor)?;
+                p.generic_args.visit(visitor)
+            }
+            ExistentialPredicate::AutoTrait(_) => ControlFlow::Continue(()),
+        }
+    }
+}
+
+impl Visitable for TermKind {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            TermKind::Type(t) => t.visit(visitor),
+            TermKind::Const(c) => c.visit(visitor),
+        }
+    }
+}
+
+impl Visitable for FnSig {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.inputs_and_output.visit(visitor)
+    }
+}
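A sketch of a possible client of the new visitor API; the `CollectTys` type is hypothetical, not part of the patch. It records every type the walk touches and never breaks early:

    use std::ops::ControlFlow;

    struct CollectTys(Vec<Ty>);

    impl Visitor for CollectTys {
        // We never stop early, so any break type would do; `()` keeps it simple.
        type Break = ();

        fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<()> {
            self.0.push(*ty); // `Ty` is a Copy index into the type table
            ty.super_visit(self) // keep walking nested types
        }
    }

    // Usage, given some `ty: Ty` obtained inside the stable-MIR context
    // (`Ty::kind()` needs that context to be installed):
    //     let mut collector = CollectTys(Vec::new());
    //     let _ = ty.visit(&mut collector);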
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 9d4f49f9453..656deebb5d0 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -1366,6 +1366,7 @@ symbols! {
         rustc_trivial_field_reads,
         rustc_unsafe_specialization_marker,
         rustc_variance,
+        rustc_variance_of_opaques,
         rustdoc,
         rustdoc_internals,
         rustdoc_missing_doc_code_examples,
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 8fab13d5d5d..8d573def9bb 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -579,10 +579,9 @@ pub enum Conv {
     C,
     Rust,

-    /// For things unlikely to be called, where smaller caller codegen is
-    /// preferred over raw speed.
-    /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
-    RustCold,
+    Cold,
+    PreserveMost,
+    PreserveAll,

     // Target-specific calling conventions.
     ArmAapcs,
@@ -605,9 +604,7 @@ pub enum Conv {
     AvrInterrupt,
     AvrNonBlockingInterrupt,

-    RiscvInterrupt {
-        kind: RiscvInterruptKind,
-    },
+    RiscvInterrupt { kind: RiscvInterruptKind },
 }

 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
diff --git a/compiler/rustc_target/src/json.rs b/compiler/rustc_target/src/json.rs
index af455b6432f..c6135149081 100644
--- a/compiler/rustc_target/src/json.rs
+++ b/compiler/rustc_target/src/json.rs
@@ -96,7 +96,9 @@ impl ToJson for crate::abi::call::Conv {
         let s = match self {
             Self::C => "C",
             Self::Rust => "Rust",
-            Self::RustCold => "RustCold",
+            Self::Cold => "Cold",
+            Self::PreserveMost => "PreserveMost",
+            Self::PreserveAll => "PreserveAll",
             Self::ArmAapcs => "ArmAapcs",
             Self::CCmseNonSecureCall => "CCmseNonSecureCall",
             Self::Msp430Intr => "Msp430Intr",
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
index 550cdf6bda6..956a5cb5c2f 100644
--- a/compiler/rustc_target/src/spec/abi.rs
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -14,15 +14,33 @@ pub enum Abi {
     // hashing tests. These are used in many places, so giving them stable values reduces test
     // churn. The specific values are meaningless.
     Rust,
-    C { unwind: bool },
-    Cdecl { unwind: bool },
-    Stdcall { unwind: bool },
-    Fastcall { unwind: bool },
-    Vectorcall { unwind: bool },
-    Thiscall { unwind: bool },
-    Aapcs { unwind: bool },
-    Win64 { unwind: bool },
-    SysV64 { unwind: bool },
+    C {
+        unwind: bool,
+    },
+    Cdecl {
+        unwind: bool,
+    },
+    Stdcall {
+        unwind: bool,
+    },
+    Fastcall {
+        unwind: bool,
+    },
+    Vectorcall {
+        unwind: bool,
+    },
+    Thiscall {
+        unwind: bool,
+    },
+    Aapcs {
+        unwind: bool,
+    },
+    Win64 {
+        unwind: bool,
+    },
+    SysV64 {
+        unwind: bool,
+    },
     PtxKernel,
     Msp430Interrupt,
     X86Interrupt,
@@ -32,11 +50,16 @@ pub enum Abi {
     AvrNonBlockingInterrupt,
     CCmseNonSecureCall,
     Wasm,
-    System { unwind: bool },
+    System {
+        unwind: bool,
+    },
     RustIntrinsic,
     RustCall,
     PlatformIntrinsic,
     Unadjusted,
+    /// For things unlikely to be called, where reducing register pressure in
+    /// `extern "Rust"` callers is worth paying extra cost in the callee.
+    /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
RustCold, RiscvInterruptM, RiscvInterruptS, diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs index 8a8d1ab95e8..f7dcec307dd 100644 --- a/compiler/rustc_target/src/spec/apple_base.rs +++ b/compiler/rustc_target/src/spec/apple_base.rs @@ -179,20 +179,52 @@ pub fn opts(os: &'static str, arch: Arch) -> TargetOptions { } } -pub fn deployment_target(target: &Target) -> Option<String> { +pub fn sdk_version(platform: u32) -> Option<(u32, u32)> { + // NOTE: These values are from an arbitrary point in time but shouldn't make it into the final + // binary since the final link command will have the current SDK version passed to it. + match platform { + object::macho::PLATFORM_MACOS => Some((13, 1)), + object::macho::PLATFORM_IOS + | object::macho::PLATFORM_IOSSIMULATOR + | object::macho::PLATFORM_TVOS + | object::macho::PLATFORM_TVOSSIMULATOR + | object::macho::PLATFORM_MACCATALYST => Some((16, 2)), + object::macho::PLATFORM_WATCHOS | object::macho::PLATFORM_WATCHOSSIMULATOR => Some((9, 1)), + _ => None, + } +} + +pub fn platform(target: &Target) -> Option<u32> { + Some(match (&*target.os, &*target.abi) { + ("macos", _) => object::macho::PLATFORM_MACOS, + ("ios", "macabi") => object::macho::PLATFORM_MACCATALYST, + ("ios", "sim") => object::macho::PLATFORM_IOSSIMULATOR, + ("ios", _) => object::macho::PLATFORM_IOS, + ("watchos", "sim") => object::macho::PLATFORM_WATCHOSSIMULATOR, + ("watchos", _) => object::macho::PLATFORM_WATCHOS, + ("tvos", "sim") => object::macho::PLATFORM_TVOSSIMULATOR, + ("tvos", _) => object::macho::PLATFORM_TVOS, + _ => return None, + }) +} + +pub fn deployment_target(target: &Target) -> Option<(u32, u32)> { let (major, minor) = match &*target.os { "macos" => { // This does not need to be specific. It just needs to handle x86 vs M1. let arch = if target.arch == "x86" || target.arch == "x86_64" { X86_64 } else { Arm64 }; macos_deployment_target(arch) } - "ios" => ios_deployment_target(), + "ios" => match &*target.abi { + "macabi" => mac_catalyst_deployment_target(), + _ => ios_deployment_target(), + }, "watchos" => watchos_deployment_target(), "tvos" => tvos_deployment_target(), _ => return None, }; - Some(format!("{major}.{minor}")) + Some((major, minor)) } fn from_set_deployment_target(var_name: &str) -> Option<(u32, u32)> { @@ -274,6 +306,11 @@ fn ios_deployment_target() -> (u32, u32) { from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0)) } +fn mac_catalyst_deployment_target() -> (u32, u32) { + // If you are looking for the default deployment target, prefer `rustc --print deployment-target`. + from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((14, 0)) +} + pub fn ios_llvm_target(arch: Arch) -> String { // Modern iOS tooling extracts information about deployment target // from LC_BUILD_VERSION. This load command will only be emitted when diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 31b6961bb62..fca99381c0c 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -61,6 +61,8 @@ mod aix_base; mod android_base; mod apple_base; pub use apple_base::deployment_target as current_apple_deployment_target; +pub use apple_base::platform as current_apple_platform; +pub use apple_base::sdk_version as current_apple_sdk_version; mod avr_gnu_base; pub use avr_gnu_base::ef_avr_arch; mod bpf_base; @@ -2276,6 +2278,13 @@ impl Target { Abi::Vectorcall { .. 
} if ["x86", "x86_64"].contains(&&self.arch[..]) => abi, Abi::Fastcall { unwind } | Abi::Vectorcall { unwind } => Abi::C { unwind }, + // The Windows x64 calling convention we use for `extern "Rust"` + // <https://learn.microsoft.com/en-us/cpp/build/x64-software-conventions#register-volatility-and-preservation> + // expects the callee to save `xmm6` through `xmm15`, but `PreserveMost` + // (that we use by default for `extern "rust-cold"`) doesn't save any of those. + // So to avoid bloating callers, just use the Rust convention here. + Abi::RustCold if self.is_like_windows && self.arch == "x86_64" => Abi::Rust, + abi => abi, } } diff --git a/compiler/rustc_trait_selection/messages.ftl b/compiler/rustc_trait_selection/messages.ftl index f4c9dfa3488..2a09a7dcd89 100644 --- a/compiler/rustc_trait_selection/messages.ftl +++ b/compiler/rustc_trait_selection/messages.ftl @@ -40,4 +40,5 @@ trait_selection_no_value_in_rustc_on_unimplemented = this attribute must have a .label = expected value here .note = eg `#[rustc_on_unimplemented(message="foo")]` +trait_selection_ty_alias_overflow = in case this is a recursive type alias, consider using a struct, enum, or union instead trait_selection_unable_to_construct_constant_value = unable to construct a constant value for the unevaluated constant {$unevaluated} diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs index 5c2cbe39953..63ae83c8ef4 100644 --- a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs +++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs @@ -537,7 +537,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { /// Iterate over all added goals: returning `Ok(Some(_))` in case we can stop rerunning. /// - /// Goals for the next step get directly added the the nested goals of the `EvalCtxt`. + /// Goals for the next step get directly added to the nested goals of the `EvalCtxt`. 
fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution> { let tcx = self.tcx(); let mut goals = core::mem::replace(&mut self.nested_goals, NestedGoals::new()); diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs index 49ebfa4e6cb..52a11abd545 100644 --- a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs @@ -53,7 +53,7 @@ impl<'tcx> SearchGraph<'tcx> { pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> { Self { mode, - local_overflow_limit: tcx.recursion_limit().0.ilog2() as usize, + local_overflow_limit: tcx.recursion_limit().0.checked_ilog2().unwrap_or(0) as usize, stack: Default::default(), provisional_cache: ProvisionalCache::empty(), } diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index 5746781ae35..e90723b0ee8 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -152,16 +152,14 @@ fn with_fresh_ty_vars<'cx, 'tcx>( .predicates_of(impl_def_id) .instantiate(tcx, impl_args) .iter() - .map(|(c, s)| (c.as_predicate(), s)) + .map(|(c, _)| c.as_predicate()) .collect(), }; - let InferOk { value: mut header, obligations } = selcx - .infcx - .at(&ObligationCause::dummy_with_span(tcx.def_span(impl_def_id)), param_env) - .normalize(header); + let InferOk { value: mut header, obligations } = + selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(header); - header.predicates.extend(obligations.into_iter().map(|o| (o.predicate, o.cause.span))); + header.predicates.extend(obligations.into_iter().map(|o| o.predicate)); header } @@ -261,17 +259,11 @@ fn overlap<'tcx>( infcx.tcx.def_span(impl2_header.impl_def_id), "the second impl is here", ); - if !failing_obligation.cause.span.is_dummy() { - lint.span_label( - failing_obligation.cause.span, - format!( - "`{}` may be considered to hold in future releases, \ - causing the impls to overlap", - infcx - .resolve_vars_if_possible(failing_obligation.predicate) - ), - ); - } + lint.note(format!( + "`{}` may be considered to hold in future releases, \ + causing the impls to overlap", + infcx.resolve_vars_if_possible(failing_obligation.predicate) + )); lint }, ); @@ -355,8 +347,8 @@ fn impl_intersection_has_impossible_obligation<'cx, 'tcx>( [&impl1_header.predicates, &impl2_header.predicates] .into_iter() .flatten() - .map(|&(predicate, span)| { - Obligation::new(infcx.tcx, ObligationCause::dummy_with_span(span), param_env, predicate) + .map(|&predicate| { + Obligation::new(infcx.tcx, ObligationCause::dummy(), param_env, predicate) }) .chain(obligations.into_iter().cloned()) .find(|obligation: &PredicateObligation<'tcx>| { diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index 5e075984238..611ec6b00ef 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -2743,12 +2743,6 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { } ObligationCauseCode::BindingObligation(item_def_id, span) | ObligationCauseCode::ExprBindingObligation(item_def_id, span, ..) 
=> {
-                if self.tcx.is_diagnostic_item(sym::Send, item_def_id)
-                    || self.tcx.lang_items().sync_trait() == Some(item_def_id)
-                {
-                    return;
-                }
-
                 let item_name = tcx.def_path_str(item_def_id);
                 let short_item_name = with_forced_trimmed_paths!(tcx.def_path_str(item_def_id));
                 let mut multispan = MultiSpan::from(span);
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index 06a1027e5df..b31c0e655fb 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -659,6 +659,18 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
                 normalized_ty
             }
             ty::Weak => {
+                let recursion_limit = self.interner().recursion_limit();
+                if !recursion_limit.value_within_limit(self.depth) {
+                    self.selcx.infcx.err_ctxt().report_overflow_error(
+                        &ty,
+                        self.cause.span,
+                        false,
+                        |diag| {
+                            diag.note(crate::fluent_generated::trait_selection_ty_alias_overflow);
+                        },
+                    );
+                }
+
                 let infcx = self.selcx.infcx;
                 self.obligations.extend(
                     infcx.tcx.predicates_of(data.def_id).instantiate_own(infcx.tcx, data.args).map(
@@ -678,7 +690,14 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
                     },
                 ),
             );
-            infcx.tcx.type_of(data.def_id).instantiate(infcx.tcx, data.args).fold_with(self)
+            self.depth += 1;
+            let res = infcx
+                .tcx
+                .type_of(data.def_id)
+                .instantiate(infcx.tcx, data.args)
+                .fold_with(self);
+            self.depth -= 1;
+            res
             }

             ty::Inherent if !data.has_escaping_bound_vars() => {
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index 88d03003309..8a24f96743a 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -548,7 +548,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                 obligation.cause.span,
                 "GATs in trait object shouldn't have been considered",
             );
-            return Err(SelectionError::Unimplemented);
+            return Err(SelectionError::TraitNotObjectSafe(trait_predicate.trait_ref.def_id));
         }

         // This maybe belongs in wf, but that can't (doesn't) handle
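The depth accounting added to `AssocTypeNormalizer` above guards weak-alias normalization against unbounded expansion. An illustrative trigger, assuming the unstable `lazy_type_alias` feature that weak aliases come from (type names invented):

    #![feature(lazy_type_alias)]
    #![allow(incomplete_features)]

    // Normalizing `Slow` keeps re-expanding the weak alias; with the depth
    // guard above, this now reports an overflow error carrying the new
    // "consider using a struct, enum, or union instead" note.
    type Slow = Option<Slow>;

    fn takes(_: Slow) {}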
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index 19385e2d7f2..7e4d926dc8d 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -2346,14 +2346,15 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
             }

             ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
-                let ty = self.tcx().type_of(def_id);
-                if ty.skip_binder().references_error() {
-                    return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
-                }
                 // We can resolve the `impl Trait` to its concrete type,
                 // which enforces a DAG between the functions requiring
                 // the auto trait bounds in question.
-                t.rebind(vec![ty.instantiate(self.tcx(), args)])
+                match self.tcx().type_of_opaque(def_id) {
+                    Ok(ty) => t.rebind(vec![ty.instantiate(self.tcx(), args)]),
+                    Err(_) => {
+                        return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
+                    }
+                }
             }
         })
     }
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
index 729cf2f3313..efab29743f4 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -472,17 +472,11 @@ pub(crate) fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Opti
     let mut types_without_default_bounds = FxIndexSet::default();
     let sized_trait = tcx.lang_items().sized_trait();

-    if !args.is_empty() {
+    let arg_names = args.iter().map(|k| k.to_string()).filter(|k| k != "'_").collect::<Vec<_>>();
+    if !arg_names.is_empty() {
         types_without_default_bounds.extend(args.types());
         w.push('<');
-        w.push_str(
-            &args
-                .iter()
-                .map(|k| k.to_string())
-                .filter(|k| k != "'_")
-                .collect::<Vec<_>>()
-                .join(", "),
-        );
+        w.push_str(&arg_names.join(", "));
         w.push('>');
     }

diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
index 37e00c0e4bc..0cdc978a3d0 100644
--- a/compiler/rustc_traits/Cargo.toml
+++ b/compiler/rustc_traits/Cargo.toml
@@ -8,9 +8,6 @@ tracing = "0.1"
 rustc_middle = { path = "../rustc_middle" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_hir = { path = "../rustc_hir" }
-rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
-rustc_target = { path = "../rustc_target" }
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 rustc_infer = { path = "../rustc_infer" }
 rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 4d0b847533b..802391f1aad 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -172,7 +172,10 @@ fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
     use rustc_target::spec::abi::Abi::*;
     match tcx.sess.target.adjust_abi(abi) {
         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
-        RustCold => Conv::RustCold,
+
+        // This is intentionally not using `Conv::Cold`, as that has to preserve
+        // even SIMD registers, which is generally not a good trade-off.
+        RustCold => Conv::PreserveMost,

         // It's the ABI's job to select this, not ours.
         System { .. } => bug!("system abi should be selected elsewhere"),
@@ -589,13 +592,13 @@ fn make_thin_self_ptr<'tcx>(
                     for i in 0..fat_pointer_layout.fields.count() {
                         let field_layout = fat_pointer_layout.field(cx, i);

-                        if !field_layout.is_zst() {
+                        if !field_layout.is_1zst() {
                             fat_pointer_layout = field_layout;
                             continue 'descend_newtypes;
                         }
                     }

-                    bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+                    bug!("receiver has no non-1-ZST fields {:?}", fat_pointer_layout);
                 }

                 fat_pointer_layout.ty
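For reference, a hedged sketch of the `extern "rust-cold"` ABI that `conv_from_spec_abi` now lowers to `Conv::PreserveMost`; the function names are invented, and `rust_cold_cc` is the nightly feature gate:

    #![feature(rust_cold_cc)]

    // With Conv::PreserveMost, calls to this function clobber fewer
    // caller-saved registers, so the hot caller below stays small.
    extern "rust-cold" fn report_failure(code: u32) -> ! {
        panic!("unrecoverable failure: {code}");
    }

    fn hot_path(x: u32) -> u32 {
        if x == 0 {
            report_failure(x);
        }
        x.wrapping_mul(2)
    }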
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 6b4273c03e4..ed7b3496894 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -192,7 +192,7 @@ fn layout_of_uncached<'tcx>(
             let metadata_layout = cx.layout_of(metadata_ty)?;
             // If the metadata is a 1-zst, then the pointer is thin.
-            if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
+            if metadata_layout.is_1zst() {
                 return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
             }
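A quick illustration of the 1-ZST distinction these checks rely on: zero size alone is not enough, the alignment must also be 1 (the example type is invented):

    use std::mem::{align_of, size_of};

    // Zero-sized but over-aligned: a ZST that is *not* a 1-ZST.
    #[repr(align(4))]
    struct AlignedZst;

    fn main() {
        assert_eq!((size_of::<()>(), align_of::<()>()), (0, 1)); // `()` is a 1-ZST
        assert_eq!((size_of::<AlignedZst>(), align_of::<AlignedZst>()), (0, 4));
    }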
